ruby-dnn 0.13.4 → 0.14.0

This diff shows the content of publicly released versions of the package, as published to its public registry. It is provided for informational purposes only and reflects the changes between the two package versions as they appear in that registry.
data/lib/dnn/core/link.rb CHANGED
@@ -14,7 +14,6 @@ module DNN
     end
   end
 
-
   class TwoInputLink
     attr_accessor :prev1
     attr_accessor :prev2
@@ -27,7 +26,7 @@ module DNN
     end
 
     def backward(dy)
-      dy1, dy2 =* @layer.backward(dy)
+      dy1, dy2 = *@layer.backward(dy)
       @prev1&.backward(dy1)
       @prev2&.backward(dy2)
     end
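The functional content here is unchanged: `=*` already parsed as `= *`, so 0.14.0 only writes the splat assignment unambiguously. A standalone sketch in plain Ruby (not part of the gem) of the splat behavior `TwoInputLink#backward` relies on, since a layer's `backward` may return one gradient or a pair:

```ruby
# Splat assignment unpacks an Array and pads missing targets with nil.
dy1, dy2 = *[10, 20]  # layer returned two gradients
p [dy1, dy2]          # => [10, 20]

dy1, dy2 = *30        # layer returned a single gradient
p [dy1, dy2]          # => [30, nil]
```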
data/lib/dnn/core/losses.rb CHANGED
@@ -6,14 +6,14 @@ module DNN
         return nil unless hash
         loss_class = DNN.const_get(hash[:class])
         loss = loss_class.allocate
-        raise DNN_Error.new("#{loss.class} is not an instance of #{self} class.") unless loss.is_a?(self)
+        raise DNN_Error, "#{loss.class} is not an instance of #{self} class." unless loss.is_a?(self)
         loss.load_hash(hash)
         loss
       end
 
       def loss(y, t, layers = nil)
         unless y.shape == t.shape
-          raise DNN_ShapeError.new("The shape of y does not match the t shape. y shape is #{y.shape}, but t shape is #{t.shape}.")
+          raise DNN_ShapeError, "The shape of y does not match the t shape. y shape is #{y.shape}, but t shape is #{t.shape}."
         end
         loss_value = forward(y, t)
         loss_value += regularizers_forward(layers) if layers
@@ -21,11 +21,11 @@ module DNN
       end
 
       def forward(y, t)
-        raise NotImplementedError.new("Class '#{self.class.name}' has implement method 'forward'")
+        raise NotImplementedError, "Class '#{self.class.name}' has implement method 'forward'"
       end
 
       def backward(y, t)
-        raise NotImplementedError.new("Class '#{self.class.name}' has implement method 'backward'")
+        raise NotImplementedError, "Class '#{self.class.name}' has implement method 'backward'"
       end
 
       def regularizers_forward(layers)
@@ -58,7 +58,7 @@ module DNN
     class MeanSquaredError < Loss
       def forward(y, t)
         batch_size = t.shape[0]
-        0.5 * ((y - t) ** 2).sum / batch_size
+        0.5 * ((y - t)**2).sum / batch_size
       end
 
       def backward(y, t)
@@ -66,7 +66,6 @@ module DNN
       end
     end
 
-
     class MeanAbsoluteError < Loss
       def forward(y, t)
         batch_size = t.shape[0]
@@ -81,7 +80,6 @@ module DNN
       end
     end
 
-
     class Hinge < Loss
       def forward(y, t)
         @a = 1 - y * t
@@ -95,7 +93,6 @@ module DNN
       end
     end
 
-
     class HuberLoss < Loss
      def forward(y, t)
        loss_l1_value = loss_l1(y, t)
@@ -120,16 +117,19 @@ module DNN
 
       def loss_l2(y, t)
         batch_size = t.shape[0]
-        0.5 * ((y - t) ** 2).sum / batch_size
+        0.5 * ((y - t)**2).sum / batch_size
       end
     end
 
-
     class SoftmaxCrossEntropy < Loss
       attr_accessor :eps
 
-      def self.softmax(y)
-        Xumo::NMath.exp(y) / Xumo::NMath.exp(y).sum(1, keepdims: true)
+      class << self
+        def softmax(y)
+          Xumo::NMath.exp(y) / Xumo::NMath.exp(y).sum(1, keepdims: true)
+        end
+
+        alias activation softmax
       end
 
       # @param [Float] eps Value to avoid nan.
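The class-level `activation` alias added here is what `Model#predict` probes for via `respond_to?(:activation)` later in this diff. A minimal standalone sketch of the pattern, using a hypothetical `FakeLoss` and plain Ruby arrays instead of Xumo:

```ruby
# class << self defines class-level methods, and the alias gives every
# loss the same uniform :activation entry point.
class FakeLoss
  class << self
    def softmax(y)
      exps = y.map { |v| Math.exp(v) }
      sum = exps.sum
      exps.map { |v| v / sum }
    end

    alias activation softmax
  end
end

p FakeLoss.respond_to?(:activation)    # => true
p FakeLoss.activation([0.0, 1.0, 2.0]) # same result as FakeLoss.softmax
```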
@@ -156,17 +156,24 @@
       end
     end
 
-
     class SigmoidCrossEntropy < Loss
       attr_accessor :eps
 
+      class << self
+        def sigmoid(y)
+          Layers::Sigmoid.new.forward(y)
+        end
+
+        alias activation sigmoid
+      end
+
       # @param [Float] eps Value to avoid nan.
       def initialize(eps: 1e-7)
         @eps = eps
       end
 
       def forward(y, t)
-        @x = Activations::Sigmoid.new.forward(y)
+        @x = SigmoidCrossEntropy.sigmoid(y)
         -(t * Xumo::NMath.log(@x) + (1 - t) * Xumo::NMath.log(1 - @x))
       end
 
data/lib/dnn/core/merge_layers.rb CHANGED
@@ -3,20 +3,21 @@ module DNN
 
     class MergeLayer < Layers::Layer
       def self.call(x1, x2, *args)
-        self.new(*args).call(x1, x2)
+        new(*args).call(x1, x2)
       end
 
-      def call(input1, input2)
-        x1, prev_link1 = *input1
-        x2, prev_link2 = *input2
+      def call(input_tensor1, input_tensor2)
+        x1 = input_tensor1.data
+        x2 = input_tensor2.data
+        prev_link1 = input_tensor1.link
+        prev_link2 = input_tensor2.link
         build(x1.shape[1..-1]) unless built?
         y = forward(x1, x2)
         link = TwoInputLink.new(prev_link1, prev_link2, self)
-        [y, link]
+        Tensor.new(y, link)
       end
     end
 
-
     class Add < MergeLayer
       def forward(x1, x2)
         x1 + x2
@@ -27,7 +28,6 @@ module DNN
       end
     end
 
-
     class Mul < MergeLayer
       def forward(x1, x2)
         @x1, @x2 = x1, x2
@@ -39,7 +39,6 @@
       end
     end
 
-
     class Concatenate < MergeLayer
       attr_reader :axis
 
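`MergeLayer#call` now consumes and produces `Tensor` objects (a data/link pair) instead of raw `[data, link]` arrays. A self-contained sketch of the shape of that change, using stand-in classes rather than the gem itself:

```ruby
# Stand-in classes, illustrative only: the calling convention change,
# where call takes and returns Tensors instead of [data, link] pairs.
Tensor = Struct.new(:data, :link)
TwoInputLink = Struct.new(:prev1, :prev2, :layer)

class FakeAdd
  def call(input_tensor1, input_tensor2)
    y = input_tensor1.data + input_tensor2.data
    link = TwoInputLink.new(input_tensor1.link, input_tensor2.link, self)
    Tensor.new(y, link)  # 0.13.4 returned the pair [y, link] instead
  end
end

out = FakeAdd.new.call(Tensor.new(1.0, nil), Tensor.new(2.0, nil))
p out.data  # => 3.0, the forward result
p out.link  # the TwoInputLink used for backprop
```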
data/lib/dnn/core/models.rb CHANGED
@@ -1,16 +1,16 @@
 module DNN
   module Models
-
     # This class deals with the model of the network.
     class Model
       attr_accessor :optimizer
       attr_accessor :loss_func
+      attr_reader :last_log
 
       # Load marshal model.
       # @param [String] file_name File name of marshal model to load.
       # @return [DNN::Models::Model] Return the loaded model.
       def self.load(file_name)
-        model = self.new
+        model = new
         loader = Loaders::MarshalLoader.new(model)
         loader.load(file_name)
         model
@@ -21,49 +21,9 @@ module DNN
         @loss_func = nil
         @last_link = nil
         @built = false
-        @callbacks = {
-          before_epoch: [],
-          after_epoch: [],
-          before_train_on_batch: [],
-          after_train_on_batch: [],
-          before_test_on_batch: [],
-          after_test_on_batch: [],
-        }
+        @callbacks = []
         @layers_cache = nil
-      end
-
-      # This method is provided for compatibility with v0.12.4.
-      # Load hash model parameters.
-      # @param [Hash] hash Hash to load model parameters.
-      def load_hash_params(hash)
-        has_param_layers_params = hash[:params]
-        has_param_layers_index = 0
-        has_param_layers.uniq.each do |layer|
-          hash_params = has_param_layers_params[has_param_layers_index]
-          hash_params.each do |key, (shape, bin)|
-            data = Xumo::SFloat.from_binary(bin).reshape(*shape)
-            layer.get_params[key].data = data
-          end
-          has_param_layers_index += 1
-        end
-      end
-
-      # This method is provided for compatibility with v0.12.4.
-      # Load json model parameters.
-      # @param [String] json_str JSON string to load model parameters.
-      def load_json_params(json_str)
-        hash = JSON.parse(json_str, symbolize_names: true)
-        has_param_layers_params = hash[:params]
-        has_param_layers_index = 0
-        has_param_layers.uniq.each do |layer|
-          hash_params = has_param_layers_params[has_param_layers_index]
-          hash_params.each do |key, (shape, base64_param)|
-            bin = Base64.decode64(base64_param)
-            data = Xumo::SFloat.from_binary(bin).reshape(*shape)
-            layer.get_params[key].data = data
-          end
-          has_param_layers_index += 1
-        end
+        @last_log = {}
       end
 
       # Set optimizer and loss_func to model.
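Two constructor changes: `@callbacks` becomes a flat array of callback objects (the per-event hash of procs is gone, along with the v0.12.4 compatibility loaders), and the new `@last_log` hash, exposed through `attr_reader :last_log`, holds the most recent metrics. A hedged usage sketch; the layer, optimizer, and loss classes come from elsewhere in the gem, and the random data is only there to make the snippet self-contained:

```ruby
require "dnn"

x = Numo::SFloat.new(32, 4).rand
y = Numo::SFloat.new(32, 2).rand

model = DNN::Models::Sequential.new
model.add(DNN::Layers::InputLayer.new(4))
model.add(DNN::Layers::Dense.new(2))
model.setup(DNN::Optimizers::SGD.new, DNN::Losses::MeanSquaredError.new)
model.train(x, y, 3, batch_size: 8, verbose: false)

p model.last_log[:epoch]       # => 3
p model.last_log[:train_loss]  # recorded by train_on_batch (see below)
```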
@@ -71,10 +31,10 @@ module DNN
       # @param [DNN::Losses::Loss] loss_func Loss function to use for learning.
       def setup(optimizer, loss_func)
         unless optimizer.is_a?(Optimizers::Optimizer)
-          raise TypeError.new("optimizer:#{optimizer.class} is not an instance of DNN::Optimizers::Optimizer class.")
+          raise TypeError, "optimizer:#{optimizer.class} is not an instance of DNN::Optimizers::Optimizer class."
         end
         unless loss_func.is_a?(Losses::Loss)
-          raise TypeError.new("loss_func:#{loss_func.class} is not an instance of DNN::Losses::Loss class.")
+          raise TypeError, "loss_func:#{loss_func.class} is not an instance of DNN::Losses::Loss class."
         end
         @optimizer = optimizer
         @loss_func = loss_func
@@ -85,8 +45,8 @@
       # @param [Numo::SFloat] x Input training data.
       # @param [Numo::SFloat] y Output training data.
       # @param [Integer] epochs Number of training.
-      # @param [Integer] initial_epoch Initial epoch.
       # @param [Integer] batch_size Batch size used for one training.
+      # @param [Integer] initial_epoch Initial epoch.
       # @param [Array | NilClass] test If you to test the model for every 1 epoch,
       #                                specify [x_test, y_test]. Don't test to the model, specify nil.
       # @param [Boolean] verbose Set true to display the log. If false is set, the log is not displayed.
@@ -95,57 +55,107 @@ module DNN
                 initial_epoch: 1,
                 test: nil,
                 verbose: true)
-        raise DNN_Error.new("The model is not optimizer setup complete.") unless @optimizer
-        raise DNN_Error.new("The model is not loss_func setup complete.") unless @loss_func
         check_xy_type(x, y)
-        iter = Iterator.new(x, y)
-        num_train_datas = x.is_a?(Array) ? x[0].shape[0] : x.shape[0]
-        (initial_epoch..epochs).each do |epoch|
-          call_callbacks(:before_epoch, epoch)
-          puts "【 epoch #{epoch}/#{epochs} 】" if verbose
-          iter.foreach(batch_size) do |x_batch, y_batch, index|
-            loss_value = train_on_batch(x_batch, y_batch)
-            if loss_value.is_a?(Xumo::SFloat)
-              loss_value = loss_value.mean
-            elsif loss_value.nan?
-              puts "\nloss is nan" if verbose
-              return
-            end
-            num_trained_datas = (index + 1) * batch_size
-            num_trained_datas = num_trained_datas > num_train_datas ? num_train_datas : num_trained_datas
-            log = "\r"
-            40.times do |i|
-              if i < num_trained_datas * 40 / num_train_datas
-                log << "="
-              elsif i == num_trained_datas * 40 / num_train_datas
-                log << ">"
-              else
-                log << "_"
+        train_iterator = Iterator.new(x, y)
+        train_by_iterator(train_iterator, epochs,
+                          batch_size: batch_size,
+                          initial_epoch: initial_epoch,
+                          test: test,
+                          verbose: verbose)
+      end
+
+      alias fit train
+
+      # Start training by iterator.
+      # Setup the model before use this method.
+      # @param [Iterator] train_iterator Iterator used for training.
+      # @param [Integer] epochs Number of training.
+      # @param [Integer] batch_size Batch size used for one training.
+      # @param [Integer] initial_epoch Initial epoch.
+      # @param [Array | NilClass] test If you to test the model for every 1 epoch,
+      #                                specify [x_test, y_test]. Don't test to the model, specify nil.
+      # @param [Boolean] verbose Set true to display the log. If false is set, the log is not displayed.
+      def train_by_iterator(train_iterator, epochs,
+                            batch_size: 1,
+                            initial_epoch: 1,
+                            test: nil,
+                            verbose: true)
+        raise DNN_Error, "The model is not optimizer setup complete." unless @optimizer
+        raise DNN_Error, "The model is not loss_func setup complete." unless @loss_func
+
+        num_train_datas = train_iterator.num_datas
+        num_train_datas = num_train_datas / batch_size * batch_size if train_iterator.last_round_down
+
+        stopped = catch(:stop) do
+          (initial_epoch..epochs).each do |epoch|
+            @last_log[:epoch] = epoch
+            call_callbacks(:before_epoch)
+            puts "【 epoch #{epoch}/#{epochs} 】" if verbose
+
+            train_iterator.foreach(batch_size) do |x_batch, y_batch, index|
+              train_step_met = train_step(x_batch, y_batch)
+              num_trained_datas = (index + 1) * batch_size
+              num_trained_datas = num_trained_datas > num_train_datas ? num_train_datas : num_trained_datas
+              log = "\r"
+              40.times do |i|
+                if i < num_trained_datas * 40 / num_train_datas
+                  log << "="
+                elsif i == num_trained_datas * 40 / num_train_datas
+                  log << ">"
+                else
+                  log << "_"
+                end
               end
+
+              log << " #{num_trained_datas}/#{num_train_datas} "
+              log << metrics_to_str(train_step_met)
+              print log if verbose
             end
-            log << " #{num_trained_datas}/#{num_train_datas} loss: #{sprintf('%.8f', loss_value)}"
-            print log if verbose
-          end
-          if test
-            acc, test_loss = accuracy(test[0], test[1], batch_size: batch_size)
-            print " accuracy: #{acc}, test loss: #{sprintf('%.8f', test_loss)}" if verbose
+
+            if test
+              test_met = test(test[0], test[1], batch_size: batch_size)
+              print " " + metrics_to_str(test_met) if verbose
+            end
+            puts "" if verbose
+            call_callbacks(:after_epoch)
           end
-          puts "" if verbose
-          call_callbacks(:after_epoch, epoch)
+          nil
+        end
+
+        if stopped
+          puts "\n#{stopped}" if verbose
         end
       end
 
-      alias fit train
+      alias fit_by_iterator train_by_iterator
+
+      # Implement the training process to be performed in one step.
+      # @param [Numo::SFloat] x Input training data.
+      # @param [Numo::SFloat] y Output training data.
+      # @return [Hash] Hash of contents to be output to log.
+      private def train_step(x, y)
+        loss_value = train_on_batch(x, y)
+        { loss: loss_value.mean }
+      end
+
+      # Implement the test process to be performed.
+      # @param [Numo::SFloat] x Input training data.
+      # @param [Numo::SFloat] y Output training data.
+      # @param [Integer] batch_size Batch size used for one test.
+      # @return [Hash] Hash of contents to be output to log.
+      private def test(x, y, batch_size: 100)
+        acc, test_loss = accuracy(x, y, batch_size: batch_size)
+        { accuracy: acc, test_loss: test_loss.mean }
+      end
 
       # Training once.
       # Setup the model before use this method.
       # @param [Numo::SFloat] x Input training data.
       # @param [Numo::SFloat] y Output training data.
-      # @param [Integer] batch_size Batch size used for one test.
       # @return [Float | Numo::SFloat] Return loss value in the form of Float or Numo::SFloat.
       def train_on_batch(x, y)
-        raise DNN_Error.new("The model is not optimizer setup complete.") unless @optimizer
-        raise DNN_Error.new("The model is not loss_func setup complete.") unless @loss_func
+        raise DNN_Error, "The model is not optimizer setup complete." unless @optimizer
+        raise DNN_Error, "The model is not loss_func setup complete." unless @loss_func
         check_xy_type(x, y)
         call_callbacks(:before_train_on_batch)
         x = forward(x, true)
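`train`/`fit` is now a thin wrapper over `train_by_iterator`, and the per-batch work is factored into the private `train_step`, which returns a hash of metrics for `metrics_to_str` to format into the progress line. Since it is an ordinary private method, a subclass can override it to log extra values; a hypothetical sketch:

```ruby
# Hypothetical subclass: train_by_iterator calls train_step once per
# batch and prints whatever hash it returns in the progress log.
class VerboseModel < DNN::Models::Sequential
  private def train_step(x, y)
    loss_value = train_on_batch(x, y)
    # :lr assumes an optimizer exposing #lr, as SGD does
    { loss: loss_value.mean, lr: @optimizer.lr }
  end
end
```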
@@ -154,7 +164,8 @@
         backward(dy)
         @optimizer.update(layers)
         @loss_func.regularizers_backward(layers)
-        call_callbacks(:after_train_on_batch, loss_value)
+        @last_log[:train_loss] = loss_value
+        call_callbacks(:after_train_on_batch)
         loss_value
       end
 
@@ -169,15 +180,18 @@
         batch_size = batch_size >= num_test_datas[0] ? num_test_datas : batch_size
         iter = Iterator.new(x, y, random: false)
         total_correct = 0
-        sum_loss = 0
+        sum_loss = Xumo::SFloat[0]
         max_steps = (num_test_datas.to_f / batch_size).ceil
         iter.foreach(batch_size) do |x_batch, y_batch|
           correct, loss_value = test_on_batch(x_batch, y_batch)
           total_correct += correct
-          sum_loss += loss_value.is_a?(Xumo::SFloat) ? loss_value.mean : loss_value
+          sum_loss += loss_value.mean
         end
         mean_loss = sum_loss / max_steps
-        [total_correct.to_f / num_test_datas, mean_loss]
+        acc = total_correct.to_f / num_test_datas
+        @last_log[:test_loss] = mean_loss
+        @last_log[:test_accuracy] = acc
+        [acc, mean_loss]
       end
 
       # Evaluate once.
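Seeding `sum_loss` with `Xumo::SFloat[0]` keeps the running mean an NArray, and the method now records its results in `last_log` as a side effect. Continuing the earlier sketch:

```ruby
acc, loss = model.accuracy(x, y, batch_size: 8)
p model.last_log[:test_accuracy] == acc  # => true
p model.last_log[:test_loss]             # same value as loss (Xumo::SFloat)
```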
@@ -189,7 +203,7 @@
         x = forward(x, false)
         correct = evaluate(x, y)
         loss_value = @loss_func.loss(x, y, layers)
-        call_callbacks(:after_test_on_batch, loss_value)
+        call_callbacks(:after_test_on_batch)
         [correct, loss_value]
       end
 
@@ -214,42 +228,33 @@
 
       # Predict data.
       # @param [Numo::SFloat] x Input data.
-      def predict(x)
+      # @param [Boolean] use_loss_activation Use loss activation when loss has an activation.
+      def predict(x, use_loss_activation: true)
         check_xy_type(x)
-        forward(x, false)
+        y = forward(x, false)
+        if use_loss_activation && @loss_func.class.respond_to?(:activation)
+          y = @loss_func.class.activation(y)
+        end
+        y
       end
 
       # Predict one data.
       # @param [Numo::SFloat] x Input data. However, x is single data.
-      def predict1(x)
+      def predict1(x, use_loss_activation: true)
         check_xy_type(x)
-        predict(x.reshape(1, *x.shape))[0, false]
+        predict(x.reshape(1, *x.shape), use_loss_activation: use_loss_activation)[0, false]
       end
 
       # Add callback function.
-      # @param [Symbol] event Callback event. The following can be used for event.
-      #                       before_epoch: Process: performed before one training.
-      #                       after_epoch: Process: performed after one training.
-      #                       before_train_on_batch: Set the proc to be performed before train on batch processing.
-      #                       after_train_on_batch: Set the proc to be performed after train on batch processing.
-      #                       before_test_on_batch: Set the proc to be performed before test on batch processing.
-      #                       after_test_on_batch: Set the proc to be performed after test on batch processing.
-      def add_callback(event, callback)
-        raise DNN_UnknownEventError.new("Unknown event #{event}.") unless @callbacks.has_key?(event)
-        @callbacks[event] << callback
+      # @param [Callback] callback Callback object.
+      def add_callback(callback)
+        callback.model = self
+        @callbacks << callback
       end
 
       # Clear the callback function registered for each event.
-      # @param [Symbol] event Callback event. The following can be used for event.
-      #                       before_epoch: Process: performed before one training.
-      #                       after_epoch: Process: performed after one training.
-      #                       before_train_on_batch: Set the proc to be performed before train on batch processing.
-      #                       after_train_on_batch: Set the proc to be performed after train on batch processing.
-      #                       before_test_on_batch: Set the proc to be performed before test on batch processing.
-      #                       after_test_on_batch: Set the proc to be performed after test on batch processing.
-      def clear_callbacks(event)
-        raise DNN_UnknownEventError.new("Unknown event #{event}.") unless @callbacks.has_key?(event)
-        @callbacks[event] = []
+      def clear_callbacks
+        @callbacks = []
       end
 
       # Save the model in marshal format.
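`predict` can now finish with the loss function's activation (softmax for `SoftmaxCrossEntropy`, sigmoid for `SigmoidCrossEntropy`), and `use_loss_activation: false` returns the raw network output. A brief hedged sketch:

```ruby
# Assumes a model whose loss was set up as DNN::Losses::SoftmaxCrossEntropy.new,
# so the loss class responds to :activation (the alias added in losses.rb above).
probs  = model.predict(x)                             # activation applied to the output
logits = model.predict(x, use_loss_activation: false) # raw network output
single = model.predict1(x[0, false])                  # one sample; same keyword available
```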
@@ -267,7 +272,7 @@
       # Get the all layers.
       # @return [Array] All layers array.
       def layers
-        raise DNN_Error.new("This model is not built. You need build this model using predict or train.") unless built?
+        raise DNN_Error, "This model is not built. You need build this model using predict or train." unless built?
         return @layers_cache if @layers_cache
         layers = []
         get_layers = -> link do
@@ -291,7 +296,7 @@
       end
 
       # Get the layer that the model has.
-      # @param [Symbol] The name of the layer to get.
+      # @param [Symbol] name The name of the layer to get.
       # @return [DNN::Layers::Layer] Return the layer.
       def get_layer(name)
         layers.find { |layer| layer.name == name }
@@ -307,21 +312,22 @@
       def forward(x, learning_phase)
         DNN.learning_phase = learning_phase
         @layers_cache = nil
-        y, @last_link = call(x)
+        output_tensor = call(Tensor.new(x, nil))
+        @last_link = output_tensor.link
         unless @built
           @built = true
           naming
         end
-        y
+        output_tensor.data
       end
 
       def backward(dy)
         @last_link.backward(dy)
       end
 
-      def call_callbacks(event, *args)
-        @callbacks[event].each do |callback|
-          callback.call(*args)
+      def call_callbacks(event)
+        @callbacks.each do |callback|
+          callback.send(event) if callback.respond_to?(event)
         end
       end
 
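`call_callbacks` now duck-types: each registered object receives an event only if it responds to that method, so a callback implements just the hooks it cares about. Combined with the `catch(:stop)` in `train_by_iterator`, a callback can halt training by throwing `:stop` with a message. A minimal hand-rolled sketch, continuing the earlier example (the `Callback` base class referenced in the `add_callback` docs is not part of this diff):

```ruby
# Any object responding to the event names works; add_callback assigns
# the model so hooks can read last_log.
class EarlyStop
  attr_accessor :model  # set by Model#add_callback

  def after_epoch
    acc = model.last_log[:test_accuracy]
    # the thrown message is caught and printed by train_by_iterator
    throw :stop, "early stop at epoch #{model.last_log[:epoch]}" if acc && acc > 0.95
  end
end

model.add_callback(EarlyStop.new)
```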
@@ -338,17 +344,20 @@
         end
       end
 
+      def metrics_to_str(mertics)
+        mertics.map { |key, num| "#{key}: #{sprintf('%.4f', num)}" }.join(", ")
+      end
+
       def check_xy_type(x, y = nil)
         if !x.is_a?(Xumo::SFloat) && !x.is_a?(Array)
-          raise TypeError.new("x:#{x.class.name} is not an instance of #{Xumo::SFloat.name} class or Array class.")
+          raise TypeError, "x:#{x.class.name} is not an instance of #{Xumo::SFloat.name} class or Array class."
         end
         if y && !y.is_a?(Xumo::SFloat) && !x.is_a?(Array)
-          raise TypeError.new("y:#{y.class.name} is not an instance of #{Xumo::SFloat.name} class or Array class.")
+          raise TypeError, "y:#{y.class.name} is not an instance of #{Xumo::SFloat.name} class or Array class."
         end
       end
     end
 
-
     class Sequential < Model
       attr_reader :stack
 
@@ -363,7 +372,7 @@
       # @return [DNN::Models::Model] Return self.
       def add(layer)
         unless layer.is_a?(Layers::Layer) || layer.is_a?(Model)
-          raise TypeError.new("layer: #{layer.class.name} is not an instance of the DNN::Layers::Layer class or DNN::Models::Model class.")
+          raise TypeError, "layer: #{layer.class.name} is not an instance of the DNN::Layers::Layer class or DNN::Models::Model class."
         end
         @stack << layer
         self
@@ -376,7 +385,7 @@
       # @return [Boolean] Return true if success for remove layer.
       def remove(layer)
         unless layer.is_a?(Layers::Layer) || layer.is_a?(Model)
-          raise TypeError.new("layer: #{layer.class.name} is not an instance of the DNN::Layers::Layer class or DNN::Models::Model class.")
+          raise TypeError, "layer: #{layer.class.name} is not an instance of the DNN::Layers::Layer class or DNN::Models::Model class."
        end
         @stack.delete(layer) ? true : false
       end
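Since `add` returns `self` and `remove` returns a boolean, stack editing chains naturally. A brief sketch using classes from elsewhere in the gem:

```ruby
model = DNN::Models::Sequential.new
dense = DNN::Layers::Dense.new(10)
model.add(DNN::Layers::InputLayer.new(4)).add(dense)  # add returns self, so calls chain
p model.remove(dense)  # => true
p model.remove(dense)  # => false, it is no longer in the stack
```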