ruby-dnn 0.12.4 → 0.13.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/examples/cifar100_example.rb +1 -1
- data/examples/cifar10_example.rb +1 -1
- data/examples/dcgan/dcgan.rb +7 -3
- data/examples/dcgan/imgen.rb +1 -1
- data/examples/dcgan/train.rb +5 -2
- data/examples/iris_example.rb +1 -1
- data/examples/mnist_conv2d_example.rb +1 -1
- data/examples/mnist_define_by_run.rb +1 -1
- data/examples/mnist_example.rb +1 -1
- data/examples/mnist_lstm_example.rb +1 -1
- data/lib/dnn.rb +2 -0
- data/lib/dnn/core/embedding.rb +1 -2
- data/lib/dnn/core/error.rb +2 -0
- data/lib/dnn/core/global.rb +9 -0
- data/lib/dnn/core/initializers.rb +1 -1
- data/lib/dnn/core/iterator.rb +11 -3
- data/lib/dnn/core/layers.rb +13 -15
- data/lib/dnn/core/link.rb +11 -0
- data/lib/dnn/core/losses.rb +52 -63
- data/lib/dnn/core/merge_layers.rb +3 -3
- data/lib/dnn/core/models.rb +130 -140
- data/lib/dnn/core/normalizations.rb +2 -10
- data/lib/dnn/core/optimizers.rb +62 -57
- data/lib/dnn/core/param.rb +2 -0
- data/lib/dnn/core/savers.rb +138 -0
- data/lib/dnn/{cifar10.rb → datasets/cifar10.rb} +1 -1
- data/lib/dnn/{cifar100.rb → datasets/cifar100.rb} +1 -1
- data/lib/dnn/{downloader.rb → datasets/downloader.rb} +0 -0
- data/lib/dnn/{fashion-mnist.rb → datasets/fashion-mnist.rb} +1 -1
- data/lib/dnn/{iris.rb → datasets/iris.rb} +0 -0
- data/lib/dnn/{mnist.rb → datasets/mnist.rb} +1 -1
- data/lib/dnn/version.rb +1 -1
- metadata +10 -8
data/lib/dnn/core/merge_layers.rb
CHANGED
@@ -7,12 +7,12 @@ module DNN
      end

      def call(input1, input2)
-        x1, prev_link1
-        x2, prev_link2
+        x1, prev_link1 = *input1
+        x2, prev_link2 = *input2
        build(x1.shape[1..-1]) unless built?
        y = forward(x1, x2)
        link = TwoInputLink.new(prev_link1, prev_link2, self)
-        [y, link
+        [y, link]
      end
    end

data/lib/dnn/core/models.rb
CHANGED
@@ -1,25 +1,35 @@
-require "zlib"
-require "json"
-require "base64"
-
module DNN
  module Models

    # This class deals with the model of the network.
    class Model
+      attr_accessor :optimizer
+      attr_accessor :loss_func
+
      # Load marshal model.
      # @param [String] file_name File name of marshal model to load.
      def self.load(file_name)
-
+        loader = Loaders::MarshalLoader.new(self.new)
+        loader.load(file_name)
      end

      def initialize
        @optimizer = nil
+        @loss_func = nil
        @last_link = nil
-        @setup_completed = false
        @built = false
+        @callbacks = {
+          before_epoch: [],
+          after_epoch: [],
+          before_train_on_batch: [],
+          after_train_on_batch: [],
+          before_test_on_batch: [],
+          after_test_on_batch: [],
+        }
+        @layers_cache = nil
      end

+      # This method is provided for compatibility with v0.12.4.
      # Load hash model parameters.
      # @param [Hash] hash Hash to load model parameters.
      def load_hash_params(hash)
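Not part of the diff, just a usage note: because optimizer and loss_func are now plain attr_accessors, reading them on a freshly created model returns nil instead of raising the old "not setup complete" error. A minimal sketch, assuming a Sequential model as in the bundled examples:

    require "dnn"

    model = DNN::Models::Sequential.new
    model.optimizer  # => nil until setup is called
    model.loss_func  # => nil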
@@ -35,6 +45,7 @@ module DNN
        end
      end

+      # This method is provided for compatibility with v0.12.4.
      # Load json model parameters.
      # @param [String] json_str JSON string to load model parameters.
      def load_json_params(json_str)
@@ -52,30 +63,6 @@ module DNN
        end
      end

-      # Convert model parameters to hash.
-      # @return [Hash] Return the hash of model parameters.
-      def params_to_hash
-        has_param_layers_params = has_param_layers.uniq.map do |layer|
-          layer.get_params.map { |key, param|
-            [key, [param.data.shape, param.data.to_binary]]
-          }.to_h
-        end
-        { version: VERSION, params: has_param_layers_params }
-      end
-
-      # Convert model parameters to JSON string.
-      # @return [String] Return the JSON string.
-      def params_to_json
-        has_param_layers_params = has_param_layers.uniq.map do |layer|
-          layer.get_params.map { |key, param|
-            base64_data = Base64.encode64(param.data.to_binary)
-            [key, [param.data.shape, base64_data]]
-          }.to_h
-        end
-        hash = { version: VERSION, params: has_param_layers_params }
-        JSON.dump(hash)
-      end
-
      # Set optimizer and loss_func to model.
      # @param [DNN::Optimizers::Optimizer] optimizer Optimizer to use for learning.
      # @param [DNN::Losses::Loss] loss_func Loss function to use for learning.
@@ -86,7 +73,6 @@ module DNN
        unless loss_func.is_a?(Losses::Loss)
          raise TypeError.new("loss_func:#{loss_func.class} is not an instance of DNN::Losses::Loss class.")
        end
-        @setup_completed = true
        @optimizer = optimizer
        @loss_func = loss_func
      end
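An illustrative setup call, not part of the diff. The setup signature itself is unchanged; the related change is that a missing optimizer and a missing loss function are now reported separately when training starts, as the next hunk shows. Adam and SoftmaxCrossEntropy are the standard ruby-dnn classes:

    model.setup(DNN::Optimizers::Adam.new, DNN::Losses::SoftmaxCrossEntropy.new)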
@@ -100,32 +86,20 @@ module DNN
      # @param [Array | NilClass] test If you to test the model for every 1 epoch,
      #                                specify [x_test, y_test]. Don't test to the model, specify nil.
      # @param [Boolean] verbose Set true to display the log. If false is set, the log is not displayed.
-      # @param [Lambda] before_epoch_cbk Process performed before one training.
-      # @param [Lambda] after_epoch_cbk Process performed after one training.
-      # @param [Lambda] before_train_on_batch_cbk Set the proc to be performed before train on batch processing.
-      # @param [Lambda] after_train_on_batch_cbk Set the proc to be performed after train on batch processing.
-      # @param [Lambda] before_test_on_batch_cbk Set the proc to be performed before test on batch processing.
-      # @param [Lambda] after_test_on_batch_cbk Set the proc to be performed after test on batch processing.
      def train(x, y, epochs,
                batch_size: 1,
                test: nil,
-                verbose: true
-
-
-                before_train_on_batch_cbk: nil,
-                after_train_on_batch_cbk: nil,
-                before_test_on_batch_cbk: nil,
-                after_test_on_batch_cbk: nil)
-        raise DNN_Error.new("The model is not setup complete.") unless setup_completed?
+                verbose: true)
+        raise DNN_Error.new("The model is not optimizer setup complete.") unless @optimizer
+        raise DNN_Error.new("The model is not loss_func setup complete.") unless @loss_func
        check_xy_type(x, y)
        iter = Iterator.new(x, y)
-        num_train_datas = x.shape[0]
+        num_train_datas = x.is_a?(Array) ? x[0].shape[0] : x.shape[0]
        (1..epochs).each do |epoch|
-
+          call_callbacks(:before_epoch, epoch)
          puts "【 epoch #{epoch}/#{epochs} 】" if verbose
          iter.foreach(batch_size) do |x_batch, y_batch, index|
-            loss_value = train_on_batch(x_batch, y_batch
-                                        after_train_on_batch_cbk: after_train_on_batch_cbk)
+            loss_value = train_on_batch(x_batch, y_batch)
            if loss_value.is_a?(Xumo::SFloat)
              loss_value = loss_value.mean
            elsif loss_value.nan?
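Not part of the diff: a hedged sketch of migrating from the removed *_cbk keyword arguments to callback registration. x_train, y_train, x_test and y_test are placeholder Numo::SFloat arrays, and the commented 0.12.4 call is shown only for comparison:

    # 0.12.4 style (removed): procs passed as keyword arguments on each call
    #   model.train(x_train, y_train, 10, batch_size: 128,
    #               before_epoch_cbk: -> { puts "epoch starts" })

    # 0.13.0 style per this diff: register the proc once, then train normally
    model.add_callback(:before_epoch, ->(epoch) { puts "epoch #{epoch} starts" })
    model.train(x_train, y_train, 10, batch_size: 128, test: [x_test, y_test], verbose: true)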
@@ -148,85 +122,82 @@ module DNN
            print log if verbose
          end
          if test
-            acc, test_loss =
-
-            print " accurate: #{acc}, test loss: #{sprintf('%.8f', test_loss)}" if verbose
+            acc, test_loss = accuracy(test[0], test[1], batch_size: batch_size)
+            print " accuracy: #{acc}, test loss: #{sprintf('%.8f', test_loss)}" if verbose
          end
          puts "" if verbose
-
+          call_callbacks(:after_epoch, epoch)
        end
      end

+      alias fit train
+
      # Training once.
      # Setup the model before use this method.
      # @param [Numo::SFloat] x Input training data.
      # @param [Numo::SFloat] y Output training data.
-      # @param [Lambda] before_train_on_batch_cbk Set the proc to be performed before train on batch processing.
-      # @param [Lambda] after_train_on_batch_cbk Set the proc to be performed after train on batch processing.
      # @return [Float | Numo::SFloat] Return loss value in the form of Float or Numo::SFloat.
-      def train_on_batch(x, y
-        raise DNN_Error.new("The model is not setup complete.") unless
+      def train_on_batch(x, y)
+        raise DNN_Error.new("The model is not optimizer setup complete.") unless @optimizer
+        raise DNN_Error.new("The model is not loss_func setup complete.") unless @loss_func
        check_xy_type(x, y)
-
+        call_callbacks(:before_train_on_batch)
        x = forward(x, true)
-        loss_value = @loss_func.
-        dy = @loss_func.backward(
+        loss_value = @loss_func.loss(x, y, layers)
+        dy = @loss_func.backward(x, y)
        backward(dy)
        @optimizer.update(layers.uniq)
-
+        @loss_func.regularizers_backward(layers)
+        call_callbacks(:after_train_on_batch, loss_value)
        loss_value
      end

-      # Evaluate model and get
+      # Evaluate model and get accuracy of test data.
      # @param [Numo::SFloat] x Input test data.
      # @param [Numo::SFloat] y Output test data.
-      # @
-
-      # @return [Array] Returns the test data accurate and mean loss in the form [accurate, mean_loss].
-      def accurate(x, y, batch_size: 100, before_test_on_batch_cbk: nil, after_test_on_batch_cbk: nil)
+      # @return [Array] Returns the test data accuracy and mean loss in the form [accuracy, mean_loss].
+      def accuracy(x, y, batch_size: 100)
        check_xy_type(x, y)
-
+        num_test_datas = x.is_a?(Array) ? x[0].shape[0] : x.shape[0]
+        batch_size = batch_size >= num_test_datas[0] ? num_test_datas : batch_size
        iter = Iterator.new(x, y, random: false)
        total_correct = 0
        sum_loss = 0
-        max_steps = (
+        max_steps = (num_test_datas.to_f / batch_size).ceil
        iter.foreach(batch_size) do |x_batch, y_batch|
-          correct, loss_value = test_on_batch(x_batch, y_batch
-                                              after_test_on_batch_cbk: after_test_on_batch_cbk)
+          correct, loss_value = test_on_batch(x_batch, y_batch)
          total_correct += correct
          sum_loss += loss_value.is_a?(Xumo::SFloat) ? loss_value.mean : loss_value
        end
        mean_loss = sum_loss / max_steps
-        [total_correct.to_f /
+        [total_correct.to_f / num_test_datas, mean_loss]
      end

      # Evaluate once.
      # @param [Numo::SFloat] x Input test data.
      # @param [Numo::SFloat] y Output test data.
-      # @
-
-
-      def test_on_batch(x, y, before_test_on_batch_cbk: nil, after_test_on_batch_cbk: nil)
-        before_test_on_batch_cbk&.call
+      # @return [Array] Returns the test data accuracy and mean loss in the form [accuracy, mean_loss].
+      def test_on_batch(x, y)
+        call_callbacks(:before_test_on_batch)
        x = forward(x, false)
        correct = evaluate(x, y)
-        loss_value = @loss_func.
-
+        loss_value = @loss_func.loss(x, y, layers)
+        call_callbacks(:after_test_on_batch, loss_value)
        [correct, loss_value]
      end

      private def evaluate(y, t)
-
-
-
+        if y.shape[1..-1] == [1]
+          correct = 0
+          y.shape[0].times do |i|
            if @loss_func.is_a?(Losses::SigmoidCrossEntropy)
              correct += 1 if (y[i, 0] < 0 && t[i, 0] < 0.5) || (y[i, 0] >= 0 && t[i, 0] >= 0.5)
            else
              correct += 1 if (y[i, 0] < 0 && t[i, 0] < 0) || (y[i, 0] >= 0 && t[i, 0] >= 0)
            end
-          else
-            correct += 1 if y[i, true].max_index == t[i, true].max_index
          end
+        else
+          correct = y.max_index(axis: 1).eq(t.max_index(axis: 1)).count
        end
        correct
      end
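A short usage sketch (not part of the diff) for the renamed evaluation method: accurate becomes accuracy, keeps the batch_size: keyword, and no longer takes callback keyword arguments. x_test and y_test are placeholder test arrays:

    acc, mean_loss = model.accuracy(x_test, y_test, batch_size: 100)
    puts "accuracy: #{acc}, test loss: #{mean_loss}"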
@@ -245,17 +216,37 @@ module DNN
        predict(x.reshape(1, *x.shape))[0, false]
      end

+      # Add callback function.
+      # @param [Symbol] event Callback event. The following can be used for event.
+      #                       before_epoch: Process: performed before one training.
+      #                       after_epoch: Process: performed after one training.
+      #                       before_train_on_batch: Set the proc to be performed before train on batch processing.
+      #                       after_train_on_batch: Set the proc to be performed after train on batch processing.
+      #                       before_test_on_batch: Set the proc to be performed before test on batch processing.
+      #                       after_test_on_batch: Set the proc to be performed after test on batch processing.
+      def add_callback(event, callback)
+        raise DNN_UnknownEventError.new("Unknown event #{event}.") unless @callbacks.has_key?(event)
+        @callbacks[event] << callback
+      end
+
+      # Clear the callback function registered for each event.
+      # @param [Symbol] event Callback event. The following can be used for event.
+      #                       before_epoch: Process: performed before one training.
+      #                       after_epoch: Process: performed after one training.
+      #                       before_train_on_batch: Set the proc to be performed before train on batch processing.
+      #                       after_train_on_batch: Set the proc to be performed after train on batch processing.
+      #                       before_test_on_batch: Set the proc to be performed before test on batch processing.
+      #                       after_test_on_batch: Set the proc to be performed after test on batch processing.
+      def clear_callbacks(event)
+        raise DNN_UnknownEventError.new("Unknown event #{event}.") unless @callbacks.has_key?(event)
+        @callbacks[event] = []
+      end
+
      # Save the model in marshal format.
      # @param [String] file_name Name to save model.
      def save(file_name)
-
-
-        File.binwrite(file_name, bin)
-      rescue Errno::ENOENT
-        dir_name = file_name.match(%r`(.*)/.+$`)[1]
-        Dir.mkdir(dir_name)
-        File.binwrite(file_name, bin)
-      end
+        saver = Savers::MarshalSaver.new(self)
+        saver.save(file_name)
      end

      # @return [DNN::Models::Model] Return the copy this model.
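Not part of the diff: save and Model.load keep their public API while delegating to the new Savers::MarshalSaver and Loaders::MarshalLoader from the added savers.rb. A sketch, assuming an arbitrary file name and a model class whose constructor needs no arguments (Model.load calls self.new):

    model.save("trained_model.marshal")
    restored = DNN::Models::Sequential.load("trained_model.marshal")

    # Callbacks registered with add_callback can be dropped per event:
    model.clear_callbacks(:before_epoch)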
@@ -267,6 +258,7 @@ module DNN
      # @return [Array] All layers array.
      def layers
        raise DNN_Error.new("This model is not built. You need build this model using predict or train.") unless built?
+        return @layers_cache if @layers_cache
        layers = []
        get_layers = -> link do
          return unless link
@@ -279,7 +271,7 @@ module DNN
          end
        end
        get_layers.(@last_link)
-        layers
+        @layers_cache = layers
      end

      # Get the all has param layers.
@@ -289,38 +281,10 @@ module DNN
      end

      # Get the layer that the model has.
-      # @
-      #
-
-
-      # @param [Integer] The index of the layer to get.
-      # @param [Class] The class of the layer to get.
-      # @return [DNN::Layers::Layer] Return the layer.
-      def get_layer(*args)
-        if args.length == 1
-          index = args[0]
-          layers[index]
-        else
-          layer_class, index = args
-          layers.select { |layer| layer.is_a?(layer_class) }[index]
-        end
-      end
-
-      # @return [DNN::Optimizers::Optimizer] optimizer Return the optimizer to use for learning.
-      def optimizer
-        raise DNN_Error.new("The model is not setup complete.") unless setup_completed?
-        @optimizer
-      end
-
-      # @return [DNN::Losses::Loss] loss_func Return the loss function to use for learning.
-      def loss_func
-        raise DNN_Error.new("The model is not setup complete.") unless setup_completed?
-        @loss_func
-      end
-
-      # @return [Boolean] If model have already been setup completed then return true.
-      def setup_completed?
-        @setup_completed
+      # @param [Symbol] The name of the layer to get.
+      # @return [DNN::Layers::Layer] Return the layer.
+      def get_layer(name)
+        layers.find { |layer| layer.name == name }
      end

      # @return [Boolean] If model have already been built then return true.
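An illustrative lookup (not part of the diff): get_layer now takes the layer's Symbol name rather than an index or a class/index pair. Names are assigned automatically on the first forward pass by the naming method shown further down, in the form ClassName_id:

    dense0 = model.get_layer(:Dense_0)  # first Dense layer, assuming the default auto-naming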
@@ -331,31 +295,45 @@ module DNN
      private

      def forward(x, learning_phase)
-
-
+        DNN.learning_phase = learning_phase
+        @layers_cache = nil
+        y, @last_link = call(x)
+        unless @built
+          @built = true
+          naming
+        end
        y
      end

      def backward(dy)
-
-
-
-
-
-
-
-
+        @last_link.backward(dy)
+      end
+
+      def call_callbacks(event, *args)
+        @callbacks[event].each do |callback|
+          callback.call(*args)
+        end
+      end
+
+      def naming
+        layers.uniq.each do |layer|
+          id = layers.uniq.select { |l| l.is_a?(layer.class) }.index(layer)
+          class_name = layer.class.name.split("::").last
+          layer.name = "#{class_name}_#{id}".to_sym unless layer.name
+          if layer.is_a?(Layers::HasParamLayer)
+            layer.get_params.each do |param_key, param|
+              param.name = "#{layer.name}__#{param_key}".to_sym unless param.name
+            end
          end
        end
-        bwd.(@last_link, dy)
      end

      def check_xy_type(x, y = nil)
-
-          raise TypeError.new("x:#{x.class.name} is not an instance of #{Xumo::SFloat.name} class.")
+        if !x.is_a?(Xumo::SFloat) && !x.is_a?(Array)
+          raise TypeError.new("x:#{x.class.name} is not an instance of #{Xumo::SFloat.name} class or Array class.")
        end
-        if y && !y.is_a?(Xumo::SFloat)
-          raise TypeError.new("y:#{y.class.name} is not an instance of #{Xumo::SFloat.name} class.")
+        if y && !y.is_a?(Xumo::SFloat) && !x.is_a?(Array)
+          raise TypeError.new("y:#{y.class.name} is not an instance of #{Xumo::SFloat.name} class or Array class.")
        end
      end
    end
@@ -373,7 +351,7 @@ module DNN
      # Add layer to the model.
      # @param [DNN::Layers::Layer] layer Layer to add to the model.
      # @return [DNN::Models::Model] Return self.
-      def
+      def add(layer)
        unless layer.is_a?(Layers::Layer) || layer.is_a?(Model)
          raise TypeError.new("layer: #{layer.class.name} is not an instance of the DNN::Layers::Layer class or DNN::Models::Model class.")
        end
@@ -381,6 +359,18 @@ module DNN
        self
      end

+      alias << add
+
+      # Remove layer to the model.
+      # @param [DNN::Layers::Layer] layer Layer to remove to the model.
+      # @return [Boolean] Return true if success for remove layer.
+      def remove(layer)
+        unless layer.is_a?(Layers::Layer) || layer.is_a?(Model)
+          raise TypeError.new("layer: #{layer.class.name} is not an instance of the DNN::Layers::Layer class or DNN::Models::Model class.")
+        end
+        @stack.delete(layer) ? true : false
+      end
+
      def call(x)
        @stack.each do |layer|
          x = layer.(x)
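A small Sequential sketch (not part of the diff) using the new add/<</remove API; the layer classes are the usual ones from the ruby-dnn examples:

    model = DNN::Models::Sequential.new
    model << DNN::Layers::InputLayer.new(784)
    model << DNN::Layers::Dense.new(256)
    relu = DNN::Layers::ReLU.new
    model.add(relu)
    model.remove(relu)  # => true, the layer is deleted from the stack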
data/lib/dnn/core/normalizations.rb
CHANGED
@@ -24,14 +24,6 @@ module DNN
        @eps = eps
      end

-      def call(input)
-        x, prev_link, learning_phase = *input
-        build(x.shape[1..-1]) unless built?
-        y = forward(x, learning_phase)
-        link = Link.new(prev_link, self)
-        [y, link, learning_phase]
-      end
-
      def build(input_shape)
        super
        @gamma = Param.new(Xumo::SFloat.ones(*output_shape), 0)
@@ -40,8 +32,8 @@ module DNN
        @running_var = Param.new(Xumo::SFloat.zeros(*output_shape))
      end

-      def forward(x
-        if learning_phase
+      def forward(x)
+        if DNN.learning_phase
          mean = x.mean(axis: @axis, keepdims: true)
          @xc = x - mean
          var = (@xc ** 2).mean(axis: @axis, keepdims: true)
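Closing note, not part of the diff: with the per-layer call/forward(x, learning_phase) plumbing removed, the training/inference flag is the global DNN.learning_phase (added in global.rb), which Model#forward sets before running the layers. A hypothetical direct use, only to show where the flag now lives (bn stands for an already-built BatchNormalization layer):

    DNN.learning_phase = true   # what Model#forward(x, true) does during training
    y = bn.forward(x)           # normalizes with the current batch's mean and variance

    DNN.learning_phase = false  # inference: forward falls back to the stored running statistics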