ruby-dnn 0.13.2 → 0.13.3
- checksums.yaml +4 -4
- data/Rakefile +11 -3
- data/lib/dnn/core/embedding.rb +1 -1
- data/lib/dnn/core/layers.rb +2 -2
- data/lib/dnn/core/losses.rb +1 -1
- data/lib/dnn/core/models.rb +18 -11
- data/lib/dnn/core/normalizations.rb +2 -2
- data/lib/dnn/core/optimizers.rb +2 -2
- data/lib/dnn/core/rnn_layers.rb +1 -1
- data/lib/dnn/core/savers.rb +2 -2
- data/lib/dnn/version.rb +1 -1
- metadata +2 -2
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: cd3d1988c50c875fa619ab6c525fd604c8f264731c965f31725dd80ca8b17d72
+  data.tar.gz: 7149096f7222c5a4d42bfa69121a99c5df97ca054ae90de7f7c7fc9a294e7c53
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 4ba83b8274994a9b90be17231791d1935de015dc2963ec4cc47f98f12782c8f4ac342aa220492c4e43d4a5d7780dae2011ba1b6fc967a23538b22318890bbb5a
+  data.tar.gz: 5374ea0e296ba510ce13d602daa4ffbc31757800ba9a83003f4795f195e4917511935d1bd7279ba8ce7204e78e73c7d2f918789452f329177fbd80eee981df3c
data/Rakefile
CHANGED
@@ -8,15 +8,23 @@ Rake::TestTask.new(:test) do |t|
   t.test_files = FileList["test/*_test.rb"]
 end
 
-task :
+task :build_cifar_loader do
   sh "cd ext/cifar_loader; ruby extconf.rb; make"
 end
 
-task :
+task :build_rb_stb_image do
   sh "cd ext/rb_stb_image; ruby extconf.rb; make"
 end
 
-task :
+task :clean_cifar_loader do
+  sh "cd ext/cifar_loader; make clean; unlink Makefile"
+end
+
+task :clean_rb_stb_image do
+  sh "cd ext/rb_stb_image; make clean; unlink Makefile"
+end
+
+task :default => [:test, :build_cifar_loader, :build_rb_stb_image]
 
 task :doc do
   src_list = Dir["lib/dnn/core/*.rb"]
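Note: the new clean tasks mirror the build tasks, so a full rebuild of both native extensions can be chained. A minimal sketch (the :rebuild_ext name is hypothetical, not part of this diff; it assumes the extensions were built at least once, since make clean and unlink fail when no Makefile exists):

    # Hypothetical composite task built from the four tasks above.
    task :rebuild_ext => [:clean_cifar_loader, :clean_rb_stb_image,
                          :build_cifar_loader, :build_rb_stb_image]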
data/lib/dnn/core/embedding.rb
CHANGED
@@ -28,7 +28,7 @@ module DNN
 
     def build
       @built = true
-      @weight = Param.new(Xumo::SFloat.new(@input_length), 0)
+      @weight = Param.new(Xumo::SFloat.new(@input_length), Xumo::SFloat[0])
       @weight_initializer.init_param(self, @weight)
       @weight_regularizer.param = @weight if @weight_regularizer
     end
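Note: this release swaps the Integer 0 gradient placeholder for Xumo::SFloat[0] here and in layers.rb, normalizations.rb, rnn_layers.rb, and optimizers.rb below, so the gradient slot always holds an NArray instead of sometimes a Ruby Integer. A minimal behavioral sketch, assuming Xumo::SFloat behaves like Numo::SFloat (ruby-dnn's Xumo resolves to a Numo-compatible backend; names below are illustrative):

    require "numo/narray"

    grad = Numo::SFloat[0]              # like Xumo::SFloat[0]: a one-element float array
    dw = Numo::SFloat[0.5, -1.0, 2.0]   # some computed gradient
    grad = grad + dw                    # broadcasts; the accumulator stays a Numo::SFloat
    p grad.class                        # => Numo::SFloat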
data/lib/dnn/core/layers.rb
CHANGED
@@ -168,8 +168,8 @@ module DNN
       @bias_initializer = bias_initializer
       @weight_regularizer = weight_regularizer
       @bias_regularizer = bias_regularizer
-      @weight = Param.new(nil, 0)
-      @bias = use_bias ? Param.new(nil, 0) : nil
+      @weight = Param.new(nil, Xumo::SFloat[0])
+      @bias = use_bias ? Param.new(nil, Xumo::SFloat[0]) : nil
     end
 
     def regularizers
data/lib/dnn/core/losses.rb
CHANGED
data/lib/dnn/core/models.rb
CHANGED
@@ -82,12 +82,14 @@ module DNN
     # @param [Numo::SFloat] x Input training data.
     # @param [Numo::SFloat] y Output training data.
     # @param [Integer] epochs Number of training.
+    # @param [Integer] initial_epoch Initial epoch.
     # @param [Integer] batch_size Batch size used for one training.
     # @param [Array | NilClass] test If you to test the model for every 1 epoch,
     # specify [x_test, y_test]. Don't test to the model, specify nil.
     # @param [Boolean] verbose Set true to display the log. If false is set, the log is not displayed.
     def train(x, y, epochs,
               batch_size: 1,
+              initial_epoch: 1,
               test: nil,
               verbose: true)
       raise DNN_Error.new("The model is not optimizer setup complete.") unless @optimizer
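Note: initial_epoch only shifts where the epoch counter starts (see the (initial_epoch..epochs) loop in the next hunk); epochs is still the final epoch number, not an additional count. A hedged usage sketch for resuming training (model, x_train, y_train, x_test, y_test are assumed names):

    # Train epochs 1..10, then resume and run epochs 11..20.
    model.train(x_train, y_train, 10, batch_size: 128, test: [x_test, y_test])
    model.train(x_train, y_train, 20, batch_size: 128, initial_epoch: 11)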
@@ -95,7 +97,7 @@ module DNN
       check_xy_type(x, y)
       iter = Iterator.new(x, y)
       num_train_datas = x.is_a?(Array) ? x[0].shape[0] : x.shape[0]
-      (1..epochs).each do |epoch|
+      (initial_epoch..epochs).each do |epoch|
         call_callbacks(:before_epoch, epoch)
         puts "【 epoch #{epoch}/#{epochs} 】" if verbose
         iter.foreach(batch_size) do |x_batch, y_batch, index|
@@ -136,6 +138,7 @@ module DNN
     # Setup the model before use this method.
     # @param [Numo::SFloat] x Input training data.
     # @param [Numo::SFloat] y Output training data.
+    # @param [Integer] batch_size Batch size used for one test.
     # @return [Float | Numo::SFloat] Return loss value in the form of Float or Numo::SFloat.
     def train_on_batch(x, y)
       raise DNN_Error.new("The model is not optimizer setup complete.") unless @optimizer
@@ -146,7 +149,7 @@ module DNN
       loss_value = @loss_func.loss(x, y, layers)
       dy = @loss_func.backward(x, y)
       backward(dy)
-      @optimizer.update(layers
+      @optimizer.update(layers)
       @loss_func.regularizers_backward(layers)
       call_callbacks(:after_train_on_batch, loss_value)
       loss_value
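Note: train_on_batch performs one full optimization step: forward loss, loss backward, model backward, optimizer update, then regularizer backward and callbacks. A sketch of a manual training loop built on it, reusing the Iterator pattern train itself uses above (assumes Iterator is reachable as DNN::Iterator; data names are placeholders):

    iter = DNN::Iterator.new(x_train, y_train)
    iter.foreach(32) do |x_batch, y_batch, index|
      loss = model.train_on_batch(x_batch, y_batch)
      puts "batch #{index}: loss=#{loss}" if index % 100 == 0
    end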
@@ -155,6 +158,7 @@ module DNN
     # Evaluate model and get accuracy of test data.
     # @param [Numo::SFloat] x Input test data.
     # @param [Numo::SFloat] y Output test data.
+    # @param [Integer] batch_size Batch size used for one test.
     # @return [Array] Returns the test data accuracy and mean loss in the form [accuracy, mean_loss].
     def accuracy(x, y, batch_size: 100)
       check_xy_type(x, y)
@@ -186,18 +190,21 @@ module DNN
       [correct, loss_value]
     end
 
-
-
+    # Implement the process to evaluate this model.
+    # @param [Numo::SFloat] x Input test data.
+    # @param [Numo::SFloat] y Output test data.
+    private def evaluate(x, y)
+      if x.shape[1..-1] == [1]
         correct = 0
-
+        x.shape[0].times do |i|
           if @loss_func.is_a?(Losses::SigmoidCrossEntropy)
-            correct += 1 if (
+            correct += 1 if (x[i, 0] < 0 && y[i, 0] < 0.5) || (x[i, 0] >= 0 && y[i, 0] >= 0.5)
           else
-            correct += 1 if (
+            correct += 1 if (x[i, 0] < 0 && y[i, 0] < 0) || (x[i, 0] >= 0 && y[i, 0] >= 0)
           end
         end
       else
-        correct =
+        correct = x.max_index(axis: 1).eq(y.max_index(axis: 1)).count
       end
       correct
     end
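Note on the binary branch: the model output x is compared against 0 rather than 0.5 because it holds raw logits — sigmoid is monotone with sigmoid(0) = 0.5, so sigmoid(z) >= 0.5 exactly when z >= 0, and the sigmoid never needs to be evaluated. A quick numeric check:

    sigmoid = ->(z) { 1.0 / (1.0 + Math.exp(-z)) }
    p sigmoid.call(0.0)          # => 0.5
    p sigmoid.call(-2.0) < 0.5   # => true, matching the z < 0 test above
    # The multiclass branch instead counts rows where argmax(x) == argmax(y).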
@@ -271,7 +278,7 @@ module DNN
         end
       end
       get_layers.(@last_link)
-      @layers_cache = layers
+      @layers_cache = layers.uniq
     end
 
     # Get the all has param layers.
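Note: the traversal collects a layer once per link that reaches it, so a layer instance shared between two points in the graph previously appeared more than once in the cache; uniq compares object identity for plain objects and keeps a single entry, which matters for anything that iterates layers. A trivial illustration:

    shared = Object.new            # stands in for one layer instance
    collected = [shared, shared]   # reached through two links in the graph
    p collected.uniq.size          # => 1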
@@ -316,8 +323,8 @@ module DNN
     end
 
     def naming
-      layers.
-      id = layers.
+      layers.each do |layer|
+        id = layers.select { |l| l.is_a?(layer.class) }.index(layer)
         class_name = layer.class.name.split("::").last
         layer.name = "#{class_name}_#{id}".to_sym unless layer.name
         if layer.is_a?(Layers::HasParamLayer)
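Note: the generated name is the layer's class name suffixed with its index among layers of the same class, so a model with two Dense layers would get :Dense_0 and :Dense_1. A standalone sketch of the id computation (plain Ruby stand-ins, not the library's classes):

    layers = ["a", 1, "b"]   # stand-ins: two Strings, one Integer
    layer  = "b"
    id = layers.select { |l| l.is_a?(layer.class) }.index(layer)   # => 1
    name = "#{layer.class.name.split("::").last}_#{id}".to_sym     # => :String_1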
data/lib/dnn/core/normalizations.rb
CHANGED
@@ -22,8 +22,8 @@ module DNN
 
     def build(input_shape)
       super
-      @gamma = Param.new(Xumo::SFloat.ones(*output_shape), 0)
-      @beta = Param.new(Xumo::SFloat.zeros(*output_shape), 0)
+      @gamma = Param.new(Xumo::SFloat.ones(*output_shape), Xumo::SFloat[0])
+      @beta = Param.new(Xumo::SFloat.zeros(*output_shape), Xumo::SFloat[0])
       @running_mean = Param.new(Xumo::SFloat.zeros(*output_shape))
       @running_var = Param.new(Xumo::SFloat.zeros(*output_shape))
     end
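Note: @gamma starts at ones and @beta at zeros, so batch normalization is initially the identity transform on the normalized activations (y = gamma * x_hat + beta = x_hat); this release only changes the gradient placeholders, not that initialization.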
data/lib/dnn/core/optimizers.rb
CHANGED
@@ -38,7 +38,7 @@ module DNN
       clip_grads(target_params) if @clip_norm
       update_params(target_params)
       target_params.each do |param|
-        param.grad = Xumo::SFloat
+        param.grad = Xumo::SFloat[0]
       end
     end
 
@@ -58,7 +58,7 @@ module DNN
     end
 
     private def clip_grads(params)
-      norm = Math.sqrt(params.reduce(0) { |
+      norm = Math.sqrt(params.reduce(0) { |total, param| total + (param.grad ** 2).sum })
       return if norm <= @clip_norm
       rate = @clip_norm / (norm + 1e-7)
       params.each do |param|
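Note: clip_grads implements global-norm clipping: the L2 norm is taken over all parameter gradients together, and if it exceeds @clip_norm every gradient is scaled by the same factor, which shrinks the step without changing its direction. A numeric sketch with Numo stand-ins (clip_norm = 2.5 assumed):

    require "numo/narray"

    grads = [Numo::SFloat[3.0], Numo::SFloat[4.0]]
    norm = Math.sqrt(grads.reduce(0) { |total, g| total + (g ** 2).sum })  # => 5.0
    rate = 2.5 / (norm + 1e-7)
    grads.map! { |g| g * rate }   # overall gradient norm scaled down to ~2.5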
data/lib/dnn/core/rnn_layers.rb
CHANGED
@@ -33,7 +33,7 @@ module DNN
       @return_sequences = return_sequences
       @layers = []
       @hidden = Param.new
-      @recurrent_weight = Param.new(nil, 0)
+      @recurrent_weight = Param.new(nil, Xumo::SFloat[0])
       @recurrent_weight_initializer = recurrent_weight_initializer
       @recurrent_weight_regularizer = recurrent_weight_regularizer
     end
data/lib/dnn/core/savers.rb
CHANGED
@@ -21,7 +21,7 @@ module DNN
     end
 
     def set_all_params_data(params_data)
-      all_params = @model.has_param_layers.
+      all_params = @model.has_param_layers.map { |layer|
         layer.get_params.values
       }.flatten
       all_params.each do |param|
@@ -91,7 +91,7 @@ module DNN
     end
 
     def get_all_params_data
-      all_params = @model.has_param_layers.
+      all_params = @model.has_param_layers.map { |layer|
         layer.get_params.values
       }.flatten
       all_params.to_h { |param| [param.name, param.data] }
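Note: both helpers now build one flat array of every Param across the has-param layers, then pair each param's registered name with its data for serialization. The map { ... }.flatten is equivalent to a single flat_map (shown only as an illustration, not a suggested change):

    all_params = @model.has_param_layers.flat_map { |layer| layer.get_params.values }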
data/lib/dnn/version.rb
CHANGED
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: ruby-dnn
 version: !ruby/object:Gem::Version
-  version: 0.13.2
+  version: 0.13.3
 platform: ruby
 authors:
 - unagiootoro
 autorequire:
 bindir: exe
 cert_chain: []
-date: 2019-
+date: 2019-10-10 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: numo-narray