ruby-dnn 1.1.6 → 1.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.gitignore +0 -0
- data/.travis.yml +0 -0
- data/CODE_OF_CONDUCT.md +0 -0
- data/Gemfile +0 -0
- data/LICENSE.txt +0 -0
- data/README.md +6 -2
- data/Rakefile +0 -0
- data/bin/console +0 -0
- data/bin/setup +0 -0
- data/examples/api-examples/early_stopping_example.rb +0 -0
- data/examples/api-examples/initializer_example.rb +0 -0
- data/examples/api-examples/regularizer_example.rb +0 -0
- data/examples/api-examples/save_example.rb +0 -0
- data/examples/cifar100_example.rb +0 -0
- data/examples/cifar10_example.rb +0 -0
- data/examples/dcgan/dcgan.rb +0 -0
- data/examples/dcgan/imgen.rb +0 -0
- data/examples/dcgan/train.rb +0 -0
- data/examples/iris_example.rb +0 -0
- data/examples/judge-number/README.md +0 -0
- data/examples/judge-number/capture.PNG +0 -0
- data/examples/judge-number/convnet8.rb +0 -0
- data/examples/judge-number/make_weights.rb +0 -0
- data/examples/judge-number/mnist_predict.rb +0 -0
- data/examples/judge-number/mnist_train.rb +0 -0
- data/examples/judge-number/public/httpRequest.js +0 -0
- data/examples/judge-number/public/judgeNumber.js +0 -0
- data/examples/judge-number/server.rb +0 -0
- data/examples/judge-number/trained_mnist_params.marshal +0 -0
- data/examples/judge-number/views/index.erb +0 -0
- data/examples/mnist_conv2d_example.rb +3 -3
- data/examples/mnist_define_by_run.rb +0 -0
- data/examples/mnist_example.rb +0 -0
- data/examples/mnist_gpu.rb +47 -0
- data/examples/mnist_lstm_example.rb +1 -1
- data/examples/pix2pix/dcgan.rb +0 -0
- data/examples/pix2pix/imgen.rb +0 -0
- data/examples/pix2pix/train.rb +0 -0
- data/examples/vae.rb +1 -1
- data/examples/xor_example.rb +0 -0
- data/ext/rb_stb_image/extconf.rb +0 -0
- data/ext/rb_stb_image/rb_stb_image.c +0 -0
- data/lib/dnn.rb +24 -3
- data/lib/dnn/core/callbacks.rb +6 -4
- data/lib/dnn/core/error.rb +0 -0
- data/lib/dnn/core/global.rb +0 -0
- data/lib/dnn/core/initializers.rb +0 -0
- data/lib/dnn/core/iterator.rb +0 -0
- data/lib/dnn/core/layers/activations.rb +0 -0
- data/lib/dnn/core/layers/basic_layers.rb +37 -18
- data/lib/dnn/core/layers/cnn_layers.rb +33 -5
- data/lib/dnn/core/layers/embedding.rb +0 -0
- data/lib/dnn/core/layers/math_layers.rb +5 -5
- data/lib/dnn/core/layers/merge_layers.rb +2 -26
- data/lib/dnn/core/layers/normalizations.rb +0 -0
- data/lib/dnn/core/layers/rnn_layers.rb +0 -0
- data/lib/dnn/core/layers/split_layers.rb +39 -0
- data/lib/dnn/core/link.rb +14 -33
- data/lib/dnn/core/losses.rb +6 -6
- data/lib/dnn/core/models.rb +58 -6
- data/lib/dnn/core/monkey_patch.rb +0 -0
- data/lib/dnn/core/optimizers.rb +8 -1
- data/lib/dnn/core/param.rb +0 -0
- data/lib/dnn/core/regularizers.rb +0 -0
- data/lib/dnn/core/savers.rb +0 -0
- data/lib/dnn/core/tensor.rb +0 -0
- data/lib/dnn/core/utils.rb +23 -0
- data/lib/dnn/datasets/cifar10.rb +0 -0
- data/lib/dnn/datasets/cifar100.rb +0 -0
- data/lib/dnn/datasets/downloader.rb +0 -0
- data/lib/dnn/datasets/fashion-mnist.rb +0 -0
- data/lib/dnn/datasets/iris.rb +0 -0
- data/lib/dnn/datasets/mnist.rb +0 -0
- data/lib/dnn/datasets/stl-10.rb +0 -0
- data/lib/dnn/image.rb +0 -0
- data/lib/dnn/keras-model-convertor.rb +0 -0
- data/lib/dnn/numo2numpy.rb +0 -0
- data/lib/dnn/version.rb +1 -1
- data/ruby-dnn.gemspec +0 -0
- data/third_party/stb_image.h +0 -0
- data/third_party/stb_image_resize.h +0 -0
- data/third_party/stb_image_write.h +0 -0
- metadata +4 -2
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: d152e645981ddb8f244abc88719c4c68a154679fa18abf6b02fa8852510e0c27
+  data.tar.gz: 2987a4ce59a8f7d60a160ac192125ab97a4e14735ae29e151d23412a22f3c13b
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 13e9cb2995d1d2851850931a0aedb01ca4fb71b6b03fa81a0ac9e78176b8ed4f520ac6284a39e9c71aaf2629c55475eac33987e6f49535bfe27153729f37624c
+  data.tar.gz: a492f778d4094fce114149841a54e2b0d8a4eff77778b8074198c8df009916598cb96bfd67494c88520e10fd99618591862854773a1f20baf00247fe2bb1f1ea
data/.gitignore
CHANGED
File without changes

data/.travis.yml
CHANGED
File without changes

data/CODE_OF_CONDUCT.md
CHANGED
File without changes

data/Gemfile
CHANGED
File without changes

data/LICENSE.txt
CHANGED
File without changes
data/README.md
CHANGED
@@ -95,7 +95,7 @@ If you want to know more detailed information, please refer to the source code.
 | Losses | MeanSquaredError, MeanAbsoluteError, Hinge, HuberLoss, SoftmaxCrossEntropy, SigmoidCrossEntropy |
 
 ## Datasets
-By setting the environment variable
+By setting the environment variable "RUBY_DNN_DOWNLOADS_PATH", you can specify the path to read dataset.
 
 ● Iris
 ● MNIST
@@ -109,10 +109,14 @@ By setting the environment variable "RUBY_DNN_DOWNLOADS_PATH", you can specify t
 ● DCGAN
 ● Pix2pix
 
+## Use GPU
+If you do `require "cumo/narray"` before `require "dnn"`, you can run it on GPU.
+Or, set the environment variable "RUBY_DNN_USE_CUMO" to "ENABLE" to force the GPU to be used.
+
 ## TODO
 ● Write a test.
 ● Write a document.
-● 
+● Improve performance when using GPU.
 
 ## Development
 
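For reference, a minimal sketch of both GPU switches described in the README hunk above (this assumes the cumo gem is installed and a CUDA device is available; neither is stated in the diff itself):

    # Option 1: load Cumo before dnn; the backend then resolves to Cumo.
    require "cumo/narray"
    require "dnn"
    puts DNN.use_cumo?  # => true

    # Option 2: leave the script unchanged and force it from the shell:
    #   $ RUBY_DNN_USE_CUMO=ENABLE ruby mnist_example.rb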
data/Rakefile
CHANGED
File without changes

data/bin/console
CHANGED
File without changes

data/bin/setup
CHANGED
File without changes
data/examples/api-examples/early_stopping_example.rb
CHANGED
File without changes

data/examples/api-examples/initializer_example.rb
CHANGED
File without changes

data/examples/api-examples/regularizer_example.rb
CHANGED
File without changes

data/examples/api-examples/save_example.rb
CHANGED
File without changes

data/examples/cifar100_example.rb
CHANGED
File without changes
data/examples/cifar10_example.rb
CHANGED
File without changes

data/examples/dcgan/dcgan.rb
CHANGED
File without changes

data/examples/dcgan/imgen.rb
CHANGED
File without changes

data/examples/dcgan/train.rb
CHANGED
File without changes

data/examples/iris_example.rb
CHANGED
File without changes
data/examples/judge-number/README.md
CHANGED
File without changes

data/examples/judge-number/capture.PNG
CHANGED
File without changes

data/examples/judge-number/convnet8.rb
CHANGED
File without changes

data/examples/judge-number/make_weights.rb
CHANGED
File without changes

data/examples/judge-number/mnist_predict.rb
CHANGED
File without changes

data/examples/judge-number/mnist_train.rb
CHANGED
File without changes

data/examples/judge-number/public/httpRequest.js
CHANGED
File without changes

data/examples/judge-number/public/judgeNumber.js
CHANGED
File without changes

data/examples/judge-number/server.rb
CHANGED
File without changes

data/examples/judge-number/trained_mnist_params.marshal
CHANGED
File without changes

data/examples/judge-number/views/index.erb
CHANGED
File without changes
data/examples/mnist_conv2d_example.rb
CHANGED
@@ -21,13 +21,13 @@ model = Sequential.new
 
 model << InputLayer.new([28, 28, 1])
 
-model << Conv2D.new(16,
+model << Conv2D.new(16, 3)
 model << BatchNormalization.new
 model << ReLU.new
 
 model << MaxPool2D.new(2)
 
-model << Conv2D.new(32,
+model << Conv2D.new(32, 3)
 model << BatchNormalization.new
 model << ReLU.new
 
@@ -42,7 +42,7 @@ model << Dense.new(10)
 
 model.setup(Adam.new, SoftmaxCrossEntropy.new)
 
-model.train(x_train, y_train, 10, batch_size:
+model.train(x_train, y_train, 10, batch_size: 128, test: [x_test, y_test])
 
 accuracy, loss = model.evaluate(x_test, y_test)
 puts "accuracy: #{accuracy}"
data/examples/mnist_define_by_run.rb
CHANGED
File without changes

data/examples/mnist_example.rb
CHANGED
File without changes

data/examples/mnist_gpu.rb
ADDED
@@ -0,0 +1,47 @@
+require "cumo/narray"
+require "dnn"
+require "dnn/datasets/mnist"
+
+include DNN::Models
+include DNN::Layers
+include DNN::Optimizers
+include DNN::Losses
+
+x_train, y_train = DNN::MNIST.load_train
+x_test, y_test = DNN::MNIST.load_test
+
+x_train = x_train.reshape(x_train.shape[0], 784)
+x_test = x_test.reshape(x_test.shape[0], 784)
+
+x_train = Numo::SFloat.cast(x_train) / 255
+x_test = Numo::SFloat.cast(x_test) / 255
+
+y_train = DNN::Utils.to_categorical(y_train, 10, Numo::SFloat)
+y_test = DNN::Utils.to_categorical(y_test, 10, Numo::SFloat)
+
+if DNN.use_cumo?
+  x_train = DNN::Utils.numo2cumo(x_train)
+  y_train = DNN::Utils.numo2cumo(y_train)
+  x_test = DNN::Utils.numo2cumo(x_test)
+  y_test = DNN::Utils.numo2cumo(y_test)
+end
+
+model = Sequential.new
+
+model << InputLayer.new(784)
+
+model << Dense.new(256)
+model << ReLU.new
+
+model << Dense.new(256)
+model << ReLU.new
+
+model << Dense.new(10)
+
+model.setup(Adam.new, SoftmaxCrossEntropy.new)
+
+model.train(x_train, y_train, 10, batch_size: 128, test: [x_test, y_test])
+
+accuracy, loss = model.evaluate(x_test, y_test)
+puts "accuracy: #{accuracy}"
+puts "loss: #{loss}"
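A note on running this new example: it force-loads Cumo on its first line, so the datasets are loaded as Numo arrays and copied to the GPU with DNN::Utils.numo2cumo only when DNN.use_cumo? is true. A hedged sketch of the invocation (assuming the cumo gem and a CUDA toolkit are installed, which the diff itself does not state):

    $ gem install cumo
    $ ruby examples/mnist_gpu.rb

On a CPU-only machine, mnist_example.rb remains the equivalent entry point.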
data/examples/mnist_lstm_example.rb
CHANGED
@@ -31,7 +31,7 @@ model << Dense.new(10)
 
 model.setup(Adam.new, SoftmaxCrossEntropy.new)
 
-model.train(x_train, y_train, 10, batch_size:
+model.train(x_train, y_train, 10, batch_size: 128, test: [x_test, y_test])
 
 accuracy, loss = model.evaluate(x_test, y_test)
 puts "accuracy: #{accuracy}"
data/examples/pix2pix/dcgan.rb
CHANGED
File without changes

data/examples/pix2pix/imgen.rb
CHANGED
File without changes

data/examples/pix2pix/train.rb
CHANGED
File without changes
data/examples/vae.rb
CHANGED
data/examples/xor_example.rb
CHANGED
File without changes

data/ext/rb_stb_image/extconf.rb
CHANGED
File without changes

data/ext/rb_stb_image/rb_stb_image.c
CHANGED
File without changes
data/lib/dnn.rb
CHANGED
@@ -1,9 +1,29 @@
+require "numo/narray"
+
 module DNN
-  if
+  if ENV["RUBY_DNN_USE_CUMO"] == "ENABLE"
+    require "cumo/narray"
     Xumo = ::Cumo
   else
-
-
+    if defined? ::Cumo
+      Xumo = ::Cumo
+    else
+      Xumo = ::Numo
+    end
+  end
+
+  def self.use_cumo?
+    defined? ::Cumo
+  end
+
+  def self.cudnn_available?
+    return false unless defined? ::Cumo
+    Cumo::CUDA::CUDNN.available?
+  end
+
+  def self.use_cudnn?
+    return false unless ENV["RUBY_DNN_USE_CUDNN"] == "ENABLE"
+    cudnn_available?
   end
 end
 
@@ -20,6 +40,7 @@ require_relative "dnn/core/layers/basic_layers"
 require_relative "dnn/core/layers/normalizations"
 require_relative "dnn/core/layers/activations"
 require_relative "dnn/core/layers/merge_layers"
+require_relative "dnn/core/layers/split_layers"
 require_relative "dnn/core/layers/cnn_layers"
 require_relative "dnn/core/layers/embedding"
 require_relative "dnn/core/layers/rnn_layers"
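Because either backend is aliased to the single constant DNN::Xumo, downstream code can stay backend-agnostic; a minimal sketch of the pattern (the helper name here is illustrative, not from the library):

    require "dnn"

    # Xumo is ::Cumo when the GPU backend is active, ::Numo otherwise,
    # so array-producing code works unchanged on both backends.
    def random_batch(batch_size, dim)
      DNN::Xumo::SFloat.new(batch_size, dim).rand
    end

    x = random_batch(32, 784)
    puts x.class  # Numo::SFloat on CPU, Cumo::SFloat on GPU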
data/lib/dnn/core/callbacks.rb
CHANGED
@@ -104,6 +104,7 @@ module DNN
     # A callback that save the log.
     # The following logs will be recorded.
     # epoch: Current epoch.
+    # step: Current step in epoch.
     # train_loss: Batch training loss.
     # test_loss: Mean test loss.
     # test_accuracy: Test accuracy.
@@ -111,6 +112,7 @@ module DNN
       def initialize
         @log = {
           epoch: [],
+          step: [],
           train_loss: [],
           test_loss: [],
           test_accuracy: [],
@@ -122,7 +124,7 @@ module DNN
       end
 
       def after_train_on_batch
-        logging(:train_loss)
+        logging(:train_loss, :step)
       end
 
       # Get a log.
@@ -130,10 +132,10 @@ module DNN
       # @return [Numo::NArray] Return the recorded log.
       def get_log(tag)
         case tag
-        when :epoch
-
+        when :epoch, :step
+          Xumo::UInt32.cast(@log[tag])
         else
-
+          Xumo::SFloat.cast(@log[tag])
         end
       end
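With the new :step tag, per-batch training losses can be lined up against their batch index; a sketch of how the logger might be read back (assuming the class is DNN::Callbacks::Logger and Model#add_callback behaves as in the 1.1.x releases):

    logger = DNN::Callbacks::Logger.new
    model.add_callback(logger)
    model.train(x_train, y_train, 10, batch_size: 128)

    steps  = logger.get_log(:step)        # Xumo::UInt32 of batch indices
    losses = logger.get_log(:train_loss)  # Xumo::SFloat of per-batch losses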
data/lib/dnn/core/error.rb
CHANGED
File without changes

data/lib/dnn/core/global.rb
CHANGED
File without changes

data/lib/dnn/core/initializers.rb
CHANGED
File without changes

data/lib/dnn/core/iterator.rb
CHANGED
File without changes

data/lib/dnn/core/layers/activations.rb
CHANGED
File without changes
data/lib/dnn/core/layers/basic_layers.rb
CHANGED
@@ -2,20 +2,21 @@ module DNN
   module Layers
 
     module LayerNode
-      def forward(
-
-
-
-
-
+      def forward(*inputs)
+        xs = inputs.map(&:data)
+        prevs = inputs.map { |input| input.is_a?(Tensor) ? input.link : input }
+        ys = forward_node(*xs)
+        num_outputs = (ys.is_a?(Array) ? ys.length : 1)
+        link = Link.new(prevs: prevs, layer_node: self, num_outputs: num_outputs)
+        prevs.map { |prev| prev.next = link if prev.is_a?(Link) }
+        Tensor.convert(ys, link)
       end
 
-      def forward_node(
+      def forward_node(*xs)
         raise NotImplementedError, "Class '#{self.class.name}' has implement method 'forward_node'"
       end
 
-      def backward_node(
+      def backward_node(*dys)
        raise NotImplementedError, "Class '#{self.class.name}' has implement method 'backward_node'"
      end
    end
@@ -292,14 +293,8 @@ module DNN
     end
 
     class Flatten < Layer
-
-
-      def forward_node(x)
-        x.reshape(x.shape[0], *@output_shape)
-      end
-
-      def backward_node(dy)
-        dy.reshape(dy.shape[0], *@input_shape)
+      def forward(x)
+        Reshape.(x, @output_shape)
       end
 
       def compute_output_shape
@@ -320,13 +315,37 @@ module DNN
      end
 
      def forward_node(x)
-
+       if DNN.use_cumo?
+         _forward_gpu(x)
+       else
+         _forward_cpu(x)
+       end
      end
 
      def backward_node(dy)
+       if DNN.use_cumo?
+         _backward_gpu(dy)
+       else
+         _backward_cpu(dy)
+       end
+     end
+
+     def _forward_cpu(x)
+       x.reshape(x.shape[0], *@output_shape)
+     end
+
+     def _backward_cpu(dy)
        dy.reshape(dy.shape[0], *@input_shape)
      end
 
+     def _forward_gpu(x)
+       x.flatten.reshape(x.shape[0], *@output_shape)
+     end
+
+     def _backward_gpu(dy)
+       dy.flatten.reshape(dy.shape[0], *@input_shape)
+     end
+
      def to_hash
        super(shape: @shape)
      end
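Under the generalized LayerNode above, a layer only supplies forward_node/backward_node; the shared forward now handles any number of inputs and outputs via the Link bookkeeping. A sketch of a custom single-input node under that contract (the class name is illustrative, and this omits shape bookkeeping a real layer would add):

    class Square < DNN::Layers::Layer
      include DNN::Layers::LayerNode

      def forward_node(x)
        @x = x     # cache the input for the backward pass
        x**2
      end

      def backward_node(dy)
        dy * 2 * @x  # chain rule: d(x^2)/dx = 2x
      end
    end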
data/lib/dnn/core/layers/cnn_layers.rb
CHANGED
@@ -6,10 +6,27 @@ module DNN
     module_function
 
     # img[bsize, out_h, out_w, ch] to col[bsize * out_h * out_w, fil_h * fil_w * ch]
-    def im2col(
+    def im2col(*args)
+      if DNN.use_cumo?
+        im2col_gpu(*args)
+      else
+        im2col_cpu(*args)
+      end
+    end
+
+    # col[bsize * out_h * out_w, fil_h * fil_w * ch] to img[bsize, out_h, out_w, ch]
+    def col2im(*args)
+      if DNN.use_cumo?
+        col2im_gpu(*args)
+      else
+        col2im_cpu(*args)
+      end
+    end
+
+    def im2col_cpu(img, out_h, out_w, fil_h, fil_w, strides)
       bsize = img.shape[0]
       ch = img.shape[3]
-      col =
+      col = img.class.zeros(bsize, out_h, out_w, fil_h, fil_w, ch)
       (0...fil_h).each do |i|
         i_range = (i...(i + strides[0] * out_h)).step(strides[0]).to_a
         (0...fil_w).each do |j|
@@ -20,11 +37,16 @@ module DNN
       col.reshape(bsize * out_h * out_w, fil_h * fil_w * ch)
     end
 
-
-
+    def im2col_gpu(img, out_h, out_w, fil_h, fil_w, strides)
+      img = Utils.cumo2numo(img)
+      col = im2col_cpu(img, out_h, out_w, fil_h, fil_w, strides)
+      Utils.numo2cumo(col)
+    end
+
+    def col2im_cpu(col, img_shape, out_h, out_w, fil_h, fil_w, strides)
       bsize, img_h, img_w, ch = img_shape
       col = col.reshape(bsize, out_h, out_w, fil_h, fil_w, ch)
-      img =
+      img = col.class.zeros(bsize, img_h, img_w, ch)
       (0...fil_h).each do |i|
         i_range = (i...(i + strides[0] * out_h)).step(strides[0]).to_a
         (0...fil_w).each do |j|
@@ -35,6 +57,12 @@ module DNN
       img
     end
 
+    def col2im_gpu(col, img_shape, out_h, out_w, fil_h, fil_w, strides)
+      col = Utils.cumo2numo(col)
+      img = col2im_cpu(col, img_shape, out_h, out_w, fil_h, fil_w, strides)
+      Utils.numo2cumo(img)
+    end
+
     def zero_padding(img, pad)
       bsize, img_h, img_w, ch = img.shape
       img2 = Xumo::SFloat.zeros(bsize, img_h + pad[0], img_w + pad[1], ch)
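The *_gpu variants above are thin bridges: they round-trip the array through host memory and reuse the CPU kernel, which is correct but costs two device transfers per call (likely why the README TODO now lists improving GPU performance). The pattern in isolation, as a sketch (the helper name is illustrative):

    # CPU-fallback pattern used by im2col/col2im on GPU:
    # copy device -> host, run the Numo implementation, copy host -> device.
    def on_cpu(cumo_array)
      numo_array = DNN::Utils.cumo2numo(cumo_array)
      result = yield(numo_array)
      DNN::Utils.numo2cumo(result)
    end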
data/lib/dnn/core/layers/embedding.rb
CHANGED
File without changes
data/lib/dnn/core/layers/math_layers.rb
CHANGED
@@ -61,7 +61,7 @@ module DNN
     end
 
     class Add < MergeLayer
-      include
+      include LayerNode
 
       def forward_node(x1, x2)
         @x1_shape = x1.shape
@@ -77,7 +77,7 @@ module DNN
     end
 
     class Sub < MergeLayer
-      include
+      include LayerNode
 
       def forward_node(x1, x2)
         @x1_shape = x1.shape
@@ -93,7 +93,7 @@ module DNN
     end
 
     class Mul < MergeLayer
-      include
+      include LayerNode
 
       def forward_node(x1, x2)
         @x1, @x2 = x1, x2
@@ -108,7 +108,7 @@ module DNN
     end
 
     class Div < MergeLayer
-      include
+      include LayerNode
 
       def forward_node(x1, x2)
         @x1, @x2 = x1, x2
@@ -123,7 +123,7 @@ module DNN
     end
 
     class Dot < MergeLayer
-      include
+      include LayerNode
 
       def forward_node(x1, x2)
         @x1, @x2 = x1, x2
data/lib/dnn/core/layers/merge_layers.rb
CHANGED
@@ -1,30 +1,6 @@
 module DNN
   module Layers
 
-    module MergeLayerNode
-      def forward(input1, input2)
-        x1 = input1.data
-        x2 = input2.data
-        prev1 = (input1.is_a?(Tensor) ? input1.link : input1)
-        prev2 = (input2.is_a?(Tensor) ? input2.link : input2)
-        y = forward_node(x1, x2)
-        link = TwoInputLink.new(prev1, prev2, self)
-        Tensor.convert(y, link)
-      end
-
-      def backward(dy)
-        backward_node(dy)
-      end
-
-      def forward_node(x1, x2)
-        raise NotImplementedError, "Class '#{self.class.name}' has implement method 'forward_node'"
-      end
-
-      def backward_node(dy)
-        raise NotImplementedError, "Class '#{self.class.name}' has implement method 'backward_node'"
-      end
-    end
-
     class MergeLayer < Layer
       def self.call(x1, x2, *args)
         new(*args).call(x1, x2)
@@ -33,7 +9,7 @@ module DNN
       def call(input1, input2)
         input1 = Tensor.convert(input1) if !input1.is_a?(Tensor) && !input1.is_a?(Param)
         input2 = Tensor.convert(input2) if !input2.is_a?(Tensor) && !input2.is_a?(Param)
-        if input1.data.is_a?(
+        if input1.data.is_a?(Xumo::NArray)
           build(input1.data.shape[1..-1]) unless built?
         else
           build([1]) unless built?
@@ -43,7 +19,7 @@ module DNN
     end
 
     class Concatenate < MergeLayer
-      include
+      include LayerNode
 
       attr_reader :axis
 
data/lib/dnn/core/layers/normalizations.rb
CHANGED
File without changes

data/lib/dnn/core/layers/rnn_layers.rb
CHANGED
File without changes
data/lib/dnn/core/layers/split_layers.rb
ADDED
@@ -0,0 +1,39 @@
+module DNN
+  module Layers
+
+    class Split < Layer
+      include LayerNode
+
+      attr_reader :axis
+      attr_reader :dim
+
+      def initialize(axis: 1, dim: nil)
+        super()
+        raise DNNError, "dim is nil" if dim == nil
+        @axis = axis
+        @dim = dim
+      end
+
+      def forward_node(x)
+        x1_dim = @dim
+        x2_dim = x.shape[@axis] - @dim
+        y1, y2others = x.split([x1_dim, x1_dim + x2_dim], axis: @axis)
+        y2 = y2others.is_a?(Array) ? y2others[0].concatenate(y2others[1..-1], axis: @axis) : y2others
+        [y1, y2]
+      end
+
+      def backward_node(dy1, dy2)
+        dy1.concatenate(dy2, axis: @axis)
+      end
+
+      def to_hash
+        super(axis: @axis, dim: @dim)
+      end
+
+      def load_hash(hash)
+        initialize(axis: hash[:axis], dim: hash[:dim])
+      end
+    end
+
+  end
+end
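A usage sketch for the new layer in define-by-run style (hedged: this assumes that, per forward_node returning an Array above, calling the layer yields one tensor per output, which is what the new Link num_outputs bookkeeping suggests):

    include DNN::Layers

    x = DNN::Tensor.convert(Numo::SFloat.new(4, 6).seq)
    split = Split.new(axis: 1, dim: 2)
    y1, y2 = split.(x)   # y1 shape: [4, 2], y2 shape: [4, 4]
    # backward_node concatenates the two incoming gradients back along :axis.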
data/lib/dnn/core/link.rb
CHANGED
@@ -1,57 +1,38 @@
 module DNN
   class Link
-    attr_accessor :
+    attr_accessor :prevs
     attr_accessor :next
     attr_accessor :layer_node
+    attr_reader :num_outputs
 
-    def initialize(
-      @
-      @layer_node = layer_node
-      @next = nil
-    end
-
-    def forward(x)
-      x = @layer_node.(x)
-      @next ? @next.forward(x) : x
-    end
-
-    def backward(dy = Xumo::SFloat[1])
-      dy = @layer_node.backward_node(dy)
-      @prev&.backward(dy)
-    end
-  end
-
-  class TwoInputLink
-    attr_accessor :prev1
-    attr_accessor :prev2
-    attr_accessor :next
-    attr_accessor :layer_node
-
-    def initialize(prev1 = nil, prev2 = nil, layer_node = nil)
-      @prev1 = prev1
-      @prev2 = prev2
+    def initialize(prevs: nil, layer_node: nil, num_outputs: 1)
+      @prevs = prevs
       @layer_node = layer_node
+      @num_outputs = num_outputs
       @next = nil
       @hold = []
     end
 
     def forward(x)
       @hold << x
-      return if @hold.length <
+      return if @hold.length < @prevs.length
       x = @layer_node.(*@hold)
       @hold = []
       @next ? @next.forward(x) : x
     end
 
     def backward(dy = Xumo::SFloat[1])
-
+      @hold << dy
+      return if @hold.length < @num_outputs
+      dys = @layer_node.backward_node(*@hold)
+      @hold = []
       if dys.is_a?(Array)
-
+        dys.each.with_index do |dy, i|
+          @prevs[i]&.backward(dy)
+        end
       else
-
+        @prevs.first&.backward(dys)
       end
-      @prev1&.backward(dy1)
-      @prev2&.backward(dy2) if dy2
     end
   end
 end
data/lib/dnn/core/losses.rb
CHANGED
@@ -62,7 +62,7 @@ module DNN
   end
 
   class MeanSquaredError < Loss
-    include Layers::
+    include Layers::LayerNode
 
     def forward_node(y, t)
       @y = y
@@ -76,7 +76,7 @@ module DNN
   end
 
   class MeanAbsoluteError < Loss
-    include Layers::
+    include Layers::LayerNode
 
     def forward_node(y, t)
       @y = y
@@ -93,7 +93,7 @@ module DNN
   end
 
   class Hinge < Loss
-    include Layers::
+    include Layers::LayerNode
 
     def forward_node(y, t)
       @t = t
@@ -109,7 +109,7 @@ module DNN
   end
 
   class HuberLoss < Loss
-    include Layers::
+    include Layers::LayerNode
 
     def forward_node(y, t)
       @y = y
@@ -129,7 +129,7 @@ module DNN
   end
 
   class SoftmaxCrossEntropy < Loss
-    include Layers::
+    include Layers::LayerNode
 
     attr_accessor :eps
 
@@ -166,7 +166,7 @@ module DNN
   end
 
   class SigmoidCrossEntropy < Loss
-    include Layers::
+    include Layers::LayerNode
 
     attr_accessor :eps
 
data/lib/dnn/core/models.rb
CHANGED
@@ -230,6 +230,7 @@ module DNN
       puts "【 epoch #{epoch}/#{epochs} 】" if verbose
 
       train_iterator.foreach(batch_size) do |x_batch, y_batch, index|
+        @last_log[:step] = index
         train_step_met = train_step(x_batch, y_batch)
         num_trained_datas = (index + 1) * batch_size
         num_trained_datas = num_trained_datas > num_train_datas ? num_train_datas : num_trained_datas
@@ -305,13 +306,13 @@ module DNN
           loss_opt[:layers] = layers if i == 0
           loss_opt[:loss_weight] = @loss_weights[i] if @loss_weights
           loss = @loss_func[i].loss(out, Tensor.convert(y[i]), **loss_opt)
-          loss_data << loss.data
+          loss_data << Utils.to_f(loss.data)
           loss.link.backward(Xumo::SFloat.ones(y[i][0...1, false].shape[0], 1))
         end
       else
         out = output_tensors
         loss = @loss_func.loss(out, Tensor.convert(y), layers: layers)
-        loss_data = loss.data
+        loss_data = Utils.to_f(loss.data)
         loss.link.backward(Xumo::SFloat.ones(y[0...1, false].shape[0], 1))
       end
       @optimizer.update(get_all_trainable_params)
@@ -392,13 +393,13 @@ module DNN
         output_tensors.each.with_index do |out, i|
           correct << accuracy(out.data, y[i]) if accuracy
           loss = @loss_func[i].(out, Tensor.convert(y[i]))
-          loss_data << loss.data
+          loss_data << Utils.to_f(loss.data)
         end
       else
         out = output_tensors
         correct = accuracy(out.data, y) if accuracy
         loss = @loss_func.(out, Tensor.convert(y))
-        loss_data = loss.data
+        loss_data = Utils.to_f(loss.data)
       end
       call_callbacks(:after_test_on_batch)
       [correct, loss_data]
@@ -567,6 +568,56 @@ module DNN
       end
     end
 
+    # Convert the parameters of model and optimizer for cpu.
+    # @return [DNN::Models::Model] Return self.
+    def to_cpu
+      params_data = get_all_params_data
+      clean_layers
+      set_all_params_data(params_data)
+      trainable_layers.each do |layer|
+        layer.get_params.each do |key, param|
+          data = param.data
+          if DNN.use_cumo? && data.is_a?(Cumo::NArray)
+            param.data = Utils.cumo2numo(data)
+          end
+        end
+      end
+      @optimizer.status.each do |key, state|
+        next unless state
+        state.each do |param, data|
+          if DNN.use_cumo? && data.is_a?(Cumo::NArray)
+            state[param] = Utils.cumo2numo(data)
+          end
+        end
+      end
+      self
+    end
+
+    # Convert the parameters of model and optimizer for gpu.
+    # @return [DNN::Models::Model] Return self.
+    def to_gpu
+      params_data = get_all_params_data
+      clean_layers
+      set_all_params_data(params_data)
+      trainable_layers.each do |layer|
+        layer.get_params.each do |(key, param)|
+          data = param.data
+          if DNN.use_cumo? && data.is_a?(Numo::NArray)
+            param.data = Utils.numo2cumo(data)
+          end
+        end
+      end
+      @optimizer.status.each do |(key, state)|
+        next unless state
+        state.each do |(param, data)|
+          if DNN.use_cumo? && data.is_a?(Numo::NArray)
+            state[param] = Utils.numo2cumo(data)
+          end
+        end
+      end
+      self
+    end
+
     private
 
     def get_all_trainable_params
@@ -584,10 +635,10 @@ module DNN
     def metrics_to_str(mertics)
       mertics.map { |key, values|
         str_values = if values.is_a?(Array)
-                       values_fmt = values.map { |v| sprintf('%.4f', v) }
+                       values_fmt = values.map { |v| sprintf('%.4f', Utils.to_f(v)) }
                        "[#{values_fmt.join(", ")}]"
                      else
-                       sprintf('%.4f', values)
+                       sprintf('%.4f', Utils.to_f(values))
                      end
         "#{key}: #{str_values}"
       }.join(", ")
@@ -656,6 +707,7 @@ module DNN
         raise TypeError, "layer: #{layer.class.name} is not an instance of the DNN::Layers::Layer class or DNN::Models::Chain class."
       end
       @stack.insert(index, layer)
+      self
     end
 
     # Remove layer to the model.
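The new to_cpu/to_gpu pair makes device placement explicit: both rebuild the layer graph and then migrate parameter arrays and optimizer state across backends. A typical round trip might look like this sketch (assuming a Marshal-based Model#save as in previous releases):

    model.to_gpu                        # move params + optimizer state to Cumo
    model.train(x_train, y_train, 10, batch_size: 128)
    model.to_cpu                        # back to Numo so the weights serialize portably
    model.save("trained_model.marshal")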
data/lib/dnn/core/monkey_patch.rb
CHANGED
File without changes
data/lib/dnn/core/optimizers.rb
CHANGED
@@ -3,6 +3,7 @@ module DNN
 
   # Super class of all optimizer classes.
   class Optimizer
+    attr_reader :status
     attr_accessor :clip_norm
 
     def self.from_hash(hash)
@@ -47,7 +48,7 @@ module DNN
     end
 
     private def clip_grads(params)
-      norm = Math.sqrt(params.reduce(0) { |total, param| total + (param.grad**2).sum })
+      norm = Math.sqrt(params.reduce(0) { |total, param| total + (param.grad**2).sum.to_f })
       return if norm <= @clip_norm
       rate = @clip_norm / (norm + 1e-7)
       params.each do |param|
@@ -71,6 +72,7 @@ module DNN
       @lr = lr
       @momentum = momentum
       @v = {}
+      @status = { v: @v }
     end
 
     def to_hash
@@ -120,6 +122,7 @@ module DNN
       @lr = lr
       @eps = eps
       @g = {}
+      @status = { g: @g }
     end
 
     private def update_params(params)
@@ -153,6 +156,7 @@ module DNN
       @alpha = alpha
       @eps = eps
       @g = {}
+      @status = { g: @g }
     end
 
     def to_hash
@@ -184,6 +188,7 @@ module DNN
       @eps = eps
       @h = {}
       @s = {}
+      @status = { h: @h, s: @s }
     end
 
     def to_hash
@@ -221,6 +226,7 @@ module DNN
       @eps = eps
       @m = {}
       @v = {}
+      @status = { m: @m, v: @v }
     end
 
     def to_hash
@@ -265,6 +271,7 @@ module DNN
       @m = {}
       @v = {}
       @s = amsgrad ? {} : nil
+      @status = { m: @m, v: @v, s: @s }
     end
 
     def to_hash
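Exposing @status is what lets Model#to_cpu/#to_gpu above migrate optimizer slots along with the weights. For Adam, for instance, the hash carries the moment estimates:

    opt = DNN::Optimizers::Adam.new
    opt.status.keys  # => [:m, :v, :s]  (the :s entry is nil unless amsgrad is enabled)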
data/lib/dnn/core/param.rb
CHANGED
File without changes

data/lib/dnn/core/regularizers.rb
CHANGED
File without changes

data/lib/dnn/core/savers.rb
CHANGED
File without changes

data/lib/dnn/core/tensor.rb
CHANGED
File without changes
data/lib/dnn/core/utils.rb
CHANGED
@@ -43,5 +43,28 @@ module DNN
     def self.numerical_grad(x, func)
       (func.(x + 1e-7) - func.(x)) / 1e-7
     end
+
+    # Convert numo to cumo.
+    def self.numo2cumo(na)
+      b = na.to_binary
+      ca = Cumo::SFloat.from_binary(b)
+      ca.reshape(*na.shape)
+    end
+
+    # Convert cumo to numo.
+    def self.cumo2numo(ca)
+      b = ca.to_binary
+      na = Numo::SFloat.from_binary(b)
+      na.reshape(*ca.shape)
+    end
+
+    # Force convert to Float.
+    def self.to_f(x)
+      if x.is_a?(Xumo::NArray)
+        x[0].to_f
+      else
+        x.to_f
+      end
+    end
   end
 end
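The converters work by serializing the array to a binary string and rebuilding it on the other backend; a round-trip sketch (assuming Cumo is loaded):

    na  = Numo::SFloat.new(2, 3).seq       # CPU array
    ca  = DNN::Utils.numo2cumo(na)         # copy to GPU (Cumo::SFloat)
    na2 = DNN::Utils.cumo2numo(ca)         # and back
    p na == na2                            # element-wise compare => Numo::Bit of 1s
    DNN::Utils.to_f(Numo::SFloat[3.5])     # => 3.5 (unwraps one-element arrays)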
data/lib/dnn/datasets/cifar10.rb
CHANGED
File without changes

data/lib/dnn/datasets/cifar100.rb
CHANGED
File without changes

data/lib/dnn/datasets/downloader.rb
CHANGED
File without changes

data/lib/dnn/datasets/fashion-mnist.rb
CHANGED
File without changes

data/lib/dnn/datasets/iris.rb
CHANGED
File without changes

data/lib/dnn/datasets/mnist.rb
CHANGED
File without changes

data/lib/dnn/datasets/stl-10.rb
CHANGED
File without changes
data/lib/dnn/image.rb
CHANGED
File without changes

data/lib/dnn/keras-model-convertor.rb
CHANGED
File without changes

data/lib/dnn/numo2numpy.rb
CHANGED
File without changes
data/lib/dnn/version.rb
CHANGED
@@ -1,3 +1,3 @@
 module DNN
-  VERSION = "1.1.6"
+  VERSION = "1.2.0"
 end
data/ruby-dnn.gemspec
CHANGED
File without changes

data/third_party/stb_image.h
CHANGED
File without changes

data/third_party/stb_image_resize.h
CHANGED
File without changes

data/third_party/stb_image_write.h
CHANGED
File without changes
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: ruby-dnn
 version: !ruby/object:Gem::Version
-  version: 1.
+  version: 1.2.0
 platform: ruby
 authors:
 - unagiootoro
 autorequire:
 bindir: exe
 cert_chain: []
-date: 2020-05-
+date: 2020-05-16 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: numo-narray
@@ -135,6 +135,7 @@ files:
 - examples/mnist_conv2d_example.rb
 - examples/mnist_define_by_run.rb
 - examples/mnist_example.rb
+- examples/mnist_gpu.rb
 - examples/mnist_lstm_example.rb
 - examples/pix2pix/dcgan.rb
 - examples/pix2pix/imgen.rb
@@ -157,6 +158,7 @@ files:
 - lib/dnn/core/layers/merge_layers.rb
 - lib/dnn/core/layers/normalizations.rb
 - lib/dnn/core/layers/rnn_layers.rb
+- lib/dnn/core/layers/split_layers.rb
 - lib/dnn/core/link.rb
 - lib/dnn/core/losses.rb
 - lib/dnn/core/models.rb