ruby-dnn 0.10.4 → 0.12.4

Files changed (46)
  1. checksums.yaml +4 -4
  2. data/.travis.yml +1 -2
  3. data/README.md +33 -6
  4. data/examples/cifar100_example.rb +3 -3
  5. data/examples/cifar10_example.rb +3 -3
  6. data/examples/dcgan/dcgan.rb +112 -0
  7. data/examples/dcgan/imgen.rb +20 -0
  8. data/examples/dcgan/train.rb +41 -0
  9. data/examples/iris_example.rb +3 -6
  10. data/examples/mnist_conv2d_example.rb +5 -5
  11. data/examples/mnist_define_by_run.rb +52 -0
  12. data/examples/mnist_example.rb +3 -3
  13. data/examples/mnist_lstm_example.rb +3 -3
  14. data/examples/xor_example.rb +4 -5
  15. data/ext/rb_stb_image/rb_stb_image.c +103 -0
  16. data/lib/dnn.rb +10 -10
  17. data/lib/dnn/cifar10.rb +1 -1
  18. data/lib/dnn/cifar100.rb +1 -1
  19. data/lib/dnn/core/activations.rb +21 -22
  20. data/lib/dnn/core/cnn_layers.rb +94 -111
  21. data/lib/dnn/core/embedding.rb +30 -9
  22. data/lib/dnn/core/initializers.rb +31 -21
  23. data/lib/dnn/core/iterator.rb +52 -0
  24. data/lib/dnn/core/layers.rb +99 -66
  25. data/lib/dnn/core/link.rb +24 -0
  26. data/lib/dnn/core/losses.rb +69 -59
  27. data/lib/dnn/core/merge_layers.rb +71 -0
  28. data/lib/dnn/core/models.rb +393 -0
  29. data/lib/dnn/core/normalizations.rb +27 -14
  30. data/lib/dnn/core/optimizers.rb +212 -134
  31. data/lib/dnn/core/param.rb +8 -6
  32. data/lib/dnn/core/regularizers.rb +10 -7
  33. data/lib/dnn/core/rnn_layers.rb +78 -85
  34. data/lib/dnn/core/utils.rb +6 -3
  35. data/lib/dnn/downloader.rb +3 -3
  36. data/lib/dnn/fashion-mnist.rb +89 -0
  37. data/lib/dnn/image.rb +57 -18
  38. data/lib/dnn/iris.rb +1 -3
  39. data/lib/dnn/mnist.rb +38 -34
  40. data/lib/dnn/version.rb +1 -1
  41. data/third_party/stb_image.h +16 -4
  42. data/third_party/stb_image_resize.h +2630 -0
  43. data/third_party/stb_image_write.h +4 -7
  44. metadata +12 -4
  45. data/lib/dnn/core/dataset.rb +0 -34
  46. data/lib/dnn/core/model.rb +0 -440
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: 589418cabcfddc5f0066011a7094a9740903d891c0a53248c2dec2ae6ba050f9
-  data.tar.gz: af86c3663a8e855a8ccf2ffed6825037ebe452c0e83810d0595282959715516f
+  metadata.gz: 2835cd619265328ffa05c41515915aaeb54924ca4ddd4958edba992ea4feda89
+  data.tar.gz: 5ab01dbcf1219cdfac7b3a0e20bc5783fa15baaf82225ea1a9a2d076569cb7f1
 SHA512:
-  metadata.gz: d842fe4bbbd6801726e97177ab1212b6eab9d09bcb553a5bf35e082a9685ffea362fcf729ea41a4f438790bc5d2ddd0c962ad629af6c2afe45d95049989ddd37
-  data.tar.gz: fa13a086a3232e6117d5976a467a1a0bb3640f2b8cd7c6db1b2ef5a6c17d42a3d47ca9ce493f6278c331beda12380b4344d502b3cebef0b6978891c383d19167
+  metadata.gz: 1335b77cf701501930e32f07a9faa027706b908d79a24b1b369228b45c5ef15a38ac476c6951d14b7ce398b1eb7789bf0c96566427b84fffb1ed892af7e43629
+  data.tar.gz: baf8ceeec91699a75a14bde79e815561189e26c9317e34f09580f90f45562120ba12540a7a64e42e562d4d26162490b6c2170721459b8e635731e900812a357b
data/.travis.yml CHANGED
@@ -1,7 +1,6 @@
 sudo: false
 language: ruby
 rvm:
-- 2.5.1
-before_install: gem install bundler -v 1.16.2
+- 2.6.1
 script:
 - rake test
data/README.md CHANGED
@@ -26,7 +26,7 @@ Or install it yourself as:
 ### MNIST MLP example

 ```ruby
-model = Model.new
+model = Sequential.new

 model << InputLayer.new(784)

@@ -38,10 +38,38 @@ model << ReLU.new

 model << Dense.new(10)

-model.compile(RMSProp.new, SoftmaxCrossEntropy.new)
+model.setup(RMSProp.new, SoftmaxCrossEntropy.new)

 model.train(x_train, y_train, 10, batch_size: 100, test: [x_test, y_test])
+```
+
+When creating a model in 'define by run' style:

+```ruby
+class MLP < Model
+  def initialize
+    super
+    @l1 = Dense.new(256)
+    @l2 = Dense.new(256)
+    @l3 = Dense.new(10)
+  end
+
+  def call(x)
+    x = InputLayer.(x)
+    x = @l1.(x)
+    x = ReLU.(x)
+    x = @l2.(x)
+    x = ReLU.(x)
+    x = @l3.(x)
+    x
+  end
+end
+
+model = MLP.new
+
+model.setup(Adam.new, SoftmaxCrossEntropy.new)
+
+model.train(x_train, y_train, 10, batch_size: 100, test: [x_test, y_test])
 ```

 Please refer to examples for basic usage.
@@ -50,14 +78,13 @@ If you want to know more detailed information, please refer to the source code.
 ## Implemented
 || Implemented classes |
 |:-----------|------------:|
-| Connections | Dense, Conv2D, Conv2D_Transpose, Embedding, SimpleRNN, LSTM, GRU |
+| Connections | Dense, Conv2D, Conv2DTranspose, Embedding, SimpleRNN, LSTM, GRU |
 | Layers | Flatten, Reshape, Dropout, BatchNormalization, MaxPool2D, AvgPool2D, UnPool2D |
 | Activations | Sigmoid, Tanh, Softsign, Softplus, Swish, ReLU, LeakyReLU, ELU |
-| Optimizers | SGD, Nesterov, AdaGrad, RMSProp, AdaDelta, Adam, RMSPropGraves |
-| Losses | MeanSquaredError, MeanAbsoluteError, HuberLoss, SoftmaxCrossEntropy, SigmoidCrossEntropy |
+| Optimizers | SGD, Nesterov, AdaGrad, RMSProp, AdaDelta, RMSPropGraves, Adam, AdaBound |
+| Losses | MeanSquaredError, MeanAbsoluteError, Hinge, HuberLoss, SoftmaxCrossEntropy, SigmoidCrossEntropy |

 ## TODO
-● Support to define by run model.
 ● Write a test.
 ● Write a document.
 ● Support to GPU.
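
The table above renames Conv2D_Transpose to Conv2DTranspose and adds an AdaBound optimizer and a Hinge loss. A minimal sketch of plugging the new classes into the renamed setup API (a hypothetical toy model; AdaBound and Hinge are assumed to accept default constructor arguments):

```ruby
require "dnn"

include DNN::Layers
include DNN::Activations
include DNN::Optimizers
include DNN::Losses
include DNN::Models

# Toy binary classifier wired with the classes added in this release.
model = Sequential.new
model << InputLayer.new(2)
model << Dense.new(8)
model << ReLU.new
model << Dense.new(1)

# setup replaces the old compile; AdaBound and Hinge defaults are assumed.
model.setup(AdaBound.new, Hinge.new)
```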
data/examples/cifar100_example.rb CHANGED
@@ -7,7 +7,7 @@ include DNN::Layers
 include DNN::Activations
 include DNN::Optimizers
 include DNN::Losses
-Model = DNN::Model
+include DNN::Models
 CIFAR100 = DNN::CIFAR100

 x_train, y_train = CIFAR100.load_train
@@ -25,7 +25,7 @@ y_test = y_test[true, 1]
 y_train = DNN::Utils.to_categorical(y_train, 100, Numo::SFloat)
 y_test = DNN::Utils.to_categorical(y_test, 100, Numo::SFloat)

-model = Model.new
+model = Sequential.new

 model << InputLayer.new([32, 32, 3])

@@ -66,6 +66,6 @@ model << Dropout.new(0.5)

 model << Dense.new(100)

-model.compile(Adam.new, SoftmaxCrossEntropy.new)
+model.setup(Adam.new, SoftmaxCrossEntropy.new)

 model.train(x_train, y_train, 10, batch_size: 100, test: [x_test, y_test])
data/examples/cifar10_example.rb CHANGED
@@ -7,7 +7,7 @@ include DNN::Layers
 include DNN::Activations
 include DNN::Optimizers
 include DNN::Losses
-Model = DNN::Model
+include DNN::Models
 CIFAR10 = DNN::CIFAR10

 x_train, y_train = CIFAR10.load_train
@@ -22,7 +22,7 @@ x_test /= 255
 y_train = DNN::Utils.to_categorical(y_train, 10, Numo::SFloat)
 y_test = DNN::Utils.to_categorical(y_test, 10, Numo::SFloat)

-model = Model.new
+model = Sequential.new

 model << InputLayer.new([32, 32, 3])

@@ -63,6 +63,6 @@ model << Dropout.new(0.5)

 model << Dense.new(10)

-model.compile(Adam.new, SoftmaxCrossEntropy.new)
+model.setup(Adam.new, SoftmaxCrossEntropy.new)

 model.train(x_train, y_train, 10, batch_size: 100, test: [x_test, y_test])
data/examples/dcgan/dcgan.rb ADDED
@@ -0,0 +1,112 @@
+include DNN::Layers
+include DNN::Activations
+include DNN::Optimizers
+include DNN::Losses
+include DNN::Models
+
+class Generator < Model
+  def initialize
+    super
+    @l1 = Dense.new(1024)
+    @l2 = Dense.new(7 * 7 * 64)
+    @l3 = Conv2DTranspose.new(64, 4, strides: 2, padding: true)
+    @l4 = Conv2D.new(64, 4, padding: true)
+    @l5 = Conv2DTranspose.new(32, 4, strides: 2, padding: true)
+    @l6 = Conv2D.new(32, 4, padding: true)
+    @l7 = Conv2D.new(1, 4, padding: true)
+    @bn1 = BatchNormalization.new
+    @bn2 = BatchNormalization.new
+    @bn3 = BatchNormalization.new
+    @bn4 = BatchNormalization.new
+    @bn5 = BatchNormalization.new
+    @bn6 = BatchNormalization.new
+  end
+
+  def call(x)
+    x = InputLayer.new(20).(x)
+    x = @l1.(x)
+    x = @bn1.(x)
+    x = ReLU.(x)
+
+    x = @l2.(x)
+    x = @bn2.(x)
+    x = ReLU.(x)
+
+    x = Reshape.(x, [7, 7, 64])
+    x = @l3.(x)
+    x = @bn3.(x)
+    x = ReLU.(x)
+
+    x = @l4.(x)
+    x = @bn4.(x)
+    x = ReLU.(x)
+
+    x = @l5.(x)
+    x = @bn5.(x)
+    x = ReLU.(x)
+
+    x = @l6.(x)
+    x = @bn6.(x)
+    x = ReLU.(x)
+
+    x = @l7.(x)
+    x = Tanh.(x)
+    x
+  end
+end
+
+
+class Discriminator < Model
+  def initialize
+    super
+    @l1 = Conv2D.new(32, 4, strides: 2, padding: true)
+    @l2 = Conv2D.new(32, 4, padding: true)
+    @l3 = Conv2D.new(64, 4, strides: 2, padding: true)
+    @l4 = Conv2D.new(64, 4, padding: true)
+    @l5 = Dense.new(1024)
+    @l6 = Dense.new(1)
+  end
+
+  def call(x, trainable = true)
+    has_param_layers.each do |layer|
+      layer.trainable = trainable
+    end
+    x = InputLayer.new([28, 28, 1]).(x)
+    x = @l1.(x)
+    x = LeakyReLU.(x, 0.2)
+
+    x = @l2.(x)
+    x = LeakyReLU.(x, 0.2)
+
+    x = @l3.(x)
+    x = LeakyReLU.(x, 0.2)
+
+    x = @l4.(x)
+    x = LeakyReLU.(x, 0.2)
+
+    x = Flatten.(x)
+    x = @l5.(x)
+    x = LeakyReLU.(x, 0.2)
+
+    x = @l6.(x)
+    x
+  end
+end
+
+
+class DCGAN < Model
+  attr_reader :gen
+  attr_reader :dis
+
+  def initialize(gen, dis)
+    super()
+    @gen = gen
+    @dis = dis
+  end
+
+  def call(x)
+    x = @gen.(x)
+    x = @dis.(x, false)
+    x
+  end
+end
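
In DCGAN#call, the generator's output is passed through the discriminator with trainable = false, so a training step on the combined model updates only the generator's weights; the discriminator is trained separately on real and generated batches, as train.rb below shows.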
data/examples/dcgan/imgen.rb ADDED
@@ -0,0 +1,20 @@
+require "dnn"
+require "dnn/image"
+require "numo/linalg/autoloader"
+require_relative "dcgan"
+
+Image = DNN::Image
+
+batch_size = 100
+
+dcgan = Model.load("trained/dcgan_model_epoch20.marshal")
+gen = dcgan.gen
+
+Numo::SFloat.srand(rand(1 << 31))
+noise = Numo::SFloat.new(batch_size, 20).rand(-1, 1)
+images = gen.predict(noise)
+
+batch_size.times do |i|
+  img = Numo::UInt8.cast(((images[i, false] + 1) * 127.5).round)
+  Image.write("img/img_#{i}.jpg", img)
+end
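
Since the generator ends in Tanh, predict returns pixel values in [-1, 1]; (images[i, false] + 1) * 127.5 maps them back to [0, 255] (-1 → 0, 1 → 255) before each image is written out as a JPEG. This is the inverse of the x / 127.5 - 1 normalization applied in train.rb below.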
data/examples/dcgan/train.rb ADDED
@@ -0,0 +1,41 @@
+require "dnn"
+require "dnn/mnist"
+require "numo/linalg/autoloader"
+require_relative "dcgan"
+
+MNIST = DNN::MNIST
+
+Numo::SFloat.srand(rand(1 << 31))
+
+epochs = 20
+batch_size = 128
+
+gen = Generator.new
+dis = Discriminator.new
+dcgan = DCGAN.new(gen, dis)
+
+dis.setup(Adam.new(alpha: 0.00001, beta1: 0.1), SigmoidCrossEntropy.new)
+dcgan.setup(Adam.new(alpha: 0.0002, beta1: 0.5), SigmoidCrossEntropy.new)
+
+x_train, y_train = MNIST.load_train
+x_train = Numo::SFloat.cast(x_train)
+x_train = x_train / 127.5 - 1
+
+iter = DNN::Iterator.new(x_train, y_train)
+(1..epochs).each do |epoch|
+  puts "epoch: #{epoch}"
+  iter.foreach(batch_size) do |x_batch, y_batch, index|
+    noise = Numo::SFloat.new(batch_size, 20).rand(-1, 1)
+    images = gen.predict(noise)
+    x = x_batch.concatenate(images)
+    y = Numo::SFloat.cast([1] * batch_size + [0] * batch_size).reshape(batch_size * 2, 1)
+    dis_loss = dis.train_on_batch(x, y)
+
+    noise = Numo::SFloat.new(batch_size, 20).rand(-1, 1)
+    label = Numo::SFloat.cast([1] * batch_size).reshape(batch_size, 1)
+    dcgan_loss = dcgan.train_on_batch(noise, label)
+
+    puts "index: #{index}, dis_loss: #{dis_loss.mean}, dcgan_loss: #{dcgan_loss.mean}"
+  end
+  dcgan.save("trained/dcgan_model_epoch#{epoch}.marshal")
+end
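
The loop above iterates with the new DNN::Iterator (data/lib/dnn/core/iterator.rb, +52 in this release). A minimal sketch of that class in isolation, assuming foreach behaves as it is used above, yielding x/y mini-batches plus a running batch index:

```ruby
require "dnn"

# Hypothetical toy data: 10 samples, 2 features, 1 target each.
x = Numo::SFloat.new(10, 2).rand
y = Numo::SFloat.new(10, 1).rand

iter = DNN::Iterator.new(x, y)
iter.foreach(5) do |x_batch, y_batch, index|
  # Two batches of 5 samples per pass over the data.
  puts "batch #{index}: x=#{x_batch.shape.inspect} y=#{y_batch.shape.inspect}"
end
```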
data/examples/iris_example.rb CHANGED
@@ -7,20 +7,17 @@ include DNN::Layers
 include DNN::Activations
 include DNN::Optimizers
 include DNN::Losses
-Model = DNN::Model
+include DNN::Models
 Iris = DNN::Iris

 x, y = Iris.load(true)
 x_train, y_train = x[0...100, true], y[0...100]
 x_test, y_test = x[100...150, true], y[100...150]

-x_train /= 255
-x_test /= 255
-
 y_train = DNN::Utils.to_categorical(y_train, 3, Numo::SFloat)
 y_test = DNN::Utils.to_categorical(y_test, 3, Numo::SFloat)

-model = Model.new
+model = Sequential.new

 model << InputLayer.new(4)

@@ -29,6 +26,6 @@ model << ReLU.new

 model << Dense.new(3)

-model.compile(Adam.new, SoftmaxCrossEntropy.new)
+model.setup(Adam.new, SoftmaxCrossEntropy.new)

 model.train(x_train, y_train, 1000, batch_size: 10, test: [x_test, y_test])
data/examples/mnist_conv2d_example.rb CHANGED
@@ -7,14 +7,14 @@ include DNN::Layers
 include DNN::Activations
 include DNN::Optimizers
 include DNN::Losses
-Model = DNN::Model
+include DNN::Models
 MNIST = DNN::MNIST

 x_train, y_train = MNIST.load_train
 x_test, y_test = MNIST.load_test

-x_train = Numo::SFloat.cast(x_train).reshape(x_train.shape[0], 28, 28, 1)
-x_test = Numo::SFloat.cast(x_test).reshape(x_test.shape[0], 28, 28, 1)
+x_train = Numo::SFloat.cast(x_train)
+x_test = Numo::SFloat.cast(x_test)

 x_train /= 255
 x_test /= 255
@@ -22,7 +22,7 @@ x_test /= 255
 y_train = DNN::Utils.to_categorical(y_train, 10, Numo::SFloat)
 y_test = DNN::Utils.to_categorical(y_test, 10, Numo::SFloat)

-model = Model.new
+model = Sequential.new

 model << InputLayer.new([28, 28, 1])

@@ -45,6 +45,6 @@ model << Dropout.new(0.5)

 model << Dense.new(10)

-model.compile(Adam.new, SoftmaxCrossEntropy.new)
+model.setup(Adam.new, SoftmaxCrossEntropy.new)

 model.train(x_train, y_train, 10, batch_size: 100, test: [x_test, y_test])
data/examples/mnist_define_by_run.rb ADDED
@@ -0,0 +1,52 @@
+require "dnn"
+require "dnn/mnist"
+# If you use numo/linalg, uncomment the following line.
+# require "numo/linalg/autoloader"
+
+include DNN::Layers
+include DNN::Activations
+include DNN::Optimizers
+include DNN::Losses
+include DNN::Models
+MNIST = DNN::MNIST
+
+x_train, y_train = MNIST.load_train
+x_test, y_test = MNIST.load_test
+
+x_train = Numo::SFloat.cast(x_train).reshape(x_train.shape[0], 784)
+x_test = Numo::SFloat.cast(x_test).reshape(x_test.shape[0], 784)
+
+x_train /= 255
+x_test /= 255
+
+y_train = DNN::Utils.to_categorical(y_train, 10, Numo::SFloat)
+y_test = DNN::Utils.to_categorical(y_test, 10, Numo::SFloat)
+
+class MLP < Model
+  def initialize
+    super
+    @l1 = Dense.new(256)
+    @l2 = Dense.new(256)
+    @l3 = Dense.new(10)
+    @bn1 = BatchNormalization.new
+    @bn2 = BatchNormalization.new
+  end
+
+  def call(x)
+    x = InputLayer.(x)
+    x = @l1.(x)
+    x = @bn1.(x)
+    x = ReLU.(x)
+    x = @l2.(x)
+    x = @bn2.(x)
+    x = ReLU.(x)
+    x = @l3.(x)
+    x
+  end
+end
+
+model = MLP.new
+
+model.setup(Adam.new, SoftmaxCrossEntropy.new)
+
+model.train(x_train, y_train, 10, batch_size: 100, test: [x_test, y_test])
data/examples/mnist_example.rb CHANGED
@@ -7,7 +7,7 @@ include DNN::Layers
 include DNN::Activations
 include DNN::Optimizers
 include DNN::Losses
-Model = DNN::Model
+include DNN::Models
 MNIST = DNN::MNIST

 x_train, y_train = MNIST.load_train
@@ -22,7 +22,7 @@ x_test /= 255
 y_train = DNN::Utils.to_categorical(y_train, 10, Numo::SFloat)
 y_test = DNN::Utils.to_categorical(y_test, 10, Numo::SFloat)

-model = Model.new
+model = Sequential.new

 model << InputLayer.new(784)

@@ -34,6 +34,6 @@ model << ReLU.new

 model << Dense.new(10)

-model.compile(RMSProp.new, SoftmaxCrossEntropy.new)
+model.setup(RMSProp.new, SoftmaxCrossEntropy.new)

 model.train(x_train, y_train, 10, batch_size: 100, test: [x_test, y_test])