ruby-dnn 0.13.4 → 0.14.0

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: cfe06541a0afe5fd3465839f02865efe7105c7635003840f60fd46e2976b0c26
-  data.tar.gz: d0f2150f86fe1ee423231c410af8809cecf9d412ecefebb0a3bee1ea14abeee7
+  metadata.gz: '08960a5015b086d49e27dc44ea1076cb56c6548cf2945698dd81ae30a1f738e7'
+  data.tar.gz: 2b78bfa3f91b224226fa2f268f0fb0cc94c2d711bc1d7324dddf32d7e50be409
 SHA512:
-  metadata.gz: 65ad242e2647f2035409432b6568a72d73b95a10b096d863938f9e0cef3bc07aac9a770a9d900d021af2afc3f912a9a3ed4725ba6488411c758b5f5a717a8964
-  data.tar.gz: d4102ded5ade667087736ecea6f897f1136b138682c93a53869e68b3fa70683a3bc8570e3ef3eb840c0a5f13ceb38a7f41b6c9a6f6b4d9bee9543b52ead2a95a
+  metadata.gz: ccdc09a73a918fd8eb206283d9d2db396a528f737d9981d49ecb90affcfa1d417429c3fb0a7070d57563a3168c3283a6aa762e04271df58d9b57bb2232a96545
+  data.tar.gz: 5e01577078cfee549f2f5b963fa3565a59ea1fc805eca9bebea5a368b95e32f5c248db0e42d28c883eb71b138ce70372abb7f799acc3e2fde72550b3db0996f3
@@ -3,11 +3,10 @@ require "dnn/datasets/cifar100"
 # If you use numo/linalg then please uncomment out.
 # require "numo/linalg/autoloader"
 
+include DNN::Models
 include DNN::Layers
-include DNN::Activations
 include DNN::Optimizers
 include DNN::Losses
-include DNN::Models
 CIFAR100 = DNN::CIFAR100
 
 x_train, y_train = CIFAR100.load_train
@@ -3,11 +3,10 @@ require "dnn/datasets/cifar10"
 # If you use numo/linalg then please uncomment out.
 # require "numo/linalg/autoloader"
 
+include DNN::Models
 include DNN::Layers
-include DNN::Activations
 include DNN::Optimizers
 include DNN::Losses
-include DNN::Models
 CIFAR10 = DNN::CIFAR10
 
 x_train, y_train = CIFAR10.load_train
@@ -1,8 +1,5 @@
-include DNN::Layers
-include DNN::Activations
-include DNN::Optimizers
-include DNN::Losses
 include DNN::Models
+include DNN::Layers
 
 class Generator < Model
   def initialize
@@ -55,7 +52,6 @@ class Generator < Model
   end
 end
 
-
 class Discriminator < Model
   def initialize
     super
@@ -97,12 +93,11 @@ class Discriminator < Model
   end
 end
 
-
 class DCGAN < Model
-  attr_reader :gen
-  attr_reader :dis
+  attr_accessor :gen
+  attr_accessor :dis
 
-  def initialize(gen, dis)
+  def initialize(gen = nil, dis = nil)
     super()
     @gen = gen
     @dis = dis
@@ -113,4 +108,19 @@ class DCGAN < Model
     x = @dis.(x, false)
     x
   end
+
+  def train_step(x_batch, y_batch)
+    batch_size = x_batch.shape[0]
+    noise = Numo::SFloat.new(batch_size, 20).rand(-1, 1)
+    images = @gen.predict(noise)
+    x = x_batch.concatenate(images)
+    y = Numo::SFloat.cast([1] * batch_size + [0] * batch_size).reshape(batch_size * 2, 1)
+    dis_loss = @dis.train_on_batch(x, y)
+
+    noise = Numo::SFloat.new(batch_size, 20).rand(-1, 1)
+    label = Numo::SFloat.cast([1] * batch_size).reshape(batch_size, 1)
+    dcgan_loss = train_on_batch(noise, label)
+
+    { dis_loss: dis_loss.mean, dcgan_loss: dcgan_loss.mean }
+  end
 end
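
Note on the train_step hook added above: Model#train appears to invoke train_step once per batch and to report the returned hash in place of the default loss value, so overriding it (as DCGAN does) replaces the hand-written batch loop that the training script further down deletes. A minimal sketch under that assumed contract (CustomModel is illustrative, not from the gem):

    class CustomModel < Model
      # Assumed default behavior: delegate to train_on_batch and return a
      # hash of values to be logged for the batch.
      def train_step(x_batch, y_batch)
        loss = train_on_batch(x_batch, y_batch)
        { loss: loss.mean }
      end
    end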
@@ -3,15 +3,22 @@ require "dnn/image"
 require "numo/linalg/autoloader"
 require_relative "dcgan"
 
+include DNN::Loaders
 Image = DNN::Image
 
 batch_size = 100
 
-dcgan = DCGAN.load("trained/dcgan_model_epoch20.marshal")
-gen = dcgan.gen
+gen = Generator.new
+dis = Discriminator.new
+dcgan = DCGAN.new(gen, dis)
+dcgan.predict1(Numo::SFloat.zeros(20))
+
+loader = MarshalLoader.new(dcgan)
+loader.load("trained/dcgan_model_epoch20.marshal")
 
 Numo::SFloat.srand(rand(1 << 31))
 noise = Numo::SFloat.new(batch_size, 20).rand(-1, 1)
+
 images = gen.predict(noise)
 
 batch_size.times do |i|
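
The dummy forward pass above (dcgan.predict1(Numo::SFloat.zeros(20))) seems to be what instantiates the model's layers and parameters so that MarshalLoader has something to restore into; that reading is inferred from this script's ordering, not from documentation. Under that assumption, the general restore pattern for a saved 0.14.0 model would be:

    model = MyModel.new                            # hypothetical model class
    model.predict1(Numo::SFloat.zeros(input_dim))  # dummy input builds the layers (input_dim is illustrative)
    DNN::Loaders::MarshalLoader.new(model).load("trained/model.marshal")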
@@ -3,6 +3,9 @@ require "dnn/datasets/mnist"
 require "numo/linalg/autoloader"
 require_relative "dcgan"
 
+include DNN::Optimizers
+include DNN::Losses
+include DNN::Callbacks
 MNIST = DNN::MNIST
 
 Numo::SFloat.srand(rand(1 << 31))
@@ -17,28 +20,10 @@ dcgan = DCGAN.new(gen, dis)
 dis.setup(Adam.new(alpha: 0.00001, beta1: 0.1), SigmoidCrossEntropy.new)
 dcgan.setup(Adam.new(alpha: 0.0002, beta1: 0.5), SigmoidCrossEntropy.new)
 
-x_train, y_train = MNIST.load_train
+x_train, * = MNIST.load_train
 x_train = Numo::SFloat.cast(x_train)
 x_train = x_train / 127.5 - 1
 
-iter = DNN::Iterator.new(x_train, y_train)
-num_batchs = x_train.shape[0] / batch_size
-(1..epochs).each do |epoch|
-  puts "epoch: #{epoch}"
-  num_batchs.times do |index|
-    x_batch, y_batch = iter.next_batch(batch_size)
-    noise = Numo::SFloat.new(batch_size, 20).rand(-1, 1)
-    images = gen.predict(noise)
-    x = x_batch.concatenate(images)
-    y = Numo::SFloat.cast([1] * batch_size + [0] * batch_size).reshape(batch_size * 2, 1)
-    dis_loss = dis.train_on_batch(x, y)
-
-    noise = Numo::SFloat.new(batch_size, 20).rand(-1, 1)
-    label = Numo::SFloat.cast([1] * batch_size).reshape(batch_size, 1)
-    dcgan_loss = dcgan.train_on_batch(noise, label)
-
-    puts "index: #{index}, dis_loss: #{dis_loss.mean}, dcgan_loss: #{dcgan_loss.mean}"
-  end
-  iter.reset
-  dcgan.save("trained/dcgan_model_epoch#{epoch}.marshal")
-end
+dcgan.add_callback(CheckPoint.new("trained/dcgan_model"))
+dcgan.predict1(Numo::SFloat.zeros(20))
+dcgan.train(x_train, x_train, epochs, batch_size: batch_size, last_round_down: true)
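
Besides CheckPoint, the callbacks API (full source at the end of this diff) accepts ad-hoc hooks via LambdaCallback. A short sketch based on the constructor shown there; the event symbol must match one of the hook-method names of the Callback base class, and last_log comes from the CheckPoint implementation:

    dcgan.add_callback(LambdaCallback.new(
      :after_epoch,
      -> { puts "finished epoch #{dcgan.last_log[:epoch]}" }
    ))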
@@ -3,11 +3,10 @@ require "dnn/datasets/iris"
 # If you use numo/linalg then please uncomment out.
 # require "numo/linalg/autoloader"
 
+include DNN::Models
 include DNN::Layers
-include DNN::Activations
 include DNN::Optimizers
 include DNN::Losses
-include DNN::Models
 Iris = DNN::Iris
 
 x, y = Iris.load(true)
@@ -3,11 +3,10 @@ require "dnn/datasets/mnist"
 # If you use numo/linalg then please uncomment out.
 # require "numo/linalg/autoloader"
 
+include DNN::Models
 include DNN::Layers
-include DNN::Activations
 include DNN::Optimizers
 include DNN::Losses
-include DNN::Models
 MNIST = DNN::MNIST
 
 x_train, y_train = MNIST.load_train
@@ -3,11 +3,10 @@ require "dnn/datasets/mnist"
 # If you use numo/linalg then please uncomment out.
 # require "numo/linalg/autoloader"
 
+include DNN::Models
 include DNN::Layers
-include DNN::Activations
 include DNN::Optimizers
 include DNN::Losses
-include DNN::Models
 MNIST = DNN::MNIST
 
 x_train, y_train = MNIST.load_train
@@ -3,11 +3,10 @@ require "dnn/datasets/mnist"
 # If you use numo/linalg then please uncomment out.
 # require "numo/linalg/autoloader"
 
+include DNN::Models
 include DNN::Layers
-include DNN::Activations
 include DNN::Optimizers
 include DNN::Losses
-include DNN::Models
 MNIST = DNN::MNIST
 
 x_train, y_train = MNIST.load_train
@@ -3,11 +3,10 @@ require "dnn/datasets/mnist"
 # If you use numo/linalg then please uncomment out.
 # require "numo/linalg/autoloader"
 
+include DNN::Models
 include DNN::Layers
-include DNN::Activations
 include DNN::Optimizers
 include DNN::Losses
-include DNN::Models
 MNIST = DNN::MNIST
 
 x_train, y_train = MNIST.load_train
@@ -1,10 +1,9 @@
 require "dnn"
 
+include DNN::Models
 include DNN::Layers
-include DNN::Activations
 include DNN::Optimizers
 include DNN::Losses
-include DNN::Models
 
 x = Numo::SFloat[[0, 0], [1, 0], [0, 1], [1, 1]]
 y = Numo::SFloat[[0], [1], [1], [0]]
@@ -20,4 +19,4 @@ model.setup(SGD.new, SigmoidCrossEntropy.new)
 
 model.train(x, y, 20000, batch_size: 4, verbose: false)
 
-p DNN::Utils.sigmoid(model.predict(x))
+p model.predict(x)
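
The last change suggests that in 0.14.0 Model#predict applies the loss function's output activation itself (sigmoid here, given SigmoidCrossEntropy), making the manual DNN::Utils.sigmoid wrapping redundant; this is an inference from the diff, not documented behavior.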
data/lib/dnn.rb CHANGED
@@ -10,6 +10,7 @@ end
 require_relative "dnn/version"
 require_relative "dnn/core/error"
 require_relative "dnn/core/global"
+require_relative "dnn/core/tensor"
 require_relative "dnn/core/models"
 require_relative "dnn/core/param"
 require_relative "dnn/core/link"
@@ -25,5 +26,6 @@ require_relative "dnn/core/cnn_layers"
 require_relative "dnn/core/embedding"
 require_relative "dnn/core/rnn_layers"
 require_relative "dnn/core/optimizers"
+require_relative "dnn/core/callbacks"
 require_relative "dnn/core/savers"
 require_relative "dnn/core/utils"
@@ -1,7 +1,7 @@
 module DNN
-  module Activations
+  module Layers
 
-    class Sigmoid < Layers::Layer
+    class Sigmoid < Layer
       def forward(x)
         @y = 1 / (1 + Xumo::NMath.exp(-x))
       end
@@ -11,31 +11,28 @@ module DNN
       end
     end
 
-
-    class Tanh < Layers::Layer
+    class Tanh < Layer
       def forward(x)
         @y = Xumo::NMath.tanh(x)
       end
 
       def backward(dy)
-        dy * (1 - @y ** 2)
+        dy * (1 - @y**2)
       end
     end
 
-
-    class Softsign < Layers::Layer
+    class Softsign < Layer
       def forward(x)
         @x = x
         x / (1 + x.abs)
       end
 
       def backward(dy)
-        dy * (1 / (1 + @x.abs) ** 2)
+        dy * (1 / (1 + @x.abs)**2)
       end
     end
 
-
-    class Softplus < Layers::Layer
+    class Softplus < Layer
       def forward(x)
         @x = x
         Xumo::NMath.log(1 + Xumo::NMath.exp(x))
@@ -46,8 +43,7 @@ module DNN
       end
     end
 
-
-    class Swish < Layers::Layer
+    class Swish < Layer
       def forward(x)
         @x = x
         @y = x * (1 / (1 + Xumo::NMath.exp(-x)))
@@ -58,8 +54,7 @@ module DNN
       end
     end
 
-
-    class ReLU < Layers::Layer
+    class ReLU < Layer
       def forward(x)
         @x = x
         Xumo::SFloat.maximum(0, x)
@@ -70,8 +65,7 @@ module DNN
       end
     end
 
-
-    class LeakyReLU < Layers::Layer
+    class LeakyReLU < Layer
       attr_reader :alpha
 
       # @param [Float] alpha The slope when the output value is negative.
@@ -102,8 +96,7 @@ module DNN
       end
     end
 
-
-    class ELU < Layers::Layer
+    class ELU < Layer
       attr_reader :alpha
 
       # @param [Float] alpha The slope when the output value is negative.
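
This hunk is the source of the include changes in every example above: the activation classes moved from DNN::Activations into DNN::Layers, so a single include now exposes layers and activations alike. For instance:

    include DNN::Layers
    ReLU.new     # resolves to DNN::Layers::ReLU in 0.14.0
    Sigmoid.new  # previously DNN::Activations::Sigmoid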
@@ -0,0 +1,136 @@
+module DNN
+  module Callbacks
+
+    class Callback
+      attr_accessor :model
+
+      # Implement the methods below for the callback events you need.
+
+      # Called before each epoch.
+      # def before_epoch; end
+
+      # Called after each epoch.
+      # def after_epoch; end
+
+      # Called before each train-on-batch step.
+      # def before_train_on_batch; end
+
+      # Called after each train-on-batch step.
+      # def after_train_on_batch; end
+
+      # Called before each test-on-batch step.
+      # def before_test_on_batch; end
+
+      # Called after each test-on-batch step.
+      # def after_test_on_batch; end
+    end
+
+    # A callback that wraps a lambda and runs it on the given event.
+    class LambdaCallback < Callback
+      def initialize(event, lambda)
+        instance_eval do
+          define_singleton_method(event) { lambda.call }
+        end
+      end
+    end
+
+    # A callback that saves the model after each epoch.
+    class CheckPoint < Callback
+      def initialize(base_file_name)
+        @base_file_name = base_file_name
+      end
+
+      def after_epoch
+        model.save(@base_file_name + "_epoch#{model.last_log[:epoch]}.marshal")
+      end
+    end
+
+    # A callback that stops training early once the monitored value
+    # crosses the given tolerance.
+    class EarlyStopping < Callback
+      def initialize(trigger, tolerance)
+        @trigger = trigger
+        @tolerance = tolerance
+      end
+
+      def after_train_on_batch
+        throw :stop, "Early stopped." if judge_early_stopping_train
+      end
+
+      def after_epoch
+        throw :stop, "Early stopped." if judge_early_stopping_test
+      end
+
+      private
+
+      def judge_early_stopping_train
+        case @trigger
+        when :train_loss
+          return true if model.last_log[@trigger].mean <= @tolerance
+        end
+        false
+      end
+
+      def judge_early_stopping_test
+        case @trigger
+        when :test_loss
+          return true if model.last_log[@trigger].mean <= @tolerance
+        when :test_accuracy
+          return true if model.last_log[@trigger] >= @tolerance
+        end
+        false
+      end
+    end
+
+    # A callback that stops training when the training loss becomes NaN.
+    class NaNStopping < Callback
+      def after_train_on_batch
+        throw :stop, "loss is NaN." if model.last_log[:train_loss].nan?
+      end
+    end
+
+    # A callback that records logs. The following tags are recorded:
+    # epoch: Current epoch.
+    # train_loss: Batch training loss.
+    # test_loss: Mean test loss.
+    # test_accuracy: Test accuracy.
+    class Logger < Callback
+      def initialize
+        @log = {
+          epoch: [],
+          train_loss: [],
+          test_loss: [],
+          test_accuracy: [],
+        }
+      end
+
+      def after_epoch
+        logging(:epoch, :test_loss, :test_accuracy)
+      end
+
+      def after_train_on_batch
+        logging(:train_loss)
+      end
+
+      # Get a log.
+      # @param [Symbol] tag Tag indicating the type of log.
+      # @return [Numo::NArray] Returns the recorded log.
+      def get_log(tag)
+        case tag
+        when :epoch
+          Numo::UInt32.cast(@log[tag])
+        else
+          Numo::SFloat.cast(@log[tag])
+        end
+      end
+
+      private def logging(*tags)
+        tags.each do |tag|
+          @log[tag] ||= []
+          @log[tag] << model.last_log[tag]
+        end
+      end
+    end
+
+  end
+end
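
A sketch of wiring the new callbacks together (the model, data, and thresholds are illustrative; add_callback and last_log appear in the hunks above, while the test: keyword of train is an assumption):

    include DNN::Callbacks

    model.add_callback(CheckPoint.new("trained/model"))          # save after each epoch
    model.add_callback(EarlyStopping.new(:test_accuracy, 0.99))  # stop once accuracy >= 0.99
    logger = Logger.new
    model.add_callback(logger)
    model.train(x_train, y_train, 10, batch_size: 100, test: [x_test, y_test])
    p logger.get_log(:test_accuracy)  # per-epoch accuracies as Numo::SFloat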