ruby-dnn 1.1.4 → 1.1.5

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: d33f1d5472229d184630d37063d4c42888230044a9e1b069035a144e8aae0964
- data.tar.gz: 530d31b5fc5073fa02253eb0f7d7b78a007ccaed16ea4a49aba56a87174afb59
+ metadata.gz: 55dea04f1d2a6bb4806c3f029086474b46b8a225f7168e4c72af92e0f7d69f71
+ data.tar.gz: 2522778ffabbce31315b48ad3abfedd2dea2e9dba0d09af4e9700cb9c588393b
  SHA512:
- metadata.gz: acf104aec7d661da52623930dbd8a60e105aff6b0c7fd4bdc675f507a50f55372451d7c4066992d13a390629d89ec7ba42d7c316376797e439e6b399d702e5aa
- data.tar.gz: 1e338be765295d9c9827dc82c70e7af79473386be1574ae647ef2568c24be9f9142d94104b62726bda800aff064c37ede794066cfb098a40744dd24a2e7addb4
+ metadata.gz: 05056e7619f52dd8efac34c6aeae1e1652e3635257baf302f15908131d6908d7ee49e892040514b0ea3011c46148eb3c0fedd3857b85ab0cf271c521bb907349
+ data.tar.gz: 41828ada6a07129fdfc4ed5aff6af9c6dc7d934dd0f39eec8792d77a7b2fc5769b2f3d278ced6b5def360db58a7a34c014a0876e65ca7525d35a67219285aab7
data/README.md CHANGED
@@ -54,18 +54,18 @@ When create a model with 'define by run' style:
  class MLP < Model
    def initialize
      super
-     @l1 = Dense.new(256)
-     @l2 = Dense.new(256)
-     @l3 = Dense.new(10)
+     @d1 = Dense.new(256)
+     @d2 = Dense.new(256)
+     @d3 = Dense.new(10)
    end

    def forward(x)
      x = InputLayer.new(784).(x)
-     x = @l1.(x)
+     x = @d1.(x)
      x = ReLU.(x)
-     x = @l2.(x)
+     x = @d2.(x)
      x = ReLU.(x)
-     x = @l3.(x)
+     x = @d3.(x)
      x
    end
  end
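The README rename above only swaps instance-variable names (@l1 → @d1 and so on); the training code around the model is unchanged. For context, a minimal, hedged sketch of driving such a define-by-run model end to end — the MNIST loader, `setup`, and `train` calls follow the gem's README conventions and should be treated as assumptions to verify against 1.1.5:

```ruby
require "dnn"
require "dnn/datasets/mnist"

include DNN::Models
include DNN::Layers
include DNN::Optimizers
include DNN::Losses

# Flatten MNIST images to 784-dim vectors and one-hot encode the labels
# (loader and helper names assumed from the bundled examples).
x_train, y_train = DNN::MNIST.load_train
x_train = Numo::SFloat.cast(x_train).reshape(x_train.shape[0], 784) / 255
y_train = DNN::Utils.to_categorical(y_train, 10, Numo::SFloat)

model = MLP.new                                 # the class defined in the README snippet
model.setup(Adam.new, SoftmaxCrossEntropy.new)  # optimizer + loss, as in the DCGAN example below
model.train(x_train, y_train, 10, batch_size: 128)
```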
@@ -27,22 +27,22 @@ y_test = DNN::Utils.to_categorical(y_test, 10, Numo::SFloat)
  class MLP < Model
    def initialize
      super
-     @l1 = Dense.new(256)
-     @l2 = Dense.new(256)
-     @l3 = Dense.new(10)
+     @d1 = Dense.new(256)
+     @d2 = Dense.new(256)
+     @d3 = Dense.new(10)
      @bn1 = BatchNormalization.new
      @bn2 = BatchNormalization.new
    end

    def forward(x)
      x = InputLayer.new(784).(x)
-     x = @l1.(x)
+     x = @d1.(x)
      x = @bn1.(x)
      x = ReLU.(x)
-     x = @l2.(x)
+     x = @d2.(x)
      x = @bn2.(x)
      x = ReLU.(x)
-     x = @l3.(x)
+     x = @d3.(x)
      x
    end
  end
@@ -28,22 +28,22 @@ class MLP < Model
    def initialize
      super
      # Set the initial values of weight and bias to the initial values of He.
-     @l1 = Dense.new(256, weight_initializer: He.new, bias_initializer: He.new)
-     @l2 = Dense.new(256, weight_initializer: He.new, bias_initializer: He.new)
-     @l3 = Dense.new(10, weight_initializer: He.new, bias_initializer: He.new)
+     @d1 = Dense.new(256, weight_initializer: He.new, bias_initializer: He.new)
+     @d2 = Dense.new(256, weight_initializer: He.new, bias_initializer: He.new)
+     @d3 = Dense.new(10, weight_initializer: He.new, bias_initializer: He.new)
      @bn1 = BatchNormalization.new
      @bn2 = BatchNormalization.new
    end

    def forward(x)
      x = InputLayer.new(784).(x)
-     x = @l1.(x)
+     x = @d1.(x)
      x = @bn1.(x)
      x = ReLU.(x)
-     x = @l2.(x)
+     x = @d2.(x)
      x = @bn2.(x)
      x = ReLU.(x)
-     x = @l3.(x)
+     x = @d3.(x)
      x
    end
  end
@@ -29,22 +29,22 @@ class MLP < Model
    def initialize
      super
      # Set L2 regularizer(weight decay) for weight and bias.
-     @l1 = Dense.new(256, weight_regularizer: L2.new(L2_LAMBDA), bias_regularizer: L2.new(L2_LAMBDA))
-     @l2 = Dense.new(256, weight_regularizer: L2.new(L2_LAMBDA), bias_regularizer: L2.new(L2_LAMBDA))
-     @l3 = Dense.new(10, weight_regularizer: L2.new(L2_LAMBDA), bias_regularizer: L2.new(L2_LAMBDA))
+     @d1 = Dense.new(256, weight_regularizer: L2.new(L2_LAMBDA), bias_regularizer: L2.new(L2_LAMBDA))
+     @d2 = Dense.new(256, weight_regularizer: L2.new(L2_LAMBDA), bias_regularizer: L2.new(L2_LAMBDA))
+     @d3 = Dense.new(10, weight_regularizer: L2.new(L2_LAMBDA), bias_regularizer: L2.new(L2_LAMBDA))
      @bn1 = BatchNormalization.new
      @bn2 = BatchNormalization.new
    end

    def forward(x)
      x = InputLayer.new(784).(x)
-     x = @l1.(x)
+     x = @d1.(x)
      x = @bn1.(x)
      x = ReLU.(x)
-     x = @l2.(x)
+     x = @d2.(x)
      x = @bn2.(x)
      x = ReLU.(x)
-     x = @l3.(x)
+     x = @d3.(x)
      x
    end
  end
@@ -38,22 +38,22 @@ y_test = DNN::Utils.to_categorical(y_test, 10, Numo::SFloat)
  class MLP < Model
    def initialize
      super
-     @l1 = Dense.new(256)
-     @l2 = Dense.new(256)
-     @l3 = Dense.new(10)
+     @d1 = Dense.new(256)
+     @d2 = Dense.new(256)
+     @d3 = Dense.new(10)
      @bn1 = BatchNormalization.new
      @bn2 = BatchNormalization.new
    end

    def forward(x)
      x = InputLayer.new(784).(x)
-     x = @l1.(x)
+     x = @d1.(x)
      x = @bn1.(x)
      x = ReLU.(x)
-     x = @l2.(x)
+     x = @d2.(x)
      x = @bn2.(x)
      x = ReLU.(x)
-     x = @l3.(x)
+     x = @d3.(x)
      x
    end
  end
@@ -4,13 +4,13 @@ include DNN::Layers
  class Generator < Model
    def initialize
      super
-     @l1 = Dense.new(1024)
-     @l2 = Dense.new(7 * 7 * 64)
-     @l3 = Conv2DTranspose.new(64, 4, strides: 2, padding: true)
-     @l4 = Conv2D.new(64, 4, padding: true)
-     @l5 = Conv2DTranspose.new(32, 4, strides: 2, padding: true)
-     @l6 = Conv2D.new(32, 4, padding: true)
-     @l7 = Conv2D.new(1, 4, padding: true)
+     @d1 = Dense.new(1024)
+     @d2 = Dense.new(7 * 7 * 64)
+     @cv1 = Conv2D.new(64, 4, padding: true)
+     @cvt1 = Conv2DTranspose.new(64, 4, strides: 2, padding: true)
+     @cvt2 = Conv2DTranspose.new(32, 4, strides: 2, padding: true)
+     @cv2 = Conv2D.new(32, 4, padding: true)
+     @cv3 = Conv2D.new(1, 4, padding: true)
      @bn1 = BatchNormalization.new
      @bn2 = BatchNormalization.new
      @bn3 = BatchNormalization.new
@@ -21,32 +21,32 @@ class Generator < Model

    def forward(x)
      x = InputLayer.new(20).(x)
-     x = @l1.(x)
+     x = @d1.(x)
      x = @bn1.(x)
      x = ReLU.(x)

-     x = @l2.(x)
+     x = @d2.(x)
      x = @bn2.(x)
      x = ReLU.(x)

      x = Reshape.(x, [7, 7, 64])
-     x = @l3.(x)
+     x = @cvt1.(x)
      x = @bn3.(x)
      x = ReLU.(x)

-     x = @l4.(x)
+     x = @cv1.(x)
      x = @bn4.(x)
      x = ReLU.(x)

-     x = @l5.(x)
+     x = @cvt2.(x)
      x = @bn5.(x)
      x = ReLU.(x)

-     x = @l6.(x)
+     x = @cv2.(x)
      x = @bn6.(x)
      x = ReLU.(x)

-     x = @l7.(x)
+     x = @cv3.(x)
      x = Tanh.(x)
      x
    end
@@ -55,12 +55,12 @@ end
  class Discriminator < Model
    def initialize
      super
-     @l1 = Conv2D.new(32, 4, strides: 2, padding: true)
-     @l2 = Conv2D.new(32, 4, padding: true)
-     @l3 = Conv2D.new(64, 4, strides: 2, padding: true)
-     @l4 = Conv2D.new(64, 4, padding: true)
-     @l5 = Dense.new(1024)
-     @l6 = Dense.new(1)
+     @cv1 = Conv2D.new(32, 4, strides: 2, padding: true)
+     @cv2 = Conv2D.new(32, 4, padding: true)
+     @cv3 = Conv2D.new(64, 4, strides: 2, padding: true)
+     @cv4 = Conv2D.new(64, 4, padding: true)
+     @d1 = Dense.new(1024)
+     @d2 = Dense.new(1)
      @bn1 = BatchNormalization.new
      @bn2 = BatchNormalization.new
      @bn3 = BatchNormalization.new
@@ -68,26 +68,26 @@ class Discriminator < Model

    def forward(x)
      x = InputLayer.new([28, 28, 1]).(x)
-     x = @l1.(x)
+     x = @cv1.(x)
      x = LeakyReLU.(x, 0.2)

-     x = @l2.(x)
+     x = @cv2.(x)
      x = @bn1.(x)
      x = LeakyReLU.(x, 0.2)

-     x = @l3.(x)
+     x = @cv3.(x)
      x = @bn2.(x)
      x = LeakyReLU.(x, 0.2)

-     x = @l4.(x)
+     x = @cv4.(x)
      x = @bn3.(x)
      x = LeakyReLU.(x, 0.2)

      x = Flatten.(x)
-     x = @l5.(x)
+     x = @d1.(x)
      x = LeakyReLU.(x, 0.2)

-     x = @l6.(x)
+     x = @d2.(x)
      x
    end

@@ -1,7 +1,7 @@
  require "dnn"
  require "dnn/datasets/mnist"
  # If you use numo/linalg then please uncomment out.
- require "numo/linalg/autoloader"
+ # require "numo/linalg/autoloader"

  include DNN::Models
  include DNN::Layers
@@ -23,18 +23,18 @@ y_test = DNN::Utils.to_categorical(y_test, 10, Numo::SFloat)
  class MLP < Model
    def initialize
      super
-     @l1 = Dense.new(256)
-     @l2 = Dense.new(256)
-     @l3 = Dense.new(10)
+     @d1 = Dense.new(256)
+     @d2 = Dense.new(256)
+     @d3 = Dense.new(10)
    end

    def forward(x)
      x = InputLayer.new(784).(x)
-     x = @l1.(x)
+     x = @d1.(x)
      x = ReLU.(x)
-     x = @l2.(x)
+     x = @d2.(x)
      x = ReLU.(x)
-     x = @l3.(x)
+     x = @d3.(x)
      x
    end
  end
@@ -2,20 +2,19 @@ include DNN::Models
  include DNN::Layers

  class Generator < Model
-   def initialize(input_shape)
+   def initialize(input_shape, base_num_filters)
      super()
      @input_shape = input_shape
-     @l1 = Conv2D.new(32, 4, padding: true)
-     @l2 = Conv2D.new(32, 4, strides: 2, padding: true)
-     @l3 = Conv2D.new(64, 4, padding: true)
-     @l4 = Conv2D.new(64, 4, strides: 2, padding: true)
-     @l5 = Conv2D.new(128, 4, padding: true)
-     @l6 = Conv2DTranspose.new(64, 4, strides: 2, padding: true)
-     @l7 = Conv2D.new(64, 4, padding: true)
-     @l8 = Conv2DTranspose.new(32, 4, strides: 2, padding: true)
-     @l9 = Conv2D.new(32, 4, padding: true)
-     @l10 = Conv2D.new(32, 4, padding: true)
-     @l11 = Conv2D.new(3, 4, padding: true)
+     @cv1 = Conv2D.new(base_num_filters, 4, padding: true)
+     @cv2 = Conv2D.new(base_num_filters, 4, strides: 2, padding: true)
+     @cv3 = Conv2D.new(base_num_filters * 2, 4, padding: true)
+     @cv4 = Conv2D.new(base_num_filters * 2, 4, strides: 2, padding: true)
+     @cv5 = Conv2D.new(base_num_filters * 2, 4, padding: true)
+     @cv6 = Conv2D.new(base_num_filters, 4, padding: true)
+     @cv7 = Conv2D.new(base_num_filters, 4, padding: true)
+     @cv8 = Conv2D.new(3, 4, padding: true)
+     @cvt1 = Conv2DTranspose.new(base_num_filters * 2, 4, strides: 2, padding: true)
+     @cvt2 = Conv2DTranspose.new(base_num_filters, 4, strides: 2, padding: true)
      @bn1 = BatchNormalization.new
      @bn2 = BatchNormalization.new
      @bn3 = BatchNormalization.new
@@ -24,53 +23,48 @@ class Generator < Model
      @bn6 = BatchNormalization.new
      @bn7 = BatchNormalization.new
      @bn8 = BatchNormalization.new
-     @bn9 = BatchNormalization.new
    end

    def forward(x)
      input = InputLayer.new(@input_shape).(x)
-     x = @l1.(input)
+     x = @cv1.(input)
      x = @bn1.(x)
-     h1 = ReLU.(x)
+     h1 = LeakyReLU.(x, 0.2)

-     x = @l2.(h1)
+     x = @cv2.(h1)
      x = @bn2.(x)
-     x = ReLU.(x)
+     x = LeakyReLU.(x, 0.2)

-     x = @l3.(x)
+     x = @cv3.(x)
      x = @bn3.(x)
-     h2 = ReLU.(x)
+     h2 = LeakyReLU.(x, 0.2)

-     x = @l4.(x)
+     x = @cv4.(h2)
      x = @bn4.(x)
-     x = ReLU.(x)
+     x = LeakyReLU.(x, 0.2)

-     x = @l5.(x)
+     x = @cv5.(x)
      x = @bn5.(x)
-     x = ReLU.(x)
+     x = LeakyReLU.(x, 0.2)

-     x = @l6.(x)
+     x = @cvt1.(x)
      x = @bn6.(x)
-     x = ReLU.(x)
+     x = LeakyReLU.(x, 0.2)
+     x = Concatenate.(x, h2, axis: 3)

-     x = @l7.(x)
+     x = @cv6.(x)
      x = @bn7.(x)
-     x = ReLU.(x)
-     x = Concatenate.(x, h2, axis: 3)
+     x = LeakyReLU.(x, 0.2)

-     x = @l8.(x)
+     x = @cvt2.(x)
      x = @bn8.(x)
-     x = ReLU.(x)
-
-     x = @l9.(x)
-     x = @bn9.(x)
-     x = ReLU.(x)
+     x = LeakyReLU.(x, 0.2)
      x = Concatenate.(x, h1, axis: 3)

-     x = @l10.(x)
-     x = ReLU.(x)
+     x = @cv7.(x)
+     x = LeakyReLU.(x, 0.2)

-     x = @l11.(x)
+     x = @cv8.(x)
      x = Tanh.(x)
      x
    end
@@ -147,6 +141,71 @@ class Discriminator < Model
    end
  end

+ class Discriminator < Model
+   def initialize(gen_input_shape, gen_output_shape, base_num_filters)
+     super()
+     @gen_input_shape = gen_input_shape
+     @gen_output_shape = gen_output_shape
+     @cv1_1 = Conv2D.new(base_num_filters, 4, padding: true)
+     @cv1_2 = Conv2D.new(base_num_filters, 4, padding: true)
+     @cv2 = Conv2D.new(base_num_filters, 4, strides: 2, padding: true)
+     @cv3 = Conv2D.new(base_num_filters * 2, 4, padding: true)
+     @cv4 = Conv2D.new(base_num_filters * 2, 4, strides: 2, padding: true)
+     @d1 = Dense.new(1024)
+     @d2 = Dense.new(1)
+     @bn1_1 = BatchNormalization.new
+     @bn1_2 = BatchNormalization.new
+     @bn2 = BatchNormalization.new
+     @bn3 = BatchNormalization.new
+     @bn4 = BatchNormalization.new
+   end
+
+   def forward(inputs)
+     input, images = *inputs
+     x = InputLayer.new(@gen_input_shape).(input)
+     x = @cv1_1.(x)
+     x = @bn1_1.(x)
+     x1 = LeakyReLU.(x, 0.2)
+
+     x = InputLayer.new(@gen_output_shape).(images)
+     x = @cv1_2.(x)
+     x = @bn1_2.(x)
+     x2 = LeakyReLU.(x, 0.2)
+
+     x = Concatenate.(x1, x2)
+     x = @cv2.(x)
+     x = @bn2.(x)
+     x = LeakyReLU.(x, 0.2)
+
+     x = @cv3.(x)
+     x = @bn3.(x)
+     x = LeakyReLU.(x, 0.2)
+
+     x = @cv4.(x)
+     x = @bn4.(x)
+     x = LeakyReLU.(x, 0.2)
+
+     x = Flatten.(x)
+     x = @d1.(x)
+     x = LeakyReLU.(x, 0.2)
+
+     x = @d2.(x)
+     x
+   end
+
+   def enable_training
+     trainable_layers.each do |layer|
+       layer.trainable = true
+     end
+   end
+
+   def disable_training
+     trainable_layers.each do |layer|
+       layer.trainable = false
+     end
+   end
+ end
+
  class DCGAN < Model
    attr_reader :gen
    attr_reader :dis
@@ -23,8 +23,8 @@ epochs = 20
  batch_size = 128

  if initial_epoch == 1
-   gen = Generator.new([32, 32, 1])
-   dis = Discriminator.new([32, 32, 1], [32, 32, 3])
+   gen = Generator.new([32, 32, 1], 32)
+   dis = Discriminator.new([32, 32, 1], [32, 32, 3], 32)
    dcgan = DCGAN.new(gen, dis)
    gen.setup(Adam.new(alpha: 0.0002, beta1: 0.5), MeanAbsoluteError.new)
    dis.setup(Adam.new(alpha: 0.00001, beta1: 0.1), SigmoidCrossEntropy.new)
data/examples/vae.rb CHANGED
@@ -28,24 +28,24 @@ end
  class Encoder < Model
    def initialize
      super
-     @l1 = Dense.new(196)
-     @l2 = Dense.new(49)
-     @l3_1 = Dense.new($z_dim)
-     @l3_2 = Dense.new($z_dim)
+     @d1 = Dense.new(196)
+     @d2 = Dense.new(49)
+     @d3_1 = Dense.new($z_dim)
+     @d3_2 = Dense.new($z_dim)
      @bn1 = BatchNormalization.new
      @bn2 = BatchNormalization.new
    end

    def forward(x)
      x = InputLayer.new(784).(x)
-     x = @l1.(x)
+     x = @d1.(x)
      x = @bn1.(x)
      x = ReLU.(x)
-     x = @l2.(x)
+     x = @d2.(x)
      x = @bn2.(x)
      x = ReLU.(x)
-     z_mean = @l3_1.(x)
-     z_sigma = @l3_2.(x)
+     z_mean = @d3_1.(x)
+     z_sigma = @d3_2.(x)
      [z_mean, z_sigma]
    end
  end
@@ -53,16 +53,16 @@ end
  class Decoder < Model
    def initialize
      super
-     @l3 = Dense.new(196)
-     @l4 = Dense.new(784)
+     @d1 = Dense.new(196)
+     @d2 = Dense.new(784)
      @bn1 = BatchNormalization.new
    end

    def forward(z)
-     x = @l3.(z)
+     x = @d1.(z)
      x = @bn1.(x)
      x = ReLU.(x)
-     x = @l4.(x)
+     x = @d2.(x)
      x
    end
  end
@@ -205,8 +205,11 @@ module DNN

    def forward_node(x)
      @x_shape = x.shape
-     @dim = x.shape[@axis]
-     x.sum(axis: @axis, keepdims: true)
+     if @axis
+       x.sum(axis: @axis, keepdims: true)
+     else
+       x.sum
+     end
    end

    def backward_node(dy)
@@ -236,8 +239,13 @@ module DNN

    def forward_node(x)
      @x_shape = x.shape
-     @dim = x.shape[@axis]
-     x.mean(axis: @axis, keepdims: true)
+     if @axis
+       @dim = x.shape[@axis]
+       x.mean(axis: @axis, keepdims: true)
+     else
+       @dim = x.size
+       x.mean
+     end
    end

    def backward_node(dy)
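Both Sum and Mean now branch on @axis, so passing axis: nil reduces over the whole array instead of indexing x.shape with nil. A small, hedged sketch of the Numo::NArray behaviour the two branches rely on:

```ruby
require "numo/narray"

x = Numo::SFloat.new(2, 3).seq  # [[0, 1, 2], [3, 4, 5]]

# With an axis, keepdims: true preserves the reduced dimension:
# row sums 3 and 12 come back with shape [2, 1].
p x.sum(axis: 1, keepdims: true)
p x.mean(axis: 1, keepdims: true)

# Without an axis, the reduction collapses the whole array to a scalar
# (15.0 and 2.5 here), which is what the new else branches return.
p x.sum
p x.mean
```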
@@ -441,8 +441,9 @@ module DNN
      ys = []
      ary_output_tensors.each.with_index do |out, i|
        y = out.data
-       if use_loss_activation && lfs[i].class.respond_to?(:activation)
-         y = lfs[i].class.activation(y)
+       lf = lfs[i]
+       if use_loss_activation && lf && lf.class.respond_to?(:activation)
+         y = lf.class.activation(y)
        end
        ys << y
      end
@@ -458,7 +459,12 @@ module DNN
      else
        x.reshape(1, *x.shape)
      end
-     predict(input, use_loss_activation: use_loss_activation)[0, false]
+     y = predict(input, use_loss_activation: use_loss_activation)
+     if y.is_a?(Array)
+       y.map { |v| v[0, false] }
+     else
+       y[0, false]
+     end
    end

    # Add callback function.
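The single-sample prediction path now copes with multi-output models: when predict returns an Array of outputs, the leading batch axis is stripped from each element rather than indexing the Array itself. A minimal sketch of that pattern in isolation (the helper name is illustrative, not part of the gem):

```ruby
require "numo/narray"

# y is whatever predict returned for a batch of one sample: either a single
# Numo array, or an Array of Numo arrays for a multi-output model.
def strip_batch_axis(y)
  if y.is_a?(Array)
    y.map { |v| v[0, false] }  # drop the leading batch axis of every output
  else
    y[0, false]
  end
end

single = strip_batch_axis(Numo::SFloat.new(1, 10).seq)  # shape [10]
multi  = strip_batch_axis([Numo::SFloat.new(1, 2).seq,
                           Numo::SFloat.new(1, 2).seq]) # two arrays of shape [2]
p single.shape        # [10]
p multi.map(&:shape)  # [[2], [2]]
```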
@@ -526,7 +532,7 @@ module DNN
        @loss_func.each do |lf|
          lf.clean
        end
-     else
+     elsif @loss_func.is_a?(Losses::Loss)
        @loss_func.clean
      end
      @layers_cache = nil
data/lib/dnn/version.rb CHANGED
@@ -1,3 +1,3 @@
  module DNN
- VERSION = "1.1.4"
+ VERSION = "1.1.5"
  end
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: ruby-dnn
  version: !ruby/object:Gem::Version
- version: 1.1.4
+ version: 1.1.5
  platform: ruby
  authors:
  - unagiootoro
  autorequire:
  bindir: exe
  cert_chain: []
- date: 2020-02-29 00:00:00.000000000 Z
+ date: 2020-03-20 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: numo-narray