ruby-dnn 1.1.4 → 1.1.5
- checksums.yaml +4 -4
- data/README.md +6 -6
- data/examples/api-examples/early_stopping_example.rb +6 -6
- data/examples/api-examples/initializer_example.rb +6 -6
- data/examples/api-examples/regularizer_example.rb +6 -6
- data/examples/api-examples/save_example.rb +6 -6
- data/examples/dcgan/dcgan.rb +26 -26
- data/examples/mnist_define_by_run.rb +7 -7
- data/examples/pix2pix/dcgan.rb +96 -37
- data/examples/pix2pix/train.rb +2 -2
- data/examples/vae.rb +12 -12
- data/lib/dnn/core/layers/math_layers.rb +12 -4
- data/lib/dnn/core/models.rb +10 -4
- data/lib/dnn/version.rb +1 -1
- metadata +2 -2
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 55dea04f1d2a6bb4806c3f029086474b46b8a225f7168e4c72af92e0f7d69f71
+  data.tar.gz: 2522778ffabbce31315b48ad3abfedd2dea2e9dba0d09af4e9700cb9c588393b
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 05056e7619f52dd8efac34c6aeae1e1652e3635257baf302f15908131d6908d7ee49e892040514b0ea3011c46148eb3c0fedd3857b85ab0cf271c521bb907349
+  data.tar.gz: 41828ada6a07129fdfc4ed5aff6af9c6dc7d934dd0f39eec8792d77a7b2fc5769b2f3d278ced6b5def360db58a7a34c014a0876e65ca7525d35a67219285aab7
data/README.md
CHANGED
@@ -54,18 +54,18 @@ When create a model with 'define by run' style:
 class MLP < Model
   def initialize
     super
-    @
-    @
-    @
+    @d1 = Dense.new(256)
+    @d2 = Dense.new(256)
+    @d3 = Dense.new(10)
   end
 
   def forward(x)
     x = InputLayer.new(784).(x)
-    x = @
+    x = @d1.(x)
     x = ReLU.(x)
-    x = @
+    x = @d2.(x)
     x = ReLU.(x)
-    x = @
+    x = @d3.(x)
     x
   end
 end
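Not part of the diff above — a minimal usage sketch for the define-by-run MLP in the README, assuming x_train/y_train/x_test/y_test already hold the prepared MNIST arrays; the optimizer, loss, epoch count, and batch size are illustrative assumptions only.

model = MLP.new
model.setup(DNN::Optimizers::Adam.new, DNN::Losses::SoftmaxCrossEntropy.new)  # assumed optimizer/loss choice
model.train(x_train, y_train, 10, batch_size: 128, test: [x_test, y_test])    # assumed epochs/batch size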
data/examples/api-examples/early_stopping_example.rb
CHANGED
@@ -27,22 +27,22 @@ y_test = DNN::Utils.to_categorical(y_test, 10, Numo::SFloat)
 class MLP < Model
   def initialize
     super
-    @
-    @
-    @
+    @d1 = Dense.new(256)
+    @d2 = Dense.new(256)
+    @d3 = Dense.new(10)
     @bn1 = BatchNormalization.new
     @bn2 = BatchNormalization.new
   end
 
   def forward(x)
     x = InputLayer.new(784).(x)
-    x = @
+    x = @d1.(x)
     x = @bn1.(x)
     x = ReLU.(x)
-    x = @
+    x = @d2.(x)
     x = @bn2.(x)
     x = ReLU.(x)
-    x = @
+    x = @d3.(x)
     x
   end
 end
data/examples/api-examples/initializer_example.rb
CHANGED
@@ -28,22 +28,22 @@ class MLP < Model
   def initialize
     super
     # Set the initial values of weight and bias to the initial values of He.
-    @
-    @
-    @
+    @d1 = Dense.new(256, weight_initializer: He.new, bias_initializer: He.new)
+    @d2 = Dense.new(256, weight_initializer: He.new, bias_initializer: He.new)
+    @d3 = Dense.new(10, weight_initializer: He.new, bias_initializer: He.new)
     @bn1 = BatchNormalization.new
     @bn2 = BatchNormalization.new
   end
 
   def forward(x)
     x = InputLayer.new(784).(x)
-    x = @
+    x = @d1.(x)
     x = @bn1.(x)
     x = ReLU.(x)
-    x = @
+    x = @d2.(x)
     x = @bn2.(x)
     x = ReLU.(x)
-    x = @
+    x = @d3.(x)
     x
   end
 end
data/examples/api-examples/regularizer_example.rb
CHANGED
@@ -29,22 +29,22 @@ class MLP < Model
   def initialize
     super
     # Set L2 regularizer(weight decay) for weight and bias.
-    @
-    @
-    @
+    @d1 = Dense.new(256, weight_regularizer: L2.new(L2_LAMBDA), bias_regularizer: L2.new(L2_LAMBDA))
+    @d2 = Dense.new(256, weight_regularizer: L2.new(L2_LAMBDA), bias_regularizer: L2.new(L2_LAMBDA))
+    @d3 = Dense.new(10, weight_regularizer: L2.new(L2_LAMBDA), bias_regularizer: L2.new(L2_LAMBDA))
     @bn1 = BatchNormalization.new
     @bn2 = BatchNormalization.new
   end
 
   def forward(x)
     x = InputLayer.new(784).(x)
-    x = @
+    x = @d1.(x)
     x = @bn1.(x)
     x = ReLU.(x)
-    x = @
+    x = @d2.(x)
     x = @bn2.(x)
     x = ReLU.(x)
-    x = @
+    x = @d3.(x)
     x
   end
 end
data/examples/api-examples/save_example.rb
CHANGED
@@ -38,22 +38,22 @@ y_test = DNN::Utils.to_categorical(y_test, 10, Numo::SFloat)
 class MLP < Model
   def initialize
     super
-    @
-    @
-    @
+    @d1 = Dense.new(256)
+    @d2 = Dense.new(256)
+    @d3 = Dense.new(10)
     @bn1 = BatchNormalization.new
     @bn2 = BatchNormalization.new
   end
 
   def forward(x)
     x = InputLayer.new(784).(x)
-    x = @
+    x = @d1.(x)
     x = @bn1.(x)
     x = ReLU.(x)
-    x = @
+    x = @d2.(x)
     x = @bn2.(x)
     x = ReLU.(x)
-    x = @
+    x = @d3.(x)
     x
   end
 end
data/examples/dcgan/dcgan.rb
CHANGED
@@ -4,13 +4,13 @@ include DNN::Layers
 class Generator < Model
   def initialize
     super
-    @
-    @
-    @
-    @
-    @
-    @
-    @
+    @d1 = Dense.new(1024)
+    @d2 = Dense.new(7 * 7 * 64)
+    @cv1 = Conv2D.new(64, 4, padding: true)
+    @cvt1 = Conv2DTranspose.new(64, 4, strides: 2, padding: true)
+    @cvt2 = Conv2DTranspose.new(32, 4, strides: 2, padding: true)
+    @cv2 = Conv2D.new(32, 4, padding: true)
+    @cv3 = Conv2D.new(1, 4, padding: true)
     @bn1 = BatchNormalization.new
     @bn2 = BatchNormalization.new
     @bn3 = BatchNormalization.new
@@ -21,32 +21,32 @@ class Generator < Model
 
   def forward(x)
     x = InputLayer.new(20).(x)
-    x = @
+    x = @d1.(x)
     x = @bn1.(x)
     x = ReLU.(x)
 
-    x = @
+    x = @d2.(x)
     x = @bn2.(x)
     x = ReLU.(x)
 
     x = Reshape.(x, [7, 7, 64])
-    x = @
+    x = @cvt1.(x)
     x = @bn3.(x)
     x = ReLU.(x)
 
-    x = @
+    x = @cv1.(x)
     x = @bn4.(x)
     x = ReLU.(x)
 
-    x = @
+    x = @cvt2.(x)
     x = @bn5.(x)
     x = ReLU.(x)
 
-    x = @
+    x = @cv2.(x)
     x = @bn6.(x)
     x = ReLU.(x)
 
-    x = @
+    x = @cv3.(x)
     x = Tanh.(x)
     x
   end
@@ -55,12 +55,12 @@ end
 class Discriminator < Model
   def initialize
     super
-    @
-    @
-    @
-    @
-    @
-    @
+    @cv1 = Conv2D.new(32, 4, strides: 2, padding: true)
+    @cv2 = Conv2D.new(32, 4, padding: true)
+    @cv3 = Conv2D.new(64, 4, strides: 2, padding: true)
+    @cv4 = Conv2D.new(64, 4, padding: true)
+    @d1 = Dense.new(1024)
+    @d2 = Dense.new(1)
     @bn1 = BatchNormalization.new
     @bn2 = BatchNormalization.new
     @bn3 = BatchNormalization.new
@@ -68,26 +68,26 @@ class Discriminator < Model
 
   def forward(x)
     x = InputLayer.new([28, 28, 1]).(x)
-    x = @
+    x = @cv1.(x)
     x = LeakyReLU.(x, 0.2)
 
-    x = @
+    x = @cv2.(x)
     x = @bn1.(x)
     x = LeakyReLU.(x, 0.2)
 
-    x = @
+    x = @cv3.(x)
     x = @bn2.(x)
     x = LeakyReLU.(x, 0.2)
 
-    x = @
+    x = @cv4.(x)
     x = @bn3.(x)
     x = LeakyReLU.(x, 0.2)
 
     x = Flatten.(x)
-    x = @
+    x = @d1.(x)
     x = LeakyReLU.(x, 0.2)
 
-    x = @
+    x = @d2.(x)
     x
   end
 
data/examples/mnist_define_by_run.rb
CHANGED
@@ -1,7 +1,7 @@
 require "dnn"
 require "dnn/datasets/mnist"
 # If you use numo/linalg then please uncomment out.
-require "numo/linalg/autoloader"
+# require "numo/linalg/autoloader"
 
 include DNN::Models
 include DNN::Layers
@@ -23,18 +23,18 @@ y_test = DNN::Utils.to_categorical(y_test, 10, Numo::SFloat)
 class MLP < Model
   def initialize
     super
-    @
-    @
-    @
+    @d1 = Dense.new(256)
+    @d2 = Dense.new(256)
+    @d3 = Dense.new(10)
   end
 
   def forward(x)
     x = InputLayer.new(784).(x)
-    x = @
+    x = @d1.(x)
     x = ReLU.(x)
-    x = @
+    x = @d2.(x)
     x = ReLU.(x)
-    x = @
+    x = @d3.(x)
     x
   end
 end
data/examples/pix2pix/dcgan.rb
CHANGED
@@ -2,20 +2,19 @@ include DNN::Models
 include DNN::Layers
 
 class Generator < Model
-  def initialize(input_shape)
+  def initialize(input_shape, base_num_filters)
     super()
     @input_shape = input_shape
-    @
-    @
-    @
-    @
-    @
-    @
-    @
-    @
-    @
-    @
-    @l11 = Conv2D.new(3, 4, padding: true)
+    @cv1 = Conv2D.new(base_num_filters, 4, padding: true)
+    @cv2 = Conv2D.new(base_num_filters, 4, strides: 2, padding: true)
+    @cv3 = Conv2D.new(base_num_filters * 2, 4, padding: true)
+    @cv4 = Conv2D.new(base_num_filters * 2, 4, strides: 2, padding: true)
+    @cv5 = Conv2D.new(base_num_filters * 2, 4, padding: true)
+    @cv6 = Conv2D.new(base_num_filters, 4, padding: true)
+    @cv7 = Conv2D.new(base_num_filters, 4, padding: true)
+    @cv8 = Conv2D.new(3, 4, padding: true)
+    @cvt1 = Conv2DTranspose.new(base_num_filters * 2, 4, strides: 2, padding: true)
+    @cvt2 = Conv2DTranspose.new(base_num_filters, 4, strides: 2, padding: true)
     @bn1 = BatchNormalization.new
     @bn2 = BatchNormalization.new
     @bn3 = BatchNormalization.new
@@ -24,53 +23,48 @@ class Generator < Model
     @bn6 = BatchNormalization.new
     @bn7 = BatchNormalization.new
     @bn8 = BatchNormalization.new
-    @bn9 = BatchNormalization.new
   end
 
   def forward(x)
     input = InputLayer.new(@input_shape).(x)
-    x = @
+    x = @cv1.(input)
     x = @bn1.(x)
-    h1 =
+    h1 = LeakyReLU.(x, 0.2)
 
-    x = @
+    x = @cv2.(h1)
     x = @bn2.(x)
-    x =
+    x = LeakyReLU.(x, 0.2)
 
-    x = @
+    x = @cv3.(x)
     x = @bn3.(x)
-    h2 =
+    h2 = LeakyReLU.(x, 0.2)
 
-    x = @
+    x = @cv4.(h2)
     x = @bn4.(x)
-    x =
+    x = LeakyReLU.(x, 0.2)
 
-    x = @
+    x = @cv5.(x)
     x = @bn5.(x)
-    x =
+    x = LeakyReLU.(x, 0.2)
 
-    x = @
+    x = @cvt1.(x)
     x = @bn6.(x)
-    x =
+    x = LeakyReLU.(x, 0.2)
+    x = Concatenate.(x, h2, axis: 3)
 
-    x = @
+    x = @cv6.(x)
     x = @bn7.(x)
-    x =
-    x = Concatenate.(x, h2, axis: 3)
+    x = LeakyReLU.(x, 0.2)
 
-    x = @
+    x = @cvt2.(x)
     x = @bn8.(x)
-    x =
-
-    x = @l9.(x)
-    x = @bn9.(x)
-    x = ReLU.(x)
+    x = LeakyReLU.(x, 0.2)
     x = Concatenate.(x, h1, axis: 3)
 
-    x = @
-    x =
+    x = @cv7.(x)
+    x = LeakyReLU.(x, 0.2)
 
-    x = @
+    x = @cv8.(x)
     x = Tanh.(x)
     x
   end
@@ -147,6 +141,71 @@ class Discriminator < Model
   end
 end
 
+class Discriminator < Model
+  def initialize(gen_input_shape, gen_output_shape, base_num_filters)
+    super()
+    @gen_input_shape = gen_input_shape
+    @gen_output_shape = gen_output_shape
+    @cv1_1 = Conv2D.new(base_num_filters, 4, padding: true)
+    @cv1_2 = Conv2D.new(base_num_filters, 4, padding: true)
+    @cv2 = Conv2D.new(base_num_filters, 4, strides: 2, padding: true)
+    @cv3 = Conv2D.new(base_num_filters * 2, 4, padding: true)
+    @cv4 = Conv2D.new(base_num_filters * 2, 4, strides: 2, padding: true)
+    @d1 = Dense.new(1024)
+    @d2 = Dense.new(1)
+    @bn1_1 = BatchNormalization.new
+    @bn1_2 = BatchNormalization.new
+    @bn2 = BatchNormalization.new
+    @bn3 = BatchNormalization.new
+    @bn4 = BatchNormalization.new
+  end
+
+  def forward(inputs)
+    input, images = *inputs
+    x = InputLayer.new(@gen_input_shape).(input)
+    x = @cv1_1.(x)
+    x = @bn1_1.(x)
+    x1 = LeakyReLU.(x, 0.2)
+
+    x = InputLayer.new(@gen_output_shape).(images)
+    x = @cv1_2.(x)
+    x = @bn1_2.(x)
+    x2 = LeakyReLU.(x, 0.2)
+
+    x = Concatenate.(x1, x2)
+    x = @cv2.(x)
+    x = @bn2.(x)
+    x = LeakyReLU.(x, 0.2)
+
+    x = @cv3.(x)
+    x = @bn3.(x)
+    x = LeakyReLU.(x, 0.2)
+
+    x = @cv4.(x)
+    x = @bn4.(x)
+    x = LeakyReLU.(x, 0.2)
+
+    x = Flatten.(x)
+    x = @d1.(x)
+    x = LeakyReLU.(x, 0.2)
+
+    x = @d2.(x)
+    x
+  end
+
+  def enable_training
+    trainable_layers.each do |layer|
+      layer.trainable = true
+    end
+  end
+
+  def disable_training
+    trainable_layers.each do |layer|
+      layer.trainable = false
+    end
+  end
+end
+
 class DCGAN < Model
   attr_reader :gen
   attr_reader :dis
data/examples/pix2pix/train.rb
CHANGED
@@ -23,8 +23,8 @@ epochs = 20
 batch_size = 128
 
 if initial_epoch == 1
-  gen = Generator.new([32, 32, 1])
-  dis = Discriminator.new([32, 32, 1], [32, 32, 3])
+  gen = Generator.new([32, 32, 1], 32)
+  dis = Discriminator.new([32, 32, 1], [32, 32, 3], 32)
   dcgan = DCGAN.new(gen, dis)
   gen.setup(Adam.new(alpha: 0.0002, beta1: 0.5), MeanAbsoluteError.new)
   dis.setup(Adam.new(alpha: 0.00001, beta1: 0.1), SigmoidCrossEntropy.new)
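For illustration only, not part of this diff: the new trailing constructor argument is a base filter count from which every Conv2D/Conv2DTranspose width in the pix2pix Generator and Discriminator is derived, so the network width can be scaled from the training script. The value 64 below is an assumed example; the shipped train.rb passes 32.

gen = Generator.new([32, 32, 1], 64)                    # assumed: conv widths become 64 / 128 instead of 32 / 64
dis = Discriminator.new([32, 32, 1], [32, 32, 3], 64)
dcgan = DCGAN.new(gen, dis)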
data/examples/vae.rb
CHANGED
@@ -28,24 +28,24 @@ end
 class Encoder < Model
   def initialize
     super
-    @
-    @
-    @
-    @
+    @d1 = Dense.new(196)
+    @d2 = Dense.new(49)
+    @d3_1 = Dense.new($z_dim)
+    @d3_2 = Dense.new($z_dim)
     @bn1 = BatchNormalization.new
     @bn2 = BatchNormalization.new
   end
 
   def forward(x)
     x = InputLayer.new(784).(x)
-    x = @
+    x = @d1.(x)
     x = @bn1.(x)
     x = ReLU.(x)
-    x = @
+    x = @d2.(x)
     x = @bn2.(x)
     x = ReLU.(x)
-    z_mean = @
-    z_sigma = @
+    z_mean = @d3_1.(x)
+    z_sigma = @d3_2.(x)
     [z_mean, z_sigma]
   end
 end
@@ -53,16 +53,16 @@ end
 class Decoder < Model
   def initialize
     super
-    @
-    @
+    @d1 = Dense.new(196)
+    @d2 = Dense.new(784)
     @bn1 = BatchNormalization.new
   end
 
   def forward(z)
-    x = @
+    x = @d1.(z)
     x = @bn1.(x)
     x = ReLU.(x)
-    x = @
+    x = @d2.(x)
     x
   end
 end
data/lib/dnn/core/layers/math_layers.rb
CHANGED
@@ -205,8 +205,11 @@ module DNN
 
       def forward_node(x)
         @x_shape = x.shape
-
-
+        if @axis
+          x.sum(axis: @axis, keepdims: true)
+        else
+          x.sum
+        end
       end
 
       def backward_node(dy)
@@ -236,8 +239,13 @@ module DNN
 
      def forward_node(x)
        @x_shape = x.shape
-
-
+        if @axis
+          @dim = x.shape[@axis]
+          x.mean(axis: @axis, keepdims: true)
+        else
+          @dim = x.size
+          x.mean
+        end
       end
 
       def backward_node(dy)
data/lib/dnn/core/models.rb
CHANGED
@@ -441,8 +441,9 @@ module DNN
       ys = []
       ary_output_tensors.each.with_index do |out, i|
         y = out.data
-
-
+        lf = lfs[i]
+        if use_loss_activation && lf && lf.class.respond_to?(:activation)
+          y = lf.class.activation(y)
         end
         ys << y
       end
@@ -458,7 +459,12 @@ module DNN
       else
         x.reshape(1, *x.shape)
       end
-      predict(input, use_loss_activation: use_loss_activation)
+      y = predict(input, use_loss_activation: use_loss_activation)
+      if y.is_a?(Array)
+        y.map { |v| v[0, false] }
+      else
+        y[0, false]
+      end
     end
 
     # Add callback function.
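An assumed illustration of the predict1 change (the model names and shapes below are hypothetical, not taken from the diff): predict1 now strips the leading batch axis from the result with [0, false], and does so per output when the model returns an Array.

x = Numo::SFloat.new(784).rand         # a single sample without a batch axis
y = model.predict1(x)                  # e.g. shape [10] for a 10-class MLP
z_mean, z_sigma = encoder.predict1(x)  # a multi-output model: each element is sliced with [0, false]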
@@ -526,7 +532,7 @@ module DNN
        @loss_func.each do |lf|
          lf.clean
        end
-
+      elsif @loss_func.is_a?(Losses::Loss)
        @loss_func.clean
      end
      @layers_cache = nil
data/lib/dnn/version.rb
CHANGED
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: ruby-dnn
 version: !ruby/object:Gem::Version
-  version: 1.1.4
+  version: 1.1.5
 platform: ruby
 authors:
 - unagiootoro
 autorequire:
 bindir: exe
 cert_chain: []
-date: 2020-
+date: 2020-03-20 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: numo-narray