ruby-dnn 1.1.4 → 1.2.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.gitignore +1 -0
- data/.travis.yml +2 -1
- data/README.md +39 -22
- data/examples/api-examples/early_stopping_example.rb +6 -6
- data/examples/api-examples/initializer_example.rb +6 -6
- data/examples/api-examples/regularizer_example.rb +6 -6
- data/examples/api-examples/save_example.rb +6 -6
- data/examples/dcgan/dcgan.rb +27 -27
- data/examples/judge-number/README.md +29 -0
- data/examples/judge-number/capture.PNG +0 -0
- data/examples/judge-number/convnet8.rb +70 -0
- data/examples/judge-number/make_weights.rb +5 -0
- data/examples/judge-number/mnist_predict.rb +20 -0
- data/examples/judge-number/mnist_train.rb +19 -0
- data/examples/judge-number/public/httpRequest.js +44 -0
- data/examples/judge-number/public/judgeNumber.js +61 -0
- data/examples/judge-number/server.rb +19 -0
- data/examples/judge-number/trained_mnist_params.marshal +0 -0
- data/examples/judge-number/views/index.erb +7 -0
- data/examples/mnist_conv2d_example.rb +3 -3
- data/examples/mnist_define_by_run.rb +7 -7
- data/examples/mnist_gpu.rb +47 -0
- data/examples/mnist_lstm_example.rb +1 -1
- data/examples/pix2pix/dcgan.rb +54 -66
- data/examples/pix2pix/train.rb +2 -2
- data/examples/vae.rb +13 -13
- data/img/cart-pole.gif +0 -0
- data/img/cycle-gan.PNG +0 -0
- data/img/facade-pix2pix.png +0 -0
- data/lib/dnn.rb +24 -3
- data/lib/dnn/core/callbacks.rb +6 -4
- data/lib/dnn/core/layers/basic_layers.rb +40 -22
- data/lib/dnn/core/layers/cnn_layers.rb +33 -5
- data/lib/dnn/core/layers/math_layers.rb +17 -9
- data/lib/dnn/core/layers/merge_layers.rb +2 -26
- data/lib/dnn/core/layers/split_layers.rb +39 -0
- data/lib/dnn/core/link.rb +14 -33
- data/lib/dnn/core/losses.rb +6 -12
- data/lib/dnn/core/models.rb +77 -10
- data/lib/dnn/core/optimizers.rb +8 -1
- data/lib/dnn/core/utils.rb +23 -0
- data/lib/dnn/image.rb +48 -0
- data/lib/dnn/version.rb +1 -1
- data/ruby-dnn.gemspec +2 -15
- metadata +40 -20
- data/bin/console +0 -14
- data/bin/setup +0 -8
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 2aa12b717ef532b8afe44de7cb388c7d87cb271bf38a25adaad2c335c8817d4b
+  data.tar.gz: e1322f86b06c11ac3728e948e18469ccc6e454eeefb180108a0a9bfc2dbd8143
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 6c9c53ca73a5ab7fc53935f37e0804d761e36735aa06972f63e151ce76a44f01878a2c01dfe0622bf39baa5c98b16ee8d06db212356fcc329a9dd40ae2d78f1c
+  data.tar.gz: 5a195c0afd677127afad2433df2fda3a09c711464c46cb174713d00d695026d2b86faa64f82cc96f2b417b468fc372c04efee22761800c900e2f6b0ca05ac57d
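These digests cover the two archives inside the .gem package (metadata.gz and data.tar.gz), not the .gem file itself. A minimal Ruby sketch for recomputing the new SHA256 values from a downloaded gem, for comparison against the entries above (the local file name is an assumption):

```ruby
require "digest"
require "rubygems/package"

# A .gem is a plain tar archive; hash its metadata.gz and data.tar.gz members.
Gem::Package::TarReader.new(File.open("ruby-dnn-1.2.2.gem", "rb")) do |tar|
  tar.each do |entry|
    next unless %w[metadata.gz data.tar.gz].include?(entry.full_name)
    puts "#{entry.full_name}: #{Digest::SHA256.hexdigest(entry.read)}"
  end
end
```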
data/.gitignore
CHANGED
data/.travis.yml
CHANGED
data/README.md
CHANGED
@@ -1,6 +1,7 @@
 # ruby-dnn
-[](https://badge.fury.io/rb/ruby-dnn)
+[](https://badge.fury.io/rb/ruby-dnn)
 [](https://travis-ci.org/unagiootoro/ruby-dnn)
+[](https://rubydoc.info/gems/ruby-dnn)
 
 ruby-dnn is a ruby deep learning library. This library supports full connected neural network and convolution neural network
 and recurrent neural network.

@@ -54,18 +55,18 @@ When create a model with 'define by run' style:
 class MLP < Model
   def initialize
     super
-    @
-    @
-    @
+    @d1 = Dense.new(256)
+    @d2 = Dense.new(256)
+    @d3 = Dense.new(10)
   end
 
   def forward(x)
     x = InputLayer.new(784).(x)
-    x = @
+    x = @d1.(x)
     x = ReLU.(x)
-    x = @
+    x = @d2.(x)
     x = ReLU.(x)
-    x = @
+    x = @d3.(x)
     x
   end
 end
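The hunk above only renames the layer instance variables (`@d1`–`@d3`); the model is driven the same way as before. A minimal training sketch, assuming the MNIST helper bundled with the gem and the setup/train API used throughout these examples:

```ruby
require "dnn"
require "dnn/datasets/mnist"

# Flatten MNIST to 784-dimensional vectors, matching InputLayer.new(784) above.
x_train, y_train = DNN::MNIST.load_train
x_train = Numo::SFloat.cast(x_train).reshape(x_train.shape[0], 784) / 255.0
y_train = DNN::Utils.to_categorical(y_train, 10, Numo::SFloat)

model = MLP.new
model.setup(DNN::Optimizers::Adam.new, DNN::Losses::SoftmaxCrossEntropy.new)
model.train(x_train, y_train, 10, batch_size: 128)
```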
@@ -84,6 +85,23 @@ puts "loss: #{loss}"
 Please refer to examples for basic usage.
 If you want to know more detailed information, please refer to the source code.
 
+## Sample
+
+* Pix2pix
+Convert an abstract image into a building image.
+[https://github.com/unagiootoro/facade-pix2pix](https://github.com/unagiootoro/facade-pix2pix)
+![facade-pix2pix](img/facade-pix2pix.png)
+
+* Cycle-GAN
+Convert apples to oranges and oranges to apples.
+[https://github.com/unagiootoro/apple2orange-cyclegan](https://github.com/unagiootoro/apple2orange-cyclegan)
+![cycle-gan](img/cycle-gan.PNG)
+
+* DQN
+Learn the game so that the pole on the cart does not fall.
+[https://github.com/unagiootoro/ruby-rl](https://github.com/unagiootoro/ruby-rl)
+![cart-pole](img/cart-pole.gif)
+
 ## Implemented
 || Implemented classes |
 |:-----------|------------:|
@@ -95,28 +113,27 @@ If you want to know more detailed information, please refer to the source code.
 | Losses | MeanSquaredError, MeanAbsoluteError, Hinge, HuberLoss, SoftmaxCrossEntropy, SigmoidCrossEntropy |
 
 ## Datasets
-By setting the environment variable
+By setting the environment variable `RUBY_DNN_DOWNLOADS_PATH`, you can specify the path to read dataset.
 
-
-
-
-
-
-
+* Iris
+* MNIST
+* Fashion-MNIST
+* CIFAR-10
+* CIFAR-100
+* STL-10
 
-##
-
-
-● Pix2pix
+## Use GPU
+If you do `require "cumo/narray"` before `require "dnn"`, you can run it on GPU.
+Or, set the environment variable `RUBY_DNN_USE_CUMO` to `ENABLE` to force the GPU to be used.
 
 ## TODO
-
-
-
+* Write a test.
+* Write a document.
+* Improve performance when using GPU.
 
 ## Development
 
-After checking out the repo, run `bin/setup` to install dependencies. Then, run `rake "
+After checking out the repo, run `bin/setup` to install dependencies. Then, run `rake "test"` to run the tests. You can also run `bin/console` for an interactive prompt that will allow you to experiment.
 
 To install this gem onto your local machine, run `bundle exec rake install`. To release a new version, update the version number in `version.rb`, and then run `bundle exec rake release`, which will create a git tag for the version, push git commits and tags, and push the `.gem` file to [rubygems.org](https://rubygems.org).
 
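The new "Use GPU" section is the headline change of the 1.2.x line (note the new examples/mnist_gpu.rb in the file list above). Both switches in one sketch, assuming the cumo gem and a CUDA-capable device are present:

```ruby
# Option 1: load Cumo first; ruby-dnn then uses it as the array backend.
require "cumo/narray"
require "dnn"

# Option 2: no code change; force Cumo from the shell instead:
#   $ RUBY_DNN_USE_CUMO=ENABLE ruby mnist_gpu.rb
```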
data/examples/api-examples/early_stopping_example.rb
CHANGED
@@ -27,22 +27,22 @@ y_test = DNN::Utils.to_categorical(y_test, 10, Numo::SFloat)
 class MLP < Model
   def initialize
     super
-    @
-    @
-    @
+    @d1 = Dense.new(256)
+    @d2 = Dense.new(256)
+    @d3 = Dense.new(10)
     @bn1 = BatchNormalization.new
     @bn2 = BatchNormalization.new
   end
 
   def forward(x)
     x = InputLayer.new(784).(x)
-    x = @
+    x = @d1.(x)
     x = @bn1.(x)
     x = ReLU.(x)
-    x = @
+    x = @d2.(x)
     x = @bn2.(x)
     x = ReLU.(x)
-    x = @
+    x = @d3.(x)
     x
   end
 end
data/examples/api-examples/initializer_example.rb
CHANGED
@@ -28,22 +28,22 @@ class MLP < Model
   def initialize
     super
     # Set the initial values of weight and bias to the initial values of He.
-    @
-    @
-    @
+    @d1 = Dense.new(256, weight_initializer: He.new, bias_initializer: He.new)
+    @d2 = Dense.new(256, weight_initializer: He.new, bias_initializer: He.new)
+    @d3 = Dense.new(10, weight_initializer: He.new, bias_initializer: He.new)
     @bn1 = BatchNormalization.new
     @bn2 = BatchNormalization.new
   end
 
   def forward(x)
     x = InputLayer.new(784).(x)
-    x = @
+    x = @d1.(x)
     x = @bn1.(x)
     x = ReLU.(x)
-    x = @
+    x = @d2.(x)
     x = @bn2.(x)
     x = ReLU.(x)
-    x = @
+    x = @d3.(x)
     x
   end
 end
data/examples/api-examples/regularizer_example.rb
CHANGED
@@ -29,22 +29,22 @@ class MLP < Model
   def initialize
     super
     # Set L2 regularizer(weight decay) for weight and bias.
-    @
-    @
-    @
+    @d1 = Dense.new(256, weight_regularizer: L2.new(L2_LAMBDA), bias_regularizer: L2.new(L2_LAMBDA))
+    @d2 = Dense.new(256, weight_regularizer: L2.new(L2_LAMBDA), bias_regularizer: L2.new(L2_LAMBDA))
+    @d3 = Dense.new(10, weight_regularizer: L2.new(L2_LAMBDA), bias_regularizer: L2.new(L2_LAMBDA))
     @bn1 = BatchNormalization.new
     @bn2 = BatchNormalization.new
   end
 
   def forward(x)
     x = InputLayer.new(784).(x)
-    x = @
+    x = @d1.(x)
     x = @bn1.(x)
     x = ReLU.(x)
-    x = @
+    x = @d2.(x)
     x = @bn2.(x)
     x = ReLU.(x)
-    x = @
+    x = @d3.(x)
     x
   end
 end
data/examples/api-examples/save_example.rb
CHANGED
@@ -38,22 +38,22 @@ y_test = DNN::Utils.to_categorical(y_test, 10, Numo::SFloat)
 class MLP < Model
   def initialize
     super
-    @
-    @
-    @
+    @d1 = Dense.new(256)
+    @d2 = Dense.new(256)
+    @d3 = Dense.new(10)
     @bn1 = BatchNormalization.new
     @bn2 = BatchNormalization.new
   end
 
   def forward(x)
     x = InputLayer.new(784).(x)
-    x = @
+    x = @d1.(x)
     x = @bn1.(x)
     x = ReLU.(x)
-    x = @
+    x = @d2.(x)
     x = @bn2.(x)
     x = ReLU.(x)
-    x = @
+    x = @d3.(x)
     x
   end
 end
data/examples/dcgan/dcgan.rb
CHANGED
@@ -4,13 +4,13 @@ include DNN::Layers
 class Generator < Model
   def initialize
     super
-    @
-    @
-    @
-    @
-    @
-    @
-    @
+    @d1 = Dense.new(1024)
+    @d2 = Dense.new(7 * 7 * 64)
+    @cv1 = Conv2D.new(64, 4, padding: true)
+    @cvt1 = Conv2DTranspose.new(64, 4, strides: 2, padding: true)
+    @cvt2 = Conv2DTranspose.new(32, 4, strides: 2, padding: true)
+    @cv2 = Conv2D.new(32, 4, padding: true)
+    @cv3 = Conv2D.new(1, 4, padding: true)
     @bn1 = BatchNormalization.new
     @bn2 = BatchNormalization.new
     @bn3 = BatchNormalization.new
@@ -21,32 +21,32 @@ class Generator < Model
 
   def forward(x)
     x = InputLayer.new(20).(x)
-    x = @
+    x = @d1.(x)
     x = @bn1.(x)
     x = ReLU.(x)
 
-    x = @
+    x = @d2.(x)
     x = @bn2.(x)
     x = ReLU.(x)
 
     x = Reshape.(x, [7, 7, 64])
-    x = @
+    x = @cvt1.(x)
     x = @bn3.(x)
     x = ReLU.(x)
 
-    x = @
+    x = @cv1.(x)
     x = @bn4.(x)
     x = ReLU.(x)
 
-    x = @
+    x = @cvt2.(x)
     x = @bn5.(x)
     x = ReLU.(x)
 
-    x = @
+    x = @cv2.(x)
     x = @bn6.(x)
     x = ReLU.(x)
 
-    x = @
+    x = @cv3.(x)
     x = Tanh.(x)
     x
   end
@@ -55,12 +55,12 @@ end
 class Discriminator < Model
   def initialize
     super
-    @
-    @
-    @
-    @
-    @
-    @
+    @cv1 = Conv2D.new(32, 4, strides: 2, padding: true)
+    @cv2 = Conv2D.new(32, 4, padding: true)
+    @cv3 = Conv2D.new(64, 4, strides: 2, padding: true)
+    @cv4 = Conv2D.new(64, 4, padding: true)
+    @d1 = Dense.new(1024)
+    @d2 = Dense.new(1)
     @bn1 = BatchNormalization.new
     @bn2 = BatchNormalization.new
     @bn3 = BatchNormalization.new
@@ -68,26 +68,26 @@ class Discriminator < Model
 
   def forward(x)
     x = InputLayer.new([28, 28, 1]).(x)
-    x = @
+    x = @cv1.(x)
     x = LeakyReLU.(x, 0.2)
 
-    x = @
+    x = @cv2.(x)
     x = @bn1.(x)
     x = LeakyReLU.(x, 0.2)
 
-    x = @
+    x = @cv3.(x)
     x = @bn2.(x)
     x = LeakyReLU.(x, 0.2)
 
-    x = @
+    x = @cv4.(x)
     x = @bn3.(x)
     x = LeakyReLU.(x, 0.2)
 
     x = Flatten.(x)
-    x = @
+    x = @d1.(x)
     x = LeakyReLU.(x, 0.2)
 
-    x = @
+    x = @d2.(x)
     x
   end
 
@@ -129,7 +129,7 @@ class DCGAN < Model
     y_fake = Numo::SFloat.zeros(batch_size, 1)
     @dis.enable_training
     dis_loss = @dis.train_on_batch(x_batch, y_real)
-    dis_loss
+    dis_loss += @dis.train_on_batch(images, y_fake)
 
     noise = Numo::SFloat.new(batch_size, 20).rand(-1, 1)
     label = Numo::SFloat.cast([1] * batch_size).reshape(batch_size, 1)
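After this hunk, the discriminator step trains on a real batch and a generated batch and sums the two losses, which is the standard GAN discriminator update. A condensed sketch (variable names follow the hunk; `@gen.predict` is an assumption about how `images` is produced):

```ruby
y_real = Numo::SFloat.ones(batch_size, 1)    # labels for real images
y_fake = Numo::SFloat.zeros(batch_size, 1)   # labels for generated images
images = @gen.predict(noise)                 # assumed: generator output

@dis.enable_training
dis_loss  = @dis.train_on_batch(x_batch, y_real)
dis_loss += @dis.train_on_batch(images, y_fake)
```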
|
@@ -0,0 +1,29 @@
|
|
|
1
|
+
# Prepare
|
|
2
|
+
This example use to sinatra.
|
|
3
|
+
|
|
4
|
+
```
|
|
5
|
+
$ gem install sinatra
|
|
6
|
+
$ gem install sinatra-contrib
|
|
7
|
+
```
|
|
8
|
+
|
|
9
|
+
# Let's try
|
|
10
|
+
This example prepared weights that have already been trained.
|
|
11
|
+
If you want to try it immediately, skip steps (1) and (2).
|
|
12
|
+
|
|
13
|
+
### (1) Training MNIST
|
|
14
|
+
```
|
|
15
|
+
$ ruby mnist_train.rb
|
|
16
|
+
```
|
|
17
|
+
|
|
18
|
+
### (2) Make weights
|
|
19
|
+
```
|
|
20
|
+
$ ruby make_weights.rb
|
|
21
|
+
```
|
|
22
|
+
|
|
23
|
+
### (3) Launch sinatra server
|
|
24
|
+
```
|
|
25
|
+
$ ruby server.rb
|
|
26
|
+
```
|
|
27
|
+
|
|
28
|
+
### (4) Access 127.0.0.1:4567 with your browser
|
|
29
|
+

|
|
data/examples/judge-number/capture.PNG
ADDED
Binary file
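The four steps in the README above wire together the other new judge-number files: server.rb serves views/index.erb, the public/*.js scripts post the drawn digit, and the trained model answers. A purely hypothetical skeleton of that flow (route names and bodies are illustrative; the real server.rb ships in the package and may differ):

```ruby
# Illustrative only: a minimal Sinatra app shaped like the judge-number demo.
require "sinatra"

get "/" do
  erb :index  # views/index.erb draws the canvas; public/judgeNumber.js posts pixels
end

post "/predict" do
  # The real server would decode the posted pixels here and run the trained
  # ConvNet (defined in convnet8.rb, shown next) over them.
  "0"
end
```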
data/examples/judge-number/convnet8.rb
ADDED
@@ -0,0 +1,70 @@
+require "dnn"
+require "numo/linalg/autoloader"
+
+include DNN::Models
+include DNN::Layers
+include DNN::Optimizers
+include DNN::Losses
+
+class ConvNet < Model
+  def self.create(input_shape)
+    convnet = ConvNet.new(input_shape, 32)
+    convnet.setup(Adam.new, SoftmaxCrossEntropy.new)
+    convnet
+  end
+
+  def initialize(input_shape, base_filter_size)
+    super()
+    @input_shape = input_shape
+    @cv1 = Conv2D.new(base_filter_size, 3, padding: true)
+    @cv2 = Conv2D.new(base_filter_size, 3, padding: true)
+    @cv3 = Conv2D.new(base_filter_size * 2, 3, padding: true)
+    @cv4 = Conv2D.new(base_filter_size * 2, 3, padding: true)
+    @cv5 = Conv2D.new(base_filter_size * 4, 3, padding: true)
+    @cv6 = Conv2D.new(base_filter_size * 4, 3, padding: true)
+    @bn1 = BatchNormalization.new
+    @bn2 = BatchNormalization.new
+    @bn3 = BatchNormalization.new
+    @bn4 = BatchNormalization.new
+    @d1 = Dense.new(512)
+    @d2 = Dense.new(10)
+  end
+
+  def forward(x)
+    x = InputLayer.new(@input_shape).(x)
+
+    x = @cv1.(x)
+    x = ReLU.(x)
+    x = Dropout.(x, 0.25)
+
+    x = @cv2.(x)
+    x = @bn1.(x)
+    x = ReLU.(x)
+    x = MaxPool2D.(x, 2)
+
+    x = @cv3.(x)
+    x = ReLU.(x)
+    x = Dropout.(x, 0.25)
+
+    x = @cv4.(x)
+    x = @bn2.(x)
+    x = ReLU.(x)
+    x = MaxPool2D.(x, 2)
+
+    x = @cv5.(x)
+    x = ReLU.(x)
+    x = Dropout.(x, 0.25)
+
+    x = @cv6.(x)
+    x = @bn3.(x)
+    x = ReLU.(x)
+    x = MaxPool2D.(x, 2)
+
+    x = Flatten.(x)
+    x = @d1.(x)
+    x = @bn4.(x)
+    x = ReLU.(x)
+    x = @d2.(x)
+    x
+  end
+end