ruby-dnn 0.10.1 → 0.10.2

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: 2283358c50e7c60f4f1d67763f16498544a370158c27dd9df4865b8e9e75f2c4
-  data.tar.gz: 1d6a2fb9043a7d652906cfd10b8c2301eeb4d110687aef96a62e6ef26e01fed9
+  metadata.gz: 4587938452bbdaf1a51f7fb9d7c3693570194d349204bdbfb8aa17c7f3f0db73
+  data.tar.gz: 8b893be6c2a546ddae5c39c8a5a5967864b1af0c320a11e8d7ed72f5a738d144
 SHA512:
-  metadata.gz: 3827f751e0f7b2ff4f3df5a88d4b768ee986ec4036ec6a3324de52504cbe1324c55c8b6bddfe8f84f4e39d783ed3bfd93b65a8c45947401bf00fb3f34810ac2e
-  data.tar.gz: 2292999ec27ba89d23d0f592dc314ee8350caaff164eee8faf5e2e353e1415407af9a793a78c4dfa77aea13f9ed100cb6b25a69a49e93ffcd1dfffe2d28fe4d2
+  metadata.gz: 591c74427f134032c4a9ee0ed01fa0ced4b238a918ae1001c3d98b86cc9d5693be94e33f91f069dff58da0dd156a38746eb6665667bd872081f42d41230e77eb
+  data.tar.gz: 4ac43a25bcee0f02453764fefc2dbd8e812158cd0d1f78eeb4d3c1ad1f3047634a831b49caf55202f9e9824c73bd7d2f40b318c8f22e005c9702d6d8fe2ceebf
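These digests cover the two archives packed inside the .gem file. A minimal sketch for reproducing them with Ruby's standard library, assuming metadata.gz and data.tar.gz have already been extracted from the gem into the current directory (the paths are illustrative):

require "digest"

# A .gem is a tar archive; metadata.gz and data.tar.gz sit at its top level.
%w[metadata.gz data.tar.gz].each do |name|
  bytes = File.binread(name)
  puts "#{name} SHA256: #{Digest::SHA256.hexdigest(bytes)}"
  puts "#{name} SHA512: #{Digest::SHA512.hexdigest(bytes)}"
end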
data/README.md CHANGED
@@ -49,7 +49,7 @@ If you want to know more detailed information, please refer to the source code.
 ## Implemented
 || Implemented classes |
 |:-----------|------------:|
-| Connections | Dense, Conv2D, Conv2D_Transpose, SimpleRNN, LSTM, GRU |
+| Connections | Dense, Conv2D, Conv2D_Transpose, Embedding, SimpleRNN, LSTM, GRU |
 | Layers | Flatten, Reshape, Dropout, BatchNormalization, MaxPool2D, AvgPool2D, UnPool2D |
 | Activations | Sigmoid, Tanh, Softsign, Softplus, Swish, ReLU, LeakyReLU, ELU |
 | Optimizers | SGD, Nesterov, AdaGrad, RMSProp, AdaDelta, Adam, RMSPropGraves |
@@ -77,4 +77,4 @@ The gem is available as open source under the terms of the [MIT License](https:/
 
 ## Code of Conduct
 
-Everyone interacting in the ruby-dnn project’s codebases, issue trackers, chat rooms and mailing lists is expected to follow the [code of conduct](https://github.com/[USERNAME]/dnn/blob/master/CODE_OF_CONDUCT.md).
+Everyone interacting in the ruby-dnn project’s codebases, issue trackers, chat rooms and mailing lists is expected to follow the [code of conduct](https://github.com/unagiootoro/ruby-dnn/blob/master/CODE_OF_CONDUCT.md).
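The substantive change in this README diff is the new Embedding entry in the Implemented table above. As a minimal sketch of how it might be wired into the same model << layer style used in the bundled examples below; the Embedding constructor arguments shown here (input length, then vocabulary size) and the surrounding layer choices are assumptions, not the confirmed 0.10.2 API:

require "dnn"

include DNN::Layers
include DNN::Optimizers
include DNN::Losses

model = DNN::Model.new
model << InputLayer.new(10)      # a sequence of 10 integer token ids
model << Embedding.new(10, 500)  # assumed arguments: input length, vocabulary size
model << Dense.new(2)            # e.g. a 2-class output over the embedded sequence
model.compile(Adam.new, SoftmaxCrossEntropy.new)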
CIFAR-100 example CHANGED
@@ -1,71 +1,71 @@
(All 71 lines were removed and re-added with identical content, likely a line-ending normalization, so the script is shown once.)

require "dnn"
require "dnn/cifar100"
# If you use numo/linalg then please uncomment out.
# require "numo/linalg/autoloader"

include DNN::Layers
include DNN::Activations
include DNN::Optimizers
include DNN::Losses
Model = DNN::Model
CIFAR100 = DNN::CIFAR100

x_train, y_train = CIFAR100.load_train
x_test, y_test = CIFAR100.load_test

x_train = Numo::SFloat.cast(x_train)
x_test = Numo::SFloat.cast(x_test)

x_train /= 255
x_test /= 255

y_train = y_train[true, 1]
y_test = y_test[true, 1]

y_train = DNN::Utils.to_categorical(y_train, 100, Numo::SFloat)
y_test = DNN::Utils.to_categorical(y_test, 100, Numo::SFloat)

model = Model.new

model << InputLayer.new([32, 32, 3])

model << Conv2D.new(16, 5, padding: true)
model << BatchNormalization.new
model << ReLU.new

model << Conv2D.new(16, 5, padding: true)
model << BatchNormalization.new
model << ReLU.new

model << MaxPool2D.new(2)

model << Conv2D.new(32, 5, padding: true)
model << BatchNormalization.new
model << ReLU.new

model << Conv2D.new(32, 5, padding: true)
model << BatchNormalization.new
model << ReLU.new

model << MaxPool2D.new(2)

model << Conv2D.new(64, 5, padding: true)
model << BatchNormalization.new
model << ReLU.new

model << Conv2D.new(64, 5, padding: true)
model << BatchNormalization.new
model << ReLU.new

model << Flatten.new

model << Dense.new(1024)
model << BatchNormalization.new
model << ReLU.new
model << Dropout.new(0.5)

model << Dense.new(100)

model.compile(Adam.new, SoftmaxCrossEntropy.new)

model.train(x_train, y_train, 10, batch_size: 100, test: [x_test, y_test])
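One detail worth noting in the script above: the CIFAR-100 labels appear to come back with two columns, and y_train[true, 1] keeps only the second one, presumably the fine (100-class) label rather than the coarse one, which matches the to_categorical(..., 100, ...) call. A small illustration of that Numo::NArray selection, with made-up label pairs:

require "numo/narray"

labels = Numo::Int32[[3, 42], [7, 99]]  # illustrative [coarse, fine] label pairs
fine = labels[true, 1]                  # all rows, column 1
p fine                                  # => [42, 99]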
CIFAR-10 example CHANGED
@@ -1,68 +1,68 @@
(Again, every line was removed and re-added unchanged; the script is shown once.)

require "dnn"
require "dnn/cifar10"
# If you use numo/linalg then please uncomment out.
# require "numo/linalg/autoloader"

include DNN::Layers
include DNN::Activations
include DNN::Optimizers
include DNN::Losses
Model = DNN::Model
CIFAR10 = DNN::CIFAR10

x_train, y_train = CIFAR10.load_train
x_test, y_test = CIFAR10.load_test

x_train = Numo::SFloat.cast(x_train)
x_test = Numo::SFloat.cast(x_test)

x_train /= 255
x_test /= 255

y_train = DNN::Utils.to_categorical(y_train, 10, Numo::SFloat)
y_test = DNN::Utils.to_categorical(y_test, 10, Numo::SFloat)

model = Model.new

model << InputLayer.new([32, 32, 3])

model << Conv2D.new(16, 5, padding: true)
model << BatchNormalization.new
model << ReLU.new

model << Conv2D.new(16, 5, padding: true)
model << BatchNormalization.new
model << ReLU.new

model << MaxPool2D.new(2)

model << Conv2D.new(32, 5, padding: true)
model << BatchNormalization.new
model << ReLU.new

model << Conv2D.new(32, 5, padding: true)
model << BatchNormalization.new
model << ReLU.new

model << MaxPool2D.new(2)

model << Conv2D.new(64, 5, padding: true)
model << BatchNormalization.new
model << ReLU.new

model << Conv2D.new(64, 5, padding: true)
model << BatchNormalization.new
model << ReLU.new

model << Flatten.new

model << Dense.new(512)
model << BatchNormalization.new
model << ReLU.new
model << Dropout.new(0.5)

model << Dense.new(10)

model.compile(Adam.new, SoftmaxCrossEntropy.new)

model.train(x_train, y_train, 10, batch_size: 100, test: [x_test, y_test])
Iris example CHANGED
@@ -1,34 +1,34 @@
(Every line was removed and re-added unchanged; the script is shown once.)

require "dnn"
require "dnn/iris"
# If you use numo/linalg then please uncomment out.
# require "numo/linalg/autoloader"

include DNN::Layers
include DNN::Activations
include DNN::Optimizers
include DNN::Losses
Model = DNN::Model
Iris = DNN::Iris

x, y = Iris.load(true)
x_train, y_train = x[0...100, true], y[0...100]
x_test, y_test = x[100...150, true], y[100...150]

x_train /= 255
x_test /= 255

y_train = DNN::Utils.to_categorical(y_train, 3, Numo::SFloat)
y_test = DNN::Utils.to_categorical(y_test, 3, Numo::SFloat)

model = Model.new

model << InputLayer.new(4)

model << Dense.new(64)
model << ReLU.new

model << Dense.new(3)

model.compile(Adam.new, SoftmaxCrossEntropy.new)

model.train(x_train, y_train, 1000, batch_size: 10, test: [x_test, y_test])
MNIST example CHANGED
@@ -1,50 +1,50 @@
(Every line was removed and re-added unchanged; the script is shown once.)

require "dnn"
require "dnn/mnist"
# If you use numo/linalg then please uncomment out.
# require "numo/linalg/autoloader"

include DNN::Layers
include DNN::Activations
include DNN::Optimizers
include DNN::Losses
Model = DNN::Model
MNIST = DNN::MNIST

x_train, y_train = MNIST.load_train
x_test, y_test = MNIST.load_test

x_train = Numo::SFloat.cast(x_train).reshape(x_train.shape[0], 28, 28, 1)
x_test = Numo::SFloat.cast(x_test).reshape(x_test.shape[0], 28, 28, 1)

x_train /= 255
x_test /= 255

y_train = DNN::Utils.to_categorical(y_train, 10, Numo::SFloat)
y_test = DNN::Utils.to_categorical(y_test, 10, Numo::SFloat)

model = Model.new

model << InputLayer.new([28, 28, 1])

model << Conv2D.new(16, 5)
model << BatchNormalization.new
model << ReLU.new

model << MaxPool2D.new(2)

model << Conv2D.new(32, 5)
model << BatchNormalization.new
model << ReLU.new

model << Flatten.new

model << Dense.new(256)
model << BatchNormalization.new
model << ReLU.new
model << Dropout.new(0.5)

model << Dense.new(10)

model.compile(Adam.new, SoftmaxCrossEntropy.new)

model.train(x_train, y_train, 10, batch_size: 100, test: [x_test, y_test])
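After any of these scripts finish training, the natural next steps are scoring the held-out set and persisting the weights. A sketch under the assumption that this era of ruby-dnn exposes Model#accurate and Model#save (treat both names as hypothetical and check them against the 0.10.2 source):

# Hypothetical follow-up to the MNIST script above.
acc = model.accurate(x_test, y_test)  # assumed: mean accuracy over the test batches
puts "accuracy: #{acc}"
model.save("trained_model.marshal")   # assumed: serialize the trained parameters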