ruby-dnn 0.15.1 → 0.15.2

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: 8fb316d982b142cfc31201f65e89b99579bdd1669d226c42dbc562c7af3b2c2a
-  data.tar.gz: 1839e28107f410d0008a21d9de3670fd5880e1a526ec24eddc34562e18390958
+  metadata.gz: ebd331d481ec073102bb4b8c79a6ef280741deaf147a7908ef83f35603f784b9
+  data.tar.gz: c65399f9f29b65be2ffdc179047f58ae17d6b598328163fc45bb2da5f5f64751
 SHA512:
-  metadata.gz: fd07fbd6409079cca07d14d08051e87b0e4b43772128f46f8cfe62857d2a1053bcb5dc13964f992bcb637ae64e7430613541bb19699d1fe90b142f9f950acfb8
-  data.tar.gz: 575171f1247cf0aa84fc71aee2b7318db10b51a0772e8dc1647e8189e1af70c39bd275edca1e0de8c9b45793051fe0fe8d75bce7bb6ec2e32f11359710592aa3
+  metadata.gz: fb50c04b888f520d441de0643a6c9870be6df0c7e6d5c54f7c2b8e441e339723945f427cef67c51dc4d250bf0f02c20150e0ca47e1c13974fbe19d272f2301fb
+  data.tar.gz: 696f4fcbe01aa29f31ea08d32c7e399789ef841e22bfd77029701f132c10687ea81f25fe56e3044de95e4e8cd4c86b947fc32b257896f4dbcc3c125710d52ab8
@@ -8,19 +8,18 @@ include DNN::Layers
 include DNN::Optimizers
 include DNN::Losses
 include DNN::Callbacks
-MNIST = DNN::MNIST
 
 EPOCHS = 3
 BATCH_SIZE = 128
 
-x_train, y_train = MNIST.load_train
-x_test, y_test = MNIST.load_test
+x_train, y_train = DNN::MNIST.load_train
+x_test, y_test = DNN::MNIST.load_test
 
-x_train = Numo::SFloat.cast(x_train).reshape(x_train.shape[0], 784)
-x_test = Numo::SFloat.cast(x_test).reshape(x_test.shape[0], 784)
+x_train = x_train.reshape(x_train.shape[0], 784)
+x_test = x_test.reshape(x_test.shape[0], 784)
 
-x_train /= 255
-x_test /= 255
+x_train = Numo::SFloat.cast(x_train) / 255
+x_test = Numo::SFloat.cast(x_test) / 255
 
 y_train = DNN::Utils.to_categorical(y_train, 10, Numo::SFloat)
 y_test = DNN::Utils.to_categorical(y_test, 10, Numo::SFloat)
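A note on the normalization rewrite in this and the following example diffs: MNIST pixel data loads as a Numo::UInt8 array, so the SFloat cast has to happen before the division by 255 (integer division would truncate almost everything to zero). Both the old and new versions respect this; the new form just folds the cast into the division step. A minimal sketch of the two behaviors:

require "numo/narray"

pixels = Numo::UInt8[0, 64, 128, 255]
pixels / 255                      # integer division: Numo::UInt8[0, 0, 0, 1]
Numo::SFloat.cast(pixels) / 255   # Numo::SFloat[0, 0.250980..., 0.501961..., 1]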
@@ -8,19 +8,18 @@ include DNN::Layers
 include DNN::Initializers
 include DNN::Optimizers
 include DNN::Losses
-MNIST = DNN::MNIST
 
 EPOCHS = 3
 BATCH_SIZE = 128
 
-x_train, y_train = MNIST.load_train
-x_test, y_test = MNIST.load_test
+x_train, y_train = DNN::MNIST.load_train
+x_test, y_test = DNN::MNIST.load_test
 
-x_train = Numo::SFloat.cast(x_train).reshape(x_train.shape[0], 784)
-x_test = Numo::SFloat.cast(x_test).reshape(x_test.shape[0], 784)
+x_train = x_train.reshape(x_train.shape[0], 784)
+x_test = x_test.reshape(x_test.shape[0], 784)
 
-x_train /= 255
-x_test /= 255
+x_train = Numo::SFloat.cast(x_train) / 255
+x_test = Numo::SFloat.cast(x_test) / 255
 
 y_train = DNN::Utils.to_categorical(y_train, 10, Numo::SFloat)
 y_test = DNN::Utils.to_categorical(y_test, 10, Numo::SFloat)
@@ -8,20 +8,19 @@ include DNN::Layers
 include DNN::Regularizers
 include DNN::Optimizers
 include DNN::Losses
-MNIST = DNN::MNIST
 
 EPOCHS = 3
 BATCH_SIZE = 128
 L2_LAMBDA = 0.01
 
-x_train, y_train = MNIST.load_train
-x_test, y_test = MNIST.load_test
+x_train, y_train = DNN::MNIST.load_train
+x_test, y_test = DNN::MNIST.load_test
 
-x_train = Numo::SFloat.cast(x_train).reshape(x_train.shape[0], 784)
-x_test = Numo::SFloat.cast(x_test).reshape(x_test.shape[0], 784)
+x_train = x_train.reshape(x_train.shape[0], 784)
+x_test = x_test.reshape(x_test.shape[0], 784)
 
-x_train /= 255
-x_test /= 255
+x_train = Numo::SFloat.cast(x_train) / 255
+x_test = Numo::SFloat.cast(x_test) / 255
 
 y_train = DNN::Utils.to_categorical(y_train, 10, Numo::SFloat)
 y_test = DNN::Utils.to_categorical(y_test, 10, Numo::SFloat)
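For reference, the DNN::Utils.to_categorical calls that close each of these preprocessing blocks one-hot encode the integer labels into SFloat rows. A hand-rolled equivalent, for illustration only (not the library's actual implementation):

require "numo/narray"

labels = Numo::UInt8[1, 0, 3]
onehot = Numo::SFloat.zeros(labels.size, 10)
labels.size.times { |i| onehot[i, labels[i]] = 1.0 }
# row 0 now has a 1.0 at column 1, row 1 at column 0, row 2 at column 3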
@@ -9,7 +9,6 @@ include DNN::Optimizers
 include DNN::Losses
 include DNN::Savers
 include DNN::Loaders
-MNIST = DNN::MNIST
 
 USE_MARSHAL = 0
 USE_JSON = 1
@@ -24,14 +23,14 @@ SAVE_STYLE = USE_MARSHAL
 # This setting is enabled when SAVE_STYLE is USE_MARSHAL.
 INCLUDE_MODEL = true
 
-x_train, y_train = MNIST.load_train
-x_test, y_test = MNIST.load_test
+x_train, y_train = DNN::MNIST.load_train
+x_test, y_test = DNN::MNIST.load_test
 
-x_train = Numo::SFloat.cast(x_train).reshape(x_train.shape[0], 784)
-x_test = Numo::SFloat.cast(x_test).reshape(x_test.shape[0], 784)
+x_train = x_train.reshape(x_train.shape[0], 784)
+x_test = x_test.reshape(x_test.shape[0], 784)
 
-x_train /= 255
-x_test /= 255
+x_train = Numo::SFloat.cast(x_train) / 255
+x_test = Numo::SFloat.cast(x_test) / 255
 
 y_train = DNN::Utils.to_categorical(y_train, 10, Numo::SFloat)
 y_test = DNN::Utils.to_categorical(y_test, 10, Numo::SFloat)
@@ -7,19 +7,12 @@ include DNN::Models
 include DNN::Layers
 include DNN::Optimizers
 include DNN::Losses
-CIFAR100 = DNN::CIFAR100
 
-x_train, y_train = CIFAR100.load_train
-x_test, y_test = CIFAR100.load_test
+x_train, y_train = DNN::CIFAR100.load_train
+x_test, y_test = DNN::CIFAR100.load_test
 
-x_train = Numo::SFloat.cast(x_train)
-x_test = Numo::SFloat.cast(x_test)
-
-x_train /= 255
-x_test /= 255
-
-y_train = y_train[true, 1]
-y_test = y_test[true, 1]
+x_train = Numo::SFloat.cast(x_train) / 255
+x_test = Numo::SFloat.cast(x_test) / 255
 
 y_train = DNN::Utils.to_categorical(y_train, 100, Numo::SFloat)
 y_test = DNN::Utils.to_categorical(y_test, 100, Numo::SFloat)
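The dropped y_train[true, 1] and y_test[true, 1] lines were selecting the fine-label column out of CIFAR-100's paired (coarse, fine) labels; the 0.15.2 loader apparently returns the fine labels directly. Numo's [true, n] indexing keeps every row and picks column n:

require "numo/narray"

y = Numo::UInt8[[3, 47], [0, 12]]   # two samples as [coarse, fine] label pairs
y[true, 1]                          # Numo::UInt8[47, 12]: all rows, column 1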
@@ -28,43 +21,44 @@ model = Sequential.new
 
 model << InputLayer.new([32, 32, 3])
 
-model << Conv2D.new(16, 5, padding: true)
-model << BatchNormalization.new
+model << Conv2D.new(32, 3, padding: true)
+model << Dropout.new(0.25)
 model << ReLU.new
 
-model << Conv2D.new(16, 5, padding: true)
+model << Conv2D.new(32, 3, padding: true)
 model << BatchNormalization.new
 model << ReLU.new
-
 model << MaxPool2D.new(2)
 
-model << Conv2D.new(32, 5, padding: true)
-model << BatchNormalization.new
+model << Conv2D.new(64, 3, padding: true)
+model << Dropout.new(0.25)
 model << ReLU.new
 
-model << Conv2D.new(32, 5, padding: true)
+model << Conv2D.new(64, 3, padding: true)
 model << BatchNormalization.new
 model << ReLU.new
-
 model << MaxPool2D.new(2)
 
-model << Conv2D.new(64, 5, padding: true)
-model << BatchNormalization.new
+model << Conv2D.new(128, 3, padding: true)
+model << Dropout.new(0.25)
 model << ReLU.new
 
-model << Conv2D.new(64, 5, padding: true)
+model << Conv2D.new(128, 3, padding: true)
 model << BatchNormalization.new
 model << ReLU.new
 
 model << Flatten.new
 
-model << Dense.new(1024)
+model << Dense.new(512)
 model << BatchNormalization.new
 model << ReLU.new
-model << Dropout.new(0.5)
 
 model << Dense.new(100)
 
 model.setup(Adam.new, SoftmaxCrossEntropy.new)
 
-model.train(x_train, y_train, 10, batch_size: 100, test: [x_test, y_test])
+model.train(x_train, y_train, 10, batch_size: 128, test: [x_test, y_test])
+
+accuracy, loss = model.evaluate(x_test, y_test)
+puts "accuracy: #{accuracy}"
+puts "loss: #{loss}"
@@ -7,16 +7,12 @@ include DNN::Models
 include DNN::Layers
 include DNN::Optimizers
 include DNN::Losses
-CIFAR10 = DNN::CIFAR10
 
-x_train, y_train = CIFAR10.load_train
-x_test, y_test = CIFAR10.load_test
+x_train, y_train = DNN::CIFAR10.load_train
+x_test, y_test = DNN::CIFAR10.load_test
 
-x_train = Numo::SFloat.cast(x_train)
-x_test = Numo::SFloat.cast(x_test)
-
-x_train /= 255
-x_test /= 255
+x_train = Numo::SFloat.cast(x_train) / 255
+x_test = Numo::SFloat.cast(x_test) / 255
 
 y_train = DNN::Utils.to_categorical(y_train, 10, Numo::SFloat)
 y_test = DNN::Utils.to_categorical(y_test, 10, Numo::SFloat)
@@ -25,31 +21,29 @@ model = Sequential.new
 
 model << InputLayer.new([32, 32, 3])
 
-model << Conv2D.new(16, 5, padding: true)
-model << BatchNormalization.new
+model << Conv2D.new(32, 3, padding: true)
+model << Dropout.new(0.25)
 model << ReLU.new
 
-model << Conv2D.new(16, 5, padding: true)
+model << Conv2D.new(32, 3, padding: true)
 model << BatchNormalization.new
 model << ReLU.new
-
 model << MaxPool2D.new(2)
 
-model << Conv2D.new(32, 5, padding: true)
-model << BatchNormalization.new
+model << Conv2D.new(64, 3, padding: true)
+model << Dropout.new(0.25)
 model << ReLU.new
 
-model << Conv2D.new(32, 5, padding: true)
+model << Conv2D.new(64, 3, padding: true)
 model << BatchNormalization.new
 model << ReLU.new
-
 model << MaxPool2D.new(2)
 
-model << Conv2D.new(64, 5, padding: true)
-model << BatchNormalization.new
+model << Conv2D.new(128, 3, padding: true)
+model << Dropout.new(0.25)
 model << ReLU.new
 
-model << Conv2D.new(64, 5, padding: true)
+model << Conv2D.new(128, 3, padding: true)
 model << BatchNormalization.new
 model << ReLU.new
 
@@ -58,10 +52,13 @@ model << Flatten.new
 model << Dense.new(512)
 model << BatchNormalization.new
 model << ReLU.new
-model << Dropout.new(0.5)
 
 model << Dense.new(10)
 
 model.setup(Adam.new, SoftmaxCrossEntropy.new)
 
-model.train(x_train, y_train, 10, batch_size: 100, test: [x_test, y_test])
+model.train(x_train, y_train, 10, batch_size: 128, test: [x_test, y_test])
+
+accuracy, loss = model.evaluate(x_test, y_test)
+puts "accuracy: #{accuracy}"
+puts "loss: #{loss}"
@@ -6,7 +6,6 @@ require_relative "dcgan"
 include DNN::Optimizers
 include DNN::Losses
 include DNN::Callbacks
-MNIST = DNN::MNIST
 
 Numo::SFloat.srand(rand(1 << 31))
 
@@ -21,7 +20,7 @@ dis.setup(Adam.new(alpha: 0.00001, beta1: 0.1), SigmoidCrossEntropy.new)
 dcgan.setup(Adam.new(alpha: 0.0002, beta1: 0.5), SigmoidCrossEntropy.new)
 dcgan.add_callback(CheckPoint.new("trained/dcgan_model"))
 
-x_train, * = MNIST.load_train
+x_train, * = DNN::MNIST.load_train
 x_train = Numo::SFloat.cast(x_train)
 x_train = x_train / 127.5 - 1
 
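The x_train / 127.5 - 1 line maps the 0..255 pixel range onto [-1, 1], matching the tanh-style output range a DCGAN generator typically produces. Checking the endpoints:

require "numo/narray"

pixels = Numo::SFloat.cast(Numo::UInt8[0, 127, 255])
pixels / 127.5 - 1   # approximately [-1.0, -0.0039, 1.0]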
@@ -7,9 +7,8 @@ include DNN::Models
 include DNN::Layers
 include DNN::Optimizers
 include DNN::Losses
-Iris = DNN::Iris
 
-x, y = Iris.load(true)
+x, y = DNN::Iris.load(true)
 x_train, y_train = x[0...100, true], y[0...100]
 x_test, y_test = x[100...150, true], y[100...150]
 
@@ -27,4 +26,8 @@ model << Dense.new(3)
 
 model.setup(Adam.new, SoftmaxCrossEntropy.new)
 
-model.train(x_train, y_train, 1000, batch_size: 10, test: [x_test, y_test])
+model.train(x_train, y_train, 500, batch_size: 32, test: [x_test, y_test])
+
+accuracy, loss = model.evaluate(x_test, y_test)
+puts "accuracy: #{accuracy}"
+puts "loss: #{loss}"
@@ -7,16 +7,12 @@ include DNN::Models
 include DNN::Layers
 include DNN::Optimizers
 include DNN::Losses
-MNIST = DNN::MNIST
 
-x_train, y_train = MNIST.load_train
-x_test, y_test = MNIST.load_test
+x_train, y_train = DNN::MNIST.load_train
+x_test, y_test = DNN::MNIST.load_test
 
-x_train = Numo::SFloat.cast(x_train)
-x_test = Numo::SFloat.cast(x_test)
-
-x_train /= 255
-x_test /= 255
+x_train = Numo::SFloat.cast(x_train) / 255
+x_test = Numo::SFloat.cast(x_test) / 255
 
 y_train = DNN::Utils.to_categorical(y_train, 10, Numo::SFloat)
 y_test = DNN::Utils.to_categorical(y_test, 10, Numo::SFloat)
@@ -47,3 +43,7 @@ model << Dense.new(10)
 model.setup(Adam.new, SoftmaxCrossEntropy.new)
 
 model.train(x_train, y_train, 10, batch_size: 100, test: [x_test, y_test])
+
+accuracy, loss = model.evaluate(x_test, y_test)
+puts "accuracy: #{accuracy}"
+puts "loss: #{loss}"
@@ -1,22 +1,21 @@
 require "dnn"
 require "dnn/datasets/mnist"
 # If you use numo/linalg then please uncomment out.
-# require "numo/linalg/autoloader"
+require "numo/linalg/autoloader"
 
 include DNN::Models
 include DNN::Layers
 include DNN::Optimizers
 include DNN::Losses
-MNIST = DNN::MNIST
 
-x_train, y_train = MNIST.load_train
-x_test, y_test = MNIST.load_test
+x_train, y_train = DNN::MNIST.load_train
+x_test, y_test = DNN::MNIST.load_test
 
-x_train = Numo::SFloat.cast(x_train).reshape(x_train.shape[0], 784)
-x_test = Numo::SFloat.cast(x_test).reshape(x_test.shape[0], 784)
+x_train = x_train.reshape(x_train.shape[0], 784)
+x_test = x_test.reshape(x_test.shape[0], 784)
 
-x_train /= 255
-x_test /= 255
+x_train = Numo::SFloat.cast(x_train) / 255
+x_test = Numo::SFloat.cast(x_test) / 255
 
 y_train = DNN::Utils.to_categorical(y_train, 10, Numo::SFloat)
 y_test = DNN::Utils.to_categorical(y_test, 10, Numo::SFloat)
@@ -27,17 +26,13 @@ class MLP < Model
     @l1 = Dense.new(256)
     @l2 = Dense.new(256)
     @l3 = Dense.new(10)
-    @bn1 = BatchNormalization.new
-    @bn2 = BatchNormalization.new
   end
 
   def call(x)
     x = InputLayer.new(784).(x)
     x = @l1.(x)
-    x = @bn1.(x)
     x = ReLU.(x)
     x = @l2.(x)
-    x = @bn2.(x)
     x = ReLU.(x)
     x = @l3.(x)
     x
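In the define-by-run call method above, ReLU.(x) is Ruby's shorthand for ReLU.call(x). The same .() sugar works on any object that responds to call:

double = ->(n) { n * 2 }
double.(21)       # => 42
double.call(21)   # => 42, identical to the form above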
@@ -45,7 +40,10 @@ class MLP < Model
 end
 
 model = MLP.new
-
 model.setup(Adam.new, SoftmaxCrossEntropy.new)
 
-model.train(x_train, y_train, 10, batch_size: 100, test: [x_test, y_test])
+model.train(x_train, y_train, 10, batch_size: 128, test: [x_test, y_test])
+
+accuracy, loss = model.evaluate(x_test, y_test)
+puts "accuracy: #{accuracy}"
+puts "loss: #{loss}"
@@ -7,16 +7,15 @@ include DNN::Models
 include DNN::Layers
 include DNN::Optimizers
 include DNN::Losses
-MNIST = DNN::MNIST
 
-x_train, y_train = MNIST.load_train
-x_test, y_test = MNIST.load_test
+x_train, y_train = DNN::MNIST.load_train
+x_test, y_test = DNN::MNIST.load_test
 
-x_train = Numo::SFloat.cast(x_train).reshape(x_train.shape[0], 784)
-x_test = Numo::SFloat.cast(x_test).reshape(x_test.shape[0], 784)
+x_train = x_train.reshape(x_train.shape[0], 784)
+x_test = x_test.reshape(x_test.shape[0], 784)
 
-x_train /= 255
-x_test /= 255
+x_train = Numo::SFloat.cast(x_train) / 255
+x_test = Numo::SFloat.cast(x_test) / 255
 
 y_train = DNN::Utils.to_categorical(y_train, 10, Numo::SFloat)
 y_test = DNN::Utils.to_categorical(y_test, 10, Numo::SFloat)
@@ -33,6 +32,10 @@ model << ReLU.new
 
 model << Dense.new(10)
 
-model.setup(RMSProp.new, SoftmaxCrossEntropy.new)
+model.setup(Adam.new, SoftmaxCrossEntropy.new)
 
-model.train(x_train, y_train, 10, batch_size: 100, test: [x_test, y_test])
+model.train(x_train, y_train, 10, batch_size: 128, test: [x_test, y_test])
+
+accuracy, loss = model.evaluate(x_test, y_test)
+puts "accuracy: #{accuracy}"
+puts "loss: #{loss}"
@@ -7,16 +7,15 @@ include DNN::Models
 include DNN::Layers
 include DNN::Optimizers
 include DNN::Losses
-MNIST = DNN::MNIST
 
-x_train, y_train = MNIST.load_train
-x_test, y_test = MNIST.load_test
+x_train, y_train = DNN::MNIST.load_train
+x_test, y_test = DNN::MNIST.load_test
 
-x_train = Numo::SFloat.cast(x_train).reshape(x_train.shape[0], 28, 28)
-x_test = Numo::SFloat.cast(x_test).reshape(x_test.shape[0], 28, 28)
+x_train = x_train.reshape(x_train.shape[0], 28, 28)
+x_test = x_test.reshape(x_test.shape[0], 28, 28)
 
-x_train /= 255
-x_test /= 255
+x_train = Numo::SFloat.cast(x_train) / 255
+x_test = Numo::SFloat.cast(x_test) / 255
 
 y_train = DNN::Utils.to_categorical(y_train, 10, Numo::SFloat)
 y_test = DNN::Utils.to_categorical(y_test, 10, Numo::SFloat)
@@ -33,3 +32,7 @@ model << Dense.new(10)
 model.setup(Adam.new, SoftmaxCrossEntropy.new)
 
 model.train(x_train, y_train, 10, batch_size: 100, test: [x_test, y_test])
+
+accuracy, loss = model.evaluate(x_test, y_test)
+puts "accuracy: #{accuracy}"
+puts "loss: #{loss}"
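The 28x28 reshape in the example above (presumably a recurrent model, given the shape) presents each image as a sequence of 28 time steps with 28 features per step rather than a flat 784-pixel vector. Shape-wise:

require "numo/narray"

x = Numo::SFloat.zeros(60_000, 784)   # 60,000 flattened MNIST images
x = x.reshape(x.shape[0], 28, 28)
x.shape   # => [60000, 28, 28], i.e. (batch, time steps, features)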
@@ -269,7 +269,7 @@ module DNN
     # @return [Array] Returns the test data accuracy and mean loss in the form [accuracy, mean_loss].
     def evaluate_by_iterator(test_iterator, batch_size: 100)
       num_test_datas = test_iterator.num_datas
-      batch_size = batch_size >= num_test_datas[0] ? num_test_datas : batch_size
+      batch_size = batch_size >= num_test_datas ? num_test_datas : batch_size
       total_correct = 0
      sum_loss = 0
       max_steps = (num_test_datas.to_f / batch_size).ceil
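This one-line library change fixes a real bug rather than style: test_iterator.num_datas is evidently an Integer here (note the later num_test_datas.to_f), and Ruby's Integer#[] is bit access, so the old num_test_datas[0] compared batch_size against the lowest bit of the count (0 or 1) instead of the count itself. A quick check of both behaviors:

10_000[0]    # => 0, bit 0 of 10_000 (Integer#[] reads a single bit)
10_001[0]    # => 1

# With the fix, a test set smaller than batch_size is evaluated in one batch:
batch_size = 100
num_test_datas = 50
batch_size = batch_size >= num_test_datas ? num_test_datas : batch_size
batch_size   # => 50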
data/lib/dnn/version.rb CHANGED
@@ -1,3 +1,3 @@
 module DNN
-  VERSION = "0.15.1"
+  VERSION = "0.15.2"
 end
metadata CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: ruby-dnn
 version: !ruby/object:Gem::Version
-  version: 0.15.1
+  version: 0.15.2
 platform: ruby
 authors:
 - unagiootoro
 autorequire:
 bindir: exe
 cert_chain: []
-date: 2019-11-23 00:00:00.000000000 Z
+date: 2019-12-01 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: numo-narray