ruby-dnn 0.16.0 → 0.16.1

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: ea912bca075445de925fda876e2b003bf0f63936100a3c14adf58bf810a6a9af
-  data.tar.gz: 41a40ee396cbda27faa7719cfcf2bed5e1337dc2963c1315561e186e05b81d77
+  metadata.gz: 2e04eef16303e2e223bfdb1ac17e2a35fc20d0eafff022f71e412d8b41dc49c9
+  data.tar.gz: d0cd83a209069eba6ea2c48fc6c3480e5edf5b99932b31cb9d7a73adb0eb573d
 SHA512:
-  metadata.gz: 16a12d59eb61b73f76b1361ddf2e5d3e2ca8929d5195f8f8d89299f829fa4a32284d2138ca6b8c4b8c5de5249c18e4a273390e9e80298e52d0e9a8109f4533a8
-  data.tar.gz: 859b180d139bff4f8904c939a41482574b1dd2c972fc83cf42743e078c3e04c28a1f8d740ba20dc970923bcb9411accefa86442fb7b77661de9ae015db1fc6b0
+  metadata.gz: 2b7126723c81495c603a1b98fc64fdd111d4853db78e4f0c840f75e925d60d97a3dd6ba22a9a9e2a8f643562a1284bfe502e38e0e9e4870c7699f16ed151df40
+  data.tar.gz: c1c8900e92ec77a03f46853a9ff14d4b09c8442a597f6ae87d1ee049764a3316e1b7ff92cdb4c13c4b2787d2759ff5c8f6c28c11668503d1d0062a2f671a04b7
data/README.md CHANGED
@@ -55,7 +55,7 @@ class MLP < Model
     @l3 = Dense.new(10)
   end
 
-  def call(x)
+  def forward(x)
     x = InputLayer.new(784).(x)
     x = @l1.(x)
     x = ReLU.(x)
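
Note: as of 0.16.1, user-defined models override `forward` instead of `call`; invoking the model object itself is unchanged. A minimal sketch of the caller side, assuming the README's `include DNN::Models` / `include DNN::Layers` setup:

```ruby
model = MLP.new
x = Numo::SFloat.new(1, 784).rand
y = model.predict(x)   # dispatches to #forward under the hood
```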
@@ -63,14 +63,7 @@ class Discriminator < Model
     @l6 = Dense.new(1)
   end
 
-  def forward(x, trainable = true)
-    @l1.trainable = trainable
-    @l2.trainable = trainable
-    @l3.trainable = trainable
-    @l4.trainable = trainable
-    @l5.trainable = trainable
-    @l6.trainable = trainable
-
+  def forward(x)
     x = InputLayer.new([28, 28, 1]).(x)
     x = @l1.(x)
     x = LeakyReLU.(x, 0.2)
@@ -91,6 +84,18 @@ class Discriminator < Model
     x = @l6.(x)
     x
   end
+
+  def enable_training
+    trainable_layers.each do |layer|
+      layer.trainable = true
+    end
+  end
+
+  def disable_training
+    trainable_layers.each do |layer|
+      layer.trainable = false
+    end
+  end
 end
 
 class DCGAN < Model
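
The `trainable` flag no longer rides along as a `forward` argument; it is flipped explicitly through these two helpers, so freezing the discriminator becomes a separate, visible step in the training loop. An illustrative sketch of the resulting pattern (the `dis` and `dcgan` variables are hypothetical):

```ruby
# Discriminator step: weights unfrozen, train on real + fake images.
dis.enable_training
dis_loss = dis.train_on_batch(x, y)

# Generator step: DCGAN#forward calls dis.disable_training, so only
# the generator's weights receive updates here.
dcgan_loss = dcgan.train_on_batch(noise, Numo::SFloat.ones(batch_size, 1))
```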
@@ -105,7 +110,8 @@ class DCGAN < Model
 
   def forward(x)
     x = @gen.(x)
-    x = @dis.(x, false)
+    @dis.disable_training
+    x = @dis.(x)
     x
   end
 
@@ -115,6 +121,7 @@ class DCGAN < Model
     images = @gen.predict(noise)
     x = x_batch.concatenate(images)
     y = Numo::SFloat.cast([1] * batch_size + [0] * batch_size).reshape(batch_size * 2, 1)
+    @dis.enable_training
     dis_loss = @dis.train_on_batch(x, y)
 
     noise = Numo::SFloat.new(batch_size, 20).rand(-1, 1)
@@ -41,7 +41,7 @@ end
     y = DNN::Tensor.convert(y_batch)
     out = net.(x, y)
     loss = lf.(out, y)
-    loss.link.backward(nil)
+    loss.link.backward
     puts "epoch: #{epoch}, step: #{step}, loss = #{loss.data}"
     opt.update([w1, b1, w2, b2])
   end
@@ -97,11 +97,7 @@ class Discriminator < Model
     @bn6 = BatchNormalization.new
   end
 
-  def forward(inputs, trainable = true)
-    trainable_layers.each do |layer|
-      layer.trainable = trainable
-    end
-
+  def forward(inputs)
     input, images = *inputs
     x = InputLayer.new(@gen_input_shape).(input)
     x = @l1_1.(x)
@@ -137,6 +133,18 @@ class Discriminator < Model
     x = @l7.(x)
     x
   end
+
+  def enable_training
+    trainable_layers.each do |layer|
+      layer.trainable = true
+    end
+  end
+
+  def disable_training
+    trainable_layers.each do |layer|
+      layer.trainable = false
+    end
+  end
 end
 
 class DCGAN < Model
@@ -151,7 +159,8 @@ class DCGAN < Model
 
   def forward(input)
     x = @gen.(input)
-    x = @dis.([input, x], false)
+    @dis.disable_training
+    x = @dis.([input, x])
     x
   end
 end
@@ -38,6 +38,7 @@ num_batchs = x_in.shape[0] / batch_size
   images = gen.predict(x_in)
   y_real = Numo::SFloat.ones(batch_size, 1)
   y_fake = Numo::SFloat.zeros(batch_size, 1)
+  dis.enable_training
   dis_loss = dis.train_on_batch([x_in, x_out], y_real)
   dis_loss += dis.train_on_batch([x_in, images], y_fake)
 
@@ -96,7 +96,7 @@ module DNN
       end
 
       def backward_node(dy)
-        @index * @x**(@index - 1)
+        dy * @index * @x**(@index - 1)
       end
     end
 
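This is a chain-rule fix: for y = x**n the local derivative n * x**(n - 1) must be scaled by the upstream gradient dy before being passed down. A quick numeric check of the corrected rule, using plain numo-narray with no framework internals:

```ruby
require "numo/narray"

x  = Numo::SFloat[2.0]
n  = 3
dy = Numo::SFloat[0.5]               # upstream gradient

analytic = dy * n * x**(n - 1)       # 0.5 * 3 * 2**2 = 6.0

eps = 1e-3                           # central finite difference
numeric = dy * ((x + eps)**n - (x - eps)**n) / (2 * eps)

p analytic                           # => Numo::SFloat[6]
p numeric                            # => approximately 6.0
```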
@@ -60,7 +60,7 @@ module DNN
         xs.shape[1].times do |t|
           x = xs[true, t, false]
           @hidden_layers[t].trainable = @trainable
-          h = @hidden_layers[t].forward_node(x, h)
+          h = @hidden_layers[t].forward(x, h)
           hs[true, t, false] = h
         end
         @hidden.data = h
@@ -77,7 +77,7 @@ module DNN
         dh = 0
         (dh2s.shape[1] - 1).downto(0) do |t|
           dh2 = dh2s[true, t, false]
-          dx, dh = @hidden_layers[t].backward_node(dh2 + dh)
+          dx, dh = @hidden_layers[t].backward(dh2 + dh)
           dxs[true, t, false] = dx
         end
         dxs
@@ -136,9 +136,7 @@ module DNN
       end
     end
 
-    class SimpleRNNDense < Layer
-      include LayerNode
-
+    class SimpleRNNDense
       attr_accessor :trainable
 
       def initialize(weight, recurrent_weight, bias, activation)
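
SimpleRNNDense (and LSTMDense below) stop being `Layer`s: they are per-time-step cells driven by the recurrent layer's unroll loop rather than nodes in the autograd graph, hence the rename from `forward_node`/`backward_node` to plain `forward`/`backward`. A self-contained toy cell sketching the contract these classes follow (a simplified tanh cell, not the actual implementation):

```ruby
require "numo/narray"

# Toy recurrent cell: h2 = tanh(x.dot(W) + h.dot(U) + b).
# The real SimpleRNNDense additionally accumulates weight gradients
# in backward when @trainable is set.
class ToyRNNCell
  def initialize(w, u, b)
    @w, @u, @b = w, u, b
  end

  def forward(x, h)
    @x, @h = x, h
    @h2 = Numo::NMath.tanh(x.dot(@w) + h.dot(@u) + @b)
  end

  def backward(dh2)
    da = dh2 * (1 - @h2**2)                       # derivative of tanh
    [da.dot(@w.transpose), da.dot(@u.transpose)]  # [dx, dh]
  end
end
```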
@@ -149,7 +147,7 @@ module DNN
         @trainable = true
       end
 
-      def forward_node(x, h)
+      def forward(x, h)
         @x = x
         @h = h
         h2 = x.dot(@weight.data) + h.dot(@recurrent_weight.data)
@@ -157,7 +155,7 @@ module DNN
         @activation.forward_node(h2)
       end
 
-      def backward_node(dh2)
+      def backward(dh2)
         dh2 = @activation.backward_node(dh2)
         if @trainable
           @weight.grad += @x.transpose.dot(dh2)
@@ -230,9 +228,7 @@ module DNN
       end
     end
 
-    class LSTMDense < Layer
-      include LayerNode
-
+    class LSTMDense
       attr_accessor :trainable
 
       def initialize(weight, recurrent_weight, bias)
@@ -247,7 +243,7 @@ module DNN
         @trainable = true
       end
 
-      def forward_node(x, h, c)
+      def forward(x, h, c)
         @x = x
         @h = h
         @c = c
@@ -266,7 +262,7 @@ module DNN
         [h2, c2]
       end
 
-      def backward_node(dh2, dc2)
+      def backward(dh2, dc2)
         dh2_tmp = @tanh_c2 * dh2
         dc2_tmp = @tanh.backward_node(@out * dh2) + dc2
 
@@ -334,7 +330,7 @@ module DNN
         xs.shape[1].times do |t|
           x = xs[true, t, false]
           @hidden_layers[t].trainable = @trainable
-          h, c = @hidden_layers[t].forward_node(x, h, c)
+          h, c = @hidden_layers[t].forward(x, h, c)
           hs[true, t, false] = h
         end
         @hidden.data = h
@@ -353,7 +349,7 @@ module DNN
         dc = 0
         (dh2s.shape[1] - 1).downto(0) do |t|
           dh2 = dh2s[true, t, false]
-          dx, dh, dc = @hidden_layers[t].backward_node(dh2 + dh, dc)
+          dx, dh, dc = @hidden_layers[t].backward(dh2 + dh, dc)
           dxs[true, t, false] = dx
         end
         dxs
@@ -370,8 +366,6 @@ module DNN
     end
 
     class GRUDense < Layer
-      include LayerNode
-
       attr_accessor :trainable
 
       def initialize(weight, recurrent_weight, bias)
@@ -384,7 +378,7 @@ module DNN
         @trainable = true
       end
 
-      def forward_node(x, h)
+      def forward(x, h)
         @x = x
         @h = h
         num_nodes = h.shape[1]
@@ -407,7 +401,7 @@ module DNN
         h2
       end
 
-      def backward_node(dh2)
+      def backward(dh2)
         dtanh_h = @tanh.backward_node(dh2 * (1 - @update))
         dh = dh2 * @update
 
@@ -8,7 +8,7 @@ module DNN
       @layer = layer
     end
 
-    def backward(dy)
+    def backward(dy = Numo::SFloat[1])
       dy = @layer.backward(dy)
       @prev&.backward(dy)
     end
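
With the new default, a backward pass started from a scalar loss no longer needs an explicit seed; omitting the argument is equivalent to seeding with ones, which is what the updated examples above rely on:

```ruby
# Both start the chain with the same seed gradient:
loss.link.backward                   # uses the Numo::SFloat[1] default
loss.link.backward(Numo::SFloat[1])  # explicit form
```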
@@ -25,7 +25,7 @@ module DNN
       @layer = layer
     end
 
-    def backward(dy)
+    def backward(dy = Numo::SFloat[1])
       dys = @layer.backward(dy)
       if dys.is_a?(Array)
         dy1, dy2 = *dys
@@ -1,7 +1,7 @@
 class Integer
   alias dnn__add +
   def +(other)
-    if other.is_a?(DNN::Tensor)
+    if other.is_a?(DNN::Tensor) || other.is_a?(DNN::Param)
       DNN::Layers::Add.(self, other)
     else
       dnn__add(other)
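
The same `DNN::Param` branch is added to `-`, `*`, and `/` on Integer and to all four operators on Float below, so a plain Ruby scalar on the left-hand side of an expression now routes into the layer graph for Params as well as Tensors. An illustrative sketch (`w` stands for any `DNN::Param`):

```ruby
# Previously 2 + w fell through to the native Integer#+ and failed;
# now it builds a DNN::Layers::Add node, mirroring w + 2.
y = 2 + w
z = 1 - w   # likewise routed through DNN::Layers::Sub
```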
@@ -10,7 +10,7 @@ class Integer
 
   alias dnn__sub -
   def -(other)
-    if other.is_a?(DNN::Tensor)
+    if other.is_a?(DNN::Tensor) || other.is_a?(DNN::Param)
       DNN::Layers::Sub.(self, other)
     else
       dnn__sub(other)
@@ -19,7 +19,7 @@ class Integer
 
   alias dnn__mul *
   def *(other)
-    if other.is_a?(DNN::Tensor)
+    if other.is_a?(DNN::Tensor) || other.is_a?(DNN::Param)
       DNN::Layers::Mul.(self, other)
     else
       dnn__mul(other)
@@ -28,7 +28,7 @@ class Integer
 
   alias dnn__div /
   def /(other)
-    if other.is_a?(DNN::Tensor)
+    if other.is_a?(DNN::Tensor) || other.is_a?(DNN::Param)
       DNN::Layers::Div.(self, other)
     else
       dnn__div(other)
@@ -39,7 +39,7 @@ end
 class Float
   alias dnn__add +
   def +(other)
-    if other.is_a?(DNN::Tensor)
+    if other.is_a?(DNN::Tensor) || other.is_a?(DNN::Param)
       DNN::Layers::Add.(self, other)
     else
       dnn__add(other)
@@ -48,7 +48,7 @@ class Float
 
   alias dnn__sub -
   def -(other)
-    if other.is_a?(DNN::Tensor)
+    if other.is_a?(DNN::Tensor) || other.is_a?(DNN::Param)
       DNN::Layers::Sub.(self, other)
     else
       dnn__sub(other)
@@ -57,7 +57,7 @@ class Float
 
   alias dnn__mul *
   def *(other)
-    if other.is_a?(DNN::Tensor)
+    if other.is_a?(DNN::Tensor) || other.is_a?(DNN::Param)
       DNN::Layers::Mul.(self, other)
     else
       dnn__mul(other)
@@ -66,7 +66,7 @@ class Float
 
   alias dnn__div /
   def /(other)
-    if other.is_a?(DNN::Tensor)
+    if other.is_a?(DNN::Tensor) || other.is_a?(DNN::Param)
       DNN::Layers::Div.(self, other)
     else
       dnn__div(other)
@@ -24,5 +24,37 @@ module DNN
         @grad = Xumo::SFloat[0]
       end
     end
+
+    def shape
+      @data.shape
+    end
+
+    def +@
+      self
+    end
+
+    def -@
+      self * -1
+    end
+
+    def +(other)
+      Layers::Add.(self, other)
+    end
+
+    def -(other)
+      Layers::Sub.(self, other)
+    end
+
+    def *(other)
+      Layers::Mul.(self, other)
+    end
+
+    def /(other)
+      Layers::Div.(self, other)
+    end
+
+    def **(index)
+      Layers::Pow.new(index).(self)
+    end
   end
 end
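
Together with the Integer/Float patches above, these operators let a `Param` take part in define-by-run arithmetic directly, including the `Pow` layer behind `**`. A hedged usage sketch (`w` is illustrative):

```ruby
w = DNN::Param.new(Numo::SFloat[1, 2, 3])

w.shape   # => [3], delegated to the underlying data
y = -w    # unary minus is sugar for w * -1 (Layers::Mul)
z = w**2  # builds Layers::Pow.new(2).(w)
```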
@@ -1,3 +1,3 @@
 module DNN
-  VERSION = "0.16.0"
+  VERSION = "0.16.1"
 end
metadata CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: ruby-dnn
 version: !ruby/object:Gem::Version
-  version: 0.16.0
+  version: 0.16.1
 platform: ruby
 authors:
 - unagiootoro
 autorequire:
 bindir: exe
 cert_chain: []
-date: 2020-01-05 00:00:00.000000000 Z
+date: 2020-01-06 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: numo-narray