ruby-dnn 0.4.1 → 0.4.2

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 8156ccf7edaf425d09a1463bb1c9280a1eeddaa67448323a19d301d451a78a0f
- data.tar.gz: 851e5aa8c87fd60ee9d5296f995c9b884a1bcaed823ab9e5d034f0b7b52d740b
+ metadata.gz: 6c69ef32fd33f04c1452e3b16a6de42d69274fa717726b0306b30a530935d059
+ data.tar.gz: 3ef80bd96e8328c0bc06762de1440f05eab109fecd3519ff0460abdcbfc2194d
  SHA512:
- metadata.gz: 520b9dc2ff534d36d86af82750b12a1772d29b487cfc18e02425f14c006e9fc1a15c525a0f1cfbe1ce541d2c7cac0cc7649a0542f3b9448634daf9869471f5d3
- data.tar.gz: 2ffe038bc5e5b69c61f32f99e9b21df8b2321b32148c6226d85e9aa82cedd8bade8bcbdab66c983a827a923ab3d3e449bb03cedb4c52f9b34fcbd1a732220e43
+ metadata.gz: dc501da4b2681cc1a3c65c8af2234685618872c466ca720e38692a7c2610d9b4520709babac1c20f91b675cfb74a4e317a71831aba04a71337148d1f4c8effda
+ data.tar.gz: 01cc6815889a578da2601c205b86277ad674531c4020ff624a2f15ea17ea4fe070a4b69228d6f53585f235d96d28d8d44132035d7adb2d963de94c9a3a785fbf
@@ -2,7 +2,7 @@
  This is the API reference for ruby-dnn. It covers only the classes and methods you need in order to use the API.
  If you need implementation details, please refer to the source code.

- Last updated for version: 0.4.0
+ Last updated for version: 0.4.2

  # module DNN
  The module that forms the ruby-dnn namespace.
@@ -471,6 +471,11 @@ A layer for the tanh function.
  # class LeakyReLU < Layer
  A layer for the LeakyReLU function.

+ ## 【Properties】
+ ## attr_reader :alpha
+ Float alpha
+ The slope used when the output value is negative.
+
  ## 【Instance methods】

  ## def initialize(alpha)
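A quick usage sketch of the newly documented property (the `DNN::Layers` path is an assumption based on the layer hunks below):

```ruby
require "dnn"

# Read back the negative-side slope via the new attr_reader.
relu = DNN::Layers::LeakyReLU.new(0.2)
relu.alpha # => 0.2

# The default slope is 0.3 when no argument is given.
DNN::Layers::LeakyReLU.new.alpha # => 0.3
```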
@@ -5,11 +5,11 @@ module DNN

  class Sigmoid < Layer
    def forward(x)
-     @out = 1.0 / (1 + NMath.exp(-x))
+     @out = 1 / (1 + NMath.exp(-x))
    end

    def backward(dout)
-     dout * (1.0 - @out) * @out
+     dout * (1 - @out) * @out
    end
  end

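Dropping the `.0` literals is behavior-preserving here: `NMath.exp` already returns a float array, so the integer `1` is promoted before the division. As a hedged sanity check (the `DNN::Layers::Sigmoid` path and Numo integration are assumptions based on the surrounding hunks), the analytic backward pass can be compared against `Util.numerical_grad` from later in this diff:

```ruby
require "numo/narray"
require "dnn"

sigmoid = ->(v) { 1 / (1 + Numo::NMath.exp(-v)) }

layer = DNN::Layers::Sigmoid.new
x = Numo::SFloat[-1.0, 0.0, 1.0]
out = layer.forward(x)

analytic = layer.backward(1)                    # 1 * (1 - out) * out
numeric  = DNN::Util.numerical_grad(x, sigmoid) # forward difference, step 1e-7
# The two should agree to roughly 1e-4 in single precision.
```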
@@ -46,6 +46,8 @@ module DNN
  class LeakyReLU < Layer
    include Xumo

+   attr_reader :alpha
+
    def initialize(alpha = 0.3)
      @alpha = alpha
    end
@@ -6,8 +6,8 @@ module DNN
      layer.params[param_key] = param
    end

-   def to_hash
-     {name: self.class.name}
+   def to_hash(hash)
+     {name: self.class.name}.merge(hash)
    end
  end

@@ -37,7 +37,7 @@ module DNN
    end

    def to_hash
-     {name: self.class.name, mean: @mean, std: @std}
+     super({mean: @mean, std: @std})
    end
  end

@@ -1,7 +1,7 @@
  module DNN
    module Layers

-     #Super class of all optimizer classes.
+     # Super class of all layer classes.
      class Layer
        include Xumo

@@ -9,43 +9,44 @@ module DNN
        @built = false
      end

-     #Build the layer.
+     # Build the layer.
      def build(model)
        @built = true
        @model = model
      end

-     #Does the layer have already been built?
+     # Has the layer already been built?
      def built?
        @built
      end

-     #Forward propagation.
+     # Forward propagation.
      def forward() end

-     #Backward propagation.
+     # Backward propagation.
      def backward() end

-     #Get the shape of the layer.
+     # Get the shape of the layer.
      def shape
        prev_layer.shape
      end

-     #Layer to a hash.
-     def to_hash
-       {name: self.class.name}
+     # Convert the layer to a hash.
+     def to_hash(hash)
+       {name: self.class.name}.merge(hash)
      end

-     #Get the previous layer.
+     # Get the previous layer.
      def prev_layer
        @model.layers[@model.layers.index(self) - 1]
      end
    end


+   # This class is the superclass of all classes with learning parameters.
    class HasParamLayer < Layer
-     attr_reader :params #The parameters of the layer.
-     attr_reader :grads #Differential value of parameter of layer.
+     attr_reader :params # The parameters of the layer.
+     attr_reader :grads  # Gradients of the layer's parameters.

      def initialize
        super
@@ -58,14 +59,14 @@ module DNN
        init_params
      end

-     #Update the parameters.
+     # Update the parameters.
      def update
        @model.optimizer.update(self)
      end

      private

-     #Initialize of the parameters.
+     # Initialize the parameters.
      def init_params() end
    end

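The repeated `to_hash` rewrites in the hunks below all follow from this new signature: each subclass now passes only its own fields and lets `Layer#to_hash` merge in the shared `:name` key. A minimal sketch of the contract, using a hypothetical `MyLayer` that is not part of the gem:

```ruby
require "dnn"

module DNN
  module Layers
    # Hypothetical subclass, defined only to illustrate the new contract.
    class MyLayer < Layer
      def initialize(rate)
        super()
        @rate = rate
      end

      def to_hash
        # Hand the subclass fields up; Layer#to_hash merges in :name.
        super({rate: @rate})
      end
    end
  end
end

DNN::Layers::MyLayer.new(0.5).to_hash
# => {name: "DNN::Layers::MyLayer", rate: 0.5}
```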
@@ -91,7 +92,7 @@ module DNN
    end

    def to_hash
-     {name: self.class.name, shape: @shape}
+     super({shape: @shape})
    end
  end

@@ -140,13 +141,10 @@ module DNN
    end

    def to_hash
-     {
-       name: self.class.name,
-       num_nodes: @num_nodes,
-       weight_initializer: @weight_initializer.to_hash,
-       bias_initializer: @bias_initializer.to_hash,
-       weight_decay: @weight_decay,
-     }
+     super({num_nodes: @num_nodes,
+            weight_initializer: @weight_initializer.to_hash,
+            bias_initializer: @bias_initializer.to_hash,
+            weight_decay: @weight_decay})
    end

    private
@@ -292,16 +290,13 @@ module DNN
    end

    def to_hash
-     {
-       name: self.class.name,
-       num_filters: @num_filters,
-       filter_size: @filter_size,
-       weight_initializer: @weight_initializer.to_hash,
-       bias_initializer: @bias_initializer.to_hash,
-       strides: @strides,
-       padding: @padding,
-       weight_decay: @weight_decay,
-     }
+     super({num_filters: @num_filters,
+            filter_size: @filter_size,
+            weight_initializer: @weight_initializer.to_hash,
+            bias_initializer: @bias_initializer.to_hash,
+            strides: @strides,
+            padding: @padding,
+            weight_decay: @weight_decay})
    end

    private
@@ -371,13 +366,10 @@ module DNN
    end

    def to_hash
-     {
-       name: self.class.name,
-       pool_width: @pool_width,
-       pool_height: @pool_height,
-       strides: @strides,
-       padding: @padding,
-     }
+     super({pool_width: @pool_width,
+            pool_height: @pool_height,
+            strides: @strides,
+            padding: @padding})
    end
  end

@@ -423,10 +415,7 @@ module DNN
    end

    def to_hash
-     {
-       name: self.class.name,
-       unpool_size: @unpool_size,
-     }
+     super({unpool_size: @unpool_size})
    end
  end

@@ -470,7 +459,7 @@ module DNN
    end

    def to_hash
-     {name: self.class.name, shape: @shape}
+     super({shape: @shape})
    end
  end

@@ -518,7 +507,7 @@ module DNN
    end

    def to_hash
-     {name: self.class.name, dropout_ratio: @dropout_ratio}
+     super({dropout_ratio: @dropout_ratio})
    end
  end

@@ -576,12 +565,9 @@ module DNN
    end

    def to_hash
-     {
-       name: self.class.name,
-       momentum: @momentum,
-       running_mean: @running_mean.to_a,
-       running_var: @running_var.to_a,
-     }
+     super({momentum: @momentum,
+            running_mean: @running_mean.to_a,
+            running_var: @running_var.to_a})
    end

    private
@@ -1,6 +1,7 @@
  require "json"

  module DNN
+   # This class represents the network model.
    class Model
      include Xumo

@@ -1,7 +1,7 @@
  module DNN
    module Optimizers

-     #Super class of all optimizer classes.
+     # Super class of all optimizer classes.
      class Optimizer
        attr_accessor :learning_rate

@@ -9,11 +9,11 @@ module DNN
        @learning_rate = learning_rate
      end

-     #Update layer has params.
+     # Update a layer that has params.
      def update(layer) end

-     def to_hash
-       {name: self.class.name, learning_rate: @learning_rate}
+     def to_hash(hash)
+       {name: self.class.name, learning_rate: @learning_rate}.merge(hash)
      end
    end

@@ -21,39 +21,31 @@ module DNN
    class SGD < Optimizer
      attr_accessor :momentum

+     def self.load_hash(hash)
+       self.new(hash[:learning_rate], momentum: hash[:momentum])
+     end
+
      def initialize(learning_rate = 0.01, momentum: 0)
        super(learning_rate)
        @momentum = momentum
-       @amounts = {}
-     end
-
-     def self.load_hash(hash)
-       self.new(hash[:learning_rate], hash[:momentum])
+       @v = {}
      end

      def update(layer)
-       amount = if @amounts[layer]
-         @amounts[layer]
-       else
-         @amounts[layer] = {}
-       end
+       @v[layer] ||= {}
        layer.params.each_key do |key|
-         amount[key] = layer.grads[key] * @learning_rate
+         amount = layer.grads[key] * @learning_rate
          if @momentum > 0
-           @amounts[layer][key] ||= 0
-           amount[key] += @momentum * @amounts[layer][key]
-           @amounts[layer] = amount
+           @v[layer][key] ||= 0
+           amount += @momentum * @v[layer][key]
+           @v[layer][key] = amount
          end
-         layer.params[key] -= amount[key]
+         layer.params[key] -= amount
        end
      end

      def to_hash
-       {
-         name: self.class.name,
-         learning_rate: @learning_rate,
-         momentum: @momentum,
-       }
+       super({momentum: @momentum})
      end
    end

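Two things are worth noting in this hunk. First, the relocated `load_hash` also fixes a real bug: `momentum` is a keyword argument of `initialize`, so the old positional call `self.new(hash[:learning_rate], hash[:momentum])` raised an ArgumentError. Second, `@v` now caches a per-parameter velocity. A standalone, dependency-free sketch of the momentum rule that `update` implements, with a scalar standing in for one cached `@v[layer][key]` entry:

```ruby
learning_rate = 0.01
momentum = 0.9
v = 0.0      # velocity, playing the role of @v[layer][key]
param = 1.0
grads = [0.5, 0.5, 0.5]

grads.each do |grad|
  amount = grad * learning_rate
  if momentum > 0
    amount += momentum * v # fold in the decayed previous step
    v = amount             # remember it for the next update
  end
  param -= amount
end
# For a constant gradient, the step size grows toward
# grad * learning_rate / (1 - momentum).
```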
@@ -102,11 +94,7 @@ module DNN
    end

    def to_hash
-     {
-       name: self.class.name,
-       learning_rate: @learning_rate,
-       muse: @muse,
-     }
+     super({muse: @muse})
    end
  end

@@ -145,12 +133,7 @@ module DNN
    end

    def to_hash
-     {
-       name: self.class.name,
-       learning_rate: @learning_rate,
-       beta1: @beta1,
-       beta2: @beta2,
-     }
+     super({beta1: @beta1, beta2: @beta2})
    end
  end

@@ -1,12 +1,13 @@
  module DNN
+   # This module provides utility functions.
    module Util
-     #Create a mini batch for batch size.
+     # Create a mini batch of "batch_size" samples.
      def self.get_minibatch(x, y, batch_size)
        indexes = (0...x.shape[0]).to_a.sample(batch_size)
        [x[indexes, false], y[indexes, false]]
      end

-     #Categorize labels into "num_classes" classes.
+     # Categorize labels into "num_classes" classes.
      def self.to_categorical(y, num_classes, narray_type = nil)
        narray_type ||= y.class
        y2 = narray_type.zeros(y.shape[0], num_classes)
@@ -16,12 +17,12 @@ module DNN
        y2
      end

-     #Perform numerical differentiation on "forward" of "layer".
+     # Perform numerical differentiation.
      def self.numerical_grad(x, func)
        (func.(x + 1e-7) - func.(x)) / 1e-7
      end

-     #Convert hash to an object.
+     # Convert a hash to an object.
      def self.load_hash(hash)
        dnn_class = DNN.const_get(hash[:name])
        if dnn_class.respond_to?(:load_hash)
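A hedged usage sketch for these utilities (Numo label arrays assumed; `to_categorical`'s one-hot fill is inferred from the zeros-then-assign body, which this hunk truncates):

```ruby
require "numo/narray"
require "dnn"

y = Numo::Int32[0, 2, 1]
DNN::Util.to_categorical(y, 3)
# => one-hot rows [[1, 0, 0], [0, 0, 1], [0, 1, 0]]

# numerical_grad applies a forward difference with step 1e-7.
square = ->(v) { v**2 }
DNN::Util.numerical_grad(2.0, square) # => ~4.0
```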
@@ -1,3 +1,3 @@
  module DNN
-   VERSION = "0.4.1"
+   VERSION = "0.4.2"
  end
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: ruby-dnn
  version: !ruby/object:Gem::Version
-   version: 0.4.1
+   version: 0.4.2
  platform: ruby
  authors:
  - unagiootoro
  autorequire:
  bindir: exe
  cert_chain: []
- date: 2018-07-22 00:00:00.000000000 Z
+ date: 2018-07-23 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
    name: numo-narray