rubygrad 1.2.1 → 1.2.2
- checksums.yaml +4 -4
- data/lib/nn.rb +37 -17
- data/mlp_example.rb +6 -0
- metadata +2 -2
checksums.yaml CHANGED

```diff
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: f05df7e74616deb2db9a7c7340e105e0997268d9d2938d07dbabf51395684fe5
+  data.tar.gz: 6179a5c6a57c37411784195a9ca3a15f73debdd06aaeae46a754ff5857e60770
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 2187327cf7c8b2a8e323b90138c49b7bd6cb5c84ed0e784a5c288556525cf0f8c619dd2aef024b6afaa27aad5b4f2a104457e67dd7158a121f41e9b5dfa631c3
+  data.tar.gz: ebb19b5217fb756fa9072dcf9e2b39ff503f93ac94f782cc45e2ffdc73ee85e6f5c9e471d7995d1017aa0697b97437cf5eb56be1cee34d79e30b0aaf4d5818b9
```
data/lib/nn.rb CHANGED

```diff
@@ -12,6 +12,8 @@ class Neuron
         @activation_function = activation_function
     end
 
+    attr_reader :weights, :bias
+
     def reset_params
         @initial_weights.each_with_index do |w,i|
             @weights[i].value = w
@@ -26,7 +28,9 @@ class Neuron
         (1...params.size).each { |i| @weights[i - 1].value = params[i] }
     end
 
-
+    def set_activation_function(activation_function)
+        @activation_function = activation_function
+    end
 
     def parameters
         self.weights + [self.bias]
@@ -71,6 +75,11 @@ class Layer
         self.neurons.each { |n| n.reset_params }
     end
 
+    def set_activation_function(activation_function)
+        @activation_function = activation_function
+        self.neurons.each { |n| n.set_activation_function(activation_function) }
+    end
+
     def calc(inputs)
         outs = []
         self.neurons.each do |neuron|
```
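The pattern here is a top-down cascade: `Layer#set_activation_function` records the new function and forwards it to every `Neuron` it owns, while the new `attr_reader :weights, :bias` makes each neuron's parameters inspectable from outside. A minimal sketch of exercising `Layer` directly, assuming the `Layer.new(number_of_inputs, number_of_neurons, activation)` signature implied by the `MLP` constructor in the next hunk, and assuming `:relu` is a supported symbol (only `:tanh` appears in this diff):

```ruby
require 'rubygrad'

# Assumed signature, inferred from MLP#initialize below:
# Layer.new(number_of_inputs, number_of_neurons, activation_symbol)
layer = Layer.new(3, 4, :tanh)

# One call swaps the activation for the layer and, transitively, all of its neurons.
layer.set_activation_function(:relu)

# The new attr_reader exposes each neuron's weights (and bias) for inspection.
layer.neurons.each { |n| puts "#{n.weights.size} weights" } # => "3 weights", four times
```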
```diff
@@ -86,37 +95,43 @@ class MLP
 
         number_of_layers = layers_config.size - 1 # last param is the activation function
 
-
+        act_array = validate_act_array(layers_config.last, number_of_layers)
 
-
-
+        @layers = Array.new(number_of_layers - 1) # input layer is not really a layer object
+        (number_of_layers - 1).times do |i|
+            @layers[i] = Layer.new(layers_config[i], layers_config[i + 1], act_array[i])
         end
 
-
+        @layers_config = layers_config
+    end
 
-
+    private def validate_act_array(act, number_of_layers)
 
-
+        if !act.is_a?(Symbol) and !act.is_a?(Array)
+            raise "Activation function must be passed as the last parameter: #{act.class} expected Symbol or Array of Symbols"
+        end
 
-
+        if act.is_a?(Array)
 
             if not act.all? { |item| item.is_a?(Symbol) }
                 raise "Array with activation functions must contain symbols: #{act}"
             end
 
             if act.size == 1
-
-
+                return Array.new(number_of_layers - 1) { act.first }
+            end
+
+            if act.size != number_of_layers - 1
                 raise "Array size does not match number of layers with activation functions: #{act.size} expected #{number_of_layers - 1}"
             end
-        end
-
-        @layers = Array.new(number_of_layers - 1) # input layer is not really a layer object
-        (number_of_layers - 1).times do |i|
-            @layers[i] = Layer.new(layers_config[i], layers_config[i + 1], single_activation_function.nil? ? act[i] : single_activation_function)
-        end
 
-
+            return act
+
+        else # is a Symbol
+
+            return Array.new(number_of_layers - 1) { act }
+
+        end
     end
 
     attr_reader :layers
```
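This refactor pulls the old inline checks out of the constructor into `validate_act_array`, which normalizes whatever the caller passed into one symbol per `Layer`: a bare `Symbol` (or a one-element `Array`) is broadcast to every layer, while a longer `Array` must hold exactly one symbol per non-input layer. For a `3, 4, 4, 1` network that means three `Layer` objects, hence three symbols. A short sketch of the accepted forms (`:relu` and `:sigmoid` are assumed symbol names; only `:tanh` appears in this diff):

```ruby
require 'rubygrad'

# A Symbol is broadcast to all three Layer objects.
nn = MLP.new(3, 4, 4, 1, :tanh)

# A one-element Array is broadcast the same way.
nn = MLP.new(3, 4, 4, 1, [:tanh])

# A full Array supplies one symbol per non-input layer (4 sizes -> 3 layers).
nn = MLP.new(3, 4, 4, 1, [:relu, :relu, :sigmoid])

# A mismatched size raises:
# "Array size does not match number of layers with activation functions: 2 expected 3"
# MLP.new(3, 4, 4, 1, [:relu, :tanh])
```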
```diff
@@ -179,6 +194,11 @@ class MLP
         end
     end
 
+    def set_activation_function(activation_function)
+        act_array = validate_act_array(activation_function, @layers_config.size - 1)
+        self.layers.each_with_index { |layer, i| layer.set_activation_function(act_array[i]) }
+    end
+
     def zero_grad
         self.parameters.each { |p| p.grad = 0.0 }
     end
```
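`MLP#set_activation_function` reuses `validate_act_array`, so an already-built network accepts the same `Symbol`/`Array` forms as the constructor and pushes the result down through each `Layer` (and from there to each `Neuron`). A minimal usage sketch, with the same caveat that `:relu` and `:sigmoid` are assumed symbol names:

```ruby
require 'rubygrad'

nn = MLP.new(3, 4, 4, 1, :tanh)

# Broadcast one activation function to every layer...
nn.set_activation_function(:relu)

# ...or set them per layer (three Layer objects for this configuration).
nn.set_activation_function([:relu, :relu, :sigmoid])
```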
data/mlp_example.rb CHANGED

```diff
@@ -1,4 +1,5 @@
 require 'rubygrad'
+#require_relative 'lib/nn.rb'
 
 # Build a Machine Learning Perceptron with 4 layers
 # First Layer (Layer 0) => Input Layer => 3 Neurons => 3 Inputs
@@ -7,6 +8,11 @@ require 'rubygrad'
 # Fourth Layer (Layer 3) => Output Layer => 1 Neuron => 1 Output
 nn = MLP.new(3, 4, 4, 1, :tanh)
 
+nn.show_params
+puts
+nn.show_params(in_words = true)
+puts
+
 # 4 input samples
 x_inputs = [
     [2.0, 3.0, -1.0],
```
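One wrinkle worth noting in the new example lines: `nn.show_params(in_words = true)` is not keyword-argument syntax. Ruby evaluates `in_words = true` as an assignment expression whose value is `true`, so `show_params` simply receives a positional `true`; the name serves only as inline documentation.

```ruby
# Equivalent calls: the assignment is evaluated, then its value is passed.
nn.show_params(in_words = true)
nn.show_params(true)
```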
metadata CHANGED

```diff
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: rubygrad
 version: !ruby/object:Gem::Version
-  version: 1.2.1
+  version: 1.2.2
 platform: ruby
 authors:
 - Sergio Oliveira Jr
@@ -40,7 +40,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
     - !ruby/object:Gem::Version
       version: '0'
 requirements: []
-rubygems_version: 3.
+rubygems_version: 3.4.9
 signing_key:
 specification_version: 4
 summary: A port of Andrej Karpathy's micrograd to Ruby.
```