rubygrad 1.1.2 → 1.2.2
This diff shows the content of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
- checksums.yaml +4 -4
- data/lib/nn.rb +77 -20
- data/mlp_example.rb +21 -6
- metadata +3 -3
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: f05df7e74616deb2db9a7c7340e105e0997268d9d2938d07dbabf51395684fe5
+  data.tar.gz: 6179a5c6a57c37411784195a9ca3a15f73debdd06aaeae46a754ff5857e60770
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 2187327cf7c8b2a8e323b90138c49b7bd6cb5c84ed0e784a5c288556525cf0f8c619dd2aef024b6afaa27aad5b4f2a104457e67dd7158a121f41e9b5dfa631c3
+  data.tar.gz: ebb19b5217fb756fa9072dcf9e2b39ff503f93ac94f782cc45e2ffdc73ee85e6f5c9e471d7995d1017aa0697b97437cf5eb56be1cee34d79e30b0aaf4d5818b9
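These digests cover the members of the `.gem` archive (metadata.gz and data.tar.gz), not the `.gem` file itself. A minimal sketch for recomputing the SHA256 values locally, assuming `rubygrad-1.2.2.gem` has already been downloaded (for example with `gem fetch rubygrad --version 1.2.2`):

```ruby
require "digest"
require "rubygems/package"

# A .gem file is a tar archive; checksums.yaml records digests of two of its members.
File.open("rubygrad-1.2.2.gem", "rb") do |io|
  tar = Gem::Package::TarReader.new(io)
  tar.each do |entry|
    next unless %w[metadata.gz data.tar.gz].include?(entry.full_name)
    puts "#{entry.full_name}: #{Digest::SHA256.hexdigest(entry.read)}"
  end
end
```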
data/lib/nn.rb
CHANGED
@@ -2,14 +2,18 @@ require_relative "value.rb"
 
 class Neuron
 
-    def initialize(number_of_inputs)
+    def initialize(number_of_inputs, activation_function)
         @initial_weights = Array.new(number_of_inputs) { rand(-1.0..1.0) }
         @initial_bias = rand(-1.0..1.0)
 
         @weights = @initial_weights.map { |w| Value.new(w) }
         @bias = Value.new(@initial_bias)
+
+        @activation_function = activation_function
     end
 
+    attr_reader :weights, :bias
+
     def reset_params
         @initial_weights.each_with_index do |w,i|
             @weights[i].value = w
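The activation function moves from a per-call argument (see the `calc` changes below) into the `Neuron` constructor. A small sketch of the new API, assuming `require 'rubygrad'` exposes `Neuron` the same way the bundled example relies on it exposing `MLP`:

```ruby
require 'rubygrad'

# A neuron with 3 inputs; the activation is now fixed at construction time.
n = Neuron.new(3, :tanh)
out = n.calc([1.0, -2.0, 0.5])   # forward pass, no activation argument needed
puts out.value                   # a Float in (-1, 1), squashed by tanh

# The activation can still be swapped later through the new setter.
n.set_activation_function(:relu)
```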
@@ -24,13 +28,15 @@ class Neuron
         (1...params.size).each { |i| @weights[i - 1].value = params[i] }
     end
 
-
+    def set_activation_function(activation_function)
+        @activation_function = activation_function
+    end
 
     def parameters
         self.weights + [self.bias]
     end
 
-    def calc(inputs, activation_function)
+    def calc(inputs)
         # xw + b
         n = self.weights.size
         raise "Wrong number of inputs! #{inputs.size} expected #{n}" unless n == inputs.size
@@ -38,25 +44,26 @@ class Neuron
         n.times do |index|
             sum += self.weights[index] * inputs[index]
         end
-        if activation_function == :tanh
+        if @activation_function == :tanh
             sum.tanh
-        elsif activation_function == :relu
+        elsif @activation_function == :relu
             sum.relu
-        elsif activation_function == :sigmoid
+        elsif @activation_function == :sigmoid
             sum.sigmoid
         else
-            raise "Unsupported activation function: #{activation_function}"
+            raise "Unsupported activation function: #{@activation_function}"
         end
     end
 end
 
 class Layer
 
-    def initialize(number_of_inputs, number_of_outputs)
-        @neurons = Array.new(number_of_outputs) { Neuron.new(number_of_inputs) }
+    def initialize(number_of_inputs, number_of_outputs, activation_function)
+        @neurons = Array.new(number_of_outputs) { Neuron.new(number_of_inputs, activation_function) }
+        @activation_function = activation_function
     end
 
-    attr_reader :neurons
+    attr_reader :neurons, :activation_function
 
     def parameters
         params = []
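A `Layer` now carries a single activation shared by all of its neurons, and an unsupported symbol is only rejected when a neuron actually fires, since the dispatch happens inside `Neuron#calc`. A short sketch (the input values are made up):

```ruby
require 'rubygrad'

layer = Layer.new(3, 4, :relu)      # 3 inputs fanned out to 4 ReLU neurons
outs  = layer.calc([0.5, -1.0, 2.0])
puts outs.size                      # => 4, one Value per neuron

layer.set_activation_function(:step)
# layer.calc([0.5, -1.0, 2.0])      # raises: Unsupported activation function: step
```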
@@ -80,18 +92,58 @@ end
 class MLP
 
     def initialize(*layers_config)
-        number_of_layers = layers_config.size
+
+        number_of_layers = layers_config.size - 1 # last param is the activation function
+
+        act_array = validate_act_array(layers_config.last, number_of_layers)
+
         @layers = Array.new(number_of_layers - 1) # input layer is not really a layer object
         (number_of_layers - 1).times do |i|
-            @layers[i] = Layer.new(layers_config[i], layers_config[i + 1])
+            @layers[i] = Layer.new(layers_config[i], layers_config[i + 1], act_array[i])
         end
+
         @layers_config = layers_config
     end
 
+    private def validate_act_array(act, number_of_layers)
+
+        if !act.is_a?(Symbol) and !act.is_a?(Array)
+            raise "Activation function must be passed as the last parameter: #{act.class} expected Symbol or Array of Symbols"
+        end
+
+        if act.is_a?(Array)
+
+            if not act.all? { |item| item.is_a?(Symbol) }
+                raise "Array with activation functions must contain symbols: #{act}"
+            end
+
+            if act.size == 1
+                return Array.new(number_of_layers - 1) { act.first }
+            end
+
+            if act.size != number_of_layers - 1
+                raise "Array size does not match number of layers with activation functions: #{act.size} expected #{number_of_layers - 1}"
+            end
+
+            return act
+
+        else # is a Symbol
+
+            return Array.new(number_of_layers - 1) { act }
+
+        end
+    end
+
     attr_reader :layers
 
     def inspect
-
+        lay = @layers_config[0..-2].join(", ") # slice to remove last element
+        act = @layers_config.last.inspect
+        "MLP(#{lay}, #{act})"
+    end
+
+    def to_s
+        inspect
     end
 
     def parameters
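`validate_act_array` normalizes the new last constructor argument: a single `Symbol` (or a one-element `Array`) is broadcast to every layer, while a longer `Array` must supply exactly one symbol per non-input layer. The accepted forms, using the layer sizes from the bundled example:

```ruby
require 'rubygrad'

nn1 = MLP.new(3, 4, 4, 1, :tanh)                 # one activation for all 3 layers
nn2 = MLP.new(3, 4, 4, 1, [:tanh])               # equivalent one-element array form
nn3 = MLP.new(3, 4, 4, 1, [:relu, :relu, :tanh]) # one activation per layer

puts nn3.inspect  # => MLP(3, 4, 4, 1, [:relu, :relu, :tanh])

# A mismatched array raises at construction time:
# MLP.new(3, 4, 4, 1, [:relu, :tanh])
#   => Array size does not match number of layers with activation functions: 2 expected 3
```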
@@ -102,11 +154,11 @@ class MLP
 
     def show_params(in_words = false)
         if in_words
-            n = @layers_config
+            n = @layers_config.first
             puts "Layer 0: (#{n} input#{n > 1 ? "s" : ""})"
             self.layers.each_with_index do |layer, i|
                 n = layer.neurons.size
-                puts "Layer #{i + 1}: (#{n} neuron#{n > 1 ? "s" : ""})"
+                puts "Layer #{i + 1}: (#{n} neuron#{n > 1 ? "s" : ""}, #{layer.activation_function.inspect} activation)"
                 layer.neurons.each_with_index do |neuron, ii|
                     n = neuron.weights.size
                     puts "\tNeuron #{ii + 1}: (#{n} weight#{n > 1 ? "s" : ""})"
@@ -116,7 +168,7 @@ class MLP
                 end
             end
         else
-            n = @layers_config
+            n = @layers_config.first
             self.layers.each_with_index do |layer, i|
                 n = layer.neurons.size
                 puts "["
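With the activation stored on each layer, `show_params(true)` now names it in the per-layer summary. The shape of the output implied by the format strings above (not captured from a run):

```ruby
nn = MLP.new(3, 4, 4, 1, :tanh)
nn.show_params(true)
# Layer 0: (3 inputs)
# Layer 1: (4 neurons, :tanh activation)
#     Neuron 1: (3 weights)
#     ...
```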
@@ -142,14 +194,19 @@ class MLP
         end
     end
 
+    def set_activation_function(activation_function)
+        act_array = validate_act_array(activation_function, @layers_config.size - 1)
+        self.layers.each_with_index { |layer, i| layer.set_activation_function(act_array[i]) }
+    end
+
     def zero_grad
         self.parameters.each { |p| p.grad = 0.0 }
     end
 
-    def calc(inputs, activation_function)
+    def calc(inputs)
         out = inputs
         self.layers.each do |layer|
-            out = layer.calc(out, activation_function)
+            out = layer.calc(out) # chain the results forward, layer by layer
         end
         out.size == 1 ? out[0] : out # for convenience
     end
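The MLP-level setter revalidates its argument and pushes the result down to every layer, so a network can be re-run under a different nonlinearity without being rebuilt, and `calc` no longer needs the activation threaded through each call. A brief sketch:

```ruby
require 'rubygrad'

nn  = MLP.new(3, 4, 4, 1, :tanh)
out = nn.calc([2.0, 3.0, -1.0])   # single output neuron => a single Value

# Swap every layer to sigmoid, or mix activations per layer, then recalculate:
nn.set_activation_function(:sigmoid)
nn.set_activation_function([:relu, :relu, :tanh])
puts nn.calc([2.0, 3.0, -1.0]).value
```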
data/mlp_example.rb
CHANGED
@@ -1,14 +1,28 @@
-
+require 'rubygrad'
+#require_relative 'lib/nn.rb'
 
-
+# Build a Machine Learning Perceptron with 4 layers
+# First Layer (Layer 0) => Input Layer => 3 Neurons => 3 Inputs
+# Second Layer (Layer 1) => Hidden Layer => 4 Neurons
+# Third Layer (Layer 2) => Hidden Layer => 4 Neurons
+# Fourth Layer (Layer 3) => Output Layer => 1 Neuron => 1 Output
+nn = MLP.new(3, 4, 4, 1, :tanh)
 
+nn.show_params
+puts
+nn.show_params(in_words = true)
+puts
+
+# 4 input samples
 x_inputs = [
     [2.0, 3.0, -1.0],
     [3.0, -1.0, 0.5],
     [0.5, 1.0, 1.0],
     [1.0, 1.0, -1.0]
 ]
-
+
+# expected output for each of the 4 inputs above
+y_expected = [1.0, -1.0, -1.0, 1.0]
 
 passes = 2000
 learning_rate = 0.2
@@ -20,7 +34,7 @@ _loss_format = "%.#{_loss_precision}f"
 (0...passes).each do |pass|
 
     # forward pass (calculate output)
-    y_calculated = x_inputs.map { |x| nn.calc(x, :tanh) }
+    y_calculated = x_inputs.map { |x| nn.calc(x) }
 
     # loss function (check how good the neural net is)
     loss = 0.0
@@ -38,6 +52,7 @@ _loss_format = "%.#{_loss_precision}f"
     break if loss.value == 0 # just for fun and just in case
 end
 
-y_calculated = x_inputs.map { |x| nn.calc(x, :tanh) }
+y_calculated = x_inputs.map { |x| nn.calc(x) }
 puts
-puts
+puts "Final NN results:"
+y_calculated.each_with_index { |y_c, i| puts "Output: #{y_c} => Expected: #{y_expected[i]}" }
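For context, the elided middle of the training loop follows the usual micrograd pattern. A sketch of one pass, assuming `Value` exposes `backward`, `grad`, and `value` as in the upstream micrograd port (the update line is illustrative, not copied from the file):

```ruby
# forward pass
y_calculated = x_inputs.map { |x| nn.calc(x) }

# squared-error loss accumulated as a Value graph
loss = 0.0
y_calculated.each_with_index { |y_c, i| loss += (y_c - y_expected[i]) ** 2 }

nn.zero_grad   # reset all gradients (p.grad = 0.0, per nn.rb above)
loss.backward  # backpropagate through the Value graph (assumed micrograd-style API)

# gradient descent step on every weight and bias
nn.parameters.each { |p| p.value -= learning_rate * p.grad }
```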
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: rubygrad
 version: !ruby/object:Gem::Version
-  version: 1.1.2
+  version: 1.2.2
 platform: ruby
 authors:
 - Sergio Oliveira Jr
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2023-03-
+date: 2023-03-22 00:00:00.000000000 Z
 dependencies: []
 description:
 email: sergio.oliveira.jr@gmail.com
@@ -40,7 +40,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
   - !ruby/object:Gem::Version
     version: '0'
 requirements: []
-rubygems_version: 3.
+rubygems_version: 3.4.9
 signing_key:
 specification_version: 4
 summary: A port of Andrej Karpathy's micrograd to Ruby.