rubygrad 1.1.1

Files changed (6)
  1. checksums.yaml +7 -0
  2. data/lib/nn.rb +156 -0
  3. data/lib/test_coerce.rb +79 -0
  4. data/lib/value.rb +164 -0
  5. data/mlp_example.rb +43 -0
  6. metadata +46 -0
checksums.yaml ADDED
@@ -0,0 +1,7 @@
+ ---
+ SHA256:
+   metadata.gz: 557100157146af78688b97ea078e871fe7caf7fcac573dbaf1df072bdcdc0f43
+   data.tar.gz: 44b62c4a04b6ef83f7a5a6d21df1ae97364d7b604169fd76de9259512de9aebc
+ SHA512:
+   metadata.gz: fdd16f9646f97c678dcf6239bc19e228767f01b02ec0456875dfa53a0dc27f0d218e20a3565840c13262efd46036d1589157ffb3ef6c1e4727d16a36688e3e86
+   data.tar.gz: 643ea4365dfc79de6101a57bd7750e925f8c659bb42c4706fa64542ddb64e246fb9c90549f450d08015043c904ef818e59c3a6c5dacb939248a68cfed01bdf2f
data/lib/nn.rb ADDED
@@ -0,0 +1,156 @@
+ require_relative "value.rb"
+
+ class Neuron
+
+   def initialize(number_of_inputs)
+     @initial_weights = Array.new(number_of_inputs) { rand(-1.0..1.0) }
+     @initial_bias = rand(-1.0..1.0)
+
+     @weights = @initial_weights.map { |w| Value.new(w) }
+     @bias = Value.new(@initial_bias)
+   end
+
+   def reset_params
+     @initial_weights.each_with_index do |w, i|
+       @weights[i].value = w
+     end
+     @bias.value = @initial_bias
+   end
+
+   def set_params(params)
+     n = 1 + @weights.size
+     raise "Illegal number of parameters: #{params.size} expected #{n}" if n != params.size
+     @bias.value = params[0]
+     (1...params.size).each { |i| @weights[i - 1].value = params[i] }
+   end
+
+   attr_reader :weights, :bias
+
+   def parameters
+     self.weights + [self.bias]
+   end
+
+   def calc(inputs, activation)
+     # xw + b
+     n = self.weights.size
+     raise "Wrong number of inputs! #{inputs.size} expected #{n}" unless n == inputs.size
+     sum = self.bias
+     n.times do |index|
+       sum += self.weights[index] * inputs[index]
+     end
+     if activation == :tanh
+       sum.tanh
+     elsif activation == :relu
+       sum.relu
+     elsif activation == :sigmoid
+       sum.sigmoid
+     else
+       raise "Unsupported activation function: #{activation}"
+     end
+   end
+ end
+
+ class Layer
+
+   def initialize(number_of_inputs, number_of_outputs)
+     @neurons = Array.new(number_of_outputs) { Neuron.new(number_of_inputs) }
+   end
+
+   attr_reader :neurons
+
+   def parameters
+     params = []
+     self.neurons.each { |n| params += n.parameters }
+     params
+   end
+
+   def reset_params
+     self.neurons.each { |n| n.reset_params }
+   end
+
+   def calc(inputs, activation)
+     outs = []
+     self.neurons.each do |neuron|
+       outs << neuron.calc(inputs, activation)
+     end
+     outs
+   end
+ end
+
+ class MLP
+
+   def initialize(*layers_config)
+     number_of_layers = layers_config.size
+     @layers = Array.new(number_of_layers - 1) # input layer is not really a layer object
+     (number_of_layers - 1).times do |i|
+       @layers[i] = Layer.new(layers_config[i], layers_config[i + 1])
+     end
+     @layers_config = layers_config
+   end
+
+   attr_reader :layers
+
+   def inspect
+     "MLP(#{@layers_config.join(", ")})"
+   end
+
+   def parameters
+     params = []
+     self.layers.each { |layer| params += layer.parameters }
+     params
+   end
+
+   def show_params(in_words = false)
+     if in_words
+       n = @layers_config[0]
+       puts "Layer 0: (#{n} input#{n > 1 ? "s" : ""})"
+       self.layers.each_with_index do |layer, i|
+         n = layer.neurons.size
+         puts "Layer #{i + 1}: (#{n} neuron#{n > 1 ? "s" : ""})"
+         layer.neurons.each_with_index do |neuron, ii|
+           n = neuron.weights.size
+           puts "\tNeuron #{ii + 1}: (#{n} weight#{n > 1 ? "s" : ""})"
+           puts "\t\tBias: #{neuron.bias.value}"
+           w = neuron.weights.map { |v| v.value }.join(", ")
+           puts "\t\tWeights: #{w}"
+         end
+       end
+     else
+       n = @layers_config[0]
+       self.layers.each_with_index do |layer, i|
+         n = layer.neurons.size
+         puts "["
+         layer.neurons.each_with_index do |neuron, ii|
+           w = neuron.weights.map { |v| v.value }.join(", ")
+           puts "\t[ #{neuron.bias.value}, #{w} #{ii == layer.neurons.size - 1 ? ']' : '],'}"
+         end
+         puts i == self.layers.size - 1 ? "]" : "],"
+       end
+     end
+     nil
+   end
+
+   def reset_params
+     self.layers.each { |layer| layer.reset_params }
+   end
+
+   def set_params(params)
+     params.each_with_index do |layer, li|
+       layer.each_with_index do |neuron, ni|
+         self.layers[li].neurons[ni].set_params(neuron)
+       end
+     end
+   end
+
+   def zero_grad
+     self.parameters.each { |p| p.grad = 0.0 }
+   end
+
+   def calc(inputs, activation)
+     out = inputs
+     self.layers.each do |layer|
+       out = layer.calc(out, activation) # chain the results forward, layer by layer
+     end
+     out.size == 1 ? out[0] : out # for convenience
+   end
+ end
data/lib/test_coerce.rb ADDED
@@ -0,0 +1,79 @@
+
+ class MyValue
+
+   def initialize(value)
+     @value = value
+   end
+
+   attr_reader :value
+
+   private def to_v(other) = other.is_a?(MyValue) ? other : MyValue.new(other)
+
+   def +(other)
+     other = to_v(other)
+     out = MyValue.new(self.value + other.value)
+   end
+
+   def *(other)
+     other = to_v(other)
+     out = MyValue.new(self.value * other.value)
+   end
+
+   def **(other)
+     out = MyValue.new(self.value ** other)
+   end
+
+   def -@
+     self * -1
+   end
+
+   def -(other)
+     self + (-other)
+   end
+
+   def /(other)
+     self * (other ** -1)
+   end
+
+   def coerce(other)
+     other = to_v(other)
+     [other, self.value]
+   end
+
+   def to_s
+     value.to_s
+   end
+
+ end
+
+ a = MyValue.new(4.0) # a MyValue
+ b = MyValue.new(2.0) # a MyValue
+ c = 2.0              # a Float
+
+ res = a - b
+ puts "#{res} is #{res.class}" # => 2.0 is MyValue
+
+ res = b - a
+ puts "#{res} is #{res.class}" # => -2.0 is MyValue
+
+ res = a - c
+ puts "#{res} is #{res.class}" # => 2.0 is MyValue
+
+ res = c - a
+ puts "#{res} is #{res.class}" # => -2.0 is MyValue
+
+ puts
+
+ res = a / b
+ puts "#{res} is #{res.class}" # => 2.0 is MyValue
+
+ res = b / a
+ puts "#{res} is #{res.class}" # => 0.5 is MyValue
+
+ res = a / c
+ puts "#{res} is #{res.class}" # => 2.0 is MyValue
+
+ res = c / a
+ puts "#{res} is #{res.class}" # => 0.5 is MyValue
+
+
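test_coerce.rb is not part of the network; it is a standalone demonstration of Ruby's numeric coercion protocol, which is what lets a plain number sit on the left-hand side of an operator. When Ruby evaluates 2.0 - a and Float#- does not know MyValue, it calls a.coerce(2.0) and applies the operator to the returned pair. A small sketch of that dispatch done by hand, assuming the MyValue class above:

a = MyValue.new(4.0)

# what Ruby does behind the scenes for `2.0 - a`:
left, right = a.coerce(2.0)    # left is a MyValue wrapping 2.0, right is the Float 4.0
res = left - right             # MyValue#- takes over from here
puts "#{res} is #{res.class}"  # => -2.0 is MyValue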
data/lib/value.rb ADDED
@@ -0,0 +1,164 @@
+ require 'set'
+
+ class Value
+
+   def initialize(value, prev = [])
+     @value = value
+     @grad = 0
+     @prev = prev.uniq.freeze
+     @calc_gradient = lambda { }
+   end
+
+   attr_reader :value, :grad, :prev, :calc_gradient
+   attr_writer :calc_gradient, :grad, :value
+
+   def +(other)
+     other = to_v(other)
+     out = Value.new(self.value + other.value, [self, other])
+
+     out.calc_gradient = lambda do
+       self.grad += out.grad
+       other.grad += out.grad
+     end
+
+     return out
+   end
+
+   def *(other)
+     other = to_v(other)
+     out = Value.new(self.value * other.value, [self, other])
+
+     out.calc_gradient = lambda do
+       self.grad += other.value * out.grad
+       other.grad += self.value * out.grad
+     end
+
+     return out
+   end
+
+   def **(other)
+     out = Value.new(self.value ** other, [self])
+
+     out.calc_gradient = lambda do
+       self.grad += (other * self.value ** (other - 1)) * out.grad
+     end
+
+     return out
+   end
+
+   def tanh
+     t = (Math.exp(2.0 * self.value) - 1.0) / (Math.exp(2.0 * self.value) + 1.0)
+     out = Value.new(t, [self])
+
+     out.calc_gradient = lambda do
+       self.grad += (1.0 - t ** 2.0) * out.grad
+     end
+
+     return out
+   end
+
+   def sigmoid
+     e = Math.exp(-1.0 * self.value)
+     t = 1.0 / (1.0 + e)
+     out = Value.new(t, [self])
+
+     out.calc_gradient = lambda do
+       self.grad += t * (1.0 - t) * out.grad
+     end
+
+     return out
+   end
+
+   def relu
+     n = self.value < 0 ? 0.0 : self.value
+     out = Value.new(n, [self])
+
+     out.calc_gradient = lambda do
+       self.grad += (out.value > 0 ? 1.0 : 0.0) * out.grad
+     end
+
+     return out
+   end
+
+   def exp
+     out = Value.new(Math.exp(self.value), [self])
+
+     out.calc_gradient = lambda do
+       self.grad += out.value * out.grad
+     end
+
+     return out
+   end
+
+   def -@
+     self * -1
+   end
+
+   def -(other)
+     self + (-other)
+   end
+
+   def /(other)
+     self * (other ** -1)
+   end
+
+   def coerce(other)
+     other = to_v(other)
+     [other, self.value]
+   end
+
+   def build_topo_graph(start)
+     topo = []
+     visited = Set.new
+     build_topo = lambda do |v|
+       if !visited.include?(v)
+         visited.add(v)
+         v.prev.each do |child|
+           build_topo.call(child)
+         end
+         topo.append(v)
+       end
+     end
+     build_topo.call(start)
+     return topo
+   end
+
+   def backward
+     topo = build_topo_graph(self)
+     self.grad = 1.0
+     topo.reverse_each do |node|
+       node.calc_gradient.call
+     end
+   end
+
+   def to_s
+     value.to_s
+   end
+
+   def inspect
+     "Value(value=#{value}, grad=#{grad})"
+   end
+
+   private def to_v(other) = other.is_a?(Value) ? other : Value.new(other)
+
+ end
+
+
+ =begin
+ x1 = Value.new(2.0)
+ x2 = Value.new(0.0)
+ w1 = Value.new(-3.0)
+ w2 = Value.new(1.0)
+ b = Value.new(6.881373587)
+ x1w1 = x1 * w1
+ x2w2 = x2 * w2
+ x1w1x2w2 = x1w1 + x2w2
+ n = x1w1x2w2 + b
+ o = n.tanh
+ o.backward
+
+ puts x1.inspect, x2.inspect, w1.inspect, w2.inspect
+
+ =end
+
+
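Value is the scalar autograd node underneath everything: each arithmetic operation records its operands in prev and stashes a closure (calc_gradient) that knows how to push the output gradient back to its inputs, and backward topologically sorts the graph and runs those closures in reverse. A minimal sketch of a forward pass followed by backpropagation, along the lines of the commented-out example above (the constants are arbitrary):

require_relative 'value.rb'

a = Value.new(2.0)
b = Value.new(-3.0)
c = Value.new(10.0)

d = a * b + c   # forward pass: d.value == 4.0
e = d.tanh      # squash the result

e.backward      # seeds e.grad = 1.0, then walks the graph in reverse order

puts e.inspect  # Value(value=0.999..., grad=1.0)
puts a.inspect  # a.grad == b.value * (1 - tanh(4.0)**2)
puts b.inspect  # b.grad == a.value * (1 - tanh(4.0)**2)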
data/mlp_example.rb ADDED
@@ -0,0 +1,43 @@
+ require_relative 'lib/nn.rb'
+
+ nn = MLP.new(3, 4, 4, 1)
+
+ x_inputs = [
+   [2.0, 3.0, -1.0],
+   [3.0, -1.0, 0.5],
+   [0.5, 1.0, 1.0],
+   [1.0, 1.0, -1.0]
+ ]
+ y_expected = [1.0, -1.0, -1.0, 1.0] # desired
+
+ passes = 2000
+ learning_rate = 0.2
+
+ _loss_precision = 10
+ _passes_format = "%#{passes.digits.length}d"
+ _loss_format = "%.#{_loss_precision}f"
+
+ (0...passes).each do |pass|
+
+   # forward pass (calculate output)
+   y_calculated = x_inputs.map { |x| nn.calc(x, :tanh) }
+
+   # loss function (check how good the neural net is)
+   loss = 0.0
+   y_expected.each_index { |i| loss += (y_calculated[i] - y_expected[i]) ** 2 }
+
+   # backward pass (calculate gradients)
+   nn.zero_grad
+   loss.backward
+
+   # improve neural net (update weights and biases)
+   nn.parameters.each { |p| p.value -= learning_rate * p.grad }
+
+   puts "Pass #{_passes_format % (pass + 1)} => Learning rate: #{"%.10f" % learning_rate} => Loss: #{_loss_format % loss.value}" if (pass + 1) % 100 == 0 or pass == 0
+
+   break if loss.value == 0 # just for fun and just in case
+ end
+
+ y_calculated = x_inputs.map { |x| nn.calc(x, :tanh) }
+ puts
+ puts y_calculated
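The example trains with plain full-batch gradient descent: a forward pass, the summed squared error as the loss, zero_grad plus backward to get fresh gradients, then p.value -= learning_rate * p.grad on every parameter. After training, the learned weights live only inside the Value objects, so a possible sketch for snapshotting and restoring them with the helpers from nn.rb (continuing from the script above; the nesting must mirror layers -> neurons -> [bias, weights...], which is what MLP#set_params expects):

# snapshot the trained parameters as plain floats
snapshot = nn.layers.map do |layer|
  layer.neurons.map { |neuron| [neuron.bias.value] + neuron.weights.map { |w| w.value } }
end

nn.reset_params                              # back to the initial random weights
nn.set_params(snapshot)                      # ...and restore the trained ones
puts x_inputs.map { |x| nn.calc(x, :tanh) }  # same predictions as before the reset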
metadata ADDED
@@ -0,0 +1,46 @@
+ --- !ruby/object:Gem::Specification
+ name: rubygrad
+ version: !ruby/object:Gem::Version
+   version: 1.1.1
+ platform: ruby
+ authors:
+ - Sergio Oliveira Jr
+ autorequire:
+ bindir: bin
+ cert_chain: []
+ date: 2023-03-21 00:00:00.000000000 Z
+ dependencies: []
+ description:
+ email: sergio.oliveira.jr@gmail.com
+ executables: []
+ extensions: []
+ extra_rdoc_files: []
+ files:
+ - lib/nn.rb
+ - lib/test_coerce.rb
+ - lib/value.rb
+ - mlp_example.rb
+ homepage: https://github.com/saoj/rubygrad
+ licenses:
+ - MIT
+ metadata: {}
+ post_install_message:
+ rdoc_options: []
+ require_paths:
+ - lib
+ required_ruby_version: !ruby/object:Gem::Requirement
+   requirements:
+   - - ">="
+     - !ruby/object:Gem::Version
+       version: 3.0.0
+ required_rubygems_version: !ruby/object:Gem::Requirement
+   requirements:
+   - - ">="
+     - !ruby/object:Gem::Version
+       version: '0'
+ requirements: []
+ rubygems_version: 3.4.7
+ signing_key:
+ specification_version: 4
+ summary: A port of Andrej Karpathy's micrograd to Ruby.
+ test_files: []
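The gemspec declares no runtime dependencies and only asks for Ruby >= 3.0, and require_paths is lib, so nn.rb (which itself pulls in value.rb) should be requireable by name once the gem is installed. A quick-start sketch, assuming installation from RubyGems under the name given above:

# gem install rubygrad
require 'nn'

nn = MLP.new(3, 4, 4, 1)
puts nn.calc([2.0, 3.0, -1.0], :tanh).inspect  # => Value(value=..., grad=0)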