rubygrad 1.2.2 → 1.2.3
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +4 -4
- data/lib/nn.rb +9 -0
- data/mlp_example.rb +2 -5
- metadata +2 -2
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
|
|
1
1
|
---
|
2
2
|
SHA256:
|
3
|
-
metadata.gz:
|
4
|
-
data.tar.gz:
|
3
|
+
metadata.gz: dc5192646258aef548a0f7d3d939424aeee3030a85884832abd2060e47982416
|
4
|
+
data.tar.gz: e3bd36e7f881538f6b5afe32398172c5e1ed736e64a5bec70a6ef09afd266870
|
5
5
|
SHA512:
|
6
|
-
metadata.gz:
|
7
|
-
data.tar.gz:
|
6
|
+
metadata.gz: ffafa66dedb81bd1b5df7166f9e3927681422f4b91a5db76e5c640a6edfd6c9625be07245944929082ac492e1001d6ce508c5a727ddfdaa70daa7ba584a87edb
|
7
|
+
data.tar.gz: 8342618653c1166f9c646030c95f94ed165fefb2be82d41fc05b1a2c2e55779599719e88055f87fa5bafa785656c0ac15a0d966a9b834b6f78da8ddb779f5499
|
data/lib/nn.rb
CHANGED
@@ -50,6 +50,8 @@ class Neuron
|
|
50
50
|
sum.relu
|
51
51
|
elsif @activation_function == :sigmoid
|
52
52
|
sum.sigmoid
|
53
|
+
elsif @activation_function == :none
|
54
|
+
sum
|
53
55
|
else
|
54
56
|
raise "Unsupported activation function: #{activation_function}"
|
55
57
|
end
|
@@ -210,4 +212,11 @@ class MLP
|
|
210
212
|
end
|
211
213
|
out.size == 1 ? out[0] : out # for convenience
|
212
214
|
end
|
215
|
+
|
216
|
+
def print_pass(learning_rate, loss, pass, passes, learning_rate_precision = 2, loss_precision = 10)
|
217
|
+
passes_format = "%#{passes.digits.length}d"
|
218
|
+
learning_rate_format = "%.#{learning_rate_precision}f"
|
219
|
+
loss_format = "%.#{loss_precision}f"
|
220
|
+
puts "Pass #{passes_format % (pass + 1)} => Learning rate: #{learning_rate_format % learning_rate} => Loss: #{loss_format % loss.value}"
|
221
|
+
end
|
213
222
|
end
|
data/mlp_example.rb
CHANGED
@@ -27,10 +27,6 @@ y_expected = [1.0, -1.0, -1.0, 1.0]
|
|
27
27
|
passes = 2000
|
28
28
|
learning_rate = 0.2
|
29
29
|
|
30
|
-
_loss_precision = 10
|
31
|
-
_passes_format = "%#{passes.digits.length}d"
|
32
|
-
_loss_format = "%.#{_loss_precision}f"
|
33
|
-
|
34
30
|
(0...passes).each do |pass|
|
35
31
|
|
36
32
|
# forward pass (calculate output)
|
@@ -47,7 +43,8 @@ _loss_format = "%.#{_loss_precision}f"
|
|
47
43
|
# improve neural net (update weights and biases)
|
48
44
|
nn.parameters.each { |p| p.value -= learning_rate * p.grad }
|
49
45
|
|
50
|
-
|
46
|
+
# print some info about our progress from time to time
|
47
|
+
nn.print_pass(learning_rate, loss, pass, passes) if (pass + 1) % 100 == 0 or pass == 0
|
51
48
|
|
52
49
|
break if loss.value == 0 # just for fun and just in case
|
53
50
|
end
|
metadata
CHANGED
@@ -1,14 +1,14 @@
|
|
1
1
|
--- !ruby/object:Gem::Specification
|
2
2
|
name: rubygrad
|
3
3
|
version: !ruby/object:Gem::Version
|
4
|
-
version: 1.2.2
|
4
|
+
version: 1.2.3
|
5
5
|
platform: ruby
|
6
6
|
authors:
|
7
7
|
- Sergio Oliveira Jr
|
8
8
|
autorequire:
|
9
9
|
bindir: bin
|
10
10
|
cert_chain: []
|
11
|
-
date: 2023-03-
|
11
|
+
date: 2023-03-23 00:00:00.000000000 Z
|
12
12
|
dependencies: []
|
13
13
|
description:
|
14
14
|
email: sergio.oliveira.jr@gmail.com
|