the_noggin 0.0.4 → 0.1.0

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA1:
-   metadata.gz: 53abd15e32f1bc58d542d5bb2480f4e6f1d75c89
-   data.tar.gz: cd335f6a5b853e04334690f16db4f57698cb916a
+   metadata.gz: 607a3501df480a778864de21cc7ad5cff59ea17a
+   data.tar.gz: 3cd6dd3d9fb66a40bb90cf8bf37d00af9e64a8cf
  SHA512:
-   metadata.gz: 8b740e1ba1f091ba957de78416fad3ab7758dc07aeb2af496300335a5ebaa490c00492000277e903ef47e7c3c34d5f3ba57d5f491c0e0cf6689690ed96bd98b0
-   data.tar.gz: 44a6217cedcf96933f0c902aecf95d5732439e2fee62d08205a7180ac80b6310070680bf3559574b46db24dc1a592ec1e9eca89449047ea372a0326e877f80e3
+   metadata.gz: 7e0caaabbe999c3f60fdd82edefebd455c066c4259760aa6c448c6ded85d15a13ae308d296bbe0ec0ffee30a14f64b5bc589f8f3a03aaa8454f7b694358eff80
+   data.tar.gz: 39e80d7c547bf95dd01cc6d51dcf72f6be7e2b0485766084c448440699af72a35225ff05b46e393deb07d33860e70a51686478be75c2556d84c7680ccf7f9ca4
data/README.md CHANGED
@@ -1,19 +1,21 @@
  # Noggin
- Ruby Neural Network implementation using backpropagation and gradient descent for training
+ A neural network written in Ruby with an object-oriented implementation. Training is done using http://en.wikipedia.org/wiki/Backpropagation and http://en.wikipedia.org/wiki/Gradient_descent.
+
+ The simple API was inspired by https://github.com/harthur/brain
 
 
  ``` Ruby
  network = Noggin::Network.new
 
  network.train([
-   { input: [0, 0], output: 0 },
-   { input: [0, 1], output: 1 },
-   { input: [1, 0], output: 1 },
-   { input: [1, 1], output: 0 }
+   { input: [0, 0], expected: 0 },
+   { input: [0, 1], expected: 1 },
+   { input: [1, 0], expected: 1 },
+   { input: [1, 1], expected: 0 }
  ])
 
  network.run [0, 0] # 0.0163
- network.run [0, 1] # 0.9573
+ network.run [0, 1] # 0.9873
  network.run [1, 0] # 0.9702
  network.run [1, 1] # 0.0142
 
@@ -25,28 +27,10 @@ network.run [1, 1] # 0.0142
  ## Options
  ``` Ruby
  Noggin::Network.new(
-   max_training_laps: 100000, # How many propgation of errors to do when training
+   training_laps: 100000, # How many propagations of error to run when training
    learning_rate: 0.1, # How fast the network learns
    momentum: 0.2, # How much of previous weight deltas should be applied to next delta
    hidden_layer_size: 1, # Number of hidden layers
    hidden_layer_node_size: 2, # Number of nodes each hidden layer has
-   log: true # print out network state for each input during last iteration.
  )
  ```
-
- ## Print Network
-
- ``` Ruby
- network.pretty_print
- ```
- ```
-  ------                                  ------                                  --------------
- | | -EDGE--(w: 0.438443, d: 0.01759)    | | -EDGE--(w: 0.515923, d: 0.09704)    | ed: 0.668486
- | | -EDGE--(w: 0.746539, d: 0.013825)    ------                                 | d: 0.148145
-  ------                                  ------                                 | e: 0.223437
-  ------                                 | | -EDGE--(w: 0.485781, d: 0.11099)    | o: 0.668486
- | | -EDGE--(w: 0.199745, d: 0.01759)     ------                                  --------------
- | | -EDGE--(w: 0.345684, d: 0.013825)
-  ------
-
- ```
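Worth spelling out for upgraders, since the README only hints at it: 0.1.0 renames the training key `output:` to `expected:`, renames the `max_training_laps` option to `training_laps`, and `train` now returns a summary hash instead of the network itself (see `lib/noggin/network.rb` below). A minimal sketch of the new API, reusing the XOR data above; the commented values are illustrative, not exact:

``` Ruby
require 'noggin'

network = Noggin::Network.new training_laps: 20_000

# `output:` is now `expected:`; `train` returns stats rather than `self`.
stats = network.train([
  { input: [0, 0], expected: 0 },
  { input: [0, 1], expected: 1 },
  { input: [1, 0], expected: 1 },
  { input: [1, 1], expected: 0 }
])

stats[:training_laps_needed] # laps actually run before min_training_error was reached
network.run [0, 1]           # close to 1.0 after training
```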
data/lib/noggin.rb CHANGED
@@ -1,6 +1,5 @@
- require 'noggin/node/base'
- require 'noggin/node/input'
- require 'noggin/node/output'
- require 'noggin/node/edge'
- require 'noggin/network'
- require 'noggin/pretty_printer'
+ require 'noggin/neuron'
+ require 'noggin/bias'
+ require 'noggin/edge'
+ require 'noggin/layer'
+ require 'noggin/network'
data/lib/noggin/bias.rb ADDED
@@ -0,0 +1,23 @@
+ module Noggin
+   class Bias
+
+     attr_accessor :dests, :forward_output
+
+     def initialize
+       @dests = []
+       @forward_output = 1
+     end
+
+     def forward_activate!
+       dests.each do |edge|
+         edge.forward_input = @forward_output
+         edge.forward_activate!
+       end
+     end
+
+     def learn!
+       dests.each(&:learn!)
+     end
+
+   end
+ end
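`Bias` replaces 0.0.4's trick of appending an extra `Noggin::Node::Input` with `output = 1` to each layer. It always emits a constant 1, so the weight on each outgoing edge becomes the destination neuron's bias term. A toy sketch of the data flow, using only classes added in this release:

``` Ruby
require 'noggin'

bias   = Noggin::Bias.new
neuron = Noggin::Neuron.new
Noggin::Neuron.connect_neurons bias, neuron, 1 # third argument is the momentum

bias.forward_activate!
bias.dests.first.forward_output # => 1 * weight, i.e. the bias contribution
```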
data/lib/noggin/edge.rb ADDED
@@ -0,0 +1,31 @@
+ module Noggin
+   class Edge
+
+     attr_accessor :origin, :dest, :weight, :derivative, :forward_input, :forward_output, :backward_input, :backward_output
+
+     def initialize origin:, dest:, weight: rand(0.20...0.80), momentum: 1, learning_rate: 0.3
+       @origin = origin
+       @dest = dest
+       @weight = weight
+       @momentum = momentum
+       @previous_derivative = 0
+       @learning_rate = learning_rate
+     end
+
+     def forward_activate!
+       @forward_output = @forward_input * weight
+     end
+
+     def backward_activate!
+       @backward_output = dest.backward_output * weight
+       @derivative = dest.backward_output * origin.forward_output
+     end
+
+     def learn!
+       @weight -= @learning_rate * @derivative + (@previous_derivative * @momentum)
+       @previous_derivative = @derivative
+     end
+
+   end
+ end
+
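`Edge#learn!` is plain gradient descent with a momentum term: the weight moves against the current derivative scaled by the learning rate, plus the whole previous derivative scaled by `momentum` (note the default of 1, and that 0.0.4 instead scaled the previous *weight delta*). A worked first step, assuming a derivative of 0.1 and the class defaults:

``` Ruby
require 'noggin'

edge = Noggin::Edge.new origin: nil, dest: nil, weight: 0.5, momentum: 1, learning_rate: 0.3

edge.derivative = 0.1
edge.learn!
edge.weight # => 0.5 - (0.3 * 0.1 + 0 * 1) = 0.47; the next lap also subtracts the stored 0.1
```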
data/lib/noggin/layer.rb ADDED
@@ -0,0 +1,46 @@
+ module Noggin
+   class Layer
+
+     attr_accessor :origin_layer, :dest_layer, :neurons, :bias
+
+     def initialize
+       @neurons = []
+     end
+
+     def forward_activate!
+       @bias.forward_activate!
+       @neurons.each do |neuron|
+         neuron.forward_input = neuron.origins.inject(0) { |sum, edge| sum + edge.forward_output }
+         neuron.forward_activate!
+       end
+     end
+
+     def backward_activate!
+       @neurons.each do |neuron|
+         neuron.backward_input = neuron.dests.inject(0) { |sum, edge| sum + edge.backward_output }
+         neuron.backward_activate!
+       end
+     end
+
+     def biased
+       @bias = Bias.new if bias.nil?
+     end
+
+     class << self
+
+       def connect_layers origin_layer, dest_layer, momentum
+         dest_layer.biased
+         dest_layer.neurons.each do |dest_neuron|
+           Noggin::Neuron.connect_neurons dest_layer.bias, dest_neuron, momentum
+           origin_layer.neurons.each do |origin_neuron|
+             Noggin::Neuron.connect_neurons origin_neuron, dest_neuron, momentum
+           end
+         end
+         origin_layer.dest_layer = dest_layer
+         dest_layer.origin_layer = origin_layer
+       end
+
+     end
+
+   end
+ end
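`Layer.connect_layers` fully connects two layers and gives the destination layer a bias wired into each of its neurons, so every destination neuron ends up with one incoming edge per origin neuron plus one from the bias. A quick sketch of the resulting fan-in, assuming two 2-neuron layers:

``` Ruby
require 'noggin'

origin = Noggin::Layer.new
origin.neurons = Array.new(2) { Noggin::Neuron.new }
dest = Noggin::Layer.new
dest.neurons = Array.new(2) { Noggin::Neuron.new }

Noggin::Layer.connect_layers origin, dest, 1 # third argument is the momentum

dest.neurons.first.origins.size # => 3 (two origin neurons + the bias edge)
dest.bias.dests.size            # => 2 (one edge per destination neuron)
```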
data/lib/noggin/network.rb CHANGED
@@ -1,118 +1,127 @@
  module Noggin
    class Network
 
-     attr_reader :input_nodes
-     attr_reader :output_node
-     attr_reader :layers
-     attr_reader :options
-
-     DEFAULTS = {
-       learning_rate: 0.3,
-       momentum: 5,
-       max_training_laps: 2000,
-       hidden_layer_size: 1,
-       hidden_layer_node_size: 2,
-       log: false
+     attr_accessor :layers, :options, :forward_input, :forward_output
+
+     DEFAULTS = {
+       learning_rate: 0.3,
+       momentum: 1,
+       training_laps: 50000,
+       hidden_layer_size: 1,
+       hidden_layer_node_size: 2,
+       min_training_error: 0.001
      }
 
      def initialize **opts
        @options = DEFAULTS.merge opts
-       @ready = false
        @layers = []
+       @ready = false
      end
 
-     def run input
-       update_input_nodes input
-       output_node.output
-     end
-
-     def train data_batch
-       init_network(data_batch)unless @ready
-       options[:max_training_laps].times do |i|
-         data_batch.each do |batch|
-           propagate_error! batch[:input], batch[:output]
-           if options[:log] && i == options[:max_training_laps] - 1
-             print "Last train for input: #{batch[:input]}, expected: #{batch[:output]}"
-             pretty_print
-           end
+     def train data_set
+       @forward_input = data_set
+       setup_layers unless @ready
+       error = Float::INFINITY
+       laps = 0
+       until laps >= options[:training_laps] || error < options[:min_training_error] do
+         laps += 1
+         error = 0
+         data_set.each do |set|
+           setup_input set[:input]
+           setup_expected set[:expected]
+           error += run_for_error set[:input]
+           setup_backwards
+           run_backwards
+           update_weights
          end
        end
-       return self
+       { total_error: error, training_laps_needed: laps }
      end
 
-     def propagate_error! input, expected
-       update_input_nodes input
-       output_node.expected = expected
-       input_nodes.each { |node| node.derivative_chain }
-       update_weights!
+     def run input
+       setup_input input
+       run_forwards input
+       layers.last.neurons.first.forward_output
      end
 
-     def update_weights!
-       edges.each do |edge|
-         delta_weight = options[:learning_rate] * edge.derivative
-         edge.weight -= delta_weight + (edge.momentum * edge.previous_weight)
-         edge.previous_weight = delta_weight
-       end
+     def run_for_error input
+       run_forwards input
+       layers.last.neurons.inject(0) { |sum, neuron| sum + neuron.forward_error_output }
      end
 
-     def init_network data_batch
-       @input_nodes = Array.new(data_batch.first[:input].size + 1){ Noggin::Node::Input.new }
-       @layers << @input_nodes
-       last_layer = @input_nodes
-       options[:hidden_layer_size].times do |i|
-         new_layer = Array.new(options[:hidden_layer_node_size]){ Noggin::Node::Base.new }
-         bias_node = Noggin::Node::Input.new
-         bias_node.output = 1
-         new_layer << bias_node
-         @layers << new_layer
-         connect_layer last_layer, new_layer
-         last_layer = new_layer
+     def run_forwards input
+       layers[1..-1].each(&:forward_activate!)
+     end
+
+     def run_backwards
+       layers.reverse[1..-1].each(&:backward_activate!)
+     end
+
+     def update_weights
+       layers.each do |layer|
+         layer.bias.learn! if layer.bias
+         layer.neurons.each do |neuron|
+           neuron.dests.each(&:learn!)
+         end
        end
-       @output_node = Noggin::Node::Output.new
-       @layers << [@output_node]
-       last_layer.each { |node| connect_nodes(node, output_node) }
+     end
+
+     def setup_layers
+       setup_input_layer
+       setup_hidden_layers
+       setup_output_layer
        @ready = true
      end
 
-     def edges
-       edges = []
-       queue = [output_node]
-       while queue.size != 0 do
-         node = queue.pop
-         node.origins.each do |edge|
-           edges << edge
-           queue << edge.origin
-         end
+     def setup_input_layer
+       input_layer = Layer.new
+       input_layer.neurons = Array.new(input_set_size) { Noggin::Neuron.new }
+       layers << input_layer
+     end
+
+     def setup_hidden_layers
+       options[:hidden_layer_size].times do |i|
+         hidden_layer = Layer.new
+         hidden_layer.neurons = Array.new(options[:hidden_layer_node_size]) { Noggin::Neuron.new }
+         Noggin::Layer.connect_layers layers.last, hidden_layer, options[:momentum]
+         layers << hidden_layer
        end
-       edges
      end
 
-     def connect_nodes origin, dest
-       edge = Noggin::Node::Edge.new origin: origin, dest: dest, momentum: options[:momentum]
-       origin.dests << edge
-       dest.origins << edge
+     def setup_output_layer
+       output_layer = Layer.new
+       output_layer.neurons = [ Noggin::Neuron.new ]
+       Noggin::Layer.connect_layers layers.last, output_layer, options[:momentum]
+       layers << output_layer
      end
 
-     def connect_layer origins, dests
-       origins.each do |origin|
-         dests.each do |dest|
-           connect_nodes origin, dest
+     def setup_input set
+       layers.first.neurons.each_with_index do |neuron, i|
+         neuron.forward_input = neuron.forward_output = set[i]
+         neuron.dests.each do |edge|
+           edge.forward_input = neuron.forward_output
+           edge.forward_activate!
          end
       end
      end
 
-     def update_input_nodes input
-       input_nodes.each_with_index do | node, i |
-         if i == (input.size)
-           node.output = 1
-         else
-           node.output = input[i]
-         end
+     def setup_backwards
+       layers.last.neurons.each do |output_neuron|
+         output_neuron.backward_activate_error!
+         output_neuron.backward_activate!
        end
      end
 
+     def setup_expected expected
+       layers.last.neurons.each { |neuron| neuron.expected = expected }
+     end
+
+     def input_set_size
+       forward_input.first[:input].size
+     end
+
      def pretty_print
-       Noggin::PrettyPrinter.print_network layers
+       Noggin::PrettyPrinter.print_network self
      end
 
    end
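The rewritten `train` loop runs until either `training_laps` is exhausted or the summed error over the data set drops below `min_training_error`, so the returned `training_laps_needed` can be far below the cap. One caveat visible in this diff: `pretty_print` still references `Noggin::PrettyPrinter`, which this release deletes and no longer requires, so calling it will raise a `NameError`. A sketch of the early-stopping behaviour, with illustrative thresholds:

``` Ruby
require 'noggin'

network = Noggin::Network.new training_laps: 50_000, min_training_error: 0.005

stats = network.train([
  { input: [0, 1], expected: 1 },
  { input: [1, 0], expected: 1 }
])

stats[:total_error]          # the final lap's summed error across the data set
stats[:training_laps_needed] # < 50_000 whenever the error threshold is hit first
```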
data/lib/noggin/neuron.rb ADDED
@@ -0,0 +1,47 @@
+ module Noggin
+   class Neuron
+
+     attr_accessor :origins, :dests, :derivative, :expected, :forward_input, :forward_output, :forward_error_output,
+                   :backward_input, :backward_output, :backward_error_output
+
+     def initialize
+       @origins = []
+       @dests = []
+     end
+
+     def forward_activate!
+       @forward_output = 1 / ( 1 + Math.exp(-1 * forward_input) )
+       dests.each do |edge|
+         edge.forward_input = @forward_output
+         edge.forward_activate!
+       end
+       forward_activate_error! unless @expected.nil?
+     end
+
+     def backward_activate!
+       @backward_output = @forward_output * ( 1 - @forward_output ) * @backward_input
+       origins.each do |edge|
+         edge.backward_input = @backward_output
+         edge.backward_activate!
+       end
+     end
+
+     def forward_activate_error!
+       @forward_error_output = 0.5 * (@expected - @forward_output)**2
+     end
+
+     def backward_activate_error!
+       @backward_error_output = @forward_output - @expected
+       @backward_input = @backward_error_output
+     end
+
+     class << self
+       def connect_neurons origin, dest, momentum
+         edge = Noggin::Edge.new origin: origin, dest: dest, momentum: momentum
+         origin.dests << edge
+         dest.origins << edge
+       end
+     end
+
+   end
+ end
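`Neuron` now owns the two core formulas: the logistic activation `1 / (1 + e^-x)` on the forward pass, and its derivative `out * (1 - out)` folded into the chain rule on the backward pass. The spec values below follow directly from them; for instance, with every weight stubbed to 0.66 and input `[1, 0]`, a hidden neuron receives `1*0.66 + 0*0.66 + 0.66 = 1.32` (the last term is the bias edge) and squashes it:

``` Ruby
forward_input = 1 * 0.66 + 0 * 0.66 + 1 * 0.66 # two input edges plus the bias edge
1 / (1 + Math.exp(-forward_input))             # => 0.7892..., as the spec expects
```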
data/lib/noggin/version.rb CHANGED
@@ -1,3 +1,3 @@
  module Noggin
-   VERSION = '0.0.4'
+   VERSION = '0.1.0'
  end
data/spec/network_spec.rb CHANGED
@@ -1,55 +1,155 @@
  require_relative '../lib/noggin'
 
- describe Noggin::Network do
+ describe :calculations do
+
+   MOCK_WEIGHT = 0.66
+
+   subject { Noggin::Network.new hidden_layer_node_size: 2, training_laps: 1 }
+
+   let(:input_neuron_one) { subject.layers[0].neurons[0] }
+
+   let(:input_neuron_two) { subject.layers[0].neurons[1] }
+
+   let(:hidden_bias) { subject.layers[1].bias }
+
+   let(:hidden_neuron_one) { subject.layers[1].neurons[0] }
+
+   let(:hidden_neuron_two) { subject.layers[1].neurons[1] }
+
+   let(:output_bias) { subject.layers[2].bias }
+
+   let(:output_neuron) { subject.layers[2].neurons[0] }
+
+   let(:input_neuron_one_edge_one) { input_neuron_one.dests[0] }
+
+   let(:input_neuron_one_edge_two) { input_neuron_one.dests[1] }
+
+   let(:input_neuron_two_edge_one) { input_neuron_two.dests[0] }
+
+   let(:input_neuron_two_edge_two) { input_neuron_two.dests[1] }
+
+   let(:hidden_neuron_one_edge_one) { hidden_neuron_one.dests[0] }
+
+   let(:hidden_neuron_two_edge_one) { hidden_neuron_two.dests[0] }
+
+   let(:hidden_bias_edge_one) { hidden_bias.dests[0] }
+
+   let(:hidden_bias_edge_two) { hidden_bias.dests[1] }
+
+   let(:output_bias_edge_one) { output_bias.dests[0] }
 
-   subject { Noggin::Network.new( max_training_laps: 1, learning_rate: 0.1, hidden_layer_size: 1, hidden_layer_node_size: 1 ) }
 
-   let(:input_node) { subject.layers[0].first }
-   let(:hidden_node) { subject.layers[1].first }
-   let(:output_node) { subject.layers[2].first }
-
    before do
-     allow_any_instance_of(Noggin::Node::Edge).to receive(:weight).and_return(0.66)
-     subject.train [{ input: [1], output: 0 }]
-   end
 
-   it 'sets up the network graph according to settings' do
-     expect(input_node.dests.first.dest).to eq hidden_node
-     expect(hidden_node.dests.first.dest).to eq output_node
-   end
+     allow_any_instance_of(Noggin::Edge).to receive(:weight).and_return(MOCK_WEIGHT)
 
-   it 'sets hidden layer size' do
-     expect(subject.layers.size).to eq(3)
-   end
+     subject.train( [{ input: [1,0], expected: 0.2 }] )
 
-   it 'sets hidden layer node size' do
-     expect(subject.layers[1].size).to eq(2)
    end
 
-   it 'backpropagates error' do
-     expect(input_node.dests.first.derivative).to be_within(0.00001).of(0.015098)
-     expect(hidden_node.dests.first.derivative).to be_within(0.00001).of(0.10851)
+   describe 'forward' do
+
+     it 'forwards data correctly' do
+
+       expect(input_neuron_one.forward_output).to eq(1)
+       expect(input_neuron_two.forward_output).to eq(0)
+
+       expect(input_neuron_one_edge_one.forward_input).to eq(1)
+       expect(input_neuron_one_edge_two.forward_input).to eq(1)
+       expect(input_neuron_two_edge_one.forward_input).to eq(0)
+       expect(input_neuron_two_edge_two.forward_input).to eq(0)
+
+       expect(input_neuron_one_edge_one.forward_output).to eq(0.66)
+       expect(input_neuron_one_edge_two.forward_output).to eq(0.66)
+       expect(input_neuron_two_edge_one.forward_output).to eq(0)
+       expect(input_neuron_two_edge_two.forward_output).to eq(0)
+
+       expect(hidden_neuron_one.forward_input).to be_within(0.001).of(1.3200)
+       expect(hidden_neuron_two.forward_input).to be_within(0.001).of(1.3200)
+
+       expect(hidden_neuron_one.forward_output).to be_within(0.001).of(0.7892)
+       expect(hidden_neuron_two.forward_output).to be_within(0.001).of(0.7892)
+
+       expect(hidden_neuron_one_edge_one.forward_input).to be_within(0.001).of(0.7892)
+       expect(hidden_neuron_two_edge_one.forward_input).to be_within(0.001).of(0.7892)
+
+       expect(hidden_neuron_one_edge_one.forward_output).to be_within(0.0001).of(0.5208)
+       expect(hidden_neuron_two_edge_one.forward_output).to be_within(0.0001).of(0.5208)
+
+       expect(output_neuron.forward_input).to be_within(0.001).of(1.7017)
+       expect(output_neuron.forward_output).to be_within(0.001).of(0.8458)
+       expect(output_neuron.forward_error_output).to be_within(0.001).of(0.2085)
+
+     end
+
+     it 'backwards data correctly' do
+
+       expect(output_neuron.backward_error_output).to be_within(0.001).of(0.6458)
+       expect(output_neuron.backward_input).to be_within(0.001).of(0.6458)
+       expect(output_neuron.backward_output).to be_within(0.001).of(0.0842)
+
+       expect(output_bias_edge_one.backward_input).to be_within(0.001).of(0.0842)
+       expect(output_bias_edge_one.derivative).to be_within(0.001).of(0.0842)
+
+       expect(hidden_neuron_one_edge_one.backward_input).to be_within(0.001).of(0.0842)
+       expect(hidden_neuron_two_edge_one.backward_input).to be_within(0.001).of(0.0842)
+
+       expect(hidden_neuron_one_edge_one.backward_output).to be_within(0.001).of(0.0556)
+       expect(hidden_neuron_two_edge_one.backward_output).to be_within(0.001).of(0.0556)
+
+       expect(hidden_neuron_one_edge_one.derivative).to be_within(0.001).of(0.0664)
+       expect(hidden_neuron_two_edge_one.derivative).to be_within(0.001).of(0.0664)
+
+       expect(hidden_neuron_one.backward_input).to be_within(0.0001).of(0.0556)
+       expect(hidden_neuron_two.backward_input).to be_within(0.0001).of(0.0556)
+
+       expect(hidden_neuron_one.backward_output).to be_within(0.001).of(0.0092)
+       expect(hidden_neuron_two.backward_output).to be_within(0.001).of(0.0092)
+
+       expect(hidden_bias_edge_one.backward_input).to be_within(0.001).of(0.0092)
+       expect(hidden_bias_edge_one.derivative).to be_within(0.001).of(0.0092)
+       expect(hidden_bias_edge_two.backward_input).to be_within(0.001).of(0.0092)
+       expect(hidden_bias_edge_two.derivative).to be_within(0.001).of(0.0092)
+
+       expect(input_neuron_one_edge_one.backward_input).to be_within(0.001).of(0.0092)
+       expect(input_neuron_one_edge_two.backward_input).to be_within(0.001).of(0.0092)
+       expect(input_neuron_two_edge_one.backward_input).to be_within(0.001).of(0.0092)
+       expect(input_neuron_two_edge_two.backward_input).to be_within(0.001).of(0.0092)
+
+       expect(input_neuron_one_edge_one.backward_output).to be_within(0.001).of(0.0061)
+       expect(input_neuron_one_edge_two.backward_output).to be_within(0.0001).of(0.0061)
+       expect(input_neuron_two_edge_one.backward_output).to be_within(0.0001).of(0.0061)
+       expect(input_neuron_two_edge_two.backward_output).to be_within(0.0001).of(0.0061)
+
+       expect(input_neuron_one_edge_one.derivative).to be_within(0.001).of(0.0093)
+       expect(input_neuron_one_edge_two.derivative).to be_within(0.001).of(0.0093)
+       expect(input_neuron_two_edge_one.derivative).to be_within(0.01).of(0.0)
+       expect(input_neuron_two_edge_two.derivative).to be_within(0.01).of(0.0)
+
+     end
+
+   end
   end
 
  end
 
-
  describe :xor do
 
-   subject { Noggin::Network.new( max_training_laps: 2000, learning_rate: 0.3, hidden_layer_size: 1, hidden_layer_node_size: 2, momentum: 5, log: true ) }
-
+   subject { Noggin::Network.new training_laps: 20000 }
 
    before do
      subject.train([
-       { input: [0, 0], output: 0 },
-       { input: [0, 1], output: 1 },
-       { input: [1, 0], output: 1 },
-       { input: [1, 1], output: 0 }
+       { input: [0, 0], expected: 0 },
+       { input: [0, 1], expected: 1 },
+       { input: [1, 0], expected: 1 },
+       { input: [1, 1], expected: 0 }
      ])
    end
 
-   it 'learns xor' do
-
+   it 'learns' do
+     expect(subject.run([0,1])).to be_within(0.05).of(1.0)
+     expect(subject.run([1,1])).to be_within(0.05).of(0.0)
+     expect(subject.run([0,0])).to be_within(0.05).of(0.0)
+     expect(subject.run([1,0])).to be_within(0.05).of(1.0)
   end
 
  end
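Stubbing `weight` to `MOCK_WEIGHT` makes every number in the `:calculations` spec reproducible by hand. Carrying the hidden outputs forward to the output neuron under the same 0.66 stub:

``` Ruby
hidden_out   = 1 / (1 + Math.exp(-1.32))         # => 0.7892 for both hidden neurons
output_input = 2 * (hidden_out * 0.66) + 0.66    # two hidden edges + the output bias edge
output_out   = 1 / (1 + Math.exp(-output_input)) # => 0.8458
0.5 * (0.2 - output_out)**2                      # => 0.2085, the forward_error_output
```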
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: the_noggin
  version: !ruby/object:Gem::Version
-   version: 0.0.4
+   version: 0.1.0
  platform: ruby
  authors:
  - Shawn
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2015-02-23 00:00:00.000000000 Z
+ date: 2015-03-12 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
    name: bundler
@@ -66,12 +66,11 @@ files:
  - README.md
  - Rakefile
  - lib/noggin.rb
+ - lib/noggin/bias.rb
+ - lib/noggin/edge.rb
+ - lib/noggin/layer.rb
  - lib/noggin/network.rb
- - lib/noggin/node/base.rb
- - lib/noggin/node/edge.rb
- - lib/noggin/node/input.rb
- - lib/noggin/node/output.rb
- - lib/noggin/pretty_printer.rb
+ - lib/noggin/neuron.rb
  - lib/noggin/version.rb
  - noggin.gemspec
  - spec/network_spec.rb
data/lib/noggin/node/base.rb DELETED
@@ -1,44 +0,0 @@
- module Noggin
-   module Node
-     class Base
-
-       attr_reader :origins
-       attr_reader :dests
-       attr_accessor :derivative
-       attr_accessor :cached_input
-       attr_accessor :cached_output
-
-       def initialize
-         @origins = []
-         @dests = []
-       end
-
-       def input
-         origins.inject(0) { |sum, edge | sum += edge.value }
-       end
-
-       def output
-         1 / ( 1 + Math.exp(-1 * input) )
-       end
-
-       def output_derivative
-         output * (1 - output)
-       end
-
-       def derivative_chain
-         derivative = output_derivative * dests.inject(0) { |sum, edge| sum += edge.derivative_chain }
-       end
-
-       def pretty_print
-         out = []
-         out << " ------"
-         dests.each do |edge|
-           out << "| | -EDGE--(#{edge.pretty_print}) "
-         end
-         out << " ------"
-         out
-       end
-
-     end
-   end
- end
data/lib/noggin/node/edge.rb DELETED
@@ -1,40 +0,0 @@
- module Noggin
-   module Node
-     class Edge
-
-       attr_accessor :origin
-       attr_accessor :dest
-       attr_accessor :weight
-       attr_accessor :previous_weight
-       attr_accessor :derivative
-       attr_accessor :momentum
-
-       def initialize origin: origin, dest: dest, weight: rand(0.20...0.80), momentum: 0.1
-         @origin = origin
-         @dest = dest
-         @weight = weight
-         @momentum = momentum
-         @previous_weight = 0
-       end
-
-       def input
-         origin.output
-       end
-
-       def value
-         origin.output * weight
-       end
-
-       def derivative_chain
-         derivative_chain = dest.derivative_chain
-         @derivative = input * derivative_chain
-         weight * derivative_chain
-       end
-
-       def pretty_print
-         "w: #{weight.round(6)}, d: #{derivative.round(6)}"
-       end
-
-     end
-   end
- end
data/lib/noggin/node/input.rb DELETED
@@ -1,7 +0,0 @@
- module Noggin
-   module Node
-     class Input < Noggin::Node::Base
-       attr_accessor :output
-     end
-   end
- end
data/lib/noggin/node/output.rb DELETED
@@ -1,31 +0,0 @@
- module Noggin
-   module Node
-     class Output < Noggin::Node::Base
-
-       attr_accessor :expected
-
-       def error
-         0.5 * (expected - output)**2
-       end
-
-       def error_derivative
-         output - expected
-       end
-
-       def derivative_chain
-         output_derivative * error_derivative
-       end
-
-       def pretty_print
-         out = []
-         out << " --------------"
-         out << "| ed: #{error_derivative.round(6)}"
-         out << "| d: #{derivative_chain.round(6)}"
-         out << "| e: #{error.round(6)}"
-         out << "| o: #{output.round(6)}"
-         out << " --------------"
-         out
-       end
-     end
-   end
- end
data/lib/noggin/pretty_printer.rb DELETED
@@ -1,27 +0,0 @@
- module Noggin
-   class PrettyPrinter
-     def self.print_network layers
-       print "\n"
-       grid = []
-       layers.each do |layer|
-         grid << col = []
-         layer.each do |node|
-           col << node.pretty_print
-         end
-         col.flatten!
-       end
-       grid[0].zip(*grid[1..-1]).each do |row|
-         row.each_with_index do |cell, col_i|
-           max_length = grid[col_i].max_by{|s| s.size }.size
-           if cell
-             room = max_length - cell.length
-             print cell
-             print " " * room
-             print " "
-           end
-         end
-         print "\n"
-       end
-     end
-   end
- end