reddavis-db_mlp 0.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
data/.autotest ADDED
@@ -0,0 +1 @@
1
+ require 'redgreen/autotest'
data/LICENSE ADDED
@@ -0,0 +1,20 @@
1
+ Copyright (c) 2009 reddavis
2
+
3
+ Permission is hereby granted, free of charge, to any person obtaining
4
+ a copy of this software and associated documentation files (the
5
+ "Software"), to deal in the Software without restriction, including
6
+ without limitation the rights to use, copy, modify, merge, publish,
7
+ distribute, sublicense, and/or sell copies of the Software, and to
8
+ permit persons to whom the Software is furnished to do so, subject to
9
+ the following conditions:
10
+
11
+ The above copyright notice and this permission notice shall be
12
+ included in all copies or substantial portions of the Software.
13
+
14
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
15
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
16
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
17
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
18
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
19
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
20
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
data/README.rdoc ADDED
@@ -0,0 +1,41 @@
1
+ = Multi-Layer Perceptron Neural Network
2
+
3
+ This is an SQLite-backed version of my previous MLP.
4
+
5
+ This version also provides training validation to prevent the MLP from overfitting.
6
+
7
+ This is the first release and, because of that, it's a bit slow; I'll probably try out Memcached or something else as its data store.
8
+
9
+ == Install
10
+
11
+ gem sources -a http://gems.github.com
12
+ sudo gem install reddavis-db_mlp
13
+
14
+ == How To Use
15
+ require 'rubygems'
16
+ require 'db_mlp'
17
+
18
+ a = DBMLP.new(path_to_db, :hidden_layers => [2], :output_nodes => 1, :inputs => 2)
19
+
20
+ training = [[[0,0], [0]], [[0,1], [1]], [[1,0], [1]], [[1,1], [0]]]
21
+ testing = [[[0,0], [0]], [[0,1], [1]], [[1,0], [1]], [[1,1], [0]]]
22
+ validation = [[[0,0], [0]], [[0,1], [1]], [[1,0], [1]], [[1,1], [0]]]
23
+
24
+ a.train(training, testing, validation, number_of_training_iterations)
25
+
26
+ puts "Test data"
27
+ puts "[0,0] = > #{a.feed_forward([0,0]).inspect}"
28
+ puts "[0,1] = > #{a.feed_forward([0,1]).inspect}"
29
+ puts "[1,0] = > #{a.feed_forward([1,0]).inspect}"
30
+ puts "[1,1] = > #{a.feed_forward([1,1]).inspect}"
31
+
32
+ == Benchmarks
33
+
34
+ The above example produces these times:
35
+
36
+        user     system      total        real
37
+ DBMLP 9.100000 0.040000 9.140000 ( 9.170872)
38
+
39
+ == Copyright
40
+
41
+ Copyright (c) 2009 Red Davis. See LICENSE for details.
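
A minimal sketch of the overfitting guard the README mentions (illustrative only, not one of the gem's files; the helper names are hypothetical). It mirrors the calculate_error / validate logic that appears later in lib/db_mlp.rb: training stops once the summed error on the validation set rises between checkpoints.

  # Squared error for one validation pattern (same 0.5 * sum-of-squares form as the gem uses).
  def pattern_error(target, output)
    0.5 * target.zip(output).map { |t, o| (t - o)**2 }.sum
  end

  # Stop once the latest validation error exceeds the previous checkpoint's error.
  def stop_training?(validation_errors)
    return false if validation_errors.size < 2
    validation_errors[-1] > validation_errors[-2]
  end

  # Usage with made-up checkpoint errors:
  errors = []
  [0.42, 0.31, 0.25, 0.27].each do |e|
    errors << e
    puts "validation error #{e} -> stop? #{stop_training?(errors)}"
  end
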
data/Rakefile ADDED
@@ -0,0 +1,57 @@
1
+ require 'rubygems'
2
+ require 'rake'
3
+
4
+ begin
5
+ require 'jeweler'
6
+ Jeweler::Tasks.new do |gem|
7
+ gem.name = "db_mlp"
8
+ gem.summary = %Q{Database backed Multi-Layer Perceptron Neural Network in Ruby}
9
+ gem.description = %Q{Database backed Multi-Layer Perceptron Neural Network in Ruby}
10
+ gem.email = "reddavis@gmail.com"
11
+ gem.homepage = "http://github.com/reddavis/dbmlp"
12
+ gem.authors = ["reddavis"]
13
+ # gem is a Gem::Specification... see http://www.rubygems.org/read/chapter/20 for additional settings
14
+ end
15
+
16
+ rescue LoadError
17
+ puts "Jeweler (or a dependency) not available. Install it with: sudo gem install jeweler"
18
+ end
19
+
20
+ require 'rake/testtask'
21
+ Rake::TestTask.new(:test) do |test|
22
+ test.libs << 'lib' << 'test'
23
+ test.pattern = 'test/**/*_test.rb'
24
+ test.verbose = true
25
+ end
26
+
27
+ begin
28
+ require 'rcov/rcovtask'
29
+ Rcov::RcovTask.new do |test|
30
+ test.libs << 'test'
31
+ test.pattern = 'test/**/*_test.rb'
32
+ test.verbose = true
33
+ end
34
+ rescue LoadError
35
+ task :rcov do
36
+ abort "RCov is not available. In order to run rcov, you must: sudo gem install spicycode-rcov"
37
+ end
38
+ end
39
+
40
+
41
+
42
+
43
+ task :default => :test
44
+
45
+ require 'rake/rdoctask'
46
+ Rake::RDocTask.new do |rdoc|
47
+ if File.exist?('VERSION')
48
+ version = File.read('VERSION')
49
+ else
50
+ version = ""
51
+ end
52
+
53
+ rdoc.rdoc_dir = 'rdoc'
54
+ rdoc.title = "mlp #{version}"
55
+ rdoc.rdoc_files.include('README*')
56
+ rdoc.rdoc_files.include('lib/**/*.rb')
57
+ end
data/VERSION ADDED
@@ -0,0 +1 @@
1
+ 0.0.0
data/benchmarks/data.rdb ADDED
Binary file
data/benchmarks/mlp_benchmark.rb ADDED
@@ -0,0 +1,22 @@
1
+ require 'rubygems'
2
+ require 'benchmarker'
3
+ require 'benchmark'
4
+ require File.dirname(__FILE__) + '/../lib/db_mlp'
5
+
6
+ Benchmarker.go('lib') do
7
+
8
+ db = "sqlite3://#{File.dirname(File.expand_path(__FILE__))}/data.rdb"
9
+
10
+
11
+ training = [[[0,0], [0]], [[0,1], [1]], [[1,0], [1]], [[1,1], [0]]]
12
+ testing = [[[0,0], [0]], [[0,1], [1]], [[1,0], [1]], [[1,1], [0]]]
13
+ validation = [[[0,0], [0]], [[0,1], [1]], [[1,0], [1]], [[1,1], [0]]]
14
+
15
+ Benchmark.bm do |x|
16
+ x.report do
17
+ a = DBMLP.new(db, :hidden_layers => [2], :output_nodes => 1, :inputs => 2)
18
+ a.train(training, testing, validation, 1000)
19
+ end
20
+ end
21
+
22
+ end
data/db_mlp.gemspec ADDED
@@ -0,0 +1,65 @@
1
+ # Generated by jeweler
2
+ # DO NOT EDIT THIS FILE
3
+ # Instead, edit Jeweler::Tasks in Rakefile, and run `rake gemspec`
4
+ # -*- encoding: utf-8 -*-
5
+
6
+ Gem::Specification.new do |s|
7
+ s.name = %q{db_mlp}
8
+ s.version = "0.0.0"
9
+
10
+ s.required_rubygems_version = Gem::Requirement.new(">= 0") if s.respond_to? :required_rubygems_version=
11
+ s.authors = ["reddavis"]
12
+ s.date = %q{2009-09-05}
13
+ s.description = %q{Database backed Multi-Layer Perceptron Neural Network in Ruby}
14
+ s.email = %q{reddavis@gmail.com}
15
+ s.extra_rdoc_files = [
16
+ "LICENSE",
17
+ "README.rdoc"
18
+ ]
19
+ s.files = [
20
+ ".autotest",
21
+ "LICENSE",
22
+ "README.rdoc",
23
+ "Rakefile",
24
+ "VERSION",
25
+ "benchmarks/data.rdb",
26
+ "benchmarks/mlp_benchmark.rb",
27
+ "db_mlp.gemspec",
28
+ "examples/backpropagation_example.rb",
29
+ "examples/data.rdb",
30
+ "examples/patterns_with_base_noise.rb",
31
+ "examples/patterns_with_noise.rb",
32
+ "examples/training_patterns.rb",
33
+ "examples/xor.rb",
34
+ "lib/db_mlp.rb",
35
+ "lib/models/neuron.rb",
36
+ "profiling/profile.rb",
37
+ "test/db/data.rdb",
38
+ "test/helper.rb",
39
+ "test/test_db_mlp.rb"
40
+ ]
41
+ s.homepage = %q{http://github.com/reddavis/dbmlp}
42
+ s.rdoc_options = ["--charset=UTF-8"]
43
+ s.require_paths = ["lib"]
44
+ s.rubygems_version = %q{1.3.5}
45
+ s.summary = %q{Database backed Multi-Layer Perceptron Neural Network in Ruby}
46
+ s.test_files = [
47
+ "test/helper.rb",
48
+ "test/test_db_mlp.rb",
49
+ "examples/backpropagation_example.rb",
50
+ "examples/patterns_with_base_noise.rb",
51
+ "examples/patterns_with_noise.rb",
52
+ "examples/training_patterns.rb",
53
+ "examples/xor.rb"
54
+ ]
55
+
56
+ if s.respond_to? :specification_version then
57
+ current_version = Gem::Specification::CURRENT_SPECIFICATION_VERSION
58
+ s.specification_version = 3
59
+
60
+ if Gem::Version.new(Gem::RubyGemsVersion) >= Gem::Version.new('1.2.0') then
61
+ else
62
+ end
63
+ else
64
+ end
65
+ end
data/examples/backpropagation_example.rb ADDED
@@ -0,0 +1,73 @@
1
+ # This test was taken from ai4r gem
2
+
3
+ # Author:: Sergio Fierens
4
+ # License:: MPL 1.1
5
+ # Project:: ai4r
6
+ # Url:: http://ai4r.rubyforge.org/
7
+ #
8
+ # You can redistribute it and/or modify it under the terms of
9
+ # the Mozilla Public License version 1.1 as published by the
10
+ # Mozilla Foundation at http://www.mozilla.org/MPL/MPL-1.1.txt
11
+
12
+ require File.dirname(__FILE__) + '/training_patterns'
13
+ require File.dirname(__FILE__) + '/patterns_with_noise'
14
+ require File.dirname(__FILE__) + '/patterns_with_base_noise'
15
+ require File.dirname(__FILE__) + '/../lib/mlp'
16
+ require 'benchmark'
17
+
18
+ times = Benchmark.measure do
19
+
20
+ srand 1
21
+
22
+ net = MLP.new(:hidden_layers => [2], :output_nodes => 3, :inputs => 256)
23
+
24
+ tr_with_noise = TRIANGLE_WITH_NOISE.flatten.collect { |input| input.to_f / 5.0}
25
+ sq_with_noise = SQUARE_WITH_NOISE.flatten.collect { |input| input.to_f / 5.0}
26
+ cr_with_noise = CROSS_WITH_NOISE.flatten.collect { |input| input.to_f / 5.0}
27
+
28
+ tr_with_base_noise = TRIANGLE_WITH_BASE_NOISE.flatten.collect { |input| input.to_f / 5.0}
29
+ sq_with_base_noise = SQUARE_WITH_BASE_NOISE.flatten.collect { |input| input.to_f / 5.0}
30
+ cr_with_base_noise = CROSS_WITH_BASE_NOISE.flatten.collect { |input| input.to_f / 5.0}
31
+
32
+ puts "Training the network, please wait."
33
+ 101.times do |i|
34
+ tr_input = TRIANGLE.flatten.collect { |input| input.to_f / 5.0}
35
+ sq_input = SQUARE.flatten.collect { |input| input.to_f / 5.0}
36
+ cr_input = CROSS.flatten.collect { |input| input.to_f / 5.0}
37
+
38
+ error1 = net.train(tr_input, [1,0,0])
39
+ error2 = net.train(sq_input, [0,1,0])
40
+ error3 = net.train(cr_input, [0,0,1])
41
+ puts "Error after iteration #{i}:\t#{error1} - #{error2} - #{error3}" if i%20 == 0
42
+ end
43
+
44
+ def result_label(result)
45
+ if result[0] > result[1] && result[0] > result[2]
46
+ "TRIANGLE"
47
+ elsif result[1] > result[2]
48
+ "SQUARE"
49
+ else
50
+ "CROSS"
51
+ end
52
+ end
53
+
54
+ tr_input = TRIANGLE.flatten.collect { |input| input.to_f / 5.0}
55
+ sq_input = SQUARE.flatten.collect { |input| input.to_f / 5.0}
56
+ cr_input = CROSS.flatten.collect { |input| input.to_f / 5.0}
57
+
58
+ puts "Training Examples"
59
+ puts "#{net.feed_forward(tr_input).inspect} => #{result_label(net.feed_forward(tr_input))}"
60
+ puts "#{net.feed_forward(sq_input).inspect} => #{result_label(net.feed_forward(sq_input))}"
61
+ puts "#{net.feed_forward(cr_input).inspect} => #{result_label(net.feed_forward(cr_input))}"
62
+ puts "Examples with noise"
63
+ puts "#{net.feed_forward(tr_with_noise).inspect} => #{result_label(net.feed_forward(tr_with_noise))}"
64
+ puts "#{net.feed_forward(sq_with_noise).inspect} => #{result_label(net.feed_forward(sq_with_noise))}"
65
+ puts "#{net.feed_forward(cr_with_noise).inspect} => #{result_label(net.feed_forward(cr_with_noise))}"
66
+ puts "Examples with base noise"
67
+ puts "#{net.feed_forward(tr_with_base_noise).inspect} => #{result_label(net.feed_forward(tr_with_base_noise))}"
68
+ puts "#{net.feed_forward(sq_with_base_noise).inspect} => #{result_label(net.feed_forward(sq_with_base_noise))}"
69
+ puts "#{net.feed_forward(cr_with_base_noise).inspect} => #{result_label(net.feed_forward(cr_with_base_noise))}"
70
+
71
+ end
72
+
73
+ puts "Elapsed time: #{times}"
data/examples/data.rdb ADDED
Binary file
data/examples/patterns_with_base_noise.rb ADDED
@@ -0,0 +1,68 @@
1
+ # Author:: Sergio Fierens
2
+ # License:: MPL 1.1
3
+ # Project:: ai4r
4
+ # Url:: http://ai4r.rubyforge.org/
5
+ #
6
+ # You can redistribute it and/or modify it under the terms of
7
+ # the Mozilla Public License version 1.1 as published by the
8
+ # Mozilla Foundation at http://www.mozilla.org/MPL/MPL-1.1.txt
9
+
10
+
11
+ TRIANGLE_WITH_BASE_NOISE = [
12
+ [ 3, 3, 3, 3, 3, 3, 3, 8, 8, 3, 3, 3, 3, 3, 3, 3],
13
+ [ 3, 3, 3, 3, 3, 3, 4, 10, 10, 4, 3, 3, 3, 3, 3, 3],
14
+ [ 3, 3, 3, 3, 3, 3, 8, 8, 8, 8, 3, 3, 3, 3, 3, 3],
15
+ [ 3, 3, 3, 3, 3, 4, 10, 4, 4, 10, 4, 3, 3, 3, 3, 3],
16
+ [ 3, 3, 3, 3, 3, 8, 8, 3, 3, 8, 8, 3, 3, 3, 3, 3],
17
+ [ 3, 3, 3, 3, 4, 10, 4, 3, 3, 4, 10, 4, 3, 3, 3, 3],
18
+ [ 3, 3, 3, 3, 8, 8, 3, 3, 3, 3, 8, 8, 3, 3, 3, 3],
19
+ [ 3, 3, 3, 4, 10, 4, 3, 3, 3, 3, 4, 10, 4, 3, 3, 3],
20
+ [ 3, 3, 3, 8, 8, 3, 3, 3, 3, 3, 3, 8, 8, 3, 3, 3],
21
+ [ 3, 3, 4, 10, 4, 3, 3, 3, 3, 3, 3, 4, 10, 4, 3, 3],
22
+ [ 3, 3, 8, 8, 3, 3, 3, 3, 3, 3, 3, 3, 8, 8, 3, 3],
23
+ [ 3, 4, 10, 4, 3, 3, 3, 3, 3, 3, 3, 3, 4, 10, 4, 3],
24
+ [ 3, 8, 8, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 8, 8, 3],
25
+ [ 4, 10, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 10, 4],
26
+ [ 8, 8, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 8, 8],
27
+ [10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10]
28
+ ]
29
+
30
+ SQUARE_WITH_BASE_NOISE = [
31
+ [10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10],
32
+ [10, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 10],
33
+ [10, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 10],
34
+ [10, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 10],
35
+ [10, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 10],
36
+ [10, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 10],
37
+ [10, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 10],
38
+ [10, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 10],
39
+ [10, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 10],
40
+ [10, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 10],
41
+ [10, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 10],
42
+ [10, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 10],
43
+ [10, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 10],
44
+ [10, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 10],
45
+ [10, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 10],
46
+ [10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10]
47
+
48
+ ]
49
+
50
+ CROSS_WITH_BASE_NOISE = [
51
+ [ 3, 3, 3, 3, 3, 3, 3, 8, 8, 3, 3, 3, 3, 3, 3, 3],
52
+ [ 3, 3, 3, 3, 3, 3, 3, 8, 8, 3, 3, 3, 3, 3, 3, 3],
53
+ [ 3, 3, 3, 3, 3, 3, 3, 8, 8, 3, 3, 3, 3, 3, 3, 3],
54
+ [ 3, 3, 3, 3, 3, 3, 3, 8, 8, 3, 3, 3, 3, 3, 3, 3],
55
+ [ 3, 3, 3, 3, 3, 3, 3, 8, 8, 3, 3, 3, 3, 3, 3, 3],
56
+ [ 3, 3, 3, 3, 3, 3, 3, 8, 8, 3, 3, 3, 3, 3, 3, 3],
57
+ [ 3, 3, 3, 3, 3, 3, 3, 8, 8, 3, 3, 3, 3, 3, 3, 3],
58
+ [ 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8],
59
+ [ 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8],
60
+ [ 3, 3, 3, 3, 3, 3, 3, 8, 8, 3, 3, 3, 3, 3, 3, 3],
61
+ [ 3, 3, 3, 3, 3, 3, 3, 8, 8, 3, 3, 3, 3, 3, 3, 3],
62
+ [ 3, 3, 3, 3, 3, 3, 3, 8, 8, 3, 3, 3, 3, 3, 3, 3],
63
+ [ 3, 3, 3, 3, 3, 3, 3, 8, 8, 3, 3, 3, 3, 3, 3, 3],
64
+ [ 3, 3, 3, 3, 3, 3, 3, 8, 8, 3, 3, 3, 3, 3, 3, 3],
65
+ [ 3, 3, 3, 3, 3, 3, 3, 8, 8, 3, 3, 3, 3, 3, 3, 3],
66
+ [ 3, 3, 3, 3, 3, 3, 3, 8, 8, 3, 3, 3, 3, 3, 3, 3]
67
+ ]
68
+
data/examples/patterns_with_noise.rb ADDED
@@ -0,0 +1,66 @@
1
+ # Author:: Sergio Fierens
2
+ # License:: MPL 1.1
3
+ # Project:: ai4r
4
+ # Url:: http://ai4r.rubyforge.org/
5
+ #
6
+ # You can redistribute it and/or modify it under the terms of
7
+ # the Mozilla Public License version 1.1 as published by the
8
+ # Mozilla Foundation at http://www.mozilla.org/MPL/MPL-1.1.txt
9
+
10
+ TRIANGLE_WITH_NOISE = [
11
+ [ 1, 0, 0, 0, 0, 0, 0, 1, 5, 0, 0, 1, 0, 0, 0, 0],
12
+ [ 0, 0, 0, 0, 3, 0, 1, 9, 9, 1, 0, 0, 0, 0, 3, 0],
13
+ [ 0, 3, 0, 0, 0, 0, 5, 1, 5, 3, 0, 0, 0, 0, 0, 7],
14
+ [ 0, 0, 0, 7, 0, 1, 9, 1, 1, 9, 1, 0, 0, 0, 3, 0],
15
+ [ 0, 0, 0, 0, 0, 3, 5, 0, 3, 5, 5, 0, 0, 0, 0, 0],
16
+ [ 0, 1, 0, 0, 1, 9, 1, 0, 1, 1, 9, 1, 0, 0, 0, 0],
17
+ [ 1, 0, 0, 0, 5, 5, 0, 0, 0, 0, 5, 5, 7, 0, 0, 3],
18
+ [ 0, 0, 3, 3, 9, 1, 0, 0, 1, 0, 1, 9, 1, 0, 0, 0],
19
+ [ 0, 0, 0, 5, 5, 0, 3, 7, 0, 0, 0, 5, 5, 0, 0, 0],
20
+ [ 0, 0, 1, 9, 1, 0, 0, 0, 0, 0, 0, 1, 9, 1, 0, 0],
21
+ [ 0, 0, 5, 5, 0, 0, 0, 0, 3, 0, 0, 0, 5, 5, 0, 0],
22
+ [ 0, 1, 9, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 9, 1, 0],
23
+ [ 0, 5, 5, 0, 3, 0, 0, 3, 0, 0, 0, 0, 0, 5, 5, 0],
24
+ [ 1, 9, 1, 0, 0, 3, 0, 0, 0, 1, 0, 0, 0, 1, 9, 1],
25
+ [ 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 5],
26
+ [10, 10, 10, 10, 1, 10, 10, 10, 10, 10, 1, 10, 10, 10, 10, 10]
27
+ ]
28
+
29
+ SQUARE_WITH_NOISE = [
30
+ [10, 3, 10, 10, 10, 6, 10, 10, 10, 10, 10, 4, 10, 10, 10, 10],
31
+ [10, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10],
32
+ [10, 0, 3, 0, 0, 0, 0, 7, 0, 6, 1, 0, 0, 0, 0, 0],
33
+ [10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10],
34
+ [10, 0, 4, 0, 4, 0, 0, 0, 1, 0, 3, 0, 0, 4, 0, 10],
35
+ [10, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
36
+ [10, 0, 0, 0, 3, 6, 0, 0, 1, 0, 0, 0, 0, 0, 0, 10],
37
+ [10, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 7, 0, 0, 10],
38
+ [10, 4, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10],
39
+ [10, 0, 7, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10],
40
+ [10, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 7, 10],
41
+ [10, 0, 3, 0, 4, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 10],
42
+ [10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 10],
43
+ [10, 0, 0, 6, 0, 0, 0, 7, 0, 0, 0, 7, 0, 0, 0, 10],
44
+ [10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10],
45
+ [10, 10, 10, 10, 3, 10, 10, 10, 10, 0, 10, 10, 1, 10, 1, 10]
46
+
47
+ ]
48
+
49
+ CROSS_WITH_NOISE = [
50
+ [ 0, 0, 0, 0, 0, 0, 3, 3, 5, 0, 3, 0, 0, 0, 1, 0],
51
+ [ 0, 1, 0, 0, 0, 1, 0, 5, 5, 0, 0, 0, 0, 0, 0, 0],
52
+ [ 0, 0, 0, 0, 0, 0, 0, 5, 5, 0, 0, 0, 3, 0, 0, 0],
53
+ [ 0, 0, 1, 8, 0, 0, 0, 5, 5, 0, 4, 0, 0, 0, 1, 0],
54
+ [ 0, 0, 0, 0, 0, 3, 0, 5, 0, 0, 0, 0, 1, 0, 0, 0],
55
+ [ 0, 0, 0, 8, 0, 0, 0, 5, 5, 0, 0, 0, 0, 0, 0, 1],
56
+ [ 0, 0, 0, 0, 0, 0, 0, 5, 5, 0, 3, 0, 0, 0, 0, 0],
57
+ [ 5, 5, 5, 8, 5, 3, 5, 5, 5, 5, 5, 5, 5, 5, 0, 5],
58
+ [ 5, 5, 5, 5, 5, 5, 5, 5, 1, 5, 5, 5, 5, 1, 0, 0],
59
+ [ 0, 0, 0, 8, 0, 0, 0, 4, 5, 0, 0, 0, 0, 0, 0, 0],
60
+ [ 0, 0, 0, 0, 0, 0, 0, 5, 5, 4, 0, 0, 0, 0, 0, 0],
61
+ [ 0, 0, 0, 0, 0, 4, 0, 5, 5, 0, 0, 0, 0, 0, 0, 0],
62
+ [ 4, 0, 0, 4, 0, 0, 0, 5, 5, 0, 0, 0, 1, 0, 0, 0],
63
+ [ 0, 0, 0, 0, 0, 1, 0, 5, 4, 4, 3, 0, 0, 0, 0, 0],
64
+ [ 0, 0, 0, 0, 0, 0, 0, 5, 5, 0, 0, 0, 10, 0, 0, 0],
65
+ [ 0, 0, 0, 0, 0, 0, 0, 5, 5, 0, 0, 0, 0, 0, 0, 0]
66
+ ]
data/examples/training_patterns.rb ADDED
@@ -0,0 +1,68 @@
1
+ # Author:: Sergio Fierens
2
+ # License:: MPL 1.1
3
+ # Project:: ai4r
4
+ # Url:: http://ai4r.rubyforge.org/
5
+ #
6
+ # You can redistribute it and/or modify it under the terms of
7
+ # the Mozilla Public License version 1.1 as published by the
8
+ # Mozilla Foundation at http://www.mozilla.org/MPL/MPL-1.1.txt
9
+
10
+
11
+ TRIANGLE = [
12
+ [ 0, 0, 0, 0, 0, 0, 0, 5, 5, 0, 0, 0, 0, 0, 0, 0],
13
+ [ 0, 0, 0, 0, 0, 0, 1, 9, 9, 1, 0, 0, 0, 0, 0, 0],
14
+ [ 0, 0, 0, 0, 0, 0, 5, 5, 5, 5, 0, 0, 0, 0, 0, 0],
15
+ [ 0, 0, 0, 0, 0, 1, 9, 1, 1, 9, 1, 0, 0, 0, 0, 0],
16
+ [ 0, 0, 0, 0, 0, 5, 5, 0, 0, 5, 5, 0, 0, 0, 0, 0],
17
+ [ 0, 0, 0, 0, 1, 9, 1, 0, 0, 1, 9, 1, 0, 0, 0, 0],
18
+ [ 0, 0, 0, 0, 5, 5, 0, 0, 0, 0, 5, 5, 0, 0, 0, 0],
19
+ [ 0, 0, 0, 1, 9, 1, 0, 0, 0, 0, 1, 9, 1, 0, 0, 0],
20
+ [ 0, 0, 0, 5, 5, 0, 0, 0, 0, 0, 0, 5, 5, 0, 0, 0],
21
+ [ 0, 0, 1, 9, 1, 0, 0, 0, 0, 0, 0, 1, 9, 1, 0, 0],
22
+ [ 0, 0, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 5, 5, 0, 0],
23
+ [ 0, 1, 9, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 9, 1, 0],
24
+ [ 0, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 5, 0],
25
+ [ 1, 9, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 9, 1],
26
+ [ 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 5],
27
+ [10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10]
28
+ ]
29
+
30
+ SQUARE = [
31
+ [10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10],
32
+ [10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10],
33
+ [10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10],
34
+ [10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10],
35
+ [10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10],
36
+ [10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10],
37
+ [10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10],
38
+ [10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10],
39
+ [10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10],
40
+ [10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10],
41
+ [10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10],
42
+ [10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10],
43
+ [10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10],
44
+ [10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10],
45
+ [10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10],
46
+ [10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10]
47
+
48
+ ]
49
+
50
+ CROSS = [
51
+ [ 0, 0, 0, 0, 0, 0, 0, 5, 5, 0, 0, 0, 0, 0, 0, 0],
52
+ [ 0, 0, 0, 0, 0, 0, 0, 5, 5, 0, 0, 0, 0, 0, 0, 0],
53
+ [ 0, 0, 0, 0, 0, 0, 0, 5, 5, 0, 0, 0, 0, 0, 0, 0],
54
+ [ 0, 0, 0, 0, 0, 0, 0, 5, 5, 0, 0, 0, 0, 0, 0, 0],
55
+ [ 0, 0, 0, 0, 0, 0, 0, 5, 5, 0, 0, 0, 0, 0, 0, 0],
56
+ [ 0, 0, 0, 0, 0, 0, 0, 5, 5, 0, 0, 0, 0, 0, 0, 0],
57
+ [ 0, 0, 0, 0, 0, 0, 0, 5, 5, 0, 0, 0, 0, 0, 0, 0],
58
+ [ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5],
59
+ [ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5],
60
+ [ 0, 0, 0, 0, 0, 0, 0, 5, 5, 0, 0, 0, 0, 0, 0, 0],
61
+ [ 0, 0, 0, 0, 0, 0, 0, 5, 5, 0, 0, 0, 0, 0, 0, 0],
62
+ [ 0, 0, 0, 0, 0, 0, 0, 5, 5, 0, 0, 0, 0, 0, 0, 0],
63
+ [ 0, 0, 0, 0, 0, 0, 0, 5, 5, 0, 0, 0, 0, 0, 0, 0],
64
+ [ 0, 0, 0, 0, 0, 0, 0, 5, 5, 0, 0, 0, 0, 0, 0, 0],
65
+ [ 0, 0, 0, 0, 0, 0, 0, 5, 5, 0, 0, 0, 0, 0, 0, 0],
66
+ [ 0, 0, 0, 0, 0, 0, 0, 5, 5, 0, 0, 0, 0, 0, 0, 0]
67
+ ]
68
+
data/examples/xor.rb ADDED
@@ -0,0 +1,25 @@
1
+ require File.dirname(__FILE__) + '/../lib/db_mlp'
2
+ require 'benchmark'
3
+
4
+ db = "sqlite3://#{File.dirname(File.expand_path(__FILE__))}/data.rdb"
5
+ a = DBMLP.new(db, :hidden_layers => [2], :output_nodes => 1, :inputs => 2)
6
+
7
+ times = Benchmark.measure do
8
+
9
+ srand 1
10
+
11
+ training = [[[0,0], [0]], [[0,1], [1]], [[1,0], [1]], [[1,1], [0]]]
12
+ testing = [[[0,0], [0]], [[0,1], [1]], [[1,0], [1]], [[1,1], [0]]]
13
+ validation = [[[0,0], [0]], [[0,1], [1]], [[1,0], [1]], [[1,1], [0]]]
14
+
15
+ a.train(training, testing, validation, 3001)
16
+
17
+ puts "Test data"
18
+ puts "[0,0] = > #{a.feed_forward([0,0]).inspect}"
19
+ puts "[0,1] = > #{a.feed_forward([0,1]).inspect}"
20
+ puts "[1,0] = > #{a.feed_forward([1,0]).inspect}"
21
+ puts "[1,1] = > #{a.feed_forward([1,1]).inspect}"
22
+
23
+ end
24
+
25
+ puts "Elapsed time: #{times}"
data/lib/db_mlp.rb ADDED
@@ -0,0 +1,214 @@
1
+ require 'rubygems'
2
+ require 'datamapper'
3
+ require File.dirname(__FILE__) + '/models/neuron'
4
+
5
+ class DBMLP
6
+
7
+ def initialize(db_path, options={})
8
+ @input_size = options[:inputs]
9
+ @hidden_layers = options[:hidden_layers]
10
+ @number_of_output_nodes = options[:output_nodes]
11
+ connect_to_db(db_path)
12
+ setup_network
13
+ end
14
+
15
+ def feed_forward(input)
16
+ @network.each_with_index do |layer, layer_index|
17
+ layer.each do |neuron|
18
+ if layer_index == 0
19
+ neuron.fire(input)
20
+ else
21
+ input = @network[layer_index-1].map {|x| x.last_output}
22
+ neuron.fire(input)
23
+ end
24
+ end
25
+ end
26
+ @network.last.map {|x| x.last_output}
27
+ end
28
+
29
+ def train(training, testing, validations, n=3000)
30
+ train_and_cross_validate(training, validations, n)
31
+ end
32
+
33
+ def inspect
34
+ @network
35
+ end
36
+
37
+ private
38
+
39
+ def train_and_cross_validate(training, validations, n)
40
+ errors = []
41
+ 1.upto(n) do |i|
42
+ if i % 200 == 0
43
+ if validate(validations)
44
+ puts "Stopping at #{i}"
45
+ break
46
+ end
47
+ end
48
+ training = training.sort_by { rand } #shaken or stirred?
49
+ training.each do |t|
50
+ input, target = t[0], t[1]
51
+ training_process(input, target)
52
+ #calculate_error(target)
53
+ end
54
+ end
55
+ save_all_neurons
56
+ end
57
+
58
+ def validate(validations)
59
+ @validations ||= []
60
+ sum = 0
61
+ validations.each do |v|
62
+ input, target = v[0], v[1]
63
+ feed_forward(input)
64
+ sum += calculate_error(target)
65
+ end
66
+ @validations << sum
67
+ return false if @validations.size < 2
68
+ #puts "#{@validations[-1]} #{@validations[-2]}"
69
+ @validations[-1] > @validations[-2] ? true : false
70
+ end
71
+
72
+ def training_process(input, targets)
73
+ # To go back we must go forward
74
+ feed_forward(input)
75
+ compute_deltas(targets)
76
+ update_weights(input)
77
+ end
78
+
79
+ def save_all_neurons
80
+ @network.each do |layer|
81
+ layer.each {|n| n.save!}
82
+ end
83
+ end
84
+
85
+ def update_weights(input)
86
+ reversed_network = @network.reverse
87
+ reversed_network.each_with_index do |layer, layer_index|
88
+ if layer_index == 0
89
+ update_output_weights(layer, layer_index, input)
90
+ else
91
+ update_hidden_weights(layer, layer_index, input)
92
+ end
93
+ end
94
+ end
95
+
96
+ def update_output_weights(layer, layer_index, input)
97
+ inputs = @hidden_layers.empty? ? input : @network[-2].map {|x| x.last_output}
98
+ layer.each do |neuron|
99
+ neuron.update_weight(inputs, 0.25)
100
+ end
101
+ end
102
+
103
+ def update_hidden_weights(layer, layer_index, original_input)
104
+ if layer_index == (@network.size - 1)
105
+ inputs = original_input.clone
106
+ else
107
+ inputs = @network.reverse[layer_index+1].map {|x| x.last_output}
108
+ end
109
+ layer.each do |neuron|
110
+ neuron.update_weight(inputs, 0.25)
111
+ end
112
+ end
113
+
114
+ def compute_deltas(targets)
115
+ reversed_network = @network.reverse
116
+ reversed_network.each_with_index do |layer, layer_index|
117
+ if layer_index == 0
118
+ compute_output_deltas(layer, targets)
119
+ else
120
+ compute_hidden_deltas(layer, targets)
121
+ end
122
+ end
123
+ end
124
+
125
+ def compute_output_deltas(layer, targets)
126
+ layer.each_with_index do |neuron, i|
127
+ output = neuron.last_output
128
+ neuron.delta = output * (1 - output) * (targets[i] - output)
129
+ end
130
+ end
131
+
132
+ def compute_hidden_deltas(layer, targets)
133
+ layer.each_with_index do |neuron, neuron_index|
134
+ error = 0
135
+ @network.last.each do |output_neuron|
136
+ error += output_neuron.delta * output_neuron.weights[neuron_index]
137
+ end
138
+ output = neuron.last_output
139
+ neuron.delta = output * (1 - output) * error
140
+ end
141
+ end
142
+
143
+ def calculate_error(targets)
144
+ outputs = @network.last.map {|x| x.last_output}
145
+ sum = 0
146
+ targets.each_with_index do |t, index|
147
+ sum += (t - outputs[index]) ** 2
148
+ end
149
+ 0.5 * sum
150
+ end
151
+
152
+ def setup_network
153
+ @network = []
154
+ if new_mlp?
155
+ wipe_db!
156
+ # Hidden Layers
157
+ @hidden_layers.each_with_index do |number_of_neurons, index|
158
+ layer = []
159
+ inputs = index == 0 ? @input_size : @hidden_layers[index-1].size
160
+ number_of_neurons.times { layer << Neuron.new(inputs, index) }
161
+ @network << layer
162
+ layer.each {|x| x.save!}
163
+ end
164
+ # Output layer
165
+ inputs = @hidden_layers.empty? ? @input_size : @hidden_layers.last
166
+ layer = []
167
+ @number_of_output_nodes.times { layer << Neuron.new(inputs, -1)}
168
+ @network << layer
169
+ layer.each {|x| x.save!}
170
+ else
171
+ # Problematic area???
172
+ @hidden_layers.each_index do |index|
173
+ layer = Neuron.all(:layer_index => index, :order => [:id.asc])
174
+ @network << layer
175
+ end
176
+ layer = Neuron.all(:layer_index => -1, :order => [:id.asc])
177
+ @network << layer
178
+ end
179
+ end
180
+
181
+ def wipe_db!
182
+ DataMapper.auto_migrate!
183
+ end
184
+
185
+ # Only one MLP per DB, so if this MLP's shape differs
186
+ # from what's in the DB, we empty it and create a new one;
187
+ # if it's the same, we carry on where we left off.
188
+ def new_mlp?
189
+ new_mlp = false
190
+ # Check hidden_layers
191
+ @hidden_layers.each_index do |i|
192
+ if Neuron.count(:layer_index => i) != @hidden_layers[i]
193
+ new_mlp = true
194
+ end
195
+ end
196
+ # Check output layer
197
+ if Neuron.count(:layer_index => -1) != @number_of_output_nodes
198
+ new_mlp = true
199
+ end
200
+
201
+ if Neuron.count != (@hidden_layers.size + 1)
202
+ new_mlp = true
203
+ end
204
+ new_mlp
205
+ end
206
+
207
+ def connect_to_db(db_path)
208
+ # DataMapper::Logger.new(STDOUT, :debug)
209
+ # DataObjects::Sqlite3.logger = DataObjects::Logger.new(STDOUT, 0)
210
+ DataMapper.setup(:default, db_path)
211
+ DataMapper.auto_upgrade!
212
+ end
213
+
214
+ end
data/lib/models/neuron.rb ADDED
@@ -0,0 +1,62 @@
1
+ class Neuron
2
+ include DataMapper::Resource
3
+ property :id, Integer, :serial => true
4
+ property :layer_index, Integer, :index => true
5
+ property :last_output, Float
6
+ property :db_weights, String
7
+ property :delta, Float
8
+
9
+ def initialize(number_of_inputs, layer_index)
10
+ create_weights(number_of_inputs)
11
+ self.layer_index = layer_index
12
+ end
13
+
14
+ def fire(input)
15
+ self.last_output = activation_function(input)
16
+ end
17
+
18
+ def update_weight(inputs, training_rate)
19
+ inputs << -1 # Add the bias
20
+ new_weights = weights
21
+ weights.each_index do |i|
22
+ new_weights[i] += training_rate * delta * inputs[i]
23
+ end
24
+ self.db_weights = new_weights.join(',')
25
+ end
26
+
27
+ def inspect
28
+ weights
29
+ end
30
+
31
+ def weights
32
+ db_weights.split(',').map {|x| x.to_f}
33
+ end
34
+
35
+ private
36
+
37
+ def activation_function(input)
38
+ sum = 0
39
+ input.each_with_index do |n, index|
40
+ # puts "index:#{index} weight: #{@weights[index]} input: #{n} input_size: #{input.size}"
41
+ sum += weights[index] * n
42
+ end
43
+ sum += weights.last * -1 #bias node
44
+ sigmoid_function(sum)
45
+ end
46
+
47
+ # g(h) = 1 / (1+exp(-B*h(j)))
48
+ def sigmoid_function(x)
49
+ 1 / (1+Math.exp(-1 * (x)))
50
+ end
51
+
52
+ def create_weights(number_of_inputs)
53
+ # Create random weights between 0 & 1
54
+ # Plus another one for the bias node
55
+ weights = []
56
+ (number_of_inputs + 1).times do
57
+ weights << (rand > 0.5 ? -rand : rand)
58
+ end
59
+ self.db_weights = weights.join(',')
60
+ end
61
+
62
+ end
data/profiling/profile.rb ADDED
@@ -0,0 +1,18 @@
1
+ require File.dirname(__FILE__) + '/../lib/db_mlp'
2
+ require 'rubygems'
3
+ require 'ruby-prof'
4
+
5
+ db = "sqlite3://#{File.dirname(File.expand_path(__FILE__))}/../benchmarks/data.rdb"
6
+
7
+ a = DBMLP.new(db, :hidden_layers => [2], :output_nodes => 1, :inputs => 2)
8
+
9
+ @training = [[[0,0], [0]], [[0,1], [1]], [[1,0], [1]], [[1,1], [0]]]
10
+ @testing = [[[0,0], [0]], [[0,1], [1]], [[1,0], [1]], [[1,1], [0]]]
11
+ @validation = [[[0,0], [0]], [[0,1], [1]], [[1,0], [1]], [[1,1], [0]]]
12
+
13
+ result = RubyProf.profile do
14
+ a = a.train(@training, @testing, @validation, 100)
15
+ end
16
+
17
+ printer = RubyProf::FlatPrinter.new(result)
18
+ printer.print(STDOUT, 0)
data/test/db/data.rdb ADDED
Binary file
data/test/helper.rb ADDED
@@ -0,0 +1,10 @@
1
+ require 'rubygems'
2
+ require 'test/unit'
3
+ require 'shoulda'
4
+
5
+ $LOAD_PATH.unshift(File.join(File.dirname(__FILE__), '..', 'lib'))
6
+ $LOAD_PATH.unshift(File.dirname(__FILE__))
7
+ require 'db_mlp'
8
+
9
+ class Test::Unit::TestCase
10
+ end
data/test/test_db_mlp.rb ADDED
@@ -0,0 +1,149 @@
1
+ require 'helper'
2
+
3
+ class TestDBMLP < Test::Unit::TestCase
4
+ context "DBMLP Instance" do
5
+ setup do
6
+ set_data_variables
7
+ @db_path = "sqlite3://#{File.dirname(File.expand_path(__FILE__))}/db/data.rdb"
8
+ end
9
+
10
+ should "contain 4 layers" do
11
+ a = DBMLP.new(@db_path, :hidden_layers => [2, 2, 2], :output_nodes => 2, :inputs => 2)
12
+ assert_equal 4, a.inspect.size
13
+ end
14
+
15
+ should "contain saved 3 layers" do
16
+ DBMLP.new(@db_path, :hidden_layers => [2, 2], :output_nodes => 2, :inputs => 2)
17
+ b = Neuron.all.map {|x| x.layer_index}.uniq.size
18
+ assert_equal 3, b
19
+ end
20
+
21
+ should "contain 1 output node" do
22
+ DBMLP.new(@db_path, :hidden_layers => [2], :output_nodes =>4, :inputs => 2)
23
+ a = DBMLP.new(@db_path, :hidden_layers => [2], :output_nodes => 1, :inputs => 2)
24
+ assert_equal 1, a.inspect.last.size
25
+ end
26
+
27
+ should "feed forward and set all neurons last outputs" do
28
+ a = DBMLP.new(@db_path, :hidden_layers => [2], :output_nodes => 2, :inputs => 2)
29
+ a.feed_forward([0,1])
30
+ b = a.inspect.inject([]) do |array, n|
31
+ array << n.map {|x| x.last_output}
32
+ end
33
+ b.flatten!
34
+ assert !b.include?(nil)
35
+ end
36
+
37
+ should "return an array after feed forward" do
38
+ a = DBMLP.new(@db_path, :hidden_layers => [2], :output_nodes => 2, :inputs => 2)
39
+ assert_kind_of Array, a.feed_forward([0,1])
40
+ end
41
+
42
+ should "save its neurons deltas" do
43
+ a = DBMLP.new(@db_path, :hidden_layers => [2], :output_nodes => 1, :inputs => 2)
44
+ a.train(@training, @testing, @validation, 1)
45
+ b = Neuron.all(:delta.not => nil)
46
+ assert !b.empty?
47
+ end
48
+
49
+ should "save its output neurons weights" do
50
+ a = DBMLP.new(@db_path, :hidden_layers => [2], :output_nodes => 1, :inputs => 2)
51
+ before = Neuron.first(:layer_index => -1).weights.inject([]) do |array, n|
52
+ array << n
53
+ end
54
+
55
+ a.train(@training, @testing, @validation, 1)
56
+
57
+ after = Neuron.first(:layer_index => -1).weights.inject([]) do |array, n|
58
+ array << n
59
+ end
60
+ assert_not_equal before, after
61
+ end
62
+
63
+ should "update its hidden neurons weights" do
64
+ a = DBMLP.new(@db_path, :hidden_layers => [2], :output_nodes => 1, :inputs => 2)
65
+ before = Neuron.first(:layer_index => 0).weights.inject([]) do |array, n|
66
+ array << n
67
+ end
68
+
69
+ a.train(@training, @testing, @validation, 1)
70
+ after = Neuron.first(:layer_index => 0).weights.inject([]) do |array, n|
71
+ array << n
72
+ end
73
+ assert_not_equal before, after
74
+ end
75
+
76
+ should "return the error (array) after training" do
77
+ a = DBMLP.new(@db_path, :hidden_layers => [2], :output_nodes => 1, :inputs => 2)
78
+ error = a.train(@training, @testing, @validation, 1)
79
+ assert_kind_of Array, error
80
+ end
81
+ end
82
+
83
+ context "Training Process" do
84
+ setup do
85
+ set_data_variables
86
+ db_path = "sqlite3://#{File.dirname(File.expand_path(__FILE__))}/db/data.rdb"
87
+ @a = DBMLP.new(db_path, :hidden_layers => [2], :output_nodes => 1, :inputs => 2)
88
+ end
89
+
90
+ should "go through whole training process" do
91
+ assert @a.train(@training, @testing, @validation, 2)
92
+ end
93
+ end
94
+
95
+ context "DB for a new mlp" do
96
+ setup do
97
+ db_path = "sqlite3://#{File.dirname(File.expand_path(__FILE__))}/db/data.rdb"
98
+ @a = DBMLP.new(db_path, :hidden_layers => [2, 2], :output_nodes => 2, :inputs => 2)
99
+ end
100
+
101
+ should "save 6 neurons" do
102
+ assert_equal 6, Neuron.count
103
+ end
104
+
105
+ should "save 2 hidden neurons in the first hidden layer" do
106
+ assert_equal 2, Neuron.count(:layer_index => 0)
107
+ end
108
+ end
109
+
110
+ context "Neuron" do
111
+ setup do
112
+ @db_path = "sqlite3://#{File.dirname(File.expand_path(__FILE__))}/db/data.rdb"
113
+ end
114
+
115
+ should "have 2 weights on output neuron" do
116
+ a = DBMLP.new(@db_path, :hidden_layers => [1], :output_nodes => 1, :inputs => 2)
117
+ assert_equal 2, a.inspect.last.last.weights.size
118
+ end
119
+
120
+ should "have saved 2 weights on output neuron" do
121
+ a = DBMLP.new(@db_path, :hidden_layers => [1], :output_nodes => 1, :inputs => 2)
122
+ assert_equal 2, Neuron.first(:layer_index => -1).weights.size
123
+ end
124
+
125
+ should "have 3 weights on output neuron" do
126
+ a = DBMLP.new(@db_path, :hidden_layers => [2], :output_nodes => 1, :inputs => 2)
127
+ assert_equal 3, a.inspect.last.last.weights.size
128
+ end
129
+
130
+ should "have saved 3 weights on output neuron" do
131
+ a = DBMLP.new(@db_path, :hidden_layers => [2], :output_nodes => 1, :inputs => 2)
132
+ assert_equal 3, Neuron.first(:layer_index => -1).weights.size
133
+ end
134
+
135
+ should "create a hidden neuron with 3 weights" do
136
+ a = DBMLP.new(@db_path, :hidden_layers => [2], :output_nodes => 1, :inputs => 2)
137
+ assert_equal 3, a.inspect.first.last.weights.size
138
+ end
139
+ end
140
+
141
+ private
142
+
143
+ def set_data_variables
144
+ @training = [[[0,0], [0]], [[0,1], [1]], [[1,0], [1]], [[1,1], [0]]]
145
+ @testing = [[[0,0], [0]], [[0,1], [1]], [[1,0], [1]], [[1,1], [0]]]
146
+ @validation = [[[0,0], [0]], [[0,1], [1]], [[1,0], [1]], [[1,1], [0]]]
147
+ end
148
+
149
+ end
metadata ADDED
@@ -0,0 +1,79 @@
1
+ --- !ruby/object:Gem::Specification
2
+ name: reddavis-db_mlp
3
+ version: !ruby/object:Gem::Version
4
+ version: 0.0.0
5
+ platform: ruby
6
+ authors:
7
+ - reddavis
8
+ autorequire:
9
+ bindir: bin
10
+ cert_chain: []
11
+
12
+ date: 2009-09-05 00:00:00 -07:00
13
+ default_executable:
14
+ dependencies: []
15
+
16
+ description: Database backed Multi-Layer Perceptron Neural Network in Ruby
17
+ email: reddavis@gmail.com
18
+ executables: []
19
+
20
+ extensions: []
21
+
22
+ extra_rdoc_files:
23
+ - LICENSE
24
+ - README.rdoc
25
+ files:
26
+ - .autotest
27
+ - LICENSE
28
+ - README.rdoc
29
+ - Rakefile
30
+ - VERSION
31
+ - benchmarks/data.rdb
32
+ - benchmarks/mlp_benchmark.rb
33
+ - db_mlp.gemspec
34
+ - examples/backpropagation_example.rb
35
+ - examples/data.rdb
36
+ - examples/patterns_with_base_noise.rb
37
+ - examples/patterns_with_noise.rb
38
+ - examples/training_patterns.rb
39
+ - examples/xor.rb
40
+ - lib/db_mlp.rb
41
+ - lib/models/neuron.rb
42
+ - profiling/profile.rb
43
+ - test/db/data.rdb
44
+ - test/helper.rb
45
+ - test/test_db_mlp.rb
46
+ has_rdoc: false
47
+ homepage: http://github.com/reddavis/dbmlp
48
+ post_install_message:
49
+ rdoc_options:
50
+ - --charset=UTF-8
51
+ require_paths:
52
+ - lib
53
+ required_ruby_version: !ruby/object:Gem::Requirement
54
+ requirements:
55
+ - - ">="
56
+ - !ruby/object:Gem::Version
57
+ version: "0"
58
+ version:
59
+ required_rubygems_version: !ruby/object:Gem::Requirement
60
+ requirements:
61
+ - - ">="
62
+ - !ruby/object:Gem::Version
63
+ version: "0"
64
+ version:
65
+ requirements: []
66
+
67
+ rubyforge_project:
68
+ rubygems_version: 1.2.0
69
+ signing_key:
70
+ specification_version: 3
71
+ summary: Database backed Multi-Layer Perceptron Neural Network in Ruby
72
+ test_files:
73
+ - test/helper.rb
74
+ - test/test_db_mlp.rb
75
+ - examples/backpropagation_example.rb
76
+ - examples/patterns_with_base_noise.rb
77
+ - examples/patterns_with_noise.rb
78
+ - examples/training_patterns.rb
79
+ - examples/xor.rb