db_mlp 0.0.2

data/.autotest ADDED
@@ -0,0 +1 @@
1
+ require 'redgreen/autotest'
data/.gitignore ADDED
@@ -0,0 +1,3 @@
1
+ *.rdb
2
+ test_results.txt
3
+ pkg/*
data/LICENSE ADDED
@@ -0,0 +1,20 @@
1
+ Copyright (c) 2009 reddavis
2
+
3
+ Permission is hereby granted, free of charge, to any person obtaining
4
+ a copy of this software and associated documentation files (the
5
+ "Software"), to deal in the Software without restriction, including
6
+ without limitation the rights to use, copy, modify, merge, publish,
7
+ distribute, sublicense, and/or sell copies of the Software, and to
8
+ permit persons to whom the Software is furnished to do so, subject to
9
+ the following conditions:
10
+
11
+ The above copyright notice and this permission notice shall be
12
+ included in all copies or substantial portions of the Software.
13
+
14
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
15
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
16
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
17
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
18
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
19
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
20
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
data/README.rdoc ADDED
@@ -0,0 +1,51 @@
1
+ = Multi-Layer Perceptron Neural Network
2
+
3
+ This is an SQLite-backed version of my previous MLP.
4
+
5
+ This version also provides training validation to prevent the MLP from overfitting.
6
+
7
+ This is the first release and it's still a bit slow; I'll probably try out Memcached or another data store in a future version.
8
+
9
+ == Install
10
+
11
+ gem sources -a http://gems.github.com
12
+ sudo gem install reddavis-db_mlp
13
+
14
+ == How To Use
15
+
16
+ require 'rubygems'
17
+ require 'db_mlp'
18
+
19
+ a = DBMLP.new(path_to_db, :hidden_layers => [2], :output_nodes => 1, :inputs => 2)
20
+
21
+ training = [[[0,0], [0]], [[0,1], [1]], [[1,0], [1]], [[1,1], [0]]]
22
+ testing = [[[0,0], [0]], [[0,1], [1]], [[1,0], [1]], [[1,1], [0]]]
23
+ validation = [[[0,0], [0]], [[0,1], [1]], [[1,0], [1]], [[1,1], [0]]]
24
+
25
+ a.train(training, testing, validation, number_of_training_iterations)
26
+
27
+ puts "Test data"
28
+ puts "[0,0] => #{a.feed_forward([0,0]).inspect}"
29
+ puts "[0,1] => #{a.feed_forward([0,1]).inspect}"
30
+ puts "[1,0] => #{a.feed_forward([1,0]).inspect}"
31
+ puts "[1,1] => #{a.feed_forward([1,1]).inspect}"
32
+
33
+ == Test Reports
34
+
35
+ If you pass a report path as the fifth argument to train, the MLP will produce a test report. At the end of training it feeds each of the test examples forward again and writes one line per example containing the index, the input attributes, the target, the result and the error. Here's an example:
36
+
37
+ ID: 0 Attributes: [0, 0] Target: 0 Results: 0.387170168937349 Error: 0.0749503698574878
38
+ ID: 1 Attributes: [0, 1] Target: 1 Results: 0.365112645315455 Error: 0.20154097656917
39
+ ID: 2 Attributes: [1, 0] Target: 1 Results: 0.40477576498281 Error: 0.1771459449759
40
+ ID: 3 Attributes: [1, 1] Target: 0 Results: 0.382819699838249 Error: 0.0732754612921235
41
+
42
+ == Benchmarks
43
+
44
+ The above example produces these times (3000 iterations)
45
+
46
+       user     system      total        real
47
+ DBMLP  9.460000   0.150000   9.610000  ( 10.322743)
48
+
49
+ == Copyright
50
+
51
+ Copyright (c) 2009 Red Davis. See LICENSE for details.
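Editor's note: a minimal sketch of requesting a test report. The sqlite3:// path and the test_results.txt file name are placeholders; the fifth argument to train is the optional report path defined in lib/db_mlp.rb.

  require 'rubygems'
  require 'db_mlp'

  xor = [[[0,0], [0]], [[0,1], [1]], [[1,0], [1]], [[1,1], [0]]]
  a = DBMLP.new("sqlite3://#{Dir.pwd}/xor.rdb",
                :hidden_layers => [2], :output_nodes => 1, :inputs => 2)

  # Passing a path as the fifth argument writes the report there after training.
  a.train(xor, xor, xor, 3000, 'test_results.txt')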
data/Rakefile ADDED
@@ -0,0 +1,57 @@
1
+ require 'rubygems'
2
+ require 'rake'
3
+
4
+ begin
5
+ require 'jeweler'
6
+ Jeweler::Tasks.new do |gem|
7
+ gem.name = "db_mlp"
8
+ gem.summary = %Q{Database backed Multi-Layer Perceptron Neural Network in Ruby}
9
+ gem.description = %Q{Database backed Multi-Layer Perceptron Neural Network in Ruby}
10
+ gem.email = "reddavis@gmail.com"
11
+ gem.homepage = "http://github.com/reddavis/dbmlp"
12
+ gem.authors = ["reddavis"]
13
+ # gem is a Gem::Specification... see http://www.rubygems.org/read/chapter/20 for additional settings
14
+ end
15
+ Jeweler::GemcutterTasks.new
16
+ rescue LoadError
17
+ puts "Jeweler (or a dependency) not available. Install it with: sudo gem install jeweler"
18
+ end
19
+
20
+ require 'rake/testtask'
21
+ Rake::TestTask.new(:test) do |test|
22
+ test.libs << 'lib' << 'test'
23
+ test.pattern = 'test/**/test_*.rb'
24
+ test.verbose = true
25
+ end
26
+
27
+ begin
28
+ require 'rcov/rcovtask'
29
+ Rcov::RcovTask.new do |test|
30
+ test.libs << 'test'
31
+ test.pattern = 'test/**/test_*.rb'
32
+ test.verbose = true
33
+ end
34
+ rescue LoadError
35
+ task :rcov do
36
+ abort "RCov is not available. In order to run rcov, you must: sudo gem install spicycode-rcov"
37
+ end
38
+ end
39
+
40
+
41
+
42
+
43
+ task :default => :test
44
+
45
+ require 'rake/rdoctask'
46
+ Rake::RDocTask.new do |rdoc|
47
+ if File.exist?('VERSION')
48
+ version = File.read('VERSION')
49
+ else
50
+ version = ""
51
+ end
52
+
53
+ rdoc.rdoc_dir = 'rdoc'
54
+ rdoc.title = "mlp #{version}"
55
+ rdoc.rdoc_files.include('README*')
56
+ rdoc.rdoc_files.include('lib/**/*.rb')
57
+ end
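Editor's note: for reference, the tasks defined in this Rakefile are run in the usual way (shoulda, rcov and rdoc need to be installed for the corresponding tasks to work):

  rake test    # runs the Rake::TestTask defined above (also the default task)
  rake rcov    # coverage report via Rcov::RcovTask
  rake rdoc    # builds the RDoc into rdoc/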
data/VERSION ADDED
@@ -0,0 +1 @@
1
+ 0.0.2
data/benchmarks/data.rdb ADDED
Binary file
data/benchmarks/mlp_benchmark.rb ADDED
@@ -0,0 +1,22 @@
1
+ require 'rubygems'
2
+ require 'benchmarker'
3
+ require 'benchmark'
4
+ require File.dirname(__FILE__) + '/../lib/db_mlp'
5
+
6
+ Benchmarker.go('lib') do
7
+
8
+ db = "sqlite3://#{File.dirname(File.expand_path(__FILE__))}/data.rdb"
9
+
10
+
11
+ training = [[[0,0], [0]], [[0,1], [1]], [[1,0], [1]], [[1,1], [0]]]
12
+ testing = [[[0,0], [0]], [[0,1], [1]], [[1,0], [1]], [[1,1], [0]]]
13
+ validation = [[[0,0], [0]], [[0,1], [1]], [[1,0], [1]], [[1,1], [0]]]
14
+
15
+ Benchmark.bm do |x|
16
+ x.report do
17
+ a = DBMLP.new(db, :hidden_layers => [2], :output_nodes => 1, :inputs => 2)
18
+ a.train(training, testing, validation, 10)
19
+ end
20
+ end
21
+
22
+ end
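Editor's note: as a rough point of comparison, the same benchmark can be pointed at an in-memory SQLite database instead of the on-disk data.rdb file. This is only a sketch: it assumes the script sits next to mlp_benchmark.rb in benchmarks/ and that the do_sqlite3 adapter accepts the sqlite3::memory: URI.

  require 'rubygems'
  require 'benchmark'
  require File.dirname(__FILE__) + '/../lib/db_mlp'

  xor = [[[0,0], [0]], [[0,1], [1]], [[1,0], [1]], [[1,1], [0]]]

  Benchmark.bm do |x|
    x.report('memory') do
      a = DBMLP.new('sqlite3::memory:',
                    :hidden_layers => [2], :output_nodes => 1, :inputs => 2)
      a.train(xor, xor, xor, 10)   # same 10 iterations as the file-backed run above
    end
  end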
data/db_mlp.gemspec ADDED
@@ -0,0 +1,69 @@
1
+ # Generated by jeweler
2
+ # DO NOT EDIT THIS FILE
3
+ # Instead, edit Jeweler::Tasks in Rakefile, and run `rake gemspec`
4
+ # -*- encoding: utf-8 -*-
5
+
6
+ Gem::Specification.new do |s|
7
+ s.name = %q{db_mlp}
8
+ s.version = "0.0.2"
9
+
10
+ s.required_rubygems_version = Gem::Requirement.new(">= 0") if s.respond_to? :required_rubygems_version=
11
+ s.authors = ["reddavis"]
12
+ s.date = %q{2009-10-12}
13
+ s.description = %q{Database backed Multi-Layer Perceptron Neural Network in Ruby}
14
+ s.email = %q{reddavis@gmail.com}
15
+ s.extra_rdoc_files = [
16
+ "LICENSE",
17
+ "README.rdoc"
18
+ ]
19
+ s.files = [
20
+ ".autotest",
21
+ ".gitignore",
22
+ "LICENSE",
23
+ "README.rdoc",
24
+ "Rakefile",
25
+ "VERSION",
26
+ "benchmarks/data.rdb",
27
+ "benchmarks/mlp_benchmark.rb",
28
+ "db_mlp.gemspec",
29
+ "examples/backpropagation_example.rb",
30
+ "examples/data.rdb",
31
+ "examples/patterns_with_base_noise.rb",
32
+ "examples/patterns_with_noise.rb",
33
+ "examples/training_patterns.rb",
34
+ "examples/xor.rb",
35
+ "lib/db_mlp.rb",
36
+ "lib/models/neuron.rb",
37
+ "lib/modules/create_test_results.rb",
38
+ "lib/modules/db.rb",
39
+ "lib/modules/training.rb",
40
+ "profiling/profile.rb",
41
+ "test/db/test.txt",
42
+ "test/helper.rb",
43
+ "test/test_db_mlp.rb"
44
+ ]
45
+ s.homepage = %q{http://github.com/reddavis/dbmlp}
46
+ s.rdoc_options = ["--charset=UTF-8"]
47
+ s.require_paths = ["lib"]
48
+ s.rubygems_version = %q{1.3.5}
49
+ s.summary = %q{Database backed Multi-Layer Perceptron Neural Network in Ruby}
50
+ s.test_files = [
51
+ "test/helper.rb",
52
+ "test/test_db_mlp.rb",
53
+ "examples/backpropagation_example.rb",
54
+ "examples/patterns_with_base_noise.rb",
55
+ "examples/patterns_with_noise.rb",
56
+ "examples/training_patterns.rb",
57
+ "examples/xor.rb"
58
+ ]
59
+
60
+ if s.respond_to? :specification_version then
61
+ current_version = Gem::Specification::CURRENT_SPECIFICATION_VERSION
62
+ s.specification_version = 3
63
+
64
+ if Gem::Version.new(Gem::RubyGemsVersion) >= Gem::Version.new('1.2.0') then
65
+ else
66
+ end
67
+ else
68
+ end
69
+ end
data/examples/backpropagation_example.rb ADDED
@@ -0,0 +1,73 @@
1
+ # This test was taken from ai4r gem
2
+
3
+ # Author:: Sergio Fierens
4
+ # License:: MPL 1.1
5
+ # Project:: ai4r
6
+ # Url:: http://ai4r.rubyforge.org/
7
+ #
8
+ # You can redistribute it and/or modify it under the terms of
9
+ # the Mozilla Public License version 1.1 as published by the
10
+ # Mozilla Foundation at http://www.mozilla.org/MPL/MPL-1.1.txt
11
+
12
+ require File.dirname(__FILE__) + '/training_patterns'
13
+ require File.dirname(__FILE__) + '/patterns_with_noise'
14
+ require File.dirname(__FILE__) + '/patterns_with_base_noise'
15
+ require File.dirname(__FILE__) + '/../lib/mlp'
16
+ require 'benchmark'
17
+
18
+ times = Benchmark.measure do
19
+
20
+ srand 1
21
+
22
+ net = MLP.new(:hidden_layers => [2], :output_nodes => 3, :inputs => 256)
23
+
24
+ tr_with_noise = TRIANGLE_WITH_NOISE.flatten.collect { |input| input.to_f / 5.0}
25
+ sq_with_noise = SQUARE_WITH_NOISE.flatten.collect { |input| input.to_f / 5.0}
26
+ cr_with_noise = CROSS_WITH_NOISE.flatten.collect { |input| input.to_f / 5.0}
27
+
28
+ tr_with_base_noise = TRIANGLE_WITH_BASE_NOISE.flatten.collect { |input| input.to_f / 5.0}
29
+ sq_with_base_noise = SQUARE_WITH_BASE_NOISE.flatten.collect { |input| input.to_f / 5.0}
30
+ cr_with_base_noise = CROSS_WITH_BASE_NOISE.flatten.collect { |input| input.to_f / 5.0}
31
+
32
+ puts "Training the network, please wait."
33
+ 101.times do |i|
34
+ tr_input = TRIANGLE.flatten.collect { |input| input.to_f / 5.0}
35
+ sq_input = SQUARE.flatten.collect { |input| input.to_f / 5.0}
36
+ cr_input = CROSS.flatten.collect { |input| input.to_f / 5.0}
37
+
38
+ error1 = net.train(tr_input, [1,0,0])
39
+ error2 = net.train(sq_input, [0,1,0])
40
+ error3 = net.train(cr_input, [0,0,1])
41
+ puts "Error after iteration #{i}:\t#{error1} - #{error2} - #{error3}" if i%20 == 0
42
+ end
43
+
44
+ def result_label(result)
45
+ if result[0] > result[1] && result[0] > result[2]
46
+ "TRIANGLE"
47
+ elsif result[1] > result[2]
48
+ "SQUARE"
49
+ else
50
+ "CROSS"
51
+ end
52
+ end
53
+
54
+ tr_input = TRIANGLE.flatten.collect { |input| input.to_f / 5.0}
55
+ sq_input = SQUARE.flatten.collect { |input| input.to_f / 5.0}
56
+ cr_input = CROSS.flatten.collect { |input| input.to_f / 5.0}
57
+
58
+ puts "Training Examples"
59
+ puts "#{net.feed_forward(tr_input).inspect} => #{result_label(net.feed_forward(tr_input))}"
60
+ puts "#{net.feed_forward(sq_input).inspect} => #{result_label(net.feed_forward(sq_input))}"
61
+ puts "#{net.feed_forward(cr_input).inspect} => #{result_label(net.feed_forward(cr_input))}"
62
+ puts "Examples with noise"
63
+ puts "#{net.feed_forward(tr_with_noise).inspect} => #{result_label(net.feed_forward(tr_with_noise))}"
64
+ puts "#{net.feed_forward(sq_with_noise).inspect} => #{result_label(net.feed_forward(sq_with_noise))}"
65
+ puts "#{net.feed_forward(cr_with_noise).inspect} => #{result_label(net.feed_forward(cr_with_noise))}"
66
+ puts "Examples with base noise"
67
+ puts "#{net.feed_forward(tr_with_base_noise).inspect} => #{result_label(net.feed_forward(tr_with_base_noise))}"
68
+ puts "#{net.feed_forward(sq_with_base_noise).inspect} => #{result_label(net.feed_forward(sq_with_base_noise))}"
69
+ puts "#{net.feed_forward(cr_with_base_noise).inspect} => #{result_label(net.feed_forward(cr_with_base_noise))}"
70
+
71
+ end
72
+
73
+ puts "Elapsed time: #{times}"
data/examples/data.rdb ADDED
Binary file
data/examples/patterns_with_base_noise.rb ADDED
@@ -0,0 +1,68 @@
1
+ # Author:: Sergio Fierens
2
+ # License:: MPL 1.1
3
+ # Project:: ai4r
4
+ # Url:: http://ai4r.rubyforge.org/
5
+ #
6
+ # You can redistribute it and/or modify it under the terms of
7
+ # the Mozilla Public License version 1.1 as published by the
8
+ # Mozilla Foundation at http://www.mozilla.org/MPL/MPL-1.1.txt
9
+
10
+
11
+ TRIANGLE_WITH_BASE_NOISE = [
12
+ [ 3, 3, 3, 3, 3, 3, 3, 8, 8, 3, 3, 3, 3, 3, 3, 3],
13
+ [ 3, 3, 3, 3, 3, 3, 4, 10, 10, 4, 3, 3, 3, 3, 3, 3],
14
+ [ 3, 3, 3, 3, 3, 3, 8, 8, 8, 8, 3, 3, 3, 3, 3, 3],
15
+ [ 3, 3, 3, 3, 3, 4, 10, 4, 4, 10, 4, 3, 3, 3, 3, 3],
16
+ [ 3, 3, 3, 3, 3, 8, 8, 3, 3, 8, 8, 3, 3, 3, 3, 3],
17
+ [ 3, 3, 3, 3, 4, 10, 4, 3, 3, 4, 10, 4, 3, 3, 3, 3],
18
+ [ 3, 3, 3, 3, 8, 8, 3, 3, 3, 3, 8, 8, 3, 3, 3, 3],
19
+ [ 3, 3, 3, 4, 10, 4, 3, 3, 3, 3, 4, 10, 4, 3, 3, 3],
20
+ [ 3, 3, 3, 8, 8, 3, 3, 3, 3, 3, 3, 8, 8, 3, 3, 3],
21
+ [ 3, 3, 4, 10, 4, 3, 3, 3, 3, 3, 3, 4, 10, 4, 3, 3],
22
+ [ 3, 3, 8, 8, 3, 3, 3, 3, 3, 3, 3, 3, 8, 8, 3, 3],
23
+ [ 3, 4, 10, 4, 3, 3, 3, 3, 3, 3, 3, 3, 4, 10, 4, 3],
24
+ [ 3, 8, 8, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 8, 8, 3],
25
+ [ 4, 10, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 10, 4],
26
+ [ 8, 8, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 8, 8],
27
+ [10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10]
28
+ ]
29
+
30
+ SQUARE_WITH_BASE_NOISE = [
31
+ [10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10],
32
+ [10, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 10],
33
+ [10, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 10],
34
+ [10, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 10],
35
+ [10, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 10],
36
+ [10, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 10],
37
+ [10, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 10],
38
+ [10, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 10],
39
+ [10, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 10],
40
+ [10, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 10],
41
+ [10, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 10],
42
+ [10, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 10],
43
+ [10, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 10],
44
+ [10, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 10],
45
+ [10, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 10],
46
+ [10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10]
47
+
48
+ ]
49
+
50
+ CROSS_WITH_BASE_NOISE = [
51
+ [ 3, 3, 3, 3, 3, 3, 3, 8, 8, 3, 3, 3, 3, 3, 3, 3],
52
+ [ 3, 3, 3, 3, 3, 3, 3, 8, 8, 3, 3, 3, 3, 3, 3, 3],
53
+ [ 3, 3, 3, 3, 3, 3, 3, 8, 8, 3, 3, 3, 3, 3, 3, 3],
54
+ [ 3, 3, 3, 3, 3, 3, 3, 8, 8, 3, 3, 3, 3, 3, 3, 3],
55
+ [ 3, 3, 3, 3, 3, 3, 3, 8, 8, 3, 3, 3, 3, 3, 3, 3],
56
+ [ 3, 3, 3, 3, 3, 3, 3, 8, 8, 3, 3, 3, 3, 3, 3, 3],
57
+ [ 3, 3, 3, 3, 3, 3, 3, 8, 8, 3, 3, 3, 3, 3, 3, 3],
58
+ [ 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8],
59
+ [ 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8],
60
+ [ 3, 3, 3, 3, 3, 3, 3, 8, 8, 3, 3, 3, 3, 3, 3, 3],
61
+ [ 3, 3, 3, 3, 3, 3, 3, 8, 8, 3, 3, 3, 3, 3, 3, 3],
62
+ [ 3, 3, 3, 3, 3, 3, 3, 8, 8, 3, 3, 3, 3, 3, 3, 3],
63
+ [ 3, 3, 3, 3, 3, 3, 3, 8, 8, 3, 3, 3, 3, 3, 3, 3],
64
+ [ 3, 3, 3, 3, 3, 3, 3, 8, 8, 3, 3, 3, 3, 3, 3, 3],
65
+ [ 3, 3, 3, 3, 3, 3, 3, 8, 8, 3, 3, 3, 3, 3, 3, 3],
66
+ [ 3, 3, 3, 3, 3, 3, 3, 8, 8, 3, 3, 3, 3, 3, 3, 3]
67
+ ]
68
+
data/examples/patterns_with_noise.rb ADDED
@@ -0,0 +1,66 @@
1
+ # Author:: Sergio Fierens
2
+ # License:: MPL 1.1
3
+ # Project:: ai4r
4
+ # Url:: http://ai4r.rubyforge.org/
5
+ #
6
+ # You can redistribute it and/or modify it under the terms of
7
+ # the Mozilla Public License version 1.1 as published by the
8
+ # Mozilla Foundation at http://www.mozilla.org/MPL/MPL-1.1.txt
9
+
10
+ TRIANGLE_WITH_NOISE = [
11
+ [ 1, 0, 0, 0, 0, 0, 0, 1, 5, 0, 0, 1, 0, 0, 0, 0],
12
+ [ 0, 0, 0, 0, 3, 0, 1, 9, 9, 1, 0, 0, 0, 0, 3, 0],
13
+ [ 0, 3, 0, 0, 0, 0, 5, 1, 5, 3, 0, 0, 0, 0, 0, 7],
14
+ [ 0, 0, 0, 7, 0, 1, 9, 1, 1, 9, 1, 0, 0, 0, 3, 0],
15
+ [ 0, 0, 0, 0, 0, 3, 5, 0, 3, 5, 5, 0, 0, 0, 0, 0],
16
+ [ 0, 1, 0, 0, 1, 9, 1, 0, 1, 1, 9, 1, 0, 0, 0, 0],
17
+ [ 1, 0, 0, 0, 5, 5, 0, 0, 0, 0, 5, 5, 7, 0, 0, 3],
18
+ [ 0, 0, 3, 3, 9, 1, 0, 0, 1, 0, 1, 9, 1, 0, 0, 0],
19
+ [ 0, 0, 0, 5, 5, 0, 3, 7, 0, 0, 0, 5, 5, 0, 0, 0],
20
+ [ 0, 0, 1, 9, 1, 0, 0, 0, 0, 0, 0, 1, 9, 1, 0, 0],
21
+ [ 0, 0, 5, 5, 0, 0, 0, 0, 3, 0, 0, 0, 5, 5, 0, 0],
22
+ [ 0, 1, 9, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 9, 1, 0],
23
+ [ 0, 5, 5, 0, 3, 0, 0, 3, 0, 0, 0, 0, 0, 5, 5, 0],
24
+ [ 1, 9, 1, 0, 0, 3, 0, 0, 0, 1, 0, 0, 0, 1, 9, 1],
25
+ [ 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 5],
26
+ [10, 10, 10, 10, 1, 10, 10, 10, 10, 10, 1, 10, 10, 10, 10, 10]
27
+ ]
28
+
29
+ SQUARE_WITH_NOISE = [
30
+ [10, 3, 10, 10, 10, 6, 10, 10, 10, 10, 10, 4, 10, 10, 10, 10],
31
+ [10, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10],
32
+ [10, 0, 3, 0, 0, 0, 0, 7, 0, 6, 1, 0, 0, 0, 0, 0],
33
+ [10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10],
34
+ [10, 0, 4, 0, 4, 0, 0, 0, 1, 0, 3, 0, 0, 4, 0, 10],
35
+ [10, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
36
+ [10, 0, 0, 0, 3, 6, 0, 0, 1, 0, 0, 0, 0, 0, 0, 10],
37
+ [10, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 7, 0, 0, 10],
38
+ [10, 4, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10],
39
+ [10, 0, 7, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10],
40
+ [10, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 7, 10],
41
+ [10, 0, 3, 0, 4, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 10],
42
+ [10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 10],
43
+ [10, 0, 0, 6, 0, 0, 0, 7, 0, 0, 0, 7, 0, 0, 0, 10],
44
+ [10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10],
45
+ [10, 10, 10, 10, 3, 10, 10, 10, 10, 0, 10, 10, 1, 10, 1, 10]
46
+
47
+ ]
48
+
49
+ CROSS_WITH_NOISE = [
50
+ [ 0, 0, 0, 0, 0, 0, 3, 3, 5, 0, 3, 0, 0, 0, 1, 0],
51
+ [ 0, 1, 0, 0, 0, 1, 0, 5, 5, 0, 0, 0, 0, 0, 0, 0],
52
+ [ 0, 0, 0, 0, 0, 0, 0, 5, 5, 0, 0, 0, 3, 0, 0, 0],
53
+ [ 0, 0, 1, 8, 0, 0, 0, 5, 5, 0, 4, 0, 0, 0, 1, 0],
54
+ [ 0, 0, 0, 0, 0, 3, 0, 5, 0, 0, 0, 0, 1, 0, 0, 0],
55
+ [ 0, 0, 0, 8, 0, 0, 0, 5, 5, 0, 0, 0, 0, 0, 0, 1],
56
+ [ 0, 0, 0, 0, 0, 0, 0, 5, 5, 0, 3, 0, 0, 0, 0, 0],
57
+ [ 5, 5, 5, 8, 5, 3, 5, 5, 5, 5, 5, 5, 5, 5, 0, 5],
58
+ [ 5, 5, 5, 5, 5, 5, 5, 5, 1, 5, 5, 5, 5, 1, 0, 0],
59
+ [ 0, 0, 0, 8, 0, 0, 0, 4, 5, 0, 0, 0, 0, 0, 0, 0],
60
+ [ 0, 0, 0, 0, 0, 0, 0, 5, 5, 4, 0, 0, 0, 0, 0, 0],
61
+ [ 0, 0, 0, 0, 0, 4, 0, 5, 5, 0, 0, 0, 0, 0, 0, 0],
62
+ [ 4, 0, 0, 4, 0, 0, 0, 5, 5, 0, 0, 0, 1, 0, 0, 0],
63
+ [ 0, 0, 0, 0, 0, 1, 0, 5, 4, 4, 3, 0, 0, 0, 0, 0],
64
+ [ 0, 0, 0, 0, 0, 0, 0, 5, 5, 0, 0, 0, 10, 0, 0, 0],
65
+ [ 0, 0, 0, 0, 0, 0, 0, 5, 5, 0, 0, 0, 0, 0, 0, 0]
66
+ ]
data/examples/training_patterns.rb ADDED
@@ -0,0 +1,68 @@
1
+ # Author:: Sergio Fierens
2
+ # License:: MPL 1.1
3
+ # Project:: ai4r
4
+ # Url:: http://ai4r.rubyforge.org/
5
+ #
6
+ # You can redistribute it and/or modify it under the terms of
7
+ # the Mozilla Public License version 1.1 as published by the
8
+ # Mozilla Foundation at http://www.mozilla.org/MPL/MPL-1.1.txt
9
+
10
+
11
+ TRIANGLE = [
12
+ [ 0, 0, 0, 0, 0, 0, 0, 5, 5, 0, 0, 0, 0, 0, 0, 0],
13
+ [ 0, 0, 0, 0, 0, 0, 1, 9, 9, 1, 0, 0, 0, 0, 0, 0],
14
+ [ 0, 0, 0, 0, 0, 0, 5, 5, 5, 5, 0, 0, 0, 0, 0, 0],
15
+ [ 0, 0, 0, 0, 0, 1, 9, 1, 1, 9, 1, 0, 0, 0, 0, 0],
16
+ [ 0, 0, 0, 0, 0, 5, 5, 0, 0, 5, 5, 0, 0, 0, 0, 0],
17
+ [ 0, 0, 0, 0, 1, 9, 1, 0, 0, 1, 9, 1, 0, 0, 0, 0],
18
+ [ 0, 0, 0, 0, 5, 5, 0, 0, 0, 0, 5, 5, 0, 0, 0, 0],
19
+ [ 0, 0, 0, 1, 9, 1, 0, 0, 0, 0, 1, 9, 1, 0, 0, 0],
20
+ [ 0, 0, 0, 5, 5, 0, 0, 0, 0, 0, 0, 5, 5, 0, 0, 0],
21
+ [ 0, 0, 1, 9, 1, 0, 0, 0, 0, 0, 0, 1, 9, 1, 0, 0],
22
+ [ 0, 0, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 5, 5, 0, 0],
23
+ [ 0, 1, 9, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 9, 1, 0],
24
+ [ 0, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 5, 0],
25
+ [ 1, 9, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 9, 1],
26
+ [ 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 5],
27
+ [10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10]
28
+ ]
29
+
30
+ SQUARE = [
31
+ [10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10],
32
+ [10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10],
33
+ [10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10],
34
+ [10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10],
35
+ [10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10],
36
+ [10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10],
37
+ [10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10],
38
+ [10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10],
39
+ [10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10],
40
+ [10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10],
41
+ [10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10],
42
+ [10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10],
43
+ [10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10],
44
+ [10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10],
45
+ [10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10],
46
+ [10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10]
47
+
48
+ ]
49
+
50
+ CROSS = [
51
+ [ 0, 0, 0, 0, 0, 0, 0, 5, 5, 0, 0, 0, 0, 0, 0, 0],
52
+ [ 0, 0, 0, 0, 0, 0, 0, 5, 5, 0, 0, 0, 0, 0, 0, 0],
53
+ [ 0, 0, 0, 0, 0, 0, 0, 5, 5, 0, 0, 0, 0, 0, 0, 0],
54
+ [ 0, 0, 0, 0, 0, 0, 0, 5, 5, 0, 0, 0, 0, 0, 0, 0],
55
+ [ 0, 0, 0, 0, 0, 0, 0, 5, 5, 0, 0, 0, 0, 0, 0, 0],
56
+ [ 0, 0, 0, 0, 0, 0, 0, 5, 5, 0, 0, 0, 0, 0, 0, 0],
57
+ [ 0, 0, 0, 0, 0, 0, 0, 5, 5, 0, 0, 0, 0, 0, 0, 0],
58
+ [ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5],
59
+ [ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5],
60
+ [ 0, 0, 0, 0, 0, 0, 0, 5, 5, 0, 0, 0, 0, 0, 0, 0],
61
+ [ 0, 0, 0, 0, 0, 0, 0, 5, 5, 0, 0, 0, 0, 0, 0, 0],
62
+ [ 0, 0, 0, 0, 0, 0, 0, 5, 5, 0, 0, 0, 0, 0, 0, 0],
63
+ [ 0, 0, 0, 0, 0, 0, 0, 5, 5, 0, 0, 0, 0, 0, 0, 0],
64
+ [ 0, 0, 0, 0, 0, 0, 0, 5, 5, 0, 0, 0, 0, 0, 0, 0],
65
+ [ 0, 0, 0, 0, 0, 0, 0, 5, 5, 0, 0, 0, 0, 0, 0, 0],
66
+ [ 0, 0, 0, 0, 0, 0, 0, 5, 5, 0, 0, 0, 0, 0, 0, 0]
67
+ ]
68
+
data/examples/xor.rb ADDED
@@ -0,0 +1,25 @@
1
+ require File.dirname(__FILE__) + '/../lib/db_mlp'
2
+ require 'benchmark'
3
+
4
+ db = "sqlite3://#{File.dirname(File.expand_path(__FILE__))}/data.rdb"
5
+ a = DBMLP.new(db, :hidden_layers => [2], :output_nodes => 1, :inputs => 2)
6
+
7
+ times = Benchmark.measure do
8
+
9
+ srand 1
10
+
11
+ training = [[[0,0], [0]], [[0,1], [1]], [[1,0], [1]], [[1,1], [0]]]
12
+ testing = [[[0,0], [0]], [[0,1], [1]], [[1,0], [1]], [[1,1], [0]]]
13
+ validation = [[[0,0], [0]], [[0,1], [1]], [[1,0], [1]], [[1,1], [0]]]
14
+
15
+ a.train(training, testing, validation, 3001)
16
+
17
+ puts "Test data"
18
+ puts "[0,0] => #{a.feed_forward([0,0]).inspect}"
19
+ puts "[0,1] => #{a.feed_forward([0,1]).inspect}"
20
+ puts "[1,0] => #{a.feed_forward([1,0]).inspect}"
21
+ puts "[1,1] => #{a.feed_forward([1,1]).inspect}"
22
+
23
+ end
24
+
25
+ puts "Elapsed time: #{times}"
data/lib/db_mlp.rb ADDED
@@ -0,0 +1,55 @@
1
+ #require 'rubygems'
2
+ require 'datamapper'
3
+ require File.expand_path(File.dirname(__FILE__) + '/models/neuron')
4
+ require File.expand_path(File.dirname(__FILE__) + '/modules/create_test_results')
5
+ require File.expand_path(File.dirname(__FILE__) + '/modules/db')
6
+ require File.expand_path(File.dirname(__FILE__) + '/modules/training')
7
+
8
+ class DBMLP
9
+ include DB
10
+ include Training
11
+ include CreateTestResults
12
+
13
+ def initialize(db_path, options={})
14
+ @input_size = options[:inputs]
15
+ @hidden_layers = options[:hidden_layers]
16
+ @number_of_output_nodes = options[:output_nodes]
17
+ @verbose = options[:verbose] || false
18
+ connect_to_db(db_path)
19
+ setup_network
20
+ end
21
+
22
+ def feed_forward(input)
23
+ @network.each_with_index do |layer, layer_index|
24
+ layer.each do |neuron|
25
+ if layer_index == 0
26
+ neuron.fire(input)
27
+ else
28
+ input = @network[layer_index-1].map {|x| x.last_output}
29
+ neuron.fire(input)
30
+ end
31
+ end
32
+ end
33
+ @network.last.map {|x| x.last_output}
34
+ end
35
+
36
+ def train(training, testing, validations, n=3000, report_path=nil)
37
+ train_and_cross_validate(training, validations, n)
38
+ create_test_report(testing, report_path) unless report_path.nil?
39
+ end
40
+
41
+ def inspect
42
+ @network
43
+ end
44
+
45
+ private
46
+
47
+ def last_outputs
48
+ @network.last.map {|x| x.last_output}
49
+ end
50
+
51
+ def print_message(message)
52
+ puts message if @verbose
53
+ end
54
+
55
+ end
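Editor's note: a small usage sketch of the options handled in initialize above. :verbose switches on the progress messages printed through print_message, and train falls back to 3000 iterations when n is omitted; the database path is a placeholder.

  require 'rubygems'
  require 'db_mlp'

  xor = [[[0,0], [0]], [[0,1], [1]], [[1,0], [1]], [[1,1], [0]]]

  mlp = DBMLP.new("sqlite3://#{Dir.pwd}/data.rdb",
                  :hidden_layers => [2], :output_nodes => 1, :inputs => 2,
                  :verbose => true)            # prints "Iteration i/n" while training
  mlp.train(xor, xor, xor)                     # n defaults to 3000
  p mlp.feed_forward([0, 1])                   # an array with one value per output node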
data/lib/models/neuron.rb ADDED
@@ -0,0 +1,62 @@
1
+ class Neuron
2
+ include DataMapper::Resource
3
+ property :id, Serial
4
+ property :layer_index, Integer, :index => true
5
+ property :last_output, Float
6
+ property :db_weights, String
7
+ property :delta, Float
8
+
9
+ def initialize(number_of_inputs, layer_index)
10
+ create_weights(number_of_inputs)
11
+ self.layer_index = layer_index
12
+ end
13
+
14
+ def fire(input)
15
+ self.last_output = activation_function(input)
16
+ end
17
+
18
+ def update_weight(inputs, training_rate)
19
+ inputs = inputs + [-1] # Add the bias without mutating the caller's array
20
+ new_weights = weights
21
+ weights.each_index do |i|
22
+ new_weights[i] += training_rate * delta * inputs[i]
23
+ end
24
+ self.db_weights = new_weights.join(',')
25
+ end
26
+
27
+ def inspect
28
+ weights
29
+ end
30
+
31
+ def weights
32
+ db_weights.split(',').map {|x| x.to_f}
33
+ end
34
+
35
+ private
36
+
37
+ def activation_function(input)
38
+ sum = 0
39
+ input.each_with_index do |n, index|
40
+ # puts "index:#{index} weight: #{@weights[index]} input: #{n} input_size: #{input.size}"
41
+ sum += weights[index] * n
42
+ end
43
+ sum += weights.last * -1 #bias node
44
+ sigmoid_function(sum)
45
+ end
46
+
47
+ # g(h) = 1 / (1+exp(-B*h(j)))
48
+ def sigmoid_function(x)
49
+ 1 / (1+Math.exp(-1 * (x)))
50
+ end
51
+
52
+ def create_weights(number_of_inputs)
53
+ # Create random weights between 0 & 1
54
+ # Plus another one for the bias node
55
+ weights = []
56
+ (number_of_inputs + 1).times do
57
+ weights << (rand > 0.5 ? -rand : rand)
58
+ end
59
+ self.db_weights = weights.join(',')
60
+ end
61
+
62
+ end
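Editor's note: to make the activation above concrete, here is a standalone sketch of the same arithmetic: a weighted sum of the inputs, the last weight applied to a constant -1 bias input, and the result squashed by the sigmoid. The weights are made up for illustration.

  weights = [0.4, -0.2, 0.1]            # two input weights plus one bias weight
  input   = [1, 0]

  sum = 0
  input.each_with_index { |n, i| sum += weights[i] * n }
  sum += weights.last * -1              # bias node, exactly as in activation_function
  output = 1 / (1 + Math.exp(-sum))     # sigmoid_function
  puts output                           # => roughly 0.574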
data/lib/modules/create_test_results.rb ADDED
@@ -0,0 +1,32 @@
1
+ module CreateTestResults
2
+
3
+ private
4
+
5
+ def create_test_report(test_examples, report_path)
6
+ results = []
7
+
8
+ test_examples.each_with_index do |example, index|
9
+ input, target = example[0], example[1]
10
+ feed_forward(input)
11
+ info = "ID: #{index}\tAttributes: #{input.inspect}\tTarget: #{target.inspect}\tResults: #{last_outputs.inspect}\tError: #{calculate_error(target)}\t"
12
+ results << info
13
+ end
14
+
15
+ File.open(report_path, "w+") do |file|
16
+ results.each do |line|
17
+ file.write(line)
18
+ file.write("\n")
19
+ end
20
+ end
21
+ end
22
+
23
+ def calculate_error(targets)
24
+ outputs = last_outputs
25
+ sum = 0
26
+ targets.each_with_index do |t, index|
27
+ sum += (t - outputs[index]) ** 2
28
+ end
29
+ 0.5 * sum
30
+ end
31
+
32
+ end
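Editor's note: calculate_error above is half the sum of squared differences between the targets and the network's last outputs. The Error column of the README's example report can be reproduced by hand; for instance the second row:

  target, output = 1, 0.365112645315455
  error = 0.5 * (target - output) ** 2
  puts error    # => 0.20154097656917, matching the report line for [0, 1]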
data/lib/modules/db.rb ADDED
@@ -0,0 +1,67 @@
1
+ module DB
2
+
3
+ private
4
+
5
+ def setup_network
6
+ @network = []
7
+ if new_mlp?
8
+ wipe_db!
9
+ # Hidden Layers
10
+ @hidden_layers.each_with_index do |number_of_neurons, index|
11
+ layer = []
12
+ inputs = index == 0 ? @input_size : @hidden_layers[index-1] # neuron count of the previous hidden layer
13
+ number_of_neurons.times { layer << Neuron.new(inputs, index) }
14
+ @network << layer
15
+ layer.each {|x| x.save!}
16
+ end
17
+ # Output layer
18
+ inputs = @hidden_layers.empty? ? @input_size : @hidden_layers.last
19
+ layer = []
20
+ @number_of_output_nodes.times { layer << Neuron.new(inputs, -1)}
21
+ @network << layer
22
+ layer.each {|x| x.save!}
23
+ else
24
+ # Problematic area???
25
+ @hidden_layers.each_index do |index|
26
+ layer = Neuron.all(:layer_index => index, :order => [:id.asc])
27
+ @network << layer
28
+ end
29
+ layer = Neuron.all(:layer_index => -1, :order => [:id.asc])
30
+ @network << layer
31
+ end
32
+ end
33
+
34
+ def wipe_db!
35
+ DataMapper.auto_migrate!
36
+ end
37
+
38
+ # Only one MLP per DB, so if this MLP's shape differs
39
+ # from what's in the DB, we wipe it and create a new one;
40
+ # if it's the same, we carry on where we left off
41
+ def new_mlp?
42
+ new_mlp = false
43
+ # Check hidden_layers
44
+ @hidden_layers.each_index do |i|
45
+ if Neuron.count(:layer_index => i) != @hidden_layers[i]
46
+ new_mlp = true
47
+ end
48
+ end
49
+ # Check output layer
50
+ if Neuron.count(:layer_index => -1) != @number_of_output_nodes
51
+ new_mlp = true
52
+ end
53
+
54
+ if Neuron.count != (@hidden_layers.inject(0) { |sum, n| sum + n } + @number_of_output_nodes)
55
+ new_mlp = true
56
+ end
57
+ new_mlp
58
+ end
59
+
60
+ def connect_to_db(db_path)
61
+ # DataMapper::Logger.new(STDOUT, :debug)
62
+ # DataObjects::Sqlite3.logger = DataObjects::Logger.new(STDOUT, 0)
63
+ DataMapper.setup(:default, db_path)
64
+ DataMapper.auto_upgrade!
65
+ end
66
+
67
+ end
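Editor's note: a sketch of the persistence behaviour that new_mlp? above is checking for. Re-opening the same database with the same shape is intended to reuse the saved neurons, while a different shape wipes the tables and builds a fresh network; the file path is a placeholder.

  require 'rubygems'
  require 'db_mlp'

  db = "sqlite3://#{Dir.pwd}/persistent.rdb"

  first = DBMLP.new(db, :hidden_layers => [2], :output_nodes => 1, :inputs => 2)
  # ... train first here; its weights end up in the neurons table ...

  # Same shape: the saved neurons are loaded and training carries on where it left off.
  second = DBMLP.new(db, :hidden_layers => [2], :output_nodes => 1, :inputs => 2)

  # Different shape: auto_migrate! wipes the table and new random neurons are created.
  third = DBMLP.new(db, :hidden_layers => [3], :output_nodes => 1, :inputs => 2)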
data/lib/modules/training.rb ADDED
@@ -0,0 +1,108 @@
1
+ module Training
2
+
3
+ private
4
+
5
+ def train_and_cross_validate(training, validations, n)
6
+ errors = []
7
+ 1.upto(n) do |i|
8
+ if i % 200 == 0
9
+ if validate(validations)
10
+ print_message("Stopping at #{i}")
11
+ break
12
+ end
13
+ end
14
+ print_message("Iteration #{i}/#{n}")
15
+ training = training.sort_by { rand } #shaken or stirred?
16
+ training.each do |t|
17
+ input, target = t[0], t[1]
18
+ training_process(input, target)
19
+ end
20
+ end
21
+ save_all_neurons
22
+ end
23
+
24
+ def validate(validations)
25
+ @validations ||= []
26
+ sum = 0
27
+ validations.each do |v|
28
+ input, target = v[0], v[1]
29
+ feed_forward(input)
30
+ sum += calculate_error(target)
31
+ end
32
+ @validations << sum
33
+ return false if @validations.size < 2
34
+ @validations[-1] > @validations[-2] ? true : false
35
+ end
36
+
37
+ def training_process(input, targets)
38
+ # To go back we must go forward
39
+ feed_forward(input)
40
+ compute_deltas(targets)
41
+ update_weights(input)
42
+ end
43
+
44
+ def save_all_neurons
45
+ @network.each do |layer|
46
+ layer.each {|n| n.save!}
47
+ end
48
+ end
49
+
50
+ def update_weights(input)
51
+ reversed_network = @network.reverse
52
+ reversed_network.each_with_index do |layer, layer_index|
53
+ if layer_index == 0
54
+ update_output_weights(layer, layer_index, input)
55
+ else
56
+ update_hidden_weights(layer, layer_index, input)
57
+ end
58
+ end
59
+ end
60
+
61
+ def update_output_weights(layer, layer_index, input)
62
+ inputs = @hidden_layers.empty? ? input : @network[-2].map {|x| x.last_output}
63
+ layer.each do |neuron|
64
+ neuron.update_weight(inputs, 0.25)
65
+ end
66
+ end
67
+
68
+ def update_hidden_weights(layer, layer_index, original_input)
69
+ if layer_index == (@network.size - 1)
70
+ inputs = original_input.clone
71
+ else
72
+ inputs = @network.reverse[layer_index+1].map {|x| x.last_output}
73
+ end
74
+ layer.each do |neuron|
75
+ neuron.update_weight(inputs, 0.25)
76
+ end
77
+ end
78
+
79
+ def compute_deltas(targets)
80
+ reversed_network = @network.reverse
81
+ reversed_network.each_with_index do |layer, layer_index|
82
+ if layer_index == 0
83
+ compute_output_deltas(layer, targets)
84
+ else
85
+ compute_hidden_deltas(layer, targets)
86
+ end
87
+ end
88
+ end
89
+
90
+ def compute_output_deltas(layer, targets)
91
+ layer.each_with_index do |neuron, i|
92
+ output = neuron.last_output
93
+ neuron.delta = output * (1 - output) * (targets[i] - output)
94
+ end
95
+ end
96
+
97
+ def compute_hidden_deltas(layer, targets)
98
+ layer.each_with_index do |neuron, neuron_index|
99
+ error = 0
100
+ @network.last.each do |output_neuron|
101
+ error += output_neuron.delta * output_neuron.weights[neuron_index]
102
+ end
103
+ output = neuron.last_output
104
+ neuron.delta = output * (1 - output) * error
105
+ end
106
+ end
107
+
108
+ end
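Editor's note: the early-stopping rule in train_and_cross_validate and validate above amounts to: every 200 iterations, sum the squared error over the validation set and stop as soon as that sum is worse than it was at the previous check. A toy illustration of the comparison (the error sums are invented):

  checkpoints = [0.61, 0.44, 0.39, 0.41]    # summed validation errors, one per check
  stop = (1...checkpoints.size).find { |i| checkpoints[i] > checkpoints[i - 1] }
  puts "stop at checkpoint #{stop}"         # => 3, because 0.41 > 0.39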
data/profiling/profile.rb ADDED
@@ -0,0 +1,18 @@
1
+ require File.dirname(__FILE__) + '/../lib/db_mlp'
2
+ require 'rubygems'
3
+ require 'ruby-prof'
4
+
5
+ db = "sqlite3://#{File.dirname(File.expand_path(__FILE__))}/../benchmarks/data.rdb"
6
+
7
+ a = DBMLP.new(db, :hidden_layers => [2], :output_nodes => 1, :inputs => 2)
8
+
9
+ @training = [[[0,0], [0]], [[0,1], [1]], [[1,0], [1]], [[1,1], [0]]]
10
+ @testing = [[[0,0], [0]], [[0,1], [1]], [[1,0], [1]], [[1,1], [0]]]
11
+ @validation = [[[0,0], [0]], [[0,1], [1]], [[1,0], [1]], [[1,1], [0]]]
12
+
13
+ result = RubyProf.profile do
14
+ a = a.train(@training, @testing, @validation, 100)
15
+ end
16
+
17
+ printer = RubyProf::FlatPrinter.new(result)
18
+ printer.print(STDOUT, 0)
data/test/db/test.txt ADDED
File without changes
data/test/helper.rb ADDED
@@ -0,0 +1,10 @@
1
+ require 'rubygems'
2
+ require 'test/unit'
3
+ require 'shoulda'
4
+
5
+ $LOAD_PATH.unshift(File.join(File.dirname(__FILE__), '..', 'lib'))
6
+ $LOAD_PATH.unshift(File.dirname(__FILE__))
7
+ require 'db_mlp'
8
+
9
+ class Test::Unit::TestCase
10
+ end
data/test/test_db_mlp.rb ADDED
@@ -0,0 +1,151 @@
1
+ require 'helper'
2
+
3
+ class TestDBMLP < Test::Unit::TestCase
4
+ context "Testing Report" do
5
+ setup do
6
+ set_data_variables
7
+ db_path = "sqlite3://#{File.dirname(File.expand_path(__FILE__))}/db/data.rdb"
8
+ @test_results_path = File.dirname(File.expand_path(__FILE__)) + '/db/test_results.txt'
9
+ a = DBMLP.new(db_path, :hidden_layers => [2], :output_nodes => 1, :inputs => 2)
10
+ a.train(@training, @testing, @validation, 1, @test_results_path)
11
+ end
12
+
13
+ should "create a test results .txt file" do
14
+ assert File.exists?(@test_results_path)
15
+ end
16
+
17
+ should "contain some text" do
18
+ File.open(@test_results_path, 'r+') do |file|
19
+ assert !file.readlines.empty?
20
+ end
21
+ end
22
+ end
23
+
24
+ context "DBMLP Instance" do
25
+ setup do
26
+ set_data_variables
27
+ @db_path = "sqlite3://#{File.dirname(File.expand_path(__FILE__))}/db/data.rdb"
28
+ end
29
+
30
+ should "contain 4 layers" do
31
+ a = DBMLP.new(@db_path, :hidden_layers => [2, 2, 2], :output_nodes => 2, :inputs => 2)
32
+ assert_equal 4, a.inspect.size
33
+ end
34
+
35
+ should "contain saved 3 layers" do
36
+ DBMLP.new(@db_path, :hidden_layers => [2, 2], :output_nodes => 2, :inputs => 2)
37
+ b = Neuron.all.map {|x| x.layer_index}.uniq.size
38
+ assert_equal 3, b
39
+ end
40
+
41
+ should "contain 1 output node" do
42
+ DBMLP.new(@db_path, :hidden_layers => [2], :output_nodes =>4, :inputs => 2)
43
+ a = DBMLP.new(@db_path, :hidden_layers => [2], :output_nodes => 1, :inputs => 2)
44
+ assert_equal 1, a.inspect.last.size
45
+ end
46
+
47
+ should "feed forward and set all neurons last outputs" do
48
+ a = DBMLP.new(@db_path, :hidden_layers => [2], :output_nodes => 2, :inputs => 2)
49
+ a.feed_forward([0,1])
50
+ b = a.inspect.inject([]) do |array, n|
51
+ array << n.map {|x| x.last_output}
52
+ end
53
+ b.flatten!
54
+ assert !b.include?(nil)
55
+ end
56
+
57
+ should "return an array after feed forward" do
58
+ a = DBMLP.new(@db_path, :hidden_layers => [2], :output_nodes => 2, :inputs => 2)
59
+ assert_kind_of Array, a.feed_forward([0,1])
60
+ end
61
+
62
+ should "save its neurons deltas" do
63
+ a = DBMLP.new(@db_path, :hidden_layers => [2], :output_nodes => 1, :inputs => 2)
64
+ a.train(@training, @testing, @validation, 1)
65
+ b = Neuron.all(:delta.not => nil)
66
+ assert !b.empty?
67
+ end
68
+
69
+ should "save its output neurons weights" do
70
+ a = DBMLP.new(@db_path, :hidden_layers => [2], :output_nodes => 1, :inputs => 2)
71
+ before = Neuron.first(:layer_index => -1).weights.inject([]) do |array, n|
72
+ array << n
73
+ end
74
+
75
+ a.train(@training, @testing, @validation, 1)
76
+
77
+ after = Neuron.first(:layer_index => -1).weights.inject([]) do |array, n|
78
+ array << n
79
+ end
80
+ assert_not_equal before, after
81
+ end
82
+
83
+ should "update its hidden neurons weights" do
84
+ a = DBMLP.new(@db_path, :hidden_layers => [2], :output_nodes => 1, :inputs => 2)
85
+ before = Neuron.first(:layer_index => 0).weights.inject([]) do |array, n|
86
+ array << n
87
+ end
88
+
89
+ a.train(@training, @testing, @validation, 1)
90
+ after = Neuron.first(:layer_index => 0).weights.inject([]) do |array, n|
91
+ array << n
92
+ end
93
+ assert_not_equal before, after
94
+ end
95
+ end
96
+
97
+ context "DB for a new mlp" do
98
+ setup do
99
+ db_path = "sqlite3://#{File.dirname(File.expand_path(__FILE__))}/db/data.rdb"
100
+ @a = DBMLP.new(db_path, :hidden_layers => [2, 2], :output_nodes => 2, :inputs => 2)
101
+ end
102
+
103
+ should "save 6 neurons" do
104
+ assert_equal 6, Neuron.count
105
+ end
106
+
107
+ should "save 2 hidden neurons in the first hidden layer" do
108
+ assert_equal 2, Neuron.count(:layer_index => 0)
109
+ end
110
+ end
111
+
112
+ context "Neuron" do
113
+ setup do
114
+ @db_path = "sqlite3://#{File.dirname(File.expand_path(__FILE__))}/db/data.rdb"
115
+ end
116
+
117
+ should "have 2 weights on output neuron" do
118
+ a = DBMLP.new(@db_path, :hidden_layers => [1], :output_nodes => 1, :inputs => 2)
119
+ assert_equal 2, a.inspect.last.last.weights.size
120
+ end
121
+
122
+ should "have saved 2 weights on output neuron" do
123
+ a = DBMLP.new(@db_path, :hidden_layers => [1], :output_nodes => 1, :inputs => 2)
124
+ assert_equal 2, Neuron.first(:layer_index => -1).weights.size
125
+ end
126
+
127
+ should "have 3 weights on output neuron" do
128
+ a = DBMLP.new(@db_path, :hidden_layers => [2], :output_nodes => 1, :inputs => 2)
129
+ assert_equal 3, a.inspect.last.last.weights.size
130
+ end
131
+
132
+ should "have saved 3 weights on output neuron" do
133
+ a = DBMLP.new(@db_path, :hidden_layers => [2], :output_nodes => 1, :inputs => 2)
134
+ assert_equal 3, Neuron.first(:layer_index => -1).weights.size
135
+ end
136
+
137
+ should "create a hidden neuron with 3 weights" do
138
+ a = DBMLP.new(@db_path, :hidden_layers => [2], :output_nodes => 1, :inputs => 2)
139
+ assert_equal 3, a.inspect.first.last.weights.size
140
+ end
141
+ end
142
+
143
+ private
144
+
145
+ def set_data_variables
146
+ @training = [[[0,0], [0]], [[0,1], [1]], [[1,0], [1]], [[1,1], [0]]]
147
+ @testing = [[[0,0], [0]], [[0,1], [1]], [[1,0], [1]], [[1,1], [0]]]
148
+ @validation = [[[0,0], [0]], [[0,1], [1]], [[1,0], [1]], [[1,1], [0]]]
149
+ end
150
+
151
+ end
metadata ADDED
@@ -0,0 +1,85 @@
1
+ --- !ruby/object:Gem::Specification
2
+ name: db_mlp
3
+ version: !ruby/object:Gem::Version
4
+ version: 0.0.2
5
+ platform: ruby
6
+ authors:
7
+ - reddavis
8
+ autorequire:
9
+ bindir: bin
10
+ cert_chain: []
11
+
12
+ date: 2009-10-12 00:00:00 +01:00
13
+ default_executable:
14
+ dependencies: []
15
+
16
+ description: Database backed Multi-Layer Perceptron Neural Network in Ruby
17
+ email: reddavis@gmail.com
18
+ executables: []
19
+
20
+ extensions: []
21
+
22
+ extra_rdoc_files:
23
+ - LICENSE
24
+ - README.rdoc
25
+ files:
26
+ - .autotest
27
+ - .gitignore
28
+ - LICENSE
29
+ - README.rdoc
30
+ - Rakefile
31
+ - VERSION
32
+ - benchmarks/data.rdb
33
+ - benchmarks/mlp_benchmark.rb
34
+ - db_mlp.gemspec
35
+ - examples/backpropagation_example.rb
36
+ - examples/data.rdb
37
+ - examples/patterns_with_base_noise.rb
38
+ - examples/patterns_with_noise.rb
39
+ - examples/training_patterns.rb
40
+ - examples/xor.rb
41
+ - lib/db_mlp.rb
42
+ - lib/models/neuron.rb
43
+ - lib/modules/create_test_results.rb
44
+ - lib/modules/db.rb
45
+ - lib/modules/training.rb
46
+ - profiling/profile.rb
47
+ - test/db/test.txt
48
+ - test/helper.rb
49
+ - test/test_db_mlp.rb
50
+ has_rdoc: true
51
+ homepage: http://github.com/reddavis/dbmlp
52
+ licenses: []
53
+
54
+ post_install_message:
55
+ rdoc_options:
56
+ - --charset=UTF-8
57
+ require_paths:
58
+ - lib
59
+ required_ruby_version: !ruby/object:Gem::Requirement
60
+ requirements:
61
+ - - ">="
62
+ - !ruby/object:Gem::Version
63
+ version: "0"
64
+ version:
65
+ required_rubygems_version: !ruby/object:Gem::Requirement
66
+ requirements:
67
+ - - ">="
68
+ - !ruby/object:Gem::Version
69
+ version: "0"
70
+ version:
71
+ requirements: []
72
+
73
+ rubyforge_project:
74
+ rubygems_version: 1.3.5
75
+ signing_key:
76
+ specification_version: 3
77
+ summary: Database backed Multi-Layer Perceptron Neural Network in Ruby
78
+ test_files:
79
+ - test/helper.rb
80
+ - test/test_db_mlp.rb
81
+ - examples/backpropagation_example.rb
82
+ - examples/patterns_with_base_noise.rb
83
+ - examples/patterns_with_noise.rb
84
+ - examples/training_patterns.rb
85
+ - examples/xor.rb