ai4r 1.1 → 1.2
Sign up to get free protection for your applications and to get access to all the features.
- data/README.rdoc +21 -20
- data/examples/decision_trees/id3_example.rb +3 -2
- data/examples/genetic_algorithm/genetic_algorithm_example.rb +6 -6
- data/examples/neural_network/backpropagation_example.rb +2 -2
- data/lib/ai4r/classifiers/classifier_helper.rb +54 -0
- data/lib/ai4r/classifiers/id3.rb +356 -0
- data/lib/ai4r/classifiers/one_r.rb +148 -0
- data/lib/ai4r/classifiers/prism.rb +231 -0
- data/lib/ai4r/classifiers/zero_r.rb +104 -0
- data/lib/ai4r/genetic_algorithm/genetic_algorithm.rb +272 -0
- data/lib/ai4r/neural_network/backpropagation.rb +271 -0
- data/site/build/tmp/locationmap.xml +14 -14
- data/site/build/tmp/output.xmap +23 -23
- data/site/build/tmp/pluginlist2fetchbuild.xml +144 -144
- data/site/build/tmp/plugins-1.xml +0 -11
- data/site/build/tmp/plugins-2.xml +54 -0
- data/site/build/tmp/projfilters.properties +41 -41
- data/site/build/webapp/WEB-INF/logs/core.log +681 -788
- data/site/build/webapp/WEB-INF/logs/error.log +281 -248
- data/site/build/webapp/WEB-INF/logs/sitemap.log +1015 -0
- data/site/src/documentation/content/xdocs/forum.html +9 -0
- data/site/src/documentation/content/xdocs/geneticAlgorithms.xml +82 -68
- data/site/src/documentation/content/xdocs/index.xml +47 -18
- data/site/src/documentation/content/xdocs/machineLearning.xml +10 -9
- data/site/src/documentation/content/xdocs/neuralNetworks.xml +60 -36
- data/site/src/documentation/content/xdocs/site.xml +8 -5
- data/site/src/documentation/content/xdocs/svn.xml +11 -1
- data/site/src/documentation/resources/images/Thumbs.db +0 -0
- data/site/src/documentation/resources/images/ai4r-logo.png +0 -0
- data/site/src/documentation/resources/images/genetic_algorithms_example.png +0 -0
- data/site/src/documentation/resources/images/jadeferret.png +0 -0
- data/site/src/documentation/resources/images/neural_network_example.png +0 -0
- data/site/src/documentation/resources/images/sub-dir/Thumbs.db +0 -0
- data/site/src/documentation/skinconf.xml +18 -18
- data/test/classifiers/id3_test.rb +206 -0
- data/test/classifiers/one_r_test.rb +62 -0
- data/test/classifiers/prism_test.rb +83 -0
- data/test/classifiers/zero_r_test.rb +48 -0
- data/test/genetic_algorithm/chromosome_test.rb +41 -38
- data/test/genetic_algorithm/genetic_algorithm_test.rb +64 -61
- data/test/neural_network/backpropagation_test.rb +20 -18
- metadata +109 -199
- data/lib/decision_tree/id3.rb +0 -354
- data/lib/genetic_algorithm/genetic_algorithm.rb +0 -268
- data/lib/neural_network/backpropagation.rb +0 -264
- data/site/build/site/en/broken-links.xml +0 -2
- data/site/build/site/en/downloads.html +0 -187
- data/site/build/site/en/downloads.pdf +0 -151
- data/site/build/site/en/geneticAlgorithms.html +0 -564
- data/site/build/site/en/geneticAlgorithms.pdf +0 -911
- data/site/build/site/en/images/ai4r-logo.png +0 -0
- data/site/build/site/en/images/built-with-forrest-button.png +0 -0
- data/site/build/site/en/images/c.png +0 -0
- data/site/build/site/en/images/c_wbn.png +0 -0
- data/site/build/site/en/images/c_wn.png +0 -0
- data/site/build/site/en/images/ero.gif +0 -0
- data/site/build/site/en/images/europe2.png +0 -0
- data/site/build/site/en/images/europe3.png +0 -0
- data/site/build/site/en/images/fitness.png +0 -0
- data/site/build/site/en/images/instruction_arrow.png +0 -0
- data/site/build/site/en/images/my_email.png +0 -0
- data/site/build/site/en/images/rubyforge.png +0 -0
- data/site/build/site/en/images/s.png +0 -0
- data/site/build/site/en/images/s_wbn.png +0 -0
- data/site/build/site/en/images/s_wn.png +0 -0
- data/site/build/site/en/images/sigmoid.png +0 -0
- data/site/build/site/en/images/t.png +0 -0
- data/site/build/site/en/images/t_wbn.png +0 -0
- data/site/build/site/en/images/t_wn.png +0 -0
- data/site/build/site/en/index.html +0 -258
- data/site/build/site/en/index.pdf +0 -306
- data/site/build/site/en/linkmap.html +0 -231
- data/site/build/site/en/linkmap.pdf +0 -94
- data/site/build/site/en/locationmap.xml +0 -72
- data/site/build/site/en/machineLearning.html +0 -325
- data/site/build/site/en/machineLearning.pdf +0 -337
- data/site/build/site/en/neuralNetworks.html +0 -446
- data/site/build/site/en/neuralNetworks.pdf +0 -604
- data/site/build/site/en/skin/CommonMessages_de.xml +0 -23
- data/site/build/site/en/skin/CommonMessages_en_US.xml +0 -23
- data/site/build/site/en/skin/CommonMessages_es.xml +0 -23
- data/site/build/site/en/skin/CommonMessages_fr.xml +0 -23
- data/site/build/site/en/skin/basic.css +0 -166
- data/site/build/site/en/skin/breadcrumbs-optimized.js +0 -90
- data/site/build/site/en/skin/breadcrumbs.js +0 -237
- data/site/build/site/en/skin/fontsize.js +0 -166
- data/site/build/site/en/skin/getBlank.js +0 -40
- data/site/build/site/en/skin/getMenu.js +0 -45
- data/site/build/site/en/skin/images/README.txt +0 -1
- data/site/build/site/en/skin/images/add.jpg +0 -0
- data/site/build/site/en/skin/images/built-with-forrest-button.png +0 -0
- data/site/build/site/en/skin/images/chapter.gif +0 -0
- data/site/build/site/en/skin/images/chapter_open.gif +0 -0
- data/site/build/site/en/skin/images/current.gif +0 -0
- data/site/build/site/en/skin/images/error.png +0 -0
- data/site/build/site/en/skin/images/external-link.gif +0 -0
- data/site/build/site/en/skin/images/fix.jpg +0 -0
- data/site/build/site/en/skin/images/forrest-credit-logo.png +0 -0
- data/site/build/site/en/skin/images/hack.jpg +0 -0
- data/site/build/site/en/skin/images/header_white_line.gif +0 -0
- data/site/build/site/en/skin/images/info.png +0 -0
- data/site/build/site/en/skin/images/instruction_arrow.png +0 -0
- data/site/build/site/en/skin/images/label.gif +0 -0
- data/site/build/site/en/skin/images/page.gif +0 -0
- data/site/build/site/en/skin/images/pdfdoc.gif +0 -0
- data/site/build/site/en/skin/images/poddoc.png +0 -0
- data/site/build/site/en/skin/images/printer.gif +0 -0
- data/site/build/site/en/skin/images/rc-b-l-15-1body-2menu-3menu.png +0 -0
- data/site/build/site/en/skin/images/rc-b-r-15-1body-2menu-3menu.png +0 -0
- data/site/build/site/en/skin/images/rc-b-r-5-1header-2tab-selected-3tab-selected.png +0 -0
- data/site/build/site/en/skin/images/rc-t-l-5-1header-2searchbox-3searchbox.png +0 -0
- data/site/build/site/en/skin/images/rc-t-l-5-1header-2tab-selected-3tab-selected.png +0 -0
- data/site/build/site/en/skin/images/rc-t-l-5-1header-2tab-unselected-3tab-unselected.png +0 -0
- data/site/build/site/en/skin/images/rc-t-r-15-1body-2menu-3menu.png +0 -0
- data/site/build/site/en/skin/images/rc-t-r-5-1header-2searchbox-3searchbox.png +0 -0
- data/site/build/site/en/skin/images/rc-t-r-5-1header-2tab-selected-3tab-selected.png +0 -0
- data/site/build/site/en/skin/images/rc-t-r-5-1header-2tab-unselected-3tab-unselected.png +0 -0
- data/site/build/site/en/skin/images/remove.jpg +0 -0
- data/site/build/site/en/skin/images/rss.png +0 -0
- data/site/build/site/en/skin/images/spacer.gif +0 -0
- data/site/build/site/en/skin/images/success.png +0 -0
- data/site/build/site/en/skin/images/txtdoc.png +0 -0
- data/site/build/site/en/skin/images/update.jpg +0 -0
- data/site/build/site/en/skin/images/valid-html401.png +0 -0
- data/site/build/site/en/skin/images/vcss.png +0 -0
- data/site/build/site/en/skin/images/warning.png +0 -0
- data/site/build/site/en/skin/images/xmldoc.gif +0 -0
- data/site/build/site/en/skin/menu.js +0 -48
- data/site/build/site/en/skin/note.txt +0 -50
- data/site/build/site/en/skin/print.css +0 -54
- data/site/build/site/en/skin/profile.css +0 -163
- data/site/build/site/en/skin/prototype.js +0 -1257
- data/site/build/site/en/skin/screen.css +0 -587
- data/site/build/site/en/svn.html +0 -223
- data/site/build/site/en/svn.pdf +0 -239
- data/site/build/site/en/wholesite.pdf +0 -1686
- data/site/build/tmp/brokenlinks.xml +0 -2
- data/site/build/tmp/cocoon-work/cache-dir/cocoon-ehcache-1.data +0 -0
- data/site/build/tmp/cocoon-work/cache-dir/cocoon-ehcache-1.index +0 -0
- data/test/decision_tree/id3_test.rb +0 -209
@@ -0,0 +1,272 @@
|
|
1
|
+
#
|
2
|
+
# The GeneticAlgorithm module implements the GeneticSearch and Chromosome
|
3
|
+
# classes. The GeneticSearch is a generic class, and can be used to solved
|
4
|
+
# any kind of problems. The GeneticSearch class performs a stochastic search
|
5
|
+
# of the solution of a given problem.
|
6
|
+
#
|
7
|
+
# The Chromosome is "problem specific". Ai4r built-in Chromosomeclass was
|
8
|
+
# designed to model the Travelling salesman problem. If you want to solve other
|
9
|
+
# type of problem, you will have to modify the Chromosome class, by overwriting
|
10
|
+
# its fitness, reproduce, and mutate functions, to model you specific problem.
|
11
|
+
#
|
12
|
+
# Author:: Sergio Fierens
|
13
|
+
# License:: MPL 1.1
|
14
|
+
# Project:: ai4r
|
15
|
+
# Url:: http://ai4r.rubyforge.org/
|
16
|
+
#
|
17
|
+
# You can redistribute it and/or modify it under the terms of
|
18
|
+
# the Mozilla Public License version 1.1 as published by the
|
19
|
+
# Mozilla Foundation at http://www.mozilla.org/MPL/MPL-1.1.txt
|
20
|
+
|
21
|
+
module Ai4r
|
22
|
+
|
23
|
+
module GeneticAlgorithm
|
24
|
+
|
25
|
+
# This class is used to automatically:
#
# 1. Choose initial population
# 2. Evaluate the fitness of each individual in the population
# 3. Repeat
#    1. Select best-ranking individuals to reproduce
#    2. Breed new generation through crossover and mutation (genetic operations) and give birth to offspring
#    3. Evaluate the individual fitnesses of the offspring
#    4. Replace worst ranked part of population with offspring
# 4. Until termination
#
# If you want to customize the algorithm, you must modify any of the following classes:
# - Chromosome
# - Population
class GeneticSearch

  attr_accessor :population

  # initial_population_size:: number of chromosomes created by
  #                           generate_initial_population (via Chromosome.seed).
  # generations:: number of selection/reproduction/replacement cycles
  #               performed by run.
  def initialize(initial_population_size, generations)
    @population_size = initial_population_size
    @max_generation = generations
    @generation = 0
  end

  # Runs the complete search and returns the best chromosome found:
  #
  # 1. Choose initial population
  # 2. Evaluate the fitness of each individual in the population
  # 3. Repeat
  #    1. Select best-ranking individuals to reproduce
  #    2. Breed new generation through crossover and mutation (genetic operations) and give birth to offspring
  #    3. Evaluate the individual fitnesses of the offspring
  #    4. Replace worst ranked part of population with offspring
  # 4. Until termination
  # 5. Return the best chromosome
  def run
    generate_initial_population                    #Generate initial population
    @max_generation.times do
      selected_to_breed = selection                #Evaluates current population
      offsprings = reproduction selected_to_breed  #Generate the population for this new generation
      replace_worst_ranked offsprings
    end
    return best_chromosome
  end

  # Fills @population with @population_size chromosomes obtained from
  # Chromosome.seed.
  def generate_initial_population
    @population = []
    @population_size.times do
      population << Chromosome.seed
    end
  end

  # Select best-ranking individuals to reproduce.
  #
  # Selection is the stage of a genetic algorithm in which individual
  # genomes are chosen from a population for later breeding.
  # There are several generic selection algorithms, such as
  # tournament selection and roulette wheel selection. We implemented the
  # latter.
  #
  # Steps:
  #
  # 1. The fitness function is evaluated for each individual, providing fitness values
  # 2. The population is sorted by descending fitness values.
  # 3. The fitness values are then normalized. (Highest fitness gets 1, lowest fitness gets 0). The normalized value is stored in the "normalized_fitness" attribute of the chromosomes.
  # 4. A random number R is chosen. R is between 0 and the accumulated normalized value (all the normalized fitness values added together).
  # 5. The selected individual is the first one whose accumulated normalized value (its normalized value plus the normalized values of the chromosomes prior to it) is greater than R.
  # 6. We repeat steps 4 and 5, 2/3 times the population size.
  def selection
    @population.sort! { |a, b| b.fitness <=> a.fitness}
    best_fitness = @population[0].fitness
    worst_fitness = @population.last.fitness
    acum_fitness = 0
    if best_fitness-worst_fitness > 0
      @population.each do |chromosome|
        chromosome.normalized_fitness = (chromosome.fitness - worst_fitness)/(best_fitness-worst_fitness)
        acum_fitness += chromosome.normalized_fitness
      end
    else
      # BUG FIX: the original left acum_fitness at 0 in this branch, so when
      # every chromosome had the same fitness, select_random_individual was
      # handed a 0 target and always returned the first chromosome. Accumulate
      # the (uniform) normalized fitness here as well so selection stays
      # uniform over the whole population.
      @population.each do |chromosome|
        chromosome.normalized_fitness = 1
        acum_fitness += chromosome.normalized_fitness
      end
    end
    selected_to_breed = []
    ((2*@population_size)/3).times do
      selected_to_breed << select_random_individual(acum_fitness)
    end
    selected_to_breed
  end

  # We combine each pair of selected chromosomes using the method
  # Chromosome.reproduce
  #
  # The reproduction will also call the Chromosome.mutate method with
  # each member of the population. You should implement Chromosome.mutate
  # to only change (mutate) randomly. E.g. You could effectively change the
  # chromosome only if
  #     rand < ((1 - chromosome.normalized_fitness) * 0.4)
  def reproduction(selected_to_breed)
    offsprings = []
    0.upto(selected_to_breed.length/2-1) do |i|
      offsprings << Chromosome.reproduce(selected_to_breed[2*i], selected_to_breed[2*i+1])
    end
    @population.each do |individual|
      Chromosome.mutate(individual)
    end
    return offsprings
  end

  # Replace worst ranked part of population with offspring.
  # Assumes @population is sorted by descending fitness (done by selection).
  def replace_worst_ranked(offsprings)
    size = offsprings.length
    @population = @population [0..((-1*size)-1)] + offsprings
  end

  # Select the best chromosome in the population
  def best_chromosome
    the_best = @population[0]
    @population.each do |chromosome|
      the_best = chromosome if chromosome.fitness > the_best.fitness
    end
    return the_best
  end

  private
  # Roulette wheel draw: picks the first chromosome whose accumulated
  # normalized fitness reaches a random target in [0, acum_fitness).
  def select_random_individual(acum_fitness)
    select_random_target = acum_fitness * rand
    local_acum = 0
    @population.each do |chromosome|
      local_acum += chromosome.normalized_fitness
      return chromosome if local_acum >= select_random_target
    end
  end

end
|
158
|
+
|
159
|
+
# A Chromosome is a representation of an individual solution for a specific
# problem. You will have to redefine your Chromosome representation for each
# particular problem, along with its fitness, mutate, reproduce, and seed
# functions.
class Chromosome

  attr_accessor :data
  attr_accessor :normalized_fitness

  # data:: an ordered list of node indices, i.e. a tour for the built-in
  #        Travelling Salesman model.
  def initialize(data)
    @data = data
  end

  # The fitness function quantifies the optimality of a solution
  # (that is, a chromosome) in a genetic algorithm so that that particular
  # chromosome may be ranked against all the other chromosomes.
  #
  # Optimal chromosomes, or at least chromosomes which are more optimal,
  # are allowed to breed and mix their datasets by any of several techniques,
  # producing a new generation that will (hopefully) be even better.
  #
  # Returns -1 * tour cost (higher is better); the value is memoized
  # in @fitness.
  def fitness
    return @fitness if @fitness
    last_token = @data[0]
    cost = 0
    @data[1..-1].each do |token|
      cost += @@costs[last_token][token]
      last_token = token
    end
    @fitness = -1 * cost
    return @fitness
  end

  # Mutation is a function used to maintain genetic diversity from one
  # generation of a population of chromosomes to the next. It is analogous
  # to biological mutation.
  #
  # The purpose of mutation in GAs is to allow the
  # algorithm to avoid local minima by preventing the population of
  # chromosomes from becoming too similar to each other, thus slowing or even
  # stopping evolution.
  #
  # Calling the mutate function will "probably" slightly change a chromosome
  # randomly.
  #
  # This implementation of "mutation" will (probably) reverse the
  # order of 2 consecutive random nodes
  # (e.g. from [0, 1, 2, 4] to [0, 2, 1, 4]) if:
  #     rand < ((1 - chromosome.normalized_fitness) * 0.3)
  def self.mutate(chromosome)
    if chromosome.normalized_fitness && rand < ((1 - chromosome.normalized_fitness) * 0.3)
      data = chromosome.data
      index = rand(data.length-1)
      data[index], data[index+1] = data[index+1], data[index]
      chromosome.data = data
      # BUG FIX: the original assigned `@fitness = nil`, but inside a class
      # method that sets a class-level instance variable, leaving the mutated
      # chromosome's memoized fitness stale. Clear the instance's cache so
      # fitness is recomputed for the new tour.
      chromosome.instance_variable_set(:@fitness, nil)
    end
  end

  # Reproduction is used to vary the programming of a chromosome or
  # chromosomes from one generation to the next. There are several ways to
  # combine two chromosomes: One-point crossover, Two-point crossover,
  # "Cut and splice", edge recombination, and more.
  #
  # The method is usually dependent on the problem domain.
  # In this case, we have implemented edge recombination, which is the
  # most used reproduction algorithm for the Travelling salesman problem.
  def self.reproduce(a, b)
    data_size = @@costs[0].length
    available = []
    0.upto(data_size-1) { |n| available << n }
    token = a.data[0]
    spawn = [token]
    available.delete(token)
    while available.length > 0 do
      #Select next
      if token != b.data.last && available.include?(b.data[b.data.index(token)+1])
        next_token = b.data[b.data.index(token)+1]
      elsif token != a.data.last && available.include?(a.data[a.data.index(token)+1])
        next_token = a.data[a.data.index(token)+1]
      else
        next_token = available[rand(available.length)]
      end
      #Add to spawn
      token = next_token
      available.delete(token)
      spawn << next_token
      a, b = b, a if rand < 0.4
    end
    return Chromosome.new(spawn)
  end

  # Initializes an individual solution (chromosome) for the initial
  # population. Usually the chromosome is generated randomly, but you can
  # use some problem domain knowledge to generate better initial solutions.
  def self.seed
    data_size = @@costs[0].length
    available = []
    0.upto(data_size-1) { |n| available << n }
    seed = []
    while available.length > 0 do
      index = rand(available.length)
      seed << available.delete_at(index)
    end
    return Chromosome.new(seed)
  end

  # costs:: square matrix where costs[i][j] is the cost of travelling
  #         from node i to node j. Shared by all chromosomes.
  def self.set_cost_matrix(costs)
    @@costs = costs
  end
end
|
269
|
+
|
270
|
+
end
|
271
|
+
|
272
|
+
end
|
@@ -0,0 +1,271 @@
|
|
1
|
+
# The utility of artificial neural network
|
2
|
+
# models lies in the fact that they can be used
|
3
|
+
# to infer a function from observations.
|
4
|
+
# This is particularly useful in applications
|
5
|
+
# where the complexity of the data or task makes the
|
6
|
+
# design of such a function by hand impractical.
|
7
|
+
# Neural Networks are being used in many businesses and applications. Their
|
8
|
+
# ability to learn by example makes them attractive in environments where
|
9
|
+
# the business rules are either not well defined or are hard to enumerate and
|
10
|
+
# define. Many people believe that Neural Networks can only solve toy problems.
|
11
|
+
# Give them a try, and let you decide if they are good enough to solve your
|
12
|
+
# needs.
|
13
|
+
#
|
14
|
+
# In this module you will find an implementation of neural networks
|
15
|
+
# using the Backpropagation is a supervised learning technique (described
|
16
|
+
# by Paul Werbos in 1974, and further developed by David E.
|
17
|
+
# Rumelhart, Geoffrey E. Hinton and Ronald J. Williams in 1986)
|
18
|
+
#
|
19
|
+
# More about neural networks and backpropagation:
|
20
|
+
#
|
21
|
+
# * http://en.wikipedia.org/wiki/Backpropagation
|
22
|
+
# * http://en.wikipedia.org/wiki/Neural_networks
|
23
|
+
#
|
24
|
+
# Author:: Sergio Fierens
|
25
|
+
# License:: MPL 1.1
|
26
|
+
# Project:: ai4r
|
27
|
+
# Url:: http://ai4r.rubyforge.org/
|
28
|
+
#
|
29
|
+
# Specials thanks to John Miller, for several bugs fixes and comments in the
|
30
|
+
# Backpropagation implementation
|
31
|
+
#
|
32
|
+
# You can redistribute it and/or modify it under the terms of
|
33
|
+
# the Mozilla Public License version 1.1 as published by the
|
34
|
+
# Mozilla Foundation at http://www.mozilla.org/MPL/MPL-1.1.txt
|
35
|
+
#
|
36
|
+
|
37
|
+
module Ai4r
|
38
|
+
|
39
|
+
module NeuralNetwork
|
40
|
+
|
41
|
+
# = Introduction
#
# This is an implementation of neural networks trained with the
# Backpropagation supervised learning technique (described by Paul Werbos
# in 1974, and further developed by David E. Rumelhart, Geoffrey E. Hinton
# and Ronald J. Williams in 1986).
#
# = How to use it
#
#   # Create the network
#   net = Backpropagation.new([4, 3, 2])  # 4 inputs
#                                         # 1 hidden layer with 3 neurons,
#                                         # 2 outputs
#   # Train the network
#   1.upto(100) do |i|
#     net.train(example[i], result[i])
#   end
#
#   # Use it: Evaluate data with the trained network
#   net.eval([12, 48, 12, 25])  # => [0.86, 0.01]
#
class Backpropagation

  DEFAULT_BETA = 0.5
  DEFAULT_LAMBDA = 0.25
  DEFAULT_THRESHOLD = 0.66

  # Creates a new network, specifying its architecture.
  # E.g.
  #
  #   net = Backpropagation.new([4, 3, 2])     # 4 inputs, 1 hidden layer
  #                                            # with 3 neurons, 2 outputs
  #   net = Backpropagation.new([2, 3, 3, 4])  # 2 inputs, 2 hidden layers
  #                                            # with 3 neurons each, 4 outputs
  #   net = Backpropagation.new([2, 1])        # 2 inputs, no hidden layer,
  #                                            # 1 output
  #
  # Optionally you can customize certain parameters:
  #
  # threshold:: a real number added as bias to every non-input neuron.
  #   Experiments have shown that best values are between 0.25 and 1.
  # lambda:: the learning rate, a real number usually between 0.05 and 0.25.
  # momentum:: damps oscillation during learning, converging to a solution
  #   in fewer iterations.
  def initialize(layer_sizes, threshold=DEFAULT_THRESHOLD, lambda=DEFAULT_LAMBDA, momentum=DEFAULT_BETA)
    @neurons = []
    # Layers are built back to front so that every neuron can be handed the
    # layer it feeds into (the most recently built one).
    layer_sizes.reverse_each do |size|
      feeds_into = @neurons.last
      @neurons << Array.new(size) { Neuron.new(feeds_into, threshold, lambda, momentum) }
    end
    @neurons.reverse!
  end

  # Evaluates the input, returning the states of the output layer.
  # E.g.
  #   net = Backpropagation.new([4, 3, 2])
  #   net.eval([25, 32.3, 12.8, 1.5])
  #   # => [0.83, 0.03]
  def eval(input)
    # Reject inputs that do not match the input layer width.
    unless input.length == @neurons.first.length
      raise "Wrong input dimension. Expected: #{@neurons.first.length}, received: #{input.length}"
    end
    # Present the input to the first layer.
    input.each_with_index do |value, index|
      @neurons.first[index].propagate(value)
    end
    # Forward-propagate through the remaining layers.
    @neurons.drop(1).each do |layer|
      layer.each { |neuron| neuron.propagate }
    end
    @neurons.last.map { |neuron| neuron.state }
  end

  # This method trains the network using the backpropagation algorithm.
  #
  # input:: the network's input.
  # output:: expected output for the given input.
  #
  # This method returns the network error (not an absolute amount,
  # the difference between the real output and the expected output).
  def train(input, output)
    # Reject targets that do not match the output layer width.
    unless output.length == @neurons.last.length
      raise "Wrong output dimension. Expected: #{@neurons.last.length}, received: #{output.length}"
    end
    # Forward pass.
    eval(input)
    # Attach the expected values to the output layer.
    output.each_with_index do |value, index|
      @neurons.last[index].expected_output = value
    end
    # Back-propagate the error, output layer first.
    @neurons.reverse_each do |layer|
      layer.each { |neuron| neuron.calc_error }
    end
    # Update the weights.
    @neurons.each do |layer|
      layer.each { |neuron| neuron.change_weights }
    end
    # Return the per-output error.
    @neurons.last.map { |neuron| neuron.calc_error }
  end

  private
  # Debugging helper: dumps every neuron's weight vector to stdout.
  def print_weight
    @neurons.each_with_index do |layer, layer_index|
      layer.each_with_index do |neuron, neuron_index|
        puts "L #{layer_index} N #{neuron_index} W #{neuron.w.inspect}"
      end
    end
  end

end
|
164
|
+
|
165
|
+
|
166
|
+
# A single neuron. Neurons are wired forward: each neuron holds the array of
# neurons it feeds (@childs, nil for output-layer neurons) and one weight per
# outgoing connection (@w, parallel to @childs).
class Neuron

  attr_accessor :state
  attr_accessor :error
  attr_accessor :expected_output
  attr_accessor :w
  attr_accessor :x

  # childs:: the next layer's neurons, or nil for output neurons.
  # threshold:: bias added to the accumulated input before activation.
  # lambda:: learning rate used in change_weights.
  # momentum:: weight given to each connection's previous delta.
  def initialize(childs, threshold, lambda, momentum)
    #instance state
    @w = nil
    @childs = childs
    @error = nil
    @state = 0
    @pushed = 0
    @x = 0
    #Parameters
    @lambda = lambda
    @momentum = momentum
    @threshold = threshold
    #init w
    if(childs)
      @w = []
      childs.each { @w << init_weight }
      # BUG FIX: the original kept a single scalar @last_delta shared by every
      # outgoing weight, so the momentum term in change_weights applied the
      # delta of the PREVIOUS weight in the loop (or of the last weight from
      # the previous call) instead of this weight's own previous delta.
      # Keep one previous delta per outgoing connection.
      @last_delta = Array.new(childs.length, 0)
    end
  end

  # Accumulates weighted input pushed from the previous layer.
  def push(x)
    @pushed += x
  end

  # With an argument: acts as an input neuron, forwarding the raw input
  # (times each outgoing weight) to the next layer.
  # Without an argument: consumes the accumulated @pushed input, applies the
  # threshold and the activation function f, and forwards the result.
  def propagate(input = nil)
    if(input)
      input = input.to_f
      @x = input
      @state = input
      @childs.each_index do |child_index|
        @childs[child_index].push(input * @w[child_index])
      end
    else
      @x = @pushed + @threshold
      @pushed = 0
      @state = Neuron.f(@x)
      if @childs
        @childs.each_index do |child_index|
          @childs[child_index].push(@state * @w[child_index])
        end
      end
    end
  end

  # Output neurons: error is the difference with the expected output.
  # Inner neurons: error is the weighted sum of the next layer's errors.
  # Returns the computed error.
  def calc_error
    if(!@childs && @expected_output)
      @error = (@expected_output - @state)
    elsif(@childs)
      @error = 0
      @childs.each_index do |child_index|
        @error += (@childs[child_index].error * @w[child_index])
      end
    end
  end

  # Gradient-descent weight update with a per-connection momentum term:
  #   w += lambda * child_error * state * f'(child_x) + momentum * last_delta
  def change_weights
    return if !@childs
    @childs.each_index do |child_index|
      delta = @lambda * @childs[child_index].error * (@state) * Neuron.f_prime(@childs[child_index].x)
      @w[child_index] += (delta + @momentum * @last_delta[child_index])
      @last_delta[child_index] = delta
    end
  end

  # Propagation function.
  # By default:
  #   f(x) = 1/(1 + e^(-x))
  # You can override it with any derivable function.
  # A usually useful one is:
  #   f(x) = x
  # If you override this function, you will have to override
  # f_prime too.
  def self.f(x)
    return 1/(1+Math.exp(-1*(x)))
  end

  # Derived function of the propagation function (self.f)
  # By default:
  #   f_prime(x) = f(x)(1 - f(x))
  # If you override f(x) with:
  #   f(x) = x
  # Then you must override f_prime as:
  #   f_prime(x) = 1
  def self.f_prime(x)
    val = f(x)
    return val*(1-val)
  end

  private
  # Random initial weight in [0, 0.25).
  def init_weight
    rand/4
  end

end
|
268
|
+
|
269
|
+
end
|
270
|
+
|
271
|
+
end
|