nirvdrum-ai4r 1.9.1

Sign up to get free protection for your applications and to get access to all the features.
Files changed (150) hide show
  1. data/.gitignore +1 -0
  2. data/.rakeTasks +7 -0
  3. data/README.rdoc +56 -0
  4. data/Rakefile.rb +42 -0
  5. data/VERSION +1 -0
  6. data/ai4r.gemspec +221 -0
  7. data/change_log +49 -0
  8. data/examples/classifiers/id3_data.csv +121 -0
  9. data/examples/classifiers/id3_example.rb +29 -0
  10. data/examples/classifiers/naive_bayes_data.csv +11 -0
  11. data/examples/classifiers/naive_bayes_example.rb +16 -0
  12. data/examples/classifiers/results.txt +31 -0
  13. data/examples/genetic_algorithm/genetic_algorithm_example.rb +37 -0
  14. data/examples/genetic_algorithm/travel_cost.csv +16 -0
  15. data/examples/neural_network/backpropagation_example.rb +67 -0
  16. data/examples/neural_network/patterns_with_base_noise.rb +68 -0
  17. data/examples/neural_network/patterns_with_noise.rb +66 -0
  18. data/examples/neural_network/training_patterns.rb +68 -0
  19. data/examples/neural_network/xor_example.rb +35 -0
  20. data/examples/som/som_data.rb +156 -0
  21. data/examples/som/som_multi_node_example.rb +22 -0
  22. data/examples/som/som_single_example.rb +24 -0
  23. data/lib/ai4r.rb +32 -0
  24. data/lib/ai4r/classifiers/classifier.rb +59 -0
  25. data/lib/ai4r/classifiers/hyperpipes.rb +118 -0
  26. data/lib/ai4r/classifiers/id3.rb +326 -0
  27. data/lib/ai4r/classifiers/multilayer_perceptron.rb +135 -0
  28. data/lib/ai4r/classifiers/naive_bayes.rb +259 -0
  29. data/lib/ai4r/classifiers/one_r.rb +110 -0
  30. data/lib/ai4r/classifiers/prism.rb +197 -0
  31. data/lib/ai4r/classifiers/zero_r.rb +73 -0
  32. data/lib/ai4r/clusterers/average_linkage.rb +59 -0
  33. data/lib/ai4r/clusterers/bisecting_k_means.rb +93 -0
  34. data/lib/ai4r/clusterers/centroid_linkage.rb +66 -0
  35. data/lib/ai4r/clusterers/clusterer.rb +61 -0
  36. data/lib/ai4r/clusterers/complete_linkage.rb +67 -0
  37. data/lib/ai4r/clusterers/diana.rb +139 -0
  38. data/lib/ai4r/clusterers/k_means.rb +126 -0
  39. data/lib/ai4r/clusterers/median_linkage.rb +61 -0
  40. data/lib/ai4r/clusterers/single_linkage.rb +194 -0
  41. data/lib/ai4r/clusterers/ward_linkage.rb +64 -0
  42. data/lib/ai4r/clusterers/weighted_average_linkage.rb +61 -0
  43. data/lib/ai4r/data/data_set.rb +266 -0
  44. data/lib/ai4r/data/parameterizable.rb +64 -0
  45. data/lib/ai4r/data/proximity.rb +100 -0
  46. data/lib/ai4r/data/statistics.rb +77 -0
  47. data/lib/ai4r/experiment/classifier_evaluator.rb +95 -0
  48. data/lib/ai4r/genetic_algorithm/genetic_algorithm.rb +270 -0
  49. data/lib/ai4r/neural_network/backpropagation.rb +293 -0
  50. data/lib/ai4r/neural_network/hopfield.rb +149 -0
  51. data/lib/ai4r/som/layer.rb +68 -0
  52. data/lib/ai4r/som/node.rb +96 -0
  53. data/lib/ai4r/som/som.rb +155 -0
  54. data/lib/ai4r/som/two_phase_layer.rb +90 -0
  55. data/site/forrest.properties +152 -0
  56. data/site/forrest.properties.dispatcher.properties +25 -0
  57. data/site/forrest.properties.xml +29 -0
  58. data/site/src/documentation/README.txt +7 -0
  59. data/site/src/documentation/classes/CatalogManager.properties +62 -0
  60. data/site/src/documentation/content/locationmap.xml +72 -0
  61. data/site/src/documentation/content/xdocs/downloads.html +9 -0
  62. data/site/src/documentation/content/xdocs/geneticAlgorithms.xml +294 -0
  63. data/site/src/documentation/content/xdocs/index.xml +155 -0
  64. data/site/src/documentation/content/xdocs/machineLearning.xml +131 -0
  65. data/site/src/documentation/content/xdocs/neuralNetworks.xml +270 -0
  66. data/site/src/documentation/content/xdocs/site.xml +54 -0
  67. data/site/src/documentation/content/xdocs/sourceCode.xml +43 -0
  68. data/site/src/documentation/content/xdocs/tabs.xml +35 -0
  69. data/site/src/documentation/resources/images/ai4r-logo.png +0 -0
  70. data/site/src/documentation/resources/images/c.png +0 -0
  71. data/site/src/documentation/resources/images/c_wbn.png +0 -0
  72. data/site/src/documentation/resources/images/c_wn.png +0 -0
  73. data/site/src/documentation/resources/images/ellipse-2.svg +30 -0
  74. data/site/src/documentation/resources/images/ero.gif +0 -0
  75. data/site/src/documentation/resources/images/europe2.png +0 -0
  76. data/site/src/documentation/resources/images/europe3.png +0 -0
  77. data/site/src/documentation/resources/images/fitness.png +0 -0
  78. data/site/src/documentation/resources/images/genetic_algorithms_example.png +0 -0
  79. data/site/src/documentation/resources/images/icon-a.png +0 -0
  80. data/site/src/documentation/resources/images/icon-b.png +0 -0
  81. data/site/src/documentation/resources/images/icon.png +0 -0
  82. data/site/src/documentation/resources/images/jadeferret.png +0 -0
  83. data/site/src/documentation/resources/images/my_email.png +0 -0
  84. data/site/src/documentation/resources/images/neural_network_example.png +0 -0
  85. data/site/src/documentation/resources/images/project-logo.png +0 -0
  86. data/site/src/documentation/resources/images/rubyforge.png +0 -0
  87. data/site/src/documentation/resources/images/s.png +0 -0
  88. data/site/src/documentation/resources/images/s_wbn.png +0 -0
  89. data/site/src/documentation/resources/images/s_wn.png +0 -0
  90. data/site/src/documentation/resources/images/sigmoid.png +0 -0
  91. data/site/src/documentation/resources/images/sub-dir/icon-c.png +0 -0
  92. data/site/src/documentation/resources/images/t.png +0 -0
  93. data/site/src/documentation/resources/images/t_wbn.png +0 -0
  94. data/site/src/documentation/resources/images/t_wn.png +0 -0
  95. data/site/src/documentation/resources/schema/catalog.xcat +29 -0
  96. data/site/src/documentation/resources/schema/hello-v10.dtd +51 -0
  97. data/site/src/documentation/resources/schema/symbols-project-v10.ent +26 -0
  98. data/site/src/documentation/resources/stylesheets/hello2document.xsl +33 -0
  99. data/site/src/documentation/sitemap.xmap +66 -0
  100. data/site/src/documentation/skinconf.xml +418 -0
  101. data/site/src/documentation/translations/langcode.xml +29 -0
  102. data/site/src/documentation/translations/languages_de.xml +24 -0
  103. data/site/src/documentation/translations/languages_en.xml +24 -0
  104. data/site/src/documentation/translations/languages_es.xml +22 -0
  105. data/site/src/documentation/translations/languages_fr.xml +24 -0
  106. data/site/src/documentation/translations/languages_nl.xml +24 -0
  107. data/site/src/documentation/translations/menu.xml +33 -0
  108. data/site/src/documentation/translations/menu_af.xml +33 -0
  109. data/site/src/documentation/translations/menu_de.xml +33 -0
  110. data/site/src/documentation/translations/menu_es.xml +33 -0
  111. data/site/src/documentation/translations/menu_fr.xml +33 -0
  112. data/site/src/documentation/translations/menu_it.xml +33 -0
  113. data/site/src/documentation/translations/menu_nl.xml +33 -0
  114. data/site/src/documentation/translations/menu_no.xml +33 -0
  115. data/site/src/documentation/translations/menu_ru.xml +33 -0
  116. data/site/src/documentation/translations/menu_sk.xml +33 -0
  117. data/site/src/documentation/translations/tabs.xml +22 -0
  118. data/site/src/documentation/translations/tabs_de.xml +22 -0
  119. data/site/src/documentation/translations/tabs_es.xml +22 -0
  120. data/site/src/documentation/translations/tabs_fr.xml +22 -0
  121. data/site/src/documentation/translations/tabs_nl.xml +22 -0
  122. data/test/classifiers/hyperpipes_test.rb +84 -0
  123. data/test/classifiers/id3_test.rb +208 -0
  124. data/test/classifiers/multilayer_perceptron_test.rb +79 -0
  125. data/test/classifiers/naive_bayes_test.rb +43 -0
  126. data/test/classifiers/one_r_test.rb +62 -0
  127. data/test/classifiers/prism_test.rb +85 -0
  128. data/test/classifiers/zero_r_test.rb +50 -0
  129. data/test/clusterers/average_linkage_test.rb +51 -0
  130. data/test/clusterers/bisecting_k_means_test.rb +66 -0
  131. data/test/clusterers/centroid_linkage_test.rb +53 -0
  132. data/test/clusterers/complete_linkage_test.rb +57 -0
  133. data/test/clusterers/diana_test.rb +69 -0
  134. data/test/clusterers/k_means_test.rb +100 -0
  135. data/test/clusterers/median_linkage_test.rb +53 -0
  136. data/test/clusterers/single_linkage_test.rb +122 -0
  137. data/test/clusterers/ward_linkage_test.rb +53 -0
  138. data/test/clusterers/weighted_average_linkage_test.rb +53 -0
  139. data/test/data/data_set.csv +121 -0
  140. data/test/data/data_set_test.rb +96 -0
  141. data/test/data/proximity_test.rb +81 -0
  142. data/test/data/statistics_data_set.csv +5 -0
  143. data/test/data/statistics_test.rb +65 -0
  144. data/test/experiment/classifier_evaluator_test.rb +76 -0
  145. data/test/genetic_algorithm/chromosome_test.rb +58 -0
  146. data/test/genetic_algorithm/genetic_algorithm_test.rb +81 -0
  147. data/test/neural_network/backpropagation_test.rb +69 -0
  148. data/test/neural_network/hopfield_test.rb +72 -0
  149. data/test/som/som_test.rb +97 -0
  150. metadata +238 -0
@@ -0,0 +1,293 @@
1
+ # Author:: Sergio Fierens
2
+ # License:: MPL 1.1
3
+ # Project:: ai4r
4
+ # Url:: http://ai4r.rubyforge.org/
5
+ #
6
+ # You can redistribute it and/or modify it under the terms of
7
+ # the Mozilla Public License version 1.1 as published by the
8
+ # Mozilla Foundation at http://www.mozilla.org/MPL/MPL-1.1.txt
9
+
10
+ require File.dirname(__FILE__) + '/../data/parameterizable'
11
+
12
+ module Ai4r
13
+
14
+ # Artificial Neural Networks are mathematical or computational models based on
15
+ # biological neural networks.
16
+ #
17
+ # More about neural networks:
18
+ #
19
+ # * http://en.wikipedia.org/wiki/Artificial_neural_network
20
+ #
21
+ module NeuralNetwork
22
+
23
# = Introduction
#
# This is an implementation of a multilayer perceptron network, using
# the backpropagation algorithm for learning.
#
# Backpropagation is a supervised learning technique (described
# by Paul Werbos in 1974, and further developed by David E.
# Rumelhart, Geoffrey E. Hinton and Ronald J. Williams in 1986)
#
# = Features
#
# * Support for any network architecture (number of layers and neurons)
# * Configurable propagation function
# * Optional usage of bias
# * Configurable momentum
# * Configurable learning rate
# * Configurable initial weight function
# * 100% ruby code, no external dependency
#
# = Parameters
#
# Use class method get_parameters_info to obtain details on the algorithm
# parameters. Use set_parameters to set values for these parameters.
#
# * :disable_bias => If true, the algorithm will not use bias nodes.
#   False by default.
# * :initial_weight_function => f(n, i, j) must return the initial
#   weight for the connection between the node i in layer n, and node j in
#   layer n+1. By default a random number in [-1, 1) range.
# * :propagation_function => By default:
#   lambda { |x| 1/(1+Math.exp(-1*(x))) }
# * :derivative_propagation_function => Derivative of the propagation
#   function, based on propagation function output.
#   By default: lambda { |y| y*(1-y) }, where y=propagation_function(x)
# * :learning_rate => By default 0.25
# * :momentum => By default 0.1. Set this parameter to 0 to disable
#   momentum
#
# = How to use it
#
#   # Create the network with 4 inputs, 1 hidden layer with 3 neurons,
#   # and 2 outputs
#   net = Ai4r::NeuralNetwork::Backpropagation.new([4, 3, 2])
#
#   # Train the network
#   1000.times do |i|
#     net.train(example[i], result[i])
#   end
#
#   # Use it: Evaluate data with the trained network
#   net.eval([12, 48, 12, 25])
#     => [0.86, 0.01]
#
# More about multilayer perceptron neural networks and backpropagation:
#
# * http://en.wikipedia.org/wiki/Backpropagation
# * http://en.wikipedia.org/wiki/Multilayer_perceptron
#
# = About the project
# Author::    Sergio Fierens
# License::   MPL 1.1
# Url::       http://ai4r.rubyforge.org
class Backpropagation

  include Ai4r::Data::Parameterizable

  # NOTE(review): these description strings are runtime metadata exposed
  # via get_parameters_info, so they are kept verbatim (including the
  # original "alforithm"/"conection" spellings) to avoid changing
  # runtime output.
  parameters_info :disable_bias => "If true, the alforithm will not use "+
        "bias nodes. False by default.",
      :initial_weight_function => "f(n, i, j) must return the initial "+
        "weight for the conection between the node i in layer n, and "+
        "node j in layer n+1. By default a random number in [-1, 1) range.",
      :propagation_function => "By default: " +
        "lambda { |x| 1/(1+Math.exp(-1*(x))) }",
      :derivative_propagation_function => "Derivative of the propagation "+
        "function, based on propagation function output. By default: " +
        "lambda { |y| y*(1-y) }, where y=propagation_function(x)",
      :learning_rate => "By default 0.25",
      :momentum => "By default 0.1. Set this parameter to 0 to disable "+
        "momentum."

  # structure:        layer sizes given at construction, e.g. [4, 3, 2]
  # weights:          weights[n][i][j] connects node i of layer n to
  #                   node j of layer n+1 (bias node rows included)
  # activation_nodes: current activation value of every node, one inner
  #                   array per layer (plus a trailing 1.0 bias node on
  #                   every layer but the last, unless bias is disabled)
  attr_accessor :structure, :weights, :activation_nodes

  # Creates a new network specifying its architecture as an array of
  # layer sizes (inputs first, outputs last).
  # E.g.
  #
  #   net = Backpropagation.new([4, 3, 2])    # 4 inputs
  #                                           # 1 hidden layer with 3 neurons,
  #                                           # 2 outputs
  #   net = Backpropagation.new([2, 3, 3, 4]) # 2 inputs
  #                                           # 2 hidden layers with 3 neurons each,
  #                                           # 4 outputs
  #   net = Backpropagation.new([2, 1])       # 2 inputs
  #                                           # No hidden layer
  #                                           # 1 output
  def initialize(network_structure)
    @structure = network_structure
    # Default initial weights: uniform in [-1, 1) with 0.001 granularity.
    @initial_weight_function = lambda { |n, i, j| ((rand 2000)/1000.0) - 1}
    # Default: logistic sigmoid. An alternative is the hyperbolic tangent:
    # lambda { |x| Math.tanh(x) }
    @propagation_function = lambda { |x| 1/(1+Math.exp(-1*(x))) }
    # Derivative expressed in terms of the sigmoid OUTPUT y. The tanh
    # counterpart would be: lambda { |y| 1.0 - y**2 }
    @derivative_propagation_function = lambda { |y| y*(1-y) }
    @disable_bias = false
    @learning_rate = 0.25
    @momentum = 0.1
  end

  # Evaluates the input and returns a copy of the output layer
  # activations. The network is lazily initialized on first use.
  # E.g.
  #   net = Backpropagation.new([4, 3, 2])
  #   net.eval([25, 32.3, 12.8, 1.5])
  #     # =>  [0.83, 0.03]
  def eval(input_values)
    check_input_dimension(input_values.length)
    # Lazy initialization: weights are only built on the first eval/train.
    init_network if !@weights
    feedforward(input_values)
    # Clone so callers cannot mutate the internal activation state.
    return @activation_nodes.last.clone
  end

  # This method trains the network using the backpropagation algorithm.
  #
  # input: Networks input
  #
  # output: Expected output for the given input.
  #
  # This method returns the network error:
  # => 0.5 * sum( (expected_value[i] - output_value[i])**2 )
  def train(inputs, outputs)
    eval(inputs)
    backpropagate(outputs)
    calculate_error(outputs)
  end

  # Initialize (or reset) activation nodes and weights, with the
  # provided net structure and parameters. Returns self.
  def init_network
    init_activation_nodes
    init_weights
    init_last_changes
    return self
  end

  protected

  # Propagate error backwards: compute output-layer deltas, then hidden
  # layer deltas, then apply the weight updates.
  # Raises ArgumentError if the expected output size does not match the
  # output layer size.
  def backpropagate(expected_output_values)
    check_output_dimension(expected_output_values.length)
    calculate_output_deltas(expected_output_values)
    calculate_internal_deltas
    update_weights
  end

  # Propagate values forward through every layer.
  # Only the first input_values.length entries of the input layer are
  # written, and only @structure[n+1] entries of each target layer, so
  # any trailing bias node keeps its constant 1.0 activation.
  def feedforward(input_values)
    input_values.each_index do |input_index|
      @activation_nodes.first[input_index] = input_values[input_index]
    end
    @weights.each_index do |n|
      @structure[n+1].times do |j|
        sum = 0.0
        @activation_nodes[n].each_index do |i|
          sum += (@activation_nodes[n][i] * @weights[n][i][j])
        end
        @activation_nodes[n+1][j] = @propagation_function.call(sum)
      end
    end
  end

  # Initialize neurons structure: one array of 1.0 activations per layer.
  # Unless bias is disabled, every layer but the output one gets an
  # extra constant bias node appended (value 1.0).
  def init_activation_nodes
    @activation_nodes = Array.new(@structure.length) do |n|
      Array.new(@structure[n], 1.0)
    end
    if not disable_bias
      @activation_nodes[0...-1].each {|layer| layer << 1.0 }
    end
  end

  # Initialize the weight arrays using function specified with the
  # initial_weight_function parameter.
  # Row count comes from @activation_nodes[i].length (so bias nodes get
  # their own weight rows); column count is the size of layer i+1.
  def init_weights
    @weights = Array.new(@structure.length-1) do |i|
      nodes_origin = @activation_nodes[i].length
      nodes_target = @structure[i+1]
      Array.new(nodes_origin) do |j|
        Array.new(nodes_target) do |k|
          @initial_weight_function.call(i, j, k)
        end
      end
    end
  end

  # Momentum usage needs to know how much a weight changed in the
  # previous training. This method initializes the @last_changes
  # structure (same shape as @weights) with 0 values.
  def init_last_changes
    @last_changes = Array.new(@weights.length) do |w|
      Array.new(@weights[w].length) do |i|
        Array.new(@weights[w][i].length, 0.0)
      end
    end
  end

  # Calculate deltas for the output layer:
  # delta = f'(output) * (expected - output).
  # Resets @deltas to hold just this output-layer array.
  def calculate_output_deltas(expected_values)
    output_values = @activation_nodes.last
    output_deltas = []
    output_values.each_index do |output_index|
      error = expected_values[output_index] - output_values[output_index]
      output_deltas << @derivative_propagation_function.call(
        output_values[output_index]) * error
    end
    @deltas = [output_deltas]
  end

  # Calculate deltas for hidden layers, walking backwards from the layer
  # before the output down to layer 1. Each layer's deltas are unshifted
  # onto @deltas, so afterwards @deltas[n] holds the deltas of layer n+1
  # (matching weight matrix @weights[n]).
  def calculate_internal_deltas
    prev_deltas = @deltas.last
    (@activation_nodes.length-2).downto(1) do |layer_index|
      layer_deltas = []
      @activation_nodes[layer_index].each_index do |j|
        error = 0.0
        @structure[layer_index+1].times do |k|
          error += prev_deltas[k] * @weights[layer_index][j][k]
        end
        layer_deltas[j] = (@derivative_propagation_function.call(
          @activation_nodes[layer_index][j]) * error)
      end
      prev_deltas = layer_deltas
      @deltas.unshift(layer_deltas)
    end
  end

  # Update weights after @deltas have been calculated:
  # w += learning_rate * delta * activation + momentum * previous_change.
  # The raw change is remembered in @last_changes for the momentum term
  # of the next update.
  def update_weights
    (@weights.length-1).downto(0) do |n|
      @weights[n].each_index do |i|
        @weights[n][i].each_index do |j|
          change = @deltas[n][j]*@activation_nodes[n][i]
          @weights[n][i][j] += ( learning_rate * change +
              momentum * @last_changes[n][i][j])
          @last_changes[n][i][j] = change
        end
      end
    end
  end

  # Calculate quadratic error for an expected output value
  # Error = 0.5 * sum( (expected_value[i] - output_value[i])**2 )
  def calculate_error(expected_output)
    output_values = @activation_nodes.last
    error = 0.0
    expected_output.each_index do |output_index|
      error +=
        0.5*(output_values[output_index]-expected_output[output_index])**2
    end
    return error
  end

  # Raise ArgumentError unless the given input count matches the size of
  # the input layer declared in @structure.
  def check_input_dimension(inputs)
    raise ArgumentError, "Wrong number of inputs. " +
      "Expected: #{@structure.first}, " +
      "received: #{inputs}." if inputs!=@structure.first
  end

  # Raise ArgumentError unless the given output count matches the size
  # of the output layer declared in @structure.
  def check_output_dimension(outputs)
    raise ArgumentError, "Wrong number of outputs. " +
      "Expected: #{@structure.last}, " +
      "received: #{outputs}." if outputs!=@structure.last
  end

end
292
+ end
293
+ end
@@ -0,0 +1,149 @@
1
+ # Author:: Sergio Fierens
2
+ # License:: MPL 1.1
3
+ # Project:: ai4r
4
+ # Url:: http://ai4r.rubyforge.org/
5
+ #
6
+ # You can redistribute it and/or modify it under the terms of
7
+ # the Mozilla Public License version 1.1 as published by the
8
+ # Mozilla Foundation at http://www.mozilla.org/MPL/MPL-1.1.txt
9
+
10
+ require File.dirname(__FILE__) + '/../data/parameterizable'
11
+
12
+ module Ai4r
13
+
14
+ module NeuralNetwork
15
+
16
# = Hopfield Net
#
# A Hopfield network is a recurrent artificial neural network that
# memorizes a set of patterns. Given an input, it repeatedly updates its
# nodes and usually settles on the stored pattern most similar to that
# input (convergence to one of the memorized patterns is not
# guaranteed). This makes it useful for cleaning up noisy inputs drawn
# from a known, discrete set of patterns.
#
# = How to Use
#
#   data_set = Ai4r::Data::DataSet.new :data_items => array_of_patterns
#   net = Ai4r::NeuralNetwork::Hopfield.new.train data_set
#   net.eval input
#     => one of the stored patterns in array_of_patterns
class Hopfield

  include Ai4r::Data::Parameterizable

  attr_reader :weights, :nodes

  parameters_info :eval_iterations => "The network will run for a maximum of 'eval_iterations' iterations while evaluating an input. 500 by default.",
    :active_node_value => "Default: 1",
    :inactive_node_value => "Default: -1",
    :threshold => "Default: 0"

  # Set up the default parameter values.
  def initialize
    @eval_iterations = 500
    @active_node_value = 1
    @inactive_node_value = -1
    @threshold = 0
  end

  # Memorize the patterns contained in +data_set+. Future calls to eval
  # should return one of those data items, although a Hopfield network
  # only converges to a local minimum, which may not be a stored
  # pattern. Returns self.
  def train(data_set)
    @data_set = data_set
    initialize_nodes(@data_set)
    initialize_weights(@data_set)
    self
  end

  # Perform a single propagation step on +input+ and return the node
  # states. Use this instead of eval to watch the network converge step
  # by step:
  #
  #   pattern = input
  #   100.times do
  #     pattern = net.run(pattern)
  #     puts pattern.inspect
  #   end
  def run(input)
    set_input(input)
    propagate
    @nodes
  end

  # Propagate +input+ until the network state matches one of the
  # memorized patterns, or at most eval_iterations times. Returns the
  # final node states.
  def eval(input)
    set_input(input)
    @eval_iterations.times do
      propagate
      break if @data_set.data_items.include?(@nodes)
    end
    @nodes
  end

  protected

  # Copy +inputs+ into the node states in place.
  # The input must have the same dimension as the node array.
  def set_input(inputs)
    raise ArgumentError unless inputs.length == @nodes.length
    inputs.each_with_index { |value, index| @nodes[index] = value }
  end

  # Pick a single node at random and recompute its state from the
  # weighted sum of all node states (asynchronous update).
  def propagate
    activation = 0
    chosen = (rand * @nodes.length).floor
    @nodes.each_with_index do |state, other|
      activation += read_weight(chosen, other) * state
    end
    @nodes[chosen] = activation > @threshold ? @active_node_value : @inactive_node_value
  end

  # Start every node in the "inactive" state, one node per component of
  # the stored patterns.
  def initialize_nodes(data_set)
    @nodes = Array.new(data_set.data_items.first.length,
      @inactive_node_value)
  end

  # Build a lower-triangular weight matrix:
  # [
  #   [w(1,0)],
  #   [w(2,0)], [w(2,1)],
  #   [w(3,0)], [w(3,1)], [w(3,2)],
  #   ...
  #   [w(n-1,0)], [w(n-1,1)], [w(n-1,2)], ... , [w(n-1,n-2)]
  # ]
  # where n is the number of nodes. Storing only one triangle saves
  # memory, because:
  #
  # * w[i][i] = 0 (no node connects with itself)
  # * w[i][j] = w[j][i] (weights are symmetric)
  #
  # Each weight is the sum over all stored patterns of the product of
  # the two node values (Hebbian rule). Use read_weight(i,j) to look up
  # the weight between nodes i and j.
  def initialize_weights(data_set)
    @weights = Array.new(@nodes.length - 1) { |row| Array.new(row + 1) }
    (1...@nodes.length).each do |i|
      (0...i).each do |j|
        @weights[i - 1][j] =
          data_set.data_items.inject(0) { |acc, item| acc + item[i] * item[j] }
      end
    end
  end

  # Weight between nodes index_a and index_b, read from the triangular
  # matrix: 0 on the diagonal, symmetric otherwise.
  def read_weight(index_a, index_b)
    return 0 if index_a == index_b
    row, col = index_a > index_b ? [index_a, index_b] : [index_b, index_a]
    @weights[row - 1][col]
  end

end
146
+
147
+ end
148
+
149
+ end
@@ -0,0 +1,68 @@
1
+ # Author:: Thomas Kern
2
+ # License:: MPL 1.1
3
+ # Project:: ai4r
4
+ # Url:: http://ai4r.rubyforge.org/
5
+ #
6
+ # You can redistribute it and/or modify it under the terms of
7
+ # the Mozilla Public License version 1.1 as published by the
8
+ # Mozilla Foundation at http://www.mozilla.org/MPL/MPL-1.1.txt
9
+
10
+ require File.dirname(__FILE__) + '/../data/parameterizable'
11
+
12
+ module Ai4r
13
+
14
+ module Som
15
+
16
# Implements the decay schedules used by the SOM training algorithm:
# neighborhood radius decay, influence decay and learning rate decay.
# This layer has a single phase, which ends once the SOM has run the
# configured number of epochs.
#
# = Parameters
# * nodes => number of nodes in the SOM (nodes x nodes). Must be the
#   same integer passed to the SOM itself.
# * radius => initial radius for the neighborhood
# * epochs => number of epochs the algorithm runs (integer, 100 by default)
# * learning_rate => initial learning rate (0.7 by default)
class Layer

  include Ai4r::Data::Parameterizable

  parameters_info :nodes => "number of nodes, has to be equal to the som",
    :epochs => "number of epochs the algorithm has to run",
    :radius => "sets the initial neighborhoud radius"

  def initialize(nodes, radius, epochs = 100, learning_rate = 0.7)
    raise("Too few nodes") if nodes < 3

    @nodes = nodes
    @radius = radius
    @epochs = epochs
    @initial_learning_rate = learning_rate

    # Time constant driving the decay schedules below. It must exceed
    # the epoch count (otherwise the decays would turn negative), so it
    # is clamped to epochs + 1 whenever the formula yields less.
    # NOTE(review): when nodes == 4, Math.log(1.0) is 0 and this becomes
    # Infinity, disabling decay entirely — confirm that is intended.
    @time_for_epoch = @epochs / Math.log(nodes / 4.0)
    @time_for_epoch = @epochs + 1.0 if @time_for_epoch < @epochs
  end

  # Influence of a node at +distance+ from the winning node, given the
  # radius of the current epoch: a Gaussian falling off with distance.
  def influence_decay(distance, radius)
    exponent = distance.to_f**2 / 2.0 / radius.to_f**2
    Math.exp(-exponent)
  end

  # Neighborhood radius (rounded to an integer) for +epoch+, shrinking
  # linearly as epoch approaches @time_for_epoch. Relies on
  # @time_for_epoch being greater than the number of epochs; otherwise
  # the result would go negative.
  def radius_decay(epoch)
    shrink = 1 - epoch / @time_for_epoch
    (@radius * shrink).round
  end

  # Learning rate for +epoch+, shrinking on the same linear schedule as
  # the radius. The same @time_for_epoch > epochs rule applies.
  def learning_rate_decay(epoch)
    @initial_learning_rate * (1 - epoch / @time_for_epoch)
  end

end
65
+
66
+ end
67
+
68
+ end