ruby-fann 0.7.1

data/History.txt ADDED
@@ -0,0 +1,4 @@
+ == 0.0.1 2007-12-18
+
+ * 1 major enhancement:
+   * Initial release
data/License.txt ADDED
@@ -0,0 +1,20 @@
+ Copyright (c) 2007 FIXME full name
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
data/Manifest.txt ADDED
@@ -0,0 +1,28 @@
+ History.txt
+ License.txt
+ Manifest.txt
+ README.txt
+ Rakefile
+ config/hoe.rb
+ config/requirements.rb
+ ext/ruby_fann/extconf.rb
+ ext/ruby_fann/fann_augment.h
+ ext/ruby_fann/neural_network.c
+ lib/ruby_fann.rb
+ lib/ruby_fann/version.rb
+ lib/ruby_fann/neurotica.rb
+ log/debug.log
+ script/destroy
+ script/generate
+ script/txt2html
+ setup.rb
+ tasks/deployment.rake
+ tasks/environment.rake
+ tasks/website.rake
+ test/test_helper.rb
+ test/test_ruby_fann.rb
+ website/index.html
+ website/index.txt
+ website/javascripts/rounded_corners_lite.inc.js
+ website/stylesheets/screen.css
+ website/template.rhtml
data/README.txt ADDED
@@ -0,0 +1 @@
+ README
data/Rakefile ADDED
@@ -0,0 +1,4 @@
+ require 'config/requirements'
+ require 'config/hoe' # setup Hoe + all gem configuration
+
+ Dir['tasks/**/*.rake'].each { |rake| load rake }
data/config/hoe.rb ADDED
@@ -0,0 +1,71 @@
+ require 'ruby_fann/version'
+
+ AUTHOR = 'Steven Miers' # can also be an array of Authors
+ EMAIL = "steven@7bpeople.com"
+ DESCRIPTION = "Bindings to use FANN from within a Ruby/Rails environment."
+ GEM_NAME = 'ruby-fann' # what people will type to install your gem
+ RUBYFORGE_PROJECT = 'ruby-fann' # the unix name for your project
+ HOMEPATH = "http://#{RUBYFORGE_PROJECT}.rubyforge.org"
+ DOWNLOAD_PATH = "http://rubyforge.org/projects/#{RUBYFORGE_PROJECT}"
+
+ @config_file = "~/.rubyforge/user-config.yml"
+ @config = nil
+ RUBYFORGE_USERNAME = "unknown"
+ def rubyforge_username
+   unless @config
+     begin
+       @config = YAML.load(File.read(File.expand_path(@config_file)))
+     rescue
+       puts <<-EOS
+ ERROR: No rubyforge config file found: #{@config_file}
+ Run 'rubyforge setup' to prepare your env for access to Rubyforge
+  - See http://newgem.rubyforge.org/rubyforge.html for more details
+       EOS
+       exit
+     end
+   end
+   RUBYFORGE_USERNAME.replace @config["username"]
+ end
+
+
+ REV = nil
+ # UNCOMMENT IF REQUIRED:
+ # REV = `svn info`.each {|line| if line =~ /^Revision:/ then k,v = line.split(': '); break v.chomp; else next; end} rescue nil
+ VERS = RubyFann::VERSION::STRING + (REV ? ".#{REV}" : "")
+ RDOC_OPTS = ['--quiet', '--title', 'ruby_fann documentation',
+              "--opname", "index.html",
+              "--line-numbers",
+              "--main", "README",
+              "--inline-source"]
+
+ class Hoe
+   def extra_deps
+     @extra_deps.reject! { |x| Array(x).first == 'hoe' }
+     @extra_deps
+   end
+ end
+
+ # Generate all the Rake tasks.
+ # Run 'rake -T' to see the list of generated tasks (from the gem root directory).
+ hoe = Hoe.new(GEM_NAME, VERS) do |p|
+   p.author = AUTHOR
+   p.description = DESCRIPTION
+   p.email = EMAIL
+   p.summary = DESCRIPTION
+   p.url = HOMEPATH
+   p.rubyforge_name = RUBYFORGE_PROJECT if RUBYFORGE_PROJECT
+   p.test_globs = ["test/**/test_*.rb"]
+   p.clean_globs |= ['**/.*.sw?', '*.gem', '.config', '**/.DS_Store'] # An array of file patterns to delete on clean.
+
+   # == Optional
+   p.changes = p.paragraphs_of("History.txt", 0..1).join("\n\n")
+   # p.extra_deps = [] # An array of rubygem dependencies [name, version], e.g. [ ['active_support', '>= 1.3.1'] ]
+
+   # p.spec_extras = {} # A hash of extra values to set in the gemspec.
+
+ end
+
+ CHANGES = hoe.paragraphs_of('History.txt', 0..1).join("\\n\\n")
+ PATH = (RUBYFORGE_PROJECT == GEM_NAME) ? RUBYFORGE_PROJECT : "#{RUBYFORGE_PROJECT}/#{GEM_NAME}"
+ hoe.remote_rdoc_dir = File.join(PATH.gsub(/^#{RUBYFORGE_PROJECT}\/?/,''), 'rdoc')
+ hoe.rsync_args = '-av --delete --ignore-errors'
data/config/requirements.rb ADDED
@@ -0,0 +1,17 @@
+ require 'fileutils'
+ include FileUtils
+
+ require 'rubygems'
+ %w[rake hoe newgem rubigen].each do |req_gem|
+   begin
+     require req_gem
+   rescue LoadError
+     puts "This Rakefile requires the '#{req_gem}' RubyGem."
+     puts "Installation: gem install #{req_gem} -y"
+     exit
+   end
+ end
+
+ $:.unshift(File.join(File.dirname(__FILE__), %w[.. lib]))
+
+ require 'ruby_fann'
data/ext/ruby_fann/extconf.rb ADDED
@@ -0,0 +1,5 @@
+ require 'mkmf'
+ dir_config("ruby_fann")
+ find_library("doublefann", "fann_create_standard", "/usr/local/lib")
+ #create_makefile("ruby-fann")
+ create_makefile("neural_network")
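
Note: extconf.rb links the extension against an already-installed FANN (doublefann) library. As a minimal build sketch (not part of the gem), assuming FANN is installed under /usr/local; the --with-ruby_fann-dir flag is the one mkmf's dir_config("ruby_fann") call provides, and the driver below merely shells out to the usual `ruby extconf.rb && make` flow:

    # Hypothetical build driver; a sketch under the assumptions above.
    Dir.chdir("ext/ruby_fann") do
      system("ruby extconf.rb --with-ruby_fann-dir=/usr/local") or abort "extconf failed"
      system("make") or abort "make failed"
    end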
data/ext/ruby_fann/fann_augment.h ADDED
@@ -0,0 +1,110 @@
+
+ /* Unused stub; always returns 0 (NULL): */
+ FANN_EXTERNAL struct fann_train_data * FANN_API fann_create_train_from_rb_ary2(
+     unsigned int num_data,
+     unsigned int num_input,
+     unsigned int num_output)
+ {
+   return 0;
+ }
+
+ /*
+  * Copied from fann_create_train_from_callback/file & modified to ease
+  * allocating from ruby arrays:
+  */
+ FANN_EXTERNAL struct fann_train_data * FANN_API fann_create_train_from_rb_ary(
+     VALUE inputs,
+     VALUE outputs)
+ {
+   unsigned int i, j;
+   fann_type *data_input, *data_output;
+   struct fann_train_data *data = (struct fann_train_data *)malloc(sizeof(struct fann_train_data));
+   unsigned int num_input = RARRAY(RARRAY(inputs)->ptr[0])->len;
+   unsigned int num_output = RARRAY(RARRAY(outputs)->ptr[0])->len;
+   unsigned int num_data = RARRAY(inputs)->len;
+
+   if(data == NULL) {
+     fann_error(NULL, FANN_E_CANT_ALLOCATE_MEM);
+     return NULL;
+   }
+
+   fann_init_error_data((struct fann_error *) data);
+
+   data->num_data = num_data;
+   data->num_input = num_input;
+   data->num_output = num_output;
+
+   data->input = (fann_type **) calloc(num_data, sizeof(fann_type *));
+   if(data->input == NULL)
+   {
+     fann_error(NULL, FANN_E_CANT_ALLOCATE_MEM);
+     fann_destroy_train(data);
+     return NULL;
+   }
+
+   data->output = (fann_type **) calloc(num_data, sizeof(fann_type *));
+   if(data->output == NULL)
+   {
+     fann_error(NULL, FANN_E_CANT_ALLOCATE_MEM);
+     fann_destroy_train(data);
+     return NULL;
+   }
+
+   data_input = (fann_type *) calloc(num_input * num_data, sizeof(fann_type));
+   if(data_input == NULL)
+   {
+     fann_error(NULL, FANN_E_CANT_ALLOCATE_MEM);
+     fann_destroy_train(data);
+     return NULL;
+   }
+
+   data_output = (fann_type *) calloc(num_output * num_data, sizeof(fann_type));
+   if(data_output == NULL)
+   {
+     fann_error(NULL, FANN_E_CANT_ALLOCATE_MEM);
+     fann_destroy_train(data);
+     return NULL;
+   }
+
+   VALUE inputs_i, outputs_i;
+   for(i = 0; i != num_data; i++)
+   {
+     data->input[i] = data_input;
+     data_input += num_input;
+
+     inputs_i = RARRAY(inputs)->ptr[i];
+     outputs_i = RARRAY(outputs)->ptr[i];
+
+     if(RARRAY(inputs_i)->len != num_input)
+     {
+       rb_raise(
+           rb_eRuntimeError,
+           "Number of inputs at [%d] is inconsistent: (%d != %d)",
+           (int) i, (int) RARRAY(inputs_i)->len, (int) num_input);
+     }
+
+     if(RARRAY(outputs_i)->len != num_output)
+     {
+       rb_raise(
+           rb_eRuntimeError,
+           "Number of outputs at [%d] is inconsistent: (%d != %d)",
+           (int) i, (int) RARRAY(outputs_i)->len, (int) num_output);
+     }
+
+     for(j = 0; j != num_input; j++)
+     {
+       data->input[i][j] = NUM2DBL(RARRAY(inputs_i)->ptr[j]);
+     }
+
+     data->output[i] = data_output;
+     data_output += num_output;
+
+     for(j = 0; j != num_output; j++)
+     {
+       data->output[i][j] = NUM2DBL(RARRAY(outputs_i)->ptr[j]);
+     }
+   }
+
+   return data;
+ }
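
fann_create_train_from_rb_ary above copies nested Ruby arrays into a freshly allocated fann_train_data struct, validating that every sample has a consistent number of inputs and outputs. A hedged Ruby sketch of the data shapes it expects, mirroring the RubyFann::TrainData.new documentation in neural_network.c below (the require path is an assumption):

    # Sketch only: two samples, three inputs and one output each.
    # Inconsistent sub-array lengths raise a RuntimeError in the C code.
    require 'ruby_fann/neural_network'   # assumed require path for the built extension
    train = RubyFann::TrainData.new(
      :inputs          => [[0.2, 0.3, 0.4], [0.8, 0.9, 0.7]],
      :desired_outputs => [[3.14], [6.33]])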
data/ext/ruby_fann/neural_network.c ADDED
@@ -0,0 +1,1499 @@
+ #include "ruby.h"
+ // #include "fann.h"
+ #include "doublefann.h"
+ #include "fann_data.h"
+ #include "fann_augment.h"
+
+ static VALUE m_rb_fann_module;
+ static VALUE m_rb_fann_standard_class;
+ static VALUE m_rb_fann_shortcut_class;
+ static VALUE m_rb_fann_train_data_class;
+
+ #define RETURN_FANN_INT(fn) \
+   struct fann* f; \
+   Data_Get_Struct(self, struct fann, f); \
+   return INT2NUM(fn(f));
+
+ #define SET_FANN_INT(attr_name, fann_fn) \
+   Check_Type(attr_name, T_FIXNUM); \
+   struct fann* f; \
+   Data_Get_Struct(self, struct fann, f); \
+   fann_fn(f, NUM2INT(attr_name)); \
+   return Qnil;
+
+ #define RETURN_FANN_UINT(fn) \
+   struct fann* f; \
+   Data_Get_Struct(self, struct fann, f); \
+   return UINT2NUM(fn(f));
+
+ #define SET_FANN_UINT(attr_name, fann_fn) \
+   Check_Type(attr_name, T_FIXNUM); \
+   struct fann* f; \
+   Data_Get_Struct(self, struct fann, f); \
+   fann_fn(f, NUM2UINT(attr_name)); \
+   return Qnil;
+
+ // Round float return values to six significant digits via a string,
+ // to avoid spurious float-to-double precision artifacts:
+ #define RETURN_FANN_FLT(fn) \
+   struct fann* f; \
+   Data_Get_Struct(self, struct fann, f); \
+   char buffy[20]; \
+   sprintf(buffy, "%0.6g", fn(f)); \
+   return rb_float_new(atof(buffy));
+
+ #define SET_FANN_FLT(attr_name, fann_fn) \
+   Check_Type(attr_name, T_FLOAT); \
+   struct fann* f; \
+   Data_Get_Struct(self, struct fann, f); \
+   fann_fn(f, NUM2DBL(attr_name)); \
+   return Qnil;
+
+ #define RETURN_FANN_DBL(fn) \
+   struct fann* f; \
+   Data_Get_Struct(self, struct fann, f); \
+   return rb_float_new(fn(f));
+
+ #define SET_FANN_DBL SET_FANN_FLT
+
+ // Convert ruby symbol to the corresponding FANN enum type for an activation function:
+ enum fann_activationfunc_enum sym_to_activation_function(VALUE activation_func)
+ {
+   ID id = SYM2ID(activation_func);
+   enum fann_activationfunc_enum activation_function;
+   if(id == rb_intern("linear")) {
+     activation_function = FANN_LINEAR;
+   } else if(id == rb_intern("threshold")) {
+     activation_function = FANN_THRESHOLD;
+   } else if(id == rb_intern("threshold_symmetric")) {
+     activation_function = FANN_THRESHOLD_SYMMETRIC;
+   } else if(id == rb_intern("sigmoid")) {
+     activation_function = FANN_SIGMOID;
+   } else if(id == rb_intern("sigmoid_stepwise")) {
+     activation_function = FANN_SIGMOID_STEPWISE;
+   } else if(id == rb_intern("sigmoid_symmetric")) {
+     activation_function = FANN_SIGMOID_SYMMETRIC;
+   } else if(id == rb_intern("sigmoid_symmetric_stepwise")) {
+     activation_function = FANN_SIGMOID_SYMMETRIC_STEPWISE;
+   } else if(id == rb_intern("gaussian")) {
+     activation_function = FANN_GAUSSIAN;
+   } else if(id == rb_intern("gaussian_symmetric")) {
+     activation_function = FANN_GAUSSIAN_SYMMETRIC;
+   } else if(id == rb_intern("gaussian_stepwise")) {
+     activation_function = FANN_GAUSSIAN_STEPWISE;
+   } else if(id == rb_intern("elliot")) {
+     activation_function = FANN_ELLIOT;
+   } else if(id == rb_intern("elliot_symmetric")) {
+     activation_function = FANN_ELLIOT_SYMMETRIC;
+   } else if(id == rb_intern("linear_piece")) {
+     activation_function = FANN_LINEAR_PIECE;
+   } else if(id == rb_intern("linear_piece_symmetric")) {
+     activation_function = FANN_LINEAR_PIECE_SYMMETRIC;
+   } else if(id == rb_intern("sin_symmetric")) {
+     activation_function = FANN_SIN_SYMMETRIC;
+   } else if(id == rb_intern("cos_symmetric")) {
+     activation_function = FANN_COS_SYMMETRIC;
+   } else if(id == rb_intern("sin")) {
+     activation_function = FANN_SIN;
+   } else if(id == rb_intern("cos")) {
+     activation_function = FANN_COS;
+   } else {
+     rb_raise(rb_eRuntimeError, "Unrecognized activation function: [%s]", rb_id2name(SYM2ID(activation_func)));
+   }
+   return activation_function;
+ }
+
+ // Convert FANN enum type for an activation function to the corresponding ruby symbol:
+ VALUE activation_function_to_sym(enum fann_activationfunc_enum fn)
+ {
+   VALUE activation_function;
+
+   if(fn == FANN_LINEAR) {
+     activation_function = ID2SYM(rb_intern("linear"));
+   } else if(fn == FANN_THRESHOLD) {
+     activation_function = ID2SYM(rb_intern("threshold"));
+   } else if(fn == FANN_THRESHOLD_SYMMETRIC) {
+     activation_function = ID2SYM(rb_intern("threshold_symmetric"));
+   } else if(fn == FANN_SIGMOID) {
+     activation_function = ID2SYM(rb_intern("sigmoid"));
+   } else if(fn == FANN_SIGMOID_STEPWISE) {
+     activation_function = ID2SYM(rb_intern("sigmoid_stepwise"));
+   } else if(fn == FANN_SIGMOID_SYMMETRIC) {
+     activation_function = ID2SYM(rb_intern("sigmoid_symmetric"));
+   } else if(fn == FANN_SIGMOID_SYMMETRIC_STEPWISE) {
+     activation_function = ID2SYM(rb_intern("sigmoid_symmetric_stepwise"));
+   } else if(fn == FANN_GAUSSIAN) {
+     activation_function = ID2SYM(rb_intern("gaussian"));
+   } else if(fn == FANN_GAUSSIAN_SYMMETRIC) {
+     activation_function = ID2SYM(rb_intern("gaussian_symmetric"));
+   } else if(fn == FANN_GAUSSIAN_STEPWISE) {
+     activation_function = ID2SYM(rb_intern("gaussian_stepwise"));
+   } else if(fn == FANN_ELLIOT) {
+     activation_function = ID2SYM(rb_intern("elliot"));
+   } else if(fn == FANN_ELLIOT_SYMMETRIC) {
+     activation_function = ID2SYM(rb_intern("elliot_symmetric"));
+   } else if(fn == FANN_LINEAR_PIECE) {
+     activation_function = ID2SYM(rb_intern("linear_piece"));
+   } else if(fn == FANN_LINEAR_PIECE_SYMMETRIC) {
+     activation_function = ID2SYM(rb_intern("linear_piece_symmetric"));
+   } else if(fn == FANN_SIN_SYMMETRIC) {
+     activation_function = ID2SYM(rb_intern("sin_symmetric"));
+   } else if(fn == FANN_COS_SYMMETRIC) {
+     activation_function = ID2SYM(rb_intern("cos_symmetric"));
+   } else if(fn == FANN_SIN) {
+     activation_function = ID2SYM(rb_intern("sin"));
+   } else if(fn == FANN_COS) {
+     activation_function = ID2SYM(rb_intern("cos"));
+   } else {
+     rb_raise(rb_eRuntimeError, "Unrecognized activation function: [%d]", fn);
+   }
+   return activation_function;
+ }
+
+
+ // Unused for now:
+ static void fann_mark(struct fann* ann) {}
+
+ // #define DEBUG 1
+
+ // Free memory associated with FANN:
+ static void fann_free(struct fann* ann)
+ {
+   fann_destroy(ann);
+   printf("Destroyed FANN network [%p].\n", (void*) ann);
+ }
+
+ // Free memory associated with FANN training data:
+ static void fann_training_data_free(struct fann_train_data* train_data)
+ {
+   fann_destroy_train(train_data);
+   printf("Destroyed training data [%p].\n", (void*) train_data);
+ }
+
+ // Create wrapper, but don't allocate anything...do that in
+ // initialize, so we can construct with args:
+ static VALUE fann_allocate(VALUE klass)
+ {
+   return Data_Wrap_Struct(klass, fann_mark, fann_free, 0);
+ }
+
+ // Create wrapper, but don't allocate anything...do that in
+ // initialize, so we can construct with args:
+ static VALUE fann_training_data_allocate(VALUE klass)
+ {
+   return Data_Wrap_Struct(klass, fann_mark, fann_training_data_free, 0);
+ }
+
+
+ /** Initialization routine for the standard, shortcut & file forms of FANN:
+     Standard initialization:
+       RubyFann::Standard.new(:num_inputs=>1, :hidden_neurons=>[3, 4, 3, 4], :num_outputs=>1)
+
+     Shortcut initialization (e.g., for use in cascade training):
+       RubyFann::Shortcut.new(:num_inputs=>5, :num_outputs=>1)
+
+     File initialization:
+       RubyFann::Standard.new(:filename=>'xor_float.net') */
+ static VALUE fann_initialize(VALUE self, VALUE hash)
+ {
+   // Get args:
+   VALUE filename = rb_hash_aref(hash, ID2SYM(rb_intern("filename")));
+   VALUE num_inputs = rb_hash_aref(hash, ID2SYM(rb_intern("num_inputs")));
+   VALUE num_outputs = rb_hash_aref(hash, ID2SYM(rb_intern("num_outputs")));
+   VALUE hidden_neurons = rb_hash_aref(hash, ID2SYM(rb_intern("hidden_neurons")));
+
+   struct fann* ann;
+   if(TYPE(filename) == T_STRING)
+   {
+     // Initialize with file:
+     // train_data = fann_read_train_from_file(StringValuePtr(filename));
+     // DATA_PTR(self) = train_data;
+     ann = fann_create_from_file(StringValuePtr(filename));
+     printf("Created RubyFann::Standard [%p] from file [%s].\n", (void*) ann, StringValuePtr(filename));
+   }
+   else if(rb_obj_is_kind_of(self, m_rb_fann_shortcut_class))
+   {
+     // Initialize as shortcut, suitable for cascade training:
+     // ann = fann_create_shortcut_array(num_layers, layers);
+     Check_Type(num_inputs, T_FIXNUM);
+     Check_Type(num_outputs, T_FIXNUM);
+
+     ann = fann_create_shortcut(2, NUM2INT(num_inputs), NUM2INT(num_outputs));
+     printf("Created RubyFann::Shortcut [%p].\n", (void*) ann);
+   }
+   else
+   {
+     // Initialize as standard:
+     Check_Type(num_inputs, T_FIXNUM);
+     Check_Type(hidden_neurons, T_ARRAY);
+     Check_Type(num_outputs, T_FIXNUM);
+
+     // Initialize layers (input + hidden layers + output):
+     unsigned int num_layers = RARRAY(hidden_neurons)->len + 2;
+     unsigned int layers[num_layers];
+
+     // Input:
+     layers[0] = NUM2INT(num_inputs);
+     // Output:
+     layers[num_layers - 1] = NUM2INT(num_outputs);
+     // Hidden:
+     int i;
+     for(i = 1; i <= num_layers - 2; i++) {
+       layers[i] = NUM2UINT(RARRAY(hidden_neurons)->ptr[i - 1]);
+       printf("Setting layer [%d] to [%d]\n", i, layers[i]);
+     }
+
+     ann = fann_create_standard_array(num_layers, layers);
+     printf("Created RubyFann::Standard [%p].\n", (void*) ann);
+   }
+
+   DATA_PTR(self) = ann;
+   return self; // initialize should return self, not the raw struct pointer
+ }
+
+ /**
+    Initialize in one of the following forms:
+
+    # This is a flat file with training data as described in the FANN docs.
+    RubyFann::TrainData.new(:filename => 'path/to/training_file.train')
+    OR
+    # Train with inputs (array of arrays) & desired_outputs (array of arrays).
+    # inputs & desired_outputs must be the same length.
+    # All sub-arrays of inputs must be the same length.
+    # All sub-arrays of desired_outputs must be the same length.
+    # Sub-arrays of inputs & desired_outputs may differ in length from one another.
+    RubyFann::TrainData.new(:inputs=>[[0.2, 0.3, 0.4], [0.8, 0.9, 0.7]], :desired_outputs=>[[3.14], [6.33]])
+ */
+ static VALUE fann_train_data_initialize(VALUE self, VALUE hash)
+ {
+   struct fann_train_data* train_data;
+   Check_Type(hash, T_HASH);
+
+   VALUE filename = rb_hash_aref(hash, ID2SYM(rb_intern("filename")));
+   VALUE inputs = rb_hash_aref(hash, ID2SYM(rb_intern("inputs")));
+   VALUE desired_outputs = rb_hash_aref(hash, ID2SYM(rb_intern("desired_outputs")));
+
+   if(TYPE(filename) == T_STRING)
+   {
+     train_data = fann_read_train_from_file(StringValuePtr(filename));
+     DATA_PTR(self) = train_data;
+   }
+   else if(TYPE(inputs) == T_ARRAY)
+   {
+     if(TYPE(desired_outputs) != T_ARRAY)
+     {
+       rb_raise(rb_eRuntimeError, "[desired_outputs] must be present when [inputs] is used.");
+     }
+
+     // The data is here, start constructing:
+     if(RARRAY(inputs)->len != RARRAY(desired_outputs)->len)
+     {
+       rb_raise(
+           rb_eRuntimeError,
+           "Number of inputs must match number of outputs: (%d != %d)",
+           (int) RARRAY(inputs)->len,
+           (int) RARRAY(desired_outputs)->len);
+     }
+
+     train_data = fann_create_train_from_rb_ary(inputs, desired_outputs);
+     DATA_PTR(self) = train_data;
+   }
+   else
+   {
+     rb_raise(rb_eRuntimeError, "Must construct with a filename (string) or inputs/desired_outputs (arrays). All args are passed via a hash with symbols as keys.");
+   }
+
+   return self; // initialize should return self, not the raw struct pointer
+ }
+
+
+ /** Save training data to the given filename. */
+ static VALUE training_save(VALUE self, VALUE filename)
+ {
+   Check_Type(filename, T_STRING);
+   struct fann_train_data* t;
+   Data_Get_Struct(self, struct fann_train_data, t);
+   fann_save_train(t, StringValuePtr(filename));
+   return Qnil;
+ }
+
+ /** Shuffles training data, randomizing the order.
+     This is recommended for incremental training; it has no effect on batch training. */
+ static VALUE shuffle(VALUE self)
+ {
+   struct fann_train_data* t;
+   Data_Get_Struct(self, struct fann_train_data, t);
+   fann_shuffle_train_data(t);
+   return Qnil;
+ }
+
+ /** Length of training data. */
+ static VALUE length_train_data(VALUE self)
+ {
+   struct fann_train_data* t;
+   Data_Get_Struct(self, struct fann_train_data, t);
+   return(UINT2NUM(fann_length_train_data(t)));
+ }
+
+ /** Set the activation function for neuron number *neuron* in layer number *layer*,
+     counting the input layer as layer 0. activation_func must be one of the following symbols:
+     :linear, :threshold, :threshold_symmetric, :sigmoid, :sigmoid_stepwise, :sigmoid_symmetric,
+     :sigmoid_symmetric_stepwise, :gaussian, :gaussian_symmetric, :gaussian_stepwise, :elliot,
+     :elliot_symmetric, :linear_piece, :linear_piece_symmetric, :sin_symmetric, :cos_symmetric,
+     :sin, :cos */
+ static VALUE set_activation_function(VALUE self, VALUE activation_func, VALUE layer, VALUE neuron)
+ {
+   Check_Type(activation_func, T_SYMBOL);
+   Check_Type(layer, T_FIXNUM);
+   Check_Type(neuron, T_FIXNUM);
+
+   struct fann* f;
+   Data_Get_Struct(self, struct fann, f);
+   fann_set_activation_function(f, sym_to_activation_function(activation_func), NUM2INT(layer), NUM2INT(neuron));
+   return Qnil;
+ }
+
+ /** Set the activation function for all of the hidden layers. activation_func must be one of the following symbols:
+     :linear, :threshold, :threshold_symmetric, :sigmoid, :sigmoid_stepwise, :sigmoid_symmetric,
+     :sigmoid_symmetric_stepwise, :gaussian, :gaussian_symmetric, :gaussian_stepwise, :elliot,
+     :elliot_symmetric, :linear_piece, :linear_piece_symmetric, :sin_symmetric, :cos_symmetric,
+     :sin, :cos */
+ static VALUE set_activation_function_hidden(VALUE self, VALUE activation_func)
+ {
+   Check_Type(activation_func, T_SYMBOL);
+   struct fann* f;
+   Data_Get_Struct(self, struct fann, f);
+   fann_set_activation_function_hidden(f, sym_to_activation_function(activation_func));
+   return Qnil;
+ }
+
+ /** Set the activation function for all the neurons in layer number *layer*,
+     counting the input layer as layer 0. activation_func must be one of the following symbols:
+     :linear, :threshold, :threshold_symmetric, :sigmoid, :sigmoid_stepwise, :sigmoid_symmetric,
+     :sigmoid_symmetric_stepwise, :gaussian, :gaussian_symmetric, :gaussian_stepwise, :elliot,
+     :elliot_symmetric, :linear_piece, :linear_piece_symmetric, :sin_symmetric, :cos_symmetric,
+     :sin, :cos */
+ static VALUE set_activation_function_layer(VALUE self, VALUE activation_func, VALUE layer)
+ {
+   Check_Type(activation_func, T_SYMBOL);
+   Check_Type(layer, T_FIXNUM);
+   struct fann* f;
+   Data_Get_Struct(self, struct fann, f);
+   fann_set_activation_function_layer(f, sym_to_activation_function(activation_func), NUM2INT(layer));
+   return Qnil;
+ }
+
+ /** Set the activation function for the output layer. activation_func must be one of the following symbols:
+     :linear, :threshold, :threshold_symmetric, :sigmoid, :sigmoid_stepwise, :sigmoid_symmetric,
+     :sigmoid_symmetric_stepwise, :gaussian, :gaussian_symmetric, :gaussian_stepwise, :elliot,
+     :elliot_symmetric, :linear_piece, :linear_piece_symmetric, :sin_symmetric, :cos_symmetric,
+     :sin, :cos */
+ static VALUE set_activation_function_output(VALUE self, VALUE activation_func)
+ {
+   Check_Type(activation_func, T_SYMBOL);
+   struct fann* f;
+   Data_Get_Struct(self, struct fann, f);
+   fann_set_activation_function_output(f, sym_to_activation_function(activation_func));
+   return Qnil;
+ }
+
+ /** Get the activation steepness for neuron number *neuron* in layer number *layer*,
+     counting the input layer as layer 0. */
+ static VALUE get_activation_steepness(VALUE self, VALUE layer, VALUE neuron)
+ {
+   Check_Type(layer, T_FIXNUM);
+   Check_Type(neuron, T_FIXNUM);
+   struct fann* f;
+   Data_Get_Struct(self, struct fann, f);
+   fann_type val = fann_get_activation_steepness(f, NUM2INT(layer), NUM2INT(neuron));
+   return rb_float_new(val);
+ }
+
+ /** Set the activation steepness for neuron number *neuron* in layer number *layer*,
+     counting the input layer as layer 0. */
+ static VALUE set_activation_steepness(VALUE self, VALUE steepness, VALUE layer, VALUE neuron)
+ {
+   Check_Type(steepness, T_FLOAT);
+   Check_Type(layer, T_FIXNUM);
+   Check_Type(neuron, T_FIXNUM);
+
+   struct fann* f;
+   Data_Get_Struct(self, struct fann, f);
+   fann_set_activation_steepness(f, NUM2DBL(steepness), NUM2INT(layer), NUM2INT(neuron));
+   return Qnil;
+ }
+
+ /** Set the activation steepness in all of the hidden layers. */
+ static VALUE set_activation_steepness_hidden(VALUE self, VALUE steepness)
+ {
+   SET_FANN_FLT(steepness, fann_set_activation_steepness_hidden);
+ }
+
+ /** Set the activation steepness for all of the neurons in layer number *layer*,
+     counting the input layer as layer 0. */
+ static VALUE set_activation_steepness_layer(VALUE self, VALUE steepness, VALUE layer)
+ {
+   Check_Type(steepness, T_FLOAT);
+   Check_Type(layer, T_FIXNUM);
+
+   struct fann* f;
+   Data_Get_Struct(self, struct fann, f);
+   fann_set_activation_steepness_layer(f, NUM2DBL(steepness), NUM2INT(layer));
+   return Qnil;
+ }
+
+ /** Set the activation steepness in the output layer. */
+ static VALUE set_activation_steepness_output(VALUE self, VALUE steepness)
+ {
+   SET_FANN_FLT(steepness, fann_set_activation_steepness_output);
+ }
+
+ /** Returns the bit fail limit used during training. */
+ static VALUE get_bit_fail_limit(VALUE self)
+ {
+   RETURN_FANN_DBL(fann_get_bit_fail_limit);
+ }
+
+ /** Sets the bit fail limit used during training. */
+ static VALUE set_bit_fail_limit(VALUE self, VALUE bit_fail_limit)
+ {
+   SET_FANN_FLT(bit_fail_limit, fann_set_bit_fail_limit);
+ }
+
+ /** The decay is a small negative number; it is the factor by which the weights
+     shrink in each iteration of quickprop training. It is used to keep the
+     weights from growing too large during training. */
+ static VALUE get_quickprop_decay(VALUE self)
+ {
+   RETURN_FANN_FLT(fann_get_quickprop_decay);
+ }
+
+ /** Sets the quickprop decay factor. */
+ static VALUE set_quickprop_decay(VALUE self, VALUE quickprop_decay)
+ {
+   SET_FANN_FLT(quickprop_decay, fann_set_quickprop_decay);
+ }
+
+ /** The mu factor is used to increase and decrease the step size during quickprop training.
+     The mu factor should always be above 1, since it would otherwise decrease the step size
+     when it was supposed to increase it. */
+ static VALUE get_quickprop_mu(VALUE self)
+ {
+   RETURN_FANN_FLT(fann_get_quickprop_mu);
+ }
+
+ /** Sets the quickprop mu factor. */
+ static VALUE set_quickprop_mu(VALUE self, VALUE quickprop_mu)
+ {
+   SET_FANN_FLT(quickprop_mu, fann_set_quickprop_mu);
+ }
+
+ /** The increase factor is a value larger than 1, which is used to
+     increase the step size during RPROP training. */
+ static VALUE get_rprop_increase_factor(VALUE self)
+ {
+   RETURN_FANN_FLT(fann_get_rprop_increase_factor);
+ }
+
+ /** Sets the increase factor used during RPROP training. */
+ static VALUE set_rprop_increase_factor(VALUE self, VALUE rprop_increase_factor)
+ {
+   SET_FANN_FLT(rprop_increase_factor, fann_set_rprop_increase_factor);
+ }
+
+ /** The decrease factor is a value smaller than 1, which is used to decrease the step size during RPROP training. */
+ static VALUE get_rprop_decrease_factor(VALUE self)
+ {
+   RETURN_FANN_FLT(fann_get_rprop_decrease_factor);
+ }
+
+ /** Sets the decrease factor used during RPROP training. */
+ static VALUE set_rprop_decrease_factor(VALUE self, VALUE rprop_decrease_factor)
+ {
+   SET_FANN_FLT(rprop_decrease_factor, fann_set_rprop_decrease_factor);
+ }
+
+ /** The minimum step size is a small positive number determining how small the step size may become. */
+ static VALUE get_rprop_delta_min(VALUE self)
+ {
+   RETURN_FANN_FLT(fann_get_rprop_delta_min);
+ }
+
+ /** Sets the minimum step size used during RPROP training. */
+ static VALUE set_rprop_delta_min(VALUE self, VALUE rprop_delta_min)
+ {
+   SET_FANN_FLT(rprop_delta_min, fann_set_rprop_delta_min);
+ }
+
+ /** The maximum step size is a positive number determining how large the step size may become. */
+ static VALUE get_rprop_delta_max(VALUE self)
+ {
+   RETURN_FANN_FLT(fann_get_rprop_delta_max);
+ }
+
+ /** Sets the maximum step size used during RPROP training. */
+ static VALUE set_rprop_delta_max(VALUE self, VALUE rprop_delta_max)
+ {
+   SET_FANN_FLT(rprop_delta_max, fann_set_rprop_delta_max);
+ }
+
+ /** The initial step size is a positive number determining the step size used at the start of RPROP training. */
+ static VALUE get_rprop_delta_zero(VALUE self)
+ {
+   RETURN_FANN_FLT(fann_get_rprop_delta_zero);
+ }
+
+ /** Sets the initial step size used during RPROP training. */
+ static VALUE set_rprop_delta_zero(VALUE self, VALUE rprop_delta_zero)
+ {
+   SET_FANN_FLT(rprop_delta_zero, fann_set_rprop_delta_zero);
+ }
+
+ /** Return array of bias(es). */
+ static VALUE get_bias_array(VALUE self)
+ {
+   struct fann* f;
+   unsigned int num_layers;
+   Data_Get_Struct(self, struct fann, f);
+   num_layers = fann_get_num_layers(f);
+   unsigned int layers[num_layers];
+   fann_get_bias_array(f, layers);
+
+   // Create ruby array & set outputs:
+   VALUE arr;
+   arr = rb_ary_new();
+   int i;
+   for(i = 0; i < num_layers; i++)
+   {
+     rb_ary_push(arr, INT2NUM(layers[i]));
+   }
+
+   return arr;
+ }
+
+ /** The number of fail bits, i.e. the number of output neurons which differ by more
+     than the bit fail limit (see <fann_get_bit_fail_limit>, <fann_set_bit_fail_limit>).
+     The bits are counted over all of the training data, so this number can be higher than
+     the number of training samples. */
+ static VALUE get_bit_fail(VALUE self)
+ {
+   RETURN_FANN_INT(fann_get_bit_fail);
+ }
+
+ /** Get the connection rate used when the network was created. */
+ static VALUE get_connection_rate(VALUE self)
+ {
+   RETURN_FANN_INT(fann_get_connection_rate);
+ }
+
+ /** Return array<hash>, where each array element is a hash
+     representing a neuron. Each hash contains the following keys:
+       :activation_function, symbol -- the activation function
+       :activation_steepness, float -- the steepness of the activation function
+       :sum, float -- the sum of the inputs multiplied by the weights
+       :value, float -- the value of the activation function applied to the sum
+       :connections, array<int> -- indices of connected neurons (inputs)
+
+     This could be done more elegantly (e.g., by defining more ruby ext classes).
+     This method does not directly correlate to anything in FANN, and accesses
+     structs that are not guaranteed not to change.
+ */
+ static VALUE get_neurons(VALUE self, VALUE layer)
+ {
+   struct fann_layer *layer_it;
+   struct fann_neuron *neuron_it;
+
+   struct fann* f;
+   unsigned int i;
+   Data_Get_Struct(self, struct fann, f);
+
+   VALUE neuron_array = rb_ary_new();
+   VALUE activation_function_sym = ID2SYM(rb_intern("activation_function"));
+   VALUE activation_steepness_sym = ID2SYM(rb_intern("activation_steepness"));
+   VALUE layer_sym = ID2SYM(rb_intern("layer"));
+   VALUE sum_sym = ID2SYM(rb_intern("sum"));
+   VALUE value_sym = ID2SYM(rb_intern("value"));
+   VALUE connections_sym = ID2SYM(rb_intern("connections"));
+   unsigned int layer_num = 0;
+   // Note: the layer arg is currently unused; neurons from all layers are returned.
+   for(layer_it = f->first_layer; layer_it != f->last_layer; layer_it++)
+   {
+     for(neuron_it = layer_it->first_neuron; neuron_it != layer_it->last_neuron; neuron_it++)
+     {
+       // Create array of connection indices:
+       VALUE connection_array = rb_ary_new();
+       for(i = neuron_it->first_con; i < neuron_it->last_con; i++) {
+         rb_ary_push(connection_array, INT2NUM(f->connections[i] - f->first_layer->first_neuron));
+       }
+
+       VALUE neuron = rb_hash_new();
+
+       // Set attributes on hash & push onto array:
+       rb_hash_aset(neuron, activation_function_sym, activation_function_to_sym(neuron_it->activation_function));
+       rb_hash_aset(neuron, activation_steepness_sym, rb_float_new(neuron_it->activation_steepness));
+       rb_hash_aset(neuron, layer_sym, INT2NUM(layer_num));
+       rb_hash_aset(neuron, sum_sym, rb_float_new(neuron_it->sum));
+       rb_hash_aset(neuron, value_sym, rb_float_new(neuron_it->value));
+       rb_hash_aset(neuron, connections_sym, connection_array);
+
+       rb_ary_push(neuron_array, neuron);
+     }
+     ++layer_num;
+   }
+
+   return neuron_array;
+ }
+
+ /** Get a list of layers in array format, where each element contains the number of neurons in that layer. */
+ static VALUE get_layer_array(VALUE self)
+ {
+   struct fann* f;
+   unsigned int num_layers;
+   Data_Get_Struct(self, struct fann, f);
+   num_layers = fann_get_num_layers(f);
+   unsigned int layers[num_layers];
+   fann_get_layer_array(f, layers);
+
+   // Create ruby array & set outputs:
+   VALUE arr;
+   arr = rb_ary_new();
+   int i;
+   for(i = 0; i < num_layers; i++)
+   {
+     rb_ary_push(arr, INT2NUM(layers[i]));
+   }
+
+   return arr;
+ }
+
+ /** Reads the mean square error from the network. */
+ static VALUE get_MSE(VALUE self)
+ {
+   RETURN_FANN_DBL(fann_get_MSE);
+ }
+
+ /** Resets the mean square error from the network.
+     This function also resets the number of bits that fail. */
+ static VALUE reset_MSE(VALUE self)
+ {
+   struct fann* f;
+   Data_Get_Struct(self, struct fann, f);
+   fann_reset_MSE(f);
+   return Qnil;
+ }
+
+ /** Get the type of network. Returns a ruby symbol (one of :fann_nettype_layer, :fann_nettype_shortcut). */
+ static VALUE get_network_type(VALUE self)
+ {
+   struct fann* f;
+   enum fann_nettype_enum net_type;
+   VALUE ret_val = Qnil;
+   Data_Get_Struct(self, struct fann, f);
+
+   net_type = fann_get_network_type(f);
+
+   if(net_type == FANN_NETTYPE_LAYER)
+   {
+     ret_val = ID2SYM(rb_intern("fann_nettype_layer"));
+   }
+   else if(net_type == FANN_NETTYPE_SHORTCUT)
+   {
+     ret_val = ID2SYM(rb_intern("fann_nettype_shortcut"));
+   }
+   return ret_val;
+ }
+
+ /** Get the number of input neurons. */
+ static VALUE get_num_input(VALUE self)
+ {
+   RETURN_FANN_INT(fann_get_num_input);
+ }
+
+ /** Get the number of layers in the network. */
+ static VALUE get_num_layers(VALUE self)
+ {
+   RETURN_FANN_INT(fann_get_num_layers);
+ }
+
+ /** Get the number of output neurons. */
+ static VALUE get_num_output(VALUE self)
+ {
+   RETURN_FANN_INT(fann_get_num_output);
+ }
+
+ /** Get the total number of connections in the entire network. */
+ static VALUE get_total_connections(VALUE self)
+ {
+   RETURN_FANN_INT(fann_get_total_connections);
+ }
+
+ /** Get the total number of neurons in the entire network. */
+ static VALUE get_total_neurons(VALUE self)
+ {
+   RETURN_FANN_INT(fann_get_total_neurons);
+ }
+
+ /** Sets the error function used during training. One of the following symbols:
+     :linear, :tanh */
+ static VALUE set_train_error_function(VALUE self, VALUE train_error_function)
+ {
+   Check_Type(train_error_function, T_SYMBOL);
+
+   ID id = SYM2ID(train_error_function);
+   enum fann_errorfunc_enum fann_train_error_function;
+
+   if(id == rb_intern("linear")) {
+     fann_train_error_function = FANN_ERRORFUNC_LINEAR;
+   } else if(id == rb_intern("tanh")) {
+     fann_train_error_function = FANN_ERRORFUNC_TANH;
+   } else {
+     rb_raise(rb_eRuntimeError, "Unrecognized train error function: [%s]", rb_id2name(SYM2ID(train_error_function)));
+   }
+
+   struct fann* f;
+   Data_Get_Struct(self, struct fann, f);
+   fann_set_train_error_function(f, fann_train_error_function);
+   return Qnil;
+ }
+
+ /** Returns the error function used during training. One of the following symbols:
+     :linear, :tanh */
+ static VALUE get_train_error_function(VALUE self)
+ {
+   struct fann* f;
+   enum fann_errorfunc_enum train_error;
+   VALUE ret_val = Qnil;
+   Data_Get_Struct(self, struct fann, f);
+
+   train_error = fann_get_train_error_function(f);
+
+   if(train_error == FANN_ERRORFUNC_LINEAR)
+   {
+     ret_val = ID2SYM(rb_intern("linear"));
+   }
+   else if(train_error == FANN_ERRORFUNC_TANH)
+   {
+     ret_val = ID2SYM(rb_intern("tanh"));
+   }
+   return ret_val;
+ }
+
+ /** Set the training algorithm. One of the following symbols:
+     :incremental, :batch, :rprop, :quickprop */
+ static VALUE set_training_algorithm(VALUE self, VALUE training_algorithm)
+ {
+   Check_Type(training_algorithm, T_SYMBOL);
+
+   ID id = SYM2ID(training_algorithm);
+   enum fann_train_enum fann_train_algorithm;
+
+   if(id == rb_intern("incremental")) {
+     fann_train_algorithm = FANN_TRAIN_INCREMENTAL;
+   } else if(id == rb_intern("batch")) {
+     fann_train_algorithm = FANN_TRAIN_BATCH;
+   } else if(id == rb_intern("rprop")) {
+     fann_train_algorithm = FANN_TRAIN_RPROP;
+   } else if(id == rb_intern("quickprop")) {
+     fann_train_algorithm = FANN_TRAIN_QUICKPROP;
+   } else {
+     rb_raise(rb_eRuntimeError, "Unrecognized training algorithm: [%s]", rb_id2name(SYM2ID(training_algorithm)));
+   }
+
+   struct fann* f;
+   Data_Get_Struct(self, struct fann, f);
+   fann_set_training_algorithm(f, fann_train_algorithm);
+   return Qnil;
+ }
+
+ /** Returns the training algorithm. One of the following symbols:
+     :incremental, :batch, :rprop, :quickprop */
+ static VALUE get_training_algorithm(VALUE self)
+ {
+   struct fann* f;
+   enum fann_train_enum fann_train_algorithm;
+   VALUE ret_val = Qnil;
+   Data_Get_Struct(self, struct fann, f);
+
+   fann_train_algorithm = fann_get_training_algorithm(f);
+
+   if(fann_train_algorithm == FANN_TRAIN_INCREMENTAL) {
+     ret_val = ID2SYM(rb_intern("incremental"));
+   } else if(fann_train_algorithm == FANN_TRAIN_BATCH) {
+     ret_val = ID2SYM(rb_intern("batch"));
+   } else if(fann_train_algorithm == FANN_TRAIN_RPROP) {
+     ret_val = ID2SYM(rb_intern("rprop"));
+   } else if(fann_train_algorithm == FANN_TRAIN_QUICKPROP) {
+     ret_val = ID2SYM(rb_intern("quickprop"));
+   }
+   return ret_val;
+ }
+
+ /** Set the training stop function. One of the following symbols:
+     :mse, :bit */
+ static VALUE set_train_stop_function(VALUE self, VALUE train_stop_function)
+ {
+   Check_Type(train_stop_function, T_SYMBOL);
+   ID id = SYM2ID(train_stop_function);
+   enum fann_stopfunc_enum fann_train_stop_function;
+
+   if(id == rb_intern("mse")) {
+     fann_train_stop_function = FANN_STOPFUNC_MSE;
+   } else if(id == rb_intern("bit")) {
+     fann_train_stop_function = FANN_STOPFUNC_BIT;
+   } else {
+     rb_raise(rb_eRuntimeError, "Unrecognized stop function: [%s]", rb_id2name(SYM2ID(train_stop_function)));
+   }
+
+   struct fann* f;
+   Data_Get_Struct(self, struct fann, f);
+   fann_set_train_stop_function(f, fann_train_stop_function);
+   return Qnil;
+ }
+
+ /** Returns the training stop function. One of the following symbols:
+     :mse, :bit */
+ static VALUE get_train_stop_function(VALUE self)
+ {
+   struct fann* f;
+   enum fann_stopfunc_enum train_stop;
+   VALUE ret_val = Qnil;
+   Data_Get_Struct(self, struct fann, f);
+
+   train_stop = fann_get_train_stop_function(f);
+
+   if(train_stop == FANN_STOPFUNC_MSE)
+   {
+     ret_val = ID2SYM(rb_intern("mse"));
+   }
+   else if(train_stop == FANN_STOPFUNC_BIT)
+   {
+     ret_val = ID2SYM(rb_intern("bit"));
+   }
+   return ret_val;
+ }
+
+
+ /** Will print the connections of the ann in a compact matrix,
+     for easy viewing of the internals of the ann. */
+ static VALUE print_connections(VALUE self)
+ {
+   struct fann* f;
+   Data_Get_Struct(self, struct fann, f);
+   fann_print_connections(f);
+   return Qnil;
+ }
+
+ /** Print current NN parameters to stdout. */
+ static VALUE print_parameters(VALUE self)
+ {
+   struct fann* f;
+   Data_Get_Struct(self, struct fann, f);
+   fann_print_parameters(f);
+   return Qnil;
+ }
+
+ /** Give each connection a random weight between *min_weight* and *max_weight*. */
+ static VALUE randomize_weights(VALUE self, VALUE min_weight, VALUE max_weight)
+ {
+   Check_Type(min_weight, T_FLOAT);
+   Check_Type(max_weight, T_FLOAT);
+   struct fann* f;
+   Data_Get_Struct(self, struct fann, f);
+   fann_randomize_weights(f, NUM2DBL(min_weight), NUM2DBL(max_weight));
+   return Qnil;
+ }
+
+ /** Run the neural net on an array of inputs; returns an array of outputs. */
+ static VALUE run(VALUE self, VALUE inputs)
+ {
+   Check_Type(inputs, T_ARRAY);
+
+   struct fann* f;
+   int i;
+   fann_type* outputs;
+
+   // Convert inputs to the type needed by the NN:
+   unsigned int len = RARRAY(inputs)->len;
+   fann_type fann_inputs[len];
+   for(i = 0; i < len; i++)
+   {
+     fann_inputs[i] = NUM2DBL(RARRAY(inputs)->ptr[i]);
+   }
+
+   // Obtain NN & run method:
+   Data_Get_Struct(self, struct fann, f);
+   outputs = fann_run(f, fann_inputs);
+
+   // Create ruby array & set outputs:
+   VALUE arr;
+   arr = rb_ary_new();
+   unsigned int output_len = fann_get_num_output(f);
+   for(i = 0; i < output_len; i++)
+   {
+     rb_ary_push(arr, rb_float_new(outputs[i]));
+   }
+
+   return arr;
+ }
+
+ /** Initialize the weights using Widrow &amp; Nguyen's algorithm. */
+ static VALUE init_weights(VALUE self, VALUE train_data)
+ {
+   Check_Type(train_data, T_DATA);
+
+   struct fann* f;
+   struct fann_train_data* t;
+   Data_Get_Struct(self, struct fann, f);
+   Data_Get_Struct(train_data, struct fann_train_data, t);
+
+   fann_init_weights(f, t);
+   return Qnil;
+ }
+
+ /** Train with training data created with RubyFann::TrainData.new.
+     max_epochs - The maximum number of epochs the training should continue.
+     epochs_between_reports - The number of epochs between printing a status report to stdout.
+     desired_error - The desired <get_MSE> or <get_bit_fail>, depending on which stop function
+       is chosen by <set_train_stop_function>. */
+ static VALUE train_on_data(VALUE self, VALUE train_data, VALUE max_epochs, VALUE epochs_between_reports, VALUE desired_error)
+ {
+   Check_Type(train_data, T_DATA);
+   Check_Type(max_epochs, T_FIXNUM);
+   Check_Type(epochs_between_reports, T_FIXNUM);
+   Check_Type(desired_error, T_FLOAT);
+
+   struct fann* f;
+   struct fann_train_data* t;
+   Data_Get_Struct(self, struct fann, f);
+   Data_Get_Struct(train_data, struct fann_train_data, t);
+
+   unsigned int fann_max_epochs = NUM2INT(max_epochs);
+   unsigned int fann_epochs_between_reports = NUM2INT(epochs_between_reports);
+   float fann_desired_error = NUM2DBL(desired_error);
+   fann_train_on_data(f, t, fann_max_epochs, fann_epochs_between_reports, fann_desired_error);
+   return Qnil;
+ }
+
+ /** Train one epoch with a set of training data created with RubyFann::TrainData.new. */
+ static VALUE train_epoch(VALUE self, VALUE train_data)
+ {
+   Check_Type(train_data, T_DATA);
+   struct fann* f;
+   struct fann_train_data* t;
+   Data_Get_Struct(self, struct fann, f);
+   Data_Get_Struct(train_data, struct fann_train_data, t);
+   return rb_float_new(fann_train_epoch(f, t));
+ }
+
+ /** Test a set of training data and calculate the MSE for it. */
+ static VALUE test_data(VALUE self, VALUE train_data)
+ {
+   Check_Type(train_data, T_DATA);
+   struct fann* f;
+   struct fann_train_data* t;
+   Data_Get_Struct(self, struct fann, f);
+   Data_Get_Struct(train_data, struct fann_train_data, t);
+   return rb_float_new(fann_test_data(f, t));
+ }
+
+ // Returns the position of the decimal point in the ann.
+ // Only available in fixed-point mode, which we don't need:
+ // static VALUE get_decimal_point(VALUE self)
+ // {
+ //   struct fann* f;
+ //   Data_Get_Struct(self, struct fann, f);
+ //   return INT2NUM(fann_get_decimal_point(f));
+ // }
+
+ // Returns the multiplier that fixed-point data is multiplied with.
+ // Only available in fixed-point mode, which we don't need:
+ // static VALUE get_multiplier(VALUE self)
+ // {
+ //   struct fann* f;
+ //   Data_Get_Struct(self, struct fann, f);
+ //   return INT2NUM(fann_get_multiplier(f));
+ // }
+
+ /** Perform cascade training with training data created with RubyFann::TrainData.new.
+     max_neurons - The maximum number of neurons to be added to the ann.
+     neurons_between_reports - The number of neurons between printing a status report to stdout.
+     desired_error - The desired <get_MSE> or <get_bit_fail>, depending on which stop function
+       is chosen by <set_train_stop_function>. */
+ static VALUE cascadetrain_on_data(VALUE self, VALUE train_data, VALUE max_neurons, VALUE neurons_between_reports, VALUE desired_error)
+ {
+   Check_Type(train_data, T_DATA);
+   Check_Type(max_neurons, T_FIXNUM);
+   Check_Type(neurons_between_reports, T_FIXNUM);
+   Check_Type(desired_error, T_FLOAT);
+
+   struct fann* f;
+   struct fann_train_data* t;
+   Data_Get_Struct(self, struct fann, f);
+   Data_Get_Struct(train_data, struct fann_train_data, t);
+
+   unsigned int fann_max_neurons = NUM2INT(max_neurons);
+   unsigned int fann_neurons_between_reports = NUM2INT(neurons_between_reports);
+   float fann_desired_error = NUM2DBL(desired_error);
+
+   fann_cascadetrain_on_data(f, t, fann_max_neurons, fann_neurons_between_reports, fann_desired_error);
+   return Qnil;
+ }
+
+ /** The cascade output change fraction is a number between 0 and 1. */
+ static VALUE get_cascade_output_change_fraction(VALUE self)
+ {
+   RETURN_FANN_FLT(fann_get_cascade_output_change_fraction);
+ }
+
+ /** The cascade output change fraction is a number between 0 and 1. */
+ static VALUE set_cascade_output_change_fraction(VALUE self, VALUE cascade_output_change_fraction)
+ {
+   SET_FANN_FLT(cascade_output_change_fraction, fann_set_cascade_output_change_fraction);
+ }
+
+ /** The number of cascade output stagnation epochs determines the number of epochs training is allowed to
+     continue without changing the MSE by a fraction of <get_cascade_output_change_fraction>. */
+ static VALUE get_cascade_output_stagnation_epochs(VALUE self)
+ {
+   RETURN_FANN_INT(fann_get_cascade_output_stagnation_epochs);
+ }
+
+ /** The number of cascade output stagnation epochs determines the number of epochs training is allowed to
+     continue without changing the MSE by a fraction of <get_cascade_output_change_fraction>. */
+ static VALUE set_cascade_output_stagnation_epochs(VALUE self, VALUE cascade_output_stagnation_epochs)
+ {
+   SET_FANN_INT(cascade_output_stagnation_epochs, fann_set_cascade_output_stagnation_epochs);
+ }
+
+ /** The cascade candidate change fraction is a number between 0 and 1. */
+ static VALUE get_cascade_candidate_change_fraction(VALUE self)
+ {
+   RETURN_FANN_FLT(fann_get_cascade_candidate_change_fraction);
+ }
+
+ /** The cascade candidate change fraction is a number between 0 and 1. */
+ static VALUE set_cascade_candidate_change_fraction(VALUE self, VALUE cascade_candidate_change_fraction)
+ {
+   SET_FANN_FLT(cascade_candidate_change_fraction, fann_set_cascade_candidate_change_fraction);
+ }
+
+ /** The number of cascade candidate stagnation epochs determines the number of epochs training is allowed to
+     continue without changing the MSE by a fraction of <get_cascade_candidate_change_fraction>. */
+ static VALUE get_cascade_candidate_stagnation_epochs(VALUE self)
+ {
+   RETURN_FANN_UINT(fann_get_cascade_candidate_stagnation_epochs);
+ }
+
+ /** The number of cascade candidate stagnation epochs determines the number of epochs training is allowed to
+     continue without changing the MSE by a fraction of <get_cascade_candidate_change_fraction>. */
+ static VALUE set_cascade_candidate_stagnation_epochs(VALUE self, VALUE cascade_candidate_stagnation_epochs)
+ {
+   SET_FANN_UINT(cascade_candidate_stagnation_epochs, fann_set_cascade_candidate_stagnation_epochs);
+ }
+
+ /** The weight multiplier is a parameter which is used to multiply the weights from the candidate neuron
+     before adding the neuron to the neural network. This parameter is usually between 0 and 1, and is used
+     to make the training a bit less aggressive. */
+ static VALUE get_cascade_weight_multiplier(VALUE self)
+ {
+   RETURN_FANN_DBL(fann_get_cascade_weight_multiplier);
+ }
+
+ /** The weight multiplier is a parameter which is used to multiply the weights from the candidate neuron
+     before adding the neuron to the neural network. This parameter is usually between 0 and 1, and is used
+     to make the training a bit less aggressive. */
+ static VALUE set_cascade_weight_multiplier(VALUE self, VALUE cascade_weight_multiplier)
+ {
+   SET_FANN_DBL(cascade_weight_multiplier, fann_set_cascade_weight_multiplier);
+ }
+
+ /** The candidate limit is a limit for how much the candidate neuron may be trained.
+     The limit is a limit on the proportion between the MSE and candidate score. */
+ static VALUE get_cascade_candidate_limit(VALUE self)
+ {
+   RETURN_FANN_DBL(fann_get_cascade_candidate_limit);
+ }
+
+ /** The candidate limit is a limit for how much the candidate neuron may be trained.
+     The limit is a limit on the proportion between the MSE and candidate score. */
+ static VALUE set_cascade_candidate_limit(VALUE self, VALUE cascade_candidate_limit)
+ {
+   SET_FANN_DBL(cascade_candidate_limit, fann_set_cascade_candidate_limit);
+ }
+
+ /** The maximum out epochs determines the maximum number of epochs the output connections
+     may be trained after adding a new candidate neuron. */
+ static VALUE get_cascade_max_out_epochs(VALUE self)
+ {
+   RETURN_FANN_UINT(fann_get_cascade_max_out_epochs);
+ }
+
+ /** The maximum out epochs determines the maximum number of epochs the output connections
+     may be trained after adding a new candidate neuron. */
+ static VALUE set_cascade_max_out_epochs(VALUE self, VALUE cascade_max_out_epochs)
+ {
+   SET_FANN_UINT(cascade_max_out_epochs, fann_set_cascade_max_out_epochs);
+ }
+
+ /** The maximum candidate epochs determines the maximum number of epochs the input
+     connections to the candidates may be trained before adding a new candidate neuron. */
+ static VALUE get_cascade_max_cand_epochs(VALUE self)
+ {
+   RETURN_FANN_UINT(fann_get_cascade_max_cand_epochs);
+ }
+
+ /** The maximum candidate epochs determines the maximum number of epochs the input
+     connections to the candidates may be trained before adding a new candidate neuron. */
+ static VALUE set_cascade_max_cand_epochs(VALUE self, VALUE cascade_max_cand_epochs)
+ {
+   SET_FANN_UINT(cascade_max_cand_epochs, fann_set_cascade_max_cand_epochs);
+ }
+
+ /** The number of candidates used during training (calculated by multiplying <get_cascade_activation_functions_count>,
+     <get_cascade_activation_steepnesses_count> and <get_cascade_num_candidate_groups>). */
+ static VALUE get_cascade_num_candidates(VALUE self)
+ {
+   RETURN_FANN_UINT(fann_get_cascade_num_candidates);
+ }
+
+ /** The number of activation functions in the <get_cascade_activation_functions> array. */
+ static VALUE get_cascade_activation_functions_count(VALUE self)
+ {
+   RETURN_FANN_UINT(fann_get_cascade_activation_functions_count);
+ }
+
+ /** The learning rate is used to determine how aggressive training should be for some of the
+     training algorithms (:incremental, :batch, :quickprop).
+     Note, however, that it is not used in :rprop.
+     The default learning rate is 0.7. */
+ static VALUE get_learning_rate(VALUE self)
+ {
+   RETURN_FANN_FLT(fann_get_learning_rate);
+ }
+
+ /** The learning rate is used to determine how aggressive training should be for some of the
+     training algorithms (:incremental, :batch, :quickprop).
+     Note, however, that it is not used in :rprop.
+     The default learning rate is 0.7. */
+ static VALUE set_learning_rate(VALUE self, VALUE learning_rate)
+ {
+   SET_FANN_FLT(learning_rate, fann_set_learning_rate);
+ }
+
+ /** Get the learning momentum. */
+ static VALUE get_learning_momentum(VALUE self)
+ {
+   RETURN_FANN_FLT(fann_get_learning_momentum);
+ }
+
+ /** Set the learning momentum. */
+ static VALUE set_learning_momentum(VALUE self, VALUE learning_momentum)
+ {
+   SET_FANN_FLT(learning_momentum, fann_set_learning_momentum);
+ }
+
+ /** The cascade activation functions is an array of the different activation functions used by
1184
+ the candidates. The default is [:sigmoid, :sigmoid_symmetric, :gaussian, :gaussian_symmetric, :elliot, :elliot_symmetric] */
1185
+ static VALUE set_cascade_activation_functions(VALUE self, VALUE cascade_activation_functions)
1186
+ {
1187
+ Check_Type(cascade_activation_functions, T_ARRAY);
1188
+ struct fann* f;
1189
+ Data_Get_Struct (self, struct fann, f);
1190
+
1191
+ unsigned int cnt = RARRAY(cascade_activation_functions)->len;
1192
+ enum fann_activationfunc_enum fann_activation_functions[cnt];
1193
+ int i;
1194
+ for (i=0; i<cnt; i++)
1195
+ {
1196
+ fann_activation_functions[i] = sym_to_activation_function(RARRAY(cascade_activation_functions)->ptr[i]);
1197
+ }
1198
+
1199
+ fann_set_cascade_activation_functions(f, fann_activation_functions, cnt);
1200
+ }
1201
+
1202
+ /** The cascade activation functions is an array of the different activation functions used by
1203
+ the candidates. The default is [:sigmoid, :sigmoid_symmetric, :gaussian, :gaussian_symmetric, :elliot, :elliot_symmetric] */
1204
+ static VALUE get_cascade_activation_functions(VALUE self)
1205
+ {
1206
+ struct fann* f;
1207
+ Data_Get_Struct (self, struct fann, f);
1208
+ unsigned int cnt = fann_get_cascade_activation_functions_count(f);
1209
+ enum fann_activationfunc_enum* fann_functions = fann_get_cascade_activation_functions(f);
1210
+
1211
+ // Create ruby array & set outputs:
1212
+ VALUE arr;
1213
+ arr = rb_ary_new();
1214
+ int i;
1215
+ for (i=0; i<cnt; i++)
1216
+ {
1217
+ rb_ary_push(arr, activation_function_to_sym(fann_functions[i]));
1218
+ }
1219
+
1220
+ return arr;
1221
+ }
1222
+
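+ /* Round-trip sketch for the setter/getter pair above, using symbols from
+    the default list documented there:
+
+      fann.set_cascade_activation_functions([:sigmoid, :gaussian])
+      fann.get_cascade_activation_functions         # => [:sigmoid, :gaussian]
+      fann.get_cascade_activation_functions_count   # => 2
+ */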
1223
+ /** The number of activation steepnesses in the <get_cascade_activation_steepnesses> array. */
1224
+ static VALUE get_cascade_activation_steepnesses_count(VALUE self)
1225
+ {
1226
+ RETURN_FANN_UINT(fann_get_cascade_activation_steepnesses_count);
1227
+ }
1228
+
1229
+ /** The number of candidate groups is the number of groups of identical candidates which will be used
1230
+ during training. */
1231
+ static VALUE get_cascade_num_candidate_groups(VALUE self)
1232
+ {
1233
+ RETURN_FANN_UINT(fann_get_cascade_num_candidate_groups);
1234
+ }
1235
+
1236
+ /** The number of candidate groups is the number of groups of identical candidates which will be used
1237
+ during training. */
1238
+ static VALUE set_cascade_num_candidate_groups(VALUE self, VALUE cascade_num_candidate_groups)
1239
+ {
1240
+ SET_FANN_UINT(cascade_num_candidate_groups, fann_set_cascade_num_candidate_groups);
1241
+ }
1242
+
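+ /* Sketch for the accessors above: more candidate groups train more
+    identical candidates in parallel at the cost of memory (the FANN default
+    is 2; 4 below is illustrative):
+
+      fann.set_cascade_num_candidate_groups(4)
+      fann.get_cascade_num_candidate_groups   # => 4
+ */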
1243
+ /** The cascade activation steepnesses array is an array of the different activation steepnesses used by
1244
+ the candidates. */
1245
+ static VALUE set_cascade_activation_steepnesses(VALUE self, VALUE cascade_activation_steepnesses)
1246
+ {
1247
+ Check_Type(cascade_activation_steepnesses, T_ARRAY);
1248
+ struct fann* f;
1249
+ Data_Get_Struct (self, struct fann, f);
1250
+
1251
+ unsigned int cnt = RARRAY(cascade_activation_steepnesses)->len;
1252
+ fann_type fann_activation_steepnesses[cnt];
1253
+ int i;
1254
+ for (i=0; i<cnt; i++)
1255
+ {
1256
+ fann_activation_steepnesses[i] = NUM2DBL(RARRAY(cascade_activation_steepnesses)->ptr[i]);
1257
+ }
1258
+
1259
+ fann_set_cascade_activation_steepnesses(f, fann_activation_steepnesses, cnt);
+ return self;   // the function is declared to return VALUE
1260
+ }
1261
+
1262
+ /** The cascade activation steepnesses array is an array of the different activation steepnesses used by
1263
+ the candidates. */
1264
+ static VALUE get_cascade_activation_steepnesses(VALUE self)
1265
+ {
1266
+ struct fann* f;
1267
+ Data_Get_Struct (self, struct fann, f);
1268
+ fann_type* fann_steepnesses = fann_get_cascade_activation_steepnesses(f);
1269
+ unsigned int cnt = fann_get_cascade_activation_steepnesses_count(f);
1270
+
1271
+ // Create ruby array & set outputs:
1272
+ VALUE arr;
1273
+ arr = rb_ary_new();
1274
+ int i;
1275
+ for (i=0; i<cnt; i++)
1276
+ {
1277
+ rb_ary_push(arr, rb_float_new(fann_steepnesses[i]));
1278
+ }
1279
+
1280
+ return arr;
1281
+ }
1282
+
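+ /* Round-trip sketch for the steepness accessors above (values are
+    illustrative; they travel as plain Floats on the Ruby side):
+
+      fann.set_cascade_activation_steepnesses([0.25, 0.75])
+      fann.get_cascade_activation_steepnesses         # => [0.25, 0.75]
+      fann.get_cascade_activation_steepnesses_count   # => 2
+ */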
1283
+ /** Save the entire network to a configuration file with the given name. */
1284
+ static VALUE nn_save(VALUE self, VALUE filename)
1285
+ {
1286
+ struct fann* f;
1287
+ Data_Get_Struct (self, struct fann, f);
1288
+ int status = fann_save(f, StringValuePtr(filename));
1289
+ return INT2NUM(status);
1290
+ }
1291
+
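+ /* Usage sketch: persisting a trained network. fann_save returns 0 on
+    success and -1 on failure, and that status is what the Ruby method
+    returns (the filename is illustrative):
+
+      fann.save("xor_float.net")   # => 0
+ */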
1292
+ /** Initializes classes under RubyFann module/namespace. */
1293
+ void Init_neural_network ()
1294
+ {
1295
+ // RubyFann module/namespace:
1296
+ m_rb_fann_module = rb_define_module ("RubyFann");
1297
+
1298
+ // Standard NN class:
1299
+ m_rb_fann_standard_class = rb_define_class_under (m_rb_fann_module, "Standard", rb_cObject);
1300
+ rb_define_alloc_func (m_rb_fann_standard_class, fann_allocate);
1301
+ rb_define_method(m_rb_fann_standard_class, "initialize", fann_initialize, 1);
1302
+ rb_define_method(m_rb_fann_standard_class, "init_weights", init_weights, 1);
1303
+ rb_define_method(m_rb_fann_standard_class, "set_activation_function", set_activation_function, 3);
1304
+ rb_define_method(m_rb_fann_standard_class, "set_activation_function_hidden", set_activation_function_hidden, 1);
1305
+ rb_define_method(m_rb_fann_standard_class, "set_activation_function_layer", set_activation_function_layer, 1);
1306
+ rb_define_method(m_rb_fann_standard_class, "set_activation_function_output", set_activation_function_output, 1);
1307
+ rb_define_method(m_rb_fann_standard_class, "get_activation_steepness", get_activation_steepness, 2);
1308
+ rb_define_method(m_rb_fann_standard_class, "set_activation_steepness", set_activation_steepness, 3);
1309
+ rb_define_method(m_rb_fann_standard_class, "set_activation_steepness_hidden", set_activation_steepness_hidden, 1);
1310
+ rb_define_method(m_rb_fann_standard_class, "set_activation_steepness_layer", set_activation_steepness_layer, 2);
1311
+ rb_define_method(m_rb_fann_standard_class, "set_activation_steepness_output", set_activation_steepness_output, 1);
1312
+ rb_define_method(m_rb_fann_standard_class, "get_train_error_function", get_train_error_function, 0);
1313
+ rb_define_method(m_rb_fann_standard_class, "set_train_error_function", set_train_error_function, 1);
1314
+ rb_define_method(m_rb_fann_standard_class, "get_train_stop_function", get_train_stop_function, 0);
1315
+ rb_define_method(m_rb_fann_standard_class, "set_train_stop_function", set_train_stop_function, 1);
1316
+ rb_define_method(m_rb_fann_standard_class, "get_bit_fail_limit", get_bit_fail_limit, 0);
1317
+ rb_define_method(m_rb_fann_standard_class, "set_bit_fail_limit", set_bit_fail_limit, 1);
1318
+ rb_define_method(m_rb_fann_standard_class, "get_quickprop_decay", get_quickprop_decay, 0);
1319
+ rb_define_method(m_rb_fann_standard_class, "set_quickprop_decay", set_quickprop_decay, 1);
1320
+ rb_define_method(m_rb_fann_standard_class, "get_quickprop_mu", get_quickprop_mu, 0);
1321
+ rb_define_method(m_rb_fann_standard_class, "set_quickprop_mu", set_quickprop_mu, 1);
1322
+ rb_define_method(m_rb_fann_standard_class, "get_rprop_increase_factor", get_rprop_increase_factor, 0);
1323
+ rb_define_method(m_rb_fann_standard_class, "set_rprop_increase_factor", set_rprop_increase_factor, 1);
1324
+ rb_define_method(m_rb_fann_standard_class, "get_rprop_decrease_factor", get_rprop_decrease_factor, 0);
1325
+ rb_define_method(m_rb_fann_standard_class, "set_rprop_decrease_factor", set_rprop_decrease_factor, 1);
1326
+ rb_define_method(m_rb_fann_standard_class, "get_rprop_delta_max", get_rprop_delta_max, 0);
1327
+ rb_define_method(m_rb_fann_standard_class, "set_rprop_delta_max", set_rprop_delta_max, 1);
1328
+ rb_define_method(m_rb_fann_standard_class, "get_rprop_delta_min", get_rprop_delta_min, 0);
1329
+ rb_define_method(m_rb_fann_standard_class, "set_rprop_delta_min", set_rprop_delta_min, 1);
1330
+ rb_define_method(m_rb_fann_standard_class, "get_rprop_delta_zero", get_rprop_delta_zero, 0);
1331
+ rb_define_method(m_rb_fann_standard_class, "set_rprop_delta_zero", set_rprop_delta_zero, 1);
1332
+ rb_define_method(m_rb_fann_standard_class, "get_bias_array", get_bias_array, 0);
1333
+ rb_define_method(m_rb_fann_standard_class, "get_connection_rate", get_connection_rate, 0);
1334
+ rb_define_method(m_rb_fann_standard_class, "get_layer_array", get_layer_array, 0);
1335
+ rb_define_method(m_rb_fann_standard_class, "get_network_type", get_network_type, 0);
1336
+ rb_define_method(m_rb_fann_standard_class, "get_neurons", get_neurons, 0);
1337
+ rb_define_method(m_rb_fann_standard_class, "get_num_input", get_num_input, 0);
1338
+ rb_define_method(m_rb_fann_standard_class, "get_num_layers", get_num_layers, 0);
1339
+ rb_define_method(m_rb_fann_standard_class, "get_num_output", get_num_output, 0);
1340
+ rb_define_method(m_rb_fann_standard_class, "get_total_connections", get_total_connections, 0);
1341
+ rb_define_method(m_rb_fann_standard_class, "get_total_neurons", get_total_neurons, 0);
1344
+ rb_define_method(m_rb_fann_standard_class, "print_connections", print_connections, 0);
1345
+ rb_define_method(m_rb_fann_standard_class, "print_parameters", print_parameters, 0);
1346
+ rb_define_method(m_rb_fann_standard_class, "randomize_weights", randomize_weights, 2);
1347
+ rb_define_method(m_rb_fann_standard_class, "run", run, 1);
1348
+ rb_define_method(m_rb_fann_standard_class, "train_on_data", train_on_data, 4);
1349
+ rb_define_method(m_rb_fann_standard_class, "train_epoch", train_epoch, 1);
1350
+ rb_define_method(m_rb_fann_standard_class, "test_data", test_data, 1);
1351
+ rb_define_method(m_rb_fann_standard_class, "get_MSE", get_MSE, 0);
1352
+ rb_define_method(m_rb_fann_standard_class, "get_bit_fail", get_bit_fail, 0);
1353
+ rb_define_method(m_rb_fann_standard_class, "reset_MSE", reset_MSE, 0);
1354
+ rb_define_method(m_rb_fann_standard_class, "get_learning_rate", get_learning_rate, 0);
1355
+ rb_define_method(m_rb_fann_standard_class, "set_learning_rate", set_learning_rate, 1);
1356
+ rb_define_method(m_rb_fann_standard_class, "get_learning_momentum", get_learning_momentum, 0);
1357
+ rb_define_method(m_rb_fann_standard_class, "set_learning_momentum", set_learning_momentum, 1);
1358
+ rb_define_method(m_rb_fann_standard_class, "get_training_algorithm", get_training_algorithm, 0);
1359
+ rb_define_method(m_rb_fann_standard_class, "set_training_algorithm", set_training_algorithm, 1);
1360
+
1361
+
1362
+ // Cascade functions:
1363
+ rb_define_method(m_rb_fann_standard_class, "cascadetrain_on_data", cascadetrain_on_data, 4);
1364
+ rb_define_method(m_rb_fann_standard_class, "get_cascade_output_change_fraction", get_cascade_output_change_fraction, 0);
1365
+ rb_define_method(m_rb_fann_standard_class, "set_cascade_output_change_fraction", set_cascade_output_change_fraction, 1);
1366
+ rb_define_method(m_rb_fann_standard_class, "get_cascade_output_stagnation_epochs", get_cascade_output_stagnation_epochs, 0);
1367
+ rb_define_method(m_rb_fann_standard_class, "set_cascade_output_stagnation_epochs", set_cascade_output_stagnation_epochs, 1);
1368
+ rb_define_method(m_rb_fann_standard_class, "get_cascade_candidate_change_fraction", get_cascade_candidate_change_fraction, 0);
1369
+ rb_define_method(m_rb_fann_standard_class, "set_cascade_candidate_change_fraction", set_cascade_candidate_change_fraction, 1);
1370
+ rb_define_method(m_rb_fann_standard_class, "get_cascade_candidate_stagnation_epochs", get_cascade_candidate_stagnation_epochs, 0);
1371
+ rb_define_method(m_rb_fann_standard_class, "set_cascade_candidate_stagnation_epochs", set_cascade_candidate_stagnation_epochs, 1);
1372
+ rb_define_method(m_rb_fann_standard_class, "get_cascade_weight_multiplier", get_cascade_weight_multiplier, 0);
1373
+ rb_define_method(m_rb_fann_standard_class, "set_cascade_weight_multiplier", set_cascade_weight_multiplier, 1);
1374
+ rb_define_method(m_rb_fann_standard_class, "get_cascade_candidate_limit", get_cascade_candidate_limit, 0);
1375
+ rb_define_method(m_rb_fann_standard_class, "set_cascade_candidate_limit", set_cascade_candidate_limit, 1);
1376
+ rb_define_method(m_rb_fann_standard_class, "get_cascade_max_out_epochs", get_cascade_max_out_epochs, 0);
1377
+ rb_define_method(m_rb_fann_standard_class, "set_cascade_max_out_epochs", set_cascade_max_out_epochs, 1);
1378
+ rb_define_method(m_rb_fann_standard_class, "get_cascade_max_cand_epochs", get_cascade_max_cand_epochs, 0);
1379
+ rb_define_method(m_rb_fann_standard_class, "set_cascade_max_cand_epochs", set_cascade_max_cand_epochs, 1);
1380
+ rb_define_method(m_rb_fann_standard_class, "get_cascade_num_candidates", get_cascade_num_candidates, 0);
1381
+ rb_define_method(m_rb_fann_standard_class, "get_cascade_activation_functions_count", get_cascade_activation_functions_count, 0);
1382
+ rb_define_method(m_rb_fann_standard_class, "get_cascade_activation_functions", get_cascade_activation_functions, 0);
1383
+ rb_define_method(m_rb_fann_standard_class, "set_cascade_activation_functions", set_cascade_activation_functions, 1);
1384
+ rb_define_method(m_rb_fann_standard_class, "get_cascade_activation_steepnesses_count", get_cascade_activation_steepnesses_count, 0);
1385
+ rb_define_method(m_rb_fann_standard_class, "get_cascade_activation_steepnesses", get_cascade_activation_steepnesses, 0);
1386
+ rb_define_method(m_rb_fann_standard_class, "set_cascade_activation_steepnesses", set_cascade_activation_steepnesses, 1);
1387
+ rb_define_method(m_rb_fann_standard_class, "get_cascade_num_candidate_groups", get_cascade_num_candidate_groups, 0);
1388
+ rb_define_method(m_rb_fann_standard_class, "set_cascade_num_candidate_groups", set_cascade_num_candidate_groups, 1);
1389
+ rb_define_method(m_rb_fann_standard_class, "save", nn_save, 1);
1390
+
1391
+
1392
+ // Uncomment for fixed-point mode (FANN must also be recompiled for fixed point); probably not needed:
1393
+ //rb_define_method(clazz, "get_decimal_point", get_decimal_point, 0);
1394
+ //rb_define_method(clazz, "get_multiplier", get_multiplier, 0);
1395
+
1396
+ // Shortcut NN class (methods registered again, as above, so that rdoc generation tools can find them):
1397
+ m_rb_fann_shortcut_class = rb_define_class_under (m_rb_fann_module, "Shortcut", rb_cObject);
1398
+ rb_define_alloc_func (m_rb_fann_shortcut_class, fann_allocate);
1399
+ rb_define_method(m_rb_fann_shortcut_class, "initialize", fann_initialize, 1);
1400
+ rb_define_method(m_rb_fann_shortcut_class, "init_weights", init_weights, 1);
1401
+ rb_define_method(m_rb_fann_shortcut_class, "set_activation_function", set_activation_function, 3);
1402
+ rb_define_method(m_rb_fann_shortcut_class, "set_activation_function_hidden", set_activation_function_hidden, 1);
1403
+ rb_define_method(m_rb_fann_shortcut_class, "set_activation_function_layer", set_activation_function_layer, 1);
1404
+ rb_define_method(m_rb_fann_shortcut_class, "set_activation_function_output", set_activation_function_output, 1);
1405
+ rb_define_method(m_rb_fann_shortcut_class, "get_activation_steepness", get_activation_steepness, 2);
1406
+ rb_define_method(m_rb_fann_shortcut_class, "set_activation_steepness", set_activation_steepness, 3);
1407
+ rb_define_method(m_rb_fann_shortcut_class, "set_activation_steepness_hidden", set_activation_steepness_hidden, 1);
1408
+ rb_define_method(m_rb_fann_shortcut_class, "set_activation_steepness_layer", set_activation_steepness_layer, 2);
1409
+ rb_define_method(m_rb_fann_shortcut_class, "set_activation_steepness_output", set_activation_steepness_output, 1);
1410
+ rb_define_method(m_rb_fann_shortcut_class, "get_train_error_function", get_train_error_function, 0);
1411
+ rb_define_method(m_rb_fann_shortcut_class, "set_train_error_function", set_train_error_function, 1);
1412
+ rb_define_method(m_rb_fann_shortcut_class, "get_train_stop_function", get_train_stop_function, 0);
1413
+ rb_define_method(m_rb_fann_shortcut_class, "set_train_stop_function", set_train_stop_function, 1);
1414
+ rb_define_method(m_rb_fann_shortcut_class, "get_bit_fail_limit", get_bit_fail_limit, 0);
1415
+ rb_define_method(m_rb_fann_shortcut_class, "set_bit_fail_limit", set_bit_fail_limit, 1);
1416
+ rb_define_method(m_rb_fann_shortcut_class, "get_quickprop_decay", get_quickprop_decay, 0);
1417
+ rb_define_method(m_rb_fann_shortcut_class, "set_quickprop_decay", set_quickprop_decay, 1);
1418
+ rb_define_method(m_rb_fann_shortcut_class, "get_quickprop_mu", get_quickprop_mu, 0);
1419
+ rb_define_method(m_rb_fann_shortcut_class, "set_quickprop_mu", set_quickprop_mu, 1);
1420
+ rb_define_method(m_rb_fann_shortcut_class, "get_rprop_increase_factor", get_rprop_increase_factor, 0);
1421
+ rb_define_method(m_rb_fann_shortcut_class, "set_rprop_increase_factor", set_rprop_increase_factor, 1);
1422
+ rb_define_method(m_rb_fann_shortcut_class, "get_rprop_decrease_factor", get_rprop_decrease_factor, 0);
1423
+ rb_define_method(m_rb_fann_shortcut_class, "set_rprop_decrease_factor", set_rprop_decrease_factor, 1);
1424
+ rb_define_method(m_rb_fann_shortcut_class, "get_rprop_delta_max", get_rprop_delta_max, 0);
1425
+ rb_define_method(m_rb_fann_shortcut_class, "set_rprop_delta_max", set_rprop_delta_max, 1);
1426
+ rb_define_method(m_rb_fann_shortcut_class, "get_rprop_delta_min", get_rprop_delta_min, 0);
1427
+ rb_define_method(m_rb_fann_shortcut_class, "set_rprop_delta_min", set_rprop_delta_min, 1);
1428
+ rb_define_method(m_rb_fann_shortcut_class, "get_rprop_delta_zero", get_rprop_delta_zero, 0);
1429
+ rb_define_method(m_rb_fann_shortcut_class, "set_rprop_delta_zero", set_rprop_delta_zero, 1);
1430
+ rb_define_method(m_rb_fann_shortcut_class, "get_bias_array", get_bias_array, 0);
1431
+ rb_define_method(m_rb_fann_shortcut_class, "get_connection_rate", get_connection_rate, 0);
1432
+ rb_define_method(m_rb_fann_shortcut_class, "get_layer_array", get_layer_array, 0);
1433
+ rb_define_method(m_rb_fann_shortcut_class, "get_network_type", get_network_type, 0);
1434
+ rb_define_method(m_rb_fann_shortcut_class, "get_neurons", get_neurons, 0);
1435
+ rb_define_method(m_rb_fann_shortcut_class, "get_num_input", get_num_input, 0);
1436
+ rb_define_method(m_rb_fann_shortcut_class, "get_num_layers", get_num_layers, 0);
1437
+ rb_define_method(m_rb_fann_shortcut_class, "get_num_output", get_num_output, 0);
1438
+ rb_define_method(m_rb_fann_shortcut_class, "get_total_connections", get_total_connections, 0);
1439
+ rb_define_method(m_rb_fann_shortcut_class, "get_total_neurons", get_total_neurons, 0);
1442
+ rb_define_method(m_rb_fann_shortcut_class, "print_connections", print_connections, 0);
1443
+ rb_define_method(m_rb_fann_shortcut_class, "print_parameters", print_parameters, 0);
1444
+ rb_define_method(m_rb_fann_shortcut_class, "randomize_weights", randomize_weights, 2);
1445
+ rb_define_method(m_rb_fann_shortcut_class, "run", run, 1);
1446
+ rb_define_method(m_rb_fann_shortcut_class, "train_on_data", train_on_data, 4);
1447
+ rb_define_method(m_rb_fann_shortcut_class, "train_epoch", train_epoch, 1);
1448
+ rb_define_method(m_rb_fann_shortcut_class, "test_data", test_data, 1);
1449
+ rb_define_method(m_rb_fann_shortcut_class, "get_MSE", get_MSE, 0);
1450
+ rb_define_method(m_rb_fann_shortcut_class, "get_bit_fail", get_bit_fail, 0);
1451
+ rb_define_method(m_rb_fann_shortcut_class, "reset_MSE", reset_MSE, 0);
1452
+ rb_define_method(m_rb_fann_shortcut_class, "get_learning_rate", get_learning_rate, 0);
1453
+ rb_define_method(m_rb_fann_shortcut_class, "set_learning_rate", set_learning_rate, 1);
1454
+ rb_define_method(m_rb_fann_shortcut_class, "get_learning_momentum", get_learning_momentum, 0);
1455
+ rb_define_method(m_rb_fann_shortcut_class, "set_learning_momentum", set_learning_momentum, 1);
1456
+ rb_define_method(m_rb_fann_shortcut_class, "get_training_algorithm", get_training_algorithm, 0);
1457
+ rb_define_method(m_rb_fann_shortcut_class, "set_training_algorithm", set_training_algorithm, 1);
1458
+
1459
+ // Cascade functions:
1460
+ rb_define_method(m_rb_fann_shortcut_class, "cascadetrain_on_data", cascadetrain_on_data, 4);
1461
+ rb_define_method(m_rb_fann_shortcut_class, "get_cascade_output_change_fraction", get_cascade_output_change_fraction, 0);
1462
+ rb_define_method(m_rb_fann_shortcut_class, "set_cascade_output_change_fraction", set_cascade_output_change_fraction, 1);
1463
+ rb_define_method(m_rb_fann_shortcut_class, "get_cascade_output_stagnation_epochs", get_cascade_output_stagnation_epochs, 0);
1464
+ rb_define_method(m_rb_fann_shortcut_class, "set_cascade_output_stagnation_epochs", set_cascade_output_stagnation_epochs, 1);
1465
+ rb_define_method(m_rb_fann_shortcut_class, "get_cascade_candidate_change_fraction", get_cascade_candidate_change_fraction, 0);
1466
+ rb_define_method(m_rb_fann_shortcut_class, "set_cascade_candidate_change_fraction", set_cascade_candidate_change_fraction, 1);
1467
+ rb_define_method(m_rb_fann_shortcut_class, "get_cascade_candidate_stagnation_epochs", get_cascade_candidate_stagnation_epochs, 0);
1468
+ rb_define_method(m_rb_fann_shortcut_class, "set_cascade_candidate_stagnation_epochs", set_cascade_candidate_stagnation_epochs, 1);
1469
+ rb_define_method(m_rb_fann_shortcut_class, "get_cascade_weight_multiplier", get_cascade_weight_multiplier, 0);
1470
+ rb_define_method(m_rb_fann_shortcut_class, "set_cascade_weight_multiplier", set_cascade_weight_multiplier, 1);
1471
+ rb_define_method(m_rb_fann_shortcut_class, "get_cascade_candidate_limit", get_cascade_candidate_limit, 0);
1472
+ rb_define_method(m_rb_fann_shortcut_class, "set_cascade_candidate_limit", set_cascade_candidate_limit, 1);
1473
+ rb_define_method(m_rb_fann_shortcut_class, "get_cascade_max_out_epochs", get_cascade_max_out_epochs, 0);
1474
+ rb_define_method(m_rb_fann_shortcut_class, "set_cascade_max_out_epochs", set_cascade_max_out_epochs, 1);
1475
+ rb_define_method(m_rb_fann_shortcut_class, "get_cascade_max_cand_epochs", get_cascade_max_cand_epochs, 0);
1476
+ rb_define_method(m_rb_fann_shortcut_class, "set_cascade_max_cand_epochs", set_cascade_max_cand_epochs, 1);
1477
+ rb_define_method(m_rb_fann_shortcut_class, "get_cascade_num_candidates", get_cascade_num_candidates, 0);
1478
+ rb_define_method(m_rb_fann_shortcut_class, "get_cascade_activation_functions_count", get_cascade_activation_functions_count, 0);
1479
+ rb_define_method(m_rb_fann_shortcut_class, "get_cascade_activation_functions", get_cascade_activation_functions, 0);
1480
+ rb_define_method(m_rb_fann_shortcut_class, "set_cascade_activation_functions", set_cascade_activation_functions, 1);
1481
+ rb_define_method(m_rb_fann_shortcut_class, "get_cascade_activation_steepnesses_count", get_cascade_activation_steepnesses_count, 0);
1482
+ rb_define_method(m_rb_fann_shortcut_class, "get_cascade_activation_steepnesses", get_cascade_activation_steepnesses, 0);
1483
+ rb_define_method(m_rb_fann_shortcut_class, "set_cascade_activation_steepnesses", set_cascade_activation_steepnesses, 1);
1484
+ rb_define_method(m_rb_fann_shortcut_class, "get_cascade_num_candidate_groups", get_cascade_num_candidate_groups, 0);
1485
+ rb_define_method(m_rb_fann_shortcut_class, "set_cascade_num_candidate_groups", set_cascade_num_candidate_groups, 1);
1486
+ rb_define_method(m_rb_fann_shortcut_class, "save", nn_save, 1);
1487
+
1488
+
1489
+ // Training data class:
1490
+ m_rb_fann_train_data_class = rb_define_class_under (m_rb_fann_module, "TrainData", rb_cObject);
1491
+ rb_define_alloc_func (m_rb_fann_train_data_class, fann_training_data_allocate);
1492
+ rb_define_method(m_rb_fann_train_data_class, "initialize", fann_train_data_initialize, 1);
1493
+ rb_define_method(m_rb_fann_train_data_class, "length", length_train_data, 0);
1494
+ rb_define_method(m_rb_fann_train_data_class, "shuffle", shuffle, 0);
1495
+ rb_define_method(m_rb_fann_train_data_class, "save", training_save, 1);
1496
+
1497
+ printf("Initialized Ruby Bindings for FANN.\n");
1498
+ }
1499
+
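+ /* Hedged end-to-end sketch of the API registered above. The hash keys for
+    the constructors (:filename, :num_inputs, :hidden_neurons, :num_outputs),
+    the require path, and all values are illustrative assumptions:
+
+      require 'ruby_fann'
+
+      train = RubyFann::TrainData.new(:filename => 'xor.train')
+      fann  = RubyFann::Standard.new(:num_inputs => 2,
+                                     :hidden_neurons => [3],
+                                     :num_outputs => 1)
+      fann.train_on_data(train, 1000, 10, 0.001)  # max_epochs, report interval, desired MSE
+      fann.run([1.0, 0.0])                        # => e.g. [0.98]
+      fann.save('xor_float.net')
+ */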