ruby-fann 1.4.2 → 2.0.0

@@ -12,7 +12,7 @@ static VALUE m_rb_fann_train_data_class;
  #define RETURN_FANN_INT(fn) \
  struct fann* f; \
  Data_Get_Struct (self, struct fann, f); \
- return INT2NUM(fn(f));
+ return INT2NUM(fn(f));

  #define SET_FANN_INT(attr_name, fann_fn) \
  Check_Type(attr_name, T_FIXNUM); \
@@ -24,7 +24,7 @@ return 0;
  #define RETURN_FANN_UINT(fn) \
  struct fann* f; \
  Data_Get_Struct (self, struct fann, f); \
- return UINT2NUM(fn(f));
+ return rb_int_new(fn(f));

  #define SET_FANN_UINT(attr_name, fann_fn) \
  Check_Type(attr_name, T_FIXNUM); \
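Note: the one substantive change in these macros is RETURN_FANN_UINT returning rb_int_new(fn(f)) instead of UINT2NUM(fn(f)) (rb_int_new is ruby.h's long-taking integer conversion). The unsigned getters built on the macro still hand back plain Ruby Integers either way; a quick sanity check, assuming get_total_neurons is one of the macro's users:

    require 'ruby_fann'
    fann = RubyFann::Standard.new(:num_inputs => 2, :hidden_neurons => [3], :num_outputs => 1)
    fann.get_total_neurons  # => an Integer (FANN's count includes bias neurons)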
@@ -39,7 +39,7 @@ struct fann* f; \
  Data_Get_Struct (self, struct fann, f); \
  char buffy[20]; \
  sprintf(buffy, "%0.6g", fn(f)); \
- return rb_float_new(atof(buffy));
+ return rb_float_new(atof(buffy));

  #define SET_FANN_FLT(attr_name, fann_fn) \
  Check_Type(attr_name, T_FLOAT); \
@@ -51,7 +51,7 @@ return self;
  #define RETURN_FANN_DBL(fn) \
  struct fann* f; \
  Data_Get_Struct (self, struct fann, f); \
- return rb_float_new(fn(f));
+ return rb_float_new(fn(f));

  #define SET_FANN_DBL SET_FANN_FLT

@@ -61,44 +61,44 @@ enum fann_activationfunc_enum sym_to_activation_function(VALUE activation_func)
  ID id=SYM2ID(activation_func);
  enum fann_activationfunc_enum activation_function;
  if(id==rb_intern("linear")) {
- activation_function = FANN_LINEAR;
+ activation_function = FANN_LINEAR;
  } else if(id==rb_intern("threshold")) {
- activation_function = FANN_THRESHOLD;
+ activation_function = FANN_THRESHOLD;
  } else if(id==rb_intern("threshold_symmetric")) {
- activation_function = FANN_THRESHOLD_SYMMETRIC;
+ activation_function = FANN_THRESHOLD_SYMMETRIC;
  } else if(id==rb_intern("sigmoid")) {
- activation_function = FANN_SIGMOID;
+ activation_function = FANN_SIGMOID;
  } else if(id==rb_intern("sigmoid_stepwise")) {
- activation_function = FANN_SIGMOID_STEPWISE;
+ activation_function = FANN_SIGMOID_STEPWISE;
  } else if(id==rb_intern("sigmoid_symmetric")) {
- activation_function = FANN_SIGMOID_SYMMETRIC;
+ activation_function = FANN_SIGMOID_SYMMETRIC;
  } else if(id==rb_intern("sigmoid_symmetric_stepwise")) {
- activation_function = FANN_SIGMOID_SYMMETRIC_STEPWISE;
+ activation_function = FANN_SIGMOID_SYMMETRIC_STEPWISE;
  } else if(id==rb_intern("gaussian")) {
- activation_function = FANN_GAUSSIAN;
+ activation_function = FANN_GAUSSIAN;
  } else if(id==rb_intern("gaussian_symmetric")) {
- activation_function = FANN_GAUSSIAN_SYMMETRIC;
+ activation_function = FANN_GAUSSIAN_SYMMETRIC;
  } else if(id==rb_intern("gaussian_stepwise")) {
- activation_function = FANN_GAUSSIAN_STEPWISE;
+ activation_function = FANN_GAUSSIAN_STEPWISE;
  } else if(id==rb_intern("elliot")) {
- activation_function = FANN_ELLIOT;
+ activation_function = FANN_ELLIOT;
  } else if(id==rb_intern("elliot_symmetric")) {
- activation_function = FANN_ELLIOT_SYMMETRIC;
+ activation_function = FANN_ELLIOT_SYMMETRIC;
  } else if(id==rb_intern("linear_piece")) {
- activation_function = FANN_LINEAR_PIECE;
+ activation_function = FANN_LINEAR_PIECE;
  } else if(id==rb_intern("linear_piece_symmetric")) {
- activation_function = FANN_LINEAR_PIECE_SYMMETRIC;
+ activation_function = FANN_LINEAR_PIECE_SYMMETRIC;
  } else if(id==rb_intern("sin_symmetric")) {
- activation_function = FANN_SIN_SYMMETRIC;
+ activation_function = FANN_SIN_SYMMETRIC;
  } else if(id==rb_intern("cos_symmetric")) {
- activation_function = FANN_COS_SYMMETRIC;
+ activation_function = FANN_COS_SYMMETRIC;
  } else if(id==rb_intern("sin")) {
- activation_function = FANN_SIN;
+ activation_function = FANN_SIN;
  } else if(id==rb_intern("cos")) {
- activation_function = FANN_COS;
+ activation_function = FANN_COS;
  } else {
  rb_raise(rb_eRuntimeError, "Unrecognized activation function: [%s]", rb_id2name(SYM2ID(activation_func)));
- }
+ }
  return activation_function;
  }

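sym_to_activation_function is what lets the Ruby-facing setters below take plain symbols; for example:

    fann.set_activation_function_hidden(:sigmoid_symmetric)
    fann.set_activation_function_output(:linear)
    fann.set_activation_function(:gaussian, 1, 0)  # layer 1, neuron 0
    fann.set_activation_function_hidden(:bogus)    # raises RuntimeError: Unrecognized activation function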
@@ -106,46 +106,46 @@ enum fann_activationfunc_enum sym_to_activation_function(VALUE activation_func)
  VALUE activation_function_to_sym(enum fann_activationfunc_enum fn)
  {
  VALUE activation_function;
-
+
  if(fn==FANN_LINEAR) {
- activation_function = ID2SYM(rb_intern("linear"));
+ activation_function = ID2SYM(rb_intern("linear"));
  } else if(fn==FANN_THRESHOLD) {
- activation_function = ID2SYM(rb_intern("threshold"));
+ activation_function = ID2SYM(rb_intern("threshold"));
  } else if(fn==FANN_THRESHOLD_SYMMETRIC) {
- activation_function = ID2SYM(rb_intern("threshold_symmetric"));
+ activation_function = ID2SYM(rb_intern("threshold_symmetric"));
  } else if(fn==FANN_SIGMOID) {
- activation_function = ID2SYM(rb_intern("sigmoid"));
+ activation_function = ID2SYM(rb_intern("sigmoid"));
  } else if(fn==FANN_SIGMOID_STEPWISE) {
- activation_function = ID2SYM(rb_intern("sigmoid_stepwise"));
+ activation_function = ID2SYM(rb_intern("sigmoid_stepwise"));
  } else if(fn==FANN_SIGMOID_SYMMETRIC) {
- activation_function = ID2SYM(rb_intern("sigmoid_symmetric"));
+ activation_function = ID2SYM(rb_intern("sigmoid_symmetric"));
  } else if(fn==FANN_SIGMOID_SYMMETRIC_STEPWISE) {
- activation_function = ID2SYM(rb_intern("sigmoid_symmetric_stepwise"));
+ activation_function = ID2SYM(rb_intern("sigmoid_symmetric_stepwise"));
  } else if(fn==FANN_GAUSSIAN) {
- activation_function = ID2SYM(rb_intern("gaussian"));
+ activation_function = ID2SYM(rb_intern("gaussian"));
  } else if(fn==FANN_GAUSSIAN_SYMMETRIC) {
- activation_function = ID2SYM(rb_intern("gaussian_symmetric"));
+ activation_function = ID2SYM(rb_intern("gaussian_symmetric"));
  } else if(fn==FANN_GAUSSIAN_STEPWISE) {
- activation_function = ID2SYM(rb_intern("gaussian_stepwise"));
+ activation_function = ID2SYM(rb_intern("gaussian_stepwise"));
  } else if(fn==FANN_ELLIOT) {
- activation_function = ID2SYM(rb_intern("elliot"));
+ activation_function = ID2SYM(rb_intern("elliot"));
  } else if(fn==FANN_ELLIOT_SYMMETRIC) {
- activation_function = ID2SYM(rb_intern("elliot_symmetric"));
+ activation_function = ID2SYM(rb_intern("elliot_symmetric"));
  } else if(fn==FANN_LINEAR_PIECE) {
- activation_function = ID2SYM(rb_intern("linear_piece"));
+ activation_function = ID2SYM(rb_intern("linear_piece"));
  } else if(fn==FANN_LINEAR_PIECE_SYMMETRIC) {
- activation_function = ID2SYM(rb_intern("linear_piece_symmetric"));
+ activation_function = ID2SYM(rb_intern("linear_piece_symmetric"));
  } else if(fn==FANN_SIN_SYMMETRIC) {
- activation_function = ID2SYM(rb_intern("sin_symmetric"));
+ activation_function = ID2SYM(rb_intern("sin_symmetric"));
  } else if(fn==FANN_COS_SYMMETRIC) {
- activation_function = ID2SYM(rb_intern("cos_symmetric"));
+ activation_function = ID2SYM(rb_intern("cos_symmetric"));
  } else if(fn==FANN_SIN) {
- activation_function = ID2SYM(rb_intern("sin"));
+ activation_function = ID2SYM(rb_intern("sin"));
  } else if(fn==FANN_COS) {
- activation_function = ID2SYM(rb_intern("cos"));
+ activation_function = ID2SYM(rb_intern("cos"));
  } else {
  rb_raise(rb_eRuntimeError, "Unrecognized activation function: [%d]", fn);
- }
+ }
  return activation_function;
  }

@@ -169,14 +169,14 @@ static void fann_training_data_free (struct fann_train_data* train_data)
  // printf("Destroyed Training data [%d].\n", train_data);
  }

- // Create wrapper, but don't allocate anything...do that in
+ // Create wrapper, but don't allocate anything...do that in
  // initialize, so we can construct with args:
  static VALUE fann_allocate (VALUE klass)
  {
  return Data_Wrap_Struct (klass, fann_mark, fann_free, 0);
  }

- // Create wrapper, but don't allocate annything...do that in
+ // Create wrapper, but don't allocate annything...do that in
  // initialize, so we can construct with args:
  static VALUE fann_training_data_allocate (VALUE klass)
  {
@@ -184,13 +184,13 @@ static VALUE fann_training_data_allocate (VALUE klass)
  }


- // static VALUE invoke_training_callback(VALUE self)
+ // static VALUE invoke_training_callback(VALUE self)
  // {
  // VALUE callback = rb_funcall(self, rb_intern("training_callback"), 0);
  // return callback;
  // }

- // static int FANN_API internal_callback(struct fann *ann, struct fann_train_data *train,
+ // static int FANN_API internal_callback(struct fann *ann, struct fann_train_data *train,
  // unsigned int max_epochs, unsigned int epochs_between_reports, float desired_error, unsigned int epochs)

  static int FANN_API fann_training_callback(struct fann *ann, struct fann_train_data *train,
@@ -199,49 +199,49 @@ static int FANN_API fann_training_callback(struct fann *ann, struct fann_train_d
  {
  VALUE self = (VALUE)fann_get_user_data(ann);
  VALUE args = rb_hash_new();
-
+
  // Set attributes on hash & push on array:
  VALUE max_epochs_sym = ID2SYM(rb_intern("max_epochs"));
  VALUE epochs_between_reports_sym = ID2SYM(rb_intern("epochs_between_reports"));
  VALUE desired_error_sym = ID2SYM(rb_intern("desired_error"));
  VALUE epochs_sym = ID2SYM(rb_intern("epochs"));
-
+
  rb_hash_aset(args, max_epochs_sym, INT2NUM(max_epochs));
  rb_hash_aset(args, epochs_between_reports_sym, INT2NUM(epochs_between_reports));
  rb_hash_aset(args, desired_error_sym, rb_float_new(desired_error));
  rb_hash_aset(args, epochs_sym, INT2NUM(epochs));
-
+
  VALUE callback = rb_funcall(self, rb_intern("training_callback"), 1, args);
-
+
  if (TYPE(callback)!=T_FIXNUM)
  {
  rb_raise (rb_eRuntimeError, "Callback method must return an integer (-1 to stop training).");
  }

- int status = NUM2INT(callback);
+ int status = NUM2INT(callback);
  if (status==-1)
  {
  printf("Callback method returned -1; training will stop.\n");
  }
-
+
  return status;
  }

- /** call-seq: new(hash) -> new ruby-fann neural network object
+ /** call-seq: new(hash) -> new ruby-fann neural network object

  Initialization routine for both standard, shortcut & filename forms of FANN:

  Standard Initialization:
  RubyFann::Standard.new(:num_inputs=>1, :hidden_neurons=>[3, 4, 3, 4], :num_outputs=>1)
-
+
  Shortcut Initialization (e.g., for use in cascade training):
- RubyFann::Shortcut.new(:num_inputs=>5, :num_outputs=>1)
-
+ RubyFann::Shortcut.new(:num_inputs=>5, :num_outputs=>1)
+
  File Initialization
- RubyFann::Standard.new(:filename=>'xor_float.net')
-
-
-
+ RubyFann::Standard.new(:filename=>'xor_float.net')
+
+
+
  */
  static VALUE fann_initialize(VALUE self, VALUE hash)
  {
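A sketch of the three constructor forms documented above, plus the optional training_callback hook that fann_training_callback invokes (the hash keys match the ones the C code sets; the file form assumes xor_float.net exists):

    standard = RubyFann::Standard.new(:num_inputs => 1, :hidden_neurons => [3, 4, 3, 4], :num_outputs => 1)
    shortcut = RubyFann::Shortcut.new(:num_inputs => 5, :num_outputs => 1)
    loaded   = RubyFann::Standard.new(:filename => 'xor_float.net')

    class ChattyNetwork < RubyFann::Standard
      # args carries :max_epochs, :epochs_between_reports, :desired_error and :epochs.
      def training_callback(args)
        puts "epoch #{args[:epochs]} / #{args[:max_epochs]}"
        0  # must return an integer; return -1 to stop training early
      end
    end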
@@ -250,24 +250,24 @@ static VALUE fann_initialize(VALUE self, VALUE hash)
  VALUE num_inputs = rb_hash_aref(hash, ID2SYM(rb_intern("num_inputs")));
  VALUE num_outputs = rb_hash_aref(hash, ID2SYM(rb_intern("num_outputs")));
  VALUE hidden_neurons = rb_hash_aref(hash, ID2SYM(rb_intern("hidden_neurons")));
- // printf("initializing\n\n\n");
+ // printf("initializing\n\n\n");
  struct fann* ann;
- if (TYPE(filename)==T_STRING)
+ if (TYPE(filename)==T_STRING)
  {
  // Initialize with file:
  // train_data = fann_read_train_from_file(StringValuePtr(filename));
  // DATA_PTR(self) = train_data;
  ann = fann_create_from_file(StringValuePtr(filename));
- // printf("Created RubyFann::Standard [%d] from file [%s].\n", ann, StringValuePtr(filename));
- }
+ // printf("Created RubyFann::Standard [%d] from file [%s].\n", ann, StringValuePtr(filename));
+ }
  else if(rb_obj_is_kind_of(self, m_rb_fann_shortcut_class))
  {
  // Initialize as shortcut, suitable for cascade training:
- //ann = fann_create_shortcut_array(num_layers, layers);
+ //ann = fann_create_shortcut_array(num_layers, layers);
  Check_Type(num_inputs, T_FIXNUM);
  Check_Type(num_outputs, T_FIXNUM);
-
- ann = fann_create_shortcut(2, NUM2INT(num_inputs), NUM2INT(num_outputs));
+
+ ann = fann_create_shortcut(2, NUM2INT(num_inputs), NUM2INT(num_outputs));
  // printf("Created RubyFann::Shortcut [%d].\n", ann);
  }
  else
@@ -276,29 +276,27 @@ static VALUE fann_initialize(VALUE self, VALUE hash)
  Check_Type(num_inputs, T_FIXNUM);
  Check_Type(hidden_neurons, T_ARRAY);
  Check_Type(num_outputs, T_FIXNUM);
-
+
  // Initialize layers:
- unsigned int num_layers=NUM2UINT(RARRAY_LEN(hidden_neurons)) + 2;
+ unsigned int num_layers=RARRAY_LEN(hidden_neurons) + 2;
  unsigned int layers[num_layers];

  // Input:
- layers[0]=NUM2INT(num_inputs);
+ layers[0]=NUM2INT(num_inputs);
  // Output:
- layers[num_layers-1]=NUM2INT(num_outputs);
+ layers[num_layers-1]=NUM2INT(num_outputs);
  // Hidden:
  unsigned int i;
  for (i=1; i<=num_layers-2; i++) {
- layers[i]=NUM2UINT(RARRAY_PTR(hidden_neurons)[i-1]);
+ layers[i]=NUM2INT(RARRAY_PTR(hidden_neurons)[i-1]);
  }
-
- ann = fann_create_standard_array(num_layers, layers);
- // printf("Created RubyFann::Standard [%d].\n", ann);
- }
+ ann = fann_create_standard_array(num_layers, layers);
+ }

  DATA_PTR(self) = ann;
-
+
  // printf("Checking for callback...");
-
+
  //int callback = rb_protect(invoke_training_callback, (self), &status);
  // VALUE callback = rb_funcall(DATA_PTR(self), "training_callback", 0);
  if(rb_respond_to(self, rb_intern("training_callback")))
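Note: the 1.4.2 code wrapped RARRAY_LEN in NUM2UINT, but RARRAY_LEN yields a plain C long rather than a Ruby VALUE, so converting it as if it were a Ruby object was wrong on modern Rubies; 2.0.0 uses the length directly, while the per-layer neuron counts (which really are Ruby objects) now go through NUM2INT. The same NUM2UINT(RARRAY_LEN(...)) cleanup recurs in run, train, and the cascade array setters further down.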
@@ -311,14 +309,14 @@ static VALUE fann_initialize(VALUE self, VALUE hash)
  {
  // printf("none found.\n");
  }
-
- return (VALUE)ann;
+
+ return (VALUE)ann;
  }

  /** call-seq: new(hash) -> new ruby-fann training data object (RubyFann::TrainData)
-
+
  Initialize in one of the following forms:
-
+
  # This is a flat file with training data as described in FANN docs.
  RubyFann::TrainData.new(:filename => 'path/to/training_file.train')
  OR
@@ -327,23 +325,23 @@ static VALUE fann_initialize(VALUE self, VALUE hash)
  # All sub-arrays on inputs should be of same length
  # All sub-arrays on desired_outputs should be of same length
  # Sub-arrays on inputs & desired_outputs can be different sizes from one another
- RubyFann::TrainData.new(:inputs=>[[0.2, 0.3, 0.4], [0.8, 0.9, 0.7]], :desired_outputs=>[[3.14], [6.33]])
+ RubyFann::TrainData.new(:inputs=>[[0.2, 0.3, 0.4], [0.8, 0.9, 0.7]], :desired_outputs=>[[3.14], [6.33]])
  */
  static VALUE fann_train_data_initialize(VALUE self, VALUE hash)
  {
  struct fann_train_data* train_data;
  Check_Type(hash, T_HASH);
-
+
  VALUE filename = rb_hash_aref(hash, ID2SYM(rb_intern("filename")));
  VALUE inputs = rb_hash_aref(hash, ID2SYM(rb_intern("inputs")));
  VALUE desired_outputs = rb_hash_aref(hash, ID2SYM(rb_intern("desired_outputs")));

- if (TYPE(filename)==T_STRING)
+ if (TYPE(filename)==T_STRING)
  {
  train_data = fann_read_train_from_file(StringValuePtr(filename));
  DATA_PTR(self) = train_data;
- }
- else if (TYPE(inputs)==T_ARRAY)
+ }
+ else if (TYPE(inputs)==T_ARRAY)
  {
  if (TYPE(desired_outputs)!=T_ARRAY)
  {
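The two construction forms side by side (a sketch; the filename form assumes the file exists, and save/shuffle are the TrainData methods wrapped further down):

    from_file = RubyFann::TrainData.new(:filename => 'path/to/training_file.train')

    from_arrays = RubyFann::TrainData.new(
      :inputs          => [[0.2, 0.3, 0.4], [0.8, 0.9, 0.7]],
      :desired_outputs => [[3.14], [6.33]])
    from_arrays.save('cached.train')  # FANN flat-file format
    from_arrays.shuffle               # recommended before incremental training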
@@ -352,50 +350,55 @@ static VALUE fann_train_data_initialize(VALUE self, VALUE hash)

  if (RARRAY_LEN(inputs) < 1)
  {
- rb_raise (rb_eRuntimeError, "[inputs/desired_outputs] must contain at least one value.");
+ rb_raise (rb_eRuntimeError, "[inputs] must contain at least one value.");
+ }
+
+ if (RARRAY_LEN(desired_outputs) < 1)
+ {
+ rb_raise (rb_eRuntimeError, "[desired_outputs] must contain at least one value.");
  }

  // The data is here, start constructing:
- if(RARRAY_LEN(inputs) != RARRAY_LEN(desired_outputs))
+ if(RARRAY_LEN(inputs) != RARRAY_LEN(desired_outputs))
  {
  rb_raise (
- rb_eRuntimeError,
- "Number of inputs must match number of outputs: (%d != %d)",
- (int)RARRAY_LEN(inputs),
+ rb_eRuntimeError,
+ "Number of inputs must match number of outputs: (%d != %d)",
+ (int)RARRAY_LEN(inputs),
  (int)RARRAY_LEN(desired_outputs));
  }

- train_data = fann_create_train_from_rb_ary(inputs, desired_outputs);
- DATA_PTR(self) = train_data;
- }
- else
+ train_data = fann_create_train_from_rb_ary(inputs, desired_outputs);
+ DATA_PTR(self) = train_data;
+ }
+ else
  {
  rb_raise (rb_eRuntimeError, "Must construct with a filename(string) or inputs/desired_outputs(arrays). All args passed via hash with symbols as keys.");
  }
-
+
  return (VALUE)train_data;
  }


  /** call-seq: save(filename)

- Save to given filename
+ Save to given filename
  */
  static VALUE training_save(VALUE self, VALUE filename)
  {
- Check_Type(filename, T_STRING);
+ Check_Type(filename, T_STRING);
  struct fann_train_data* t;
- Data_Get_Struct (self, struct fann_train_data, t);
+ Data_Get_Struct (self, struct fann_train_data, t);
  fann_save_train(t, StringValuePtr(filename));
- return self;
+ return self;
  }

- /** Shuffles training data, randomizing the order.
+ /** Shuffles training data, randomizing the order.
  This is recommended for incremental training, while it will have no influence during batch training.*/
  static VALUE shuffle(VALUE self)
  {
- struct fann_train_data* t;
- Data_Get_Struct (self, struct fann_train_data, t);
+ struct fann_train_data* t;
+ Data_Get_Struct (self, struct fann_train_data, t);
  fann_shuffle_train_data(t);
  return self;
  }
@@ -403,26 +406,26 @@ static VALUE shuffle(VALUE self)
  /** Length of training data*/
  static VALUE length_train_data(VALUE self)
  {
- struct fann_train_data* t;
- Data_Get_Struct (self, struct fann_train_data, t);
+ struct fann_train_data* t;
+ Data_Get_Struct (self, struct fann_train_data, t);
  return(UINT2NUM(fann_length_train_data(t)));
  return self;
  }

  /** call-seq: set_activation_function(activation_func, layer, neuron)

- Set the activation function for neuron number *neuron* in layer number *layer*,
+ Set the activation function for neuron number *neuron* in layer number *layer*,
  counting the input layer as layer 0. activation_func must be one of the following symbols:
- :linear, :threshold, :threshold_symmetric, :sigmoid, :sigmoid_stepwise, :sigmoid_symmetric,
- :sigmoid_symmetric_stepwise, :gaussian, :gaussian_symmetric, :gaussian_stepwise, :elliot,
- :elliot_symmetric, :linear_piece, :linear_piece_symmetric, :sin_symmetric, :cos_symmetric,
+ :linear, :threshold, :threshold_symmetric, :sigmoid, :sigmoid_stepwise, :sigmoid_symmetric,
+ :sigmoid_symmetric_stepwise, :gaussian, :gaussian_symmetric, :gaussian_stepwise, :elliot,
+ :elliot_symmetric, :linear_piece, :linear_piece_symmetric, :sin_symmetric, :cos_symmetric,
  :sin, :cos*/
  static VALUE set_activation_function(VALUE self, VALUE activation_func, VALUE layer, VALUE neuron)
  {
  Check_Type(activation_func, T_SYMBOL);
  Check_Type(layer, T_FIXNUM);
  Check_Type(neuron, T_FIXNUM);
-
+
  struct fann* f;
  Data_Get_Struct(self, struct fann, f);
  fann_set_activation_function(f, sym_to_activation_function(activation_func), NUM2INT(layer), NUM2INT(neuron));
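Per-neuron usage, as documented above (layer 0 is the input layer and cannot be set):

    fann.set_activation_function(:elliot, 1, 2)  # neuron 2 in layer 1
    fann.get_activation_function(1, 2)           # => :elliot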
@@ -432,9 +435,9 @@ static VALUE set_activation_function(VALUE self, VALUE activation_func, VALUE la
  /** call-seq: set_activation_function_hidden(activation_func)

  Set the activation function for all of the hidden layers. activation_func must be one of the following symbols:
- :linear, :threshold, :threshold_symmetric, :sigmoid, :sigmoid_stepwise, :sigmoid_symmetric,
- :sigmoid_symmetric_stepwise, :gaussian, :gaussian_symmetric, :gaussian_stepwise, :elliot,
- :elliot_symmetric, :linear_piece, :linear_piece_symmetric, :sin_symmetric, :cos_symmetric,
+ :linear, :threshold, :threshold_symmetric, :sigmoid, :sigmoid_stepwise, :sigmoid_symmetric,
+ :sigmoid_symmetric_stepwise, :gaussian, :gaussian_symmetric, :gaussian_stepwise, :elliot,
+ :elliot_symmetric, :linear_piece, :linear_piece_symmetric, :sin_symmetric, :cos_symmetric,
  :sin, :cos*/
  static VALUE set_activation_function_hidden(VALUE self, VALUE activation_func)
  {
@@ -447,15 +450,15 @@ static VALUE set_activation_function_hidden(VALUE self, VALUE activation_func)

  /** call-seq: set_activation_function_layer(activation_func, layer)

- Set the activation function for all the neurons in the layer number *layer*,
+ Set the activation function for all the neurons in the layer number *layer*,
  counting the input layer as layer 0. activation_func must be one of the following symbols:
- :linear, :threshold, :threshold_symmetric, :sigmoid, :sigmoid_stepwise, :sigmoid_symmetric,
- :sigmoid_symmetric_stepwise, :gaussian, :gaussian_symmetric, :gaussian_stepwise, :elliot,
- :elliot_symmetric, :linear_piece, :linear_piece_symmetric, :sin_symmetric, :cos_symmetric,
+ :linear, :threshold, :threshold_symmetric, :sigmoid, :sigmoid_stepwise, :sigmoid_symmetric,
+ :sigmoid_symmetric_stepwise, :gaussian, :gaussian_symmetric, :gaussian_stepwise, :elliot,
+ :elliot_symmetric, :linear_piece, :linear_piece_symmetric, :sin_symmetric, :cos_symmetric,
  :sin, :cos
-
+
  It is not possible to set activation functions for the neurons in the input layer.
- */
+ */
  static VALUE set_activation_function_layer(VALUE self, VALUE activation_func, VALUE layer)
  {
  Check_Type(activation_func, T_SYMBOL);
@@ -466,12 +469,12 @@ static VALUE set_activation_function_layer(VALUE self, VALUE activation_func, VA
  return self;
  }

- /** call-seq: get_activation_function(layer) -> return value
-
- Get the activation function for neuron number *neuron* in layer number *layer*,
- counting the input layer as layer 0.
+ /** call-seq: get_activation_function(layer) -> return value

- It is not possible to get activation functions for the neurons in the input layer.
+ Get the activation function for neuron number *neuron* in layer number *layer*,
+ counting the input layer as layer 0.
+
+ It is not possible to get activation functions for the neurons in the input layer.
  */
  static VALUE get_activation_function(VALUE self, VALUE layer, VALUE neuron)
  {
@@ -486,9 +489,9 @@ static VALUE get_activation_function(VALUE self, VALUE layer, VALUE neuron)
  /** call-seq: set_activation_function_output(activation_func)

  Set the activation function for the output layer. activation_func must be one of the following symbols:
- :linear, :threshold, :threshold_symmetric, :sigmoid, :sigmoid_stepwise, :sigmoid_symmetric,
- :sigmoid_symmetric_stepwise, :gaussian, :gaussian_symmetric, :gaussian_stepwise, :elliot,
- :elliot_symmetric, :linear_piece, :linear_piece_symmetric, :sin_symmetric, :cos_symmetric,
+ :linear, :threshold, :threshold_symmetric, :sigmoid, :sigmoid_stepwise, :sigmoid_symmetric,
+ :sigmoid_symmetric_stepwise, :gaussian, :gaussian_symmetric, :gaussian_stepwise, :elliot,
+ :elliot_symmetric, :linear_piece, :linear_piece_symmetric, :sin_symmetric, :cos_symmetric,
  :sin, :cos*/

  static VALUE set_activation_function_output(VALUE self, VALUE activation_func)
@@ -500,9 +503,9 @@ static VALUE set_activation_function_output(VALUE self, VALUE activation_func)
  return self;
  }

- /** call-seq: get_activation_steepness(layer, neuron) -> return value
-
- Get the activation steepness for neuron number neuron in layer number layer, counting the input layer as layer 0.
+ /** call-seq: get_activation_steepness(layer, neuron) -> return value
+
+ Get the activation steepness for neuron number neuron in layer number layer, counting the input layer as layer 0.
  */
  static VALUE get_activation_steepness(VALUE self, VALUE layer, VALUE neuron)
  {
@@ -516,21 +519,21 @@ static VALUE get_activation_steepness(VALUE self, VALUE layer, VALUE neuron)

  /** call-seq: set_activation_steepness(steepness, layer, neuron)

- Set the activation steepness for neuron number {neuron} in layer number {layer},
+ Set the activation steepness for neuron number {neuron} in layer number {layer},
  counting the input layer as layer 0.*/
  static VALUE set_activation_steepness(VALUE self, VALUE steepness, VALUE layer, VALUE neuron)
  {
  Check_Type(steepness, T_FLOAT);
  Check_Type(layer, T_FIXNUM);
  Check_Type(neuron, T_FIXNUM);
-
+
  struct fann* f;
  Data_Get_Struct(self, struct fann, f);
  fann_set_activation_steepness(f, NUM2DBL(steepness), NUM2INT(layer), NUM2INT(neuron));
  return self;
  }

- /** call-seq: set_activation_steepness_hidden(arg) -> return value
+ /** call-seq: set_activation_steepness_hidden(arg) -> return value

  Set the activation steepness in all of the hidden layers.*/
  static VALUE set_activation_steepness_hidden(VALUE self, VALUE steepness)
@@ -540,13 +543,13 @@ static VALUE set_activation_steepness_hidden(VALUE self, VALUE steepness)

  /** call-seq: set_activation_steepness_layer(steepness, layer)

- Set the activation steepness all of the neurons in layer number *layer*,
+ Set the activation steepness all of the neurons in layer number *layer*,
  counting the input layer as layer 0.*/
  static VALUE set_activation_steepness_layer(VALUE self, VALUE steepness, VALUE layer)
  {
  Check_Type(steepness, T_FLOAT);
  Check_Type(layer, T_FIXNUM);
-
+
  struct fann* f;
  Data_Get_Struct(self, struct fann, f);
  fann_set_activation_steepness_layer(f, NUM2DBL(steepness), NUM2INT(layer));
@@ -575,8 +578,8 @@ static VALUE set_bit_fail_limit(VALUE self, VALUE bit_fail_limit)
  SET_FANN_FLT(bit_fail_limit, fann_set_bit_fail_limit);
  }

- /** The decay is a small negative valued number which is the factor that the weights
- should become smaller in each iteration during quickprop training. This is used
+ /** The decay is a small negative valued number which is the factor that the weights
+ should become smaller in each iteration during quickprop training. This is used
  to make sure that the weights do not become too high during training.*/
  static VALUE get_quickprop_decay(VALUE self)
  {
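For example (FANN's documented default decay is -0.0001, and the SET_FANN_FLT macro insists on a Float; assumes set_training_algorithm is registered on the network class as in earlier releases):

    fann.set_training_algorithm(:quickprop)
    fann.set_quickprop_decay(-0.0001)
    fann.get_quickprop_decay  # => -0.0001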
@@ -591,8 +594,8 @@ static VALUE set_quickprop_decay(VALUE self, VALUE quickprop_decay)
  SET_FANN_FLT(quickprop_decay, fann_set_quickprop_decay);
  }

- /** The mu factor is used to increase and decrease the step-size during quickprop training.
- The mu factor should always be above 1, since it would otherwise decrease the step-size
+ /** The mu factor is used to increase and decrease the step-size during quickprop training.
+ The mu factor should always be above 1, since it would otherwise decrease the step-size
  when it was suppose to increase it. */
  static VALUE get_quickprop_mu(VALUE self)
  {
@@ -607,7 +610,7 @@ static VALUE set_quickprop_mu(VALUE self, VALUE quickprop_mu)
  SET_FANN_FLT(quickprop_mu, fann_set_quickprop_mu);
  }

- /** The increase factor is a value larger than 1, which is used to
+ /** The increase factor is a value larger than 1, which is used to
  increase the step-size during RPROP training.*/
  static VALUE get_rprop_increase_factor(VALUE self)
  {
@@ -686,8 +689,8 @@ static VALUE get_bias_array(VALUE self)
  Data_Get_Struct (self, struct fann, f);
  num_layers = fann_get_num_layers(f);
  unsigned int layers[num_layers];
- fann_get_bias_array(f, layers);
-
+ fann_get_bias_array(f, layers);
+
  // Create ruby array & set outputs:
  VALUE arr;
  arr = rb_ary_new();
@@ -696,12 +699,12 @@ static VALUE get_bias_array(VALUE self)
  {
  rb_ary_push(arr, INT2NUM(layers[i]));
  }
-
+
  return arr;
  }

- /** The number of fail bits; means the number of output neurons which differ more
- than the bit fail limit (see <fann_get_bit_fail_limit>, <fann_set_bit_fail_limit>).
+ /** The number of fail bits; means the number of output neurons which differ more
+ than the bit fail limit (see <fann_get_bit_fail_limit>, <fann_set_bit_fail_limit>).
  The bits are counted in all of the training data, so this number can be higher than
  the number of training data.*/
  static VALUE get_bit_fail(VALUE self)
@@ -715,7 +718,7 @@ static VALUE get_connection_rate(VALUE self)
  RETURN_FANN_INT(fann_get_connection_rate);
  }

- /** call-seq: get_neurons(layer) -> return value
+ /** call-seq: get_neurons(layer) -> return value

  Return array<hash> where each array element is a hash
  representing a neuron. It contains the following keys:
@@ -724,16 +727,16 @@ static VALUE get_connection_rate(VALUE self)
  :sum=float -- The sum of the inputs multiplied with the weights
  :value=float -- The value of the activation fuction applied to the sum
  :connections=array<int> -- indices of connected neurons(inputs)
-
+
  This could be done more elegantly (e.g., defining more ruby ext classes).
  This method does not directly correlate to anything in FANN, and accesses
- structs that are not guaranteed to not change.
+ structs that are not guaranteed to not change.
  */
  static VALUE get_neurons(VALUE self, VALUE layer)
  {
  struct fann_layer *layer_it;
  struct fann_neuron *neuron_it;
-
+
  struct fann* f;
  unsigned int i;
  Data_Get_Struct (self, struct fann, f);
@@ -746,8 +749,8 @@ static VALUE get_neurons(VALUE self, VALUE layer)
  VALUE value_sym = ID2SYM(rb_intern("value"));
  VALUE connections_sym = ID2SYM(rb_intern("connections"));
  unsigned int layer_num = 0;
-
-
+
+
  int nuke_bias_neuron = (fann_get_network_type(f)==FANN_NETTYPE_LAYER);
  for(layer_it = f->first_layer; layer_it != f->last_layer; layer_it++)
  {
@@ -756,12 +759,12 @@ static VALUE get_neurons(VALUE self, VALUE layer)
  if (nuke_bias_neuron && (neuron_it==(layer_it->last_neuron)-1)) continue;
  // Create array of connection indicies:
  VALUE connection_array = rb_ary_new();
- for (i = neuron_it->first_con; i < neuron_it->last_con; i++) {
- rb_ary_push(connection_array, INT2NUM(f->connections[i] - f->first_layer->first_neuron));
+ for (i = neuron_it->first_con; i < neuron_it->last_con; i++) {
+ rb_ary_push(connection_array, INT2NUM(f->connections[i] - f->first_layer->first_neuron));
  }

  VALUE neuron = rb_hash_new();
-
+
  // Set attributes on hash & push on array:
  rb_hash_aset(neuron, activation_function_sym, activation_function_to_sym(neuron_it->activation_function));
  rb_hash_aset(neuron, activation_steepness_sym, rb_float_new(neuron_it->activation_steepness));
@@ -769,8 +772,8 @@ static VALUE get_neurons(VALUE self, VALUE layer)
  rb_hash_aset(neuron, sum_sym, rb_float_new(neuron_it->sum));
  rb_hash_aset(neuron, value_sym, rb_float_new(neuron_it->value));
  rb_hash_aset(neuron, connections_sym, connection_array);
-
- rb_ary_push(neuron_array, neuron);
+
+ rb_ary_push(neuron_array, neuron);
  }
  ++layer_num;
  }
@@ -787,7 +790,7 @@ static VALUE get_neurons(VALUE self, VALUE layer)
  // case FANN_NETTYPE_SHORTCUT: {


- return neuron_array;
+ return neuron_array;
  }

  /** Get list of layers in array format where each element contains number of neurons in that layer*/
@@ -798,8 +801,8 @@ static VALUE get_layer_array(VALUE self)
  Data_Get_Struct (self, struct fann, f);
  num_layers = fann_get_num_layers(f);
  unsigned int layers[num_layers];
- fann_get_layer_array(f, layers);
-
+ fann_get_layer_array(f, layers);
+
  // Create ruby array & set outputs:
  VALUE arr;
  arr = rb_ary_new();
@@ -808,7 +811,7 @@ static VALUE get_layer_array(VALUE self)
  {
  rb_ary_push(arr, INT2NUM(layers[i]));
  }
-
+
  return arr;
  }

@@ -819,13 +822,13 @@ static VALUE get_MSE(VALUE self)
  }

  /** Resets the mean square error from the network.
- This function also resets the number of bits that fail.*/
+ This function also resets the number of bits that fail.*/
  static VALUE reset_MSE(VALUE self)
  {
  struct fann* f;
  Data_Get_Struct (self, struct fann, f);
  fann_reset_MSE(f);
- return self;
+ return self;
  }

  /** Get the type of network. Returns as ruby symbol (one of :shortcut, :layer)*/
@@ -837,15 +840,15 @@ static VALUE get_network_type(VALUE self)
  Data_Get_Struct (self, struct fann, f);

  net_type = fann_get_network_type(f);
-
- if(net_type==FANN_NETTYPE_LAYER)
+
+ if(net_type==FANN_NETTYPE_LAYER)
  {
  ret_val = ID2SYM(rb_intern("layer")); // (rb_str_new2("FANN_NETTYPE_LAYER"));
  }
  else if(net_type==FANN_NETTYPE_SHORTCUT)
  {
  ret_val = ID2SYM(rb_intern("shortcut")); // (rb_str_new2("FANN_NETTYPE_SHORTCUT"));
- }
+ }
  return ret_val;
  }

@@ -854,7 +857,7 @@ static VALUE get_num_input(VALUE self)
  {
  RETURN_FANN_INT(fann_get_num_input);
  }
-
+
  /** Get the number of layers in the network.*/
  static VALUE get_num_layers(VALUE self)
  {
@@ -886,26 +889,26 @@ static VALUE get_total_neurons(VALUE self)
  static VALUE set_train_error_function(VALUE self, VALUE train_error_function)
  {
  Check_Type(train_error_function, T_SYMBOL);
-
+
  ID id=SYM2ID(train_error_function);
  enum fann_errorfunc_enum fann_train_error_function;

  if(id==rb_intern("linear")) {
- fann_train_error_function = FANN_ERRORFUNC_LINEAR;
+ fann_train_error_function = FANN_ERRORFUNC_LINEAR;
  } else if(id==rb_intern("tanh")) {
- fann_train_error_function = FANN_ERRORFUNC_TANH;
+ fann_train_error_function = FANN_ERRORFUNC_TANH;
  } else {
  rb_raise(rb_eRuntimeError, "Unrecognized train error function: [%s]", rb_id2name(SYM2ID(train_error_function)));
- }
+ }

  struct fann* f;
  Data_Get_Struct (self, struct fann, f);
  fann_set_train_error_function(f, fann_train_error_function);
- return self;
+ return self;
  }

  /** Returns the error function used during training. One of the following symbols:
- :linear, :tanh*/
+ :linear, :tanh*/
  static VALUE get_train_error_function(VALUE self)
  {
  struct fann* f;
@@ -914,15 +917,15 @@ static VALUE get_train_error_function(VALUE self)
  Data_Get_Struct (self, struct fann, f);

  train_error = fann_get_train_error_function(f);
-
- if(train_error==FANN_ERRORFUNC_LINEAR)
+
+ if(train_error==FANN_ERRORFUNC_LINEAR)
  {
- ret_val = ID2SYM(rb_intern("linear"));
+ ret_val = ID2SYM(rb_intern("linear"));
  }
- else if(train_error==FANN_ERRORFUNC_TANH)
+ else
  {
- ret_val = ID2SYM(rb_intern("tanh"));
- }
+ ret_val = ID2SYM(rb_intern("tanh"));
+ }
  return ret_val;
  }

@@ -933,26 +936,26 @@ static VALUE get_train_error_function(VALUE self)
  static VALUE set_training_algorithm(VALUE self, VALUE train_error_function)
  {
  Check_Type(train_error_function, T_SYMBOL);
-
+
  ID id=SYM2ID(train_error_function);
  enum fann_train_enum fann_train_algorithm;

  if(id==rb_intern("incremental")) {
- fann_train_algorithm = FANN_TRAIN_INCREMENTAL;
+ fann_train_algorithm = FANN_TRAIN_INCREMENTAL;
  } else if(id==rb_intern("batch")) {
- fann_train_algorithm = FANN_TRAIN_BATCH;
+ fann_train_algorithm = FANN_TRAIN_BATCH;
  } else if(id==rb_intern("rprop")) {
- fann_train_algorithm = FANN_TRAIN_RPROP;
+ fann_train_algorithm = FANN_TRAIN_RPROP;
  } else if(id==rb_intern("quickprop")) {
- fann_train_algorithm = FANN_TRAIN_QUICKPROP;
+ fann_train_algorithm = FANN_TRAIN_QUICKPROP;
  } else {
  rb_raise(rb_eRuntimeError, "Unrecognized training algorithm function: [%s]", rb_id2name(SYM2ID(train_error_function)));
- }
+ }

  struct fann* f;
  Data_Get_Struct (self, struct fann, f);
  fann_set_training_algorithm(f, fann_train_algorithm);
- return self;
+ return self;
  }

  /** Returns the training algorithm. One of the following symbols:
@@ -965,20 +968,20 @@ static VALUE get_training_algorithm(VALUE self)
  Data_Get_Struct (self, struct fann, f);

  fann_train_algorithm = fann_get_training_algorithm(f);
-
+
  if(fann_train_algorithm==FANN_TRAIN_INCREMENTAL) {
  ret_val = ID2SYM(rb_intern("incremental"));
  } else if(fann_train_algorithm==FANN_TRAIN_BATCH) {
- ret_val = ID2SYM(rb_intern("batch"));
+ ret_val = ID2SYM(rb_intern("batch"));
  } else if(fann_train_algorithm==FANN_TRAIN_RPROP) {
- ret_val = ID2SYM(rb_intern("rprop"));
+ ret_val = ID2SYM(rb_intern("rprop"));
  } else if(fann_train_algorithm==FANN_TRAIN_QUICKPROP) {
- ret_val = ID2SYM(rb_intern("quickprop"));
- }
+ ret_val = ID2SYM(rb_intern("quickprop"));
+ }
  return ret_val;
  }

- /** call-seq: set_train_stop_function(train_stop_function) -> return value
+ /** call-seq: set_train_stop_function(train_stop_function) -> return value

  Set the training stop function. One of the following symbols:
  :mse, :bit */
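Both setters take the symbols listed above and raise on anything else; a short sketch (assuming the setters are registered on the network classes as in earlier releases):

    fann.set_training_algorithm(:rprop)  # :incremental, :batch, :rprop or :quickprop
    fann.set_train_stop_function(:bit)   # stop on fail bits instead of MSE
    fann.set_bit_fail_limit(0.1)         # Float, per SET_FANN_FLT
    fann.get_training_algorithm          # => :rprop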
@@ -989,17 +992,17 @@ static VALUE set_train_stop_function(VALUE self, VALUE train_stop_function)
  enum fann_stopfunc_enum fann_train_stop_function;

  if(id==rb_intern("mse")) {
- fann_train_stop_function = FANN_STOPFUNC_MSE;
+ fann_train_stop_function = FANN_STOPFUNC_MSE;
  } else if(id==rb_intern("bit")) {
- fann_train_stop_function = FANN_STOPFUNC_BIT;
+ fann_train_stop_function = FANN_STOPFUNC_BIT;
  } else {
  rb_raise(rb_eRuntimeError, "Unrecognized stop function: [%s]", rb_id2name(SYM2ID(train_stop_function)));
- }
+ }

  struct fann* f;
  Data_Get_Struct (self, struct fann, f);
  fann_set_train_stop_function(f, fann_train_stop_function);
- return self;
+ return self;
  }

  /** Returns the training stop function. One of the following symbols:
@@ -1012,27 +1015,27 @@ static VALUE get_train_stop_function(VALUE self)
  Data_Get_Struct (self, struct fann, f);

  train_stop = fann_get_train_stop_function(f);
-
- if(train_stop==FANN_STOPFUNC_MSE)
+
+ if(train_stop==FANN_STOPFUNC_MSE)
  {
  ret_val = ID2SYM(rb_intern("mse")); // (rb_str_new2("FANN_NETTYPE_LAYER"));
  }
  else // if(train_stop==FANN_STOPFUNC_BIT)
  {
  ret_val = ID2SYM(rb_intern("bit")); // (rb_str_new2("FANN_NETTYPE_SHORTCUT"));
- }
+ }
  return ret_val;
  }


- /** Will print the connections of the ann in a compact matrix,
+ /** Will print the connections of the ann in a compact matrix,
  for easy viewing of the internals of the ann. */
  static VALUE print_connections(VALUE self)
  {
  struct fann* f;
  Data_Get_Struct (self, struct fann, f);
  fann_print_connections(f);
- return self;
+ return self;
  }

  /** Print current NN parameters to stdout */
@@ -1052,14 +1055,14 @@ static VALUE randomize_weights(VALUE self, VALUE min_weight, VALUE max_weight)
  Check_Type(min_weight, T_FLOAT);
  Check_Type(max_weight, T_FLOAT);
  struct fann* f;
- Data_Get_Struct (self, struct fann, f);
+ Data_Get_Struct (self, struct fann, f);
  fann_randomize_weights(f, NUM2DBL(min_weight), NUM2DBL(max_weight));
- return self;
+ return self;
  }

- /** call-seq: run(inputs) -> return value
+ /** call-seq: run(inputs) -> return value

- Run neural net on array<Float> of inputs with current parameters.
+ Run neural net on array<Float> of inputs with current parameters.
  Returns array<Float> as output */
  static VALUE run (VALUE self, VALUE inputs)
  {
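Typical usage per the doc comment (the output Array's size comes from fann_get_num_output):

    fann = RubyFann::Standard.new(:num_inputs => 2, :hidden_neurons => [3], :num_outputs => 1)
    fann.randomize_weights(-0.1, 0.1)  # both bounds must be Floats
    fann.run([0.3, 0.7])               # => e.g. [0.48]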
@@ -1068,16 +1071,16 @@ static VALUE run (VALUE self, VALUE inputs)
  struct fann* f;
  unsigned int i;
  fann_type* outputs;
-
+
  // Convert inputs to type needed for NN:
- unsigned int len = NUM2UINT(RARRAY_LEN(inputs));
+ unsigned int len = RARRAY_LEN(inputs);
  fann_type fann_inputs[len];
  for (i=0; i<len; i++)
  {
  fann_inputs[i] = NUM2DBL(RARRAY_PTR(inputs)[i]);
  }
-
-
+
+
  // Obtain NN & run method:
  Data_Get_Struct (self, struct fann, f);
  outputs = fann_run(f, fann_inputs);
@@ -1087,28 +1090,28 @@ static VALUE run (VALUE self, VALUE inputs)
  arr = rb_ary_new();
  unsigned int output_len=fann_get_num_output(f);
  for (i=0; i<output_len; i++)
- {
+ {
  rb_ary_push(arr, rb_float_new(outputs[i]));
  }
-
+
  return arr;
  }

- /** call-seq: init_weights(train_data) -> return value
+ /** call-seq: init_weights(train_data) -> return value

  Initialize the weights using Widrow + Nguyen's algorithm. */
  static VALUE init_weights(VALUE self, VALUE train_data)
  {
-
+
  Check_Type(train_data, T_DATA);
-
+
  struct fann* f;
- struct fann_train_data* t;
- Data_Get_Struct (self, struct fann, f);
- Data_Get_Struct (train_data, struct fann_train_data, t);
+ struct fann_train_data* t;
+ Data_Get_Struct (self, struct fann, f);
+ Data_Get_Struct (train_data, struct fann_train_data, t);

- fann_init_weights(f, t);
- return self;
+ fann_init_weights(f, t);
+ return self;
  }

  /** call-seq: train(input, expected_output)
@@ -1124,8 +1127,8 @@ static VALUE train(VALUE self, VALUE input, VALUE expected_output)
  struct fann* f;
  Data_Get_Struct(self, struct fann, f);

- unsigned int num_input = NUM2UINT(RARRAY_LEN(input));
- unsigned int num_output = NUM2UINT(RARRAY_LEN(expected_output));
+ unsigned int num_input = RARRAY_LEN(input);
+ unsigned int num_output = RARRAY_LEN(expected_output);

  fann_type data_input[num_input], data_output[num_output];

@@ -1157,42 +1160,42 @@ static VALUE train_on_data(VALUE self, VALUE train_data, VALUE max_epochs, VALUE
  Check_Type(max_epochs, T_FIXNUM);
  Check_Type(epochs_between_reports, T_FIXNUM);
  Check_Type(desired_error, T_FLOAT);
-
+
  struct fann* f;
- struct fann_train_data* t;
- Data_Get_Struct (self, struct fann, f);
- Data_Get_Struct (train_data, struct fann_train_data, t);
+ struct fann_train_data* t;
+ Data_Get_Struct (self, struct fann, f);
+ Data_Get_Struct (train_data, struct fann_train_data, t);

  unsigned int fann_max_epochs = NUM2INT(max_epochs);
  unsigned int fann_epochs_between_reports = NUM2INT(epochs_between_reports);
- float fann_desired_error = NUM2DBL(desired_error);
+ float fann_desired_error = NUM2DBL(desired_error);
  fann_train_on_data(f, t, fann_max_epochs, fann_epochs_between_reports, fann_desired_error);
  return rb_int_new(0);
  }

- /** call-seq: train_epoch(train_data) -> return value
+ /** call-seq: train_epoch(train_data) -> return value

  Train one epoch with a set of training data, created with RubyFann::TrainData.new */
  static VALUE train_epoch(VALUE self, VALUE train_data)
  {
  Check_Type(train_data, T_DATA);
  struct fann* f;
- struct fann_train_data* t;
- Data_Get_Struct (self, struct fann, f);
- Data_Get_Struct (train_data, struct fann_train_data, t);
+ struct fann_train_data* t;
+ Data_Get_Struct (self, struct fann, f);
+ Data_Get_Struct (train_data, struct fann_train_data, t);
  return rb_float_new(fann_train_epoch(f, t));
  }

- /** call-seq: test_data(train_data) -> return value
+ /** call-seq: test_data(train_data) -> return value

  Test a set of training data and calculates the MSE for the training data. */
  static VALUE test_data(VALUE self, VALUE train_data)
  {
  Check_Type(train_data, T_DATA);
  struct fann* f;
- struct fann_train_data* t;
- Data_Get_Struct (self, struct fann, f);
- Data_Get_Struct (train_data, struct fann_train_data, t);
+ struct fann_train_data* t;
+ Data_Get_Struct (self, struct fann, f);
+ Data_Get_Struct (train_data, struct fann_train_data, t);
  return rb_float_new(fann_test_data(f, t));
  }

@@ -1204,7 +1207,7 @@ static VALUE test_data(VALUE self, VALUE train_data)
  // Data_Get_Struct (self, struct fann, f);
  // return INT2NUM(fann_get_decimal_point(f));
  // }
-
+
  // returns the multiplier that fix point data is multiplied with.

  // Only available in fixed-point mode, which we don't need:
@@ -1228,19 +1231,19 @@ static VALUE cascadetrain_on_data(VALUE self, VALUE train_data, VALUE max_neuron
  Check_Type(max_neurons, T_FIXNUM);
  Check_Type(neurons_between_reports, T_FIXNUM);
  Check_Type(desired_error, T_FLOAT);
-
+
  struct fann* f;
- struct fann_train_data* t;
- Data_Get_Struct (self, struct fann, f);
- Data_Get_Struct (train_data, struct fann_train_data, t);
+ struct fann_train_data* t;
+ Data_Get_Struct (self, struct fann, f);
+ Data_Get_Struct (train_data, struct fann_train_data, t);

  unsigned int fann_max_neurons = NUM2INT(max_neurons);
  unsigned int fann_neurons_between_reports = NUM2INT(neurons_between_reports);
  float fann_desired_error = NUM2DBL(desired_error);
-
+
  fann_cascadetrain_on_data(f, t, fann_max_neurons, fann_neurons_between_reports, fann_desired_error);
- return self;
- }
+ return self;
+ }

  /** The cascade output change fraction is a number between 0 and 1 */
  static VALUE get_cascade_output_change_fraction(VALUE self)
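Cascade training pairs with the Shortcut constructor shown earlier; the arguments mirror train_on_data (a sketch):

    shortcut = RubyFann::Shortcut.new(:num_inputs => 2, :num_outputs => 1)
    shortcut.cascadetrain_on_data(xor, 30, 5, 0.001)  # max_neurons, neurons_between_reports, desired_error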
@@ -1256,7 +1259,7 @@ static VALUE set_cascade_output_change_fraction(VALUE self, VALUE cascade_output
  SET_FANN_FLT(cascade_output_change_fraction, fann_set_cascade_output_change_fraction);
  }

- /** The number of cascade output stagnation epochs determines the number of epochs training is allowed to
+ /** The number of cascade output stagnation epochs determines the number of epochs training is allowed to
  continue without changing the MSE by a fraction of <get_cascade_output_change_fraction>. */
  static VALUE get_cascade_output_stagnation_epochs(VALUE self)
  {
@@ -1265,7 +1268,7 @@ static VALUE get_cascade_output_stagnation_epochs(VALUE self)

  /** call-seq: set_cascade_output_stagnation_epochs(cascade_output_stagnation_epochs)

- The number of cascade output stagnation epochs determines the number of epochs training is allowed to
+ The number of cascade output stagnation epochs determines the number of epochs training is allowed to
  continue without changing the MSE by a fraction of <get_cascade_output_change_fraction>. */
  static VALUE set_cascade_output_stagnation_epochs(VALUE self, VALUE cascade_output_stagnation_epochs)
  {
@@ -1300,7 +1303,8 @@ static VALUE get_cascade_candidate_stagnation_epochs(VALUE self)
  static VALUE set_cascade_candidate_stagnation_epochs(VALUE self, VALUE cascade_candidate_stagnation_epochs)
  {
  SET_FANN_UINT(cascade_candidate_stagnation_epochs, fann_set_cascade_candidate_stagnation_epochs);
- }
+ }
+

  /** The weight multiplier is a parameter which is used to multiply the weights from the candidate neuron
  before adding the neuron to the neural network. This parameter is usually between 0 and 1, and is used
@@ -1352,7 +1356,7 @@ static VALUE set_cascade_max_out_epochs(VALUE self, VALUE cascade_max_out_epochs
  SET_FANN_UINT(cascade_max_out_epochs, fann_set_cascade_max_out_epochs);
  }

- /** The maximum candidate epochs determines the maximum number of epochs the input
+ /** The maximum candidate epochs determines the maximum number of epochs the input
  connections to the candidates may be trained before adding a new candidate neuron. */
  static VALUE get_cascade_max_cand_epochs(VALUE self)
  {
@@ -1361,7 +1365,7 @@ static VALUE get_cascade_max_cand_epochs(VALUE self)

  /** call-seq: set_cascade_max_cand_epochs(cascade_max_cand_epochs)

- The maximum candidate epochs determines the maximum number of epochs the input
+ The maximum candidate epochs determines the maximum number of epochs the input
  connections to the candidates may be trained before adding a new candidate neuron. */
  static VALUE set_cascade_max_cand_epochs(VALUE self, VALUE cascade_max_cand_epochs)
  {
@@ -1383,18 +1387,18 @@ static VALUE get_cascade_activation_functions_count(VALUE self)

  /** The learning rate is used to determine how aggressive training should be for some of the
  training algorithms (:incremental, :batch, :quickprop).
- Do however note that it is not used in :rprop.
+ Do however note that it is not used in :rprop.
  The default learning rate is 0.7. */
  static VALUE get_learning_rate(VALUE self)
  {
  RETURN_FANN_FLT(fann_get_learning_rate);
  }

- /** call-seq: set_learning_rate(learning_rate) -> return value
+ /** call-seq: set_learning_rate(learning_rate) -> return value

  The learning rate is used to determine how aggressive training should be for some of the
  training algorithms (:incremental, :batch, :quickprop).
- Do however note that it is not used in :rprop.
+ Do however note that it is not used in :rprop.
  The default learning rate is 0.7. */
  static VALUE set_learning_rate(VALUE self, VALUE learning_rate)
  {
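For example (the default is 0.7, and the value is ignored under :rprop as noted above):

    fann.set_learning_rate(0.35)
    fann.get_learning_rate           # => 0.35
    fann.set_learning_momentum(0.9)  # see the momentum accessors just below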
@@ -1407,8 +1411,8 @@ static VALUE get_learning_momentum(VALUE self)
  RETURN_FANN_FLT(fann_get_learning_momentum);
  }

- /** call-seq: set_learning_momentum(learning_momentum) -> return value
-
+ /** call-seq: set_learning_momentum(learning_momentum) -> return value
+
  Set the learning momentum. */
  static VALUE set_learning_momentum(VALUE self, VALUE learning_momentum)
  {
@@ -1423,18 +1427,18 @@ static VALUE set_cascade_activation_functions(VALUE self, VALUE cascade_activati
  {
  Check_Type(cascade_activation_functions, T_ARRAY);
  struct fann* f;
- Data_Get_Struct (self, struct fann, f);
-
- unsigned int cnt = NUM2UINT(RARRAY_LEN(cascade_activation_functions));
+ Data_Get_Struct (self, struct fann, f);
+
+ unsigned long cnt = RARRAY_LEN(cascade_activation_functions);
  enum fann_activationfunc_enum fann_activation_functions[cnt];
  unsigned int i;
  for (i=0; i<cnt; i++)
  {
  fann_activation_functions[i] = sym_to_activation_function(RARRAY_PTR(cascade_activation_functions)[i]);
  }
-
+
  fann_set_cascade_activation_functions(f, fann_activation_functions, cnt);
- return self;
+ return self;
  }

  /** The cascade activation functions is an array of the different activation functions used by
@@ -1487,16 +1491,16 @@ static VALUE set_cascade_activation_steepnesses(VALUE self, VALUE cascade_activa
  {
  Check_Type(cascade_activation_steepnesses, T_ARRAY);
  struct fann* f;
- Data_Get_Struct (self, struct fann, f);
-
- unsigned int cnt = NUM2UINT(RARRAY_LEN(cascade_activation_steepnesses));
+ Data_Get_Struct (self, struct fann, f);
+
+ unsigned int cnt = RARRAY_LEN(cascade_activation_steepnesses);
  fann_type fann_activation_steepnesses[cnt];
  unsigned int i;
  for (i=0; i<cnt; i++)
  {
  fann_activation_steepnesses[i] = NUM2DBL(RARRAY_PTR(cascade_activation_steepnesses)[i]);
  }
-
+
  fann_set_cascade_activation_steepnesses(f, fann_activation_steepnesses, cnt);
  return self;
  }
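Both array setters walk the Ruby Array element by element (symbols through sym_to_activation_function, steepnesses through NUM2DBL); for example:

    fann.set_cascade_activation_functions([:sigmoid, :sigmoid_symmetric, :gaussian])
    fann.set_cascade_activation_steepnesses([0.25, 0.5, 0.75, 1.0])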
@@ -1544,11 +1548,11 @@ void Init_ruby_fann ()
  rb_define_alloc_func (m_rb_fann_standard_class, fann_allocate);
  rb_define_method(m_rb_fann_standard_class, "initialize", fann_initialize, 1);
  rb_define_method(m_rb_fann_standard_class, "init_weights", init_weights, 1);
- rb_define_method(m_rb_fann_standard_class, "set_activation_function", set_activation_function, 3);
- rb_define_method(m_rb_fann_standard_class, "set_activation_function_hidden", set_activation_function_hidden, 1);
- rb_define_method(m_rb_fann_standard_class, "set_activation_function_layer", set_activation_function_layer, 2);
- rb_define_method(m_rb_fann_standard_class, "get_activation_function", get_activation_function, 2);
- rb_define_method(m_rb_fann_standard_class, "set_activation_function_output", set_activation_function_output, 1);
+ rb_define_method(m_rb_fann_standard_class, "set_activation_function", set_activation_function, 3);
+ rb_define_method(m_rb_fann_standard_class, "set_activation_function_hidden", set_activation_function_hidden, 1);
+ rb_define_method(m_rb_fann_standard_class, "set_activation_function_layer", set_activation_function_layer, 2);
+ rb_define_method(m_rb_fann_standard_class, "get_activation_function", get_activation_function, 2);
+ rb_define_method(m_rb_fann_standard_class, "set_activation_function_output", set_activation_function_output, 1);
  rb_define_method(m_rb_fann_standard_class, "get_activation_steepness", get_activation_steepness, 2);
  rb_define_method(m_rb_fann_standard_class, "set_activation_steepness", set_activation_steepness, 3);
  rb_define_method(m_rb_fann_standard_class, "set_activation_steepness_hidden", set_activation_steepness_hidden, 1);
@@ -1578,14 +1582,14 @@ void Init_ruby_fann ()
  rb_define_method(m_rb_fann_standard_class, "get_connection_rate", get_connection_rate, 0);
  rb_define_method(m_rb_fann_standard_class, "get_layer_array", get_layer_array, 0);
  rb_define_method(m_rb_fann_standard_class, "get_network_type", get_network_type, 0);
- rb_define_method(m_rb_fann_standard_class, "get_neurons", get_neurons, 0);
+ rb_define_method(m_rb_fann_standard_class, "get_neurons", get_neurons, 0);
  rb_define_method(m_rb_fann_standard_class, "get_num_input", get_num_input, 0);
  rb_define_method(m_rb_fann_standard_class, "get_num_layers", get_num_layers, 0);
- rb_define_method(m_rb_fann_standard_class, "get_num_output", get_num_output, 0);
+ rb_define_method(m_rb_fann_standard_class, "get_num_output", get_num_output, 0);
  rb_define_method(m_rb_fann_standard_class, "get_total_connections", get_total_connections, 0);
  rb_define_method(m_rb_fann_standard_class, "get_total_neurons", get_total_neurons, 0);
  // rb_define_method(m_rb_fann_standard_class, "get_train_error_function", get_train_error_function, 0);
- // rb_define_method(m_rb_fann_standard_class, "set_train_error_function", set_train_error_function, 1);
+ // rb_define_method(m_rb_fann_standard_class, "set_train_error_function", set_train_error_function, 1);
  rb_define_method(m_rb_fann_standard_class, "print_connections", print_connections, 0);
  rb_define_method(m_rb_fann_standard_class, "print_parameters", print_parameters, 0);
  rb_define_method(m_rb_fann_standard_class, "randomize_weights", randomize_weights, 2);
@@ -1593,7 +1597,7 @@ void Init_ruby_fann ()
  rb_define_method(m_rb_fann_standard_class, "train", train, 2);
  rb_define_method(m_rb_fann_standard_class, "train_on_data", train_on_data, 4);
  rb_define_method(m_rb_fann_standard_class, "train_epoch", train_epoch, 1);
- rb_define_method(m_rb_fann_standard_class, "test_data", test_data, 1);
+ rb_define_method(m_rb_fann_standard_class, "test_data", test_data, 1);
  rb_define_method(m_rb_fann_standard_class, "get_MSE", get_MSE, 0);
  rb_define_method(m_rb_fann_standard_class, "get_bit_fail", get_bit_fail, 0);
  rb_define_method(m_rb_fann_standard_class, "reset_MSE", reset_MSE, 0);
@@ -1603,8 +1607,8 @@ void Init_ruby_fann ()
  rb_define_method(m_rb_fann_standard_class, "set_learning_momentum", set_learning_momentum, 1);
  rb_define_method(m_rb_fann_standard_class, "get_training_algorithm", get_training_algorithm, 0);
  rb_define_method(m_rb_fann_standard_class, "set_training_algorithm", set_training_algorithm, 1);
-
-
+
+
  // Cascade functions:
  rb_define_method(m_rb_fann_standard_class, "cascadetrain_on_data", cascadetrain_on_data, 4);
  rb_define_method(m_rb_fann_standard_class, "get_cascade_output_change_fraction", get_cascade_output_change_fraction, 0);
@@ -1630,25 +1634,25 @@ void Init_ruby_fann ()
  rb_define_method(m_rb_fann_standard_class, "get_cascade_activation_steepnesses_count", get_cascade_activation_steepnesses_count, 0);
  rb_define_method(m_rb_fann_standard_class, "get_cascade_activation_steepnesses", get_cascade_activation_steepnesses, 0);
  rb_define_method(m_rb_fann_standard_class, "set_cascade_activation_steepnesses", set_cascade_activation_steepnesses, 1);
- rb_define_method(m_rb_fann_standard_class, "get_cascade_num_candidate_groups", get_cascade_num_candidate_groups, 0);
- rb_define_method(m_rb_fann_standard_class, "set_cascade_num_candidate_groups", set_cascade_num_candidate_groups, 1);
+ rb_define_method(m_rb_fann_standard_class, "get_cascade_num_candidate_groups", get_cascade_num_candidate_groups, 0);
+ rb_define_method(m_rb_fann_standard_class, "set_cascade_num_candidate_groups", set_cascade_num_candidate_groups, 1);
  rb_define_method(m_rb_fann_standard_class, "save", nn_save, 1);
 
-
+
  // Uncomment for fixed-point mode (also recompile fann). Probably not going to be needed:
- //rb_define_method(clazz, "get_decimal_point", get_decimal_point, 0);
- //rb_define_method(clazz, "get_multiplier", get_multiplier, 0);
-
+ //rb_define_method(clazz, "get_decimal_point", get_decimal_point, 0);
+ //rb_define_method(clazz, "get_multiplier", get_multiplier, 0);
+
  // Shortcut NN class (duplicated from above so that rdoc generation tools can find the methods:):
- m_rb_fann_shortcut_class = rb_define_class_under (m_rb_fann_module, "Shortcut", rb_cObject);
+ m_rb_fann_shortcut_class = rb_define_class_under (m_rb_fann_module, "Shortcut", rb_cObject);
  rb_define_alloc_func (m_rb_fann_shortcut_class, fann_allocate);
  rb_define_method(m_rb_fann_shortcut_class, "initialize", fann_initialize, 1);
  rb_define_method(m_rb_fann_shortcut_class, "init_weights", init_weights, 1);
- rb_define_method(m_rb_fann_shortcut_class, "set_activation_function", set_activation_function, 3);
- rb_define_method(m_rb_fann_shortcut_class, "set_activation_function_hidden", set_activation_function_hidden, 1);
- rb_define_method(m_rb_fann_shortcut_class, "set_activation_function_layer", set_activation_function_layer, 2);
- rb_define_method(m_rb_fann_shortcut_class, "get_activation_function", get_activation_function, 2);
- rb_define_method(m_rb_fann_shortcut_class, "set_activation_function_output", set_activation_function_output, 1);
+ rb_define_method(m_rb_fann_shortcut_class, "set_activation_function", set_activation_function, 3);
+ rb_define_method(m_rb_fann_shortcut_class, "set_activation_function_hidden", set_activation_function_hidden, 1);
+ rb_define_method(m_rb_fann_shortcut_class, "set_activation_function_layer", set_activation_function_layer, 2);
+ rb_define_method(m_rb_fann_shortcut_class, "get_activation_function", get_activation_function, 2);
+ rb_define_method(m_rb_fann_shortcut_class, "set_activation_function_output", set_activation_function_output, 1);
  rb_define_method(m_rb_fann_shortcut_class, "get_activation_steepness", get_activation_steepness, 2);
  rb_define_method(m_rb_fann_shortcut_class, "set_activation_steepness", set_activation_steepness, 3);
  rb_define_method(m_rb_fann_shortcut_class, "set_activation_steepness_hidden", set_activation_steepness_hidden, 1);
@@ -1678,14 +1682,14 @@ void Init_ruby_fann ()
  rb_define_method(m_rb_fann_shortcut_class, "get_connection_rate", get_connection_rate, 0);
  rb_define_method(m_rb_fann_shortcut_class, "get_layer_array", get_layer_array, 0);
  rb_define_method(m_rb_fann_shortcut_class, "get_network_type", get_network_type, 0);
- rb_define_method(m_rb_fann_shortcut_class, "get_neurons", get_neurons, 0);
+ rb_define_method(m_rb_fann_shortcut_class, "get_neurons", get_neurons, 0);
  rb_define_method(m_rb_fann_shortcut_class, "get_num_input", get_num_input, 0);
  rb_define_method(m_rb_fann_shortcut_class, "get_num_layers", get_num_layers, 0);
- rb_define_method(m_rb_fann_shortcut_class, "get_num_output", get_num_output, 0);
+ rb_define_method(m_rb_fann_shortcut_class, "get_num_output", get_num_output, 0);
  rb_define_method(m_rb_fann_shortcut_class, "get_total_connections", get_total_connections, 0);
  rb_define_method(m_rb_fann_shortcut_class, "get_total_neurons", get_total_neurons, 0);
  // rb_define_method(m_rb_fann_shortcut_class, "get_train_error_function", get_train_error_function, 0);
- // rb_define_method(m_rb_fann_shortcut_class, "set_train_error_function", set_train_error_function, 1);
+ // rb_define_method(m_rb_fann_shortcut_class, "set_train_error_function", set_train_error_function, 1);
  rb_define_method(m_rb_fann_shortcut_class, "print_connections", print_connections, 0);
  rb_define_method(m_rb_fann_shortcut_class, "print_parameters", print_parameters, 0);
  rb_define_method(m_rb_fann_shortcut_class, "randomize_weights", randomize_weights, 2);
@@ -1693,7 +1697,7 @@ void Init_ruby_fann ()
  rb_define_method(m_rb_fann_shortcut_class, "train", train, 2);
  rb_define_method(m_rb_fann_shortcut_class, "train_on_data", train_on_data, 4);
  rb_define_method(m_rb_fann_shortcut_class, "train_epoch", train_epoch, 1);
- rb_define_method(m_rb_fann_shortcut_class, "test_data", test_data, 1);
+ rb_define_method(m_rb_fann_shortcut_class, "test_data", test_data, 1);
  rb_define_method(m_rb_fann_shortcut_class, "get_MSE", get_MSE, 0);
  rb_define_method(m_rb_fann_shortcut_class, "get_bit_fail", get_bit_fail, 0);
  rb_define_method(m_rb_fann_shortcut_class, "reset_MSE", reset_MSE, 0);
@@ -1703,7 +1707,7 @@ void Init_ruby_fann ()
  rb_define_method(m_rb_fann_shortcut_class, "set_learning_momentum", set_learning_momentum, 1);
  rb_define_method(m_rb_fann_shortcut_class, "get_training_algorithm", get_training_algorithm, 0);
  rb_define_method(m_rb_fann_shortcut_class, "set_training_algorithm", set_training_algorithm, 1);
-
+
  // Cascade functions:
  rb_define_method(m_rb_fann_shortcut_class, "cascadetrain_on_data", cascadetrain_on_data, 4);
  rb_define_method(m_rb_fann_shortcut_class, "get_cascade_output_change_fraction", get_cascade_output_change_fraction, 0);
@@ -1729,19 +1733,18 @@ void Init_ruby_fann ()
  rb_define_method(m_rb_fann_shortcut_class, "get_cascade_activation_steepnesses_count", get_cascade_activation_steepnesses_count, 0);
  rb_define_method(m_rb_fann_shortcut_class, "get_cascade_activation_steepnesses", get_cascade_activation_steepnesses, 0);
  rb_define_method(m_rb_fann_shortcut_class, "set_cascade_activation_steepnesses", set_cascade_activation_steepnesses, 1);
- rb_define_method(m_rb_fann_shortcut_class, "get_cascade_num_candidate_groups", get_cascade_num_candidate_groups, 0);
- rb_define_method(m_rb_fann_shortcut_class, "set_cascade_num_candidate_groups", set_cascade_num_candidate_groups, 1);
+ rb_define_method(m_rb_fann_shortcut_class, "get_cascade_num_candidate_groups", get_cascade_num_candidate_groups, 0);
+ rb_define_method(m_rb_fann_shortcut_class, "set_cascade_num_candidate_groups", set_cascade_num_candidate_groups, 1);
  rb_define_method(m_rb_fann_shortcut_class, "save", nn_save, 1);
-
+
 
  // TrainData NN class:
- m_rb_fann_train_data_class = rb_define_class_under (m_rb_fann_module, "TrainData", rb_cObject);
- rb_define_alloc_func (m_rb_fann_train_data_class, fann_training_data_allocate);
+ m_rb_fann_train_data_class = rb_define_class_under (m_rb_fann_module, "TrainData", rb_cObject);
+ rb_define_alloc_func (m_rb_fann_train_data_class, fann_training_data_allocate);
  rb_define_method(m_rb_fann_train_data_class, "initialize", fann_train_data_initialize, 1);
  rb_define_method(m_rb_fann_train_data_class, "length", length_train_data, 0);
- rb_define_method(m_rb_fann_train_data_class, "shuffle", shuffle, 0);
+ rb_define_method(m_rb_fann_train_data_class, "shuffle", shuffle, 0);
  rb_define_method(m_rb_fann_train_data_class, "save", training_save, 1);
-
+
  // printf("Initialized Ruby Bindings for FANN.\n");
  }
-
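Taken together, Init_ruby_fann defines the extension's whole public surface: RubyFann::Standard, RubyFann::Shortcut (the same methods on a shortcut topology), and RubyFann::TrainData. An end-to-end sketch in the spirit of the gem's README, using only methods registered above plus run (values illustrative):

    require 'ruby-fann'

    train = RubyFann::TrainData.new(
      inputs:          [[0.3, 0.4, 0.5], [0.1, 0.2, 0.3]],
      desired_outputs: [[0.7], [0.8]])
    train.shuffle                             # registered on TrainData above

    fann = RubyFann::Standard.new(num_inputs: 3, hidden_neurons: [4], num_outputs: 1)
    fann.train_on_data(train, 1000, 10, 0.1)  # max_epochs, epochs between reports, desired MSE
    puts fann.run([0.3, 0.4, 0.5]).inspect    # => e.g. [0.69...]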