ruby-fann 1.0.2 → 1.0.3
- data/History.txt +4 -0
- data/ext/ruby_fann/neural_network.c +968 -954
- data/lib/ruby_fann/version.rb +1 -1
- data/neurotica1.png +0 -0
- data/neurotica2.vrml +34 -34
- data/website/index.html +11 -14
- data/website/index.txt +10 -9
- data/xor_cascade.net +2 -2
- data/xor_float.net +1 -1
- metadata +3 -3
@@ -1,5 +1,4 @@
 #include "ruby.h"
-// #include "fann.h"
 #include "doublefann.h"
 #include "fann_data.h"
 #include "fann_augment.h"
@@ -44,6 +43,7 @@ Check_Type(attr_name, T_FLOAT); \
   struct fann* f; \
   Data_Get_Struct(self, struct fann, f); \
   fann_fn(f, NUM2DBL(attr_name)); \
+  return self;

 #define RETURN_FANN_DBL(fn) \
   struct fann* f; \
@@ -55,95 +55,95 @@ return rb_float_new(fn(f));
 // Convert ruby symbol to corresponding FANN enum type for activation function:
 enum fann_activationfunc_enum sym_to_activation_function(VALUE activation_func)
 {
- (old lines 58-99 not shown in this view)
+  ID id=SYM2ID(activation_func);
+  enum fann_activationfunc_enum activation_function;
+  if(id==rb_intern("linear")) {
+    activation_function = FANN_LINEAR;
+  } else if(id==rb_intern("threshold")) {
+    activation_function = FANN_THRESHOLD;
+  } else if(id==rb_intern("threshold_symmetric")) {
+    activation_function = FANN_THRESHOLD_SYMMETRIC;
+  } else if(id==rb_intern("sigmoid")) {
+    activation_function = FANN_SIGMOID;
+  } else if(id==rb_intern("sigmoid_stepwise")) {
+    activation_function = FANN_SIGMOID_STEPWISE;
+  } else if(id==rb_intern("sigmoid_symmetric")) {
+    activation_function = FANN_SIGMOID_SYMMETRIC;
+  } else if(id==rb_intern("sigmoid_symmetric_stepwise")) {
+    activation_function = FANN_SIGMOID_SYMMETRIC_STEPWISE;
+  } else if(id==rb_intern("gaussian")) {
+    activation_function = FANN_GAUSSIAN;
+  } else if(id==rb_intern("gaussian_symmetric")) {
+    activation_function = FANN_GAUSSIAN_SYMMETRIC;
+  } else if(id==rb_intern("gaussian_stepwise")) {
+    activation_function = FANN_GAUSSIAN_STEPWISE;
+  } else if(id==rb_intern("elliot")) {
+    activation_function = FANN_ELLIOT;
+  } else if(id==rb_intern("elliot_symmetric")) {
+    activation_function = FANN_ELLIOT_SYMMETRIC;
+  } else if(id==rb_intern("linear_piece")) {
+    activation_function = FANN_LINEAR_PIECE;
+  } else if(id==rb_intern("linear_piece_symmetric")) {
+    activation_function = FANN_LINEAR_PIECE_SYMMETRIC;
+  } else if(id==rb_intern("sin_symmetric")) {
+    activation_function = FANN_SIN_SYMMETRIC;
+  } else if(id==rb_intern("cos_symmetric")) {
+    activation_function = FANN_COS_SYMMETRIC;
+  } else if(id==rb_intern("sin")) {
+    activation_function = FANN_SIN;
+  } else if(id==rb_intern("cos")) {
+    activation_function = FANN_COS;
+  } else {
+    rb_raise(rb_eRuntimeError, "Unrecognized activation function: [%s]", rb_id2name(SYM2ID(activation_func)));
+  }
+  return activation_function;
 }

 // Convert FANN enum type for activation function to corresponding ruby symbol:
 VALUE activation_function_to_sym(enum fann_activationfunc_enum fn)
 {
- (old lines 105-146 not shown in this view)
+  VALUE activation_function;
+
+  if(fn==FANN_LINEAR) {
+    activation_function = ID2SYM(rb_intern("linear"));
+  } else if(fn==FANN_THRESHOLD) {
+    activation_function = ID2SYM(rb_intern("threshold"));
+  } else if(fn==FANN_THRESHOLD_SYMMETRIC) {
+    activation_function = ID2SYM(rb_intern("threshold_symmetric"));
+  } else if(fn==FANN_SIGMOID) {
+    activation_function = ID2SYM(rb_intern("sigmoid"));
+  } else if(fn==FANN_SIGMOID_STEPWISE) {
+    activation_function = ID2SYM(rb_intern("sigmoid_stepwise"));
+  } else if(fn==FANN_SIGMOID_SYMMETRIC) {
+    activation_function = ID2SYM(rb_intern("sigmoid_symmetric"));
+  } else if(fn==FANN_SIGMOID_SYMMETRIC_STEPWISE) {
+    activation_function = ID2SYM(rb_intern("sigmoid_symmetric_stepwise"));
+  } else if(fn==FANN_GAUSSIAN) {
+    activation_function = ID2SYM(rb_intern("gaussian"));
+  } else if(fn==FANN_GAUSSIAN_SYMMETRIC) {
+    activation_function = ID2SYM(rb_intern("gaussian_symmetric"));
+  } else if(fn==FANN_GAUSSIAN_STEPWISE) {
+    activation_function = ID2SYM(rb_intern("gaussian_stepwise"));
+  } else if(fn==FANN_ELLIOT) {
+    activation_function = ID2SYM(rb_intern("elliot"));
+  } else if(fn==FANN_ELLIOT_SYMMETRIC) {
+    activation_function = ID2SYM(rb_intern("elliot_symmetric"));
+  } else if(fn==FANN_LINEAR_PIECE) {
+    activation_function = ID2SYM(rb_intern("linear_piece"));
+  } else if(fn==FANN_LINEAR_PIECE_SYMMETRIC) {
+    activation_function = ID2SYM(rb_intern("linear_piece_symmetric"));
+  } else if(fn==FANN_SIN_SYMMETRIC) {
+    activation_function = ID2SYM(rb_intern("sin_symmetric"));
+  } else if(fn==FANN_COS_SYMMETRIC) {
+    activation_function = ID2SYM(rb_intern("cos_symmetric"));
+  } else if(fn==FANN_SIN) {
+    activation_function = ID2SYM(rb_intern("sin"));
+  } else if(fn==FANN_COS) {
+    activation_function = ID2SYM(rb_intern("cos"));
+  } else {
+    rb_raise(rb_eRuntimeError, "Unrecognized activation function: [%d]", fn);
+  }
+  return activation_function;
 }

@@ -156,28 +156,28 @@ static void fann_mark (struct fann* ann){}
 static void fann_free (struct fann* ann)
 {
   fann_destroy(ann);
-
+  // ("Destroyed FANN network [%d].\n", ann);
 }

 // Free memory associated with FANN Training data:
 static void fann_training_data_free (struct fann_train_data* train_data)
 {
   fann_destroy_train(train_data);
-
+  // printf("Destroyed Training data [%d].\n", train_data);
 }

 // Create wrapper, but don't allocate anything...do that in
 // initialize, so we can construct with args:
 static VALUE fann_allocate (VALUE klass)
 {
-
+  return Data_Wrap_Struct (klass, fann_mark, fann_free, 0);
 }

 // Create wrapper, but don't allocate annything...do that in
 // initialize, so we can construct with args:
 static VALUE fann_training_data_allocate (VALUE klass)
 {
-
+  return Data_Wrap_Struct (klass, fann_mark, fann_training_data_free, 0);
 }

@@ -194,34 +194,34 @@ static int FANN_API fann_training_callback(struct fann *ann, struct fann_train_d
   unsigned int max_epochs, unsigned int epochs_between_reports,
   float desired_error, unsigned int epochs)
 {
-
+  VALUE self = (VALUE)fann_get_user_data(ann);
   VALUE args = rb_hash_new();
- (old lines 199-209 not shown in this view)
+
+  // Set attributes on hash & push on array:
+  VALUE max_epochs_sym = ID2SYM(rb_intern("max_epochs"));
+  VALUE epochs_between_reports_sym = ID2SYM(rb_intern("epochs_between_reports"));
+  VALUE desired_error_sym = ID2SYM(rb_intern("desired_error"));
+  VALUE epochs_sym = ID2SYM(rb_intern("epochs"));
+
+  rb_hash_aset(args, max_epochs_sym, INT2NUM(max_epochs));
+  rb_hash_aset(args, epochs_between_reports_sym, INT2NUM(epochs_between_reports));
+  rb_hash_aset(args, desired_error_sym, rb_float_new(desired_error));
+  rb_hash_aset(args, epochs_sym, INT2NUM(epochs));

-
+  VALUE callback = rb_funcall(self, rb_intern("training_callback"), 1, args);

- (old lines 213-224 not shown in this view)
+  if (TYPE(callback)!=T_FIXNUM)
+  {
+    rb_raise (rb_eRuntimeError, "Callback method must return an integer (-1 to stop training).");
+  }
+
+  int status = NUM2INT(callback);
+  if (status==-1)
+  {
+    printf("Callback method returned -1; training will stop.\n");
+  }
+
+  return status;
 }

 /** call-seq: new(hash) -> new ruby-fann neural network object
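The hunk above defines the Ruby-side callback contract: if the network object responds to training_callback, FANN invokes it with a hash carrying :max_epochs, :epochs_between_reports, :desired_error and :epochs, and the method must return an integer, with -1 stopping training. A minimal Ruby sketch, not part of the diff; the subclass name and epoch cutoff are illustrative:

  require 'ruby_fann'

  class CallbackFann < RubyFann::Standard
    def training_callback(args)
      puts "epoch #{args[:epochs]}/#{args[:max_epochs]}, target error #{args[:desired_error]}"
      return -1 if args[:epochs] >= 50  # -1 asks FANN to stop training early
      0                                 # any other integer continues training
    end
  end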
@@ -230,10 +230,10 @@ static int FANN_API fann_training_callback(struct fann *ann, struct fann_train_d

 Standard Initialization:
 RubyFann::Standard.new(:num_inputs=>1, :hidden_neurons=>[3, 4, 3, 4], :num_outputs=>1)
-
+
 Shortcut Initialization (e.g., for use in cascade training):
 RubyFann::Shortcut.new(:num_inputs=>5, :num_outputs=>1)
-
+
 File Initialization
 RubyFann::Standard.new(:filename=>'xor_float.net')

@@ -242,76 +242,74 @@ static int FANN_API fann_training_callback(struct fann *ann, struct fann_train_d
 */
 static VALUE fann_initialize(VALUE self, VALUE hash)
 {
- (old lines 245-249 not shown in this view)
+  // Get args:
+  VALUE filename = rb_hash_aref(hash, ID2SYM(rb_intern("filename")));
+  VALUE num_inputs = rb_hash_aref(hash, ID2SYM(rb_intern("num_inputs")));
+  VALUE num_outputs = rb_hash_aref(hash, ID2SYM(rb_intern("num_outputs")));
+  VALUE hidden_neurons = rb_hash_aref(hash, ID2SYM(rb_intern("hidden_neurons")));
 // printf("initializing\n\n\n");
- (old lines 251-257 not shown in this view)
-// printf("Created RubyFann::Standard [%d] from file [%s].\n", ann, StringValuePtr(filename));
- (old lines 259-310 not shown in this view)
+  struct fann* ann;
+  if (TYPE(filename)==T_STRING)
+  {
+    // Initialize with file:
+    // train_data = fann_read_train_from_file(StringValuePtr(filename));
+    // DATA_PTR(self) = train_data;
+    ann = fann_create_from_file(StringValuePtr(filename));
+    // printf("Created RubyFann::Standard [%d] from file [%s].\n", ann, StringValuePtr(filename));
+  }
+  else if(rb_obj_is_kind_of(self, m_rb_fann_shortcut_class))
+  {
+    // Initialize as shortcut, suitable for cascade training:
+    //ann = fann_create_shortcut_array(num_layers, layers);
+    Check_Type(num_inputs, T_FIXNUM);
+    Check_Type(num_outputs, T_FIXNUM);
+
+    ann = fann_create_shortcut(2, NUM2INT(num_inputs), NUM2INT(num_outputs));
+    // printf("Created RubyFann::Shortcut [%d].\n", ann);
+  }
+  else
+  {
+    // Initialize as standard:
+    Check_Type(num_inputs, T_FIXNUM);
+    Check_Type(hidden_neurons, T_ARRAY);
+    Check_Type(num_outputs, T_FIXNUM);
+
+    // Initialize layers:
+    unsigned int num_layers=RARRAY(hidden_neurons)->len + 2; // NUM2INT(num_inputs) + NUM2INT(num_outputs) + RARRAY(hidden_neurons)->len;
+    unsigned int layers[num_layers];
+
+    // Input:
+    layers[0]=NUM2INT(num_inputs);
+    // Output:
+    layers[num_layers-1]=NUM2INT(num_outputs);
+    // Hidden:
+    int i;
+    for (i=1; i<=num_layers-2; i++) {
+      layers[i]=NUM2UINT(RARRAY(hidden_neurons)->ptr[i-1]);
+    }
+
+    ann = fann_create_standard_array(num_layers, layers);
+    // printf("Created RubyFann::Standard [%d].\n", ann);
+  }
+
+  DATA_PTR(self) = ann;
+
+  // printf("Checking for callback...");
+
+  //int callback = rb_protect(invoke_training_callback, (self), &status);
+  // VALUE callback = rb_funcall(DATA_PTR(self), "training_callback", 0);
+  if(rb_respond_to(self, rb_intern("training_callback")))
+  {
+    fann_set_callback(ann, &fann_training_callback);
+    fann_set_user_data(ann, self);
+    // printf("found(%d).\n", ann->callback);
+  }
+  else
+  {
+    // printf("none found.\n");
+  }

-
-  //DATA_PTR(self) = ann;
-  return (VALUE)ann;
+  return (VALUE)ann;
 }

 /** call-seq: new(hash) -> new ruby-fann training data object (RubyFann::TrainData)
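A usage sketch of the three construction paths handled by fann_initialize above, drawn from the call-seq examples earlier in this diff (variable names are illustrative):

  require 'ruby_fann'

  # Standard: hidden_neurons gives one entry per hidden layer.
  standard = RubyFann::Standard.new(:num_inputs=>1, :hidden_neurons=>[3, 4, 3, 4], :num_outputs=>1)

  # Shortcut: no hidden layers up front; intended for cascade training.
  shortcut = RubyFann::Shortcut.new(:num_inputs=>5, :num_outputs=>1)

  # File: load a previously saved network.
  loaded = RubyFann::Standard.new(:filename=>'xor_float.net')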
@@ -327,55 +325,52 @@ static VALUE fann_initialize(VALUE self, VALUE hash)
 # All sub-arrays on desired_outputs should be of same length
 # Sub-arrays on inputs & desired_outputs can be different sizes from one another
 RubyFann::TrainData.new(:inputs=>[[0.2, 0.3, 0.4], [0.8, 0.9, 0.7]], :desired_outputs=>[[3.14], [6.33]])
- (old lines 330-332 not shown in this view)
 */
 static VALUE fann_train_data_initialize(VALUE self, VALUE hash)
 {
- (old lines 336-337 not shown in this view)
+  struct fann_train_data* train_data;
+  Check_Type(hash, T_HASH);

- (old lines 339-378 not shown in this view)
+  VALUE filename = rb_hash_aref(hash, ID2SYM(rb_intern("filename")));
+  VALUE inputs = rb_hash_aref(hash, ID2SYM(rb_intern("inputs")));
+  VALUE desired_outputs = rb_hash_aref(hash, ID2SYM(rb_intern("desired_outputs")));
+
+  if (TYPE(filename)==T_STRING)
+  {
+    train_data = fann_read_train_from_file(StringValuePtr(filename));
+    DATA_PTR(self) = train_data;
+  }
+  else if (TYPE(inputs)==T_ARRAY)
+  {
+    if (TYPE(desired_outputs)!=T_ARRAY)
+    {
+      rb_raise (rb_eRuntimeError, "[desired_outputs] must be present when [inputs] used.");
+    }
+
+    if (RARRAY(inputs)->len < 1)
+    {
+      rb_raise (rb_eRuntimeError, "[inputs/desired_outputs] must contain at least one value.");
+    }
+
+    // The data is here, start constructing:
+    if(RARRAY(inputs)->len != RARRAY(desired_outputs)->len)
+    {
+      rb_raise (
+        rb_eRuntimeError,
+        "Number of inputs must match number of outputs: (%d != %d)",
+        RARRAY(inputs)->len,
+        RARRAY(desired_outputs)->len);
+    }
+
+    train_data = fann_create_train_from_rb_ary(inputs, desired_outputs);
+    DATA_PTR(self) = train_data;
+  }
+  else
+  {
+    rb_raise (rb_eRuntimeError, "Must construct with a filename(string) or inputs/desired_outputs(arrays). All args passed via hash with symbols as keys.");
+  }
+
+  return (VALUE)train_data;
 }

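A sketch of the two TrainData construction paths accepted above (the training-file name here is hypothetical):

  require 'ruby_fann'

  # From arrays; inputs and desired_outputs must have the same number of rows.
  train = RubyFann::TrainData.new(
    :inputs=>[[0.2, 0.3, 0.4], [0.8, 0.9, 0.7]],
    :desired_outputs=>[[3.14], [6.33]])

  # Or from a FANN-format training file.
  train_from_file = RubyFann::TrainData.new(:filename=>'xor.train')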
@@ -385,81 +380,87 @@ static VALUE fann_train_data_initialize(VALUE self, VALUE hash)
 */
 static VALUE training_save(VALUE self, VALUE filename)
 {
- (old lines 388-391 not shown in this view)
+  Check_Type(filename, T_STRING);
+  struct fann_train_data* t;
+  Data_Get_Struct (self, struct fann_train_data, t);
+  fann_save_train(t, StringValuePtr(filename));
+  return self;
 }

 /** Shuffles training data, randomizing the order.
 This is recommended for incremental training, while it will have no influence during batch training.*/
 static VALUE shuffle(VALUE self)
 {
- (old lines 398-400 not shown in this view)
+  struct fann_train_data* t;
+  Data_Get_Struct (self, struct fann_train_data, t);
+  fann_shuffle_train_data(t);
+  return self;
 }

 /** Length of training data*/
 static VALUE length_train_data(VALUE self)
 {
- (old lines 406-408 not shown in this view)
+  struct fann_train_data* t;
+  Data_Get_Struct (self, struct fann_train_data, t);
+  return(UINT2NUM(fann_length_train_data(t)));
+  return self;
 }

 /** call-seq: set_activation_function(activation_func, layer, neuron)

 Set the activation function for neuron number *neuron* in layer number *layer*,
- (old lines 414-418 not shown in this view)
+counting the input layer as layer 0. activation_func must be one of the following symbols:
+:linear, :threshold, :threshold_symmetric, :sigmoid, :sigmoid_stepwise, :sigmoid_symmetric,
+:sigmoid_symmetric_stepwise, :gaussian, :gaussian_symmetric, :gaussian_stepwise, :elliot,
+:elliot_symmetric, :linear_piece, :linear_piece_symmetric, :sin_symmetric, :cos_symmetric,
+:sin, :cos*/
 static VALUE set_activation_function(VALUE self, VALUE activation_func, VALUE layer, VALUE neuron)
 {
- (old lines 421-427 not shown in this view)
+  Check_Type(activation_func, T_SYMBOL);
+  Check_Type(layer, T_FIXNUM);
+  Check_Type(neuron, T_FIXNUM);
+
+  struct fann* f;
+  Data_Get_Struct(self, struct fann, f);
+  fann_set_activation_function(f, sym_to_activation_function(activation_func), NUM2INT(layer), NUM2INT(neuron));
+  return self;
 }

 /** call-seq: set_activation_function_hidden(activation_func)

 Set the activation function for all of the hidden layers. activation_func must be one of the following symbols:
- (old lines 433-436 not shown in this view)
+:linear, :threshold, :threshold_symmetric, :sigmoid, :sigmoid_stepwise, :sigmoid_symmetric,
+:sigmoid_symmetric_stepwise, :gaussian, :gaussian_symmetric, :gaussian_stepwise, :elliot,
+:elliot_symmetric, :linear_piece, :linear_piece_symmetric, :sin_symmetric, :cos_symmetric,
+:sin, :cos*/
 static VALUE set_activation_function_hidden(VALUE self, VALUE activation_func)
 {
- (old lines 439-442 not shown in this view)
+  Check_Type(activation_func, T_SYMBOL);
+  struct fann* f;
+  Data_Get_Struct(self, struct fann, f);
+  fann_set_activation_function_hidden(f, sym_to_activation_function(activation_func));
+  return self;
 }

 /** call-seq: set_activation_function_layer(activation_func, layer)

 Set the activation function for all the neurons in the layer number *layer*,
- (old lines 448-453 not shown in this view)
+counting the input layer as layer 0. activation_func must be one of the following symbols:
+:linear, :threshold, :threshold_symmetric, :sigmoid, :sigmoid_stepwise, :sigmoid_symmetric,
+:sigmoid_symmetric_stepwise, :gaussian, :gaussian_symmetric, :gaussian_stepwise, :elliot,
+:elliot_symmetric, :linear_piece, :linear_piece_symmetric, :sin_symmetric, :cos_symmetric,
+:sin, :cos
+
 It is not possible to set activation functions for the neurons in the input layer.
-*/
+*/
 static VALUE set_activation_function_layer(VALUE self, VALUE activation_func, VALUE layer)
 {
- (old lines 458-462 not shown in this view)
+  Check_Type(activation_func, T_SYMBOL);
+  Check_Type(layer, T_FIXNUM);
+  struct fann* f;
+  Data_Get_Struct(self, struct fann, f);
+  fann_set_activation_function_layer(f, sym_to_activation_function(activation_func), NUM2INT(layer));
+  return self;
 }

 /** call-seq: get_activation_function(layer) -> return value
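Taken together, the setters above accept the activation function as a Ruby symbol at three granularities; a sketch, reusing the standard network from the earlier example:

  standard.set_activation_function_hidden(:sigmoid_symmetric)  # all hidden layers
  standard.set_activation_function_layer(:gaussian, 1)         # every neuron in layer 1
  standard.set_activation_function(:elliot, 1, 2)              # neuron 2 in layer 1 only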
@@ -471,28 +472,29 @@ static VALUE set_activation_function_layer(VALUE self, VALUE activation_func, VA
 */
 static VALUE get_activation_function(VALUE self, VALUE layer, VALUE neuron)
 {
- (old lines 474-479 not shown in this view)
+  Check_Type(layer, T_FIXNUM);
+  Check_Type(neuron, T_FIXNUM);
+  struct fann* f;
+  Data_Get_Struct(self, struct fann, f);
+  fann_type val = fann_get_activation_function(f, NUM2INT(layer), NUM2INT(neuron));
+  return activation_function_to_sym(val);
 }

 /** call-seq: set_activation_function_output(activation_func)

- (old lines 484-488 not shown in this view)
+Set the activation function for the output layer. activation_func must be one of the following symbols:
+:linear, :threshold, :threshold_symmetric, :sigmoid, :sigmoid_stepwise, :sigmoid_symmetric,
+:sigmoid_symmetric_stepwise, :gaussian, :gaussian_symmetric, :gaussian_stepwise, :elliot,
+:elliot_symmetric, :linear_piece, :linear_piece_symmetric, :sin_symmetric, :cos_symmetric,
+:sin, :cos*/

 static VALUE set_activation_function_output(VALUE self, VALUE activation_func)
 {
- (old lines 492-495 not shown in this view)
+  Check_Type(activation_func, T_SYMBOL);
+  struct fann* f;
+  Data_Get_Struct(self, struct fann, f);
+  fann_set_activation_function_output(f, sym_to_activation_function(activation_func));
+  return self;
 }

 /** call-seq: get_activation_steepness(layer, neuron) -> return value
@@ -501,12 +503,12 @@ static VALUE set_activation_function_output(VALUE self, VALUE activation_func)
 */
 static VALUE get_activation_steepness(VALUE self, VALUE layer, VALUE neuron)
 {
- (old lines 504-509 not shown in this view)
+  Check_Type(layer, T_FIXNUM);
+  Check_Type(neuron, T_FIXNUM);
+  struct fann* f;
+  Data_Get_Struct(self, struct fann, f);
+  fann_type val = fann_get_activation_steepness(f, NUM2INT(layer), NUM2INT(neuron));
+  return rb_float_new(val);
 }

 /** call-seq: set_activation_steepness(steepness, layer, neuron)
@@ -515,13 +517,14 @@ static VALUE get_activation_steepness(VALUE self, VALUE layer, VALUE neuron)
 counting the input layer as layer 0.*/
 static VALUE set_activation_steepness(VALUE self, VALUE steepness, VALUE layer, VALUE neuron)
 {
- (old lines 518-524 not shown in this view)
+  Check_Type(steepness, T_FLOAT);
+  Check_Type(layer, T_FIXNUM);
+  Check_Type(neuron, T_FIXNUM);
+
+  struct fann* f;
+  Data_Get_Struct(self, struct fann, f);
+  fann_set_activation_steepness(f, NUM2DBL(steepness), NUM2INT(layer), NUM2INT(neuron));
+  return self;
 }

 /** call-seq: set_activation_steepness_hidden(arg) -> return value
@@ -529,7 +532,7 @@ static VALUE set_activation_steepness(VALUE self, VALUE steepness, VALUE layer,
 Set the activation steepness in all of the hidden layers.*/
 static VALUE set_activation_steepness_hidden(VALUE self, VALUE steepness)
 {
-
+  SET_FANN_FLT(steepness, fann_set_activation_steepness_hidden);
 }

 /** call-seq: set_activation_steepness_layer(steepness, layer)
@@ -538,12 +541,13 @@ static VALUE set_activation_steepness_hidden(VALUE self, VALUE steepness)
 counting the input layer as layer 0.*/
 static VALUE set_activation_steepness_layer(VALUE self, VALUE steepness, VALUE layer)
 {
- (old lines 541-546 not shown in this view)
+  Check_Type(steepness, T_FLOAT);
+  Check_Type(layer, T_FIXNUM);
+
+  struct fann* f;
+  Data_Get_Struct(self, struct fann, f);
+  fann_set_activation_steepness_layer(f, NUM2DBL(steepness), NUM2INT(layer));
+  return self;
 }

 /** call-seq: set_activation_steepness_output(steepness)
@@ -551,13 +555,13 @@ static VALUE set_activation_steepness_layer(VALUE self, VALUE steepness, VALUE l
 Set the activation steepness in the output layer.*/
 static VALUE set_activation_steepness_output(VALUE self, VALUE steepness)
 {
-
+  SET_FANN_FLT(steepness, fann_set_activation_steepness_output);
 }

 /** Returns the bit fail limit used during training.*/
 static VALUE get_bit_fail_limit(VALUE self)
 {
-
+  RETURN_FANN_DBL(fann_get_bit_fail_limit);
 }

 /** call-seq: set_bit_fail_limit(bit_fail_limit)
@@ -565,7 +569,7 @@ static VALUE get_bit_fail_limit(VALUE self)
 Sets the bit fail limit used during training.*/
 static VALUE set_bit_fail_limit(VALUE self, VALUE bit_fail_limit)
 {
-
+  SET_FANN_FLT(bit_fail_limit, fann_set_bit_fail_limit);
 }

 /** The decay is a small negative valued number which is the factor that the weights
@@ -573,7 +577,7 @@ static VALUE set_bit_fail_limit(VALUE self, VALUE bit_fail_limit)
 to make sure that the weights do not become too high during training.*/
 static VALUE get_quickprop_decay(VALUE self)
 {
-
+  RETURN_FANN_FLT(fann_get_quickprop_decay);
 }

 /** call-seq: set_quickprop_decay(quickprop_decay)
@@ -581,7 +585,7 @@ static VALUE get_quickprop_decay(VALUE self)
 Sets the quickprop decay factor*/
 static VALUE set_quickprop_decay(VALUE self, VALUE quickprop_decay)
 {
-
+  SET_FANN_FLT(quickprop_decay, fann_set_quickprop_decay);
 }

 /** The mu factor is used to increase and decrease the step-size during quickprop training.
@@ -589,7 +593,7 @@ static VALUE set_quickprop_decay(VALUE self, VALUE quickprop_decay)
 when it was suppose to increase it. */
 static VALUE get_quickprop_mu(VALUE self)
 {
-
+  RETURN_FANN_FLT(fann_get_quickprop_mu);
 }

 /** call-seq: set_quickprop_mu(quickprop_mu)
@@ -597,14 +601,14 @@ static VALUE get_quickprop_mu(VALUE self)
 Sets the quickprop mu factor.*/
 static VALUE set_quickprop_mu(VALUE self, VALUE quickprop_mu)
 {
-
+  SET_FANN_FLT(quickprop_mu, fann_set_quickprop_mu);
 }

 /** The increase factor is a value larger than 1, which is used to
 increase the step-size during RPROP training.*/
 static VALUE get_rprop_increase_factor(VALUE self)
 {
-
+  RETURN_FANN_FLT(fann_get_rprop_increase_factor);
 }

 /** call-seq: set_rprop_increase_factor(rprop_increase_factor)
@@ -612,13 +616,13 @@ static VALUE get_rprop_increase_factor(VALUE self)
 The increase factor used during RPROP training. */
 static VALUE set_rprop_increase_factor(VALUE self, VALUE rprop_increase_factor)
 {
-
+  SET_FANN_FLT(rprop_increase_factor, fann_set_rprop_increase_factor);
 }

 /** The decrease factor is a value smaller than 1, which is used to decrease the step-size during RPROP training.*/
 static VALUE get_rprop_decrease_factor(VALUE self)
 {
-
+  RETURN_FANN_FLT(fann_get_rprop_decrease_factor);
 }

 /** call-seq: set_rprop_decrease_factor(rprop_decrease_factor)
@@ -626,13 +630,13 @@ static VALUE get_rprop_decrease_factor(VALUE self)
 The decrease factor is a value smaller than 1, which is used to decrease the step-size during RPROP training.*/
 static VALUE set_rprop_decrease_factor(VALUE self, VALUE rprop_decrease_factor)
 {
-
+  SET_FANN_FLT(rprop_decrease_factor, fann_set_rprop_decrease_factor);
 }

 /** The minimum step-size is a small positive number determining how small the minimum step-size may be.*/
 static VALUE get_rprop_delta_min(VALUE self)
 {
-
+  RETURN_FANN_FLT(fann_get_rprop_delta_min);
 }

 /** call-seq: set_rprop_delta_min(rprop_delta_min)
@@ -640,13 +644,13 @@ static VALUE get_rprop_delta_min(VALUE self)
 The minimum step-size is a small positive number determining how small the minimum step-size may be.*/
 static VALUE set_rprop_delta_min(VALUE self, VALUE rprop_delta_min)
 {
-
+  SET_FANN_FLT(rprop_delta_min, fann_set_rprop_delta_min);
 }

 /** The maximum step-size is a positive number determining how large the maximum step-size may be.*/
 static VALUE get_rprop_delta_max(VALUE self)
 {
-
+  RETURN_FANN_FLT(fann_get_rprop_delta_max);
 }

 /** call-seq: set_rprop_delta_max(rprop_delta_max)
@@ -654,13 +658,13 @@ static VALUE get_rprop_delta_max(VALUE self)
 The maximum step-size is a positive number determining how large the maximum step-size may be.*/
 static VALUE set_rprop_delta_max(VALUE self, VALUE rprop_delta_max)
 {
-
+  SET_FANN_FLT(rprop_delta_max, fann_set_rprop_delta_max);
 }

 /** The initial step-size is a positive number determining the initial step size.*/
 static VALUE get_rprop_delta_zero(VALUE self)
 {
-
+  RETURN_FANN_FLT(fann_get_rprop_delta_zero);
 }

 /** call-seq: set_rprop_delta_zero(rprop_delta_zero)
@@ -668,29 +672,29 @@ static VALUE get_rprop_delta_zero(VALUE self)
 The initial step-size is a positive number determining the initial step size.*/
 static VALUE set_rprop_delta_zero(VALUE self, VALUE rprop_delta_zero)
 {
-
+  SET_FANN_FLT(rprop_delta_zero, fann_set_rprop_delta_zero);
 }

 /** Return array of bias(es)*/
 static VALUE get_bias_array(VALUE self)
 {
- (old lines 677-693 not shown in this view)
+  struct fann* f;
+  unsigned int num_layers;
+  Data_Get_Struct (self, struct fann, f);
+  num_layers = fann_get_num_layers(f);
+  unsigned int layers[num_layers];
+  fann_get_bias_array(f, layers);
+
+  // Create ruby array & set outputs:
+  VALUE arr;
+  arr = rb_ary_new();
+  int i;
+  for (i=0; i<num_layers; i++)
+  {
+    rb_ary_push(arr, INT2NUM(layers[i]));
+  }
+
+  return arr;
 }

 /** The number of fail bits; means the number of output neurons which differ more
@@ -699,74 +703,74 @@ The bits are counted in all of the training data, so this number can be higher t
 the number of training data.*/
 static VALUE get_bit_fail(VALUE self)
 {
-
+  RETURN_FANN_INT(fann_get_bit_fail);
 }

 /** Get the connection rate used when the network was created.*/
 static VALUE get_connection_rate(VALUE self)
 {
-
+  RETURN_FANN_INT(fann_get_connection_rate);
 }

 /** call-seq: get_neurons(layer) -> return value

 Return array<hash> where each array element is a hash
 representing a neuron. It contains the following keys:
- (old lines 715-723 not shown in this view)
+:activation_function, symbol -- the activation function
+:activation_steepness=float -- The steepness of the activation function
+:sum=float -- The sum of the inputs multiplied with the weights
+:value=float -- The value of the activation fuction applied to the sum
+:connections=array<int> -- indices of connected neurons(inputs)
+
+This could be done more elegantly (e.g., defining more ruby ext classes).
+This method does not directly correlate to anything in FANN, and accesses
+structs that are not guaranteed to not change.
 */
 static VALUE get_neurons(VALUE self, VALUE layer)
 {
- (old lines 727-769 not shown in this view)
+  struct fann_layer *layer_it;
+  struct fann_neuron *neuron_it;
+
+  struct fann* f;
+  unsigned int i;
+  Data_Get_Struct (self, struct fann, f);
+
+  VALUE neuron_array = rb_ary_new();
+  VALUE activation_function_sym = ID2SYM(rb_intern("activation_function"));
+  VALUE activation_steepness_sym = ID2SYM(rb_intern("activation_steepness"));
+  VALUE layer_sym = ID2SYM(rb_intern("layer"));
+  VALUE sum_sym = ID2SYM(rb_intern("sum"));
+  VALUE value_sym = ID2SYM(rb_intern("value"));
+  VALUE connections_sym = ID2SYM(rb_intern("connections"));
+  unsigned int layer_num = 0;
+
+
+  int nuke_bias_neuron = (fann_get_network_type(f)==FANN_NETTYPE_LAYER);
+  for(layer_it = f->first_layer; layer_it != f->last_layer; layer_it++)
+  {
+    for(neuron_it = layer_it->first_neuron; neuron_it != layer_it->last_neuron; neuron_it++)
+    {
+      if (nuke_bias_neuron && (neuron_it==(layer_it->last_neuron)-1)) continue;
+      // Create array of connection indicies:
+      VALUE connection_array = rb_ary_new();
+      for (i = neuron_it->first_con; i < neuron_it->last_con; i++) {
+        rb_ary_push(connection_array, INT2NUM(f->connections[i] - f->first_layer->first_neuron));
+      }
+
+      VALUE neuron = rb_hash_new();
+
+      // Set attributes on hash & push on array:
+      rb_hash_aset(neuron, activation_function_sym, activation_function_to_sym(neuron_it->activation_function));
+      rb_hash_aset(neuron, activation_steepness_sym, rb_float_new(neuron_it->activation_steepness));
+      rb_hash_aset(neuron, layer_sym, INT2NUM(layer_num));
+      rb_hash_aset(neuron, sum_sym, rb_float_new(neuron_it->sum));
+      rb_hash_aset(neuron, value_sym, rb_float_new(neuron_it->value));
+      rb_hash_aset(neuron, connections_sym, connection_array);
+
+      rb_ary_push(neuron_array, neuron);
+    }
+    ++layer_num;
+  }

 // switch (fann_get_network_type(ann)) {
 // case FANN_NETTYPE_LAYER: {
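A sketch of consuming get_neurons from Ruby. Note the layer argument is accepted but never read in the implementation shown above, so nil is passed here; the hash keys come from the C code:

  standard.get_neurons(nil).each do |n|
    puts "layer=#{n[:layer]} fn=#{n[:activation_function]} value=#{n[:value]} connections=#{n[:connections].size}"
  end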
@@ -780,256 +784,261 @@ static VALUE get_neurons(VALUE self, VALUE layer)
 // case FANN_NETTYPE_SHORTCUT: {


-
+  return neuron_array;
 }

 /** Get list of layers in array format where each element contains number of neurons in that layer*/
 static VALUE get_layer_array(VALUE self)
 {
- (old lines 789-804 not shown in this view)
+  struct fann* f;
+  unsigned int num_layers;
+  Data_Get_Struct (self, struct fann, f);
+  num_layers = fann_get_num_layers(f);
+  unsigned int layers[num_layers];
+  fann_get_layer_array(f, layers);
+
+  // Create ruby array & set outputs:
+  VALUE arr;
+  arr = rb_ary_new();
+  int i;
+  for (i=0; i<num_layers; i++)
+  {
+    rb_ary_push(arr, INT2NUM(layers[i]));
+  }
+
   return arr;
 }

 /** Reads the mean square error from the network.*/
 static VALUE get_MSE(VALUE self)
 {
-
+  RETURN_FANN_DBL(fann_get_MSE);
 }

 /** Resets the mean square error from the network.
-
+This function also resets the number of bits that fail.*/
 static VALUE reset_MSE(VALUE self)
 {
- (old lines 818-820 not shown in this view)
+  struct fann* f;
+  Data_Get_Struct (self, struct fann, f);
+  fann_reset_MSE(f);
+  return self;
 }

 /** Get the type of network. Returns as ruby symbol (one of :shortcut, :layer)*/
 static VALUE get_network_type(VALUE self)
 {
- (old lines 826-829 not shown in this view)
+  struct fann* f;
+  enum fann_nettype_enum net_type;
+  VALUE ret_val;
+  Data_Get_Struct (self, struct fann, f);

- (old lines 831-841 not shown in this view)
+  net_type = fann_get_network_type(f);
+
+  if(net_type==FANN_NETTYPE_LAYER)
+  {
+    ret_val = ID2SYM(rb_intern("layer")); // (rb_str_new2("FANN_NETTYPE_LAYER"));
+  }
+  else if(net_type==FANN_NETTYPE_SHORTCUT)
+  {
+    ret_val = ID2SYM(rb_intern("shortcut")); // (rb_str_new2("FANN_NETTYPE_SHORTCUT"));
+  }
+  return ret_val;
 }

 /** Get the number of input neurons.*/
 static VALUE get_num_input(VALUE self)
 {
-
+  RETURN_FANN_INT(fann_get_num_input);
 }
-
+
 /** Get the number of layers in the network.*/
 static VALUE get_num_layers(VALUE self)
 {
-
+  RETURN_FANN_INT(fann_get_num_layers);
 }

 /** Get the number of output neurons.*/
 static VALUE get_num_output(VALUE self)
 {
-
+  RETURN_FANN_INT(fann_get_num_output);
 }

 /** Get the total number of connections in the entire network.*/
 static VALUE get_total_connections(VALUE self)
 {
-
+  RETURN_FANN_INT(fann_get_total_connections);
 }

 /** Get the total number of neurons in the entire network.*/
 static VALUE get_total_neurons(VALUE self)
 {
-
+  RETURN_FANN_INT(fann_get_total_neurons);
 }

 /** call-seq: set_train_error_function(train_error_function)

 Sets the error function used during training. One of the following symbols:
-
+:linear, :tanh */
 static VALUE set_train_error_function(VALUE self, VALUE train_error_function)
 {
- (old lines 880-883 not shown in this view)
+  Check_Type(train_error_function, T_SYMBOL);
+
+  ID id=SYM2ID(train_error_function);
+  enum fann_errorfunc_enum fann_train_error_function;

- (old lines 885-891 not shown in this view)
+  if(id==rb_intern("linear")) {
+    fann_train_error_function = FANN_ERRORFUNC_LINEAR;
+  } else if(id==rb_intern("tanh")) {
+    fann_train_error_function = FANN_ERRORFUNC_TANH;
+  } else {
+    rb_raise(rb_eRuntimeError, "Unrecognized train error function: [%s]", rb_id2name(SYM2ID(train_error_function)));
+  }

- (old lines 893-895 not shown in this view)
+  struct fann* f;
+  Data_Get_Struct (self, struct fann, f);
+  fann_set_train_error_function(f, fann_train_error_function);
+  return self;
 }

 /** Returns the error function used during training. One of the following symbols:
-
+:linear, :tanh*/
 static VALUE get_train_error_function(VALUE self)
 {
- (old lines 902-905 not shown in this view)
+  struct fann* f;
+  enum fann_errorfunc_enum train_error;
+  VALUE ret_val;
+  Data_Get_Struct (self, struct fann, f);

- (old lines 907-917 not shown in this view)
+  train_error = fann_get_train_error_function(f);
+
+  if(train_error==FANN_ERRORFUNC_LINEAR)
+  {
+    ret_val = ID2SYM(rb_intern("linear"));
+  }
+  else if(train_error==FANN_ERRORFUNC_TANH)
+  {
+    ret_val = ID2SYM(rb_intern("tanh"));
+  }
+  return ret_val;
 }

 /** call-seq: set_training_algorithm(train_error_function)

 Set the training algorithm. One of the following symbols:
-
+:incremental, :batch, :rprop, :quickprop */
 static VALUE set_training_algorithm(VALUE self, VALUE train_error_function)
 {
- (old lines 926-945 not shown in this view)
+  Check_Type(train_error_function, T_SYMBOL);
+
+  ID id=SYM2ID(train_error_function);
+  enum fann_train_enum fann_train_algorithm;
+
+  if(id==rb_intern("incremental")) {
+    fann_train_algorithm = FANN_TRAIN_INCREMENTAL;
+  } else if(id==rb_intern("batch")) {
+    fann_train_algorithm = FANN_TRAIN_BATCH;
+  } else if(id==rb_intern("rprop")) {
+    fann_train_algorithm = FANN_TRAIN_RPROP;
+  } else if(id==rb_intern("quickprop")) {
+    fann_train_algorithm = FANN_TRAIN_QUICKPROP;
+  } else {
+    rb_raise(rb_eRuntimeError, "Unrecognized training algorithm function: [%s]", rb_id2name(SYM2ID(train_error_function)));
+  }
+
+  struct fann* f;
+  Data_Get_Struct (self, struct fann, f);
+  fann_set_training_algorithm(f, fann_train_algorithm);
+  return self;
 }

 /** Returns the training algorithm. One of the following symbols:
-
+:incremental, :batch, :rprop, :quickprop */
 static VALUE get_training_algorithm(VALUE self)
 {
- (old lines 952-968 not shown in this view)
+  struct fann* f;
+  enum fann_train_enum fann_train_algorithm;
+  VALUE ret_val;
+  Data_Get_Struct (self, struct fann, f);
+
+  fann_train_algorithm = fann_get_training_algorithm(f);
+
+  if(fann_train_algorithm==FANN_TRAIN_INCREMENTAL) {
+    ret_val = ID2SYM(rb_intern("incremental"));
+  } else if(fann_train_algorithm==FANN_TRAIN_BATCH) {
+    ret_val = ID2SYM(rb_intern("batch"));
+  } else if(fann_train_algorithm==FANN_TRAIN_RPROP) {
+    ret_val = ID2SYM(rb_intern("rprop"));
+  } else if(fann_train_algorithm==FANN_TRAIN_QUICKPROP) {
+    ret_val = ID2SYM(rb_intern("quickprop"));
+  }
+  return ret_val;
 }

 /** call-seq: set_train_stop_function(train_stop_function) -> return value

 Set the training stop function. One of the following symbols:
-
+:mse, :bit */
 static VALUE set_train_stop_function(VALUE self, VALUE train_stop_function)
 {
- (old lines 977-979 not shown in this view)
+  Check_Type(train_stop_function, T_SYMBOL);
+  ID id=SYM2ID(train_stop_function);
+  enum fann_stopfunc_enum fann_train_stop_function;

- (old lines 981-987 not shown in this view)
+  if(id==rb_intern("mse")) {
+    fann_train_stop_function = FANN_STOPFUNC_MSE;
+  } else if(id==rb_intern("bit")) {
+    fann_train_stop_function = FANN_STOPFUNC_BIT;
+  } else {
+    rb_raise(rb_eRuntimeError, "Unrecognized stop function: [%s]", rb_id2name(SYM2ID(train_stop_function)));
+  }

- (old lines 989-991 not shown in this view)
+  struct fann* f;
+  Data_Get_Struct (self, struct fann, f);
+  fann_set_train_stop_function(f, fann_train_stop_function);
+  return self;
 }

 /** Returns the training stop function. One of the following symbols:
-
+:mse, :bit */
 static VALUE get_train_stop_function(VALUE self)
 {
- (old lines 998-1001 not shown in this view)
+  struct fann* f;
+  enum fann_stopfunc_enum train_stop;
+  VALUE ret_val;
+  Data_Get_Struct (self, struct fann, f);

- (old lines 1003-1013 not shown in this view)
+  train_stop = fann_get_train_stop_function(f);
+
+  if(train_stop==FANN_STOPFUNC_MSE)
+  {
+    ret_val = ID2SYM(rb_intern("mse")); // (rb_str_new2("FANN_NETTYPE_LAYER"));
+  }
+  else if(train_stop==FANN_STOPFUNC_BIT)
+  {
+    ret_val = ID2SYM(rb_intern("bit")); // (rb_str_new2("FANN_NETTYPE_SHORTCUT"));
+  }
+  return ret_val;
 }


 /** Will print the connections of the ann in a compact matrix,
-
+for easy viewing of the internals of the ann. */
 static VALUE print_connections(VALUE self)
 {
- (old lines 1021-1023 not shown in this view)
+  struct fann* f;
+  Data_Get_Struct (self, struct fann, f);
+  fann_print_connections(f);
+  return self;
 }

 /** Print current NN parameters to stdout */
 static VALUE print_parameters(VALUE self)
 {
- (old lines 1029-1032 not shown in this view)
+  struct fann* f;
+  Data_Get_Struct (self, struct fann, f);
+  fann_print_parameters(f);
+  return Qnil;
 }

 /** call-seq: randomize_weights(min_weight, max_weight)
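The training-configuration setters above all follow the same symbol-to-enum pattern; a combined sketch:

  standard.set_training_algorithm(:rprop)    # :incremental, :batch, :rprop or :quickprop
  standard.set_train_error_function(:tanh)   # :linear or :tanh
  standard.set_train_stop_function(:bit)     # :mse or :bit
  standard.get_training_algorithm            # => :rprop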
@@ -1037,11 +1046,12 @@ static VALUE print_parameters(VALUE self)
 Give each connection a random weight between *min_weight* and *max_weight* */
 static VALUE randomize_weights(VALUE self, VALUE min_weight, VALUE max_weight)
 {
- (old lines 1040-1044 not shown in this view)
+  Check_Type(min_weight, T_FLOAT);
+  Check_Type(max_weight, T_FLOAT);
+  struct fann* f;
+  Data_Get_Struct (self, struct fann, f);
+  fann_randomize_weights(f, NUM2DBL(min_weight), NUM2DBL(max_weight));
+  return self;
 }

 /** call-seq: run(inputs) -> return value
@@ -1050,34 +1060,34 @@ static VALUE randomize_weights(VALUE self, VALUE min_weight, VALUE max_weight)
 Returns array<Float> as output */
 static VALUE run (VALUE self, VALUE inputs)
 {
-
+  Check_Type(inputs, T_ARRAY);

   struct fann* f;
- (old lines 1056-1068 not shown in this view)
+  int i;
+  fann_type* outputs;
+
+  // Convert inputs to type needed for NN:
+  unsigned int len = RARRAY(inputs)->len;
+  fann_type fann_inputs[len];
+  for (i=0; i<len; i++)
+  {
+    fann_inputs[i] = NUM2DBL(RARRAY(inputs)->ptr[i]);
+  }
+
+
+  // Obtain NN & run method:
   Data_Get_Struct (self, struct fann, f);
- (old lines 1070-1080 not shown in this view)
+  outputs = fann_run(f, fann_inputs);
+
+  // Create ruby array & set outputs:
+  VALUE arr;
+  arr = rb_ary_new();
+  unsigned int output_len=fann_get_num_output(f);
+  for (i=0; i<output_len; i++)
+  {
+    rb_ary_push(arr, rb_float_new(outputs[i]));
+  }
+
   return arr;
 }

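A sketch of run, which takes one value per input neuron and returns an array with one Float per output neuron:

  outputs = standard.run([0.3])   # network built with :num_inputs=>1
  puts outputs.first              # first output neuron's value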
@@ -1086,15 +1096,16 @@ static VALUE run (VALUE self, VALUE inputs)
 Initialize the weights using Widrow + Nguyen's algorithm. */
 static VALUE init_weights(VALUE self, VALUE train_data)
 {
- (old lines 1089-1095 not shown in this view)
+
+  Check_Type(train_data, T_DATA);
+
+  struct fann* f;
+  struct fann_train_data* t;
+  Data_Get_Struct (self, struct fann, f);
+  Data_Get_Struct (train_data, struct fann_train_data, t);

-
+  fann_init_weights(f, t);
+  return self;
 }

@@ -1102,26 +1113,27 @@ static VALUE init_weights(VALUE self, VALUE train_data)
 /** call-seq: train_on_data(train_data, max_epochs, epochs_between_reports, desired_error)

 Train with training data created with RubyFann::TrainData.new
- (old lines 1105-1108 not shown in this view)
+max_epochs - The maximum number of epochs the training should continue
+epochs_between_reports - The number of epochs between printing a status report to stdout.
+desired_error - The desired <get_MSE> or <get_bit_fail>, depending on which stop function
+is chosen by <set_train_stop_function>. */
 static VALUE train_on_data(VALUE self, VALUE train_data, VALUE max_epochs, VALUE epochs_between_reports, VALUE desired_error)
 {
- (old lines 1111-1119 not shown in this view)
+  Check_Type(train_data, T_DATA);
+  Check_Type(max_epochs, T_FIXNUM);
+  Check_Type(epochs_between_reports, T_FIXNUM);
+  Check_Type(desired_error, T_FLOAT);
+
+  struct fann* f;
+  struct fann_train_data* t;
+  Data_Get_Struct (self, struct fann, f);
+  Data_Get_Struct (train_data, struct fann_train_data, t);

- (old lines 1121-1124 not shown in this view)
+  unsigned int fann_max_epochs = NUM2INT(max_epochs);
+  unsigned int fann_epochs_between_reports = NUM2INT(epochs_between_reports);
+  float fann_desired_error = NUM2DBL(desired_error);
+  fann_train_on_data(f, t, fann_max_epochs, fann_epochs_between_reports, fann_desired_error);
+  return rb_int_new(0);
 }

 /** call-seq: train_epoch(train_data) -> return value
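A sketch of train_on_data with the parameters documented above, reusing the TrainData object from the earlier example:

  # up to 1000 epochs, report every 10 epochs, stop once the error drops below 0.001
  standard.train_on_data(train, 1000, 10, 0.001)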
@@ -1129,12 +1141,12 @@ static VALUE train_on_data(VALUE self, VALUE train_data, VALUE max_epochs, VALUE
 Train one epoch with a set of training data, created with RubyFann::TrainData.new */
 static VALUE train_epoch(VALUE self, VALUE train_data)
 {
- (old lines 1132-1137 not shown in this view)
+  Check_Type(train_data, T_DATA);
+  struct fann* f;
+  struct fann_train_data* t;
+  Data_Get_Struct (self, struct fann, f);
+  Data_Get_Struct (train_data, struct fann_train_data, t);
+  return rb_float_new(fann_train_epoch(f, t));
 }

 /** call-seq: test_data(train_data) -> return value
@@ -1142,64 +1154,64 @@ static VALUE train_epoch(VALUE self, VALUE train_data)
|
|
1142
1154
|
Test a set of training data and calculates the MSE for the training data. */
|
1143
1155
|
static VALUE test_data(VALUE self, VALUE train_data)
|
1144
1156
|
{
|
1145
|
-
|
1146
|
-
|
1147
|
-
|
1148
|
-
|
1149
|
-
|
1150
|
-
|
1157
|
+
Check_Type(train_data, T_DATA);
|
1158
|
+
struct fann* f;
|
1159
|
+
struct fann_train_data* t;
|
1160
|
+
Data_Get_Struct (self, struct fann, f);
|
1161
|
+
Data_Get_Struct (train_data, struct fann_train_data, t);
|
1162
|
+
return rb_float_new(fann_test_data(f, t));
|
1151
1163
|
}
|
1152
1164
|
|
1153
1165
|
// Returns the position of the decimal point in the ann.
|
1154
1166
|
// Only available in fixed-point mode, which we don't need:
|
1155
1167
|
// static VALUE get_decimal_point(VALUE self)
|
1156
1168
|
// {
|
1157
|
-
//
|
1158
|
-
//
|
1159
|
-
//
|
1169
|
+
// struct fann* f;
|
1170
|
+
// Data_Get_Struct (self, struct fann, f);
|
1171
|
+
// return INT2NUM(fann_get_decimal_point(f));
|
1160
1172
|
// }
|
1161
|
-
|
1173
|
+
|
1162
1174
|
// returns the multiplier that fix point data is multiplied with.
|
1163
1175
|
|
1164
1176
|
// Only available in fixed-point mode, which we don't need:
|
1165
1177
|
// static VALUE get_multiplier(VALUE self)
|
1166
1178
|
// {
|
1167
|
-
//
|
1168
|
-
//
|
1169
|
-
//
|
1179
|
+
// struct fann* f;
|
1180
|
+
// Data_Get_Struct (self, struct fann, f);
|
1181
|
+
// return INT2NUM(fann_get_multiplier(f));
|
1170
1182
|
// }
|
1171
1183
|
|
1172
1184
|
/** call-seq: cascadetrain_on_data(train_data, max_neurons, neurons_between_reports, desired_error)
|
1173
1185
|
|
1174
1186
|
Perform cascade training with training data created with RubyFann::TrainData.new
|
1175
|
-
|
1176
|
-
|
1177
|
-
|
1178
|
-
|
1187
|
+
max_epochs - The maximum number of neurons in trained network
|
1188
|
+
neurons_between_reports - The number of neurons between printing a status report to stdout.
|
1189
|
+
desired_error - The desired <get_MSE> or <get_bit_fail>, depending on which stop function
|
1190
|
+
is chosen by <set_train_stop_function>. */
|
1179
1191
|
static VALUE cascadetrain_on_data(VALUE self, VALUE train_data, VALUE max_neurons, VALUE neurons_between_reports, VALUE desired_error)
|
1180
1192
|
{
|
- [17 old lines removed; content not shown in this view]
+  Check_Type(train_data, T_DATA);
+  Check_Type(max_neurons, T_FIXNUM);
+  Check_Type(neurons_between_reports, T_FIXNUM);
+  Check_Type(desired_error, T_FLOAT);
+
+  struct fann* f;
+  struct fann_train_data* t;
+  Data_Get_Struct (self, struct fann, f);
+  Data_Get_Struct (train_data, struct fann_train_data, t);
+
+  unsigned int fann_max_neurons = NUM2INT(max_neurons);
+  unsigned int fann_neurons_between_reports = NUM2INT(neurons_between_reports);
+  float fann_desired_error = NUM2DBL(desired_error);
+
+  fann_cascadetrain_on_data(f, t, fann_max_neurons, fann_neurons_between_reports, fann_desired_error);
+  return self;
+}
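The wrapped call is FANN's cascade-correlation trainer: it starts from a network with no hidden neurons (typically one created with fann_create_shortcut) and inserts candidate neurons one at a time until max_neurons is reached or desired_error is met. A sketch in plain C, assuming a shortcut network and a placeholder data file:

    #include "doublefann.h"

    int main(void)
    {
      /* Shortcut network: 2 inputs wired directly to 1 output; cascade
         training grows the hidden structure on its own. */
      struct fann *ann = fann_create_shortcut(2, 2, 1);
      struct fann_train_data *data = fann_read_train_from_file("xor.data");

      /* Add up to 30 neurons, report after each one, stop at MSE 0.001. */
      fann_cascadetrain_on_data(ann, data, 30, 1, 0.001f);

      fann_destroy_train(data);
      fann_destroy(ann);
      return 0;
    }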
 
 /** The cascade output change fraction is a number between 0 and 1 */
 static VALUE get_cascade_output_change_fraction(VALUE self)
 {
-
+  RETURN_FANN_FLT(fann_get_cascade_output_change_fraction);
 }
 
 /** call-seq: set_cascade_output_change_fraction(cascade_output_change_fraction)
@@ -1207,29 +1219,29 @@ static VALUE get_cascade_output_change_fraction(VALUE self)
 
     The cascade output change fraction is a number between 0 and 1 */
 static VALUE set_cascade_output_change_fraction(VALUE self, VALUE cascade_output_change_fraction)
 {
-
+  SET_FANN_FLT(cascade_output_change_fraction, fann_set_cascade_output_change_fraction);
 }
 
 /** The number of cascade output stagnation epochs determines the number of epochs training is allowed to
-
+    continue without changing the MSE by a fraction of <get_cascade_output_change_fraction>. */
 static VALUE get_cascade_output_stagnation_epochs(VALUE self)
 {
-
+  RETURN_FANN_INT(fann_get_cascade_output_stagnation_epochs);
 }
 
 /** call-seq: set_cascade_output_stagnation_epochs(cascade_output_stagnation_epochs)
 
     The number of cascade output stagnation epochs determines the number of epochs training is allowed to
-
+    continue without changing the MSE by a fraction of <get_cascade_output_change_fraction>. */
 static VALUE set_cascade_output_stagnation_epochs(VALUE self, VALUE cascade_output_stagnation_epochs)
 {
-
+  SET_FANN_INT(cascade_output_stagnation_epochs, fann_set_cascade_output_stagnation_epochs);
 }
 
 /** The cascade candidate change fraction is a number between 0 and 1 */
 static VALUE get_cascade_candidate_change_fraction(VALUE self)
 {
-
+  RETURN_FANN_FLT(fann_get_cascade_candidate_change_fraction);
 }
 
 /** call-seq: set_cascade_candidate_change_fraction(cascade_candidate_change_fraction)
@@ -1237,128 +1249,128 @@ static VALUE get_cascade_candidate_change_fraction(VALUE self)
 
     The cascade candidate change fraction is a number between 0 and 1 */
 static VALUE set_cascade_candidate_change_fraction(VALUE self, VALUE cascade_candidate_change_fraction)
 {
-
+  SET_FANN_FLT(cascade_candidate_change_fraction, fann_set_cascade_candidate_change_fraction);
 }
 
 /** The number of cascade candidate stagnation epochs determines the number of epochs training is allowed to
-
+    continue without changing the MSE by a fraction of <get_cascade_candidate_change_fraction>. */
 static VALUE get_cascade_candidate_stagnation_epochs(VALUE self)
 {
-
+  RETURN_FANN_UINT(fann_get_cascade_candidate_stagnation_epochs);
 }
 
 /** call-seq: set_cascade_candidate_stagnation_epochs(cascade_candidate_stagnation_epochs)
 
     The number of cascade candidate stagnation epochs determines the number of epochs training is allowed to
-
+    continue without changing the MSE by a fraction of <get_cascade_candidate_change_fraction>. */
 static VALUE set_cascade_candidate_stagnation_epochs(VALUE self, VALUE cascade_candidate_stagnation_epochs)
 {
-
+  SET_FANN_UINT(cascade_candidate_stagnation_epochs, fann_set_cascade_candidate_stagnation_epochs);
 }
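These four accessors control when cascade training gives up on the current phase: training of the output connections (or of the candidates) stops once the MSE has failed to improve by the given fraction for the given number of epochs. A sketch of tuning these criteria through the corresponding C calls (the values shown are FANN's documented defaults, used here only as illustration):

    #include "doublefann.h"

    /* Configure how quickly cascade training declares stagnation. */
    static void tune_cascade_stopping(struct fann *ann)
    {
      fann_set_cascade_output_change_fraction(ann, 0.01f);
      fann_set_cascade_output_stagnation_epochs(ann, 12);
      fann_set_cascade_candidate_change_fraction(ann, 0.01f);
      fann_set_cascade_candidate_stagnation_epochs(ann, 12);
    }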
 
 /** The weight multiplier is a parameter which is used to multiply the weights from the candidate neuron
-
-
+    before adding the neuron to the neural network. This parameter is usually between 0 and 1, and is used
+    to make the training a bit less aggressive. */
 static VALUE get_cascade_weight_multiplier(VALUE self)
 {
-
+  RETURN_FANN_DBL(fann_get_cascade_weight_multiplier);
 }
 
 /** call-seq: set_cascade_weight_multiplier(cascade_weight_multiplier)
 
     The weight multiplier is a parameter which is used to multiply the weights from the candidate neuron
-
-
+    before adding the neuron to the neural network. This parameter is usually between 0 and 1, and is used
+    to make the training a bit less aggressive. */
 static VALUE set_cascade_weight_multiplier(VALUE self, VALUE cascade_weight_multiplier)
 {
-
+  SET_FANN_DBL(cascade_weight_multiplier, fann_set_cascade_weight_multiplier);
 }
 
 /** The candidate limit is a limit for how much the candidate neuron may be trained.
-
+    The limit is a limit on the proportion between the MSE and candidate score. */
 static VALUE get_cascade_candidate_limit(VALUE self)
 {
-
+  RETURN_FANN_DBL(fann_get_cascade_candidate_limit);
 }
 
 /** call-seq: set_cascade_candidate_limit(cascade_candidate_limit)
 
     The candidate limit is a limit for how much the candidate neuron may be trained.
-
+    The limit is a limit on the proportion between the MSE and candidate score. */
 static VALUE set_cascade_candidate_limit(VALUE self, VALUE cascade_candidate_limit)
 {
-
+  SET_FANN_DBL(cascade_candidate_limit, fann_set_cascade_candidate_limit);
 }
 
 /** The maximum out epochs determines the maximum number of epochs the output connections
-
+    may be trained after adding a new candidate neuron. */
 static VALUE get_cascade_max_out_epochs(VALUE self)
 {
-
+  RETURN_FANN_UINT(fann_get_cascade_max_out_epochs);
 }
 
 /** call-seq: set_cascade_max_out_epochs(cascade_max_out_epochs)
 
     The maximum out epochs determines the maximum number of epochs the output connections
-
+    may be trained after adding a new candidate neuron. */
 static VALUE set_cascade_max_out_epochs(VALUE self, VALUE cascade_max_out_epochs)
 {
-
+  SET_FANN_UINT(cascade_max_out_epochs, fann_set_cascade_max_out_epochs);
 }
 
 /** The maximum candidate epochs determines the maximum number of epochs the input
-
+    connections to the candidates may be trained before adding a new candidate neuron. */
 static VALUE get_cascade_max_cand_epochs(VALUE self)
 {
-
+  RETURN_FANN_UINT(fann_get_cascade_max_cand_epochs);
 }
 
 /** call-seq: set_cascade_max_cand_epochs(cascade_max_cand_epochs)
 
     The maximum candidate epochs determines the maximum number of epochs the input
-
+    connections to the candidates may be trained before adding a new candidate neuron. */
 static VALUE set_cascade_max_cand_epochs(VALUE self, VALUE cascade_max_cand_epochs)
 {
-
+  SET_FANN_UINT(cascade_max_cand_epochs, fann_set_cascade_max_cand_epochs);
 }
 
 /** The number of candidates used during training (calculated by multiplying <get_cascade_activation_functions_count>,
-
+    <get_cascade_activation_steepnesses_count> and <get_cascade_num_candidate_groups>). */
 static VALUE get_cascade_num_candidates(VALUE self)
 {
-
+  RETURN_FANN_UINT(fann_get_cascade_num_candidates);
 }
 
 /** The number of activation functions in the <get_cascade_activation_functions> array */
 static VALUE get_cascade_activation_functions_count(VALUE self)
 {
-
+  RETURN_FANN_UINT(fann_get_cascade_activation_functions_count);
 }
 
 /** The learning rate is used to determine how aggressive training should be for some of the
-
-
-
+    training algorithms (:incremental, :batch, :quickprop).
+    Note, however, that it is not used in :rprop.
+    The default learning rate is 0.7. */
 static VALUE get_learning_rate(VALUE self)
 {
-
+  RETURN_FANN_FLT(fann_get_learning_rate);
 }
 
 /** call-seq: set_learning_rate(learning_rate) -> return value
 
     The learning rate is used to determine how aggressive training should be for some of the
-
-
-
+    training algorithms (:incremental, :batch, :quickprop).
+    Note, however, that it is not used in :rprop.
+    The default learning rate is 0.7. */
 static VALUE set_learning_rate(VALUE self, VALUE learning_rate)
 {
-
+  SET_FANN_FLT(learning_rate, fann_set_learning_rate);
 }
 
 /** Get the learning momentum. */
 static VALUE get_learning_momentum(VALUE self)
 {
-
+  RETURN_FANN_FLT(fann_get_learning_momentum);
 }
 
 /** call-seq: set_learning_momentum(learning_momentum) -> return value
@@ -1366,111 +1378,113 @@ static VALUE get_learning_momentum(VALUE self)
 
     Set the learning momentum. */
 static VALUE set_learning_momentum(VALUE self, VALUE learning_momentum)
 {
-
+  SET_FANN_FLT(learning_momentum, fann_set_learning_momentum);
 }
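At the C level these map to fann_set_learning_rate and fann_set_learning_momentum; as the comments note, they only matter for algorithms such as incremental training, not for RPROP. A sketch (values are illustrative):

    #include "doublefann.h"

    /* Configure incremental (online) backpropagation. */
    static void use_incremental_training(struct fann *ann)
    {
      fann_set_training_algorithm(ann, FANN_TRAIN_INCREMENTAL);
      fann_set_learning_rate(ann, 0.7f);       /* FANN's default */
      fann_set_learning_momentum(ann, 0.1f);   /* 0 disables momentum */
    }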
 
 /** call-seq: set_cascade_activation_functions(cascade_activation_functions)
 
     The cascade activation functions is an array of the different activation functions used by
-
+    the candidates. The default is [:sigmoid, :sigmoid_symmetric, :gaussian, :gaussian_symmetric, :elliot, :elliot_symmetric] */
 static VALUE set_cascade_activation_functions(VALUE self, VALUE cascade_activation_functions)
 {
- [13 old lines removed; content not shown in this view]
+  Check_Type(cascade_activation_functions, T_ARRAY);
+  struct fann* f;
+  Data_Get_Struct (self, struct fann, f);
+
+  unsigned int cnt = RARRAY(cascade_activation_functions)->len;
+  enum fann_activationfunc_enum fann_activation_functions[cnt];
+  int i;
+  for (i=0; i<cnt; i++)
+  {
+    fann_activation_functions[i] = sym_to_activation_function(RARRAY(cascade_activation_functions)->ptr[i]);
+  }
+
+  fann_set_cascade_activation_functions(f, fann_activation_functions, cnt);
+  return self;
 }
 
 /** The cascade activation functions is an array of the different activation functions used by
-
+    the candidates. The default is [:sigmoid, :sigmoid_symmetric, :gaussian, :gaussian_symmetric, :elliot, :elliot_symmetric] */
 static VALUE get_cascade_activation_functions(VALUE self)
 {
- [4 old lines removed; content not shown in this view]
+  struct fann* f;
+  Data_Get_Struct (self, struct fann, f);
+  unsigned int cnt = fann_get_cascade_activation_functions_count(f);
+  enum fann_activationfunc_enum* fann_functions = fann_get_cascade_activation_functions(f);
 
- [8 old lines removed; content not shown in this view]
+  // Create ruby array & set outputs:
+  VALUE arr;
+  arr = rb_ary_new();
+  int i;
+  for (i=0; i<cnt; i++)
+  {
+    rb_ary_push(arr, activation_function_to_sym(fann_functions[i]));
+  }
 
-
+  return arr;
 }
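The setter above converts an array of Ruby symbols into FANN's enum array (note the Ruby 1.8-era RARRAY(...)->len and ->ptr accessors) and hands it to fann_set_cascade_activation_functions. The equivalent direct C call, as a sketch:

    #include "doublefann.h"

    /* Restrict cascade candidates to two activation functions. */
    static void limit_candidate_functions(struct fann *ann)
    {
      enum fann_activationfunc_enum funcs[] = {
        FANN_SIGMOID_SYMMETRIC,
        FANN_ELLIOT
      };
      fann_set_cascade_activation_functions(ann, funcs, 2);
    }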
 
 /** The number of activation steepnesses in the <get_cascade_activation_steepnesses> array. */
 static VALUE get_cascade_activation_steepnesses_count(VALUE self)
 {
-
+  RETURN_FANN_UINT(fann_get_cascade_activation_steepnesses_count);
 }
 
 /** The number of candidate groups is the number of groups of identical candidates which will be used
-
+    during training. */
 static VALUE get_cascade_num_candidate_groups(VALUE self)
 {
-
+  RETURN_FANN_UINT(fann_get_cascade_num_candidate_groups);
 }
 
 /** call-seq: set_cascade_num_candidate_groups(cascade_num_candidate_groups)
 
     The number of candidate groups is the number of groups of identical candidates which will be used
-
+    during training. */
 static VALUE set_cascade_num_candidate_groups(VALUE self, VALUE cascade_num_candidate_groups)
 {
-
+  SET_FANN_UINT(cascade_num_candidate_groups, fann_set_cascade_num_candidate_groups);
 }
 
 /** The cascade activation steepnesses array is an array of the different activation steepnesses used by
-
+    the candidates. */
 static VALUE set_cascade_activation_steepnesses(VALUE self, VALUE cascade_activation_steepnesses)
 {
- [13 old lines removed; content not shown in this view]
+  Check_Type(cascade_activation_steepnesses, T_ARRAY);
+  struct fann* f;
+  Data_Get_Struct (self, struct fann, f);
+
+  unsigned int cnt = RARRAY(cascade_activation_steepnesses)->len;
+  fann_type fann_activation_steepnesses[cnt];
+  int i;
+  for (i=0; i<cnt; i++)
+  {
+    fann_activation_steepnesses[i] = NUM2DBL(RARRAY(cascade_activation_steepnesses)->ptr[i]);
+  }
+
+  fann_set_cascade_activation_steepnesses(f, fann_activation_steepnesses, cnt);
+  return self;
 }
 
 /** The cascade activation steepnesses array is an array of the different activation steepnesses used by
-
+    the candidates. */
 static VALUE get_cascade_activation_steepnesses(VALUE self)
 {
- [4 old lines removed; content not shown in this view]
+  struct fann* f;
+  Data_Get_Struct (self, struct fann, f);
+  fann_type* fann_steepnesses = fann_get_cascade_activation_steepnesses(f);
+  unsigned int cnt = fann_get_cascade_activation_steepnesses_count(f);
 
- [8 old lines removed; content not shown in this view]
+  // Create ruby array & set outputs:
+  VALUE arr;
+  arr = rb_ary_new();
+  int i;
+  for (i=0; i<cnt; i++)
+  {
+    rb_ary_push(arr, rb_float_new(fann_steepnesses[i]));
+  }
 
-
+  return arr;
 }
 
 /** call-seq: save(filename) -> return status
@@ -1478,219 +1492,219 @@ static VALUE get_cascade_activation_steepnesses(VALUE self)
 
     Save the entire network to a configuration file with the given name */
 static VALUE nn_save(VALUE self, VALUE filename)
 {
- [4 old lines removed; content not shown in this view]
+  struct fann* f;
+  Data_Get_Struct (self, struct fann, f);
+  int status = fann_save(f, StringValuePtr(filename));
+  return INT2NUM(status);
 }
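fann_save writes the whole network (topology, weights, parameters) to a plain-text .net configuration file and returns 0 on success and -1 on failure; fann_create_from_file restores it. A round-trip sketch (the file name is a placeholder):

    #include "doublefann.h"

    int main(void)
    {
      struct fann *ann = fann_create_standard(3, 2, 3, 1);
      if (fann_save(ann, "my_net.net") == -1)     /* -1 signals failure */
        return 1;
      fann_destroy(ann);

      ann = fann_create_from_file("my_net.net");  /* reload later */
      fann_destroy(ann);
      return 0;
    }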
 
 /** Initializes class under RubyFann module/namespace. */
 void Init_neural_network ()
 {
- [205 old lines removed; content not shown in this view]
+  // RubyFann module/namespace:
+  m_rb_fann_module = rb_define_module ("RubyFann");
+
+  // Standard NN class:
+  m_rb_fann_standard_class = rb_define_class_under (m_rb_fann_module, "Standard", rb_cObject);
+  rb_define_alloc_func (m_rb_fann_standard_class, fann_allocate);
+  rb_define_method(m_rb_fann_standard_class, "initialize", fann_initialize, 1);
+  rb_define_method(m_rb_fann_standard_class, "init_weights", init_weights, 1);
+  rb_define_method(m_rb_fann_standard_class, "set_activation_function", set_activation_function, 3);
+  rb_define_method(m_rb_fann_standard_class, "set_activation_function_hidden", set_activation_function_hidden, 1);
+  rb_define_method(m_rb_fann_standard_class, "set_activation_function_layer", set_activation_function_layer, 2);
+  rb_define_method(m_rb_fann_standard_class, "get_activation_function", get_activation_function, 2);
+  rb_define_method(m_rb_fann_standard_class, "set_activation_function_output", set_activation_function_output, 1);
+  rb_define_method(m_rb_fann_standard_class, "get_activation_steepness", get_activation_steepness, 2);
+  rb_define_method(m_rb_fann_standard_class, "set_activation_steepness", set_activation_steepness, 3);
+  rb_define_method(m_rb_fann_standard_class, "set_activation_steepness_hidden", set_activation_steepness_hidden, 1);
+  rb_define_method(m_rb_fann_standard_class, "set_activation_steepness_layer", set_activation_steepness_layer, 2);
+  rb_define_method(m_rb_fann_standard_class, "set_activation_steepness_output", set_activation_steepness_output, 1);
+  rb_define_method(m_rb_fann_standard_class, "get_train_error_function", get_train_error_function, 0);
+  rb_define_method(m_rb_fann_standard_class, "set_train_error_function", set_train_error_function, 1);
+  rb_define_method(m_rb_fann_standard_class, "get_train_stop_function", get_train_stop_function, 0);
+  rb_define_method(m_rb_fann_standard_class, "set_train_stop_function", set_train_stop_function, 1);
+  rb_define_method(m_rb_fann_standard_class, "get_bit_fail_limit", get_bit_fail_limit, 0);
+  rb_define_method(m_rb_fann_standard_class, "set_bit_fail_limit", set_bit_fail_limit, 1);
+  rb_define_method(m_rb_fann_standard_class, "get_quickprop_decay", get_quickprop_decay, 0);
+  rb_define_method(m_rb_fann_standard_class, "set_quickprop_decay", set_quickprop_decay, 1);
+  rb_define_method(m_rb_fann_standard_class, "get_quickprop_mu", get_quickprop_mu, 0);
+  rb_define_method(m_rb_fann_standard_class, "set_quickprop_mu", set_quickprop_mu, 1);
+  rb_define_method(m_rb_fann_standard_class, "get_rprop_increase_factor", get_rprop_increase_factor, 0);
+  rb_define_method(m_rb_fann_standard_class, "set_rprop_increase_factor", set_rprop_increase_factor, 1);
+  rb_define_method(m_rb_fann_standard_class, "get_rprop_decrease_factor", get_rprop_decrease_factor, 0);
+  rb_define_method(m_rb_fann_standard_class, "set_rprop_decrease_factor", set_rprop_decrease_factor, 1);
+  rb_define_method(m_rb_fann_standard_class, "get_rprop_delta_max", get_rprop_delta_max, 0);
+  rb_define_method(m_rb_fann_standard_class, "set_rprop_delta_max", set_rprop_delta_max, 1);
+  rb_define_method(m_rb_fann_standard_class, "get_rprop_delta_min", get_rprop_delta_min, 0);
+  rb_define_method(m_rb_fann_standard_class, "set_rprop_delta_min", set_rprop_delta_min, 1);
+  rb_define_method(m_rb_fann_standard_class, "get_rprop_delta_zero", get_rprop_delta_zero, 0);
+  rb_define_method(m_rb_fann_standard_class, "set_rprop_delta_zero", set_rprop_delta_zero, 1);
+  rb_define_method(m_rb_fann_standard_class, "get_bias_array", get_bias_array, 0);
+  rb_define_method(m_rb_fann_standard_class, "get_connection_rate", get_connection_rate, 0);
+  rb_define_method(m_rb_fann_standard_class, "get_layer_array", get_layer_array, 0);
+  rb_define_method(m_rb_fann_standard_class, "get_network_type", get_network_type, 0);
+  rb_define_method(m_rb_fann_standard_class, "get_neurons", get_neurons, 0);
+  rb_define_method(m_rb_fann_standard_class, "get_num_input", get_num_input, 0);
+  rb_define_method(m_rb_fann_standard_class, "get_num_layers", get_num_layers, 0);
+  rb_define_method(m_rb_fann_standard_class, "get_num_output", get_num_output, 0);
+  rb_define_method(m_rb_fann_standard_class, "get_total_connections", get_total_connections, 0);
+  rb_define_method(m_rb_fann_standard_class, "get_total_neurons", get_total_neurons, 0);
+  rb_define_method(m_rb_fann_standard_class, "get_train_error_function", get_train_error_function, 0);
+  rb_define_method(m_rb_fann_standard_class, "set_train_error_function", set_train_error_function, 1);
+  rb_define_method(m_rb_fann_standard_class, "print_connections", print_connections, 0);
+  rb_define_method(m_rb_fann_standard_class, "print_parameters", print_parameters, 0);
+  rb_define_method(m_rb_fann_standard_class, "randomize_weights", randomize_weights, 2);
+  rb_define_method(m_rb_fann_standard_class, "run", run, 1);
+  rb_define_method(m_rb_fann_standard_class, "train_on_data", train_on_data, 4);
+  rb_define_method(m_rb_fann_standard_class, "train_epoch", train_epoch, 1);
+  rb_define_method(m_rb_fann_standard_class, "test_data", test_data, 1);
+  rb_define_method(m_rb_fann_standard_class, "get_MSE", get_MSE, 0);
+  rb_define_method(m_rb_fann_standard_class, "get_bit_fail", get_bit_fail, 0);
+  rb_define_method(m_rb_fann_standard_class, "reset_MSE", reset_MSE, 0);
+  rb_define_method(m_rb_fann_standard_class, "get_learning_rate", get_learning_rate, 0);
+  rb_define_method(m_rb_fann_standard_class, "set_learning_rate", set_learning_rate, 1);
+  rb_define_method(m_rb_fann_standard_class, "get_learning_momentum", get_learning_momentum, 0);
+  rb_define_method(m_rb_fann_standard_class, "set_learning_momentum", set_learning_momentum, 1);
+  rb_define_method(m_rb_fann_standard_class, "get_training_algorithm", get_training_algorithm, 0);
+  rb_define_method(m_rb_fann_standard_class, "set_training_algorithm", set_training_algorithm, 1);
+
+  // Cascade functions:
+  rb_define_method(m_rb_fann_standard_class, "cascadetrain_on_data", cascadetrain_on_data, 4);
+  rb_define_method(m_rb_fann_standard_class, "get_cascade_output_change_fraction", get_cascade_output_change_fraction, 0);
+  rb_define_method(m_rb_fann_standard_class, "set_cascade_output_change_fraction", set_cascade_output_change_fraction, 1);
+  rb_define_method(m_rb_fann_standard_class, "get_cascade_output_stagnation_epochs", get_cascade_output_stagnation_epochs, 0);
+  rb_define_method(m_rb_fann_standard_class, "set_cascade_output_stagnation_epochs", set_cascade_output_stagnation_epochs, 1);
+  rb_define_method(m_rb_fann_standard_class, "get_cascade_candidate_change_fraction", get_cascade_candidate_change_fraction, 0);
+  rb_define_method(m_rb_fann_standard_class, "set_cascade_candidate_change_fraction", set_cascade_candidate_change_fraction, 1);
+  rb_define_method(m_rb_fann_standard_class, "get_cascade_candidate_stagnation_epochs", get_cascade_candidate_stagnation_epochs, 0);
+  rb_define_method(m_rb_fann_standard_class, "set_cascade_candidate_stagnation_epochs", set_cascade_candidate_stagnation_epochs, 1);
+  rb_define_method(m_rb_fann_standard_class, "get_cascade_weight_multiplier", get_cascade_weight_multiplier, 0);
+  rb_define_method(m_rb_fann_standard_class, "set_cascade_weight_multiplier", set_cascade_weight_multiplier, 1);
+  rb_define_method(m_rb_fann_standard_class, "get_cascade_candidate_limit", get_cascade_candidate_limit, 0);
+  rb_define_method(m_rb_fann_standard_class, "set_cascade_candidate_limit", set_cascade_candidate_limit, 1);
+  rb_define_method(m_rb_fann_standard_class, "get_cascade_max_out_epochs", get_cascade_max_out_epochs, 0);
+  rb_define_method(m_rb_fann_standard_class, "set_cascade_max_out_epochs", set_cascade_max_out_epochs, 1);
+  rb_define_method(m_rb_fann_standard_class, "get_cascade_max_cand_epochs", get_cascade_max_cand_epochs, 0);
+  rb_define_method(m_rb_fann_standard_class, "set_cascade_max_cand_epochs", set_cascade_max_cand_epochs, 1);
+  rb_define_method(m_rb_fann_standard_class, "get_cascade_num_candidates", get_cascade_num_candidates, 0);
+  rb_define_method(m_rb_fann_standard_class, "get_cascade_activation_functions_count", get_cascade_activation_functions_count, 0);
+  rb_define_method(m_rb_fann_standard_class, "get_cascade_activation_functions", get_cascade_activation_functions, 0);
+  rb_define_method(m_rb_fann_standard_class, "set_cascade_activation_functions", set_cascade_activation_functions, 1);
+  rb_define_method(m_rb_fann_standard_class, "get_cascade_activation_steepnesses_count", get_cascade_activation_steepnesses_count, 0);
+  rb_define_method(m_rb_fann_standard_class, "get_cascade_activation_steepnesses", get_cascade_activation_steepnesses, 0);
+  rb_define_method(m_rb_fann_standard_class, "set_cascade_activation_steepnesses", set_cascade_activation_steepnesses, 1);
+  rb_define_method(m_rb_fann_standard_class, "get_cascade_num_candidate_groups", get_cascade_num_candidate_groups, 0);
+  rb_define_method(m_rb_fann_standard_class, "set_cascade_num_candidate_groups", set_cascade_num_candidate_groups, 1);
+  rb_define_method(m_rb_fann_standard_class, "save", nn_save, 1);
+
+  // Uncomment for fixed-point mode (also recompile fann). Probably not going to be needed:
+  //rb_define_method(clazz, "get_decimal_point", get_decimal_point, 0);
+  //rb_define_method(clazz, "get_multiplier", get_multiplier, 0);
+
+  // Shortcut NN class (duplicated from above so that rdoc generation tools can find the methods):
+  m_rb_fann_shortcut_class = rb_define_class_under (m_rb_fann_module, "Shortcut", rb_cObject);
+  rb_define_alloc_func (m_rb_fann_shortcut_class, fann_allocate);
+  rb_define_method(m_rb_fann_shortcut_class, "initialize", fann_initialize, 1);
+  rb_define_method(m_rb_fann_shortcut_class, "init_weights", init_weights, 1);
+  rb_define_method(m_rb_fann_shortcut_class, "set_activation_function", set_activation_function, 3);
+  rb_define_method(m_rb_fann_shortcut_class, "set_activation_function_hidden", set_activation_function_hidden, 1);
+  rb_define_method(m_rb_fann_shortcut_class, "set_activation_function_layer", set_activation_function_layer, 2);
+  rb_define_method(m_rb_fann_shortcut_class, "get_activation_function", get_activation_function, 2);
+  rb_define_method(m_rb_fann_shortcut_class, "set_activation_function_output", set_activation_function_output, 1);
+  rb_define_method(m_rb_fann_shortcut_class, "get_activation_steepness", get_activation_steepness, 2);
+  rb_define_method(m_rb_fann_shortcut_class, "set_activation_steepness", set_activation_steepness, 3);
+  rb_define_method(m_rb_fann_shortcut_class, "set_activation_steepness_hidden", set_activation_steepness_hidden, 1);
+  rb_define_method(m_rb_fann_shortcut_class, "set_activation_steepness_layer", set_activation_steepness_layer, 2);
+  rb_define_method(m_rb_fann_shortcut_class, "set_activation_steepness_output", set_activation_steepness_output, 1);
+  rb_define_method(m_rb_fann_shortcut_class, "get_train_error_function", get_train_error_function, 0);
+  rb_define_method(m_rb_fann_shortcut_class, "set_train_error_function", set_train_error_function, 1);
+  rb_define_method(m_rb_fann_shortcut_class, "get_train_stop_function", get_train_stop_function, 0);
+  rb_define_method(m_rb_fann_shortcut_class, "set_train_stop_function", set_train_stop_function, 1);
+  rb_define_method(m_rb_fann_shortcut_class, "get_bit_fail_limit", get_bit_fail_limit, 0);
+  rb_define_method(m_rb_fann_shortcut_class, "set_bit_fail_limit", set_bit_fail_limit, 1);
+  rb_define_method(m_rb_fann_shortcut_class, "get_quickprop_decay", get_quickprop_decay, 0);
+  rb_define_method(m_rb_fann_shortcut_class, "set_quickprop_decay", set_quickprop_decay, 1);
+  rb_define_method(m_rb_fann_shortcut_class, "get_quickprop_mu", get_quickprop_mu, 0);
+  rb_define_method(m_rb_fann_shortcut_class, "set_quickprop_mu", set_quickprop_mu, 1);
+  rb_define_method(m_rb_fann_shortcut_class, "get_rprop_increase_factor", get_rprop_increase_factor, 0);
+  rb_define_method(m_rb_fann_shortcut_class, "set_rprop_increase_factor", set_rprop_increase_factor, 1);
+  rb_define_method(m_rb_fann_shortcut_class, "get_rprop_decrease_factor", get_rprop_decrease_factor, 0);
+  rb_define_method(m_rb_fann_shortcut_class, "set_rprop_decrease_factor", set_rprop_decrease_factor, 1);
+  rb_define_method(m_rb_fann_shortcut_class, "get_rprop_delta_max", get_rprop_delta_max, 0);
+  rb_define_method(m_rb_fann_shortcut_class, "set_rprop_delta_max", set_rprop_delta_max, 1);
+  rb_define_method(m_rb_fann_shortcut_class, "get_rprop_delta_min", get_rprop_delta_min, 0);
+  rb_define_method(m_rb_fann_shortcut_class, "set_rprop_delta_min", set_rprop_delta_min, 1);
+  rb_define_method(m_rb_fann_shortcut_class, "get_rprop_delta_zero", get_rprop_delta_zero, 0);
+  rb_define_method(m_rb_fann_shortcut_class, "set_rprop_delta_zero", set_rprop_delta_zero, 1);
+  rb_define_method(m_rb_fann_shortcut_class, "get_bias_array", get_bias_array, 0);
+  rb_define_method(m_rb_fann_shortcut_class, "get_connection_rate", get_connection_rate, 0);
+  rb_define_method(m_rb_fann_shortcut_class, "get_layer_array", get_layer_array, 0);
+  rb_define_method(m_rb_fann_shortcut_class, "get_network_type", get_network_type, 0);
+  rb_define_method(m_rb_fann_shortcut_class, "get_neurons", get_neurons, 0);
+  rb_define_method(m_rb_fann_shortcut_class, "get_num_input", get_num_input, 0);
+  rb_define_method(m_rb_fann_shortcut_class, "get_num_layers", get_num_layers, 0);
+  rb_define_method(m_rb_fann_shortcut_class, "get_num_output", get_num_output, 0);
+  rb_define_method(m_rb_fann_shortcut_class, "get_total_connections", get_total_connections, 0);
+  rb_define_method(m_rb_fann_shortcut_class, "get_total_neurons", get_total_neurons, 0);
+  rb_define_method(m_rb_fann_shortcut_class, "get_train_error_function", get_train_error_function, 0);
+  rb_define_method(m_rb_fann_shortcut_class, "set_train_error_function", set_train_error_function, 1);
+  rb_define_method(m_rb_fann_shortcut_class, "print_connections", print_connections, 0);
+  rb_define_method(m_rb_fann_shortcut_class, "print_parameters", print_parameters, 0);
+  rb_define_method(m_rb_fann_shortcut_class, "randomize_weights", randomize_weights, 2);
+  rb_define_method(m_rb_fann_shortcut_class, "run", run, 1);
+  rb_define_method(m_rb_fann_shortcut_class, "train_on_data", train_on_data, 4);
+  rb_define_method(m_rb_fann_shortcut_class, "train_epoch", train_epoch, 1);
+  rb_define_method(m_rb_fann_shortcut_class, "test_data", test_data, 1);
+  rb_define_method(m_rb_fann_shortcut_class, "get_MSE", get_MSE, 0);
+  rb_define_method(m_rb_fann_shortcut_class, "get_bit_fail", get_bit_fail, 0);
+  rb_define_method(m_rb_fann_shortcut_class, "reset_MSE", reset_MSE, 0);
+  rb_define_method(m_rb_fann_shortcut_class, "get_learning_rate", get_learning_rate, 0);
+  rb_define_method(m_rb_fann_shortcut_class, "set_learning_rate", set_learning_rate, 1);
+  rb_define_method(m_rb_fann_shortcut_class, "get_learning_momentum", get_learning_momentum, 0);
+  rb_define_method(m_rb_fann_shortcut_class, "set_learning_momentum", set_learning_momentum, 1);
+  rb_define_method(m_rb_fann_shortcut_class, "get_training_algorithm", get_training_algorithm, 0);
+  rb_define_method(m_rb_fann_shortcut_class, "set_training_algorithm", set_training_algorithm, 1);
+
+  // Cascade functions:
+  rb_define_method(m_rb_fann_shortcut_class, "cascadetrain_on_data", cascadetrain_on_data, 4);
+  rb_define_method(m_rb_fann_shortcut_class, "get_cascade_output_change_fraction", get_cascade_output_change_fraction, 0);
+  rb_define_method(m_rb_fann_shortcut_class, "set_cascade_output_change_fraction", set_cascade_output_change_fraction, 1);
+  rb_define_method(m_rb_fann_shortcut_class, "get_cascade_output_stagnation_epochs", get_cascade_output_stagnation_epochs, 0);
+  rb_define_method(m_rb_fann_shortcut_class, "set_cascade_output_stagnation_epochs", set_cascade_output_stagnation_epochs, 1);
+  rb_define_method(m_rb_fann_shortcut_class, "get_cascade_candidate_change_fraction", get_cascade_candidate_change_fraction, 0);
+  rb_define_method(m_rb_fann_shortcut_class, "set_cascade_candidate_change_fraction", set_cascade_candidate_change_fraction, 1);
+  rb_define_method(m_rb_fann_shortcut_class, "get_cascade_candidate_stagnation_epochs", get_cascade_candidate_stagnation_epochs, 0);
+  rb_define_method(m_rb_fann_shortcut_class, "set_cascade_candidate_stagnation_epochs", set_cascade_candidate_stagnation_epochs, 1);
+  rb_define_method(m_rb_fann_shortcut_class, "get_cascade_weight_multiplier", get_cascade_weight_multiplier, 0);
+  rb_define_method(m_rb_fann_shortcut_class, "set_cascade_weight_multiplier", set_cascade_weight_multiplier, 1);
+  rb_define_method(m_rb_fann_shortcut_class, "get_cascade_candidate_limit", get_cascade_candidate_limit, 0);
+  rb_define_method(m_rb_fann_shortcut_class, "set_cascade_candidate_limit", set_cascade_candidate_limit, 1);
+  rb_define_method(m_rb_fann_shortcut_class, "get_cascade_max_out_epochs", get_cascade_max_out_epochs, 0);
+  rb_define_method(m_rb_fann_shortcut_class, "set_cascade_max_out_epochs", set_cascade_max_out_epochs, 1);
+  rb_define_method(m_rb_fann_shortcut_class, "get_cascade_max_cand_epochs", get_cascade_max_cand_epochs, 0);
+  rb_define_method(m_rb_fann_shortcut_class, "set_cascade_max_cand_epochs", set_cascade_max_cand_epochs, 1);
+  rb_define_method(m_rb_fann_shortcut_class, "get_cascade_num_candidates", get_cascade_num_candidates, 0);
+  rb_define_method(m_rb_fann_shortcut_class, "get_cascade_activation_functions_count", get_cascade_activation_functions_count, 0);
+  rb_define_method(m_rb_fann_shortcut_class, "get_cascade_activation_functions", get_cascade_activation_functions, 0);
+  rb_define_method(m_rb_fann_shortcut_class, "set_cascade_activation_functions", set_cascade_activation_functions, 1);
+  rb_define_method(m_rb_fann_shortcut_class, "get_cascade_activation_steepnesses_count", get_cascade_activation_steepnesses_count, 0);
+  rb_define_method(m_rb_fann_shortcut_class, "get_cascade_activation_steepnesses", get_cascade_activation_steepnesses, 0);
+  rb_define_method(m_rb_fann_shortcut_class, "set_cascade_activation_steepnesses", set_cascade_activation_steepnesses, 1);
+  rb_define_method(m_rb_fann_shortcut_class, "get_cascade_num_candidate_groups", get_cascade_num_candidate_groups, 0);
+  rb_define_method(m_rb_fann_shortcut_class, "set_cascade_num_candidate_groups", set_cascade_num_candidate_groups, 1);
+  rb_define_method(m_rb_fann_shortcut_class, "save", nn_save, 1);
+
+  // TrainData NN class:
+  m_rb_fann_train_data_class = rb_define_class_under (m_rb_fann_module, "TrainData", rb_cObject);
+  rb_define_alloc_func (m_rb_fann_train_data_class, fann_training_data_allocate);
+  rb_define_method(m_rb_fann_train_data_class, "initialize", fann_train_data_initialize, 1);
+  rb_define_method(m_rb_fann_train_data_class, "length", length_train_data, 0);
+  rb_define_method(m_rb_fann_train_data_class, "shuffle", shuffle, 0);
+  rb_define_method(m_rb_fann_train_data_class, "save", training_save, 1);
+
+  // printf("Initialized Ruby Bindings for FANN.\n");
 }
 