ruby-fann 0.7.4 → 0.7.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/History.txt +6 -0
- data/ext/ruby_fann/Makefile +3 -3
- data/ext/ruby_fann/extconf.rb +1 -1
- data/ext/ruby_fann/neural_network.c +211 -116
- data/lib/ruby_fann/version.rb +1 -1
- data/neurotica1.png +0 -0
- data/neurotica2.vrml +18 -18
- data/test/test_ruby_fann.rb +2 -2
- data/website/index.html +20 -7
- data/website/index.txt +14 -2
- data/xor_cascade.net +4 -4
- data/xor_float.net +1 -1
- metadata +2 -2
data/History.txt
CHANGED
data/ext/ruby_fann/Makefile
CHANGED
@@ -43,7 +43,7 @@ LIBRUBYARG_STATIC = -l$(RUBY_SO_NAME)-static
 RUBY_EXTCONF_H =
 CFLAGS = -fno-common -g -O2 -fno-common -pipe -fno-common
 INCFLAGS = -I. -I. -I/usr/local/lib/ruby/1.8/i686-darwin8.11.1 -I.
-CPPFLAGS = -I/usr/
+CPPFLAGS = -I/usr/local/include
 CXXFLAGS = $(CFLAGS)
 DLDFLAGS = -L.
 LDSHARED = cc -dynamic -bundle -undefined suppress -flat_namespace
@@ -68,8 +68,8 @@ COPY = cp
 
 preload =
 
-libpath = . $(libdir) /usr/
-LIBPATH = -L"." -L"$(libdir)" -L"/usr/
+libpath = . $(libdir) /usr/local/lib
+LIBPATH = -L"." -L"$(libdir)" -L"/usr/local/lib"
 DEFFILE =
 
 CLEANFILES = mkmf.log
data/ext/ruby_fann/extconf.rb
CHANGED
@@ -1,7 +1,7 @@
 require 'mkmf'
 lib = dir_config('fann', '/usr/local')
 if !have_library("doublefann", "fann_create_standard")
-  puts "FANN must be installed and available in /usr/local or passed in with --with-fann-dir "
+  puts "FANN must be installed and available in /usr/local or passed in with --with-fann-dir. Windows users should use ruby compiled in Cygwin or an equivalent, such as MingW."
   exit 1
 end
 #find_library("doublefann", "fann_create_standard", "/usr/local/lib")
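The extconf.rb change above only rewords the failure message; the detection logic is unchanged. Because the script calls mkmf's dir_config('fann', '/usr/local'), a FANN install outside /usr/local can be supplied at build time, for example (the /opt/fann path is hypothetical):

    ruby extconf.rb --with-fann-dir=/opt/fann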
data/ext/ruby_fann/neural_network.c
CHANGED
@@ -181,15 +181,22 @@ static VALUE fann_training_data_allocate (VALUE klass)
 }
 
 
-/**
-
-
+/** call-seq: new(hash) -> new ruby-fann neural network object
+
+Initialization routine for both standard, shortcut & filename forms of FANN:
+
+Standard Initialization:
+RubyFann::Standard.new(:num_inputs=>1, :hidden_neurons=>[3, 4, 3, 4], :num_outputs=>1)
 
-Shortcut Initialization (e.g., for use in cascade training):
-
+Shortcut Initialization (e.g., for use in cascade training):
+RubyFann::Shortcut.new(:num_inputs=>5, :num_outputs=>1)
 
-File Initialization
-
+File Initialization
+RubyFann::Standard.new(:filename=>'xor_float.net')
+
+
+
+*/
 static VALUE fann_initialize(VALUE self, VALUE hash)
 {
 // Get args:
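The new call-seq comment documents three construction forms. As a plain Ruby sketch, using the exact argument values quoted in the comment above (xor_float.net ships with this gem):

    require 'rubygems'
    require 'ruby_fann/neural_network'

    # Standard form: 1 input, hidden layers of 3, 4, 3 and 4 neurons, 1 output.
    standard = RubyFann::Standard.new(:num_inputs=>1, :hidden_neurons=>[3, 4, 3, 4], :num_outputs=>1)

    # Shortcut form, e.g. as the starting point for cascade training.
    shortcut = RubyFann::Shortcut.new(:num_inputs=>5, :num_outputs=>1)

    # Filename form: rebuild a network from a saved FANN configuration file.
    loaded = RubyFann::Standard.new(:filename=>'xor_float.net')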
@@ -244,24 +244,25 @@ static VALUE fann_initialize(VALUE self, VALUE hash)
 }
 
 DATA_PTR(self) = ann;
-return (VALUE)ann;
-
-DATA_PTR(self) = ann;
-return (VALUE)ann;
+return (VALUE)ann;
 }
 
-/**
-
+/** call-seq: new(hash) -> new ruby-fann training data object (RubyFann::TrainData)
+
+Initialize in one of the following forms:
+
+# This is a flat file with training data as described in FANN docs.
+RubyFann::TrainData.new(:filename => 'path/to/training_file.train')
+OR
+# Train with inputs (array of arrays) & desired_outputs (array of arrays)
+# inputs & desired outputs should be of same length
+# All sub-arrays on inputs should be of same length
+# All sub-arrays on desired_outputs should be of same length
+# Sub-arrays on inputs & desired_outputs can be different sizes from one another
+RubyFann::TrainData.new(:inputs=>[[0.2, 0.3, 0.4], [0.8, 0.9, 0.7]], :desired_outputs=>[[3.14], [6.33]])
+
+
 
-# This is a flat file with training data as described in FANN docs.
-RubyFann::TrainData.new(:filename => 'path/to/training_file.train')
-OR
-# Train with inputs (array of arrays) & desired_outputs (array of arrays)
-# inputs & desired outputs should be of same length
-# All sub-arrays on inputs should be of same length
-# All sub-arrays on desired_outputs should be of same length
-# Sub-arrays on inputs & desired_outputs can be different sizes from one another
-RubyFann::TrainData.new(:inputs=>[[0.2, 0.3, 0.4], [0.8, 0.9, 0.7]], :desired_outputs=>[[3.14], [6.33]])
 */
 static VALUE fann_train_data_initialize(VALUE self, VALUE hash)
 {
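Both documented TrainData forms, written out as a runnable sketch (the .train path is the placeholder from the comment itself):

    require 'rubygems'
    require 'ruby_fann/neural_network'

    # Form 1: a flat FANN-format training file on disk.
    from_file = RubyFann::TrainData.new(:filename => 'path/to/training_file.train')

    # Form 2: arrays of arrays; inputs and desired_outputs must have the same length.
    in_memory = RubyFann::TrainData.new(
      :inputs=>[[0.2, 0.3, 0.4], [0.8, 0.9, 0.7]],
      :desired_outputs=>[[3.14], [6.33]])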
@@ -306,7 +314,10 @@ static VALUE fann_train_data_initialize(VALUE self, VALUE hash)
 }
 
 
-/**
+/** call-seq: save(filename)
+
+Save to given filename
+*/
 static VALUE training_save(VALUE self, VALUE filename)
 {
 Check_Type(filename, T_STRING);
@@ -316,7 +327,7 @@ static VALUE training_save(VALUE self, VALUE filename)
 }
 
 /** Shuffles training data, randomizing the order.
-This is recommended for incremental training, while it will have no influence during batch training
+This is recommended for incremental training, while it will have no influence during batch training.*/
 static VALUE shuffle(VALUE self)
 {
 struct fann_train_data* t;
@@ -324,7 +335,7 @@ static VALUE shuffle(VALUE self)
 fann_shuffle_train_data(t);
 }
 
-/** Length of training data
+/** Length of training data*/
 static VALUE length_train_data(VALUE self)
 {
 struct fann_train_data* t;
@@ -332,13 +343,14 @@ static VALUE length_train_data(VALUE self)
 return(UINT2NUM(fann_length_train_data(t)));
 }
 
-/**
+/** call-seq: set_activation_function(activation_func, layer, neuron)
+
+Set the activation function for neuron number *neuron* in layer number *layer*,
 counting the input layer as layer 0. activation_func must be one of the following symbols:
 :linear, :threshold, :threshold_symmetric, :sigmoid, :sigmoid_stepwise, :sigmoid_symmetric,
 :sigmoid_symmetric_stepwise, :gaussian, :gaussian_symmetric, :gaussian_stepwise, :elliot,
 :elliot_symmetric, :linear_piece, :linear_piece_symmetric, :sin_symmetric, :cos_symmetric,
-:sin, :cos
-*/
+:sin, :cos*/
 static VALUE set_activation_function(VALUE self, VALUE activation_func, VALUE layer, VALUE neuron)
 {
 Check_Type(activation_func, T_SYMBOL);
|
|
350
362
|
fann_set_activation_function(f, sym_to_activation_function(activation_func), NUM2INT(layer), NUM2INT(neuron));
|
351
363
|
}
|
352
364
|
|
353
|
-
/**
|
365
|
+
/** call-seq: set_activation_function_hidden(activation_func)
|
366
|
+
|
367
|
+
Set the activation function for all of the hidden layers. activation_func must be one of the following symbols:
|
354
368
|
:linear, :threshold, :threshold_symmetric, :sigmoid, :sigmoid_stepwise, :sigmoid_symmetric,
|
355
369
|
:sigmoid_symmetric_stepwise, :gaussian, :gaussian_symmetric, :gaussian_stepwise, :elliot,
|
356
370
|
:elliot_symmetric, :linear_piece, :linear_piece_symmetric, :sin_symmetric, :cos_symmetric,
|
@@ -363,12 +377,15 @@ static VALUE set_activation_function_hidden(VALUE self, VALUE activation_func)
 fann_set_activation_function_hidden(f, sym_to_activation_function(activation_func));
 }
 
-/**
+/** call-seq: set_activation_function_layer(activation_func, layer)
+
+Set the activation function for all the neurons in the layer number *layer*,
 counting the input layer as layer 0. activation_func must be one of the following symbols:
 :linear, :threshold, :threshold_symmetric, :sigmoid, :sigmoid_stepwise, :sigmoid_symmetric,
 :sigmoid_symmetric_stepwise, :gaussian, :gaussian_symmetric, :gaussian_stepwise, :elliot,
 :elliot_symmetric, :linear_piece, :linear_piece_symmetric, :sin_symmetric, :cos_symmetric,
-:sin, :cos
+:sin, :cos*/
+
 static VALUE set_activation_function_layer(VALUE self, VALUE activation_func, VALUE layer)
 {
 Check_Type(activation_func, T_SYMBOL);
@@ -378,11 +395,14 @@ static VALUE set_activation_function_layer(VALUE self, VALUE activation_func, VALUE layer)
 fann_set_activation_function_layer(f, sym_to_activation_function(activation_func), NUM2INT(layer));
 }
 
-/**
+/** call-seq: set_activation_function_output(activation_func)
+
+Set the activation function for the output layer. activation_func must be one of the following symbols:
 :linear, :threshold, :threshold_symmetric, :sigmoid, :sigmoid_stepwise, :sigmoid_symmetric,
 :sigmoid_symmetric_stepwise, :gaussian, :gaussian_symmetric, :gaussian_stepwise, :elliot,
 :elliot_symmetric, :linear_piece, :linear_piece_symmetric, :sin_symmetric, :cos_symmetric,
-:sin, :cos
+:sin, :cos*/
+
 static VALUE set_activation_function_output(VALUE self, VALUE activation_func)
 {
 Check_Type(activation_func, T_SYMBOL);
@@ -391,9 +411,11 @@ static VALUE set_activation_function_output(VALUE self, VALUE activation_func)
 fann_set_activation_function_output(f, sym_to_activation_function(activation_func));
 }
 
-/**
-
-
+/** call-seq: get_activation_steepness(layer, neuron) -> return value
+
+Get the activation steepness for neuron number neuron in layer number layer, counting the input layer as layer 0.
+*/
+static VALUE get_activation_steepness(VALUE self, VALUE layer, VALUE neuron)
 {
 Check_Type(layer, T_FIXNUM);
 Check_Type(neuron, T_FIXNUM);
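Taken together, the hunks above document one activation-function setter per scope. A hedged Ruby sketch; the network shape, symbols and indices are illustrative, not taken from the gem's tests:

    fann = RubyFann::Standard.new(:num_inputs=>3, :hidden_neurons=>[4], :num_outputs=>1)

    fann.set_activation_function(:sigmoid_symmetric, 1, 0)  # a single neuron in layer 1
    fann.set_activation_function_layer(:gaussian, 1)        # every neuron in layer 1
    fann.set_activation_function_hidden(:sigmoid)           # all hidden layers
    fann.set_activation_function_output(:linear)            # the output layer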
@@ -403,8 +425,10 @@ VALUE self; VALUE layer; VALUE neuron;
 return rb_float_new(val);
 }
 
-/**
-
+/** call-seq: set_activation_steepness(steepness, layer, neuron)
+
+Set the activation steepness for neuron number {neuron} in layer number {layer},
+counting the input layer as layer 0.*/
 static VALUE set_activation_steepness(VALUE self, VALUE steepness, VALUE layer, VALUE neuron)
 {
 Check_Type(steepness, T_FLOAT);
@@ -416,14 +440,18 @@ static VALUE set_activation_steepness(VALUE self, VALUE steepness, VALUE layer, VALUE neuron)
 fann_set_activation_steepness(f, NUM2DBL(steepness), NUM2INT(layer), NUM2INT(neuron));
 }
 
-/**
+/** call-seq: set_activation_steepness_hidden(arg) -> return value
+
+Set the activation steepness in all of the hidden layers.*/
 static VALUE set_activation_steepness_hidden(VALUE self, VALUE steepness)
 {
 SET_FANN_FLT(steepness, fann_set_activation_steepness_hidden);
 }
 
-/**
-
+/** call-seq: set_activation_steepness_layer(steepness, layer)
+
+Set the activation steepness all of the neurons in layer number *layer*,
+counting the input layer as layer 0.*/
 static VALUE set_activation_steepness_layer(VALUE self, VALUE steepness, VALUE layer)
 {
 Check_Type(steepness, T_FLOAT);
@@ -434,114 +462,132 @@ static VALUE set_activation_steepness_layer(VALUE self, VALUE steepness, VALUE layer)
 fann_set_activation_steepness_layer(f, NUM2DBL(steepness), NUM2INT(layer));
 }
 
-/**
+/** call-seq: set_activation_steepness_output(steepness)
+
+Set the activation steepness in the output layer.*/
 static VALUE set_activation_steepness_output(VALUE self, VALUE steepness)
 {
 SET_FANN_FLT(steepness, fann_set_activation_steepness_output);
 }
 
-/** Returns the bit fail limit used during training
+/** Returns the bit fail limit used during training.*/
 static VALUE get_bit_fail_limit(VALUE self)
 {
 RETURN_FANN_DBL(fann_get_bit_fail_limit);
 }
 
-/**
+/** call-seq: set_bit_fail_limit(bit_fail_limit)
+
+Sets the bit fail limit used during training.*/
 static VALUE set_bit_fail_limit(VALUE self, VALUE bit_fail_limit)
 {
 SET_FANN_FLT(bit_fail_limit, fann_set_bit_fail_limit);
 }
 
 /** The decay is a small negative valued number which is the factor that the weights
-should become smaller in each iteration during quickprop training. This is used
-to make sure that the weights do not become too high during training
+should become smaller in each iteration during quickprop training. This is used
+to make sure that the weights do not become too high during training.*/
 static VALUE get_quickprop_decay(VALUE self)
 {
 RETURN_FANN_FLT(fann_get_quickprop_decay);
 }
 
-/**
+/** call-seq: set_quickprop_decay(quickprop_decay)
+
+Sets the quickprop decay factor*/
 static VALUE set_quickprop_decay(VALUE self, VALUE quickprop_decay)
 {
 SET_FANN_FLT(quickprop_decay, fann_set_quickprop_decay);
 }
 
 /** The mu factor is used to increase and decrease the step-size during quickprop training.
-The mu factor should always be above 1, since it would otherwise decrease the step-size
-when it was suppose to increase it. */
+The mu factor should always be above 1, since it would otherwise decrease the step-size
+when it was suppose to increase it. */
 static VALUE get_quickprop_mu(VALUE self)
 {
 RETURN_FANN_FLT(fann_get_quickprop_mu);
 }
 
-/**
+/** call-seq: set_quickprop_mu(quickprop_mu)
+
+Sets the quickprop mu factor.*/
 static VALUE set_quickprop_mu(VALUE self, VALUE quickprop_mu)
 {
 SET_FANN_FLT(quickprop_mu, fann_set_quickprop_mu);
 }
 
 /** The increase factor is a value larger than 1, which is used to
-increase the step-size during RPROP training
+increase the step-size during RPROP training.*/
 static VALUE get_rprop_increase_factor(VALUE self)
 {
 RETURN_FANN_FLT(fann_get_rprop_increase_factor);
 }
 
-/**
+/** call-seq: set_rprop_increase_factor(rprop_increase_factor)
+
+The increase factor used during RPROP training. */
 static VALUE set_rprop_increase_factor(VALUE self, VALUE rprop_increase_factor)
 {
 SET_FANN_FLT(rprop_increase_factor, fann_set_rprop_increase_factor);
 }
 
-/** The decrease factor is a value smaller than 1, which is used to decrease the step-size during RPROP training
+/** The decrease factor is a value smaller than 1, which is used to decrease the step-size during RPROP training.*/
 static VALUE get_rprop_decrease_factor(VALUE self)
 {
 RETURN_FANN_FLT(fann_get_rprop_decrease_factor);
 }
 
-/**
+/** call-seq: set_rprop_decrease_factor(rprop_decrease_factor)
+
+The decrease factor is a value smaller than 1, which is used to decrease the step-size during RPROP training.*/
 static VALUE set_rprop_decrease_factor(VALUE self, VALUE rprop_decrease_factor)
 {
 SET_FANN_FLT(rprop_decrease_factor, fann_set_rprop_decrease_factor);
 }
 
-/** The minimum step-size is a small positive number determining how small the minimum step-size may be
+/** The minimum step-size is a small positive number determining how small the minimum step-size may be.*/
 static VALUE get_rprop_delta_min(VALUE self)
 {
 RETURN_FANN_FLT(fann_get_rprop_delta_min);
 }
 
-/**
+/** call-seq: set_rprop_delta_min(rprop_delta_min)
+
+The minimum step-size is a small positive number determining how small the minimum step-size may be.*/
 static VALUE set_rprop_delta_min(VALUE self, VALUE rprop_delta_min)
 {
 SET_FANN_FLT(rprop_delta_min, fann_set_rprop_delta_min);
 }
 
-/** The maximum step-size is a positive number determining how large the maximum step-size may be
+/** The maximum step-size is a positive number determining how large the maximum step-size may be.*/
 static VALUE get_rprop_delta_max(VALUE self)
 {
 RETURN_FANN_FLT(fann_get_rprop_delta_max);
 }
 
-/**
+/** call-seq: set_rprop_delta_max(rprop_delta_max)
+
+The maximum step-size is a positive number determining how large the maximum step-size may be.*/
 static VALUE set_rprop_delta_max(VALUE self, VALUE rprop_delta_max)
 {
 SET_FANN_FLT(rprop_delta_max, fann_set_rprop_delta_max);
 }
 
-/** The initial step-size is a positive number determining the initial step size
+/** The initial step-size is a positive number determining the initial step size.*/
 static VALUE get_rprop_delta_zero(VALUE self)
 {
 RETURN_FANN_FLT(fann_get_rprop_delta_zero);
 }
 
-/**
+/** call-seq: set_rprop_delta_zero(rprop_delta_zero)
+
+The initial step-size is a positive number determining the initial step size.*/
 static VALUE set_rprop_delta_zero(VALUE self, VALUE rprop_delta_zero)
 {
 SET_FANN_FLT(rprop_delta_zero, fann_set_rprop_delta_zero);
 }
 
-/** Return array of bias(es)
+/** Return array of bias(es)*/
 static VALUE get_bias_array(VALUE self)
 {
 struct fann* f;
@@ -566,30 +612,32 @@ static VALUE get_bias_array(VALUE self)
 /** The number of fail bits; means the number of output neurons which differ more
 than the bit fail limit (see <fann_get_bit_fail_limit>, <fann_set_bit_fail_limit>).
 The bits are counted in all of the training data, so this number can be higher than
-the number of training data
+the number of training data.*/
 static VALUE get_bit_fail(VALUE self)
 {
 RETURN_FANN_INT(fann_get_bit_fail);
 }
 
-/** Get the connection rate used when the network was created
+/** Get the connection rate used when the network was created.*/
 static VALUE get_connection_rate(VALUE self)
 {
 RETURN_FANN_INT(fann_get_connection_rate);
 }
 
-/**
-
-
-
-
-
-
+/** call-seq: get_neurons(layer) -> return value
+
+Return array<hash> where each array element is a hash
+representing a neuron. It contains the following keys:
+:activation_function, symbol -- the activation function
+:activation_steepness=float -- The steepness of the activation function
+:sum=float -- The sum of the inputs multiplied with the weights
+:value=float -- The value of the activation fuction applied to the sum
+:connections=array<int> -- indices of connected neurons(inputs)
 
-
+This could be done more elegantly (e.g., defining more ruby ext classes).
 This method does not directly correlate to anything in FANN, and accesses
 structs that are not guaranteed to not change.
-*/
+*/
 static VALUE get_neurons(VALUE self, VALUE layer)
 {
 struct fann_layer *layer_it;
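Because get_neurons returns an array of plain hashes rather than wrapper objects, inspecting a network is ordinary hash access. A sketch using the keys listed in the new comment (the layer index 1 and the already-built network are illustrative):

    fann.get_neurons(1).each do |neuron|
      puts neuron[:activation_function]   # symbol, e.g. :sigmoid
      puts neuron[:activation_steepness]  # Float
      puts neuron[:connections].inspect   # indices of connected (input) neurons
    end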
@@ -635,7 +683,7 @@ static VALUE get_neurons(VALUE self, VALUE layer)
 return neuron_array;
 }
 
-/** Get list of layers in array format where each element contains number of neurons in that layer
+/** Get list of layers in array format where each element contains number of neurons in that layer*/
 static VALUE get_layer_array(VALUE self)
 {
 struct fann* f;
@@ -657,14 +705,14 @@ static VALUE get_layer_array(VALUE self)
 return arr;
 }
 
-/** Reads the mean square error from the network
+/** Reads the mean square error from the network.*/
 static VALUE get_MSE(VALUE self)
 {
 RETURN_FANN_DBL(fann_get_MSE);
 }
 
 /** Resets the mean square error from the network.
-This function also resets the number of bits that fail
+This function also resets the number of bits that fail.*/
 static VALUE reset_MSE(VALUE self)
 {
 struct fann* f;
@@ -672,7 +720,7 @@ static VALUE reset_MSE(VALUE self)
 fann_reset_MSE(f);
 }
 
-/** Get the type of network. Returns as ruby symbol (one of :
+/** Get the type of network. Returns as ruby symbol (one of :shortcut, :layer)*/
 static VALUE get_network_type(VALUE self)
 {
 struct fann* f;
@@ -684,46 +732,48 @@ static VALUE get_network_type(VALUE self)
 
 if(net_type==FANN_NETTYPE_LAYER)
 {
-ret_val = ID2SYM(rb_intern("
+ret_val = ID2SYM(rb_intern("layer")); // (rb_str_new2("FANN_NETTYPE_LAYER"));
 }
 else if(net_type==FANN_NETTYPE_SHORTCUT)
 {
-ret_val = ID2SYM(rb_intern("
+ret_val = ID2SYM(rb_intern("shortcut")); // (rb_str_new2("FANN_NETTYPE_SHORTCUT"));
 }
 return ret_val;
 }
 
-/** Get the number of input neurons
+/** Get the number of input neurons.*/
 static VALUE get_num_input(VALUE self)
 {
 RETURN_FANN_INT(fann_get_num_input);
 }
 
-/** Get the number of layers in the network
+/** Get the number of layers in the network.*/
 static VALUE get_num_layers(VALUE self)
 {
 RETURN_FANN_INT(fann_get_num_layers);
 }
 
-/** Get the number of output neurons
+/** Get the number of output neurons.*/
 static VALUE get_num_output(VALUE self)
 {
 RETURN_FANN_INT(fann_get_num_output);
 }
 
-/** Get the total number of connections in the entire network
+/** Get the total number of connections in the entire network.*/
 static VALUE get_total_connections(VALUE self)
 {
 RETURN_FANN_INT(fann_get_total_connections);
 }
 
-/** Get the total number of neurons in the entire network
+/** Get the total number of neurons in the entire network.*/
 static VALUE get_total_neurons(VALUE self)
 {
 RETURN_FANN_INT(fann_get_total_neurons);
 }
 
-/**
+/** call-seq: set_train_error_function(train_error_function)
+
+Sets the error function used during training. One of the following symbols:
 :linear, :tanh */
 static VALUE set_train_error_function(VALUE self, VALUE train_error_function)
 {
@@ -746,7 +796,7 @@ static VALUE set_train_error_function(VALUE self, VALUE train_error_function)
 }
 
 /** Returns the error function used during training. One of the following symbols:
-:linear, :tanh
+:linear, :tanh*/
 static VALUE get_train_error_function(VALUE self)
 {
 struct fann* f;
@@ -767,7 +817,9 @@ static VALUE get_train_error_function(VALUE self)
 return ret_val;
 }
 
-/**
+/** call-seq: set_training_algorithm(train_error_function)
+
+Set the training algorithm. One of the following symbols:
 :incremental, :batch, :rprop, :quickprop */
 static VALUE set_training_algorithm(VALUE self, VALUE train_error_function)
 {
@@ -816,7 +868,9 @@ static VALUE get_training_algorithm(VALUE self)
 return ret_val;
 }
 
-/**
+/** call-seq: set_train_stop_function(train_stop_function) -> return value
+
+Set the training stop function. One of the following symbols:
 :mse, :bit */
 static VALUE set_train_stop_function(VALUE self, VALUE train_stop_function)
 {
@@ -878,7 +932,9 @@ static VALUE print_parameters(VALUE self)
 return Qnil;
 }
 
-/**
+/** call-seq: randomize_weights(min_weight, max_weight)
+
+Give each connection a random weight between *min_weight* and *max_weight* */
 static VALUE randomize_weights(VALUE self, VALUE min_weight, VALUE max_weight)
 {
 Check_Type(min_weight, T_FLOAT);
@@ -888,7 +944,10 @@ static VALUE randomize_weights(VALUE self, VALUE min_weight, VALUE max_weight)
 fann_randomize_weights(f, NUM2DBL(min_weight), NUM2DBL(max_weight));
 }
 
-/**
+/** call-seq: run(inputs) -> return value
+
+Run neural net on array<Float> of inputs with current parameters.
+Returns array<Float> as output */
 static VALUE run (VALUE self, VALUE inputs)
 {
 Check_Type(inputs, T_ARRAY);
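In use, run takes an array of Floats sized to the network's inputs and returns one Float per output neuron, as in the website demo included later in this diff:

    outputs = fann.run([0.7, 0.9, 0.2])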
@@ -922,7 +981,9 @@ static VALUE run (VALUE self, VALUE inputs)
 return arr;
 }
 
-/**
+/** call-seq: init_weights(train_data) -> return value
+
+Initialize the weights using Widrow + Nguyen's algorithm. */
 static VALUE init_weights(VALUE self, VALUE train_data)
 {
 
@@ -938,11 +999,13 @@ static VALUE init_weights(VALUE self, VALUE train_data)
 
 
 
-/**
-
-
-
-
+/** call-seq: train_on_data(train_data, max_epochs, epochs_between_reports, desired_error)
+
+Train with training data created with RubyFann::TrainData.new
+max_epochs - The maximum number of epochs the training should continue
+epochs_between_reports - The number of epochs between printing a status report to stdout.
+desired_error - The desired <get_MSE> or <get_bit_fail>, depending on which stop function
+is chosen by <set_train_stop_function>. */
 static VALUE train_on_data(VALUE self, VALUE train_data, VALUE max_epochs, VALUE epochs_between_reports, VALUE desired_error)
 {
 Check_Type(train_data, T_DATA);
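The positional arguments line up with the parameter list just documented. The values below are the ones used in the website demo later in this diff: at most 1000 epochs, a report every epoch, stop once the error drops to 0.1:

    fann.train_on_data(training_data, 1000, 1, 0.1)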
@@ -961,7 +1024,9 @@ static VALUE train_on_data(VALUE self, VALUE train_data, VALUE max_epochs, VALUE epochs_between_reports, VALUE desired_error)
 fann_train_on_data(f, t, fann_max_epochs, fann_epochs_between_reports, fann_desired_error);
 }
 
-/**
+/** call-seq: train_epoch(train_data) -> return value
+
+Train one epoch with a set of training data, created with RubyFann::TrainData.new */
 static VALUE train_epoch(VALUE self, VALUE train_data)
 {
 Check_Type(train_data, T_DATA);
@@ -972,7 +1037,9 @@ static VALUE train_epoch(VALUE self, VALUE train_data)
 return rb_float_new(fann_train_epoch(f, t));
 }
 
-/**
+/** call-seq: test_data(train_data) -> return value
+
+Test a set of training data and calculates the MSE for the training data. */
 static VALUE test_data(VALUE self, VALUE train_data)
 {
 Check_Type(train_data, T_DATA);
@@ -1002,11 +1069,13 @@ static VALUE test_data(VALUE self, VALUE train_data)
 // return INT2NUM(fann_get_multiplier(f));
 // }
 
-/**
-
-
-
-
+/** call-seq: cascadetrain_on_data(train_data, max_neurons, neurons_between_reports, desired_error)
+
+Perform cascade training with training data created with RubyFann::TrainData.new
+max_epochs - The maximum number of neurons in trained network
+neurons_between_reports - The number of neurons between printing a status report to stdout.
+desired_error - The desired <get_MSE> or <get_bit_fail>, depending on which stop function
+is chosen by <set_train_stop_function>. */
 static VALUE cascadetrain_on_data(VALUE self, VALUE train_data, VALUE max_neurons, VALUE neurons_between_reports, VALUE desired_error)
 {
 Check_Type(train_data, T_DATA);
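Cascade training is meant to start from a Shortcut network (see the initialization docs earlier in this diff) and grows it up to max_neurons. A hedged sketch; the training file, neuron budget and error target are illustrative:

    shortcut = RubyFann::Shortcut.new(:num_inputs=>2, :num_outputs=>1)
    xor_data = RubyFann::TrainData.new(:filename => 'xor.train')  # hypothetical file
    shortcut.cascadetrain_on_data(xor_data, 10, 1, 0.01)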
@@ -1033,7 +1102,9 @@ static VALUE get_cascade_output_change_fraction(VALUE self)
 RETURN_FANN_FLT(fann_get_cascade_output_change_fraction);
 }
 
-/**
+/** call-seq: set_cascade_output_change_fraction(cascade_output_change_fraction)
+
+The cascade output change fraction is a number between 0 and 1 */
 static VALUE set_cascade_output_change_fraction(VALUE self, VALUE cascade_output_change_fraction)
 {
 SET_FANN_FLT(cascade_output_change_fraction, fann_set_cascade_output_change_fraction);
@@ -1046,7 +1117,9 @@ static VALUE get_cascade_output_stagnation_epochs(VALUE self)
 RETURN_FANN_INT(fann_get_cascade_output_stagnation_epochs);
 }
 
-/**
+/** call-seq: set_cascade_output_stagnation_epochs(cascade_output_stagnation_epochs)
+
+The number of cascade output stagnation epochs determines the number of epochs training is allowed to
 continue without changing the MSE by a fraction of <get_cascade_output_change_fraction>. */
 static VALUE set_cascade_output_stagnation_epochs(VALUE self, VALUE cascade_output_stagnation_epochs)
 {
@@ -1059,7 +1132,9 @@ static VALUE get_cascade_candidate_change_fraction(VALUE self)
 RETURN_FANN_FLT(fann_get_cascade_candidate_change_fraction);
 }
 
-/**
+/** call-seq: set_cascade_candidate_change_fraction(cascade_candidate_change_fraction)
+
+The cascade candidate change fraction is a number between 0 and 1 */
 static VALUE set_cascade_candidate_change_fraction(VALUE self, VALUE cascade_candidate_change_fraction)
 {
 SET_FANN_FLT(cascade_candidate_change_fraction, fann_set_cascade_candidate_change_fraction);
@@ -1072,7 +1147,9 @@ static VALUE get_cascade_candidate_stagnation_epochs(VALUE self)
 RETURN_FANN_UINT(fann_get_cascade_candidate_stagnation_epochs);
 }
 
-/**
+/** call-seq: set_cascade_candidate_stagnation_epochs(cascade_candidate_stagnation_epochs)
+
+The number of cascade candidate stagnation epochs determines the number of epochs training is allowed to
 continue without changing the MSE by a fraction of <get_cascade_candidate_change_fraction>. */
 static VALUE set_cascade_candidate_stagnation_epochs(VALUE self, VALUE cascade_candidate_stagnation_epochs)
 {
@@ -1087,7 +1164,9 @@ static VALUE get_cascade_weight_multiplier(VALUE self)
 RETURN_FANN_DBL(fann_get_cascade_weight_multiplier);
 }
 
-/**
+/** call-seq: set_cascade_weight_multiplier(cascade_weight_multiplier)
+
+The weight multiplier is a parameter which is used to multiply the weights from the candidate neuron
 before adding the neuron to the neural network. This parameter is usually between 0 and 1, and is used
 to make the training a bit less aggressive. */
 static VALUE set_cascade_weight_multiplier(VALUE self, VALUE cascade_weight_multiplier)
@@ -1102,7 +1181,9 @@ static VALUE get_cascade_candidate_limit(VALUE self)
 RETURN_FANN_DBL(fann_get_cascade_candidate_limit);
 }
 
-/**
+/** call-seq: set_cascade_candidate_limit(cascade_candidate_limit)
+
+The candidate limit is a limit for how much the candidate neuron may be trained.
 The limit is a limit on the proportion between the MSE and candidate score. */
 static VALUE set_cascade_candidate_limit(VALUE self, VALUE cascade_candidate_limit)
 {
@@ -1116,7 +1197,9 @@ static VALUE get_cascade_max_out_epochs(VALUE self)
 RETURN_FANN_UINT(fann_get_cascade_max_out_epochs);
 }
 
-/**
+/** call-seq: set_cascade_max_out_epochs(cascade_max_out_epochs)
+
+The maximum out epochs determines the maximum number of epochs the output connections
 may be trained after adding a new candidate neuron. */
 static VALUE set_cascade_max_out_epochs(VALUE self, VALUE cascade_max_out_epochs)
 {
@@ -1130,7 +1213,9 @@ static VALUE get_cascade_max_cand_epochs(VALUE self)
 RETURN_FANN_UINT(fann_get_cascade_max_cand_epochs);
 }
 
-/**
+/** call-seq: set_cascade_max_cand_epochs(cascade_max_cand_epochs)
+
+The maximum candidate epochs determines the maximum number of epochs the input
 connections to the candidates may be trained before adding a new candidate neuron. */
 static VALUE set_cascade_max_cand_epochs(VALUE self, VALUE cascade_max_cand_epochs)
 {
@@ -1159,7 +1244,9 @@ static VALUE get_learning_rate(VALUE self)
 RETURN_FANN_FLT(fann_get_learning_rate);
 }
 
-/**
+/** call-seq: set_learning_rate(learning_rate) -> return value
+
+The learning rate is used to determine how aggressive training should be for some of the
 training algorithms (:incremental, :batch, :quickprop).
 Do however note that it is not used in :rprop.
 The default learning rate is 0.7. */
@@ -1174,13 +1261,17 @@ static VALUE get_learning_momentum(VALUE self)
 RETURN_FANN_FLT(fann_get_learning_momentum);
 }
 
-/**
+/** call-seq: set_learning_momentum(learning_momentum) -> return value
+
+Set the learning momentum. */
 static VALUE set_learning_momentum(VALUE self, VALUE learning_momentum)
 {
 SET_FANN_FLT(learning_momentum, fann_set_learning_momentum);
 }
 
-/**
+/** call-seq: set_cascade_activation_functions(cascade_activation_functions)
+
+The cascade activation functions is an array of the different activation functions used by
 the candidates. The default is [:sigmoid, :sigmoid_symmetric, :gaussian, :gaussian_symmetric, :elliot, :elliot_symmetric] */
 static VALUE set_cascade_activation_functions(VALUE self, VALUE cascade_activation_functions)
 {
@@ -1233,7 +1324,9 @@ static VALUE get_cascade_num_candidate_groups(VALUE self)
 RETURN_FANN_UINT(fann_get_cascade_num_candidate_groups);
 }
 
-/**
+/** call-seq: set_cascade_num_candidate_groups(cascade_num_candidate_groups)
+
+The number of candidate groups is the number of groups of identical candidates which will be used
 during training. */
 static VALUE set_cascade_num_candidate_groups(VALUE self, VALUE cascade_num_candidate_groups)
 {
@@ -1280,7 +1373,9 @@ static VALUE get_cascade_activation_steepnesses(VALUE self)
 return arr;
 }
 
-/**
+/** call-seq: save(filename) -> return status
+
+Save the entire network to configuration file with given name */
 static VALUE nn_save(VALUE self, VALUE filename)
 {
 struct fann* f;
@@ -1289,7 +1384,7 @@ static VALUE nn_save(VALUE self, VALUE filename)
 return INT2NUM(status);
 }
 
-/** Initializes
+/** Initializes class under RubyFann module/namespace. */
 void Init_neural_network ()
 {
 // RubyFann module/namespace:
data/lib/ruby_fann/version.rb
CHANGED
data/neurotica1.png
CHANGED
Binary file
data/neurotica2.vrml
CHANGED
@@ -4,9 +4,9 @@ Group { children [
 scale 0.028 0.028 0.028
 children [
 Background { skyColor 1.000 1.000 1.000 }
-# node
+# node 8453280
 Transform {
-translation 6.000 46.000
+translation 6.000 46.000 97.000
 scale 2.000 2.000 2.000
 children [
 Transform {
@@ -24,9 +24,9 @@ Transform {
 }
 ]
 }
-# node
+# node 8452100
 Transform {
-translation 50.000 6.000
+translation 50.000 6.000 94.000
 scale 2.000 2.000 2.000
 children [
 Transform {
@@ -44,7 +44,7 @@ Transform {
 }
 ]
 }
-# edge
+# edge 8453280 -> 8452100
 Group { children [
 Transform {
 children [
@@ -79,9 +79,9 @@ Transform {
 translation 24.000 17.000 0.000
 }
 ] }
-# node
+# node 8452650
 Transform {
-translation 28.000 46.000
+translation 28.000 46.000 47.000
 scale 2.000 2.000 2.000
 children [
 Transform {
@@ -99,7 +99,7 @@ Transform {
 }
 ]
 }
-# edge
+# edge 8452650 -> 8452100
 Group { children [
 Transform {
 children [
@@ -134,9 +134,9 @@ Transform {
 translation 35.000 17.000 0.000
 }
 ] }
-# node
+# node 8452530
 Transform {
-translation 50.000 46.000
+translation 50.000 46.000 3.000
 scale 2.000 2.000 2.000
 children [
 Transform {
@@ -154,7 +154,7 @@ Transform {
 }
 ]
 }
-# edge
+# edge 8452530 -> 8452100
 Group { children [
 Transform {
 children [
@@ -189,9 +189,9 @@ Transform {
 translation 46.000 17.000 0.000
 }
 ] }
-# node
+# node 8452390
 Transform {
-translation 72.000 46.000
+translation 72.000 46.000 25.000
 scale 2.000 2.000 2.000
 children [
 Transform {
@@ -209,7 +209,7 @@ Transform {
 }
 ]
 }
-# edge
+# edge 8452390 -> 8452100
 Group { children [
 Transform {
 children [
@@ -244,9 +244,9 @@ Transform {
 translation 57.000 17.000 0.000
 }
 ] }
-# node
+# node 8452310
 Transform {
-translation 94.000 46.000
+translation 94.000 46.000 91.000
 scale 2.000 2.000 2.000
 children [
 Transform {
@@ -264,7 +264,7 @@ Transform {
 }
 ]
 }
-# edge
+# edge 8452310 -> 8452100
 Group { children [
 Transform {
 children [
@@ -300,5 +300,5 @@ Transform {
 }
 ] }
 ] }
-Viewpoint {position 1.852 0.963 6.
+Viewpoint {position 1.852 0.963 6.072}
 ] }
data/test/test_ruby_fann.rb
CHANGED
@@ -28,7 +28,7 @@ class RubyFannTest < Test::Unit::TestCase
 assert_equal(2, fann2.get_num_layers)
 assert_equal(1, fann2.get_num_input)
 assert_equal(2, fann2.get_num_output)
-assert_equal(:
+assert_equal(:shortcut, fann.get_network_type)
 
 sc = MyShortcut.new
 end
@@ -103,7 +103,7 @@ class RubyFannTest < Test::Unit::TestCase
 
 def test_get_network_type
 fann = RubyFann::Standard.new(:num_inputs=>5, :hidden_neurons=>[2, 8, 4, 3, 4], :num_outputs=>1)
-assert_equal(:
+assert_equal(:layer, fann.get_network_type)
 puts "fann.get_network_type: #{fann.get_network_type}"
 end
 
data/website/index.html
CHANGED
@@ -33,7 +33,7 @@
 <h1>ruby-fann</h1>
 <div id="version" class="clickable" onclick='document.location = "http://rubyforge.org/projects/ruby-fann"; return false'>
 <p>Get Version</p>
-<a href="http://rubyforge.org/projects/ruby-fann" class="numbers">0.7.
+<a href="http://rubyforge.org/projects/ruby-fann" class="numbers">0.7.4</a>
 </div>
 <p><em>Bindings to use <a href="http://leenissen.dk/fann/"><span class="caps">FANN</span></a> (Fast Artificial Neural Network) from within ruby/rails environment.</em></p>
 
@@ -58,29 +58,42 @@
 
 
 <h2>Requirements:</h2>
-
-
 <ul>
-<li><span class="caps">FANN 2</span>.1 or greater (preferably in /usr/local/lib)
-
-<li>
+<li><span class="caps">FANN 2</span>.1 or greater (preferably in /usr/local/lib)
+* Ruby 1.8.6 or greater. Windows Ruby should be built natively in Cygwin/MingW. I haven’t had much luck with any native extensions and the one click installer.</li>
+<li>graphviz and ruby-graphviz is required for <a href="http://ruby-fann.rubyforge.org/rdoc/classes/RubyFann/Neurotica.html">Neurotica</a> (<em>experimental</em>) graphical output.</li>
+<li>gnu make tools or equiv for native code in ext (tested on linux, mac os x, and windows with <a href="http://www.cygwin.com/">Cygwin</a>)</li>
 </ul>
 
 
+<h2>Unit Tests</h2>
+
+
+<p>ruby-fann Gem has full complement of unit tests that are executed before the gem is deployed/redeployed to RubyForge.</p>
+
+
 <h2>Demonstration of usage</h2>
 
 
 <p><pre class='syntax'>
 <span class="ident">require</span> <span class="punct">'</span><span class="string">rubygems</span><span class="punct">'</span>
 <span class="ident">require</span> <span class="punct">'</span><span class="string">ruby_fann/neural_network</span><span class="punct">'</span>
+
+<span class="comment"># Create Training data with 2 each of inputs(array of 3) & desired outputs(array of 1).</span>
 <span class="ident">training_data</span> <span class="punct">=</span> <span class="constant">RubyFann</span><span class="punct">::</span><span class="constant">TrainData</span><span class="punct">.</span><span class="ident">new</span><span class="punct">(</span>
 <span class="symbol">:inputs=</span><span class="punct">>[[</span><span class="number">0.3</span><span class="punct">,</span> <span class="number">0.4</span><span class="punct">,</span> <span class="number">0.5</span><span class="punct">],</span> <span class="punct">[</span><span class="number">0.1</span><span class="punct">,</span> <span class="number">0.2</span><span class="punct">,</span> <span class="number">0.3</span><span class="punct">]],</span>
 <span class="symbol">:desired_outputs=</span><span class="punct">>[[</span><span class="number">0.7</span><span class="punct">],</span> <span class="punct">[</span><span class="number">0.8</span><span class="punct">]])</span>
+
+<span class="comment"># Create FANN Neural Network to match appropriate training data:</span>
 <span class="ident">fann</span> <span class="punct">=</span> <span class="constant">RubyFann</span><span class="punct">::</span><span class="constant">Standard</span><span class="punct">.</span><span class="ident">new</span><span class="punct">(</span>
 <span class="symbol">:num_inputs=</span><span class="punct">></span><span class="number">3</span><span class="punct">,</span>
 <span class="symbol">:hidden_neurons=</span><span class="punct">>[</span><span class="number">2</span><span class="punct">,</span> <span class="number">8</span><span class="punct">,</span> <span class="number">4</span><span class="punct">,</span> <span class="number">3</span><span class="punct">,</span> <span class="number">4</span><span class="punct">],</span>
 <span class="symbol">:num_outputs=</span><span class="punct">></span><span class="number">1</span><span class="punct">)</span>
+
+<span class="comment"># Training using data created above:</span>
 <span class="ident">fann</span><span class="punct">.</span><span class="ident">train_on_data</span><span class="punct">(</span><span class="ident">training_data</span><span class="punct">,</span> <span class="number">1000</span><span class="punct">,</span> <span class="number">1</span><span class="punct">,</span> <span class="number">0.1</span><span class="punct">)</span>
+
+<span class="comment"># Run with different input data:</span>
 <span class="ident">outputs</span> <span class="punct">=</span> <span class="ident">fann</span><span class="punct">.</span><span class="ident">run</span><span class="punct">([</span><span class="number">0.7</span><span class="punct">,</span> <span class="number">0.9</span><span class="punct">,</span> <span class="number">0.2</span><span class="punct">])</span>
 </pre></p>
 
@@ -111,7 +124,7 @@
 
 <p>Comments are welcome. Send an email to <a href="mailto:steven@7bpeople.com">Steven Miers</a> email via the <a href="http://groups.google.com/group/ruby_fann">forum</a></p>
 <p class="coda">
-<a href="steven@7bpeople.com">Steven Miers</a>,
+<a href="steven@7bpeople.com">Steven Miers</a>, 21st December 2007<br>
 </p>
 </div>
 
data/website/index.txt
CHANGED
@@ -17,23 +17,35 @@ h2. Installing
 h2. Requirements:
 
 * FANN 2.1 or greater (preferably in /usr/local/lib)
-* Ruby 1.8.6 or greater.
-
+* Ruby 1.8.6 or greater. Windows Ruby should be built natively in Cygwin/MingW. I haven't had much luck with any native extensions and the one click installer.
+* graphviz and ruby-graphviz is required for "Neurotica":http://ruby-fann.rubyforge.org/rdoc/classes/RubyFann/Neurotica.html (_experimental_) graphical output.
+* gnu make tools or equiv for native code in ext (tested on linux, mac os x, and windows with "Cygwin":http://www.cygwin.com/)
+
+h2. Unit Tests
 
+ruby-fann Gem has full complement of unit tests that are executed before the gem is deployed/redeployed to RubyForge.
 
 h2. Demonstration of usage
 
 <pre syntax="ruby">
 require 'rubygems'
 require 'ruby_fann/neural_network'
+
+# Create Training data with 2 each of inputs(array of 3) & desired outputs(array of 1).
 training_data = RubyFann::TrainData.new(
 :inputs=>[[0.3, 0.4, 0.5], [0.1, 0.2, 0.3]],
 :desired_outputs=>[[0.7], [0.8]])
+
+# Create FANN Neural Network to match appropriate training data:
 fann = RubyFann::Standard.new(
 :num_inputs=>3,
 :hidden_neurons=>[2, 8, 4, 3, 4],
 :num_outputs=>1)
+
+# Training using data created above:
 fann.train_on_data(training_data, 1000, 1, 0.1)
+
+# Run with different input data:
 outputs = fann.run([0.7, 0.9, 0.2])
 </pre>
 
data/xor_cascade.net
CHANGED
@@ -1,5 +1,5 @@
 FANN_FLO_2.1
-num_layers=
+num_layers=4
 learning_rate=0.700000
 connection_rate=1.000000
 network_type=1
@@ -28,7 +28,7 @@ cascade_activation_functions_count=10
 cascade_activation_functions=3 5 7 8 10 11 14 15 16 17
 cascade_activation_steepnesses_count=4
 cascade_activation_steepnesses=2.50000000000000000000e-01 5.00000000000000000000e-01 7.50000000000000000000e-01 1.00000000000000000000e+00
-layer_sizes=3 1 1
+layer_sizes=3 1 1 1
 scale_included=0
-neurons (num_inputs, activation_function, activation_steepness)=(0, 0, 0.00000000000000000000e+00) (0, 0, 0.00000000000000000000e+00) (0, 0, 0.00000000000000000000e+00) (3,
-connections (connected_to_neuron, weight)=(0,
+neurons (num_inputs, activation_function, activation_steepness)=(0, 0, 0.00000000000000000000e+00) (0, 0, 0.00000000000000000000e+00) (0, 0, 0.00000000000000000000e+00) (3, 8, 7.50000000000000000000e-01) (4, 3, 5.00000000000000000000e-01) (5, 5, 5.00000000000000000000e-01)
+connections (connected_to_neuron, weight)=(0, 1.52969231315914755420e+00) (1, 1.50824598276217747284e+00) (2, 2.41220387265133409671e-02) (0, 3.28340708551594306908e-01) (1, 1.04425133310434167799e-01) (2, -6.59535845645816038996e+01) (3, 1.39189639221561134441e-01) (0, 2.68759220062898429582e-01) (1, 2.33959475563315866253e-01) (2, 4.39841832051311443230e-02) (3, 3.95917491761458038013e+01) (4, 2.40432266113240394878e-01)
data/xor_float.net
CHANGED
@@ -31,4 +31,4 @@ cascade_activation_steepnesses=2.50000000000000000000e-01 5.00000000000000000000e-01 7.50000000000000000000e-01 1.00000000000000000000e+00
 layer_sizes=3 4 2
 scale_included=0
 neurons (num_inputs, activation_function, activation_steepness)=(0, 0, 0.00000000000000000000e+00) (0, 0, 0.00000000000000000000e+00) (0, 0, 0.00000000000000000000e+00) (3, 5, 1.00000000000000000000e+00) (3, 5, 1.00000000000000000000e+00) (3, 5, 1.00000000000000000000e+00) (0, 5, 1.00000000000000000000e+00) (4, 5, 1.00000000000000000000e+00) (0, 5, 1.00000000000000000000e+00)
-connections (connected_to_neuron, weight)=(0,
+connections (connected_to_neuron, weight)=(0, 1.81901505141687613865e+00) (1, -1.01895817621591100455e+00) (2, 8.42863331351461964047e-01) (0, 1.57494091898020349696e+00) (1, 1.87383001014118844019e+00) (2, 2.72897195277334425612e+00) (0, 2.21361297535147816262e+00) (1, -2.18159107932092855009e+00) (2, -1.76727410199432211080e+00) (3, -4.44379486136706614019e+00) (4, 3.59005526479922210115e+00) (5, 4.23621265514612499459e+00) (6, 4.67484948162714408770e-01)
metadata
CHANGED
@@ -3,8 +3,8 @@ rubygems_version: 0.9.2
 specification_version: 1
 name: ruby-fann
 version: !ruby/object:Gem::Version
-  version: 0.7.
-date: 2007-12-
+  version: 0.7.5
+date: 2007-12-21 00:00:00 -06:00
 summary: Bindings to use FANN from within ruby/rails environment.
 require_paths:
 - lib