ruby-fann 1.2.5 → 1.2.6

Sign up to get free protection for your applications and to get access to all the features.
checksums.yaml CHANGED
@@ -1,15 +1,15 @@
1
1
  ---
2
2
  !binary "U0hBMQ==":
3
3
  metadata.gz: !binary |-
4
- NjMyNmI4NDY5MjgwMDgzYjkxMDFjOTNkMGE5ZmI4ZWI2Mjc0MjQ0Zg==
4
+ Nzk2ZGQ2ODkzY2ZiN2ZmOGUwNDc1NGE0YzQyOTkyYjZlNTMwY2YzNQ==
5
5
  data.tar.gz: !binary |-
6
- MGU3NjUxNmEyZTFhOTRkMzI2OWU0Zjk5YzQzYjA1MTMxMjlkZGEwYQ==
6
+ YmZmMWYyZjVlMTY2OGJlNzM1OWIxZGE0OWQ0YWI2ZjRhNTA2M2U2Mg==
7
7
  !binary "U0hBNTEy":
8
8
  metadata.gz: !binary |-
9
- MGU5YmI0MWI3NDI2NDE4NGU1Y2FhYjcwMTgzOGJkZDM3MDUyNmE3NTA0OGEz
10
- MmU4NzU3Y2UyMDIwNmNhMjE5MDNiYTMwNjRmY2I4NGQwNzhlZWUzMjM5YWMx
11
- YWYzOWZlY2Q3NmNiN2I5OGVkNTgzOWRiODY1ZWUzMmU1MTk0YWM=
9
+ NGMzNjgwNDc3NjI0NTNlYjliNGI2MTc4NjE4OWViMjRkYzhjNGI3ZGRmNzM5
10
+ NjBmMWZkMzcwZWQ4NjcwYTM4ZjI1NjQ2MmM1OGY5Y2IyNWQxYjNjMTRmN2E1
11
+ YWE5MzJjYTVjNzRmMGEzNWM3NTQ3OTUwMDQ0OGFmM2I0Y2FiNWU=
12
12
  data.tar.gz: !binary |-
13
- ZjQyOTI0NjU0NDQ5YWE0MmQ3ZDE0ZTMwMDgyMmM0YTkyZWQ5MTFkODVmM2Ji
14
- YmE3YTU1YWEyZmI1ZjkyNDNmZDE0N2MyNzVjOTc2NGFjZmViM2EyZTI2N2Iy
15
- ZDZlYjg0ZjI4NGRiNDUzMTJhZjFkNTllOGM5OWE2ZGQ5ZTUyMzQ=
13
+ ZTIyYWY3MDUyNDdiZDIwZDc3ZjI5YzEyZmRhNDIwNmExYTBhMzhlNzdjZDdi
14
+ N2RlZDE1YjhjMWQ4YjBkZTU3ZDkxZjRhNTYyNDFhMzUzOWVkMmRmZWRkMjkz
15
+ NjY1YmVkOWIwMzczYjU0Mzc1MzlkOWZiNzIzZDVhZGY4YjI4MTA=
data/README.md CHANGED
@@ -1,7 +1,12 @@
1
1
  # RubyFann
2
+ *Artificial Intelligence in Ruby*
3
+
4
+ [![Gem Version](https://badge.fury.io/rb/ruby-fann.png)](http://badge.fury.io/rb/ruby-fann)
2
5
 
3
6
  RubyFann, or "ruby-fann" is a ruby gem that binds to FANN (Fast Artificial Neural Network) from within a ruby/rails environment. FANN is a free open source neural network library, which implements multilayer artificial neural networks with support for both fully connected and sparsely connected networks. It is easy to use, versatile, well documented, and fast. RubyFann makes working with neural networks a breeze using ruby, with the added benefit that most of the heavy lifting is done natively.
4
7
 
8
+ A talk given by our friend Ethan from Big-Oh Studios at Lone Star Ruby 2013: http://confreaks.com/videos/2609-lonestarruby2013-neural-networks-with-rubyfann
9
+
5
10
  ## Installation
6
11
 
7
12
  Add this line to your application's Gemfile:
@@ -77,7 +82,7 @@ https://github.com/bigohstudios/tictactoe
77
82
  1. Steven Miers
78
83
  2. Ole Krüger
79
84
  3. dignati
80
- 4. Michal Pokorný
85
+ 4. Michal Pokorny
81
86
  5. Scott Li (locksley)
82
87
 
83
88
  ## Contributing
@@ -1,65 +1,8 @@
1
- /* src/include/config.h. Generated by configure. */
2
- /* config.in. Generated from configure.in by autoheader. */
3
-
4
- /* Define to 1 if you have the <dlfcn.h> header file. */
5
- #define HAVE_DLFCN_H 1
6
-
7
- /* Define to 1 if you have the `gettimeofday' function. */
8
- #define HAVE_GETTIMEOFDAY 1
9
-
10
- /* Define to 1 if you have the <inttypes.h> header file. */
11
- #define HAVE_INTTYPES_H 1
12
-
13
- /* Define to 1 if you have the <memory.h> header file. */
14
- #define HAVE_MEMORY_H 1
15
-
16
- /* Define to 1 if you have the <stdint.h> header file. */
17
- #define HAVE_STDINT_H 1
18
-
19
- /* Define to 1 if you have the <stdlib.h> header file. */
20
- #define HAVE_STDLIB_H 1
21
-
22
- /* Define to 1 if you have the <strings.h> header file. */
23
- #define HAVE_STRINGS_H 1
24
-
25
- /* Define to 1 if you have the <string.h> header file. */
26
- #define HAVE_STRING_H 1
27
-
28
- /* Define to 1 if you have the <sys/stat.h> header file. */
29
- #define HAVE_SYS_STAT_H 1
30
-
31
- /* Define to 1 if you have the <sys/types.h> header file. */
32
- #define HAVE_SYS_TYPES_H 1
33
-
34
- /* Define to 1 if you have the <unistd.h> header file. */
35
- #define HAVE_UNISTD_H 1
36
-
37
1
  /* Name of package */
38
- #define PACKAGE "fann"
39
-
40
- /* Define to the address where bug reports for this package should be sent. */
41
- #define PACKAGE_BUGREPORT ""
42
-
43
- /* Define to the full name of this package. */
44
- #define PACKAGE_NAME ""
45
-
46
- /* Define to the full name and version of this package. */
47
- #define PACKAGE_STRING ""
48
-
49
- /* Define to the one symbol short name of this package. */
50
- #define PACKAGE_TARNAME ""
51
-
52
- /* Define to the version of this package. */
53
- #define PACKAGE_VERSION ""
54
-
55
- /* Define to 1 if you have the ANSI C header files. */
56
- #define STDC_HEADERS 1
57
-
58
- /* Define to 1 if you can safely include both <sys/time.h> and <time.h>. */
59
- #define TIME_WITH_SYS_TIME 1
2
+ /* #undef PACKAGE */
60
3
 
61
4
  /* Version number of package */
62
- #define VERSION "2.1.0"
5
+ #define VERSION "2.2.0"
63
6
 
64
- /* Define to empty if `const' does not conform to ANSI C. */
65
- /* #undef const */
7
+ /* Define for the x86_64 CPU family */
8
+ /* #undef X86_64 */
@@ -1,6 +1,6 @@
1
1
  /*
2
2
  Fast Artificial Neural Network Library (fann)
3
- Copyright (C) 2003 Steffen Nissen (lukesky@diku.dk)
3
+ Copyright (C) 2003-2012 Steffen Nissen (sn@leenissen.dk)
4
4
 
5
5
  This library is free software; you can redistribute it and/or
6
6
  modify it under the terms of the GNU Lesser General Public
@@ -1,6 +1,6 @@
1
1
  /*
2
2
  Fast Artificial Neural Network Library (fann)
3
- Copyright (C) 2003 Steffen Nissen (lukesky@diku.dk)
3
+ Copyright (C) 2003-2012 Steffen Nissen (sn@leenissen.dk)
4
4
 
5
5
  This library is free software; you can redistribute it and/or
6
6
  modify it under the terms of the GNU Lesser General Public
@@ -1,6 +1,6 @@
1
1
  /*
2
2
  Fast Artificial Neural Network Library (fann)
3
- Copyright (C) 2003 Steffen Nissen (lukesky@diku.dk)
3
+ Copyright (C) 2003-2012 Steffen Nissen (sn@leenissen.dk)
4
4
 
5
5
  This library is free software; you can redistribute it and/or
6
6
  modify it under the terms of the GNU Lesser General Public
@@ -27,6 +27,8 @@
27
27
  #include "config.h"
28
28
  #include "fann.h"
29
29
 
30
+ /* #define FANN_NO_SEED */
31
+
30
32
  FANN_EXTERNAL struct fann *FANN_API fann_create_standard(unsigned int num_layers, ...)
31
33
  {
32
34
  struct fann *ann;
@@ -104,7 +106,6 @@ FANN_EXTERNAL struct fann *FANN_API fann_create_sparse_array(float connection_ra
104
106
  unsigned int random_number, found_connection, tmp_con;
105
107
 
106
108
  #ifdef FIXEDFANN
107
- unsigned int decimal_point;
108
109
  unsigned int multiplier;
109
110
  #endif
110
111
  if(connection_rate > 1)
@@ -127,7 +128,6 @@ FANN_EXTERNAL struct fann *FANN_API fann_create_sparse_array(float connection_ra
127
128
 
128
129
  ann->connection_rate = connection_rate;
129
130
  #ifdef FIXEDFANN
130
- decimal_point = ann->decimal_point;
131
131
  multiplier = ann->multiplier;
132
132
  fann_update_stepwise(ann);
133
133
  #endif
@@ -158,7 +158,7 @@ FANN_EXTERNAL struct fann *FANN_API fann_create_sparse_array(float connection_ra
158
158
  printf("creating network with connection rate %f\n", connection_rate);
159
159
  printf("input\n");
160
160
  printf(" layer : %d neurons, 1 bias\n",
161
- ann->first_layer->last_neuron - ann->first_layer->first_neuron - 1);
161
+ (int)(ann->first_layer->last_neuron - ann->first_layer->first_neuron - 1));
162
162
  #endif
163
163
 
164
164
  num_neurons_in = ann->num_input;
@@ -169,11 +169,11 @@ FANN_EXTERNAL struct fann *FANN_API fann_create_sparse_array(float connection_ra
169
169
  * in the previous layer, and one neuron in the next layer.
170
170
  * and the bias node should be connected to the all neurons in the next layer.
171
171
  * Then this is the minimum amount of neurons */
172
- min_connections = fann_max(num_neurons_in, num_neurons_out) + num_neurons_out;
173
- max_connections = num_neurons_in * num_neurons_out; /* not calculating bias */
172
+ min_connections = fann_max(num_neurons_in, num_neurons_out); /* not calculating bias */
173
+ max_connections = num_neurons_in * num_neurons_out; /* not calculating bias */
174
174
  num_connections = fann_max(min_connections,
175
- (unsigned int) (0.5 + (connection_rate * max_connections)) +
176
- num_neurons_out);
175
+ (unsigned int) (0.5 + (connection_rate * max_connections))) +
176
+ num_neurons_out;
177
177
 
178
178
  connections_per_neuron = num_connections / num_neurons_out;
179
179
  allocated_connections = 0;
@@ -401,7 +401,6 @@ FANN_EXTERNAL struct fann *FANN_API fann_create_shortcut_array(unsigned int num_
401
401
  unsigned int num_neurons_in, num_neurons_out;
402
402
 
403
403
  #ifdef FIXEDFANN
404
- unsigned int decimal_point;
405
404
  unsigned int multiplier;
406
405
  #endif
407
406
  /* seed random */
@@ -420,7 +419,6 @@ FANN_EXTERNAL struct fann *FANN_API fann_create_shortcut_array(unsigned int num_
420
419
  ann->connection_rate = 1;
421
420
  ann->network_type = FANN_NETTYPE_SHORTCUT;
422
421
  #ifdef FIXEDFANN
423
- decimal_point = ann->decimal_point;
424
422
  multiplier = ann->multiplier;
425
423
  fann_update_stepwise(ann);
426
424
  #endif
@@ -457,7 +455,7 @@ FANN_EXTERNAL struct fann *FANN_API fann_create_shortcut_array(unsigned int num_
457
455
  printf("creating fully shortcut connected network.\n");
458
456
  printf("input\n");
459
457
  printf(" layer : %d neurons, 1 bias\n",
460
- ann->first_layer->last_neuron - ann->first_layer->first_neuron - 1);
458
+ (int)(ann->first_layer->last_neuron - ann->first_layer->first_neuron - 1));
461
459
  #endif
462
460
 
463
461
  num_neurons_in = ann->num_input;
@@ -550,7 +548,7 @@ FANN_EXTERNAL fann_type *FANN_API fann_run(struct fann * ann, fann_type * input)
550
548
  fann_type last_steepness = 0;
551
549
  unsigned int last_activation_function = 0;
552
550
  #else
553
- fann_type max_sum;
551
+ fann_type max_sum = 0;
554
552
  #endif
555
553
 
556
554
  /* first set the input */
@@ -831,6 +829,241 @@ FANN_EXTERNAL void FANN_API fann_randomize_weights(struct fann *ann, fann_type m
831
829
  #endif
832
830
  }
833
831
 
832
+ /* deep copy of the fann structure */
833
+ FANN_EXTERNAL struct fann* FANN_API fann_copy(struct fann* orig)
834
+ {
835
+ struct fann* copy;
836
+ unsigned int num_layers = orig->last_layer - orig->first_layer;
837
+ struct fann_layer *orig_layer_it, *copy_layer_it;
838
+ unsigned int layer_size;
839
+ struct fann_neuron *last_neuron,*orig_neuron_it,*copy_neuron_it;
840
+ unsigned int i;
841
+ struct fann_neuron *orig_first_neuron,*copy_first_neuron;
842
+ unsigned int input_neuron;
843
+
844
+ copy = fann_allocate_structure(num_layers);
845
+ if (copy==NULL) {
846
+ fann_error((struct fann_error*)orig, FANN_E_CANT_ALLOCATE_MEM);
847
+ return NULL;
848
+ }
849
+ copy->errno_f = orig->errno_f;
850
+ if (orig->errstr)
851
+ {
852
+ copy->errstr = (char *) malloc(FANN_ERRSTR_MAX);
853
+ if (copy->errstr == NULL)
854
+ {
855
+ fann_destroy(copy);
856
+ return NULL;
857
+ }
858
+ strcpy(copy->errstr,orig->errstr);
859
+ }
860
+ copy->error_log = orig->error_log;
861
+
862
+ copy->learning_rate = orig->learning_rate;
863
+ copy->learning_momentum = orig->learning_momentum;
864
+ copy->connection_rate = orig->connection_rate;
865
+ copy->network_type = orig->network_type;
866
+ copy->num_MSE = orig->num_MSE;
867
+ copy->MSE_value = orig->MSE_value;
868
+ copy->num_bit_fail = orig->num_bit_fail;
869
+ copy->bit_fail_limit = orig->bit_fail_limit;
870
+ copy->train_error_function = orig->train_error_function;
871
+ copy->train_stop_function = orig->train_stop_function;
872
+ copy->callback = orig->callback;
873
+ copy->cascade_output_change_fraction = orig->cascade_output_change_fraction;
874
+ copy->cascade_output_stagnation_epochs = orig->cascade_output_stagnation_epochs;
875
+ copy->cascade_candidate_change_fraction = orig->cascade_candidate_change_fraction;
876
+ copy->cascade_candidate_stagnation_epochs = orig->cascade_candidate_stagnation_epochs;
877
+ copy->cascade_best_candidate = orig->cascade_best_candidate;
878
+ copy->cascade_candidate_limit = orig->cascade_candidate_limit;
879
+ copy->cascade_weight_multiplier = orig->cascade_weight_multiplier;
880
+ copy->cascade_max_out_epochs = orig->cascade_max_out_epochs;
881
+ copy->cascade_max_cand_epochs = orig->cascade_max_cand_epochs;
882
+ copy->user_data = orig->user_data;
883
+
884
+ /* copy cascade activation functions */
885
+ copy->cascade_activation_functions_count = orig->cascade_activation_functions_count;
886
+ copy->cascade_activation_functions = (enum fann_activationfunc_enum *)realloc(copy->cascade_activation_functions,
887
+ copy->cascade_activation_functions_count * sizeof(enum fann_activationfunc_enum));
888
+ if(copy->cascade_activation_functions == NULL)
889
+ {
890
+ fann_error((struct fann_error*)orig, FANN_E_CANT_ALLOCATE_MEM);
891
+ fann_destroy(copy);
892
+ return NULL;
893
+ }
894
+ memcpy(copy->cascade_activation_functions,orig->cascade_activation_functions,
895
+ copy->cascade_activation_functions_count * sizeof(enum fann_activationfunc_enum));
896
+
897
+ /* copy cascade activation steepnesses */
898
+ copy->cascade_activation_steepnesses_count = orig->cascade_activation_steepnesses_count;
899
+ copy->cascade_activation_steepnesses = (fann_type *)realloc(copy->cascade_activation_steepnesses, copy->cascade_activation_steepnesses_count * sizeof(fann_type));
900
+ if(copy->cascade_activation_steepnesses == NULL)
901
+ {
902
+ fann_error((struct fann_error*)orig, FANN_E_CANT_ALLOCATE_MEM);
903
+ fann_destroy(copy);
904
+ return NULL;
905
+ }
906
+ memcpy(copy->cascade_activation_steepnesses,orig->cascade_activation_steepnesses,copy->cascade_activation_steepnesses_count * sizeof(fann_type));
907
+
908
+ copy->cascade_num_candidate_groups = orig->cascade_num_candidate_groups;
909
+
910
+ /* copy candidate scores, if used */
911
+ if (orig->cascade_candidate_scores == NULL)
912
+ {
913
+ copy->cascade_candidate_scores = NULL;
914
+ }
915
+ else
916
+ {
917
+ copy->cascade_candidate_scores =
918
+ (fann_type *) malloc(fann_get_cascade_num_candidates(copy) * sizeof(fann_type));
919
+ if(copy->cascade_candidate_scores == NULL)
920
+ {
921
+ fann_error((struct fann_error *) orig, FANN_E_CANT_ALLOCATE_MEM);
922
+ fann_destroy(copy);
923
+ return NULL;
924
+ }
925
+ memcpy(copy->cascade_candidate_scores,orig->cascade_candidate_scores,fann_get_cascade_num_candidates(copy) * sizeof(fann_type));
926
+ }
927
+
928
+ copy->quickprop_decay = orig->quickprop_decay;
929
+ copy->quickprop_mu = orig->quickprop_mu;
930
+ copy->rprop_increase_factor = orig->rprop_increase_factor;
931
+ copy->rprop_decrease_factor = orig->rprop_decrease_factor;
932
+ copy->rprop_delta_min = orig->rprop_delta_min;
933
+ copy->rprop_delta_max = orig->rprop_delta_max;
934
+ copy->rprop_delta_zero = orig->rprop_delta_zero;
935
+
936
+ /* user_data is not deep copied. user should use fann_copy_with_user_data() for that */
937
+ copy->user_data = orig->user_data;
938
+
939
+ #ifdef FIXEDFANN
940
+ copy->decimal_point = orig->decimal_point;
941
+ copy->multiplier = orig->multiplier;
942
+ memcpy(copy->sigmoid_results,orig->sigmoid_results,6*sizeof(fann_type));
943
+ memcpy(copy->sigmoid_values,orig->sigmoid_values,6*sizeof(fann_type));
944
+ memcpy(copy->sigmoid_symmetric_results,orig->sigmoid_symmetric_results,6*sizeof(fann_type));
945
+ memcpy(copy->sigmoid_symmetric_values,orig->sigmoid_symmetric_values,6*sizeof(fann_type));
946
+ #endif
947
+
948
+
949
+ /* copy layer sizes, prepare for fann_allocate_neurons */
950
+ for (orig_layer_it = orig->first_layer, copy_layer_it = copy->first_layer;
951
+ orig_layer_it != orig->last_layer; orig_layer_it++, copy_layer_it++)
952
+ {
953
+ layer_size = orig_layer_it->last_neuron - orig_layer_it->first_neuron;
954
+ copy_layer_it->first_neuron = NULL;
955
+ copy_layer_it->last_neuron = copy_layer_it->first_neuron + layer_size;
956
+ copy->total_neurons += layer_size;
957
+ }
958
+ copy->num_input = orig->num_input;
959
+ copy->num_output = orig->num_output;
960
+
961
+
962
+ /* copy scale parameters, when used */
963
+ #ifndef FIXEDFANN
964
+ if (orig->scale_mean_in != NULL)
965
+ {
966
+ fann_allocate_scale(copy);
967
+ for (i=0; i < orig->num_input ; i++) {
968
+ copy->scale_mean_in[i] = orig->scale_mean_in[i];
969
+ copy->scale_deviation_in[i] = orig->scale_deviation_in[i];
970
+ copy->scale_new_min_in[i] = orig->scale_new_min_in[i];
971
+ copy->scale_factor_in[i] = orig->scale_factor_in[i];
972
+ }
973
+ for (i=0; i < orig->num_output ; i++) {
974
+ copy->scale_mean_out[i] = orig->scale_mean_out[i];
975
+ copy->scale_deviation_out[i] = orig->scale_deviation_out[i];
976
+ copy->scale_new_min_out[i] = orig->scale_new_min_out[i];
977
+ copy->scale_factor_out[i] = orig->scale_factor_out[i];
978
+ }
979
+ }
980
+ #endif
981
+
982
+ /* copy the neurons */
983
+ fann_allocate_neurons(copy);
984
+ if (copy->errno_f == FANN_E_CANT_ALLOCATE_MEM)
985
+ {
986
+ fann_destroy(copy);
987
+ return NULL;
988
+ }
989
+ layer_size = (orig->last_layer-1)->last_neuron - (orig->last_layer-1)->first_neuron;
990
+ memcpy(copy->output,orig->output, layer_size * sizeof(fann_type));
991
+
992
+ last_neuron = (orig->last_layer - 1)->last_neuron;
993
+ for (orig_neuron_it = orig->first_layer->first_neuron, copy_neuron_it = copy->first_layer->first_neuron;
994
+ orig_neuron_it != last_neuron; orig_neuron_it++, copy_neuron_it++)
995
+ {
996
+ memcpy(copy_neuron_it,orig_neuron_it,sizeof(struct fann_neuron));
997
+ }
998
+ /* copy the connections */
999
+ copy->total_connections = orig->total_connections;
1000
+ fann_allocate_connections(copy);
1001
+ if (copy->errno_f == FANN_E_CANT_ALLOCATE_MEM)
1002
+ {
1003
+ fann_destroy(copy);
1004
+ return NULL;
1005
+ }
1006
+
1007
+ orig_first_neuron = orig->first_layer->first_neuron;
1008
+ copy_first_neuron = copy->first_layer->first_neuron;
1009
+ for (i=0; i < orig->total_connections; i++)
1010
+ {
1011
+ copy->weights[i] = orig->weights[i];
1012
+ input_neuron = orig->connections[i] - orig_first_neuron;
1013
+ copy->connections[i] = copy_first_neuron + input_neuron;
1014
+ }
1015
+
1016
+ if (orig->train_slopes)
1017
+ {
1018
+ copy->train_slopes = (fann_type *) malloc(copy->total_connections_allocated * sizeof(fann_type));
1019
+ if (copy->train_slopes == NULL)
1020
+ {
1021
+ fann_error((struct fann_error *) orig, FANN_E_CANT_ALLOCATE_MEM);
1022
+ fann_destroy(copy);
1023
+ return NULL;
1024
+ }
1025
+ memcpy(copy->train_slopes,orig->train_slopes,copy->total_connections_allocated * sizeof(fann_type));
1026
+ }
1027
+
1028
+ if (orig->prev_steps)
1029
+ {
1030
+ copy->prev_steps = (fann_type *) malloc(copy->total_connections_allocated * sizeof(fann_type));
1031
+ if (copy->prev_steps == NULL)
1032
+ {
1033
+ fann_error((struct fann_error *) orig, FANN_E_CANT_ALLOCATE_MEM);
1034
+ fann_destroy(copy);
1035
+ return NULL;
1036
+ }
1037
+ memcpy(copy->prev_steps, orig->prev_steps, copy->total_connections_allocated * sizeof(fann_type));
1038
+ }
1039
+
1040
+ if (orig->prev_train_slopes)
1041
+ {
1042
+ copy->prev_train_slopes = (fann_type *) malloc(copy->total_connections_allocated * sizeof(fann_type));
1043
+ if (copy->prev_train_slopes == NULL)
1044
+ {
1045
+ fann_error((struct fann_error *) orig, FANN_E_CANT_ALLOCATE_MEM);
1046
+ fann_destroy(copy);
1047
+ return NULL;
1048
+ }
1049
+ memcpy(copy->prev_train_slopes,orig->prev_train_slopes, copy->total_connections_allocated * sizeof(fann_type));
1050
+ }
1051
+
1052
+ if (orig->prev_weights_deltas)
1053
+ {
1054
+ copy->prev_weights_deltas = (fann_type *) malloc(copy->total_connections_allocated * sizeof(fann_type));
1055
+ if(copy->prev_weights_deltas == NULL)
1056
+ {
1057
+ fann_error((struct fann_error *) orig, FANN_E_CANT_ALLOCATE_MEM);
1058
+ fann_destroy(copy);
1059
+ return NULL;
1060
+ }
1061
+ memcpy(copy->prev_weights_deltas, orig->prev_weights_deltas,copy->total_connections_allocated * sizeof(fann_type));
1062
+ }
1063
+
1064
+ return copy;
1065
+ }
1066
+
834
1067
  FANN_EXTERNAL void FANN_API fann_print_connections(struct fann *ann)
835
1068
  {
836
1069
  struct fann_layer *layer_it;
@@ -886,8 +1119,8 @@ FANN_EXTERNAL void FANN_API fann_print_connections(struct fann *ann)
886
1119
  neurons[ann->connections[i] - ann->first_layer->first_neuron] = (char)('A' + value);
887
1120
  }
888
1121
  }
889
- printf("L %3d / N %4d %s\n", layer_it - ann->first_layer,
890
- neuron_it - ann->first_layer->first_neuron, neurons);
1122
+ printf("L %3d / N %4d %s\n", (int)(layer_it - ann->first_layer),
1123
+ (int)(neuron_it - ann->first_layer->first_neuron), neurons);
891
1124
  }
892
1125
  }
893
1126
 
@@ -988,12 +1221,12 @@ FANN_EXTERNAL void FANN_API fann_print_parameters(struct fann *ann)
988
1221
  if(ann->network_type == FANN_NETTYPE_SHORTCUT)
989
1222
  {
990
1223
  printf(" Hidden layer :%4d neurons, 0 bias\n",
991
- layer_it->last_neuron - layer_it->first_neuron);
1224
+ (int)(layer_it->last_neuron - layer_it->first_neuron));
992
1225
  }
993
1226
  else
994
1227
  {
995
1228
  printf(" Hidden layer :%4d neurons, 1 bias\n",
996
- layer_it->last_neuron - layer_it->first_neuron - 1);
1229
+ (int)(layer_it->last_neuron - layer_it->first_neuron - 1));
997
1230
  }
998
1231
  }
999
1232
  printf("Output layer :%4d neurons\n", ann->num_output);
@@ -1026,7 +1259,9 @@ FANN_EXTERNAL void FANN_API fann_print_parameters(struct fann *ann)
1026
1259
  printf("Cascade output stagnation epochs :%4d\n", ann->cascade_output_stagnation_epochs);
1027
1260
  printf("Cascade candidate stagnation epochs :%4d\n", ann->cascade_candidate_stagnation_epochs);
1028
1261
  printf("Cascade max output epochs :%4d\n", ann->cascade_max_out_epochs);
1262
+ printf("Cascade min output epochs :%4d\n", ann->cascade_min_out_epochs);
1029
1263
  printf("Cascade max candidate epochs :%4d\n", ann->cascade_max_cand_epochs);
1264
+ printf("Cascade min candidate epochs :%4d\n", ann->cascade_min_cand_epochs);
1030
1265
  printf("Cascade weight multiplier :%8.3f\n", ann->cascade_weight_multiplier);
1031
1266
  printf("Cascade candidate limit :%8.3f\n", ann->cascade_candidate_limit);
1032
1267
  for(i = 0; i < ann->cascade_activation_functions_count; i++)
@@ -1141,7 +1376,7 @@ FANN_EXTERNAL void FANN_API fann_get_connection_array(struct fann *ann, struct f
1141
1376
  struct fann_neuron *first_neuron;
1142
1377
  struct fann_layer *layer_it;
1143
1378
  struct fann_neuron *neuron_it;
1144
- unsigned int index;
1379
+ unsigned int idx;
1145
1380
  unsigned int source_index;
1146
1381
  unsigned int destination_index;
1147
1382
 
@@ -1157,7 +1392,7 @@ FANN_EXTERNAL void FANN_API fann_get_connection_array(struct fann *ann, struct f
1157
1392
  /* for each neuron */
1158
1393
  for(neuron_it = layer_it->first_neuron; neuron_it != layer_it->last_neuron; neuron_it++){
1159
1394
  /* for each connection */
1160
- for (index = neuron_it->first_con; index < neuron_it->last_con; index++){
1395
+ for (idx = neuron_it->first_con; idx < neuron_it->last_con; idx++){
1161
1396
  /* Assign the source, destination and weight */
1162
1397
  connections->from_neuron = ann->connections[source_index] - first_neuron;
1163
1398
  connections->to_neuron = destination_index;
@@ -1174,11 +1409,11 @@ FANN_EXTERNAL void FANN_API fann_get_connection_array(struct fann *ann, struct f
1174
1409
  FANN_EXTERNAL void FANN_API fann_set_weight_array(struct fann *ann,
1175
1410
  struct fann_connection *connections, unsigned int num_connections)
1176
1411
  {
1177
- unsigned int index;
1412
+ unsigned int idx;
1178
1413
 
1179
- for (index = 0; index < num_connections; index++) {
1180
- fann_set_weight(ann, connections[index].from_neuron,
1181
- connections[index].to_neuron, connections[index].weight);
1414
+ for (idx = 0; idx < num_connections; idx++) {
1415
+ fann_set_weight(ann, connections[idx].from_neuron,
1416
+ connections[idx].to_neuron, connections[idx].weight);
1182
1417
  }
1183
1418
  }
1184
1419
 
@@ -1188,7 +1423,7 @@ FANN_EXTERNAL void FANN_API fann_set_weight(struct fann *ann,
1188
1423
  struct fann_neuron *first_neuron;
1189
1424
  struct fann_layer *layer_it;
1190
1425
  struct fann_neuron *neuron_it;
1191
- unsigned int index;
1426
+ unsigned int idx;
1192
1427
  unsigned int source_index;
1193
1428
  unsigned int destination_index;
1194
1429
 
@@ -1206,7 +1441,7 @@ FANN_EXTERNAL void FANN_API fann_set_weight(struct fann *ann,
1206
1441
  /* for each neuron */
1207
1442
  for(neuron_it = layer_it->first_neuron; neuron_it != layer_it->last_neuron; neuron_it++){
1208
1443
  /* for each connection */
1209
- for (index = neuron_it->first_con; index < neuron_it->last_con; index++){
1444
+ for (idx = neuron_it->first_con; idx < neuron_it->last_con; idx++){
1210
1445
  /* If the source and destination neurons match, assign the weight */
1211
1446
  if (((int)from_neuron == ann->connections[source_index] - first_neuron) &&
1212
1447
  (to_neuron == destination_index))
@@ -1347,6 +1582,8 @@ struct fann *fann_allocate_structure(unsigned int num_layers)
1347
1582
  ann->cascade_candidate_limit = (fann_type)1000.0;
1348
1583
  ann->cascade_max_out_epochs = 150;
1349
1584
  ann->cascade_max_cand_epochs = 150;
1585
+ ann->cascade_min_out_epochs = 50;
1586
+ ann->cascade_min_cand_epochs = 50;
1350
1587
  ann->cascade_candidate_scores = NULL;
1351
1588
  ann->cascade_activation_functions_count = 10;
1352
1589
  ann->cascade_activation_functions =
@@ -1388,16 +1625,23 @@ struct fann *fann_allocate_structure(unsigned int num_layers)
1388
1625
  ann->cascade_activation_steepnesses[3] = (fann_type)1.0;
1389
1626
 
1390
1627
  /* Variables for use with with Quickprop training (reasonable defaults) */
1391
- ann->quickprop_decay = (float) -0.0001;
1628
+ ann->quickprop_decay = -0.0001f;
1392
1629
  ann->quickprop_mu = 1.75;
1393
1630
 
1394
1631
  /* Variables for use with with RPROP training (reasonable defaults) */
1395
- ann->rprop_increase_factor = (float) 1.2;
1632
+ ann->rprop_increase_factor = 1.2f;
1396
1633
  ann->rprop_decrease_factor = 0.5;
1397
1634
  ann->rprop_delta_min = 0.0;
1398
1635
  ann->rprop_delta_max = 50.0;
1399
- ann->rprop_delta_zero = 0.1;
1636
+ ann->rprop_delta_zero = 0.1f;
1400
1637
 
1638
+ /* Variables for use with SARPROP training (reasonable defaults) */
1639
+ ann->sarprop_weight_decay_shift = -6.644f;
1640
+ ann->sarprop_step_error_threshold_factor = 0.1f;
1641
+ ann->sarprop_step_error_shift = 1.385f;
1642
+ ann->sarprop_temperature = 0.015f;
1643
+ ann->sarprop_epoch = 0;
1644
+
1401
1645
  fann_init_error_data((struct fann_error *) ann);
1402
1646
 
1403
1647
  #ifdef FIXEDFANN
@@ -1540,7 +1784,14 @@ void fann_seed_rand()
1540
1784
  }
1541
1785
  else
1542
1786
  {
1543
- fread(&foo, sizeof(foo), 1, fp);
1787
+ if(fread(&foo, sizeof(foo), 1, fp) != 1)
1788
+ {
1789
+ gettimeofday(&t, NULL);
1790
+ foo = t.tv_usec;
1791
+ #ifdef DEBUG
1792
+ printf("unable to read from /dev/urandom\n");
1793
+ #endif
1794
+ }
1544
1795
  fclose(fp);
1545
1796
  }
1546
1797
  srand(foo);