moo_fann 0.1.0

Sign up to get free protection for your applications and to get access to all the features.
@@ -0,0 +1,144 @@
1
+ /*
2
+ Fast Artificial Neural Network Library (fann)
3
+ Copyright (C) 2003-2012 Steffen Nissen (sn@leenissen.dk)
4
+
5
+ This library is free software; you can redistribute it and/or
6
+ modify it under the terms of the GNU Lesser General Public
7
+ License as published by the Free Software Foundation; either
8
+ version 2.1 of the License, or (at your option) any later version.
9
+
10
+ This library is distributed in the hope that it will be useful,
11
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
12
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13
+ Lesser General Public License for more details.
14
+
15
+ You should have received a copy of the GNU Lesser General Public
16
+ License along with this library; if not, write to the Free Software
17
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18
+ */
19
+
20
#ifndef __fann_activation_h__
#define __fann_activation_h__
/* internal include file, not to be included directly
 */

/* Implementation of the activation functions.
 *
 * Everything here is a macro so the activation code can be expanded inline
 * in the library's inner loops.  All macro parameters and whole expansions
 * are fully parenthesized so that passing an expression (e.g. `a + b`) or
 * embedding a macro inside a larger expression cannot change precedence.
 */

/* stepwise linear functions used for some of the activation functions */

/* defines used for the stepwise linear functions */

/* Linear interpolation through the points (v1, r1) and (v2, r2),
 * evaluated at `sum`. */
#define fann_linear_func(v1, r1, v2, r2, sum) (((((r2)-(r1)) * ((sum)-(v1)))/((v2)-(v1))) + (r1))

/* Six-piece linear approximation: below v1 the result is `min`, above v6 it
 * is `max`, and between break points v1..v6 the result is interpolated
 * between the corresponding r1..r6 values. */
#define fann_stepwise(v1, v2, v3, v4, v5, v6, r1, r2, r3, r4, r5, r6, min, max, sum) \
	((sum) < (v5) ? \
		((sum) < (v3) ? \
			((sum) < (v2) ? \
				((sum) < (v1) ? (min) : fann_linear_func(v1, r1, v2, r2, sum)) : \
				fann_linear_func(v2, r2, v3, r3, sum)) : \
			((sum) < (v4) ? \
				fann_linear_func(v3, r3, v4, r4, sum) : \
				fann_linear_func(v4, r4, v5, r5, sum))) : \
		((sum) < (v6) ? fann_linear_func(v5, r5, v6, r6, sum) : (max)))

/* FANN_LINEAR */
/* #define fann_linear(steepness, sum) fann_mult(steepness, sum) */
#define fann_linear_derive(steepness, value) (steepness)

/* FANN_SIGMOID */
/* #define fann_sigmoid(steepness, sum) (1.0f/(1.0f + exp(-2.0f * steepness * sum))) */
#define fann_sigmoid_real(sum) (1.0f/(1.0f + exp(-2.0f * (sum))))
#define fann_sigmoid_derive(steepness, value) (2.0f * (steepness) * (value) * (1.0f - (value)))

/* FANN_SIGMOID_SYMMETRIC */
/* #define fann_sigmoid_symmetric(steepness, sum) (2.0f/(1.0f + exp(-2.0f * steepness * sum)) - 1.0f) */
#define fann_sigmoid_symmetric_real(sum) (2.0f/(1.0f + exp(-2.0f * (sum))) - 1.0f)
/* BUGFIX: the original expansion had no outer parentheses, so e.g.
 * `x / fann_sigmoid_symmetric_derive(s, v)` parsed as `(x / s) * (1 - v*v)`
 * instead of dividing by the whole derivative. */
#define fann_sigmoid_symmetric_derive(steepness, value) ((steepness) * (1.0f - ((value)*(value))))

/* FANN_GAUSSIAN */
/* #define fann_gaussian(steepness, sum) (exp(-sum * steepness * sum * steepness)) */
#define fann_gaussian_real(sum) (exp(-(sum) * (sum)))
#define fann_gaussian_derive(steepness, value, sum) (-2.0f * (sum) * (value) * (steepness) * (steepness))

/* FANN_GAUSSIAN_SYMMETRIC */
/* #define fann_gaussian_symmetric(steepness, sum) ((exp(-sum * steepness * sum * steepness)*2.0)-1.0) */
#define fann_gaussian_symmetric_real(sum) ((exp(-(sum) * (sum))*2.0f)-1.0f)
#define fann_gaussian_symmetric_derive(steepness, value, sum) (-2.0f * (sum) * ((value)+1.0f) * (steepness) * (steepness))

/* FANN_ELLIOT */
/* #define fann_elliot(steepness, sum) (((sum * steepness) / 2.0f) / (1.0f + fann_abs(sum * steepness)) + 0.5f) */
#define fann_elliot_real(sum) (((sum) / 2.0f) / (1.0f + fann_abs(sum)) + 0.5f)
#define fann_elliot_derive(steepness, value, sum) ((steepness) * 1.0f / (2.0f * (1.0f + fann_abs(sum)) * (1.0f + fann_abs(sum))))

/* FANN_ELLIOT_SYMMETRIC */
/* #define fann_elliot_symmetric(steepness, sum) ((sum * steepness) / (1.0f + fann_abs(sum * steepness)))*/
#define fann_elliot_symmetric_real(sum) ((sum) / (1.0f + fann_abs(sum)))
#define fann_elliot_symmetric_derive(steepness, value, sum) ((steepness) * 1.0f / ((1.0f + fann_abs(sum)) * (1.0f + fann_abs(sum))))

/* FANN_SIN_SYMMETRIC */
#define fann_sin_symmetric_real(sum) (sin(sum))
#define fann_sin_symmetric_derive(steepness, sum) ((steepness)*cos((steepness)*(sum)))

/* FANN_COS_SYMMETRIC */
#define fann_cos_symmetric_real(sum) (cos(sum))
#define fann_cos_symmetric_derive(steepness, sum) ((steepness)*-sin((steepness)*(sum)))

/* FANN_SIN */
#define fann_sin_real(sum) (sin(sum)/2.0f+0.5f)
#define fann_sin_derive(steepness, sum) ((steepness)*cos((steepness)*(sum))/2.0f)

/* FANN_COS */
#define fann_cos_real(sum) (cos(sum)/2.0f+0.5f)
#define fann_cos_derive(steepness, sum) ((steepness)*-sin((steepness)*(sum))/2.0f)

/* Dispatch on the activation function enum and store the activated `value`
 * into `result`.  `value` here is the (already steepness-scaled) neuron sum.
 * FANN_GAUSSIAN_STEPWISE is not implemented and yields 0. */
#define fann_activation_switch(activation_function, value, result) \
switch(activation_function) \
{ \
	case FANN_LINEAR: \
		result = (fann_type)(value); \
        break; \
	case FANN_LINEAR_PIECE: \
		result = (fann_type)(((value) < 0) ? 0 : ((value) > 1) ? 1 : (value)); \
        break; \
	case FANN_LINEAR_PIECE_SYMMETRIC: \
		result = (fann_type)(((value) < -1) ? -1 : ((value) > 1) ? 1 : (value)); \
        break; \
	case FANN_SIGMOID: \
		result = (fann_type)fann_sigmoid_real(value); \
        break; \
	case FANN_SIGMOID_SYMMETRIC: \
		result = (fann_type)fann_sigmoid_symmetric_real(value); \
        break; \
	case FANN_SIGMOID_SYMMETRIC_STEPWISE: \
		result = (fann_type)fann_stepwise(-2.64665293693542480469e+00, -1.47221934795379638672e+00, -5.49306154251098632812e-01, 5.49306154251098632812e-01, 1.47221934795379638672e+00, 2.64665293693542480469e+00, -9.90000009536743164062e-01, -8.99999976158142089844e-01, -5.00000000000000000000e-01, 5.00000000000000000000e-01, 8.99999976158142089844e-01, 9.90000009536743164062e-01, -1, 1, value); \
        break; \
	case FANN_SIGMOID_STEPWISE: \
		result = (fann_type)fann_stepwise(-2.64665246009826660156e+00, -1.47221946716308593750e+00, -5.49306154251098632812e-01, 5.49306154251098632812e-01, 1.47221934795379638672e+00, 2.64665293693542480469e+00, 4.99999988824129104614e-03, 5.00000007450580596924e-02, 2.50000000000000000000e-01, 7.50000000000000000000e-01, 9.49999988079071044922e-01, 9.95000004768371582031e-01, 0, 1, value); \
        break; \
	case FANN_THRESHOLD: \
		result = (fann_type)(((value) < 0) ? 0 : 1); \
        break; \
	case FANN_THRESHOLD_SYMMETRIC: \
		result = (fann_type)(((value) < 0) ? -1 : 1); \
        break; \
	case FANN_GAUSSIAN: \
		result = (fann_type)fann_gaussian_real(value); \
        break; \
	case FANN_GAUSSIAN_SYMMETRIC: \
		result = (fann_type)fann_gaussian_symmetric_real(value); \
        break; \
	case FANN_ELLIOT: \
		result = (fann_type)fann_elliot_real(value); \
	    break; \
	case FANN_ELLIOT_SYMMETRIC: \
		result = (fann_type)fann_elliot_symmetric_real(value); \
        break; \
	case FANN_SIN_SYMMETRIC: \
		result = (fann_type)fann_sin_symmetric_real(value); \
        break; \
	case FANN_COS_SYMMETRIC: \
		result = (fann_type)fann_cos_symmetric_real(value); \
        break; \
	case FANN_SIN: \
		result = (fann_type)fann_sin_real(value); \
        break; \
	case FANN_COS: \
		result = (fann_type)fann_cos_real(value); \
        break; \
	case FANN_GAUSSIAN_STEPWISE: \
        result = 0; \
        break; \
}

#endif
@@ -0,0 +1,133 @@
1
+ /*
2
+ * Copyright 2018 Maxine Michalski <maxine@furfind.net>
3
+ * Copyright 2013 ruby-fann contributors
4
+ * <https://github.com/tangledpath/ruby-fann#contributors>
5
+ *
6
+ * This file is part of moo_fann.
7
+ *
8
+ * moo_fann is free software: you can redistribute it and/or modify
9
+ * it under the terms of the GNU General Public License as published by
10
+ * the Free Software Foundation, either version 3 of the License, or
11
+ * (at your option) any later version.
12
+ *
13
+ * moo_fann is distributed in the hope that it will be useful,
14
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
15
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16
+ * GNU General Public License for more details.
17
+ *
18
+ * You should have received a copy of the GNU General Public License
19
+ * along with moo_fann. If not, see <http://www.gnu.org/licenses/>.
20
+ */
21
+
22
+ #include "ruby.h"
23
+ #include "ruby_compat.h"
24
+
25
+ FANN_EXTERNAL struct fann_train_data * FANN_API fann_create_train_from_rb_ary2(
26
+ unsigned int num_data,
27
+ unsigned int num_input,
28
+ unsigned int num_output)
29
+ {
30
+ return 0;
31
+ }
32
+
33
+ /*
34
+ * Copied from fann_create_train_from_callback/file & modified to ease
35
+ * allocating from ruby arrays:
36
+ */
37
+ FANN_EXTERNAL struct fann_train_data * FANN_API fann_create_train_from_rb_ary(
38
+ VALUE inputs,
39
+ VALUE outputs
40
+ )
41
+ {
42
+ unsigned int i, j;
43
+ fann_type *data_input, *data_output;
44
+ struct fann_train_data *data = (struct fann_train_data *)malloc(sizeof(struct fann_train_data));
45
+ unsigned int num_input = RARRAY_LEN(RARRAY_PTR(inputs)[0]);
46
+ unsigned int num_output =RARRAY_LEN(RARRAY_PTR(outputs)[0]);
47
+ unsigned int num_data = RARRAY_LEN(inputs);
48
+
49
+ if(data == NULL) {
50
+ fann_error(NULL, FANN_E_CANT_ALLOCATE_MEM);
51
+ return NULL;
52
+ }
53
+
54
+ fann_init_error_data((struct fann_error *) data);
55
+
56
+ data->num_data = num_data;
57
+ data->num_input = num_input;
58
+ data->num_output = num_output;
59
+
60
+ data->input = (fann_type **) calloc(num_data, sizeof(fann_type *));
61
+ if(data->input == NULL)
62
+ {
63
+ fann_error(NULL, FANN_E_CANT_ALLOCATE_MEM);
64
+ fann_destroy_train(data);
65
+ return NULL;
66
+ }
67
+
68
+ data->output = (fann_type **) calloc(num_data, sizeof(fann_type *));
69
+ if(data->output == NULL)
70
+ {
71
+ fann_error(NULL, FANN_E_CANT_ALLOCATE_MEM);
72
+ fann_destroy_train(data);
73
+ return NULL;
74
+ }
75
+
76
+ data_input = (fann_type *) calloc(num_input * num_data, sizeof(fann_type));
77
+ if(data_input == NULL)
78
+ {
79
+ fann_error(NULL, FANN_E_CANT_ALLOCATE_MEM);
80
+ fann_destroy_train(data);
81
+ return NULL;
82
+ }
83
+
84
+ data_output = (fann_type *) calloc(num_output * num_data, sizeof(fann_type));
85
+ if(data_output == NULL)
86
+ {
87
+ fann_error(NULL, FANN_E_CANT_ALLOCATE_MEM);
88
+ fann_destroy_train(data);
89
+ return NULL;
90
+ }
91
+
92
+ VALUE inputs_i, outputs_i;
93
+ for(i = 0; i != num_data; i++)
94
+ {
95
+ data->input[i] = data_input;
96
+ data_input += num_input;
97
+
98
+ inputs_i = RARRAY_PTR(inputs)[i];
99
+ outputs_i = RARRAY_PTR(outputs)[i];
100
+
101
+ if(RARRAY_LEN(inputs_i) != num_input)
102
+ {
103
+ rb_raise (
104
+ rb_eRuntimeError,
105
+ "Number of inputs at [%d] is inconsistent: (%d != %d)",
106
+ i, RARRAY_LEN(inputs_i), num_input);
107
+ }
108
+
109
+ if(RARRAY_LEN(outputs_i) != num_output)
110
+ {
111
+ rb_raise (
112
+ rb_eRuntimeError,
113
+ "Number of outputs at [%d] is inconsistent: (%d != %d)",
114
+ i, RARRAY_LEN(outputs_i), num_output);
115
+ }
116
+
117
+
118
+ for(j = 0; j != num_input; j++)
119
+ {
120
+ data->input[i][j]=NUM2DBL(RARRAY_PTR(inputs_i)[j]);
121
+ }
122
+
123
+ data->output[i] = data_output;
124
+ data_output += num_output;
125
+
126
+ for(j = 0; j != num_output; j++)
127
+ {
128
+ data->output[i][j]=NUM2DBL(RARRAY_PTR(outputs_i)[j]);
129
+ }
130
+ }
131
+
132
+ return data;
133
+ }
@@ -0,0 +1,1048 @@
1
+ /*
2
+ Fast Artificial Neural Network Library (fann)
3
+ Copyright (C) 2003-2012 Steffen Nissen (sn@leenissen.dk)
4
+
5
+ This library is free software; you can redistribute it and/or
6
+ modify it under the terms of the GNU Lesser General Public
7
+ License as published by the Free Software Foundation; either
8
+ version 2.1 of the License, or (at your option) any later version.
9
+
10
+ This library is distributed in the hope that it will be useful,
11
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
12
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13
+ Lesser General Public License for more details.
14
+
15
+ You should have received a copy of the GNU Lesser General Public
16
+ License along with this library; if not, write to the Free Software
17
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18
+ */
19
+
20
+ #include "config.h"
21
+ #include "fann.h"
22
+ #include "string.h"
23
+
24
+ #ifndef FIXEDFANN
25
+
26
+ /* #define CASCADE_DEBUG */
27
+ /* #define CASCADE_DEBUG_FULL */
28
+
29
+ void fann_print_connections_raw(struct fann *ann)
30
+ {
31
+ unsigned int i;
32
+
33
+ for(i = 0; i < ann->total_connections_allocated; i++)
34
+ {
35
+ if(i == ann->total_connections)
36
+ {
37
+ printf("* ");
38
+ }
39
+ printf("%f ", ann->weights[i]);
40
+ }
41
+ printf("\n\n");
42
+ }
43
+
44
/* Cascade training directly on the training data.
   The connected_neurons pointers are not valid during training,
   but they will be again after training.

   Repeatedly: (1) train the output weights, (2) create and train a pool of
   candidate neurons, (3) install the best candidate — until desired_error
   is reached or max_neurons candidates have been added.  Progress is
   reported every neurons_between_reports neurons, either via printf or via
   ann->callback when one is set (callback returning -1 aborts training).
 */
FANN_EXTERNAL void FANN_API fann_cascadetrain_on_data(struct fann *ann, struct fann_train_data *data,
                                                      unsigned int max_neurons,
                                                      unsigned int neurons_between_reports,
                                                      float desired_error)
{
	float error;
	unsigned int i;
	unsigned int total_epochs = 0;  /* epochs spent across all phases, for reporting */
	int desired_error_reached;

	if(neurons_between_reports && ann->callback == NULL)
	{
		printf("Max neurons %3d. Desired error: %.6f\n", max_neurons, desired_error);
	}

	/* One iteration per candidate neuron added to the network. */
	for(i = 1; i <= max_neurons; i++)
	{
		/* train output neurons */
		total_epochs += fann_train_outputs(ann, data, desired_error);
		error = fann_get_MSE(ann);
		/* NB: fann_desired_error_reached() returns 0 when the error HAS
		 * been reached (it is a status code, not a boolean). */
		desired_error_reached = fann_desired_error_reached(ann, desired_error);

		/* print current error */
		if(neurons_between_reports &&
		   (i % neurons_between_reports == 0
			|| i == max_neurons || i == 1 || desired_error_reached == 0))
		{
			if(ann->callback == NULL)
			{
				printf
					("Neurons %3d. Current error: %.6f. Total error:%8.4f. Epochs %5d. Bit fail %3d",
					 i-1, error, ann->MSE_value, total_epochs, ann->num_bit_fail);
				/* Only report candidate settings once a hidden layer exists. */
				if((ann->last_layer-2) != ann->first_layer)
				{
					printf(". candidate steepness %.2f. function %s",
						   (ann->last_layer-2)->first_neuron->activation_steepness,
						   FANN_ACTIVATIONFUNC_NAMES[(ann->last_layer-2)->first_neuron->activation_function]);
				}
				printf("\n");
			}
			else if((*ann->callback) (ann, data, max_neurons,
									  neurons_between_reports, desired_error, total_epochs) == -1)
			{
				/* you can break the training by returning -1 */
				break;
			}
		}

		if(desired_error_reached == 0)
			break;

		if(fann_initialize_candidates(ann) == -1)
		{
			/* Unable to initialize room for candidates */
			break;
		}

		/* train new candidates */
		total_epochs += fann_train_candidates(ann, data);

		/* this installs the best candidate */
		fann_install_candidate(ann);
	}

	/* Train outputs one last time but without any desired error */
	total_epochs += fann_train_outputs(ann, data, 0.0);

	if(neurons_between_reports && ann->callback == NULL)
	{
		printf("Train outputs Current error: %.6f. Epochs %6d\n", fann_get_MSE(ann),
			   total_epochs);
	}

	/* Set pointers in connected_neurons
	 * This is ONLY done in the end of cascade training,
	 * since there is no need for them during training.
	 */
	fann_set_shortcut_connections(ann);
}
127
+
128
+ FANN_EXTERNAL void FANN_API fann_cascadetrain_on_file(struct fann *ann, const char *filename,
129
+ unsigned int max_neurons,
130
+ unsigned int neurons_between_reports,
131
+ float desired_error)
132
+ {
133
+ struct fann_train_data *data = fann_read_train_from_file(filename);
134
+
135
+ if(data == NULL)
136
+ {
137
+ return;
138
+ }
139
+ fann_cascadetrain_on_data(ann, data, max_neurons, neurons_between_reports, desired_error);
140
+ fann_destroy_train(data);
141
+ }
142
+
143
/* Train only the output-layer weights on `data` until desired_error is
 * reached, progress stagnates, or cascade_max_out_epochs epochs have run.
 * Returns the number of epochs actually spent (including the initial one).
 *
 * Stagnation detection: whenever the improvement over the initial error
 * changes "significantly" (beyond target/backslide thresholds scaled by
 * cascade_output_change_fraction), a new quota of
 * cascade_output_stagnation_epochs epochs is granted.
 */
int fann_train_outputs(struct fann *ann, struct fann_train_data *data, float desired_error)
{
	float error, initial_error, error_improvement;
	float target_improvement = 0.0;
	float backslide_improvement = -1.0e20f;  /* effectively -infinity until first update */
	unsigned int i;
	unsigned int max_epochs = ann->cascade_max_out_epochs;
	unsigned int min_epochs = ann->cascade_min_out_epochs;
	unsigned int stagnation = max_epochs;  /* epoch index after which training is "stuck" */

	/* TODO should perhaps not clear all arrays */
	fann_clear_train_arrays(ann);

	/* run an initial epoch to set the initital error */
	initial_error = fann_train_outputs_epoch(ann, data);

	/* fann_desired_error_reached() returns 0 when the target is reached. */
	if(fann_desired_error_reached(ann, desired_error) == 0)
		return 1;

	for(i = 1; i < max_epochs; i++)
	{
		error = fann_train_outputs_epoch(ann, data);

		/*printf("Epoch %6d. Current error: %.6f. Bit fail %d.\n", i, error, ann->num_bit_fail); */

		if(fann_desired_error_reached(ann, desired_error) == 0)
		{
#ifdef CASCADE_DEBUG
			printf("Error %f < %f\n", error, desired_error);
#endif
			return i + 1;
		}

		/* Improvement since start of train */
		error_improvement = initial_error - error;

		/* After any significant change, set a new goal and
		 * allow a new quota of epochs to reach it
		 * (both branches handle the sign of the current improvement). */

		if((target_improvement >= 0 &&
			(error_improvement > target_improvement || error_improvement < backslide_improvement)) ||
		   (target_improvement < 0 &&
			(error_improvement < target_improvement || error_improvement > backslide_improvement)))
		{
			/*printf("error_improvement=%f, target_improvement=%f, backslide_improvement=%f, stagnation=%d\n", error_improvement, target_improvement, backslide_improvement, stagnation); */

			target_improvement = error_improvement * (1.0f + ann->cascade_output_change_fraction);
			backslide_improvement = error_improvement * (1.0f - ann->cascade_output_change_fraction);
			stagnation = i + ann->cascade_output_stagnation_epochs;
		}

		/* No improvement in allotted period, so quit */
		if(i >= stagnation && i >= min_epochs)
		{
			return i + 1;
		}
	}

	return max_epochs;
}
203
+
204
/* Run one training epoch over `data`, updating only the weights feeding the
 * output layer (ann->last_layer - 1), using the configured batch algorithm.
 * Returns the MSE measured during this epoch.
 */
float fann_train_outputs_epoch(struct fann *ann, struct fann_train_data *data)
{
	unsigned int i;

	fann_reset_MSE(ann);

	/* Accumulate error slopes for the output layer over the whole set. */
	for(i = 0; i < data->num_data; i++)
	{
		fann_run(ann, data->input[i]);
		fann_compute_MSE(ann, data->output[i]);
		fann_update_slopes_batch(ann, ann->last_layer - 1, ann->last_layer - 1);
	}

	/* Apply the accumulated slopes; only batch-style algorithms are
	 * supported for cascade training. */
	switch (ann->training_algorithm)
	{
		case FANN_TRAIN_RPROP:
			fann_update_weights_irpropm(ann, (ann->last_layer - 1)->first_neuron->first_con,
										ann->total_connections);
			break;
		case FANN_TRAIN_SARPROP:
			fann_update_weights_sarprop(ann, ann->sarprop_epoch, (ann->last_layer - 1)->first_neuron->first_con,
										ann->total_connections);
			++(ann->sarprop_epoch);
			break;
		case FANN_TRAIN_QUICKPROP:
			fann_update_weights_quickprop(ann, data->num_data,
										  (ann->last_layer - 1)->first_neuron->first_con,
										  ann->total_connections);
			break;
		case FANN_TRAIN_BATCH:
		case FANN_TRAIN_INCREMENTAL:
			/* these algorithms cannot be used here; report and fall through */
			fann_error((struct fann_error *) ann, FANN_E_CANT_USE_TRAIN_ALG);
	}

	return fann_get_MSE(ann);
}
240
+
241
+ int fann_reallocate_connections(struct fann *ann, unsigned int total_connections)
242
+ {
243
+ /* The connections are allocated, but the pointers inside are
244
+ * first moved in the end of the cascade training session.
245
+ */
246
+
247
+ #ifdef CASCADE_DEBUG
248
+ printf("realloc from %d to %d\n", ann->total_connections_allocated, total_connections);
249
+ #endif
250
+ ann->connections =
251
+ (struct fann_neuron **) realloc(ann->connections,
252
+ total_connections * sizeof(struct fann_neuron *));
253
+ if(ann->connections == NULL)
254
+ {
255
+ fann_error((struct fann_error *) ann, FANN_E_CANT_ALLOCATE_MEM);
256
+ return -1;
257
+ }
258
+
259
+ ann->weights = (fann_type *) realloc(ann->weights, total_connections * sizeof(fann_type));
260
+ if(ann->weights == NULL)
261
+ {
262
+ fann_error((struct fann_error *) ann, FANN_E_CANT_ALLOCATE_MEM);
263
+ return -1;
264
+ }
265
+
266
+ ann->train_slopes =
267
+ (fann_type *) realloc(ann->train_slopes, total_connections * sizeof(fann_type));
268
+ if(ann->train_slopes == NULL)
269
+ {
270
+ fann_error((struct fann_error *) ann, FANN_E_CANT_ALLOCATE_MEM);
271
+ return -1;
272
+ }
273
+
274
+ ann->prev_steps = (fann_type *) realloc(ann->prev_steps, total_connections * sizeof(fann_type));
275
+ if(ann->prev_steps == NULL)
276
+ {
277
+ fann_error((struct fann_error *) ann, FANN_E_CANT_ALLOCATE_MEM);
278
+ return -1;
279
+ }
280
+
281
+ ann->prev_train_slopes =
282
+ (fann_type *) realloc(ann->prev_train_slopes, total_connections * sizeof(fann_type));
283
+ if(ann->prev_train_slopes == NULL)
284
+ {
285
+ fann_error((struct fann_error *) ann, FANN_E_CANT_ALLOCATE_MEM);
286
+ return -1;
287
+ }
288
+
289
+ ann->total_connections_allocated = total_connections;
290
+
291
+ return 0;
292
+ }
293
+
294
+ int fann_reallocate_neurons(struct fann *ann, unsigned int total_neurons)
295
+ {
296
+ struct fann_layer *layer_it;
297
+ struct fann_neuron *neurons;
298
+ unsigned int num_neurons = 0;
299
+ unsigned int num_neurons_so_far = 0;
300
+
301
+ neurons =
302
+ (struct fann_neuron *) realloc(ann->first_layer->first_neuron,
303
+ total_neurons * sizeof(struct fann_neuron));
304
+ ann->total_neurons_allocated = total_neurons;
305
+
306
+ if(neurons == NULL)
307
+ {
308
+ fann_error((struct fann_error *) ann, FANN_E_CANT_ALLOCATE_MEM);
309
+ return -1;
310
+ }
311
+
312
+ /* Also allocate room for more train_errors */
313
+ ann->train_errors = (fann_type *) realloc(ann->train_errors, total_neurons * sizeof(fann_type));
314
+ if(ann->train_errors == NULL)
315
+ {
316
+ fann_error((struct fann_error *) ann, FANN_E_CANT_ALLOCATE_MEM);
317
+ return -1;
318
+ }
319
+
320
+ if(neurons != ann->first_layer->first_neuron)
321
+ {
322
+ /* Then the memory has moved, also move the pointers */
323
+
324
+ #ifdef CASCADE_DEBUG_FULL
325
+ printf("Moving neuron pointers\n");
326
+ #endif
327
+
328
+ /* Move pointers from layers to neurons */
329
+ for(layer_it = ann->first_layer; layer_it != ann->last_layer; layer_it++)
330
+ {
331
+ num_neurons = layer_it->last_neuron - layer_it->first_neuron;
332
+ layer_it->first_neuron = neurons + num_neurons_so_far;
333
+ layer_it->last_neuron = layer_it->first_neuron + num_neurons;
334
+ num_neurons_so_far += num_neurons;
335
+ }
336
+ }
337
+
338
+ return 0;
339
+ }
340
+
341
+ void initialize_candidate_weights(struct fann *ann, unsigned int first_con, unsigned int last_con, float scale_factor)
342
+ {
343
+ fann_type prev_step;
344
+ unsigned int i = 0;
345
+ unsigned int bias_weight = first_con + (ann->first_layer->last_neuron - ann->first_layer->first_neuron) - 1;
346
+
347
+ if(ann->training_algorithm == FANN_TRAIN_RPROP)
348
+ prev_step = ann->rprop_delta_zero;
349
+ else
350
+ prev_step = 0;
351
+
352
+ for(i = first_con; i < last_con; i++)
353
+ {
354
+ if(i == bias_weight)
355
+ ann->weights[i] = fann_rand(-scale_factor, scale_factor);
356
+ else
357
+ ann->weights[i] = fann_rand(0,scale_factor);
358
+
359
+ ann->train_slopes[i] = 0;
360
+ ann->prev_steps[i] = prev_step;
361
+ ann->prev_train_slopes[i] = 0;
362
+ }
363
+ }
364
+
365
/* Lay out and initialize the candidate-neuron pool for one round of
 * cascade training, growing the neuron/connection arrays when needed.
 * One candidate is created for every combination of
 * (activation function, steepness, candidate group).
 * Returns 0 on success, -1 when the arrays could not be grown.
 */
int fann_initialize_candidates(struct fann *ann)
{
	/* The candidates are allocated after the normal neurons and connections,
	 * but there is an empty place between the real neurons and the candidate neurons,
	 * so that it will be possible to make room when the chosen candidate are copied in
	 * on the desired place.
	 */
	unsigned int neurons_to_allocate, connections_to_allocate;
	unsigned int num_candidates = fann_get_cascade_num_candidates(ann);
	unsigned int num_neurons = ann->total_neurons + num_candidates + 1;
	unsigned int num_hidden_neurons = ann->total_neurons - ann->num_input - ann->num_output;
	/* every existing non-output neuron feeds each candidate ... */
	unsigned int candidate_connections_in = ann->total_neurons - ann->num_output;
	/* ... and each candidate feeds every output neuron */
	unsigned int candidate_connections_out = ann->num_output;

	/* the number of connections going into a and out of a candidate is
	 * ann->total_neurons */
	unsigned int num_connections =
		ann->total_connections + (ann->total_neurons * (num_candidates + 1));
	unsigned int first_candidate_connection = ann->total_connections + ann->total_neurons;
	unsigned int first_candidate_neuron = ann->total_neurons + 1;
	unsigned int connection_it, i, j, k, candidate_index;
	struct fann_neuron *neurons;
	float scale_factor;

	/* First make sure that there is enough room, and if not then allocate a
	 * bit more so that we do not need to allocate more room each time.
	 */
	if(num_neurons > ann->total_neurons_allocated)
	{
		/* Then we need to allocate more neurons
		 * Allocate half as many neurons as already exist (at least ten)
		 */
		neurons_to_allocate = num_neurons + num_neurons / 2;
		if(neurons_to_allocate < num_neurons + 10)
		{
			neurons_to_allocate = num_neurons + 10;
		}

		if(fann_reallocate_neurons(ann, neurons_to_allocate) == -1)
		{
			return -1;
		}
	}

	if(num_connections > ann->total_connections_allocated)
	{
		/* Then we need to allocate more connections
		 * Allocate half as many connections as already exist
		 * (at least enough for ten neurons)
		 */
		connections_to_allocate = num_connections + num_connections / 2;
		if(connections_to_allocate < num_connections + ann->total_neurons * 10)
		{
			connections_to_allocate = num_connections + ann->total_neurons * 10;
		}

		if(fann_reallocate_connections(ann, connections_to_allocate) == -1)
		{
			return -1;
		}
	}

	/* Some code to do semi Widrow + Nguyen initialization */
	scale_factor = (float) (2.0 * pow(0.7f * (float)num_hidden_neurons, 1.0f / (float) ann->num_input));
	if(scale_factor > 8)
		scale_factor = 8;
	else if(scale_factor < 0.5)
		scale_factor = 0.5;

	/* Set the neurons.
	 */
	connection_it = first_candidate_connection;
	neurons = ann->first_layer->first_neuron;
	candidate_index = first_candidate_neuron;

	/* One candidate per (activation function, steepness, group) triple. */
	for(i = 0; i < ann->cascade_activation_functions_count; i++)
	{
		for(j = 0; j < ann->cascade_activation_steepnesses_count; j++)
		{
			for(k = 0; k < ann->cascade_num_candidate_groups; k++)
			{
				/* TODO candidates should actually be created both in
				 * the last layer before the output layer, and in a new layer.
				 */
				neurons[candidate_index].value = 0;
				neurons[candidate_index].sum = 0;

				neurons[candidate_index].activation_function =
					ann->cascade_activation_functions[i];
				neurons[candidate_index].activation_steepness =
					ann->cascade_activation_steepnesses[j];

				neurons[candidate_index].first_con = connection_it;
				connection_it += candidate_connections_in;
				neurons[candidate_index].last_con = connection_it;
				/* We have no specific pointers to the output weights, but they are
				 * available after last_con */
				connection_it += candidate_connections_out;
				ann->train_errors[candidate_index] = 0;
				initialize_candidate_weights(ann, neurons[candidate_index].first_con, neurons[candidate_index].last_con+candidate_connections_out, scale_factor);
				candidate_index++;
			}
		}
	}


	/* Now randomize the weights and zero out the arrays that needs zeroing out.
	 */
	/*
	   #ifdef CASCADE_DEBUG_FULL
	   printf("random cand weight [%d ... %d]\n", first_candidate_connection, num_connections - 1);
	   #endif

	   for(i = first_candidate_connection; i < num_connections; i++)
	   {

	   //ann->weights[i] = fann_random_weight();
	   ann->weights[i] = fann_rand(-2.0,2.0);
	   ann->train_slopes[i] = 0;
	   ann->prev_steps[i] = 0;
	   ann->prev_train_slopes[i] = initial_slope;
	   }
	 */

	return 0;
}
491
+
492
/* Train the candidate-neuron pool on `data` until the best candidate's
 * score exceeds cascade_candidate_limit (relative to the current MSE),
 * progress stagnates, or cascade_max_cand_epochs epochs have run.
 * Lazily allocates the candidate-score array on first use.
 * Returns the number of epochs spent (0 if the score array could not be
 * allocated).
 *
 * NOTE(review): `best_cand_score / ann->MSE_value` assumes MSE_value is
 * non-zero here — presumably guaranteed because training would have
 * stopped earlier at zero error; confirm against callers.
 */
int fann_train_candidates(struct fann *ann, struct fann_train_data *data)
{
	fann_type best_cand_score = 0.0;
	fann_type target_cand_score = 0.0;
	fann_type backslide_cand_score = -1.0e20f;  /* effectively -infinity until first update */
	unsigned int i;
	unsigned int max_epochs = ann->cascade_max_cand_epochs;
	unsigned int min_epochs = ann->cascade_min_cand_epochs;
	unsigned int stagnation = max_epochs;  /* epoch index after which training is "stuck" */

	/* Lazily allocate one score slot per candidate. */
	if(ann->cascade_candidate_scores == NULL)
	{
		ann->cascade_candidate_scores =
			(fann_type *) malloc(fann_get_cascade_num_candidates(ann) * sizeof(fann_type));
		if(ann->cascade_candidate_scores == NULL)
		{
			fann_error((struct fann_error *) ann, FANN_E_CANT_ALLOCATE_MEM);
			return 0;
		}
	}

	for(i = 0; i < max_epochs; i++)
	{
		best_cand_score = fann_train_candidates_epoch(ann, data);

		/* Good enough: the best candidate explains a large enough share
		 * of the remaining error. */
		if(best_cand_score / ann->MSE_value > ann->cascade_candidate_limit)
		{
#ifdef CASCADE_DEBUG
			printf("above candidate limit %f/%f > %f", best_cand_score, ann->MSE_value,
				   ann->cascade_candidate_limit);
#endif
			return i + 1;
		}

		/* Significant change (in either direction): set new goals and grant
		 * a fresh quota of epochs before declaring stagnation. */
		if((best_cand_score > target_cand_score) || (best_cand_score < backslide_cand_score))
		{
#ifdef CASCADE_DEBUG_FULL
			printf("Best candidate score %f, real score: %f\n", ann->MSE_value - best_cand_score,
				   best_cand_score);
			/* printf("best_cand_score=%f, target_cand_score=%f, backslide_cand_score=%f, stagnation=%d\n", best_cand_score, target_cand_score, backslide_cand_score, stagnation); */
#endif

			target_cand_score = best_cand_score * (1.0f + ann->cascade_candidate_change_fraction);
			backslide_cand_score = best_cand_score * (1.0f - ann->cascade_candidate_change_fraction);
			stagnation = i + ann->cascade_candidate_stagnation_epochs;
		}

		/* No improvement in allotted period, so quit */
		if(i >= stagnation && i >= min_epochs)
		{
#ifdef CASCADE_DEBUG
			printf("Stagnation with %d epochs, best candidate score %f, real score: %f\n", i + 1,
				   ann->MSE_value - best_cand_score, best_cand_score);
#endif
			return i + 1;
		}
	}

#ifdef CASCADE_DEBUG
	printf("Max epochs %d reached, best candidate score %f, real score: %f\n", max_epochs,
		   ann->MSE_value - best_cand_score, best_cand_score);
#endif
	return max_epochs;
}
556
+
557
+ void fann_update_candidate_slopes(struct fann *ann)
558
+ {
559
+ struct fann_neuron *neurons = ann->first_layer->first_neuron;
560
+ struct fann_neuron *first_cand = neurons + ann->total_neurons + 1;
561
+ struct fann_neuron *last_cand = first_cand + fann_get_cascade_num_candidates(ann);
562
+ struct fann_neuron *cand_it;
563
+ unsigned int i, j, num_connections;
564
+ unsigned int num_output = ann->num_output;
565
+ fann_type max_sum, cand_sum, activation, derived, error_value, diff, cand_score;
566
+ fann_type *weights, *cand_out_weights, *cand_slopes, *cand_out_slopes;
567
+ fann_type *output_train_errors = ann->train_errors + (ann->total_neurons - ann->num_output);
568
+
569
+ for(cand_it = first_cand; cand_it < last_cand; cand_it++)
570
+ {
571
+ cand_score = ann->cascade_candidate_scores[cand_it - first_cand];
572
+ error_value = 0.0;
573
+
574
+ /* code more or less stolen from fann_run to fast forward pass
575
+ */
576
+ cand_sum = 0.0;
577
+ num_connections = cand_it->last_con - cand_it->first_con;
578
+ weights = ann->weights + cand_it->first_con;
579
+
580
+ /* unrolled loop start */
581
+ i = num_connections & 3; /* same as modulo 4 */
582
+ switch (i)
583
+ {
584
+ case 3:
585
+ cand_sum += weights[2] * neurons[2].value;
586
+ case 2:
587
+ cand_sum += weights[1] * neurons[1].value;
588
+ case 1:
589
+ cand_sum += weights[0] * neurons[0].value;
590
+ case 0:
591
+ break;
592
+ }
593
+
594
+ for(; i != num_connections; i += 4)
595
+ {
596
+ cand_sum +=
597
+ weights[i] * neurons[i].value +
598
+ weights[i + 1] * neurons[i + 1].value +
599
+ weights[i + 2] * neurons[i + 2].value + weights[i + 3] * neurons[i + 3].value;
600
+ }
601
+ /*
602
+ * for(i = 0; i < num_connections; i++){
603
+ * cand_sum += weights[i] * neurons[i].value;
604
+ * }
605
+ */
606
+ /* unrolled loop end */
607
+
608
+ max_sum = 150/cand_it->activation_steepness;
609
+ if(cand_sum > max_sum)
610
+ cand_sum = max_sum;
611
+ else if(cand_sum < -max_sum)
612
+ cand_sum = -max_sum;
613
+
614
+ activation =
615
+ fann_activation(ann, cand_it->activation_function, cand_it->activation_steepness,
616
+ cand_sum);
617
+ /* printf("%f = sigmoid(%f);\n", activation, cand_sum); */
618
+
619
+ cand_it->sum = cand_sum;
620
+ cand_it->value = activation;
621
+
622
+ derived = fann_activation_derived(cand_it->activation_function,
623
+ cand_it->activation_steepness, activation, cand_sum);
624
+
625
+ /* The output weights is located right after the input weights in
626
+ * the weight array.
627
+ */
628
+ cand_out_weights = weights + num_connections;
629
+
630
+ cand_out_slopes = ann->train_slopes + cand_it->first_con + num_connections;
631
+ for(j = 0; j < num_output; j++)
632
+ {
633
+ diff = (activation * cand_out_weights[j]) - output_train_errors[j];
634
+ #ifdef CASCADE_DEBUG_FULL
635
+ /* printf("diff = %f = (%f * %f) - %f;\n", diff, activation, cand_out_weights[j], output_train_errors[j]); */
636
+ #endif
637
+ cand_out_slopes[j] -= 2.0f * diff * activation;
638
+ #ifdef CASCADE_DEBUG_FULL
639
+ /* printf("cand_out_slopes[%d] <= %f += %f * %f;\n", j, cand_out_slopes[j], diff, activation); */
640
+ #endif
641
+ error_value += diff * cand_out_weights[j];
642
+ cand_score -= (diff * diff);
643
+ #ifdef CASCADE_DEBUG_FULL
644
+ /* printf("cand_score[%d][%d] = %f -= (%f * %f)\n", cand_it - first_cand, j, cand_score, diff, diff); */
645
+
646
+ printf("cand[%d]: error=%f, activation=%f, diff=%f, slope=%f\n", cand_it - first_cand,
647
+ output_train_errors[j], (activation * cand_out_weights[j]), diff,
648
+ -2.0 * diff * activation);
649
+ #endif
650
+ }
651
+
652
+ ann->cascade_candidate_scores[cand_it - first_cand] = cand_score;
653
+ error_value *= derived;
654
+
655
+ cand_slopes = ann->train_slopes + cand_it->first_con;
656
+ for(i = 0; i < num_connections; i++)
657
+ {
658
+ cand_slopes[i] -= error_value * neurons[i].value;
659
+ }
660
+ }
661
+ }
662
+
663
+ void fann_update_candidate_weights(struct fann *ann, unsigned int num_data)
664
+ {
665
+ struct fann_neuron *first_cand = (ann->last_layer - 1)->last_neuron + 1; /* there is an empty neuron between the actual neurons and the candidate neuron */
666
+ struct fann_neuron *last_cand = first_cand + fann_get_cascade_num_candidates(ann) - 1;
667
+
668
+ switch (ann->training_algorithm)
669
+ {
670
+ case FANN_TRAIN_RPROP:
671
+ fann_update_weights_irpropm(ann, first_cand->first_con,
672
+ last_cand->last_con + ann->num_output);
673
+ break;
674
+ case FANN_TRAIN_SARPROP:
675
+ /* TODO: increase epoch? */
676
+ fann_update_weights_sarprop(ann, ann->sarprop_epoch, first_cand->first_con,
677
+ last_cand->last_con + ann->num_output);
678
+ break;
679
+ case FANN_TRAIN_QUICKPROP:
680
+ fann_update_weights_quickprop(ann, num_data, first_cand->first_con,
681
+ last_cand->last_con + ann->num_output);
682
+ break;
683
+ case FANN_TRAIN_BATCH:
684
+ case FANN_TRAIN_INCREMENTAL:
685
+ fann_error((struct fann_error *) ann, FANN_E_CANT_USE_TRAIN_ALG);
686
+ break;
687
+ }
688
+ }
689
+
690
+ fann_type fann_train_candidates_epoch(struct fann *ann, struct fann_train_data *data)
691
+ {
692
+ unsigned int i, j;
693
+ unsigned int best_candidate;
694
+ fann_type best_score;
695
+ unsigned int num_cand = fann_get_cascade_num_candidates(ann);
696
+ fann_type *output_train_errors = ann->train_errors + (ann->total_neurons - ann->num_output);
697
+ struct fann_neuron *output_neurons = (ann->last_layer - 1)->first_neuron;
698
+
699
+ for(i = 0; i < num_cand; i++)
700
+ {
701
+ /* The ann->MSE_value is actually the sum squared error */
702
+ ann->cascade_candidate_scores[i] = ann->MSE_value;
703
+ }
704
+ /*printf("start score: %f\n", ann->MSE_value); */
705
+
706
+ for(i = 0; i < data->num_data; i++)
707
+ {
708
+ fann_run(ann, data->input[i]);
709
+
710
+ for(j = 0; j < ann->num_output; j++)
711
+ {
712
+ /* TODO only debug, but the error is in opposite direction, this might be usefull info */
713
+ /* if(output_train_errors[j] != (ann->output[j] - data->output[i][j])){
714
+ * printf("difference in calculated error at %f != %f; %f = %f - %f;\n", output_train_errors[j], (ann->output[j] - data->output[i][j]), output_train_errors[j], ann->output[j], data->output[i][j]);
715
+ * } */
716
+
717
+ /*
718
+ * output_train_errors[j] = (data->output[i][j] - ann->output[j])/2;
719
+ * output_train_errors[j] = ann->output[j] - data->output[i][j];
720
+ */
721
+
722
+ output_train_errors[j] = (data->output[i][j] - ann->output[j]);
723
+
724
+ switch (output_neurons[j].activation_function)
725
+ {
726
+ case FANN_LINEAR_PIECE_SYMMETRIC:
727
+ case FANN_SIGMOID_SYMMETRIC:
728
+ case FANN_SIGMOID_SYMMETRIC_STEPWISE:
729
+ case FANN_THRESHOLD_SYMMETRIC:
730
+ case FANN_ELLIOT_SYMMETRIC:
731
+ case FANN_GAUSSIAN_SYMMETRIC:
732
+ case FANN_SIN_SYMMETRIC:
733
+ case FANN_COS_SYMMETRIC:
734
+ output_train_errors[j] /= 2.0;
735
+ break;
736
+ case FANN_LINEAR:
737
+ case FANN_THRESHOLD:
738
+ case FANN_SIGMOID:
739
+ case FANN_SIGMOID_STEPWISE:
740
+ case FANN_GAUSSIAN:
741
+ case FANN_GAUSSIAN_STEPWISE:
742
+ case FANN_ELLIOT:
743
+ case FANN_LINEAR_PIECE:
744
+ case FANN_SIN:
745
+ case FANN_COS:
746
+ break;
747
+ }
748
+ }
749
+
750
+ fann_update_candidate_slopes(ann);
751
+ }
752
+
753
+ fann_update_candidate_weights(ann, data->num_data);
754
+
755
+ /* find the best candidate score */
756
+ best_candidate = 0;
757
+ best_score = ann->cascade_candidate_scores[best_candidate];
758
+ for(i = 1; i < num_cand; i++)
759
+ {
760
+ /*struct fann_neuron *cand = ann->first_layer->first_neuron + ann->total_neurons + 1 + i;
761
+ * printf("candidate[%d] = activation: %s, steepness: %f, score: %f\n",
762
+ * i, FANN_ACTIVATIONFUNC_NAMES[cand->activation_function],
763
+ * cand->activation_steepness, ann->cascade_candidate_scores[i]); */
764
+
765
+ if(ann->cascade_candidate_scores[i] > best_score)
766
+ {
767
+ best_candidate = i;
768
+ best_score = ann->cascade_candidate_scores[best_candidate];
769
+ }
770
+ }
771
+
772
+ ann->cascade_best_candidate = ann->total_neurons + best_candidate + 1;
773
+ #ifdef CASCADE_DEBUG
774
+ printf("Best candidate[%d]: with score %f, real score: %f\n", best_candidate,
775
+ ann->MSE_value - best_score, best_score);
776
+ #endif
777
+
778
+ return best_score;
779
+ }
780
+
781
+ /* add a layer ad the position pointed to by *layer */
782
+ struct fann_layer *fann_add_layer(struct fann *ann, struct fann_layer *layer)
783
+ {
784
+ int layer_pos = layer - ann->first_layer;
785
+ int num_layers = ann->last_layer - ann->first_layer + 1;
786
+ int i;
787
+
788
+ /* allocate the layer */
789
+ struct fann_layer *layers =
790
+ (struct fann_layer *) realloc(ann->first_layer, num_layers * sizeof(struct fann_layer));
791
+ if(layers == NULL)
792
+ {
793
+ fann_error((struct fann_error *) ann, FANN_E_CANT_ALLOCATE_MEM);
794
+ return NULL;
795
+ }
796
+
797
+ /* copy layers so that the free space is at the right location */
798
+ for(i = num_layers - 1; i >= layer_pos; i--)
799
+ {
800
+ layers[i] = layers[i - 1];
801
+ }
802
+
803
+ /* the newly allocated layer is empty */
804
+ layers[layer_pos].first_neuron = layers[layer_pos + 1].first_neuron;
805
+ layers[layer_pos].last_neuron = layers[layer_pos + 1].first_neuron;
806
+
807
+ /* Set the ann pointers correctly */
808
+ ann->first_layer = layers;
809
+ ann->last_layer = layers + num_layers;
810
+
811
+ #ifdef CASCADE_DEBUG_FULL
812
+ printf("add layer at pos %d\n", layer_pos);
813
+ #endif
814
+
815
+ return layers + layer_pos;
816
+ }
817
+
818
+ void fann_set_shortcut_connections(struct fann *ann)
819
+ {
820
+ struct fann_layer *layer_it;
821
+ struct fann_neuron *neuron_it, **neuron_pointers, *neurons;
822
+ unsigned int num_connections = 0, i;
823
+
824
+ neuron_pointers = ann->connections;
825
+ neurons = ann->first_layer->first_neuron;
826
+
827
+ for(layer_it = ann->first_layer + 1; layer_it != ann->last_layer; layer_it++)
828
+ {
829
+ for(neuron_it = layer_it->first_neuron; neuron_it != layer_it->last_neuron; neuron_it++)
830
+ {
831
+
832
+ neuron_pointers += num_connections;
833
+ num_connections = neuron_it->last_con - neuron_it->first_con;
834
+
835
+ for(i = 0; i != num_connections; i++)
836
+ {
837
+ neuron_pointers[i] = neurons + i;
838
+ }
839
+ }
840
+ }
841
+ }
842
+
843
+ void fann_add_candidate_neuron(struct fann *ann, struct fann_layer *layer)
844
+ {
845
+ unsigned int num_connections_in = layer->first_neuron - ann->first_layer->first_neuron;
846
+ unsigned int num_connections_out =
847
+ (ann->last_layer - 1)->last_neuron - (layer + 1)->first_neuron;
848
+ unsigned int num_connections_move = num_connections_out + num_connections_in;
849
+
850
+ unsigned int candidate_con, candidate_output_weight;
851
+ int i;
852
+
853
+ struct fann_layer *layer_it;
854
+ struct fann_neuron *neuron_it, *neuron_place, *candidate;
855
+
856
+ /* We know that there is enough room for the new neuron
857
+ * (the candidates are in the same arrays), so move
858
+ * the last neurons to make room for this neuron.
859
+ */
860
+
861
+ /* first move the pointers to neurons in the layer structs */
862
+ for(layer_it = ann->last_layer - 1; layer_it != layer; layer_it--)
863
+ {
864
+ #ifdef CASCADE_DEBUG_FULL
865
+ printf("move neuron pointers in layer %d, first(%d -> %d), last(%d -> %d)\n",
866
+ layer_it - ann->first_layer,
867
+ layer_it->first_neuron - ann->first_layer->first_neuron,
868
+ layer_it->first_neuron - ann->first_layer->first_neuron + 1,
869
+ layer_it->last_neuron - ann->first_layer->first_neuron,
870
+ layer_it->last_neuron - ann->first_layer->first_neuron + 1);
871
+ #endif
872
+ layer_it->first_neuron++;
873
+ layer_it->last_neuron++;
874
+ }
875
+
876
+ /* also move the last neuron in the layer that needs the neuron added */
877
+ layer->last_neuron++;
878
+
879
+ /* this is the place that should hold the new neuron */
880
+ neuron_place = layer->last_neuron - 1;
881
+
882
+ #ifdef CASCADE_DEBUG_FULL
883
+ printf("num_connections_in=%d, num_connections_out=%d\n", num_connections_in,
884
+ num_connections_out);
885
+ #endif
886
+
887
+ candidate = ann->first_layer->first_neuron + ann->cascade_best_candidate;
888
+
889
+ /* the output weights for the candidates are located after the input weights */
890
+ candidate_output_weight = candidate->last_con;
891
+
892
+ /* move the actual output neurons and the indexes to the connection arrays */
893
+ for(neuron_it = (ann->last_layer - 1)->last_neuron - 1; neuron_it != neuron_place; neuron_it--)
894
+ {
895
+ #ifdef CASCADE_DEBUG_FULL
896
+ printf("move neuron %d -> %d\n", neuron_it - ann->first_layer->first_neuron - 1,
897
+ neuron_it - ann->first_layer->first_neuron);
898
+ #endif
899
+ *neuron_it = *(neuron_it - 1);
900
+
901
+ /* move the weights */
902
+ #ifdef CASCADE_DEBUG_FULL
903
+ printf("move weight[%d ... %d] -> weight[%d ... %d]\n", neuron_it->first_con,
904
+ neuron_it->last_con - 1, neuron_it->first_con + num_connections_move - 1,
905
+ neuron_it->last_con + num_connections_move - 2);
906
+ #endif
907
+ for(i = neuron_it->last_con - 1; i >= (int)neuron_it->first_con; i--)
908
+ {
909
+ #ifdef CASCADE_DEBUG_FULL
910
+ printf("move weight[%d] = weight[%d]\n", i + num_connections_move - 1, i);
911
+ #endif
912
+ ann->weights[i + num_connections_move - 1] = ann->weights[i];
913
+ }
914
+
915
+ /* move the indexes to weights */
916
+ neuron_it->last_con += num_connections_move;
917
+ num_connections_move--;
918
+ neuron_it->first_con += num_connections_move;
919
+
920
+ /* set the new weight to the newly allocated neuron */
921
+ ann->weights[neuron_it->last_con - 1] =
922
+ (ann->weights[candidate_output_weight]) * ann->cascade_weight_multiplier;
923
+ candidate_output_weight++;
924
+ }
925
+
926
+ /* Now inititalize the actual neuron */
927
+ neuron_place->value = 0;
928
+ neuron_place->sum = 0;
929
+ neuron_place->activation_function = candidate->activation_function;
930
+ neuron_place->activation_steepness = candidate->activation_steepness;
931
+ neuron_place->last_con = (neuron_place + 1)->first_con;
932
+ neuron_place->first_con = neuron_place->last_con - num_connections_in;
933
+ #ifdef CASCADE_DEBUG_FULL
934
+ printf("neuron[%d] = weights[%d ... %d] activation: %s, steepness: %f\n",
935
+ neuron_place - ann->first_layer->first_neuron, neuron_place->first_con,
936
+ neuron_place->last_con - 1, FANN_ACTIVATIONFUNC_NAMES[neuron_place->activation_function],
937
+ neuron_place->activation_steepness);/* TODO remove */
938
+ #endif
939
+
940
+ candidate_con = candidate->first_con;
941
+ /* initialize the input weights at random */
942
+ #ifdef CASCADE_DEBUG_FULL
943
+ printf("move cand weights[%d ... %d] -> [%d ... %d]\n", candidate_con,
944
+ candidate_con + num_connections_in - 1, neuron_place->first_con,
945
+ neuron_place->last_con - 1);
946
+ #endif
947
+
948
+ for(i = 0; i < (int)num_connections_in; i++)
949
+ {
950
+ ann->weights[i + neuron_place->first_con] = ann->weights[i + candidate_con];
951
+ #ifdef CASCADE_DEBUG_FULL
952
+ printf("move weights[%d] -> weights[%d] (%f)\n", i + candidate_con,
953
+ i + neuron_place->first_con, ann->weights[i + neuron_place->first_con]);
954
+ #endif
955
+ }
956
+
957
+ /* Change some of main variables */
958
+ ann->total_neurons++;
959
+ ann->total_connections += num_connections_in + num_connections_out;
960
+
961
+ return;
962
+ }
963
+
964
+ void fann_install_candidate(struct fann *ann)
965
+ {
966
+ struct fann_layer *layer;
967
+
968
+ layer = fann_add_layer(ann, ann->last_layer - 1);
969
+ fann_add_candidate_neuron(ann, layer);
970
+ return;
971
+ }
972
+
973
+ #endif /* FIXEDFANN */
974
+
975
+ FANN_EXTERNAL unsigned int FANN_API fann_get_cascade_num_candidates(struct fann *ann)
976
+ {
977
+ return ann->cascade_activation_functions_count *
978
+ ann->cascade_activation_steepnesses_count *
979
+ ann->cascade_num_candidate_groups;
980
+ }
981
+
982
+ FANN_GET_SET(float, cascade_output_change_fraction)
983
+ FANN_GET_SET(unsigned int, cascade_output_stagnation_epochs)
984
+ FANN_GET_SET(float, cascade_candidate_change_fraction)
985
+ FANN_GET_SET(unsigned int, cascade_candidate_stagnation_epochs)
986
+ FANN_GET_SET(unsigned int, cascade_num_candidate_groups)
987
+ FANN_GET_SET(fann_type, cascade_weight_multiplier)
988
+ FANN_GET_SET(fann_type, cascade_candidate_limit)
989
+ FANN_GET_SET(unsigned int, cascade_max_out_epochs)
990
+ FANN_GET_SET(unsigned int, cascade_max_cand_epochs)
991
+ FANN_GET_SET(unsigned int, cascade_min_out_epochs)
992
+ FANN_GET_SET(unsigned int, cascade_min_cand_epochs)
993
+
994
+ FANN_GET(unsigned int, cascade_activation_functions_count)
995
+ FANN_GET(enum fann_activationfunc_enum *, cascade_activation_functions)
996
+
997
+ FANN_EXTERNAL void FANN_API fann_set_cascade_activation_functions(struct fann *ann,
998
+ enum fann_activationfunc_enum *
999
+ cascade_activation_functions,
1000
+ unsigned int
1001
+ cascade_activation_functions_count)
1002
+ {
1003
+ if(ann->cascade_activation_functions_count != cascade_activation_functions_count)
1004
+ {
1005
+ ann->cascade_activation_functions_count = cascade_activation_functions_count;
1006
+
1007
+ /* reallocate mem */
1008
+ ann->cascade_activation_functions =
1009
+ (enum fann_activationfunc_enum *)realloc(ann->cascade_activation_functions,
1010
+ ann->cascade_activation_functions_count * sizeof(enum fann_activationfunc_enum));
1011
+ if(ann->cascade_activation_functions == NULL)
1012
+ {
1013
+ fann_error((struct fann_error*)ann, FANN_E_CANT_ALLOCATE_MEM);
1014
+ return;
1015
+ }
1016
+ }
1017
+
1018
+ memmove(ann->cascade_activation_functions, cascade_activation_functions,
1019
+ ann->cascade_activation_functions_count * sizeof(enum fann_activationfunc_enum));
1020
+ }
1021
+
1022
+ FANN_GET(unsigned int, cascade_activation_steepnesses_count)
1023
+ FANN_GET(fann_type *, cascade_activation_steepnesses)
1024
+
1025
+ FANN_EXTERNAL void FANN_API fann_set_cascade_activation_steepnesses(struct fann *ann,
1026
+ fann_type *
1027
+ cascade_activation_steepnesses,
1028
+ unsigned int
1029
+ cascade_activation_steepnesses_count)
1030
+ {
1031
+ if(ann->cascade_activation_steepnesses_count != cascade_activation_steepnesses_count)
1032
+ {
1033
+ ann->cascade_activation_steepnesses_count = cascade_activation_steepnesses_count;
1034
+
1035
+ /* reallocate mem */
1036
+ ann->cascade_activation_steepnesses =
1037
+ (fann_type *)realloc(ann->cascade_activation_steepnesses,
1038
+ ann->cascade_activation_steepnesses_count * sizeof(fann_type));
1039
+ if(ann->cascade_activation_steepnesses == NULL)
1040
+ {
1041
+ fann_error((struct fann_error*)ann, FANN_E_CANT_ALLOCATE_MEM);
1042
+ return;
1043
+ }
1044
+ }
1045
+
1046
+ memmove(ann->cascade_activation_steepnesses, cascade_activation_steepnesses,
1047
+ ann->cascade_activation_steepnesses_count * sizeof(fann_type));
1048
+ }