ruby-fann 0.7.10 → 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/History.txt +6 -1
- data/License.txt +1 -1
- data/Manifest.txt +22 -1
- data/README.txt +0 -1
- data/Rakefile +0 -0
- data/config/hoe.rb +0 -0
- data/config/requirements.rb +0 -0
- data/ext/ruby_fann/MANIFEST +0 -0
- data/ext/ruby_fann/Makefile +36 -28
- data/ext/ruby_fann/doublefann.c +30 -0
- data/ext/ruby_fann/doublefann.h +33 -0
- data/ext/ruby_fann/extconf.rb +9 -5
- data/ext/ruby_fann/fann.c +1552 -0
- data/ext/ruby_fann/fann_activation.h +144 -0
- data/ext/ruby_fann/fann_augment.h +0 -0
- data/ext/ruby_fann/fann_cascade.c +1031 -0
- data/ext/ruby_fann/fann_cascade.h +503 -0
- data/ext/ruby_fann/fann_data.h +799 -0
- data/ext/ruby_fann/fann_error.c +204 -0
- data/ext/ruby_fann/fann_error.h +161 -0
- data/ext/ruby_fann/fann_internal.h +148 -0
- data/ext/ruby_fann/fann_io.c +762 -0
- data/ext/ruby_fann/fann_io.h +100 -0
- data/ext/ruby_fann/fann_train.c +962 -0
- data/ext/ruby_fann/fann_train.h +1203 -0
- data/ext/ruby_fann/fann_train_data.c +1231 -0
- data/ext/ruby_fann/neural_network.c +0 -0
- data/lib/ruby_fann/neurotica.rb +0 -0
- data/lib/ruby_fann/version.rb +3 -3
- data/lib/ruby_fann.rb +0 -0
- data/neurotica1.png +0 -0
- data/neurotica2.vrml +18 -18
- data/setup.rb +0 -0
- data/tasks/deployment.rake +0 -0
- data/tasks/environment.rake +0 -0
- data/tasks/website.rake +0 -0
- data/test/test.train +0 -0
- data/test/test_helper.rb +0 -0
- data/test/test_neurotica.rb +0 -0
- data/test/test_ruby_fann.rb +0 -0
- data/test/test_ruby_fann_functional.rb +0 -0
- data/verify.train +0 -0
- data/website/index.html +42 -92
- data/website/index.txt +0 -0
- data/website/javascripts/rounded_corners_lite.inc.js +0 -0
- data/website/stylesheets/screen.css +0 -0
- data/website/template.rhtml +0 -0
- data/xor.train +0 -0
- data/xor_cascade.net +2 -2
- data/xor_float.net +1 -1
- metadata +22 -6
- data/log/debug.log +0 -0
@@ -0,0 +1,962 @@
|
|
1
|
+
/*
|
2
|
+
Fast Artificial Neural Network Library (fann)
|
3
|
+
Copyright (C) 2003 Steffen Nissen (lukesky@diku.dk)
|
4
|
+
|
5
|
+
This library is free software; you can redistribute it and/or
|
6
|
+
modify it under the terms of the GNU Lesser General Public
|
7
|
+
License as published by the Free Software Foundation; either
|
8
|
+
version 2.1 of the License, or (at your option) any later version.
|
9
|
+
|
10
|
+
This library is distributed in the hope that it will be useful,
|
11
|
+
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
12
|
+
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
13
|
+
Lesser General Public License for more details.
|
14
|
+
|
15
|
+
You should have received a copy of the GNU Lesser General Public
|
16
|
+
License along with this library; if not, write to the Free Software
|
17
|
+
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
18
|
+
*/
|
19
|
+
|
20
|
+
#include <stdio.h>
|
21
|
+
#include <stdlib.h>
|
22
|
+
#include <stdarg.h>
|
23
|
+
#include <string.h>
|
24
|
+
|
25
|
+
#include "config.h"
|
26
|
+
#include "fann.h"
|
27
|
+
|
28
|
+
/*#define DEBUGTRAIN*/
|
29
|
+
|
30
|
+
#ifndef FIXEDFANN
|
31
|
+
/* INTERNAL FUNCTION
|
32
|
+
Calculates the derived of a value, given an activation function
|
33
|
+
and a steepness
|
34
|
+
*/
|
35
|
+
/* INTERNAL FUNCTION
   Calculates the derivative of an activation function at a given point.

   Parameters:
     activation_function - a fann_activationfunc_enum value
     steepness           - steepness used when the activation was computed
     value               - the neuron's activation output
     sum                 - the neuron's raw input sum (used by some derivatives)

   Returns the derivative, or 0 after reporting FANN_E_CANT_TRAIN_ACTIVATION
   for functions that have no usable derivative (threshold-style or unknown).
*/
fann_type fann_activation_derived(unsigned int activation_function,
								  fann_type steepness, fann_type value, fann_type sum)
{
	switch (activation_function)
	{
		case FANN_LINEAR:
		case FANN_LINEAR_PIECE:
		case FANN_LINEAR_PIECE_SYMMETRIC:
			return (fann_type) fann_linear_derive(steepness, value);
		case FANN_SIGMOID:
		case FANN_SIGMOID_STEPWISE:
			/* clip to keep the derivative away from zero at saturation */
			value = fann_clip(value, 0.01f, 0.99f);
			return (fann_type) fann_sigmoid_derive(steepness, value);
		case FANN_SIGMOID_SYMMETRIC:
		case FANN_SIGMOID_SYMMETRIC_STEPWISE:
			value = fann_clip(value, -0.98f, 0.98f);
			return (fann_type) fann_sigmoid_symmetric_derive(steepness, value);
		case FANN_GAUSSIAN:
			/* value = fann_clip(value, 0.01f, 0.99f); */
			return (fann_type) fann_gaussian_derive(steepness, value, sum);
		case FANN_GAUSSIAN_SYMMETRIC:
			/* value = fann_clip(value, -0.98f, 0.98f); */
			return (fann_type) fann_gaussian_symmetric_derive(steepness, value, sum);
		case FANN_ELLIOT:
			value = fann_clip(value, 0.01f, 0.99f);
			return (fann_type) fann_elliot_derive(steepness, value, sum);
		case FANN_ELLIOT_SYMMETRIC:
			value = fann_clip(value, -0.98f, 0.98f);
			return (fann_type) fann_elliot_symmetric_derive(steepness, value, sum);
		case FANN_SIN_SYMMETRIC:
			return (fann_type) fann_sin_symmetric_derive(steepness, sum);
		case FANN_COS_SYMMETRIC:
			return (fann_type) fann_cos_symmetric_derive(steepness, sum);
		case FANN_SIN:
			return (fann_type) fann_sin_derive(steepness, sum);
		case FANN_COS:
			return (fann_type) fann_cos_derive(steepness, sum);
		case FANN_THRESHOLD:
			fann_error(NULL, FANN_E_CANT_TRAIN_ACTIVATION);
			break;
		default:
			/* FANN_THRESHOLD_SYMMETRIC, FANN_GAUSSIAN_STEPWISE and any
			 * unknown value cannot be trained either; report it instead of
			 * silently returning 0 (the original switch had no default). */
			fann_error(NULL, FANN_E_CANT_TRAIN_ACTIVATION);
			break;
	}
	return 0;
}
|
77
|
+
|
78
|
+
/* INTERNAL FUNCTION
|
79
|
+
Calculates the activation of a value, given an activation function
|
80
|
+
and a steepness
|
81
|
+
*/
|
82
|
+
/* INTERNAL FUNCTION
   Applies the given activation function (with the given steepness) to a
   value and returns the activated result. */
fann_type fann_activation(struct fann * ann, unsigned int activation_function, fann_type steepness,
						  fann_type value)
{
	/* scale the input by the steepness first, then dispatch on the
	 * activation function via the project's switch macro */
	fann_type result = fann_mult(steepness, value);

	fann_activation_switch(activation_function, result, result);
	return result;
}
|
89
|
+
|
90
|
+
/* Trains the network with the backpropagation algorithm.
|
91
|
+
*/
|
92
|
+
/* Trains the network on a single (input, desired_output) pair using one
 * full pass of the backpropagation algorithm. */
FANN_EXTERNAL void FANN_API fann_train(struct fann *ann, fann_type * input,
									   fann_type * desired_output)
{
	/* forward pass */
	fann_run(ann, input);

	/* error at the output layer (also accumulates MSE statistics) */
	fann_compute_MSE(ann, desired_output);

	/* distribute the error backwards through the hidden layers */
	fann_backpropagate_MSE(ann);

	/* apply the incremental weight update */
	fann_update_weights(ann);
}
|
103
|
+
#endif
|
104
|
+
|
105
|
+
|
106
|
+
/* INTERNAL FUNCTION
|
107
|
+
Helper function to update the MSE value and return a diff which takes symmetric functions into account
|
108
|
+
*/
|
109
|
+
/* INTERNAL FUNCTION
   Accumulates the squared error of one output neuron into the network's
   MSE and bit-fail counters, and returns the (possibly scaled) difference.
   Symmetric activation functions span [-1, 1], twice the range of the
   asymmetric ones, so their difference is halved to keep MSE comparable. */
fann_type fann_update_MSE(struct fann *ann, struct fann_neuron* neuron, fann_type neuron_diff)
{
	float squared_diff;

	switch (neuron->activation_function)
	{
		case FANN_LINEAR_PIECE_SYMMETRIC:
		case FANN_THRESHOLD_SYMMETRIC:
		case FANN_SIGMOID_SYMMETRIC:
		case FANN_SIGMOID_SYMMETRIC_STEPWISE:
		case FANN_ELLIOT_SYMMETRIC:
		case FANN_GAUSSIAN_SYMMETRIC:
		case FANN_SIN_SYMMETRIC:
		case FANN_COS_SYMMETRIC:
			/* symmetric output range [-1, 1]: halve to match [0, 1] range */
			neuron_diff /= (fann_type)2.0;
			break;
		case FANN_THRESHOLD:
		case FANN_LINEAR:
		case FANN_SIGMOID:
		case FANN_SIGMOID_STEPWISE:
		case FANN_GAUSSIAN:
		case FANN_GAUSSIAN_STEPWISE:
		case FANN_ELLIOT:
		case FANN_LINEAR_PIECE:
		case FANN_SIN:
		case FANN_COS:
			/* asymmetric functions: no scaling needed */
			break;
	}

#ifdef FIXEDFANN
	/* fixed-point build: convert back to real units before squaring */
	squared_diff =
		(neuron_diff / (float) ann->multiplier) * (neuron_diff / (float) ann->multiplier);
#else
	squared_diff = (float) (neuron_diff * neuron_diff);
#endif

	ann->MSE_value += squared_diff;

	/* count outputs whose error magnitude reaches the bit-fail limit */
	if(fann_abs(neuron_diff) >= ann->bit_fail_limit)
		ann->num_bit_fail++;

	return neuron_diff;
}
|
155
|
+
|
156
|
+
/* Tests the network.
|
157
|
+
*/
|
158
|
+
/* Runs the network on an input vector and accumulates MSE/bit-fail
 * statistics against the desired output, without changing any weights.
 * Returns a pointer to the network's output array. */
FANN_EXTERNAL fann_type *FANN_API fann_test(struct fann *ann, fann_type * input,
											fann_type * desired_output)
{
	fann_type diff;
	fann_type *output_begin = fann_run(ann, input);
	const fann_type *output_end = output_begin + ann->num_output;
	fann_type *output_it;
	struct fann_neuron *output_neuron = (ann->last_layer - 1)->first_neuron;

	/* fold each output neuron's error into the MSE counters */
	for(output_it = output_begin; output_it != output_end; output_it++)
	{
		diff = *desired_output - *output_it;

		/* updates MSE_value / num_bit_fail; scaled return value unused here */
		diff = fann_update_MSE(ann, output_neuron, diff);

		desired_output++;
		output_neuron++;
		ann->num_MSE++;
	}

	return output_begin;
}
|
185
|
+
|
186
|
+
/* get the mean square error.
|
187
|
+
*/
|
188
|
+
/* Returns the mean square error accumulated since the last reset,
 * or 0 when no output values have been evaluated yet. */
FANN_EXTERNAL float FANN_API fann_get_MSE(struct fann *ann)
{
	if(ann->num_MSE == 0)
		return 0;

	return ann->MSE_value / (float) ann->num_MSE;
}
|
199
|
+
|
200
|
+
/* Returns the number of output values whose error magnitude reached the
 * bit-fail limit since the last reset (see fann_update_MSE). */
FANN_EXTERNAL unsigned int FANN_API fann_get_bit_fail(struct fann *ann)
{
	return ann->num_bit_fail;
}
|
204
|
+
|
205
|
+
/* reset the mean square error.
|
206
|
+
*/
|
207
|
+
/* Resets all accumulated error statistics (MSE sum, sample count and
 * bit-fail count) so a new measurement can start. */
FANN_EXTERNAL void FANN_API fann_reset_MSE(struct fann *ann)
{
	ann->MSE_value = 0;
	ann->num_MSE = 0;
	ann->num_bit_fail = 0;
}
|
213
|
+
|
214
|
+
#ifndef FIXEDFANN
|
215
|
+
|
216
|
+
/* INTERNAL FUNCTION
|
217
|
+
compute the error at the network output
|
218
|
+
(usually, after forward propagation of a certain input vector, fann_run)
|
219
|
+
the error is a sum of squares for all the output units
|
220
|
+
also increments a counter because MSE is an average of such errors
|
221
|
+
|
222
|
+
After this train_errors in the output layer will be set to:
|
223
|
+
neuron_value_derived * (desired_output - neuron_value)
|
224
|
+
*/
|
225
|
+
/* INTERNAL FUNCTION
   Compute the error at the network output
   (usually, after forward propagation of a certain input vector, fann_run).
   The error is a sum of squares for all the output units; also increments
   a counter because MSE is an average of such errors.

   After this, train_errors in the output layer will be set to:
     neuron_value_derived * (desired_output - neuron_value)
*/
void fann_compute_MSE(struct fann *ann, fann_type * desired_output)
{
	fann_type neuron_value, neuron_diff, *error_it = 0, *error_begin = 0;
	struct fann_neuron *last_layer_begin = (ann->last_layer - 1)->first_neuron;
	const struct fann_neuron *last_layer_end = last_layer_begin + ann->num_output;
	const struct fann_neuron *first_neuron = ann->first_layer->first_neuron;

	/* if no room is allocated for the error variables, allocate it now
	 * (one slot per neuron; calloc zero-fills) */
	if(ann->train_errors == NULL)
	{
		ann->train_errors = (fann_type *) calloc(ann->total_neurons, sizeof(fann_type));
		if(ann->train_errors == NULL)
		{
			fann_error((struct fann_error *) ann, FANN_E_CANT_ALLOCATE_MEM);
			return;
		}
	}
	else
	{
		/* clear the error variables left over from the previous pattern */
		memset(ann->train_errors, 0, (ann->total_neurons) * sizeof(fann_type));
	}
	error_begin = ann->train_errors;

#ifdef DEBUGTRAIN
	printf("\ncalculate errors\n");
#endif
	/* position error_it at the first output neuron's error slot */
	error_it = error_begin + (last_layer_begin - first_neuron);

	for(; last_layer_begin != last_layer_end; last_layer_begin++)
	{
		neuron_value = last_layer_begin->value;
		neuron_diff = *desired_output - neuron_value;

		/* accumulate MSE/bit-fail stats; may halve diff for symmetric fns */
		neuron_diff = fann_update_MSE(ann, last_layer_begin, neuron_diff);

		if(ann->train_error_function)
		{	/* TODO make switch when more functions */
			/* tanh error function: clamp to avoid log() blowing up near +/-1 */
			if(neuron_diff < -.9999999)
				neuron_diff = -17.0;
			else if(neuron_diff > .9999999)
				neuron_diff = 17.0;
			else
				neuron_diff = (fann_type) log((1.0 + neuron_diff) / (1.0 - neuron_diff));
		}

		/* chain rule: error = f'(sum) * diff */
		*error_it = fann_activation_derived(last_layer_begin->activation_function,
											last_layer_begin->activation_steepness, neuron_value,
											last_layer_begin->sum) * neuron_diff;

		desired_output++;
		error_it++;

		ann->num_MSE++;
	}
}
|
282
|
+
|
283
|
+
/* INTERNAL FUNCTION
|
284
|
+
Propagate the error backwards from the output layer.
|
285
|
+
|
286
|
+
After this the train_errors in the hidden layers will be:
|
287
|
+
neuron_value_derived * sum(outgoing_weights * connected_neuron)
|
288
|
+
*/
|
289
|
+
/* INTERNAL FUNCTION
   Propagate the error backwards from the output layer.

   After this the train_errors in the hidden layers will be:
     neuron_value_derived * sum(outgoing_weights * connected_neuron)
*/
void fann_backpropagate_MSE(struct fann *ann)
{
	fann_type tmp_error;
	unsigned int i;
	struct fann_layer *layer_it;
	struct fann_neuron *neuron_it, *last_neuron;
	struct fann_neuron **connections;

	fann_type *error_begin = ann->train_errors;
	fann_type *error_prev_layer;
	fann_type *weights;
	const struct fann_neuron *first_neuron = ann->first_layer->first_neuron;
	const struct fann_layer *second_layer = ann->first_layer + 1;
	struct fann_layer *last_layer = ann->last_layer;

	/* go through all the layers, from last to first.
	 * And propagate the error backwards */
	for(layer_it = last_layer - 1; layer_it > second_layer; --layer_it)
	{
		last_neuron = layer_it->last_neuron;

		/* for each connection in this layer, propagate the error backwards */
		if(ann->connection_rate >= 1)
		{
			if(ann->network_type == FANN_NETTYPE_LAYER)
			{
				/* layered net: previous layer's errors start at the slot of
				 * its first neuron */
				error_prev_layer = error_begin + ((layer_it - 1)->first_neuron - first_neuron);
			}
			else
			{
				/* shortcut net: a neuron may connect to any earlier neuron,
				 * so index the error array from the very beginning */
				error_prev_layer = error_begin;
			}

			for(neuron_it = layer_it->first_neuron; neuron_it != last_neuron; neuron_it++)
			{

				tmp_error = error_begin[neuron_it - first_neuron];
				weights = ann->weights + neuron_it->first_con;
				for(i = neuron_it->last_con - neuron_it->first_con; i--;)
				{
					/* each input neuron receives a weighted share of this
					 * neuron's error */
					error_prev_layer[i] += tmp_error * weights[i];
				}
			}
		}
		else
		{
			/* sparse network: follow the explicit connection pointers */
			for(neuron_it = layer_it->first_neuron; neuron_it != last_neuron; neuron_it++)
			{

				tmp_error = error_begin[neuron_it - first_neuron];
				weights = ann->weights + neuron_it->first_con;
				connections = ann->connections + neuron_it->first_con;
				for(i = neuron_it->last_con - neuron_it->first_con; i--;)
				{
					error_begin[connections[i] - first_neuron] += tmp_error * weights[i];
				}
			}
		}

		/* then calculate the actual errors in the previous layer:
		 * multiply the accumulated weighted error sums by the derivative
		 * of each neuron's activation function */
		error_prev_layer = error_begin + ((layer_it - 1)->first_neuron - first_neuron);
		last_neuron = (layer_it - 1)->last_neuron;

		for(neuron_it = (layer_it - 1)->first_neuron; neuron_it != last_neuron; neuron_it++)
		{
			*error_prev_layer *= fann_activation_derived(neuron_it->activation_function,
					neuron_it->activation_steepness, neuron_it->value, neuron_it->sum);
			error_prev_layer++;
		}

	}
}
|
364
|
+
|
365
|
+
/* INTERNAL FUNCTION
|
366
|
+
Update weights for incremental training
|
367
|
+
*/
|
368
|
+
/* INTERNAL FUNCTION
   Update weights for incremental training.
   Uses the errors left in ann->train_errors by fann_backpropagate_MSE,
   applying learning_rate and learning_momentum (momentum uses the delta
   stored from the previous update in ann->prev_weights_deltas).
*/
void fann_update_weights(struct fann *ann)
{
	struct fann_neuron *neuron_it, *last_neuron, *prev_neurons;
	fann_type tmp_error, delta_w, *weights;
	struct fann_layer *layer_it;
	unsigned int i;
	unsigned int num_connections;

	/* store some variables locally for fast access */
	const float learning_rate = ann->learning_rate;
	const float learning_momentum = ann->learning_momentum;
	struct fann_neuron *first_neuron = ann->first_layer->first_neuron;
	struct fann_layer *first_layer = ann->first_layer;
	const struct fann_layer *last_layer = ann->last_layer;
	fann_type *error_begin = ann->train_errors;
	fann_type *deltas_begin, *weights_deltas;

	/* if no room is allocated for the deltas, allocate it now
	 * (calloc zero-fills, so the first momentum term is 0) */
	if(ann->prev_weights_deltas == NULL)
	{
		ann->prev_weights_deltas =
			(fann_type *) calloc(ann->total_connections_allocated, sizeof(fann_type));
		if(ann->prev_weights_deltas == NULL)
		{
			fann_error((struct fann_error *) ann, FANN_E_CANT_ALLOCATE_MEM);
			return;
		}
	}

#ifdef DEBUGTRAIN
	printf("\nupdate weights\n");
#endif
	deltas_begin = ann->prev_weights_deltas;
	prev_neurons = first_neuron;
	for(layer_it = (first_layer + 1); layer_it != last_layer; layer_it++)
	{
#ifdef DEBUGTRAIN
		printf("layer[%d]\n", layer_it - first_layer);
#endif
		last_neuron = layer_it->last_neuron;
		if(ann->connection_rate >= 1)
		{
			if(ann->network_type == FANN_NETTYPE_LAYER)
			{
				/* layered net: inputs come from the previous layer only */
				prev_neurons = (layer_it - 1)->first_neuron;
			}
			for(neuron_it = layer_it->first_neuron; neuron_it != last_neuron; neuron_it++)
			{
				tmp_error = error_begin[neuron_it - first_neuron] * learning_rate;
				num_connections = neuron_it->last_con - neuron_it->first_con;
				weights = ann->weights + neuron_it->first_con;
				weights_deltas = deltas_begin + neuron_it->first_con;
				for(i = 0; i != num_connections; i++)
				{
					/* gradient step plus momentum from the previous delta */
					delta_w = tmp_error * prev_neurons[i].value + learning_momentum * weights_deltas[i];
					weights[i] += delta_w ;
					weights_deltas[i] = delta_w;
				}
			}
		}
		else
		{
			/* sparse network path; NOTE(review): indexes prev_neurons[i]
			 * rather than the explicit connection pointers — mirrors the
			 * upstream FANN source as shipped */
			for(neuron_it = layer_it->first_neuron; neuron_it != last_neuron; neuron_it++)
			{
				tmp_error = error_begin[neuron_it - first_neuron] * learning_rate;
				num_connections = neuron_it->last_con - neuron_it->first_con;
				weights = ann->weights + neuron_it->first_con;
				weights_deltas = deltas_begin + neuron_it->first_con;
				for(i = 0; i != num_connections; i++)
				{
					delta_w = tmp_error * prev_neurons[i].value + learning_momentum * weights_deltas[i];
					weights[i] += delta_w;
					weights_deltas[i] = delta_w;
				}
			}
		}
	}
}
|
446
|
+
|
447
|
+
/* INTERNAL FUNCTION
|
448
|
+
Update slopes for batch training
|
449
|
+
layer_begin = ann->first_layer+1 and layer_end = ann->last_layer-1
|
450
|
+
will update all slopes.
|
451
|
+
|
452
|
+
*/
|
453
|
+
/* INTERNAL FUNCTION
   Update slopes for batch training.
   layer_begin = ann->first_layer+1 and layer_end = ann->last_layer-1
   will update all slopes; either may be passed as NULL to get that default.
   Accumulates error * input_value into ann->train_slopes for every
   connection; the weights themselves are changed later by one of the
   batch update functions.
*/
void fann_update_slopes_batch(struct fann *ann, struct fann_layer *layer_begin,
							  struct fann_layer *layer_end)
{
	struct fann_neuron *neuron_it, *last_neuron, *prev_neurons, **connections;
	fann_type tmp_error;
	unsigned int i, num_connections;

	/* store some variables locally for fast access */
	struct fann_neuron *first_neuron = ann->first_layer->first_neuron;
	fann_type *error_begin = ann->train_errors;
	fann_type *slope_begin, *neuron_slope;

	/* if no room is allocated for the slope variables, allocate it now
	 * (calloc zero-fills) */
	if(ann->train_slopes == NULL)
	{
		ann->train_slopes =
			(fann_type *) calloc(ann->total_connections_allocated, sizeof(fann_type));
		if(ann->train_slopes == NULL)
		{
			fann_error((struct fann_error *) ann, FANN_E_CANT_ALLOCATE_MEM);
			return;
		}
	}

	/* NULL means "from the first hidden layer" */
	if(layer_begin == NULL)
	{
		layer_begin = ann->first_layer + 1;
	}

	/* NULL means "through the output layer" */
	if(layer_end == NULL)
	{
		layer_end = ann->last_layer - 1;
	}

	slope_begin = ann->train_slopes;

#ifdef DEBUGTRAIN
	printf("\nupdate slopes\n");
#endif

	prev_neurons = first_neuron;

	for(; layer_begin <= layer_end; layer_begin++)
	{
#ifdef DEBUGTRAIN
		printf("layer[%d]\n", layer_begin - ann->first_layer);
#endif
		last_neuron = layer_begin->last_neuron;
		if(ann->connection_rate >= 1)
		{
			if(ann->network_type == FANN_NETTYPE_LAYER)
			{
				/* layered net: inputs come from the previous layer only */
				prev_neurons = (layer_begin - 1)->first_neuron;
			}

			for(neuron_it = layer_begin->first_neuron; neuron_it != last_neuron; neuron_it++)
			{
				tmp_error = error_begin[neuron_it - first_neuron];
				neuron_slope = slope_begin + neuron_it->first_con;
				num_connections = neuron_it->last_con - neuron_it->first_con;
				for(i = 0; i != num_connections; i++)
				{
					/* slope accumulates gradient contribution per pattern */
					neuron_slope[i] += tmp_error * prev_neurons[i].value;
				}
			}
		}
		else
		{
			/* sparse network: follow the explicit connection pointers */
			for(neuron_it = layer_begin->first_neuron; neuron_it != last_neuron; neuron_it++)
			{
				tmp_error = error_begin[neuron_it - first_neuron];
				neuron_slope = slope_begin + neuron_it->first_con;
				num_connections = neuron_it->last_con - neuron_it->first_con;
				connections = ann->connections + neuron_it->first_con;
				for(i = 0; i != num_connections; i++)
				{
					neuron_slope[i] += tmp_error * connections[i]->value;
				}
			}
		}
	}
}
|
535
|
+
|
536
|
+
/* INTERNAL FUNCTION
|
537
|
+
Clears arrays used for training before a new training session.
|
538
|
+
Also creates the arrays that do not exist yet.
|
539
|
+
*/
|
540
|
+
/* INTERNAL FUNCTION
   Clears arrays used for training before a new training session.
   Also creates the arrays that do not exist yet:
   - train_slopes       (zeroed)
   - prev_steps         (delta_zero for RPROP, zero otherwise)
   - prev_train_slopes  (zeroed)
*/
void fann_clear_train_arrays(struct fann *ann)
{
	unsigned int i;
	fann_type delta_zero;

	/* if no room is allocated for the slope variables, allocate it now
	 * (calloc clears mem) */
	if(ann->train_slopes == NULL)
	{
		ann->train_slopes =
			(fann_type *) calloc(ann->total_connections_allocated, sizeof(fann_type));
		if(ann->train_slopes == NULL)
		{
			fann_error((struct fann_error *) ann, FANN_E_CANT_ALLOCATE_MEM);
			return;
		}
	}
	else
	{
		memset(ann->train_slopes, 0, (ann->total_connections_allocated) * sizeof(fann_type));
	}

	/* if no room is allocated for the previous steps, allocate it now
	 * (plain malloc: every element is explicitly set below) */
	if(ann->prev_steps == NULL)
	{
		ann->prev_steps = (fann_type *) malloc(ann->total_connections_allocated * sizeof(fann_type));
		if(ann->prev_steps == NULL)
		{
			fann_error((struct fann_error *) ann, FANN_E_CANT_ALLOCATE_MEM);
			return;
		}
	}

	if(ann->training_algorithm == FANN_TRAIN_RPROP)
	{
		/* RPROP starts each weight's step size at delta_zero */
		delta_zero = ann->rprop_delta_zero;

		for(i = 0; i < ann->total_connections_allocated; i++)
			ann->prev_steps[i] = delta_zero;
	}
	else
	{
		memset(ann->prev_steps, 0, (ann->total_connections_allocated) * sizeof(fann_type));
	}

	/* if no room is allocated for the previous slopes, allocate it now */
	if(ann->prev_train_slopes == NULL)
	{
		ann->prev_train_slopes =
			(fann_type *) calloc(ann->total_connections_allocated, sizeof(fann_type));
		if(ann->prev_train_slopes == NULL)
		{
			fann_error((struct fann_error *) ann, FANN_E_CANT_ALLOCATE_MEM);
			return;
		}
	}
	else
	{
		memset(ann->prev_train_slopes, 0, (ann->total_connections_allocated) * sizeof(fann_type));
	}
}
|
601
|
+
|
602
|
+
/* INTERNAL FUNCTION
|
603
|
+
Update weights for batch training
|
604
|
+
*/
|
605
|
+
void fann_update_weights_batch(struct fann *ann, unsigned int num_data, unsigned int first_weight,
|
606
|
+
unsigned int past_end)
|
607
|
+
{
|
608
|
+
fann_type *train_slopes = ann->train_slopes;
|
609
|
+
fann_type *weights = ann->weights;
|
610
|
+
const float epsilon = ann->learning_rate / num_data;
|
611
|
+
unsigned int i = first_weight;
|
612
|
+
|
613
|
+
for(; i != past_end; i++)
|
614
|
+
{
|
615
|
+
weights[i] += train_slopes[i] * epsilon;
|
616
|
+
train_slopes[i] = 0.0;
|
617
|
+
}
|
618
|
+
}
|
619
|
+
|
620
|
+
/* INTERNAL FUNCTION
|
621
|
+
The quickprop training algorithm
|
622
|
+
*/
|
623
|
+
/* INTERNAL FUNCTION
   The quickprop training algorithm.
   Applies Fahlman's quickprop update to the weight range
   [first_weight, past_end), using the slopes accumulated by
   fann_update_slopes_batch and the per-weight history in
   prev_steps / prev_train_slopes. Weights are clamped to [-1500, 1500].
*/
void fann_update_weights_quickprop(struct fann *ann, unsigned int num_data,
								   unsigned int first_weight, unsigned int past_end)
{
	fann_type *train_slopes = ann->train_slopes;
	fann_type *weights = ann->weights;
	fann_type *prev_steps = ann->prev_steps;
	fann_type *prev_train_slopes = ann->prev_train_slopes;

	fann_type w, prev_step, slope, prev_slope, next_step;

	/* learning rate averaged over the number of training patterns */
	float epsilon = ann->learning_rate / num_data;
	float decay = ann->quickprop_decay;	/*-0.0001;*/
	float mu = ann->quickprop_mu;	/*1.75; */
	/* growth limit: quadratic step only used while the new slope is not
	 * too close to the previous one */
	float shrink_factor = (float) (mu / (1.0 + mu));

	unsigned int i = first_weight;

	for(; i != past_end; i++)
	{
		w = weights[i];
		prev_step = prev_steps[i];
		/* weight decay pulls large weights toward zero */
		slope = train_slopes[i] + decay * w;
		prev_slope = prev_train_slopes[i];
		next_step = 0.0;

		/* The step must always be in direction opposite to the slope. */
		if(prev_step > 0.001)
		{
			/* If last step was positive... */
			if(slope > 0.0)	/* Add in linear term if current slope is still positive. */
				next_step += epsilon * slope;

			/* If current slope is close to or larger than prev slope... */
			if(slope > (shrink_factor * prev_slope))
				next_step += mu * prev_step;	/* Take maximum size negative step. */
			else
				next_step += prev_step * slope / (prev_slope - slope);	/* Else, use quadratic estimate. */
		}
		else if(prev_step < -0.001)
		{
			/* If last step was negative... */
			if(slope < 0.0)	/* Add in linear term if current slope is still negative. */
				next_step += epsilon * slope;

			/* If current slope is close to or more neg than prev slope... */
			if(slope < (shrink_factor * prev_slope))
				next_step += mu * prev_step;	/* Take maximum size negative step. */
			else
				next_step += prev_step * slope / (prev_slope - slope);	/* Else, use quadratic estimate. */
		}
		else	/* Last step was zero, so use only linear term. */
			next_step += epsilon * slope;

		/* update global data arrays */
		prev_steps[i] = next_step;

		w += next_step;

		/* clamp the weight to keep the network numerically stable */
		if(w > 1500)
			weights[i] = 1500;
		else if(w < -1500)
			weights[i] = -1500;
		else
			weights[i] = w;

		prev_train_slopes[i] = slope;
		train_slopes[i] = 0.0;
	}
}
|
707
|
+
|
708
|
+
/* INTERNAL FUNCTION
|
709
|
+
The iRprop- algorithm
|
710
|
+
*/
|
711
|
+
/* INTERNAL FUNCTION
   The iRprop- algorithm.
   Sign-based batch update over [first_weight, past_end): the step size
   grows while the slope keeps its sign and shrinks when it flips (with a
   slope reset, the "minus" variant). Weights are clamped to [-1500, 1500].
*/
void fann_update_weights_irpropm(struct fann *ann, unsigned int first_weight, unsigned int past_end)
{
	fann_type *train_slopes = ann->train_slopes;
	fann_type *weights = ann->weights;
	fann_type *prev_steps = ann->prev_steps;
	fann_type *prev_train_slopes = ann->prev_train_slopes;

	fann_type prev_step, slope, prev_slope, next_step, same_sign;

	float increase_factor = ann->rprop_increase_factor;	/*1.2; */
	float decrease_factor = ann->rprop_decrease_factor;	/*0.5; */
	float delta_min = ann->rprop_delta_min;	/*0.0; */
	float delta_max = ann->rprop_delta_max;	/*50.0; */

	unsigned int i = first_weight;

	for(; i != past_end; i++)
	{
		prev_step = fann_max(prev_steps[i], (fann_type) 0.0001);	/* prev_step may not be zero because then the training will stop */
		slope = train_slopes[i];
		prev_slope = prev_train_slopes[i];

		/* positive product: slope kept its sign since the last update */
		same_sign = prev_slope * slope;

		if(same_sign >= 0.0)
			next_step = fann_min(prev_step * increase_factor, delta_max);
		else
		{
			/* sign flip: shrink the step and skip this slope (iRprop-) */
			next_step = fann_max(prev_step * decrease_factor, delta_min);
			slope = 0;
		}

		if(slope < 0)
		{
			weights[i] -= next_step;
			if(weights[i] < -1500)
				weights[i] = -1500;
		}
		else
		{
			weights[i] += next_step;
			if(weights[i] > 1500)
				weights[i] = 1500;
		}

		/* update global data arrays */
		prev_steps[i] = next_step;
		prev_train_slopes[i] = slope;
		train_slopes[i] = 0.0;
	}
}
|
766
|
+
|
767
|
+
#endif
|
768
|
+
|
769
|
+
FANN_GET_SET(enum fann_train_enum, training_algorithm)
|
770
|
+
FANN_GET_SET(float, learning_rate)
|
771
|
+
|
772
|
+
/* Sets the activation function of every neuron in every hidden layer
 * (input and output layers are left untouched). */
FANN_EXTERNAL void FANN_API fann_set_activation_function_hidden(struct fann *ann,
																enum fann_activationfunc_enum activation_function)
{
	struct fann_layer *layer_it;
	struct fann_layer *last_layer = ann->last_layer - 1;	/* -1 to not update the output layer */

	for(layer_it = ann->first_layer + 1; layer_it != last_layer; layer_it++)
	{
		struct fann_neuron *neuron_it = layer_it->first_neuron;
		struct fann_neuron *last_neuron = layer_it->last_neuron;

		while(neuron_it != last_neuron)
		{
			neuron_it->activation_function = activation_function;
			neuron_it++;
		}
	}
}
|
788
|
+
|
789
|
+
/* Returns a pointer to layer number `layer` (1 = first hidden layer;
 * layer 0, the input layer, is deliberately rejected), or NULL after
 * reporting an index error. */
FANN_EXTERNAL struct fann_layer* FANN_API fann_get_layer(struct fann *ann, int layer)
{
	if(layer > 0 && layer < (ann->last_layer - ann->first_layer))
		return ann->first_layer + layer;

	fann_error((struct fann_error *) ann, FANN_E_INDEX_OUT_OF_BOUND, layer);
	return NULL;
}
|
799
|
+
|
800
|
+
/* Returns a pointer to neuron number `neuron` within `layer`, or NULL
 * after reporting an index error.
 * Fix: `neuron` is a signed int, but the original only checked the upper
 * bound — a negative index passed validation and produced an
 * out-of-bounds pointer. Reject negative indices as well. */
FANN_EXTERNAL struct fann_neuron* FANN_API fann_get_neuron_layer(struct fann *ann, struct fann_layer* layer, int neuron)
{
	if(neuron < 0 || neuron >= (layer->last_neuron - layer->first_neuron))
	{
		fann_error((struct fann_error *) ann, FANN_E_INDEX_OUT_OF_BOUND, neuron);
		return NULL;
	}

	return layer->first_neuron + neuron;
}
|
810
|
+
|
811
|
+
/* Looks up neuron `neuron` in layer `layer`; returns NULL when either
 * index is out of bounds (the helpers report the error). */
FANN_EXTERNAL struct fann_neuron* FANN_API fann_get_neuron(struct fann *ann, unsigned int layer, int neuron)
{
	struct fann_layer *layer_it = fann_get_layer(ann, layer);

	return (layer_it == NULL) ? NULL : fann_get_neuron_layer(ann, layer_it, neuron);
}
|
818
|
+
|
819
|
+
FANN_EXTERNAL enum fann_activationfunc_enum FANN_API
|
820
|
+
fann_get_activation_function(struct fann *ann, int layer, int neuron)
|
821
|
+
{
|
822
|
+
struct fann_neuron* neuron_it = fann_get_neuron(ann, layer, neuron);
|
823
|
+
if (neuron_it == NULL)
|
824
|
+
{
|
825
|
+
return (enum fann_activationfunc_enum)-1; /* layer or neuron out of bounds */
|
826
|
+
}
|
827
|
+
else
|
828
|
+
{
|
829
|
+
return neuron_it->activation_function;
|
830
|
+
}
|
831
|
+
}
|
832
|
+
|
833
|
+
/*
 * Set the activation function of a single neuron; silently a no-op when
 * the layer or neuron index is out of bounds (the lookup records the
 * error on the ann).
 */
FANN_EXTERNAL void FANN_API fann_set_activation_function(struct fann *ann,
                                                         enum fann_activationfunc_enum activation_function,
                                                         int layer,
                                                         int neuron)
{
    struct fann_neuron* target = fann_get_neuron(ann, layer, neuron);

    if(target != NULL)
        target->activation_function = activation_function;
}
|
845
|
+
|
846
|
+
/*
 * Set the activation function of every neuron in one layer; no-op when
 * the layer index is out of bounds (the lookup records the error).
 */
FANN_EXTERNAL void FANN_API fann_set_activation_function_layer(struct fann *ann,
                                                               enum fann_activationfunc_enum activation_function,
                                                               int layer)
{
    struct fann_neuron *neuron_it, *stop;
    struct fann_layer *target_layer = fann_get_layer(ann, layer);

    if(target_layer == NULL)
        return;

    stop = target_layer->last_neuron;
    for(neuron_it = target_layer->first_neuron; neuron_it != stop; neuron_it++)
        neuron_it->activation_function = activation_function;
}
|
863
|
+
|
864
|
+
|
865
|
+
/*
 * Set the activation function of every neuron in the output layer
 * (the last layer of the network).
 */
FANN_EXTERNAL void FANN_API fann_set_activation_function_output(struct fann *ann,
                                                                enum fann_activationfunc_enum activation_function)
{
    struct fann_layer *output_layer = ann->last_layer - 1;
    struct fann_neuron *neuron_it = output_layer->first_neuron;
    struct fann_neuron *stop = output_layer->last_neuron;

    for(; neuron_it != stop; neuron_it++)
        neuron_it->activation_function = activation_function;
}
|
877
|
+
|
878
|
+
/*
 * Set the activation steepness of every neuron in every hidden layer,
 * i.e. all layers except the first (input) and last (output) layer.
 */
FANN_EXTERNAL void FANN_API fann_set_activation_steepness_hidden(struct fann *ann,
                                                                 fann_type steepness)
{
    struct fann_layer *layer_it;
    struct fann_neuron *neuron_it;
    /* stop one layer short so the output layer keeps its own steepness */
    struct fann_layer *output_layer = ann->last_layer - 1;

    for(layer_it = ann->first_layer + 1; layer_it != output_layer; layer_it++)
    {
        struct fann_neuron *stop = layer_it->last_neuron;

        for(neuron_it = layer_it->first_neuron; neuron_it != stop; neuron_it++)
            neuron_it->activation_steepness = steepness;
    }
}
|
894
|
+
|
895
|
+
FANN_EXTERNAL fann_type FANN_API
|
896
|
+
fann_get_activation_steepness(struct fann *ann, int layer, int neuron)
|
897
|
+
{
|
898
|
+
struct fann_neuron* neuron_it = fann_get_neuron(ann, layer, neuron);
|
899
|
+
if(neuron_it == NULL)
|
900
|
+
{
|
901
|
+
return -1; /* layer or neuron out of bounds */
|
902
|
+
}
|
903
|
+
else
|
904
|
+
{
|
905
|
+
return neuron_it->activation_steepness;
|
906
|
+
}
|
907
|
+
}
|
908
|
+
|
909
|
+
/*
 * Set the activation steepness of a single neuron; silently a no-op
 * when the layer or neuron index is out of bounds (the lookup records
 * the error on the ann).
 */
FANN_EXTERNAL void FANN_API fann_set_activation_steepness(struct fann *ann,
                                                          fann_type steepness,
                                                          int layer,
                                                          int neuron)
{
    struct fann_neuron* target = fann_get_neuron(ann, layer, neuron);

    if(target != NULL)
        target->activation_steepness = steepness;
}
|
920
|
+
|
921
|
+
/*
 * Set the activation steepness of every neuron in one layer; no-op when
 * the layer index is out of bounds (the lookup records the error).
 */
FANN_EXTERNAL void FANN_API fann_set_activation_steepness_layer(struct fann *ann,
                                                                fann_type steepness,
                                                                int layer)
{
    struct fann_neuron *neuron_it, *stop;
    struct fann_layer *target_layer = fann_get_layer(ann, layer);

    if(target_layer == NULL)
        return;

    stop = target_layer->last_neuron;
    for(neuron_it = target_layer->first_neuron; neuron_it != stop; neuron_it++)
        neuron_it->activation_steepness = steepness;
}
|
937
|
+
|
938
|
+
/*
 * Set the activation steepness of every neuron in the output layer
 * (the last layer of the network).
 */
FANN_EXTERNAL void FANN_API fann_set_activation_steepness_output(struct fann *ann,
                                                                 fann_type steepness)
{
    struct fann_layer *output_layer = ann->last_layer - 1;
    struct fann_neuron *neuron_it = output_layer->first_neuron;
    struct fann_neuron *stop = output_layer->last_neuron;

    for(; neuron_it != stop; neuron_it++)
        neuron_it->activation_steepness = steepness;
}
|
950
|
+
|
951
|
+
/* Generated scalar accessors for the remaining training parameters:
 * each FANN_GET_SET(type, name) invocation presumably expands to a
 * fann_get_<name>/fann_set_<name> pair over ann-><name> — see the
 * FANN_GET_SET macro defined earlier in this file (TODO confirm: macro
 * definition is outside this chunk). */
FANN_GET_SET(enum fann_errorfunc_enum, train_error_function)
FANN_GET_SET(fann_callback_type, callback)
FANN_GET_SET(float, quickprop_decay)
FANN_GET_SET(float, quickprop_mu)
FANN_GET_SET(float, rprop_increase_factor)
FANN_GET_SET(float, rprop_decrease_factor)
FANN_GET_SET(float, rprop_delta_min)
FANN_GET_SET(float, rprop_delta_max)
FANN_GET_SET(float, rprop_delta_zero)
FANN_GET_SET(enum fann_stopfunc_enum, train_stop_function)
FANN_GET_SET(fann_type, bit_fail_limit)
FANN_GET_SET(float, learning_momentum)
|