moo_fann 0.1.0

/*
Fast Artificial Neural Network Library (fann)
Copyright (C) 2003-2012 Steffen Nissen (sn@leenissen.dk)

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/

#ifndef __fann_data_h__
#define __fann_data_h__

#include <stdio.h>

/* Section: FANN Datatypes

   The two main datatypes used in the fann library are <struct fann>,
   which represents an artificial neural network, and <struct fann_train_data>,
   which represents training data.
 */


/* Type: fann_type
   fann_type is the type used for the weights, inputs and outputs of the neural network.

   fann_type is defined as a:
   float - if you include fann.h or floatfann.h
   double - if you include doublefann.h
   int - if you include fixedfann.h (please be aware that fixed point usage is
   only to be used during execution, and not during training).
*/
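
/* Example:
   A minimal sketch of how fann_type appears at the API boundary. It assumes
   floatfann.h is included (so fann_type is float) and uses the creation and
   execution functions declared in fann.h:
   (code)
   #include "floatfann.h"

   int main(void)
   {
       struct fann *ann = fann_create_standard(3, 2, 3, 1);
       fann_type input[2] = {0.0f, 1.0f};
       fann_type *output = fann_run(ann, input);
       printf("output: %f\n", output[0]);
       fann_destroy(ann);
       return 0;
   }
   (end)
*/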

/* Enum: fann_train_enum
   The Training algorithms used when training on <struct fann_train_data> with functions like
   <fann_train_on_data> or <fann_train_on_file>. Incremental training alters the weights
   each time it is presented an input pattern, while batch training only alters the weights once
   after it has been presented with all the patterns.

   FANN_TRAIN_INCREMENTAL - Standard backpropagation algorithm, where the weights are
      updated after each training pattern. This means that the weights are updated many
      times during a single epoch. For this reason some problems will train very fast with
      this algorithm, while other more advanced problems will not train very well.
   FANN_TRAIN_BATCH - Standard backpropagation algorithm, where the weights are updated after
      calculating the mean square error for the whole training set. This means that the weights
      are only updated once during an epoch. For this reason some problems will train slower with
      this algorithm. But since the mean square error is calculated more correctly than in
      incremental training, some problems will reach a better solution with this algorithm.
   FANN_TRAIN_RPROP - A more advanced batch training algorithm which achieves good results
      for many problems. The RPROP training algorithm is adaptive, and does therefore not
      use the learning_rate. Some other parameters can however be set to change the way the
      RPROP algorithm works, but changing them is only recommended for users with insight
      into how the RPROP training algorithm works. The RPROP training algorithm is described by
      [Riedmiller and Braun, 1993], but the actual learning algorithm used here is the
      iRPROP- training algorithm which is described by [Igel and Husken, 2000] and
      is a variant of the standard RPROP training algorithm.
   FANN_TRAIN_QUICKPROP - A more advanced batch training algorithm which achieves good results
      for many problems. The quickprop training algorithm uses the learning_rate parameter
      along with other more advanced parameters, but changing these advanced parameters is
      only recommended for users with insight into how the quickprop training algorithm works.
      The quickprop training algorithm is described by [Fahlman, 1988].
   FANN_TRAIN_SARPROP - A variant of RPROP that uses simulated annealing to adjust the
      weight updates over time.

   See also:
      <fann_set_training_algorithm>, <fann_get_training_algorithm>
*/
enum fann_train_enum
{
    FANN_TRAIN_INCREMENTAL = 0,
    FANN_TRAIN_BATCH,
    FANN_TRAIN_RPROP,
    FANN_TRAIN_QUICKPROP,
    FANN_TRAIN_SARPROP
};
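
/* Example:
   A small sketch (assuming fann.h) showing how a training algorithm from
   this enum is selected before training:
   (code)
   struct fann *ann = fann_create_standard(3, 2, 3, 1);
   fann_set_training_algorithm(ann, FANN_TRAIN_RPROP);
   fann_train_on_file(ann, "xor.data", 1000, 100, 0.001f);
   fann_destroy(ann);
   (end)
   The file name "xor.data" is only a placeholder for a FANN training file.
*/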

/* Constant: FANN_TRAIN_NAMES

   Constant array consisting of the names for the training algorithms, so that the name of a
   training function can be received by:
   (code)
   const char *name = FANN_TRAIN_NAMES[train_function];
   (end)

   See Also:
      <fann_train_enum>
*/
static char const *const FANN_TRAIN_NAMES[] = {
    "FANN_TRAIN_INCREMENTAL",
    "FANN_TRAIN_BATCH",
    "FANN_TRAIN_RPROP",
    "FANN_TRAIN_QUICKPROP",
    "FANN_TRAIN_SARPROP"
};
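
/* Example:
   A one-line sketch (assuming fann.h and an already created network ann)
   that uses the array above to print the currently configured algorithm:
   (code)
   printf("training with %s\n",
          FANN_TRAIN_NAMES[fann_get_training_algorithm(ann)]);
   (end)
*/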

/* Enum: fann_activationfunc_enum

   The activation functions used for the neurons during training. An activation function
   can either be set for a group of neurons by <fann_set_activation_function_hidden> and
   <fann_set_activation_function_output>, or for a single neuron by <fann_set_activation_function>.

   The steepness of an activation function is set in the same way by
   <fann_set_activation_steepness_hidden>, <fann_set_activation_steepness_output> and <fann_set_activation_steepness>.

   The functions are described with formulas where:
   * x is the input to the activation function,
   * y is the output,
   * s is the steepness and
   * d is the derivative.

   FANN_LINEAR - Linear activation function.
     * span: -inf < y < inf
     * y = x*s, d = 1*s
     * Can NOT be used in fixed point.

   FANN_THRESHOLD - Threshold activation function.
     * x < 0 -> y = 0, x >= 0 -> y = 1
     * Can NOT be used during training.

   FANN_THRESHOLD_SYMMETRIC - Symmetric threshold activation function.
     * x < 0 -> y = -1, x >= 0 -> y = 1
     * Can NOT be used during training.

   FANN_SIGMOID - Sigmoid activation function.
     * One of the most used activation functions.
     * span: 0 < y < 1
     * y = 1/(1 + exp(-2*s*x))
     * d = 2*s*y*(1 - y)

   FANN_SIGMOID_STEPWISE - Stepwise linear approximation to sigmoid.
     * Faster than sigmoid but a bit less precise.

   FANN_SIGMOID_SYMMETRIC - Symmetric sigmoid activation function, aka. tanh.
     * One of the most used activation functions.
     * span: -1 < y < 1
     * y = tanh(s*x) = 2/(1 + exp(-2*s*x)) - 1
     * d = s*(1-(y*y))

   FANN_SIGMOID_SYMMETRIC_STEPWISE - Stepwise linear approximation to symmetric sigmoid.
     * Faster than symmetric sigmoid but a bit less precise.

   FANN_GAUSSIAN - Gaussian activation function.
     * 0 when x = -inf, 1 when x = 0 and 0 when x = inf
     * span: 0 < y < 1
     * y = exp(-x*s*x*s)
     * d = -2*x*s*y*s

   FANN_GAUSSIAN_SYMMETRIC - Symmetric gaussian activation function.
     * -1 when x = -inf, 1 when x = 0 and -1 when x = inf
     * span: -1 < y < 1
     * y = exp(-x*s*x*s)*2-1
     * d = -2*x*s*(y+1)*s

   FANN_ELLIOT - Fast (sigmoid like) activation function defined by David Elliott.
     * span: 0 < y < 1
     * y = ((x*s) / 2) / (1 + |x*s|) + 0.5
     * d = s*1/(2*(1+|x*s|)*(1+|x*s|))

   FANN_ELLIOT_SYMMETRIC - Fast (symmetric sigmoid like) activation function defined by David Elliott.
     * span: -1 < y < 1
     * y = (x*s) / (1 + |x*s|)
     * d = s*1/((1+|x*s|)*(1+|x*s|))

   FANN_LINEAR_PIECE - Bounded linear activation function.
     * span: 0 <= y <= 1
     * y = x*s, d = 1*s

   FANN_LINEAR_PIECE_SYMMETRIC - Bounded linear activation function.
     * span: -1 <= y <= 1
     * y = x*s, d = 1*s

   FANN_SIN_SYMMETRIC - Periodic sine activation function.
     * span: -1 <= y <= 1
     * y = sin(x*s)
     * d = s*cos(x*s)

   FANN_COS_SYMMETRIC - Periodic cosine activation function.
     * span: -1 <= y <= 1
     * y = cos(x*s)
     * d = s*-sin(x*s)

   FANN_SIN - Periodic sine activation function.
     * span: 0 <= y <= 1
     * y = sin(x*s)/2+0.5
     * d = s*cos(x*s)/2

   FANN_COS - Periodic cosine activation function.
     * span: 0 <= y <= 1
     * y = cos(x*s)/2+0.5
     * d = s*-sin(x*s)/2

   See also:
      <fann_set_activation_function_layer>, <fann_set_activation_function_hidden>,
      <fann_set_activation_function_output>, <fann_set_activation_steepness>,
      <fann_set_activation_function>
*/
enum fann_activationfunc_enum
{
    FANN_LINEAR = 0,
    FANN_THRESHOLD,
    FANN_THRESHOLD_SYMMETRIC,
    FANN_SIGMOID,
    FANN_SIGMOID_STEPWISE,
    FANN_SIGMOID_SYMMETRIC,
    FANN_SIGMOID_SYMMETRIC_STEPWISE,
    FANN_GAUSSIAN,
    FANN_GAUSSIAN_SYMMETRIC,
    /* Stepwise linear approximation to gaussian.
     * Faster than gaussian but a bit less precise.
     * NOT implemented yet.
     */
    FANN_GAUSSIAN_STEPWISE,
    FANN_ELLIOT,
    FANN_ELLIOT_SYMMETRIC,
    FANN_LINEAR_PIECE,
    FANN_LINEAR_PIECE_SYMMETRIC,
    FANN_SIN_SYMMETRIC,
    FANN_COS_SYMMETRIC,
    FANN_SIN,
    FANN_COS
};
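
/* Example:
   A short sketch (assuming fann.h and an already created network ann) that
   picks activation functions from the enum above for the hidden and output
   layers, and sets the steepness s used in the formulas:
   (code)
   fann_set_activation_function_hidden(ann, FANN_SIGMOID_SYMMETRIC);
   fann_set_activation_function_output(ann, FANN_LINEAR);
   fann_set_activation_steepness_hidden(ann, 0.5);
   fann_set_activation_steepness_output(ann, 1.0);
   (end)
*/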

/* Constant: FANN_ACTIVATIONFUNC_NAMES

   Constant array consisting of the names for the activation functions, so that the name of an
   activation function can be received by:
   (code)
   const char *name = FANN_ACTIVATIONFUNC_NAMES[activation_function];
   (end)

   See Also:
      <fann_activationfunc_enum>
*/
static char const *const FANN_ACTIVATIONFUNC_NAMES[] = {
    "FANN_LINEAR",
    "FANN_THRESHOLD",
    "FANN_THRESHOLD_SYMMETRIC",
    "FANN_SIGMOID",
    "FANN_SIGMOID_STEPWISE",
    "FANN_SIGMOID_SYMMETRIC",
    "FANN_SIGMOID_SYMMETRIC_STEPWISE",
    "FANN_GAUSSIAN",
    "FANN_GAUSSIAN_SYMMETRIC",
    "FANN_GAUSSIAN_STEPWISE",
    "FANN_ELLIOT",
    "FANN_ELLIOT_SYMMETRIC",
    "FANN_LINEAR_PIECE",
    "FANN_LINEAR_PIECE_SYMMETRIC",
    "FANN_SIN_SYMMETRIC",
    "FANN_COS_SYMMETRIC",
    "FANN_SIN",
    "FANN_COS"
};

/* Enum: fann_errorfunc_enum
   Error function used during training.

   FANN_ERRORFUNC_LINEAR - Standard linear error function.
   FANN_ERRORFUNC_TANH - Tanh error function; usually better, but it can require a lower
      learning rate. This error function aggressively targets outputs that differ much
      from the desired values, while mostly ignoring outputs that only differ a little.
      This error function is not recommended for cascade training and incremental training.

   See also:
      <fann_set_train_error_function>, <fann_get_train_error_function>
*/
enum fann_errorfunc_enum
{
    FANN_ERRORFUNC_LINEAR = 0,
    FANN_ERRORFUNC_TANH
};
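
/* Example:
   A sketch (assuming fann.h and an already created network ann) switching to
   the linear error function, e.g. before cascade or incremental training
   where the tanh error function is not recommended:
   (code)
   fann_set_train_error_function(ann, FANN_ERRORFUNC_LINEAR);
   (end)
*/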

/* Constant: FANN_ERRORFUNC_NAMES

   Constant array consisting of the names for the training error functions, so that the name of an
   error function can be received by:
   (code)
   const char *name = FANN_ERRORFUNC_NAMES[error_function];
   (end)

   See Also:
      <fann_errorfunc_enum>
*/
static char const *const FANN_ERRORFUNC_NAMES[] = {
    "FANN_ERRORFUNC_LINEAR",
    "FANN_ERRORFUNC_TANH"
};

/* Enum: fann_stopfunc_enum
   Stop criteria used during training.

   FANN_STOPFUNC_MSE - Stop criterion is the Mean Square Error (MSE) value.
   FANN_STOPFUNC_BIT - Stop criterion is the number of bits that fail. The number of bits
      means the number of output neurons which differ by more than the bit fail limit
      (see <fann_get_bit_fail_limit>, <fann_set_bit_fail_limit>).
      The bits are counted over all of the training data, so this number can be higher than
      the number of training patterns.

   See also:
      <fann_set_train_stop_function>, <fann_get_train_stop_function>
*/
enum fann_stopfunc_enum
{
    FANN_STOPFUNC_MSE = 0,
    FANN_STOPFUNC_BIT
};
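
/* Example:
   A sketch (assuming fann.h and an already created network ann) that stops
   training on the bit fail count instead of the MSE, with a bit fail limit
   of 0.01:
   (code)
   fann_set_train_stop_function(ann, FANN_STOPFUNC_BIT);
   fann_set_bit_fail_limit(ann, 0.01f);
   (end)
*/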

/* Constant: FANN_STOPFUNC_NAMES

   Constant array consisting of the names for the training stop functions, so that the name of a
   stop function can be received by:
   (code)
   const char *name = FANN_STOPFUNC_NAMES[stop_function];
   (end)

   See Also:
      <fann_stopfunc_enum>
*/
static char const *const FANN_STOPFUNC_NAMES[] = {
    "FANN_STOPFUNC_MSE",
    "FANN_STOPFUNC_BIT"
};

/* Enum: fann_nettype_enum

   Definition of network types used by <fann_get_network_type>

   FANN_NETTYPE_LAYER - Each layer only has connections to the next layer
   FANN_NETTYPE_SHORTCUT - Each layer has connections to all following layers

   See Also:
      <fann_get_network_type>

   This enumeration appears in FANN >= 2.1.0
*/
enum fann_nettype_enum
{
    FANN_NETTYPE_LAYER = 0, /* Each layer only has connections to the next layer */
    FANN_NETTYPE_SHORTCUT /* Each layer has connections to all following layers */
};
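
/* Example:
   A sketch (assuming fann.h) contrasting the two network types; the network
   type is fixed by the creation function that was used:
   (code)
   struct fann *layered  = fann_create_standard(3, 2, 3, 1);
   struct fann *shortcut = fann_create_shortcut(3, 2, 3, 1);
   if(fann_get_network_type(shortcut) == FANN_NETTYPE_SHORTCUT)
       printf("shortcut connections enabled\n");
   fann_destroy(layered);
   fann_destroy(shortcut);
   (end)
*/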

/* Constant: FANN_NETTYPE_NAMES

   Constant array consisting of the names for the network types, so that the name of a
   network type can be received by:
   (code)
   const char *network_type_name = FANN_NETTYPE_NAMES[fann_get_network_type(ann)];
   (end)

   See Also:
      <fann_get_network_type>

   This constant appears in FANN >= 2.1.0
*/
static char const *const FANN_NETTYPE_NAMES[] = {
    "FANN_NETTYPE_LAYER",
    "FANN_NETTYPE_SHORTCUT"
};


/* forward declarations for use with the callback */
struct fann;
struct fann_train_data;
/* Type: fann_callback_type
   This callback function can be called during training when using <fann_train_on_data>,
   <fann_train_on_file> or <fann_cascadetrain_on_data>.

   >typedef int (FANN_API * fann_callback_type) (struct fann *ann, struct fann_train_data *train,
   >                                             unsigned int max_epochs,
   >                                             unsigned int epochs_between_reports,
   >                                             float desired_error, unsigned int epochs);

   The callback can be set by using <fann_set_callback> and is very useful for doing custom
   things during training. It is recommended to use this function when implementing custom
   training procedures, or when visualizing the training in a GUI etc. The parameters which the
   callback function takes are the parameters given to <fann_train_on_data>, plus an epochs
   parameter which tells how many epochs the training has taken so far.

   The callback function should return an integer; if the callback function returns -1, the
   training will terminate.

   Example of a callback function:
   >int FANN_API test_callback(struct fann *ann, struct fann_train_data *train,
   >                           unsigned int max_epochs, unsigned int epochs_between_reports,
   >                           float desired_error, unsigned int epochs)
   >{
   >    printf("Epochs %8d. MSE: %.5f. Desired-MSE: %.5f\n", epochs, fann_get_MSE(ann), desired_error);
   >    return 0;
   >}

   See also:
      <fann_set_callback>, <fann_train_on_data>
 */
FANN_EXTERNAL typedef int (FANN_API * fann_callback_type) (struct fann *ann, struct fann_train_data *train,
                                                           unsigned int max_epochs,
                                                           unsigned int epochs_between_reports,
                                                           float desired_error, unsigned int epochs);
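
/* Example:
   A sketch (assuming fann.h, a created network ann, loaded training data
   train_data, and the test_callback function from the comment above) showing
   how the callback is registered and picked up by training:
   (code)
   fann_set_callback(ann, test_callback);
   fann_train_on_data(ann, train_data, 100000, 10, 0.001f);
   (end)
   Returning -1 from test_callback would abort the fann_train_on_data call.
*/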


/* ----- Data structures -----
 * No data within these structures should be altered directly by the user.
 */

struct fann_neuron
{
    /* Index to the first and last connection
     * (actually the last is a past-the-end index)
     */
    unsigned int first_con;
    unsigned int last_con;
    /* The sum of the inputs multiplied with the weights */
    fann_type sum;
    /* The value of the activation function applied to the sum */
    fann_type value;
    /* The steepness of the activation function */
    fann_type activation_steepness;
    /* Used to choose which activation function to use */
    enum fann_activationfunc_enum activation_function;
#ifdef __GNUC__
} __attribute__ ((packed));
#else
};
#endif

/* A single layer in the neural network.
 */
struct fann_layer
{
    /* A pointer to the first neuron in the layer.
     * When allocated, all the neurons in all the layers are actually
     * in one long array; this is because we want to be able to easily
     * clear all the neurons at once.
     */
    struct fann_neuron *first_neuron;

    /* A pointer to the neuron past the last neuron in the layer */
    /* the number of neurons is last_neuron - first_neuron */
    struct fann_neuron *last_neuron;
};
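
/* Example:
   A purely illustrative sketch of the invariant described above: the number
   of neurons in a layer is last_neuron - first_neuron. User code should
   prefer <fann_get_layer_array> (assumed from fann.h) over reading these
   fields directly:
   (code)
   struct fann_layer *layer_it;
   for(layer_it = ann->first_layer; layer_it != ann->last_layer; layer_it++)
       printf("layer has %d neurons\n",
              (int)(layer_it->last_neuron - layer_it->first_neuron));
   (end)
*/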

/* Struct: struct fann_error

   Structure used to store error-related information; both
   <struct fann> and <struct fann_train_data> can be cast to this type.

   See also:
      <fann_set_error_log>, <fann_get_errno>
*/
struct fann_error
{
    enum fann_errno_enum errno_f;
    FILE *error_log;
    char *errstr;
};
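
/* Example:
   A sketch (assuming fann.h and an already created network ann) of the cast
   described above, redirecting error logging for a network to a file:
   (code)
   FILE *log = fopen("fann_errors.log", "w");
   fann_set_error_log((struct fann_error *)ann, log);
   (end)
   The file name "fann_errors.log" is only a placeholder.
*/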


/* Struct: struct fann
   The fast artificial neural network (fann) structure.

   Data within this structure should never be accessed directly, but only by using the
   *fann_get_...* and *fann_set_...* functions.

   The fann structure is created using one of the *fann_create_...* functions and each of
   the functions which operates on the structure takes *struct fann * ann* as the first parameter.

   See also:
      <fann_create_standard>, <fann_destroy>
*/
struct fann
{
    /* The type of error that last occurred. */
    enum fann_errno_enum errno_f;

    /* Where to log error messages. */
    FILE *error_log;

    /* A string representation of the last error. */
    char *errstr;

    /* the learning rate of the network */
    float learning_rate;

    /* The learning momentum used for the backpropagation algorithm. */
    float learning_momentum;

    /* the connection rate of the network
     * between 0 and 1, 1 meaning fully connected
     */
    float connection_rate;

    /* The network type (FANN_NETTYPE_LAYER or FANN_NETTYPE_SHORTCUT).
     * Shortcut connections are connections that skip layers.
     * A fully connected ann with shortcut connections is an ann where
     * neurons have connections to all neurons in all later layers.
     */
    enum fann_nettype_enum network_type;

    /* pointer to the first layer (input layer) in an array of all the layers,
     * including the input and output layers
     */
    struct fann_layer *first_layer;

    /* pointer to the layer past the last layer in an array of all the layers,
     * including the input and output layers
     */
    struct fann_layer *last_layer;

    /* Total number of neurons.
     * Very useful, because the actual neurons are allocated in one long array.
     */
    unsigned int total_neurons;

    /* Number of input neurons (not counting bias) */
    unsigned int num_input;

    /* Number of output neurons (not counting bias) */
    unsigned int num_output;

    /* The weight array */
    fann_type *weights;

    /* The connection array */
    struct fann_neuron **connections;

    /* Used to contain the errors used during training.
     * Is allocated during the first training session,
     * which means that if we do not train, it is never allocated.
     */
    fann_type *train_errors;

    /* Training algorithm used when calling fann_train_on_...
     */
    enum fann_train_enum training_algorithm;

#ifdef FIXEDFANN
    /* the decimal point, used for shifting the fixed point
     * in fixed point integer operations.
     */
    unsigned int decimal_point;

    /* the multiplier, used for multiplying the fixed point
     * in fixed point integer operations.
     * Only used in special cases, since the decimal_point is much faster.
     */
    unsigned int multiplier;

    /* When chosen (or when in fixed point), the sigmoid function is
     * calculated as a stepwise linear function. In the
     * activation_results array the result is saved, and in the
     * two values arrays the values that give the results are saved.
     */
    fann_type sigmoid_results[6];
    fann_type sigmoid_values[6];
    fann_type sigmoid_symmetric_results[6];
    fann_type sigmoid_symmetric_values[6];
#endif

    /* Total number of connections.
     * Very useful, because the actual connections
     * are allocated in one long array.
     */
    unsigned int total_connections;

    /* used to store outputs in */
    fann_type *output;

    /* the number of data used to calculate the mean square error.
     */
    unsigned int num_MSE;

    /* the total error value.
     * the real mean square error is MSE_value/num_MSE
     */
    float MSE_value;

    /* The number of outputs which would fail (only valid for classification problems)
     */
    unsigned int num_bit_fail;

    /* The maximum difference between the actual output and the expected output
     * which is accepted when counting the bit fails.
     * This difference is multiplied by two when dealing with symmetric activation functions,
     * so that symmetric and non-symmetric activation functions can use the same limit.
     */
    fann_type bit_fail_limit;

    /* The error function used during training. (default FANN_ERRORFUNC_TANH)
     */
    enum fann_errorfunc_enum train_error_function;

    /* The stop function used during training. (default FANN_STOPFUNC_MSE)
     */
    enum fann_stopfunc_enum train_stop_function;

    /* The callback function used during training. (default NULL)
     */
    fann_callback_type callback;

    /* A pointer to user defined data. (default NULL)
     */
    void *user_data;

    /* Variables for use with Cascade Correlation */

    /* The error must change by at least this
     * fraction of its old value to count as a
     * significant change.
     */
    float cascade_output_change_fraction;

    /* No change in this number of epochs will cause
     * stagnation.
     */
    unsigned int cascade_output_stagnation_epochs;

    /* The error must change by at least this
     * fraction of its old value to count as a
     * significant change.
     */
    float cascade_candidate_change_fraction;

    /* No change in this number of epochs will cause
     * stagnation.
     */
    unsigned int cascade_candidate_stagnation_epochs;

    /* The current best candidate, which will be installed.
     */
    unsigned int cascade_best_candidate;

    /* The upper limit for a candidate score
     */
    fann_type cascade_candidate_limit;

    /* Scale of copied candidate output weights
     */
    fann_type cascade_weight_multiplier;

    /* Maximum epochs to train the output neurons during cascade training
     */
    unsigned int cascade_max_out_epochs;

    /* Maximum epochs to train the candidate neurons during cascade training
     */
    unsigned int cascade_max_cand_epochs;

    /* Minimum epochs to train the output neurons during cascade training
     */
    unsigned int cascade_min_out_epochs;

    /* Minimum epochs to train the candidate neurons during cascade training
     */
    unsigned int cascade_min_cand_epochs;

    /* An array consisting of the activation functions used when doing
     * cascade training.
     */
    enum fann_activationfunc_enum *cascade_activation_functions;

    /* The number of elements in the cascade_activation_functions array.
     */
    unsigned int cascade_activation_functions_count;

    /* An array consisting of the steepnesses used during cascade training.
     */
    fann_type *cascade_activation_steepnesses;

    /* The number of elements in the cascade_activation_steepnesses array.
     */
    unsigned int cascade_activation_steepnesses_count;

    /* The number of candidates of each type that will be present.
     * The actual number of candidates is then
     * cascade_activation_functions_count *
     * cascade_activation_steepnesses_count *
     * cascade_num_candidate_groups
     */
    unsigned int cascade_num_candidate_groups;

    /* An array consisting of the score of the individual candidates,
     * which is used to decide which candidate is the best
     */
    fann_type *cascade_candidate_scores;

    /* The number of allocated neurons during cascade correlation algorithms.
     * This number might be higher than the actual number of neurons to avoid
     * allocating new space too often.
     */
    unsigned int total_neurons_allocated;

    /* The number of allocated connections during cascade correlation algorithms.
     * This number might be higher than the actual number of connections to avoid
     * allocating new space too often.
     */
    unsigned int total_connections_allocated;

    /* Variables for use with Quickprop training */

    /* Decay is used to keep the weights from growing too large */
    float quickprop_decay;

    /* Mu is a factor used to increase and decrease the stepsize */
    float quickprop_mu;

    /* Variables for use with RPROP training */

    /* Tells how much the stepsize should increase during learning */
    float rprop_increase_factor;

    /* Tells how much the stepsize should decrease during learning */
    float rprop_decrease_factor;

    /* The minimum stepsize */
    float rprop_delta_min;

    /* The maximum stepsize */
    float rprop_delta_max;

    /* The initial stepsize */
    float rprop_delta_zero;

    /* Defines how much the weights are constrained to smaller values at the beginning */
    float sarprop_weight_decay_shift;

    /* Decides if the stepsize is too big with regard to the error */
    float sarprop_step_error_threshold_factor;

    /* Defines how much the stepsize is influenced by the error */
    float sarprop_step_error_shift;

    /* Defines how much the epoch influences weight decay and noise */
    float sarprop_temperature;

    /* Current training epoch */
    unsigned int sarprop_epoch;

    /* Used to contain the slope errors used during batch training.
     * Is allocated during the first training session,
     * which means that if we do not train, it is never allocated.
     */
    fann_type *train_slopes;

    /* The previous step taken by the quickprop/rprop procedures.
     * Not allocated if not used.
     */
    fann_type *prev_steps;

    /* The slope values used by the quickprop/rprop procedures.
     * Not allocated if not used.
     */
    fann_type *prev_train_slopes;

    /* The last delta applied to a connection weight.
     * This is used for the momentum term in the backpropagation algorithm.
     * Not allocated if not used.
     */
    fann_type *prev_weights_deltas;

#ifndef FIXEDFANN
    /* Arithmetic mean used to remove the steady component in input data. */
    float *scale_mean_in;

    /* Standard deviation used to normalize input data (mostly to [-1;1]). */
    float *scale_deviation_in;

    /* User-defined new minimum for input data.
     * Resulting data values may be less than the user-defined minimum.
     */
    float *scale_new_min_in;

    /* Used to scale data to the user-defined new maximum for input data.
     * Resulting data values may be greater than the user-defined maximum.
     */
    float *scale_factor_in;

    /* Arithmetic mean used to remove the steady component in output data. */
    float *scale_mean_out;

    /* Standard deviation used to normalize output data (mostly to [-1;1]). */
    float *scale_deviation_out;

    /* User-defined new minimum for output data.
     * Resulting data values may be less than the user-defined minimum.
     */
    float *scale_new_min_out;

    /* Used to scale data to the user-defined new maximum for output data.
     * Resulting data values may be greater than the user-defined maximum.
     */
    float *scale_factor_out;
#endif
};
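
/* Example:
   A sketch of the lifecycle described above, using only the public
   *fann_create_...*, *fann_get_...*, *fann_set_...* and fann_destroy
   functions (all assumed from fann.h) rather than the struct members:
   (code)
   struct fann *ann = fann_create_standard(3, 2, 3, 1);
   fann_set_learning_rate(ann, 0.7f);
   printf("learning rate: %f\n", fann_get_learning_rate(ann));
   fann_destroy(ann);
   (end)
*/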

/* Type: fann_connection

   Describes a connection between two neurons and its weight.

   from_neuron - Unique number used to identify source neuron
   to_neuron - Unique number used to identify destination neuron
   weight - The numerical value of the weight

   See Also:
      <fann_get_connection_array>, <fann_set_weight_array>

   This structure appears in FANN >= 2.1.0
*/
struct fann_connection
{
    /* Unique number used to identify source neuron */
    unsigned int from_neuron;
    /* Unique number used to identify destination neuron */
    unsigned int to_neuron;
    /* The numerical value of the weight */
    fann_type weight;
};
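
/* Example:
   A sketch (assuming fann.h, stdlib.h and an already created network ann)
   that dumps every connection of a network into an array of the structure
   above:
   (code)
   unsigned int n = fann_get_total_connections(ann);
   struct fann_connection *con = malloc(n * sizeof(struct fann_connection));
   fann_get_connection_array(ann, con);
   printf("first weight: %u -> %u = %f\n",
          con[0].from_neuron, con[0].to_neuron, (double)con[0].weight);
   free(con);
   (end)
*/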

#endif