ruby-fann 0.7.10 → 1.0.0

Files changed (52)
  1. data/History.txt +6 -1
  2. data/License.txt +1 -1
  3. data/Manifest.txt +22 -1
  4. data/README.txt +0 -1
  5. data/Rakefile +0 -0
  6. data/config/hoe.rb +0 -0
  7. data/config/requirements.rb +0 -0
  8. data/ext/ruby_fann/MANIFEST +0 -0
  9. data/ext/ruby_fann/Makefile +36 -28
  10. data/ext/ruby_fann/doublefann.c +30 -0
  11. data/ext/ruby_fann/doublefann.h +33 -0
  12. data/ext/ruby_fann/extconf.rb +9 -5
  13. data/ext/ruby_fann/fann.c +1552 -0
  14. data/ext/ruby_fann/fann_activation.h +144 -0
  15. data/ext/ruby_fann/fann_augment.h +0 -0
  16. data/ext/ruby_fann/fann_cascade.c +1031 -0
  17. data/ext/ruby_fann/fann_cascade.h +503 -0
  18. data/ext/ruby_fann/fann_data.h +799 -0
  19. data/ext/ruby_fann/fann_error.c +204 -0
  20. data/ext/ruby_fann/fann_error.h +161 -0
  21. data/ext/ruby_fann/fann_internal.h +148 -0
  22. data/ext/ruby_fann/fann_io.c +762 -0
  23. data/ext/ruby_fann/fann_io.h +100 -0
  24. data/ext/ruby_fann/fann_train.c +962 -0
  25. data/ext/ruby_fann/fann_train.h +1203 -0
  26. data/ext/ruby_fann/fann_train_data.c +1231 -0
  27. data/ext/ruby_fann/neural_network.c +0 -0
  28. data/lib/ruby_fann/neurotica.rb +0 -0
  29. data/lib/ruby_fann/version.rb +3 -3
  30. data/lib/ruby_fann.rb +0 -0
  31. data/neurotica1.png +0 -0
  32. data/neurotica2.vrml +18 -18
  33. data/setup.rb +0 -0
  34. data/tasks/deployment.rake +0 -0
  35. data/tasks/environment.rake +0 -0
  36. data/tasks/website.rake +0 -0
  37. data/test/test.train +0 -0
  38. data/test/test_helper.rb +0 -0
  39. data/test/test_neurotica.rb +0 -0
  40. data/test/test_ruby_fann.rb +0 -0
  41. data/test/test_ruby_fann_functional.rb +0 -0
  42. data/verify.train +0 -0
  43. data/website/index.html +42 -92
  44. data/website/index.txt +0 -0
  45. data/website/javascripts/rounded_corners_lite.inc.js +0 -0
  46. data/website/stylesheets/screen.css +0 -0
  47. data/website/template.rhtml +0 -0
  48. data/xor.train +0 -0
  49. data/xor_cascade.net +2 -2
  50. data/xor_float.net +1 -1
  51. metadata +22 -6
  52. data/log/debug.log +0 -0
data/ext/ruby_fann/fann_data.h
@@ -0,0 +1,799 @@
+ /*
+ Fast Artificial Neural Network Library (fann)
+ Copyright (C) 2003 Steffen Nissen (lukesky@diku.dk)
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+ #ifndef __fann_data_h__
+ #define __fann_data_h__
+
+ #include <stdio.h>
+
+ /* Section: FANN Datatypes
+
+ The two main datatypes used in the fann library are <struct fann>,
+ which represents an artificial neural network, and <struct fann_train_data>,
+ which represents training data.
+ */
+
+
+ /* Type: fann_type
+ fann_type is the type used for the weights, inputs and outputs of the neural network.
+
+ fann_type is defined as:
+ float - if you include fann.h or floatfann.h
+ double - if you include doublefann.h
+ int - if you include fixedfann.h (please be aware that fixed point usage is
+ only to be used during execution, and not during training).
+ */
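
As a quick illustration of how the header choice fixes fann_type, a minimal sketch built against doublefann.h (so fann_type is double here); the 2-3-1 layer layout is an arbitrary assumption for the example:

(code)
#include <stdio.h>
#include "doublefann.h"  /* fann_type is double in this build */

int main(void)
{
    /* three layers: 2 inputs, 3 hidden neurons, 1 output (sizes chosen arbitrarily) */
    struct fann *ann = fann_create_standard(3, 2, 3, 1);
    fann_type input[2] = {0.0, 1.0};
    fann_type *output = fann_run(ann, input);

    printf("output: %f\n", output[0]);
    fann_destroy(ann);
    return 0;
}
(end)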
+
+ /* Enum: fann_train_enum
+ The training algorithms used when training on <struct fann_train_data> with functions like
+ <fann_train_on_data> or <fann_train_on_file>. Incremental training alters the weights
+ after each input pattern it is presented, while batch training only alters the weights once after
+ it has been presented all the patterns.
+
+ FANN_TRAIN_INCREMENTAL - Standard backpropagation algorithm, where the weights are
+ updated after each training pattern. This means that the weights are updated many
+ times during a single epoch. For this reason some problems will train very fast with
+ this algorithm, while other more advanced problems will not train very well.
+ FANN_TRAIN_BATCH - Standard backpropagation algorithm, where the weights are updated after
+ calculating the mean square error for the whole training set. This means that the weights
+ are only updated once during an epoch. For this reason some problems will train slower with
+ this algorithm. But since the mean square error is calculated more accurately than in
+ incremental training, some problems will reach better solutions with this algorithm.
+ FANN_TRAIN_RPROP - A more advanced batch training algorithm which achieves good results
+ for many problems. The RPROP training algorithm is adaptive and therefore does not
+ use the learning_rate. Some other parameters can however be set to change the way the
+ RPROP algorithm works, but changing them is only recommended for users with insight into how the RPROP
+ training algorithm works. The RPROP training algorithm is described by
+ [Riedmiller and Braun, 1993], but the actual learning algorithm used here is the
+ iRPROP- training algorithm described by [Igel and Husken, 2000], which
+ is a variant of the standard RPROP training algorithm.
+ FANN_TRAIN_QUICKPROP - A more advanced batch training algorithm which achieves good results
+ for many problems. The quickprop training algorithm uses the learning_rate parameter
+ along with other more advanced parameters, but changing these advanced parameters
+ is only recommended for users with insight into how the quickprop training algorithm works.
+ The quickprop training algorithm is described by [Fahlman, 1988].
+
+ See also:
+ <fann_set_training_algorithm>, <fann_get_training_algorithm>
+ */
+ enum fann_train_enum
+ {
+ FANN_TRAIN_INCREMENTAL = 0,
+ FANN_TRAIN_BATCH,
+ FANN_TRAIN_RPROP,
+ FANN_TRAIN_QUICKPROP
+ };
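
A hedged sketch of selecting one of these algorithms before training; fann_set_training_algorithm and fann_train_on_file are the documented FANN calls, xor.train is the training file bundled with this gem, and the epoch counts and error target are arbitrary:

(code)
#include "doublefann.h"

int main(void)
{
    struct fann *ann = fann_create_standard(3, 2, 3, 1);

    /* FANN defaults to FANN_TRAIN_RPROP; batch backpropagation,
     * unlike RPROP, honours the learning rate */
    fann_set_training_algorithm(ann, FANN_TRAIN_BATCH);
    fann_set_learning_rate(ann, 0.7f);

    /* at most 1000 epochs, report every 100, stop at MSE 0.001 */
    fann_train_on_file(ann, "xor.train", 1000, 100, 0.001f);

    fann_destroy(ann);
    return 0;
}
(end)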
+
+ /* Constant: FANN_TRAIN_NAMES
+
+ Constant array consisting of the names for the training algorithms, so that the name of a
+ training function can be received by:
+ (code)
+ const char *name = FANN_TRAIN_NAMES[train_function];
+ (end)
+
+ See Also:
+ <fann_train_enum>
+ */
+ static char const *const FANN_TRAIN_NAMES[] = {
+ "FANN_TRAIN_INCREMENTAL",
+ "FANN_TRAIN_BATCH",
+ "FANN_TRAIN_RPROP",
+ "FANN_TRAIN_QUICKPROP"
+ };
+
+ /* Enum: fann_activationfunc_enum
+
+ The activation functions used for the neurons during training. The activation functions
+ can either be defined for a group of neurons by <fann_set_activation_function_hidden> and
+ <fann_set_activation_function_output>, or for a single neuron by <fann_set_activation_function>.
+
+ The steepness of an activation function is defined in the same way by
+ <fann_set_activation_steepness_hidden>, <fann_set_activation_steepness_output> and <fann_set_activation_steepness>.
+
+ The functions are described with equations where:
+ * x is the input to the activation function,
+ * y is the output,
+ * s is the steepness and
+ * d is the derivative.
+
+ FANN_LINEAR - Linear activation function.
+ * span: -inf < y < inf
+ * y = x*s, d = 1*s
+ * Can NOT be used in fixed point.
+
+ FANN_THRESHOLD - Threshold activation function.
+ * x < 0 -> y = 0, x >= 0 -> y = 1
+ * Can NOT be used during training.
+
+ FANN_THRESHOLD_SYMMETRIC - Symmetric threshold activation function.
+ * x < 0 -> y = -1, x >= 0 -> y = 1
+ * Can NOT be used during training.
+
+ FANN_SIGMOID - Sigmoid activation function.
+ * One of the most used activation functions.
+ * span: 0 < y < 1
+ * y = 1/(1 + exp(-2*s*x))
+ * d = 2*s*y*(1 - y)
+
+ FANN_SIGMOID_STEPWISE - Stepwise linear approximation to sigmoid.
+ * Faster than sigmoid but a bit less precise.
+
+ FANN_SIGMOID_SYMMETRIC - Symmetric sigmoid activation function, aka. tanh.
+ * One of the most used activation functions.
+ * span: -1 < y < 1
+ * y = tanh(s*x) = 2/(1 + exp(-2*s*x)) - 1
+ * d = s*(1-(y*y))
+
+ FANN_SIGMOID_SYMMETRIC_STEPWISE - Stepwise linear approximation to symmetric sigmoid.
+ * Faster than symmetric sigmoid but a bit less precise.
+
+ FANN_GAUSSIAN - Gaussian activation function.
+ * 0 when x = -inf, 1 when x = 0 and 0 when x = inf
+ * span: 0 < y < 1
+ * y = exp(-x*s*x*s)
+ * d = -2*x*s*y*s
+
+ FANN_GAUSSIAN_SYMMETRIC - Symmetric gaussian activation function.
+ * -1 when x = -inf, 1 when x = 0 and -1 when x = inf
+ * span: -1 < y < 1
+ * y = exp(-x*s*x*s)*2-1
+ * d = -2*x*s*(y+1)*s
+
+ FANN_ELLIOT - Fast (sigmoid like) activation function defined by David Elliott
+ * span: 0 < y < 1
+ * y = ((x*s) / 2) / (1 + |x*s|) + 0.5
+ * d = s*1/(2*(1+|x*s|)*(1+|x*s|))
+
+ FANN_ELLIOT_SYMMETRIC - Fast (symmetric sigmoid like) activation function defined by David Elliott
+ * span: -1 < y < 1
+ * y = (x*s) / (1 + |x*s|)
+ * d = s*1/((1+|x*s|)*(1+|x*s|))
+
+ FANN_LINEAR_PIECE - Bounded linear activation function.
+ * span: 0 <= y <= 1
+ * y = x*s, d = 1*s
+
+ FANN_LINEAR_PIECE_SYMMETRIC - Bounded linear activation function.
+ * span: -1 <= y <= 1
+ * y = x*s, d = 1*s
+
+ FANN_SIN_SYMMETRIC - Periodic sine activation function.
+ * span: -1 <= y <= 1
+ * y = sin(x*s)
+ * d = s*cos(x*s)
+
+ FANN_COS_SYMMETRIC - Periodic cosine activation function.
+ * span: -1 <= y <= 1
+ * y = cos(x*s)
+ * d = s*-sin(x*s)
+
+ FANN_SIN - Periodic sine activation function.
+ * span: 0 <= y <= 1
+ * y = sin(x*s)/2+0.5
+ * d = s*cos(x*s)/2
+
+ FANN_COS - Periodic cosine activation function.
+ * span: 0 <= y <= 1
+ * y = cos(x*s)/2+0.5
+ * d = s*-sin(x*s)/2
+
+ See also:
+ <fann_set_activation_function_layer>, <fann_set_activation_function_hidden>,
+ <fann_set_activation_function_output>, <fann_set_activation_steepness>,
+ <fann_set_activation_function>
+ */
+ enum fann_activationfunc_enum
+ {
+ FANN_LINEAR = 0,
+ FANN_THRESHOLD,
+ FANN_THRESHOLD_SYMMETRIC,
+ FANN_SIGMOID,
+ FANN_SIGMOID_STEPWISE,
+ FANN_SIGMOID_SYMMETRIC,
+ FANN_SIGMOID_SYMMETRIC_STEPWISE,
+ FANN_GAUSSIAN,
+ FANN_GAUSSIAN_SYMMETRIC,
+ /* Stepwise linear approximation to gaussian.
+ * Faster than gaussian but a bit less precise.
+ * NOT implemented yet.
+ */
+ FANN_GAUSSIAN_STEPWISE,
+ FANN_ELLIOT,
+ FANN_ELLIOT_SYMMETRIC,
+ FANN_LINEAR_PIECE,
+ FANN_LINEAR_PIECE_SYMMETRIC,
+ FANN_SIN_SYMMETRIC,
+ FANN_COS_SYMMETRIC,
+ FANN_SIN,
+ FANN_COS
+ };
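
A small sketch of configuring these functions through the setters listed above; the particular choices (symmetric sigmoid hidden layer, linear output, steepness 0.5, a Gaussian on one neuron) are arbitrary:

(code)
#include "doublefann.h"

int main(void)
{
    struct fann *ann = fann_create_standard(3, 2, 3, 1);

    /* group-level setters for the hidden and output layers */
    fann_set_activation_function_hidden(ann, FANN_SIGMOID_SYMMETRIC);
    fann_set_activation_function_output(ann, FANN_LINEAR);
    fann_set_activation_steepness_hidden(ann, 0.5);
    fann_set_activation_steepness_output(ann, 0.5);

    /* per-neuron setter: neuron 1 of layer 1 (the first hidden layer) */
    fann_set_activation_function(ann, FANN_GAUSSIAN, 1, 1);

    fann_destroy(ann);
    return 0;
}
(end)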
+
+ /* Constant: FANN_ACTIVATIONFUNC_NAMES
+
+ Constant array consisting of the names for the activation functions, so that the name of an
+ activation function can be received by:
+ (code)
+ const char *name = FANN_ACTIVATIONFUNC_NAMES[activation_function];
+ (end)
+
+ See Also:
+ <fann_activationfunc_enum>
+ */
+ static char const *const FANN_ACTIVATIONFUNC_NAMES[] = {
+ "FANN_LINEAR",
+ "FANN_THRESHOLD",
+ "FANN_THRESHOLD_SYMMETRIC",
+ "FANN_SIGMOID",
+ "FANN_SIGMOID_STEPWISE",
+ "FANN_SIGMOID_SYMMETRIC",
+ "FANN_SIGMOID_SYMMETRIC_STEPWISE",
+ "FANN_GAUSSIAN",
+ "FANN_GAUSSIAN_SYMMETRIC",
+ "FANN_GAUSSIAN_STEPWISE",
+ "FANN_ELLIOT",
+ "FANN_ELLIOT_SYMMETRIC",
+ "FANN_LINEAR_PIECE",
+ "FANN_LINEAR_PIECE_SYMMETRIC",
+ "FANN_SIN_SYMMETRIC",
+ "FANN_COS_SYMMETRIC",
+ "FANN_SIN",
+ "FANN_COS"
+ };
+
+ /* Enum: fann_errorfunc_enum
+ Error function used during training.
+
+ FANN_ERRORFUNC_LINEAR - Standard linear error function.
+ FANN_ERRORFUNC_TANH - Tanh error function, usually better
+ but can require a lower learning rate. This error function aggressively targets outputs that
+ differ much from the desired, while not targeting outputs that only differ a little.
+ This error function is not recommended for cascade training and incremental training.
+
+ See also:
+ <fann_set_train_error_function>, <fann_get_train_error_function>
+ */
+ enum fann_errorfunc_enum
+ {
+ FANN_ERRORFUNC_LINEAR = 0,
+ FANN_ERRORFUNC_TANH
+ };
+
+ /* Constant: FANN_ERRORFUNC_NAMES
+
+ Constant array consisting of the names for the training error functions, so that the name of an
+ error function can be received by:
+ (code)
+ const char *name = FANN_ERRORFUNC_NAMES[error_function];
+ (end)
+
+ See Also:
+ <fann_errorfunc_enum>
+ */
+ static char const *const FANN_ERRORFUNC_NAMES[] = {
+ "FANN_ERRORFUNC_LINEAR",
+ "FANN_ERRORFUNC_TANH"
+ };
+
+ /* Enum: fann_stopfunc_enum
+ Stop criteria used during training.
+
+ FANN_STOPFUNC_MSE - Stop criterion is the Mean Square Error (MSE) value.
+ FANN_STOPFUNC_BIT - Stop criterion is the number of bits that fail. The number of bits means the
+ number of output neurons which differ by more than the bit fail limit
+ (see <fann_get_bit_fail_limit>, <fann_set_bit_fail_limit>).
+ The bits are counted in all of the training data, so this number can be higher than
+ the number of training data.
+
+ See also:
+ <fann_set_train_stop_function>, <fann_get_train_stop_function>
+ */
+ enum fann_stopfunc_enum
+ {
+ FANN_STOPFUNC_MSE = 0,
+ FANN_STOPFUNC_BIT
+ };
+
+ /* Constant: FANN_STOPFUNC_NAMES
+
+ Constant array consisting of the names for the training stop functions, so that the name of a
+ stop function can be received by:
+ (code)
+ const char *name = FANN_STOPFUNC_NAMES[stop_function];
+ (end)
+
+ See Also:
+ <fann_stopfunc_enum>
+ */
+ static char const *const FANN_STOPFUNC_NAMES[] = {
+ "FANN_STOPFUNC_MSE",
+ "FANN_STOPFUNC_BIT"
+ };
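
To tie the error and stop functions together, a hedged sketch: with FANN_STOPFUNC_BIT the desired_error argument of the training functions is read as the acceptable number of failing bits rather than an MSE (the numbers below are arbitrary):

(code)
#include "doublefann.h"

int main(void)
{
    struct fann *ann = fann_create_standard(3, 2, 3, 1);

    fann_set_train_error_function(ann, FANN_ERRORFUNC_LINEAR);
    fann_set_train_stop_function(ann, FANN_STOPFUNC_BIT);
    fann_set_bit_fail_limit(ann, 0.01);

    /* desired_error = 0 now means "train until no output bit fails" */
    fann_train_on_file(ann, "xor.train", 1000, 100, 0);

    fann_destroy(ann);
    return 0;
}
(end)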
+
+ /* Enum: fann_nettype_enum
+
+ Definition of network types used by <fann_get_network_type>
+
+ FANN_NETTYPE_LAYER - Each layer only has connections to the next layer
+ FANN_NETTYPE_SHORTCUT - Each layer has connections to all following layers
+
+ See Also:
+ <fann_get_network_type>
+
+ This enumeration appears in FANN >= 2.1.0
+ */
+ enum fann_nettype_enum
+ {
+ FANN_NETTYPE_LAYER = 0, /* Each layer only has connections to the next layer */
+ FANN_NETTYPE_SHORTCUT /* Each layer has connections to all following layers */
+ };
+
+ /* Constant: FANN_NETTYPE_NAMES
+
+ Constant array consisting of the names for the network types, so that the name of a
+ network type can be received by:
+ (code)
+ const char *network_type_name = FANN_NETTYPE_NAMES[fann_get_network_type(ann)];
+ (end)
+
+ See Also:
+ <fann_get_network_type>
+
+ This constant appears in FANN >= 2.1.0
+ */
+ static char const *const FANN_NETTYPE_NAMES[] = {
+ "FANN_NETTYPE_LAYER",
+ "FANN_NETTYPE_SHORTCUT"
+ };
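
For illustration, a sketch that checks the type at run time via the lookup array declared above; fann_create_shortcut is the documented constructor for FANN_NETTYPE_SHORTCUT networks (the 2-input/1-output shape is arbitrary):

(code)
#include <stdio.h>
#include "doublefann.h"

int main(void)
{
    struct fann *ann = fann_create_shortcut(2, 2, 1); /* 2 inputs, 1 output */
    enum fann_nettype_enum type = fann_get_network_type(ann);

    printf("network type: %s\n", FANN_NETTYPE_NAMES[type]);
    fann_destroy(ann);
    return 0;
}
(end)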
+
+
+ /* forward declarations for use with the callback */
+ struct fann;
+ struct fann_train_data;
+ /* Type: fann_callback_type
+ This callback function can be called during training when using <fann_train_on_data>,
+ <fann_train_on_file> or <fann_cascadetrain_on_data>.
+
+ >typedef int (FANN_API * fann_callback_type) (struct fann *ann, struct fann_train_data *train,
+ > unsigned int max_epochs,
+ > unsigned int epochs_between_reports,
+ > float desired_error, unsigned int epochs);
+
+ The callback can be set by using <fann_set_callback> and is very useful for doing custom
+ things during training. It is recommended to use this function when implementing custom
+ training procedures, or when visualizing the training in a GUI etc. The parameters which the
+ callback function takes are the parameters given to <fann_train_on_data>, plus an epochs
+ parameter which tells how many epochs the training has taken so far.
+
+ The callback function should return an integer; if the callback function returns -1, the training
+ will terminate.
+
+ Example of a callback function:
+ >int FANN_API test_callback(struct fann *ann, struct fann_train_data *train,
+ > unsigned int max_epochs, unsigned int epochs_between_reports,
+ > float desired_error, unsigned int epochs)
+ >{
+ > printf("Epochs %8d. MSE: %.5f. Desired-MSE: %.5f\n", epochs, fann_get_MSE(ann), desired_error);
+ > return 0;
+ >}
+
+ See also:
+ <fann_set_callback>, <fann_train_on_data>
+ */
+ FANN_EXTERNAL typedef int (FANN_API * fann_callback_type) (struct fann *ann, struct fann_train_data *train,
+ unsigned int max_epochs,
+ unsigned int epochs_between_reports,
+ float desired_error, unsigned int epochs);
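
A hedged sketch of wiring a callback in with <fann_set_callback>; here it reports progress and aborts after 500 epochs to demonstrate the -1 convention (the cutoff is arbitrary). Note the callback only fires every epochs_between_reports epochs:

(code)
#include <stdio.h>
#include "doublefann.h"

static int FANN_API progress_callback(struct fann *ann, struct fann_train_data *train,
                                      unsigned int max_epochs, unsigned int epochs_between_reports,
                                      float desired_error, unsigned int epochs)
{
    printf("Epochs %8u. MSE: %.5f. Desired-MSE: %.5f\n",
           epochs, fann_get_MSE(ann), desired_error);
    return (epochs >= 500) ? -1 : 0;  /* -1 terminates training early */
}

int main(void)
{
    struct fann *ann = fann_create_standard(3, 2, 3, 1);
    fann_set_callback(ann, progress_callback);
    fann_train_on_file(ann, "xor.train", 1000, 100, 0.001f);
    fann_destroy(ann);
    return 0;
}
(end)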
+
+
+ /* ----- Data structures -----
+ * No data within these structures should be altered directly by the user.
+ */
+
+ struct fann_neuron
+ {
+ /* Index to the first and last connection
+ * (actually the last is a past-the-end index)
+ */
+ unsigned int first_con;
+ unsigned int last_con;
+ /* The sum of the inputs multiplied with the weights */
+ fann_type sum;
+ /* The value of the activation function applied to the sum */
+ fann_type value;
+ /* The steepness of the activation function */
+ fann_type activation_steepness;
+ /* Used to choose which activation function to use */
+ enum fann_activationfunc_enum activation_function;
+ #ifdef __GNUC__
+ } __attribute__ ((packed));
+ #else
+ };
+ #endif
+
+ /* A single layer in the neural network.
+ */
+ struct fann_layer
+ {
+ /* A pointer to the first neuron in the layer
+ * When allocated, all the neurons in all the layers are actually
+ * in one long array, this is because we want to easily clear all
+ * the neurons at once.
+ */
+ struct fann_neuron *first_neuron;
+
+ /* A pointer to the neuron past the last neuron in the layer */
+ /* the number of neurons is last_neuron - first_neuron */
+ struct fann_neuron *last_neuron;
+ };
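
Since the number of neurons in a layer is last_neuron - first_neuron, layer sizes can be recovered by pointer arithmetic. A read-only sketch; direct struct access is shown purely for illustration of the pointer pairs documented above, not as recommended practice:

(code)
#include <stdio.h>
#include "doublefann.h"

static void print_layer_sizes(struct fann *ann)
{
    struct fann_layer *layer;

    for (layer = ann->first_layer; layer != ann->last_layer; layer++) {
        unsigned int n = (unsigned int)(layer->last_neuron - layer->first_neuron);
        printf("layer %ld: %u neurons (any bias neurons included)\n",
               (long)(layer - ann->first_layer), n);
    }
}
(end)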
+
+ /* Struct: struct fann_error
+
+ Structure used to store error-related information; both
+ <struct fann> and <struct fann_train_data> can be cast to this type.
+
+ See also:
+ <fann_set_error_log>, <fann_get_errno>
+ */
+ struct fann_error
+ {
+ enum fann_errno_enum errno_f;
+ FILE *error_log;
+ char *errstr;
+ };
+
+
+ /* Struct: struct fann
+ The fast artificial neural network (fann) structure.
+
+ Data within this structure should never be accessed directly, but only by using the
+ *fann_get_...* and *fann_set_...* functions.
+
+ The fann structure is created using one of the *fann_create_...* functions and each of
+ the functions which operates on the structure takes *struct fann * ann* as the first parameter.
+
+ See also:
+ <fann_create_standard>, <fann_destroy>
+ */
+ struct fann
+ {
+ /* The type of error that last occurred. */
+ enum fann_errno_enum errno_f;
+
+ /* Where to log error messages. */
+ FILE *error_log;
+
+ /* A string representation of the last error. */
+ char *errstr;
+
+ /* the learning rate of the network */
+ float learning_rate;
+
+ /* The learning momentum used for the backpropagation algorithm. */
+ float learning_momentum;
+
+ /* the connection rate of the network
+ * between 0 and 1, 1 meaning fully connected
+ */
+ float connection_rate;
+
+ /* is FANN_NETTYPE_SHORTCUT if shortcut connections are used in the ann, otherwise FANN_NETTYPE_LAYER
+ * Shortcut connections are connections that skip layers.
+ * A fully connected ann with shortcut connections is an ann where
+ * neurons have connections to all neurons in all later layers.
+ */
+ enum fann_nettype_enum network_type;
+
+ /* pointer to the first layer (input layer) in an array of all the layers,
+ * including the input and output layers
+ */
+ struct fann_layer *first_layer;
+
+ /* pointer to the layer past the last layer in an array of all the layers,
+ * including the input and output layers
+ */
+ struct fann_layer *last_layer;
+
+ /* Total number of neurons.
+ * very useful, because the actual neurons are allocated in one long array
+ */
+ unsigned int total_neurons;
+
+ /* Number of input neurons (not counting bias) */
+ unsigned int num_input;
+
+ /* Number of output neurons (not counting bias) */
+ unsigned int num_output;
+
+ /* The weight array */
+ fann_type *weights;
+
+ /* The connection array */
+ struct fann_neuron **connections;
+
+ /* Used to contain the errors used during training
+ * Is allocated during first training session,
+ * which means that if we do not train, it is never allocated.
+ */
+ fann_type *train_errors;
+
+ /* Training algorithm used when calling fann_train_on_..
+ */
+ enum fann_train_enum training_algorithm;
+
+ #ifdef FIXEDFANN
+ /* the decimal_point, used for shifting the fixed point
+ * in fixed point integer operations.
+ */
+ unsigned int decimal_point;
+
+ /* the multiplier, used for multiplying the fixed point
+ * in fixed point integer operations.
+ * Only used in special cases, since the decimal_point is much faster.
+ */
+ unsigned int multiplier;
+
+ /* When fixed point is chosen, the sigmoid function is
+ * calculated as a stepwise linear function. In the
+ * activation_results array, the result is saved, and in the
+ * two values arrays, the values that give the results are saved.
+ */
+ fann_type sigmoid_results[6];
+ fann_type sigmoid_values[6];
+ fann_type sigmoid_symmetric_results[6];
+ fann_type sigmoid_symmetric_values[6];
+ #endif
+
+ /* Total number of connections.
+ * very useful, because the actual connections
+ * are allocated in one long array
+ */
+ unsigned int total_connections;
+
+ /* used to store outputs in */
+ fann_type *output;
+
+ /* the number of data used to calculate the mean square error.
+ */
+ unsigned int num_MSE;
+
+ /* the total error value.
+ * the real mean square error is MSE_value/num_MSE
+ */
+ float MSE_value;
+
+ /* The number of outputs which would fail (only valid for classification problems)
+ */
+ unsigned int num_bit_fail;
+
+ /* The maximum difference between the actual output and the expected output
+ * which is accepted when counting the bit fails.
+ * This difference is multiplied by two when dealing with symmetric activation functions,
+ * so that symmetric and non-symmetric activation functions can use the same limit.
+ */
+ fann_type bit_fail_limit;
+
+ /* The error function used during training. (default FANN_ERRORFUNC_TANH)
+ */
+ enum fann_errorfunc_enum train_error_function;
+
+ /* The stop function used during training. (default FANN_STOPFUNC_MSE)
+ */
+ enum fann_stopfunc_enum train_stop_function;
+
+ /* The callback function used during training. (default NULL)
+ */
+ fann_callback_type callback;
+
+ /* A pointer to user defined data. (default NULL)
+ */
+ void *user_data;
+
+ /* Variables for use with Cascade Correlation */
+
+ /* The error must change by at least this
+ * fraction of its old value to count as a
+ * significant change.
+ */
+ float cascade_output_change_fraction;
+
+ /* No change in this number of epochs will cause
+ * stagnation.
+ */
+ unsigned int cascade_output_stagnation_epochs;
+
+ /* The error must change by at least this
+ * fraction of its old value to count as a
+ * significant change.
+ */
+ float cascade_candidate_change_fraction;
+
+ /* No change in this number of epochs will cause
+ * stagnation.
+ */
+ unsigned int cascade_candidate_stagnation_epochs;
+
+ /* The current best candidate, which will be installed.
+ */
+ unsigned int cascade_best_candidate;
+
+ /* The upper limit for a candidate score
+ */
+ fann_type cascade_candidate_limit;
+
+ /* Scale of copied candidate output weights
+ */
+ fann_type cascade_weight_multiplier;
+
+ /* Maximum epochs to train the output neurons during cascade training
+ */
+ unsigned int cascade_max_out_epochs;
+
+ /* Maximum epochs to train the candidate neurons during cascade training
+ */
+ unsigned int cascade_max_cand_epochs;
+
+ /* An array consisting of the activation functions used when doing
+ * cascade training.
+ */
+ enum fann_activationfunc_enum *cascade_activation_functions;
+
+ /* The number of elements in the cascade_activation_functions array.
+ */
+ unsigned int cascade_activation_functions_count;
+
+ /* An array consisting of the steepnesses used during cascade training.
+ */
+ fann_type *cascade_activation_steepnesses;
+
+ /* The number of elements in the cascade_activation_steepnesses array.
+ */
+ unsigned int cascade_activation_steepnesses_count;
+
+ /* The number of candidates of each type that will be present.
+ * The actual number of candidates is then
+ * cascade_activation_functions_count *
+ * cascade_activation_steepnesses_count *
+ * cascade_num_candidate_groups
+ */
+ unsigned int cascade_num_candidate_groups;
+
+ /* An array consisting of the score of the individual candidates,
+ * which is used to decide which candidate is the best
+ */
+ fann_type *cascade_candidate_scores;
+
+ /* The number of allocated neurons during cascade correlation algorithms.
+ * This number might be higher than the actual number of neurons to avoid
+ * allocating new space too often.
+ */
+ unsigned int total_neurons_allocated;
+
+ /* The number of allocated connections during cascade correlation algorithms.
+ * This number might be higher than the actual number of connections to avoid
+ * allocating new space too often.
+ */
+ unsigned int total_connections_allocated;
+
+ /* Variables for use with Quickprop training */
+
+ /* Decay is used to keep the weights from growing too large */
+ float quickprop_decay;
+
+ /* Mu is a factor used to increase and decrease the stepsize */
+ float quickprop_mu;
+
+ /* Variables for use with RPROP training */
+
+ /* Tells how much the stepsize should increase during learning */
+ float rprop_increase_factor;
+
+ /* Tells how much the stepsize should decrease during learning */
+ float rprop_decrease_factor;
+
+ /* The minimum stepsize */
+ float rprop_delta_min;
+
+ /* The maximum stepsize */
+ float rprop_delta_max;
+
+ /* The initial stepsize */
+ float rprop_delta_zero;
+
+ /* Used to contain the slope errors used during batch training
+ * Is allocated during first training session,
+ * which means that if we do not train, it is never allocated.
+ */
+ fann_type *train_slopes;
+
+ /* The previous step taken by the quickprop/rprop procedures.
+ * Not allocated if not used.
+ */
+ fann_type *prev_steps;
+
+ /* The slope values used by the quickprop/rprop procedures.
+ * Not allocated if not used.
+ */
+ fann_type *prev_train_slopes;
+
+ /* The last delta applied to a connection weight.
+ * This is used for the momentum term in the backpropagation algorithm.
+ * Not allocated if not used.
+ */
+ fann_type *prev_weights_deltas;
+
+ #ifndef FIXEDFANN
+ /* Arithmetic mean used to remove steady component in input data. */
+ float *scale_mean_in;
+
+ /* Standard deviation used to normalize input data (mostly to [-1;1]). */
+ float *scale_deviation_in;
+
+ /* User-defined new minimum for input data.
+ * Resulting data values may be less than user-defined minimum.
+ */
+ float *scale_new_min_in;
+
+ /* Used to scale data to user-defined new maximum for input data.
+ * Resulting data values may be greater than user-defined maximum.
+ */
+ float *scale_factor_in;
+
+ /* Arithmetic mean used to remove steady component in output data. */
+ float *scale_mean_out;
+
+ /* Standard deviation used to normalize output data (mostly to [-1;1]). */
+ float *scale_deviation_out;
+
+ /* User-defined new minimum for output data.
+ * Resulting data values may be less than user-defined minimum.
+ */
+ float *scale_new_min_out;
+
+ /* Used to scale data to user-defined new maximum for output data.
+ * Resulting data values may be greater than user-defined maximum.
+ */
+ float *scale_factor_out;
+ #endif
+ };
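
In keeping with the comment block above (never touch the fields directly), a short sketch reading a few of these fields through their public getters:

(code)
#include <stdio.h>
#include "doublefann.h"

int main(void)
{
    struct fann *ann = fann_create_standard(3, 2, 3, 1);

    printf("learning rate:     %f\n", fann_get_learning_rate(ann));
    printf("connection rate:   %f\n", fann_get_connection_rate(ann));
    printf("total neurons:     %u\n", fann_get_total_neurons(ann));
    printf("total connections: %u\n", fann_get_total_connections(ann));
    printf("bit fail limit:    %f\n", (double)fann_get_bit_fail_limit(ann));

    fann_destroy(ann);
    return 0;
}
(end)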
+
+ /* Type: fann_connection
+
+ Describes a connection between two neurons and its weight
+
+ from_neuron - Unique number used to identify source neuron
+ to_neuron - Unique number used to identify destination neuron
+ weight - The numerical value of the weight
+
+ See Also:
+ <fann_get_connection_array>, <fann_set_weight_array>
+
+ This structure appears in FANN >= 2.1.0
+ */
+ struct fann_connection
+ {
+ /* Unique number used to identify source neuron */
+ unsigned int from_neuron;
+ /* Unique number used to identify destination neuron */
+ unsigned int to_neuron;
+ /* The numerical value of the weight */
+ fann_type weight;
+ };
+
+ #endif
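
Finally, a hedged sketch of dumping the weights with <fann_get_connection_array>, which fills a caller-allocated array of fann_get_total_connections(ann) entries:

(code)
#include <stdio.h>
#include <stdlib.h>
#include "doublefann.h"

int main(void)
{
    struct fann *ann = fann_create_standard(3, 2, 3, 1);
    unsigned int i, n = fann_get_total_connections(ann);
    struct fann_connection *cons = malloc(n * sizeof(*cons));

    fann_get_connection_array(ann, cons);
    for (i = 0; i < n; i++)
        printf("%u -> %u: weight %f\n", cons[i].from_neuron,
               cons[i].to_neuron, (double)cons[i].weight);

    free(cons);
    fann_destroy(ann);
    return 0;
}
(end)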