moo_fann 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/ext/moo_fann/config.h +8 -0
- data/ext/moo_fann/doublefann.c +30 -0
- data/ext/moo_fann/doublefann.h +33 -0
- data/ext/moo_fann/extconf.rb +25 -0
- data/ext/moo_fann/fann.c +1803 -0
- data/ext/moo_fann/fann.h +613 -0
- data/ext/moo_fann/fann_activation.h +144 -0
- data/ext/moo_fann/fann_augment.h +133 -0
- data/ext/moo_fann/fann_cascade.c +1048 -0
- data/ext/moo_fann/fann_cascade.h +557 -0
- data/ext/moo_fann/fann_data.h +824 -0
- data/ext/moo_fann/fann_error.c +210 -0
- data/ext/moo_fann/fann_error.h +165 -0
- data/ext/moo_fann/fann_internal.h +152 -0
- data/ext/moo_fann/fann_io.c +802 -0
- data/ext/moo_fann/fann_io.h +100 -0
- data/ext/moo_fann/fann_train.c +1047 -0
- data/ext/moo_fann/fann_train.h +1310 -0
- data/ext/moo_fann/fann_train_data.c +1243 -0
- data/ext/moo_fann/moo_fann.c +1768 -0
- data/ext/moo_fann/ruby_compat.h +12 -0
- data/lib/moo_fann.rb +30 -0
- data/lib/moo_fann/version.rb +30 -0
- metadata +75 -0
checksums.yaml
ADDED
@@ -0,0 +1,7 @@
+---
+SHA1:
+  metadata.gz: e3eee9275747d3cf65e77eeada9e5d806956a917
+  data.tar.gz: b682899ec1dd9af2491d19ed51606c13d95febc8
+SHA512:
+  metadata.gz: a13330d6aa61e2095ce0fb60aaad7cf7ce68913f0ee97e7a31e1feccb829aeda28fae62a7358565e82ff85b65107e199dbff66a636146d4c24d507c5baa1d430
+  data.tar.gz: a17e44ac35899e7b24a551548a2bb09ad14006b7ef7b78a4a05dba800ea529e765c5bb1154c5bf7fcce9029317e77f8b72e2caa0e1e133b59f658d6a1b818756
data/ext/moo_fann/doublefann.c
ADDED
@@ -0,0 +1,30 @@
+/*
+  Fast Artificial Neural Network Library (fann)
+  Copyright (C) 2003-2012 Steffen Nissen (sn@leenissen.dk)
+
+  This library is free software; you can redistribute it and/or
+  modify it under the terms of the GNU Lesser General Public
+  License as published by the Free Software Foundation; either
+  version 2.1 of the License, or (at your option) any later version.
+
+  This library is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+  Lesser General Public License for more details.
+
+  You should have received a copy of the GNU Lesser General Public
+  License along with this library; if not, write to the Free Software
+  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+
+/* Easy way to allow for build of multiple binaries */
+
+#include "config.h"
+#include "doublefann.h"
+
+#include "fann.c"
+#include "fann_io.c"
+#include "fann_train.c"
+#include "fann_train_data.c"
+#include "fann_error.c"
+#include "fann_cascade.c"
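Note: doublefann.c is an amalgamated build unit. After config.h and doublefann.h it #includes the remaining .c sources directly, so compiling this single file (as the $objs list in extconf.rb below arranges) yields the complete double-precision FANN library in one object file.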
data/ext/moo_fann/doublefann.h
ADDED
@@ -0,0 +1,33 @@
+/*
+  Fast Artificial Neural Network Library (fann)
+  Copyright (C) 2003-2012 Steffen Nissen (sn@leenissen.dk)
+
+  This library is free software; you can redistribute it and/or
+  modify it under the terms of the GNU Lesser General Public
+  License as published by the Free Software Foundation; either
+  version 2.1 of the License, or (at your option) any later version.
+
+  This library is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+  Lesser General Public License for more details.
+
+  You should have received a copy of the GNU Lesser General Public
+  License along with this library; if not, write to the Free Software
+  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+
+#ifndef __doublefann_h__
+#define __doublefann_h__
+
+typedef double fann_type;
+
+#undef DOUBLEFANN
+#define DOUBLEFANN
+#define FANNPRINTF "%.20e"
+#define FANNSCANF "%le"
+
+#define FANN_INCLUDE
+#include "fann.h"
+
+#endif
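doublefann.h pins fann_type to double and defines the printf/scanf formats used when networks are saved and loaded. A minimal sketch of what that means for calling code (illustrative only, assuming the package headers are on the include path):

    #include <stdio.h>
    #include "doublefann.h"

    int main(void)
    {
        fann_type w = 0.125;        /* fann_type is double in this build */
        printf(FANNPRINTF "\n", w); /* "%.20e", the precision used in saved nets */
        return 0;
    }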
data/ext/moo_fann/extconf.rb
ADDED
@@ -0,0 +1,25 @@
+# frozen_string_literal: true
+
+# Copyright 2018 Maxine Michalski <maxine@furfind.net>
+# Copyright 2013 ruby-fann contributors
+# <https://github.com/tangledpath/ruby-fann#contributors>
+#
+# This file is part of moo_fann.
+#
+# moo_fann is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# moo_fann is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with moo_fann. If not, see <http://www.gnu.org/licenses/>.
+
+require 'mkmf'
+$objs = ['moo_fann.o', 'doublefann.o']
+have_header('doublefann.h')
+create_makefile('moo_fann')
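This is the standard mkmf flow: `ruby extconf.rb` checks for doublefann.h and emits a Makefile whose object list is pinned to moo_fann.o and doublefann.o; running `make` then builds the moo_fann shared object that the gem's lib/moo_fann.rb loads.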
data/ext/moo_fann/fann.c
ADDED
@@ -0,0 +1,1803 @@
+/*
+  Fast Artificial Neural Network Library (fann)
+  Copyright (C) 2003-2012 Steffen Nissen (sn@leenissen.dk)
+
+  This library is free software; you can redistribute it and/or
+  modify it under the terms of the GNU Lesser General Public
+  License as published by the Free Software Foundation; either
+  version 2.1 of the License, or (at your option) any later version.
+
+  This library is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+  Lesser General Public License for more details.
+
+  You should have received a copy of the GNU Lesser General Public
+  License along with this library; if not, write to the Free Software
+  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <string.h>
+#include <time.h>
+#include <math.h>
+
+#include "config.h"
+#include "fann.h"
+
+/* #define FANN_NO_SEED */
+
+FANN_EXTERNAL struct fann *FANN_API fann_create_standard(unsigned int num_layers, ...)
+{
+	struct fann *ann;
+	va_list layer_sizes;
+	int i;
+	unsigned int *layers = (unsigned int *) calloc(num_layers, sizeof(unsigned int));
+
+	if(layers == NULL)
+	{
+		fann_error(NULL, FANN_E_CANT_ALLOCATE_MEM);
+		return NULL;
+	}
+
+	va_start(layer_sizes, num_layers);
+	for(i = 0; i < (int) num_layers; i++)
+	{
+		layers[i] = va_arg(layer_sizes, unsigned int);
+	}
+	va_end(layer_sizes);
+
+	ann = fann_create_standard_array(num_layers, layers);
+
+	free(layers);
+
+	return ann;
+}
+
+FANN_EXTERNAL struct fann *FANN_API fann_create_standard_array(unsigned int num_layers,
+	const unsigned int *layers)
+{
+	return fann_create_sparse_array(1, num_layers, layers);
+}
+
+FANN_EXTERNAL struct fann *FANN_API fann_create_sparse(float connection_rate,
+	unsigned int num_layers, ...)
+{
+	struct fann *ann;
+	va_list layer_sizes;
+	int i;
+	unsigned int *layers = (unsigned int *) calloc(num_layers, sizeof(unsigned int));
+
+	if(layers == NULL)
+	{
+		fann_error(NULL, FANN_E_CANT_ALLOCATE_MEM);
+		return NULL;
+	}
+
+	va_start(layer_sizes, num_layers);
+	for(i = 0; i < (int) num_layers; i++)
+	{
+		layers[i] = va_arg(layer_sizes, unsigned int);
+	}
+	va_end(layer_sizes);
+
+	ann = fann_create_sparse_array(connection_rate, num_layers, layers);
+
+	free(layers);
+
+	return ann;
+}
+
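The two variadic creators above simply collect their layer sizes into a heap array and delegate to the corresponding _array function (fann_create_standard uses a connection rate of 1). A sketch of the two equivalent call styles (layer sizes are illustrative; error handling omitted):

    #include "doublefann.h"

    int main(void)
    {
        /* 2 inputs, 3 hidden, 1 output - both calls build the same network */
        struct fann *a = fann_create_standard(3, 2, 3, 1);

        unsigned int layers[3] = { 2, 3, 1 };
        struct fann *b = fann_create_standard_array(3, layers);

        fann_destroy(a);
        fann_destroy(b);
        return 0;
    }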
+FANN_EXTERNAL struct fann *FANN_API fann_create_sparse_array(float connection_rate,
+	unsigned int num_layers,
+	const unsigned int *layers)
+{
+	struct fann_layer *layer_it, *last_layer, *prev_layer;
+	struct fann *ann;
+	struct fann_neuron *neuron_it, *last_neuron, *random_neuron, *bias_neuron;
+#ifdef DEBUG
+	unsigned int prev_layer_size;
+#endif
+	unsigned int num_neurons_in, num_neurons_out, i, j;
+	unsigned int min_connections, max_connections, num_connections;
+	unsigned int connections_per_neuron, allocated_connections;
+	unsigned int random_number, found_connection, tmp_con;
+
+#ifdef FIXEDFANN
+	unsigned int multiplier;
+#endif
+	if(connection_rate > 1)
+	{
+		connection_rate = 1;
+	}
+
+	/* seed random */
+#ifndef FANN_NO_SEED
+	fann_seed_rand();
+#endif
+
+	/* allocate the general structure */
+	ann = fann_allocate_structure(num_layers);
+	if(ann == NULL)
+	{
+		fann_error(NULL, FANN_E_CANT_ALLOCATE_MEM);
+		return NULL;
+	}
+
+	ann->connection_rate = connection_rate;
+#ifdef FIXEDFANN
+	multiplier = ann->multiplier;
+	fann_update_stepwise(ann);
+#endif
+
+	/* determine how many neurons there should be in each layer */
+	i = 0;
+	for(layer_it = ann->first_layer; layer_it != ann->last_layer; layer_it++)
+	{
+		/* we do not allocate room here, but we make sure that
+		 * last_neuron - first_neuron is the number of neurons */
+		layer_it->first_neuron = NULL;
+		layer_it->last_neuron = layer_it->first_neuron + layers[i++] + 1;	/* +1 for bias */
+		ann->total_neurons += layer_it->last_neuron - layer_it->first_neuron;
+	}
+
+	ann->num_output = (ann->last_layer - 1)->last_neuron - (ann->last_layer - 1)->first_neuron - 1;
+	ann->num_input = ann->first_layer->last_neuron - ann->first_layer->first_neuron - 1;
+
+	/* allocate room for the actual neurons */
+	fann_allocate_neurons(ann);
+	if(ann->errno_f == FANN_E_CANT_ALLOCATE_MEM)
+	{
+		fann_destroy(ann);
+		return NULL;
+	}
+
+#ifdef DEBUG
+	printf("creating network with connection rate %f\n", connection_rate);
+	printf("input\n");
+	printf("  layer       : %d neurons, 1 bias\n",
+		   (int)(ann->first_layer->last_neuron - ann->first_layer->first_neuron - 1));
+#endif
+
+	num_neurons_in = ann->num_input;
+	for(layer_it = ann->first_layer + 1; layer_it != ann->last_layer; layer_it++)
+	{
+		num_neurons_out = layer_it->last_neuron - layer_it->first_neuron - 1;
+		/* if all neurons in each layer should be connected to at least one neuron
+		 * in the previous layer, and one neuron in the next layer,
+		 * and the bias node should be connected to all neurons in the next layer,
+		 * then this is the minimum amount of connections */
+		min_connections = fann_max(num_neurons_in, num_neurons_out); /* not calculating bias */
+		max_connections = num_neurons_in * num_neurons_out;	/* not calculating bias */
+		num_connections = fann_max(min_connections,
+								   (unsigned int) (0.5 + (connection_rate * max_connections))) +
+								   num_neurons_out;
+
+		connections_per_neuron = num_connections / num_neurons_out;
+		allocated_connections = 0;
+		/* Now split out the connections on the different neurons */
+		for(i = 0; i != num_neurons_out; i++)
+		{
+			layer_it->first_neuron[i].first_con = ann->total_connections + allocated_connections;
+			allocated_connections += connections_per_neuron;
+			layer_it->first_neuron[i].last_con = ann->total_connections + allocated_connections;
+
+			layer_it->first_neuron[i].activation_function = FANN_SIGMOID_STEPWISE;
+#ifdef FIXEDFANN
+			layer_it->first_neuron[i].activation_steepness = ann->multiplier / 2;
+#else
+			layer_it->first_neuron[i].activation_steepness = 0.5;
+#endif
+
+			if(allocated_connections < (num_connections * (i + 1)) / num_neurons_out)
+			{
+				layer_it->first_neuron[i].last_con++;
+				allocated_connections++;
+			}
+		}
+
+		/* bias neuron also gets stuff */
+		layer_it->first_neuron[i].first_con = ann->total_connections + allocated_connections;
+		layer_it->first_neuron[i].last_con = ann->total_connections + allocated_connections;
+
+		ann->total_connections += num_connections;
+
+		/* used in the next run of the loop */
+		num_neurons_in = num_neurons_out;
+	}
+
+	fann_allocate_connections(ann);
+	if(ann->errno_f == FANN_E_CANT_ALLOCATE_MEM)
+	{
+		fann_destroy(ann);
+		return NULL;
+	}
+
+	if(connection_rate >= 1)
+	{
+#ifdef DEBUG
+		prev_layer_size = ann->num_input + 1;
+#endif
+		prev_layer = ann->first_layer;
+		last_layer = ann->last_layer;
+		for(layer_it = ann->first_layer + 1; layer_it != last_layer; layer_it++)
+		{
+			last_neuron = layer_it->last_neuron - 1;
+			for(neuron_it = layer_it->first_neuron; neuron_it != last_neuron; neuron_it++)
+			{
+				tmp_con = neuron_it->last_con - 1;
+				for(i = neuron_it->first_con; i != tmp_con; i++)
+				{
+					ann->weights[i] = (fann_type) fann_random_weight();
+					/* these connections are still initialized for fully connected networks,
+					 * to allow operations that are not optimized for fully connected
+					 * networks to work.
+					 */
+					ann->connections[i] = prev_layer->first_neuron + (i - neuron_it->first_con);
+				}
+
+				/* bias weight */
+				ann->weights[tmp_con] = (fann_type) fann_random_bias_weight();
+				ann->connections[tmp_con] = prev_layer->first_neuron + (tmp_con - neuron_it->first_con);
+			}
+#ifdef DEBUG
+			prev_layer_size = layer_it->last_neuron - layer_it->first_neuron;
+#endif
+			prev_layer = layer_it;
+#ifdef DEBUG
+			printf("  layer       : %d neurons, 1 bias\n", prev_layer_size - 1);
+#endif
+		}
+	}
+	else
+	{
+		/* make connections for a network that is not fully connected */
+
+		/* generally, what we do is first to connect all the input
+		 * neurons to an output neuron, respecting the number of
+		 * available input neurons for each output neuron. Then
+		 * we go through all the output neurons, and connect the
+		 * rest of the connections to input neurons that they are
+		 * not already connected to.
+		 */
+
+		/* All the connections are cleared by calloc, because we want to
+		 * be able to see which connections are already connected */
+
+		for(layer_it = ann->first_layer + 1; layer_it != ann->last_layer; layer_it++)
+		{
+
+			num_neurons_out = layer_it->last_neuron - layer_it->first_neuron - 1;
+			num_neurons_in = (layer_it - 1)->last_neuron - (layer_it - 1)->first_neuron - 1;
+
+			/* first connect the bias neuron */
+			bias_neuron = (layer_it - 1)->last_neuron - 1;
+			last_neuron = layer_it->last_neuron - 1;
+			for(neuron_it = layer_it->first_neuron; neuron_it != last_neuron; neuron_it++)
+			{
+
+				ann->connections[neuron_it->first_con] = bias_neuron;
+				ann->weights[neuron_it->first_con] = (fann_type) fann_random_bias_weight();
+			}
+
+			/* then connect all neurons in the input layer */
+			last_neuron = (layer_it - 1)->last_neuron - 1;
+			for(neuron_it = (layer_it - 1)->first_neuron; neuron_it != last_neuron; neuron_it++)
+			{
+
+				/* random neuron in the output layer that has space
+				 * for more connections */
+				do
+				{
+					random_number = (int) (0.5 + fann_rand(0, num_neurons_out - 1));
+					random_neuron = layer_it->first_neuron + random_number;
+					/* checks the last space in the connections array for room */
+				}
+				while(ann->connections[random_neuron->last_con - 1]);
+
+				/* find an empty space in the connection array and connect */
+				for(i = random_neuron->first_con; i < random_neuron->last_con; i++)
+				{
+					if(ann->connections[i] == NULL)
+					{
+						ann->connections[i] = neuron_it;
+						ann->weights[i] = (fann_type) fann_random_weight();
+						break;
+					}
+				}
+			}
+
+			/* then connect the rest of the unconnected neurons */
+			last_neuron = layer_it->last_neuron - 1;
+			for(neuron_it = layer_it->first_neuron; neuron_it != last_neuron; neuron_it++)
+			{
+				/* find empty space in the connection array and connect */
+				for(i = neuron_it->first_con; i < neuron_it->last_con; i++)
+				{
+					/* continue if already connected */
+					if(ann->connections[i] != NULL)
+						continue;
+
+					do
+					{
+						found_connection = 0;
+						random_number = (int) (0.5 + fann_rand(0, num_neurons_in - 1));
+						random_neuron = (layer_it - 1)->first_neuron + random_number;
+
+						/* check to see if this connection is already there */
+						for(j = neuron_it->first_con; j < i; j++)
+						{
+							if(random_neuron == ann->connections[j])
+							{
+								found_connection = 1;
+								break;
+							}
+						}
+
+					}
+					while(found_connection);
+
+					/* we have found a neuron that is not already
+					 * connected to us, connect it */
+					ann->connections[i] = random_neuron;
+					ann->weights[i] = (fann_type) fann_random_weight();
+				}
+			}
+
+#ifdef DEBUG
+			printf("  layer       : %d neurons, 1 bias\n", num_neurons_out);
+#endif
+		}
+
+		/* TODO it would be nice to have the randomly created
+		 * connections sorted for smoother memory access.
+		 */
+	}
+
+#ifdef DEBUG
+	printf("output\n");
+#endif
+
+	return ann;
+}
+
+
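fann_create_sparse_array clamps connection_rate to at most 1. For lower rates it guarantees at least max(num_in, num_out) connections per layer plus the bias wiring, then fills the remaining slots with random, duplicate-free connections. A sketch of requesting a roughly half-connected network (sizes are illustrative):

    #include "doublefann.h"

    int main(void)
    {
        /* about half of the possible connections between consecutive
           layers are created; the remaining slots stay unused */
        struct fann *ann = fann_create_sparse(0.5f, 3, 8, 16, 4);
        fann_destroy(ann);
        return 0;
    }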
+FANN_EXTERNAL struct fann *FANN_API fann_create_shortcut(unsigned int num_layers, ...)
+{
+	struct fann *ann;
+	int i;
+	va_list layer_sizes;
+	unsigned int *layers = (unsigned int *) calloc(num_layers, sizeof(unsigned int));
+
+	if(layers == NULL)
+	{
+		fann_error(NULL, FANN_E_CANT_ALLOCATE_MEM);
+		return NULL;
+	}
+
+
+	va_start(layer_sizes, num_layers);
+	for(i = 0; i < (int) num_layers; i++)
+	{
+		layers[i] = va_arg(layer_sizes, unsigned int);
+	}
+	va_end(layer_sizes);
+
+	ann = fann_create_shortcut_array(num_layers, layers);
+
+	free(layers);
+
+	return ann;
+}
+
+FANN_EXTERNAL struct fann *FANN_API fann_create_shortcut_array(unsigned int num_layers,
+	const unsigned int *layers)
+{
+	struct fann_layer *layer_it, *layer_it2, *last_layer;
+	struct fann *ann;
+	struct fann_neuron *neuron_it, *neuron_it2 = 0;
+	unsigned int i;
+	unsigned int num_neurons_in, num_neurons_out;
+
+#ifdef FIXEDFANN
+	unsigned int multiplier;
+#endif
+	/* seed random */
+#ifndef FANN_NO_SEED
+	fann_seed_rand();
+#endif
+
+	/* allocate the general structure */
+	ann = fann_allocate_structure(num_layers);
+	if(ann == NULL)
+	{
+		fann_error(NULL, FANN_E_CANT_ALLOCATE_MEM);
+		return NULL;
+	}
+
+	ann->connection_rate = 1;
+	ann->network_type = FANN_NETTYPE_SHORTCUT;
+#ifdef FIXEDFANN
+	multiplier = ann->multiplier;
+	fann_update_stepwise(ann);
+#endif
+
+	/* determine how many neurons there should be in each layer */
+	i = 0;
+	for(layer_it = ann->first_layer; layer_it != ann->last_layer; layer_it++)
+	{
+		/* we do not allocate room here, but we make sure that
+		 * last_neuron - first_neuron is the number of neurons */
+		layer_it->first_neuron = NULL;
+		layer_it->last_neuron = layer_it->first_neuron + layers[i++];
+		if(layer_it == ann->first_layer)
+		{
+			/* there is a bias neuron in the first layer */
+			layer_it->last_neuron++;
+		}
+
+		ann->total_neurons += layer_it->last_neuron - layer_it->first_neuron;
+	}
+
+	ann->num_output = (ann->last_layer - 1)->last_neuron - (ann->last_layer - 1)->first_neuron;
+	ann->num_input = ann->first_layer->last_neuron - ann->first_layer->first_neuron - 1;
+
+	/* allocate room for the actual neurons */
+	fann_allocate_neurons(ann);
+	if(ann->errno_f == FANN_E_CANT_ALLOCATE_MEM)
+	{
+		fann_destroy(ann);
+		return NULL;
+	}
+
+#ifdef DEBUG
+	printf("creating fully shortcut connected network.\n");
+	printf("input\n");
+	printf("  layer       : %d neurons, 1 bias\n",
+		   (int)(ann->first_layer->last_neuron - ann->first_layer->first_neuron - 1));
+#endif
+
+	num_neurons_in = ann->num_input;
+	last_layer = ann->last_layer;
+	for(layer_it = ann->first_layer + 1; layer_it != last_layer; layer_it++)
+	{
+		num_neurons_out = layer_it->last_neuron - layer_it->first_neuron;
+
+		/* Now split out the connections on the different neurons */
+		for(i = 0; i != num_neurons_out; i++)
+		{
+			layer_it->first_neuron[i].first_con = ann->total_connections;
+			ann->total_connections += num_neurons_in + 1;
+			layer_it->first_neuron[i].last_con = ann->total_connections;
+
+			layer_it->first_neuron[i].activation_function = FANN_SIGMOID_STEPWISE;
+#ifdef FIXEDFANN
+			layer_it->first_neuron[i].activation_steepness = ann->multiplier / 2;
+#else
+			layer_it->first_neuron[i].activation_steepness = 0.5;
+#endif
+		}
+
+#ifdef DEBUG
+		printf("  layer       : %d neurons, 0 bias\n", num_neurons_out);
+#endif
+		/* used in the next run of the loop */
+		num_neurons_in += num_neurons_out;
+	}
+
+	fann_allocate_connections(ann);
+	if(ann->errno_f == FANN_E_CANT_ALLOCATE_MEM)
+	{
+		fann_destroy(ann);
+		return NULL;
+	}
+
+	/* Connections are created from all neurons to all neurons in later layers
+	 */
+	num_neurons_in = ann->num_input + 1;
+	for(layer_it = ann->first_layer + 1; layer_it != last_layer; layer_it++)
+	{
+		for(neuron_it = layer_it->first_neuron; neuron_it != layer_it->last_neuron; neuron_it++)
+		{
+
+			i = neuron_it->first_con;
+			for(layer_it2 = ann->first_layer; layer_it2 != layer_it; layer_it2++)
+			{
+				for(neuron_it2 = layer_it2->first_neuron; neuron_it2 != layer_it2->last_neuron;
+					neuron_it2++)
+				{
+
+					ann->weights[i] = (fann_type) fann_random_weight();
+					ann->connections[i] = neuron_it2;
+					i++;
+				}
+			}
+		}
+		num_neurons_in += layer_it->last_neuron - layer_it->first_neuron;
+	}
+
+#ifdef DEBUG
+	printf("output\n");
+#endif
+
+	return ann;
+}
+
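In a shortcut network every neuron connects to every neuron in all earlier layers, including the inputs, and only the first layer carries a bias neuron. Sketch (sizes are illustrative):

    #include "doublefann.h"

    int main(void)
    {
        /* hidden and output neurons also see the input layer directly */
        struct fann *ann = fann_create_shortcut(3, 2, 4, 1);
        fann_destroy(ann);
        return 0;
    }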
+FANN_EXTERNAL fann_type *FANN_API fann_run(struct fann * ann, fann_type * input)
+{
+	struct fann_neuron *neuron_it, *last_neuron, *neurons, **neuron_pointers;
+	unsigned int i, num_connections, num_input, num_output;
+	fann_type neuron_sum, *output;
+	fann_type *weights;
+	struct fann_layer *layer_it, *last_layer;
+	unsigned int activation_function;
+	fann_type steepness;
+
+	/* store some variables locally for fast access */
+	struct fann_neuron *first_neuron = ann->first_layer->first_neuron;
+
+#ifdef FIXEDFANN
+	int multiplier = ann->multiplier;
+	unsigned int decimal_point = ann->decimal_point;
+
+	/* values used for the stepwise linear sigmoid function */
+	fann_type r1 = 0, r2 = 0, r3 = 0, r4 = 0, r5 = 0, r6 = 0;
+	fann_type v1 = 0, v2 = 0, v3 = 0, v4 = 0, v5 = 0, v6 = 0;
+
+	fann_type last_steepness = 0;
+	unsigned int last_activation_function = 0;
+#else
+	fann_type max_sum = 0;
+#endif
+
+	/* first set the input */
+	num_input = ann->num_input;
+	for(i = 0; i != num_input; i++)
+	{
+#ifdef FIXEDFANN
+		if(fann_abs(input[i]) > multiplier)
+		{
+			printf
+				("Warning input number %d is out of range -%d - %d with value %d, integer overflow may occur.\n",
+				 i, multiplier, multiplier, input[i]);
+		}
+#endif
+		first_neuron[i].value = input[i];
+	}
+	/* Set the bias neuron in the input layer */
+#ifdef FIXEDFANN
+	(ann->first_layer->last_neuron - 1)->value = multiplier;
+#else
+	(ann->first_layer->last_neuron - 1)->value = 1;
+#endif
+
+	last_layer = ann->last_layer;
+	for(layer_it = ann->first_layer + 1; layer_it != last_layer; layer_it++)
+	{
+		last_neuron = layer_it->last_neuron;
+		for(neuron_it = layer_it->first_neuron; neuron_it != last_neuron; neuron_it++)
+		{
+			if(neuron_it->first_con == neuron_it->last_con)
+			{
+				/* bias neurons */
+#ifdef FIXEDFANN
+				neuron_it->value = multiplier;
+#else
+				neuron_it->value = 1;
+#endif
+				continue;
+			}
+
+			activation_function = neuron_it->activation_function;
+			steepness = neuron_it->activation_steepness;
+
+			neuron_sum = 0;
+			num_connections = neuron_it->last_con - neuron_it->first_con;
+			weights = ann->weights + neuron_it->first_con;
+
+			if(ann->connection_rate >= 1)
+			{
+				if(ann->network_type == FANN_NETTYPE_SHORTCUT)
+				{
+					neurons = ann->first_layer->first_neuron;
+				}
+				else
+				{
+					neurons = (layer_it - 1)->first_neuron;
+				}
+
+
+				/* unrolled loop start */
+				i = num_connections & 3;	/* same as modulo 4 */
+				switch (i)
+				{
+					case 3:
+						neuron_sum += fann_mult(weights[2], neurons[2].value);
+					case 2:
+						neuron_sum += fann_mult(weights[1], neurons[1].value);
+					case 1:
+						neuron_sum += fann_mult(weights[0], neurons[0].value);
+					case 0:
+						break;
+				}
+
+				for(; i != num_connections; i += 4)
+				{
+					neuron_sum +=
+						fann_mult(weights[i], neurons[i].value) +
+						fann_mult(weights[i + 1], neurons[i + 1].value) +
+						fann_mult(weights[i + 2], neurons[i + 2].value) +
+						fann_mult(weights[i + 3], neurons[i + 3].value);
+				}
+				/* unrolled loop end */
+
+				/*
+				 * for(i = 0;i != num_connections; i++){
+				 * printf("%f += %f*%f, ", neuron_sum, weights[i], neurons[i].value);
+				 * neuron_sum += fann_mult(weights[i], neurons[i].value);
+				 * }
+				 */
+			}
+			else
+			{
+				neuron_pointers = ann->connections + neuron_it->first_con;
+
+				i = num_connections & 3;	/* same as modulo 4 */
+				switch (i)
+				{
+					case 3:
+						neuron_sum += fann_mult(weights[2], neuron_pointers[2]->value);
+					case 2:
+						neuron_sum += fann_mult(weights[1], neuron_pointers[1]->value);
+					case 1:
+						neuron_sum += fann_mult(weights[0], neuron_pointers[0]->value);
+					case 0:
+						break;
+				}
+
+				for(; i != num_connections; i += 4)
+				{
+					neuron_sum +=
+						fann_mult(weights[i], neuron_pointers[i]->value) +
+						fann_mult(weights[i + 1], neuron_pointers[i + 1]->value) +
+						fann_mult(weights[i + 2], neuron_pointers[i + 2]->value) +
+						fann_mult(weights[i + 3], neuron_pointers[i + 3]->value);
+				}
+			}
+
+#ifdef FIXEDFANN
+			neuron_it->sum = fann_mult(steepness, neuron_sum);
+
+			if(activation_function != last_activation_function || steepness != last_steepness)
+			{
+				switch (activation_function)
+				{
+					case FANN_SIGMOID:
+					case FANN_SIGMOID_STEPWISE:
+						r1 = ann->sigmoid_results[0];
+						r2 = ann->sigmoid_results[1];
+						r3 = ann->sigmoid_results[2];
+						r4 = ann->sigmoid_results[3];
+						r5 = ann->sigmoid_results[4];
+						r6 = ann->sigmoid_results[5];
+						v1 = ann->sigmoid_values[0] / steepness;
+						v2 = ann->sigmoid_values[1] / steepness;
+						v3 = ann->sigmoid_values[2] / steepness;
+						v4 = ann->sigmoid_values[3] / steepness;
+						v5 = ann->sigmoid_values[4] / steepness;
+						v6 = ann->sigmoid_values[5] / steepness;
+						break;
+					case FANN_SIGMOID_SYMMETRIC:
+					case FANN_SIGMOID_SYMMETRIC_STEPWISE:
+						r1 = ann->sigmoid_symmetric_results[0];
+						r2 = ann->sigmoid_symmetric_results[1];
+						r3 = ann->sigmoid_symmetric_results[2];
+						r4 = ann->sigmoid_symmetric_results[3];
+						r5 = ann->sigmoid_symmetric_results[4];
+						r6 = ann->sigmoid_symmetric_results[5];
+						v1 = ann->sigmoid_symmetric_values[0] / steepness;
+						v2 = ann->sigmoid_symmetric_values[1] / steepness;
+						v3 = ann->sigmoid_symmetric_values[2] / steepness;
+						v4 = ann->sigmoid_symmetric_values[3] / steepness;
+						v5 = ann->sigmoid_symmetric_values[4] / steepness;
+						v6 = ann->sigmoid_symmetric_values[5] / steepness;
+						break;
+					case FANN_THRESHOLD:
+						break;
+				}
+			}
+
+			switch (activation_function)
+			{
+				case FANN_SIGMOID:
+				case FANN_SIGMOID_STEPWISE:
+					neuron_it->value =
+						(fann_type) fann_stepwise(v1, v2, v3, v4, v5, v6, r1, r2, r3, r4, r5, r6, 0,
+												  multiplier, neuron_sum);
+					break;
+				case FANN_SIGMOID_SYMMETRIC:
+				case FANN_SIGMOID_SYMMETRIC_STEPWISE:
+					neuron_it->value =
+						(fann_type) fann_stepwise(v1, v2, v3, v4, v5, v6, r1, r2, r3, r4, r5, r6,
+												  -multiplier, multiplier, neuron_sum);
+					break;
+				case FANN_THRESHOLD:
+					neuron_it->value = (fann_type) ((neuron_sum < 0) ? 0 : multiplier);
+					break;
+				case FANN_THRESHOLD_SYMMETRIC:
+					neuron_it->value = (fann_type) ((neuron_sum < 0) ? -multiplier : multiplier);
+					break;
+				case FANN_LINEAR:
+					neuron_it->value = neuron_sum;
+					break;
+				case FANN_LINEAR_PIECE:
+					neuron_it->value = (fann_type)((neuron_sum < 0) ? 0 : (neuron_sum > multiplier) ? multiplier : neuron_sum);
+					break;
+				case FANN_LINEAR_PIECE_SYMMETRIC:
+					neuron_it->value = (fann_type)((neuron_sum < -multiplier) ? -multiplier : (neuron_sum > multiplier) ? multiplier : neuron_sum);
+					break;
+				case FANN_ELLIOT:
+				case FANN_ELLIOT_SYMMETRIC:
+				case FANN_GAUSSIAN:
+				case FANN_GAUSSIAN_SYMMETRIC:
+				case FANN_GAUSSIAN_STEPWISE:
+				case FANN_SIN_SYMMETRIC:
+				case FANN_COS_SYMMETRIC:
+					fann_error((struct fann_error *) ann, FANN_E_CANT_USE_ACTIVATION);
+					break;
+			}
+			last_steepness = steepness;
+			last_activation_function = activation_function;
+#else
+			neuron_sum = fann_mult(steepness, neuron_sum);
+
+			max_sum = 150/steepness;
+			if(neuron_sum > max_sum)
+				neuron_sum = max_sum;
+			else if(neuron_sum < -max_sum)
+				neuron_sum = -max_sum;
+
+			neuron_it->sum = neuron_sum;
+
+			fann_activation_switch(activation_function, neuron_sum, neuron_it->value);
+#endif
+		}
+	}
+
+	/* set the output */
+	output = ann->output;
+	num_output = ann->num_output;
+	neurons = (ann->last_layer - 1)->first_neuron;
+	for(i = 0; i != num_output; i++)
+	{
+		output[i] = neurons[i].value;
+	}
+	return ann->output;
+}
+
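fann_run copies the inputs into the first layer, walks the remaining layers accumulating each neuron's weighted sum with the 4-way unrolled loops above, clamps the sum to +/-150/steepness before applying the activation, and returns a pointer into the network's own output buffer, so the result is only valid until the next call on the same network. Sketch (the network is untrained, so the printed value is arbitrary):

    #include <stdio.h>
    #include "doublefann.h"

    int main(void)
    {
        struct fann *ann = fann_create_standard(3, 2, 3, 1);
        fann_type input[2] = { -1.0, 1.0 };
        fann_type *out = fann_run(ann, input); /* points at ann->output */

        printf("out[0] = %f\n", out[0]);
        fann_destroy(ann);
        return 0;
    }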
+FANN_EXTERNAL void FANN_API fann_destroy(struct fann *ann)
+{
+	if(ann == NULL)
+		return;
+	fann_safe_free(ann->weights);
+	fann_safe_free(ann->connections);
+	fann_safe_free(ann->first_layer->first_neuron);
+	fann_safe_free(ann->first_layer);
+	fann_safe_free(ann->output);
+	fann_safe_free(ann->train_errors);
+	fann_safe_free(ann->train_slopes);
+	fann_safe_free(ann->prev_train_slopes);
+	fann_safe_free(ann->prev_steps);
+	fann_safe_free(ann->prev_weights_deltas);
+	fann_safe_free(ann->errstr);
+	fann_safe_free(ann->cascade_activation_functions);
+	fann_safe_free(ann->cascade_activation_steepnesses);
+
+#ifndef FIXEDFANN
+	fann_safe_free( ann->scale_mean_in );
+	fann_safe_free( ann->scale_deviation_in );
+	fann_safe_free( ann->scale_new_min_in );
+	fann_safe_free( ann->scale_factor_in );
+
+	fann_safe_free( ann->scale_mean_out );
+	fann_safe_free( ann->scale_deviation_out );
+	fann_safe_free( ann->scale_new_min_out );
+	fann_safe_free( ann->scale_factor_out );
+#endif
+
+	fann_safe_free(ann);
+}
+
+FANN_EXTERNAL void FANN_API fann_randomize_weights(struct fann *ann, fann_type min_weight,
+	fann_type max_weight)
+{
+	fann_type *last_weight;
+	fann_type *weights = ann->weights;
+
+	last_weight = weights + ann->total_connections;
+	for(; weights != last_weight; weights++)
+	{
+		*weights = (fann_type) (fann_rand(min_weight, max_weight));
+	}
+
+#ifndef FIXEDFANN
+	if(ann->prev_train_slopes != NULL)
+	{
+		fann_clear_train_arrays(ann);
+	}
+#endif
+}
+
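fann_randomize_weights draws every weight uniformly from [min_weight, max_weight] and, if training has already produced slope history, clears it so the next epoch starts fresh. Sketch:

    #include "doublefann.h"

    int main(void)
    {
        struct fann *ann = fann_create_standard(3, 2, 3, 1);
        fann_randomize_weights(ann, -0.1, 0.1); /* small symmetric start */
        fann_destroy(ann);
        return 0;
    }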
+/* deep copy of the fann structure */
+FANN_EXTERNAL struct fann* FANN_API fann_copy(struct fann* orig)
+{
+	struct fann* copy;
+	unsigned int num_layers = orig->last_layer - orig->first_layer;
+	struct fann_layer *orig_layer_it, *copy_layer_it;
+	unsigned int layer_size;
+	struct fann_neuron *last_neuron,*orig_neuron_it,*copy_neuron_it;
+	unsigned int i;
+	struct fann_neuron *orig_first_neuron,*copy_first_neuron;
+	unsigned int input_neuron;
+
+	copy = fann_allocate_structure(num_layers);
+	if (copy==NULL) {
+		fann_error((struct fann_error*)orig, FANN_E_CANT_ALLOCATE_MEM);
+		return NULL;
+	}
+	copy->errno_f = orig->errno_f;
+	if (orig->errstr)
+	{
+		copy->errstr = (char *) malloc(FANN_ERRSTR_MAX);
+		if (copy->errstr == NULL)
+		{
+			fann_destroy(copy);
+			return NULL;
+		}
+		strcpy(copy->errstr,orig->errstr);
+	}
+	copy->error_log = orig->error_log;
+
+	copy->learning_rate = orig->learning_rate;
+	copy->learning_momentum = orig->learning_momentum;
+	copy->connection_rate = orig->connection_rate;
+	copy->network_type = orig->network_type;
+	copy->num_MSE = orig->num_MSE;
+	copy->MSE_value = orig->MSE_value;
+	copy->num_bit_fail = orig->num_bit_fail;
+	copy->bit_fail_limit = orig->bit_fail_limit;
+	copy->train_error_function = orig->train_error_function;
+	copy->train_stop_function = orig->train_stop_function;
+	copy->callback = orig->callback;
+	copy->cascade_output_change_fraction = orig->cascade_output_change_fraction;
+	copy->cascade_output_stagnation_epochs = orig->cascade_output_stagnation_epochs;
+	copy->cascade_candidate_change_fraction = orig->cascade_candidate_change_fraction;
+	copy->cascade_candidate_stagnation_epochs = orig->cascade_candidate_stagnation_epochs;
+	copy->cascade_best_candidate = orig->cascade_best_candidate;
+	copy->cascade_candidate_limit = orig->cascade_candidate_limit;
+	copy->cascade_weight_multiplier = orig->cascade_weight_multiplier;
+	copy->cascade_max_out_epochs = orig->cascade_max_out_epochs;
+	copy->cascade_max_cand_epochs = orig->cascade_max_cand_epochs;
+	copy->user_data = orig->user_data;
+
+	/* copy cascade activation functions */
+	copy->cascade_activation_functions_count = orig->cascade_activation_functions_count;
+	copy->cascade_activation_functions = (enum fann_activationfunc_enum *)realloc(copy->cascade_activation_functions,
+		copy->cascade_activation_functions_count * sizeof(enum fann_activationfunc_enum));
+	if(copy->cascade_activation_functions == NULL)
+	{
+		fann_error((struct fann_error*)orig, FANN_E_CANT_ALLOCATE_MEM);
+		fann_destroy(copy);
+		return NULL;
+	}
+	memcpy(copy->cascade_activation_functions,orig->cascade_activation_functions,
+		copy->cascade_activation_functions_count * sizeof(enum fann_activationfunc_enum));
+
+	/* copy cascade activation steepnesses */
+	copy->cascade_activation_steepnesses_count = orig->cascade_activation_steepnesses_count;
+	copy->cascade_activation_steepnesses = (fann_type *)realloc(copy->cascade_activation_steepnesses, copy->cascade_activation_steepnesses_count * sizeof(fann_type));
+	if(copy->cascade_activation_steepnesses == NULL)
+	{
+		fann_error((struct fann_error*)orig, FANN_E_CANT_ALLOCATE_MEM);
+		fann_destroy(copy);
+		return NULL;
+	}
+	memcpy(copy->cascade_activation_steepnesses,orig->cascade_activation_steepnesses,copy->cascade_activation_steepnesses_count * sizeof(fann_type));
+
+	copy->cascade_num_candidate_groups = orig->cascade_num_candidate_groups;
+
+	/* copy candidate scores, if used */
+	if (orig->cascade_candidate_scores == NULL)
+	{
+		copy->cascade_candidate_scores = NULL;
+	}
+	else
+	{
+		copy->cascade_candidate_scores =
+			(fann_type *) malloc(fann_get_cascade_num_candidates(copy) * sizeof(fann_type));
+		if(copy->cascade_candidate_scores == NULL)
+		{
+			fann_error((struct fann_error *) orig, FANN_E_CANT_ALLOCATE_MEM);
+			fann_destroy(copy);
+			return NULL;
+		}
+		memcpy(copy->cascade_candidate_scores,orig->cascade_candidate_scores,fann_get_cascade_num_candidates(copy) * sizeof(fann_type));
+	}
+
+	copy->quickprop_decay = orig->quickprop_decay;
+	copy->quickprop_mu = orig->quickprop_mu;
+	copy->rprop_increase_factor = orig->rprop_increase_factor;
+	copy->rprop_decrease_factor = orig->rprop_decrease_factor;
+	copy->rprop_delta_min = orig->rprop_delta_min;
+	copy->rprop_delta_max = orig->rprop_delta_max;
+	copy->rprop_delta_zero = orig->rprop_delta_zero;
+
+	/* user_data is not deep copied. user should use fann_copy_with_user_data() for that */
+	copy->user_data = orig->user_data;
+
+#ifdef FIXEDFANN
+	copy->decimal_point = orig->decimal_point;
+	copy->multiplier = orig->multiplier;
+	memcpy(copy->sigmoid_results,orig->sigmoid_results,6*sizeof(fann_type));
+	memcpy(copy->sigmoid_values,orig->sigmoid_values,6*sizeof(fann_type));
+	memcpy(copy->sigmoid_symmetric_results,orig->sigmoid_symmetric_results,6*sizeof(fann_type));
+	memcpy(copy->sigmoid_symmetric_values,orig->sigmoid_symmetric_values,6*sizeof(fann_type));
+#endif
+
+
+	/* copy layer sizes, prepare for fann_allocate_neurons */
+	for (orig_layer_it = orig->first_layer, copy_layer_it = copy->first_layer;
+		 orig_layer_it != orig->last_layer; orig_layer_it++, copy_layer_it++)
+	{
+		layer_size = orig_layer_it->last_neuron - orig_layer_it->first_neuron;
+		copy_layer_it->first_neuron = NULL;
+		copy_layer_it->last_neuron = copy_layer_it->first_neuron + layer_size;
+		copy->total_neurons += layer_size;
+	}
+	copy->num_input = orig->num_input;
+	copy->num_output = orig->num_output;
+
+
+	/* copy scale parameters, when used */
+#ifndef FIXEDFANN
+	if (orig->scale_mean_in != NULL)
+	{
+		fann_allocate_scale(copy);
+		for (i=0; i < orig->num_input ; i++) {
+			copy->scale_mean_in[i] = orig->scale_mean_in[i];
+			copy->scale_deviation_in[i] = orig->scale_deviation_in[i];
+			copy->scale_new_min_in[i] = orig->scale_new_min_in[i];
+			copy->scale_factor_in[i] = orig->scale_factor_in[i];
+		}
+		for (i=0; i < orig->num_output ; i++) {
+			copy->scale_mean_out[i] = orig->scale_mean_out[i];
+			copy->scale_deviation_out[i] = orig->scale_deviation_out[i];
+			copy->scale_new_min_out[i] = orig->scale_new_min_out[i];
+			copy->scale_factor_out[i] = orig->scale_factor_out[i];
+		}
+	}
+#endif
+
+	/* copy the neurons */
+	fann_allocate_neurons(copy);
+	if (copy->errno_f == FANN_E_CANT_ALLOCATE_MEM)
+	{
+		fann_destroy(copy);
+		return NULL;
+	}
+	layer_size = (orig->last_layer-1)->last_neuron - (orig->last_layer-1)->first_neuron;
+	memcpy(copy->output,orig->output, layer_size * sizeof(fann_type));
+
+	last_neuron = (orig->last_layer - 1)->last_neuron;
+	for (orig_neuron_it = orig->first_layer->first_neuron, copy_neuron_it = copy->first_layer->first_neuron;
+		 orig_neuron_it != last_neuron; orig_neuron_it++, copy_neuron_it++)
+	{
+		memcpy(copy_neuron_it,orig_neuron_it,sizeof(struct fann_neuron));
+	}
+	/* copy the connections */
+	copy->total_connections = orig->total_connections;
+	fann_allocate_connections(copy);
+	if (copy->errno_f == FANN_E_CANT_ALLOCATE_MEM)
+	{
+		fann_destroy(copy);
+		return NULL;
+	}
+
+	orig_first_neuron = orig->first_layer->first_neuron;
+	copy_first_neuron = copy->first_layer->first_neuron;
+	for (i=0; i < orig->total_connections; i++)
+	{
+		copy->weights[i] = orig->weights[i];
+		input_neuron = orig->connections[i] - orig_first_neuron;
+		copy->connections[i] = copy_first_neuron + input_neuron;
+	}
+
+	if (orig->train_slopes)
+	{
+		copy->train_slopes = (fann_type *) malloc(copy->total_connections_allocated * sizeof(fann_type));
+		if (copy->train_slopes == NULL)
+		{
+			fann_error((struct fann_error *) orig, FANN_E_CANT_ALLOCATE_MEM);
+			fann_destroy(copy);
+			return NULL;
+		}
+		memcpy(copy->train_slopes,orig->train_slopes,copy->total_connections_allocated * sizeof(fann_type));
+	}
+
+	if (orig->prev_steps)
+	{
+		copy->prev_steps = (fann_type *) malloc(copy->total_connections_allocated * sizeof(fann_type));
+		if (copy->prev_steps == NULL)
+		{
+			fann_error((struct fann_error *) orig, FANN_E_CANT_ALLOCATE_MEM);
+			fann_destroy(copy);
+			return NULL;
+		}
+		memcpy(copy->prev_steps, orig->prev_steps, copy->total_connections_allocated * sizeof(fann_type));
+	}
+
+	if (orig->prev_train_slopes)
+	{
+		copy->prev_train_slopes = (fann_type *) malloc(copy->total_connections_allocated * sizeof(fann_type));
+		if (copy->prev_train_slopes == NULL)
+		{
+			fann_error((struct fann_error *) orig, FANN_E_CANT_ALLOCATE_MEM);
+			fann_destroy(copy);
+			return NULL;
+		}
+		memcpy(copy->prev_train_slopes,orig->prev_train_slopes, copy->total_connections_allocated * sizeof(fann_type));
+	}
+
+	if (orig->prev_weights_deltas)
+	{
+		copy->prev_weights_deltas = (fann_type *) malloc(copy->total_connections_allocated * sizeof(fann_type));
+		if(copy->prev_weights_deltas == NULL)
+		{
+			fann_error((struct fann_error *) orig, FANN_E_CANT_ALLOCATE_MEM);
+			fann_destroy(copy);
+			return NULL;
+		}
+		memcpy(copy->prev_weights_deltas, orig->prev_weights_deltas,copy->total_connections_allocated * sizeof(fann_type));
+	}
+
+	return copy;
+}
+
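fann_copy produces an independent network: topology, weights, and any allocated training arrays are duplicated, while user_data is copied only as a pointer (see the comment in the source). Sketch:

    #include "doublefann.h"

    int main(void)
    {
        struct fann *ann = fann_create_standard(3, 2, 3, 1);
        struct fann *clone = fann_copy(ann); /* independent weights and layout */

        fann_destroy(ann);                   /* the clone stays valid */
        fann_destroy(clone);
        return 0;
    }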
+FANN_EXTERNAL void FANN_API fann_print_connections(struct fann *ann)
+{
+	struct fann_layer *layer_it;
+	struct fann_neuron *neuron_it;
+	unsigned int i;
+	int value;
+	char *neurons;
+	unsigned int num_neurons = fann_get_total_neurons(ann) - fann_get_num_output(ann);
+
+	neurons = (char *) malloc(num_neurons + 1);
+	if(neurons == NULL)
+	{
+		fann_error(NULL, FANN_E_CANT_ALLOCATE_MEM);
+		return;
+	}
+	neurons[num_neurons] = 0;
+
+	printf("Layer / Neuron ");
+	for(i = 0; i < num_neurons; i++)
+	{
+		printf("%d", i % 10);
+	}
+	printf("\n");
+
+	for(layer_it = ann->first_layer + 1; layer_it != ann->last_layer; layer_it++)
+	{
+		for(neuron_it = layer_it->first_neuron; neuron_it != layer_it->last_neuron; neuron_it++)
+		{
+
+			memset(neurons, (int) '.', num_neurons);
+			for(i = neuron_it->first_con; i < neuron_it->last_con; i++)
+			{
+				if(ann->weights[i] < 0)
+				{
+#ifdef FIXEDFANN
+					value = (int) ((ann->weights[i] / (double) ann->multiplier) - 0.5);
+#else
+					value = (int) ((ann->weights[i]) - 0.5);
+#endif
+					if(value < -25)
+						value = -25;
+					neurons[ann->connections[i] - ann->first_layer->first_neuron] = (char)('a' - value);
+				}
+				else
+				{
+#ifdef FIXEDFANN
+					value = (int) ((ann->weights[i] / (double) ann->multiplier) + 0.5);
+#else
+					value = (int) ((ann->weights[i]) + 0.5);
+#endif
+					if(value > 25)
+						value = 25;
+					neurons[ann->connections[i] - ann->first_layer->first_neuron] = (char)('A' + value);
+				}
+			}
+			printf("L %3d / N %4d %s\n", (int)(layer_it - ann->first_layer),
+				   (int)(neuron_it - ann->first_layer->first_neuron), neurons);
+		}
+	}
+
+	free(neurons);
+}
+
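In the grid that fann_print_connections prints, each row is a destination neuron and each column a potential source. A '.' means no connection; otherwise the weight is rounded to an integer and clamped to +/-25, with 'A'..'Z' encoding 0..25 and 'b'..'z' encoding -1..-25.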
+/* Initialize the weights using Widrow + Nguyen's algorithm.
+ */
+FANN_EXTERNAL void FANN_API fann_init_weights(struct fann *ann, struct fann_train_data *train_data)
+{
+	fann_type smallest_inp, largest_inp;
+	unsigned int dat = 0, elem, num_connect, num_hidden_neurons;
+	struct fann_layer *layer_it;
+	struct fann_neuron *neuron_it, *last_neuron, *bias_neuron;
+
+#ifdef FIXEDFANN
+	unsigned int multiplier = ann->multiplier;
+#endif
+	float scale_factor;
+
+	for(smallest_inp = largest_inp = train_data->input[0][0]; dat < train_data->num_data; dat++)
+	{
+		for(elem = 0; elem < train_data->num_input; elem++)
+		{
+			if(train_data->input[dat][elem] < smallest_inp)
+				smallest_inp = train_data->input[dat][elem];
+			if(train_data->input[dat][elem] > largest_inp)
+				largest_inp = train_data->input[dat][elem];
+		}
+	}
+
+	num_hidden_neurons =
+		ann->total_neurons - (ann->num_input + ann->num_output +
+							  (ann->last_layer - ann->first_layer));
+	scale_factor =
+		(float) (pow
+				 ((double) (0.7f * (double) num_hidden_neurons),
+				  (double) (1.0f / (double) ann->num_input)) / (double) (largest_inp -
+																		 smallest_inp));
+
+#ifdef DEBUG
+	printf("Initializing weights with scale factor %f\n", scale_factor);
+#endif
+	bias_neuron = ann->first_layer->last_neuron - 1;
+	for(layer_it = ann->first_layer + 1; layer_it != ann->last_layer; layer_it++)
+	{
+		last_neuron = layer_it->last_neuron;
+
+		if(ann->network_type == FANN_NETTYPE_LAYER)
+		{
+			bias_neuron = (layer_it - 1)->last_neuron - 1;
+		}
+
+		for(neuron_it = layer_it->first_neuron; neuron_it != last_neuron; neuron_it++)
+		{
+			for(num_connect = neuron_it->first_con; num_connect < neuron_it->last_con;
+				num_connect++)
+			{
+				if(bias_neuron == ann->connections[num_connect])
+				{
+#ifdef FIXEDFANN
+					ann->weights[num_connect] =
+						(fann_type) fann_rand(-scale_factor, scale_factor * multiplier);
+#else
+					ann->weights[num_connect] = (fann_type) fann_rand(-scale_factor, scale_factor);
+#endif
+				}
+				else
+				{
+#ifdef FIXEDFANN
+					ann->weights[num_connect] = (fann_type) fann_rand(0, scale_factor * multiplier);
+#else
+					ann->weights[num_connect] = (fann_type) fann_rand(0, scale_factor);
+#endif
+				}
+			}
+		}
+	}
+
+#ifndef FIXEDFANN
+	if(ann->prev_train_slopes != NULL)
+	{
+		fann_clear_train_arrays(ann);
+	}
+#endif
+}
+
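fann_init_weights implements the Widrow-Nguyen scheme: it scans the training inputs for their range and draws weights scaled by 0.7 * h^(1/n) / (max - min), where h is the number of hidden neurons and n the number of inputs. Sketch; the training-file name is illustrative, and fann_read_train_from_file / fann_destroy_train come from the fann_train_data.c also added by this package:

    #include "doublefann.h"

    int main(void)
    {
        struct fann *ann = fann_create_standard(3, 2, 3, 1);
        struct fann_train_data *data = fann_read_train_from_file("xor.data");

        if(data != NULL)
        {
            fann_init_weights(ann, data); /* scaled to the input range */
            fann_destroy_train(data);
        }
        fann_destroy(ann);
        return 0;
    }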
FANN_EXTERNAL void FANN_API fann_print_parameters(struct fann *ann)
|
1212
|
+
{
|
1213
|
+
struct fann_layer *layer_it;
|
1214
|
+
#ifndef FIXEDFANN
|
1215
|
+
unsigned int i;
|
1216
|
+
#endif
|
1217
|
+
|
1218
|
+
printf("Input layer :%4d neurons, 1 bias\n", ann->num_input);
|
1219
|
+
for(layer_it = ann->first_layer + 1; layer_it != ann->last_layer - 1; layer_it++)
|
1220
|
+
{
|
1221
|
+
if(ann->network_type == FANN_NETTYPE_SHORTCUT)
|
1222
|
+
{
|
1223
|
+
printf(" Hidden layer :%4d neurons, 0 bias\n",
|
1224
|
+
(int)(layer_it->last_neuron - layer_it->first_neuron));
|
1225
|
+
}
|
1226
|
+
else
|
1227
|
+
{
|
1228
|
+
printf(" Hidden layer :%4d neurons, 1 bias\n",
|
1229
|
+
(int)(layer_it->last_neuron - layer_it->first_neuron - 1));
|
1230
|
+
}
|
1231
|
+
}
|
1232
|
+
printf("Output layer :%4d neurons\n", ann->num_output);
|
1233
|
+
printf("Total neurons and biases :%4d\n", fann_get_total_neurons(ann));
|
1234
|
+
printf("Total connections :%4d\n", ann->total_connections);
|
1235
|
+
printf("Connection rate :%8.3f\n", ann->connection_rate);
|
1236
|
+
printf("Network type : %s\n", FANN_NETTYPE_NAMES[ann->network_type]);
|
1237
|
+
#ifdef FIXEDFANN
|
1238
|
+
printf("Decimal point :%4d\n", ann->decimal_point);
|
1239
|
+
printf("Multiplier :%4d\n", ann->multiplier);
|
1240
|
+
#else
|
1241
|
+
printf("Training algorithm : %s\n", FANN_TRAIN_NAMES[ann->training_algorithm]);
|
1242
|
+
printf("Training error function : %s\n", FANN_ERRORFUNC_NAMES[ann->train_error_function]);
|
1243
|
+
printf("Training stop function : %s\n", FANN_STOPFUNC_NAMES[ann->train_stop_function]);
|
1244
|
+
#endif
|
1245
|
+
#ifdef FIXEDFANN
|
1246
|
+
printf("Bit fail limit :%4d\n", ann->bit_fail_limit);
|
1247
|
+
#else
|
1248
|
+
printf("Bit fail limit :%8.3f\n", ann->bit_fail_limit);
|
1249
|
+
printf("Learning rate :%8.3f\n", ann->learning_rate);
|
1250
|
+
printf("Learning momentum :%8.3f\n", ann->learning_momentum);
|
1251
|
+
printf("Quickprop decay :%11.6f\n", ann->quickprop_decay);
|
1252
|
+
printf("Quickprop mu :%8.3f\n", ann->quickprop_mu);
|
1253
|
+
printf("RPROP increase factor :%8.3f\n", ann->rprop_increase_factor);
|
1254
|
+
printf("RPROP decrease factor :%8.3f\n", ann->rprop_decrease_factor);
|
1255
|
+
printf("RPROP delta min :%8.3f\n", ann->rprop_delta_min);
|
1256
|
+
printf("RPROP delta max :%8.3f\n", ann->rprop_delta_max);
|
1257
|
+
printf("Cascade output change fraction :%11.6f\n", ann->cascade_output_change_fraction);
|
1258
|
+
printf("Cascade candidate change fraction :%11.6f\n", ann->cascade_candidate_change_fraction);
|
1259
|
+
printf("Cascade output stagnation epochs :%4d\n", ann->cascade_output_stagnation_epochs);
|
1260
|
+
printf("Cascade candidate stagnation epochs :%4d\n", ann->cascade_candidate_stagnation_epochs);
|
1261
|
+
printf("Cascade max output epochs :%4d\n", ann->cascade_max_out_epochs);
|
1262
|
+
printf("Cascade min output epochs :%4d\n", ann->cascade_min_out_epochs);
|
1263
|
+
printf("Cascade max candidate epochs :%4d\n", ann->cascade_max_cand_epochs);
|
1264
|
+
printf("Cascade min candidate epochs :%4d\n", ann->cascade_min_cand_epochs);
|
1265
|
+
printf("Cascade weight multiplier :%8.3f\n", ann->cascade_weight_multiplier);
|
1266
|
+
printf("Cascade candidate limit :%8.3f\n", ann->cascade_candidate_limit);
|
1267
|
+
for(i = 0; i < ann->cascade_activation_functions_count; i++)
|
1268
|
+
printf("Cascade activation functions[%d] : %s\n", i,
|
1269
|
+
FANN_ACTIVATIONFUNC_NAMES[ann->cascade_activation_functions[i]]);
|
1270
|
+
for(i = 0; i < ann->cascade_activation_steepnesses_count; i++)
|
1271
|
+
printf("Cascade activation steepnesses[%d] :%8.3f\n", i,
|
1272
|
+
ann->cascade_activation_steepnesses[i]);
|
1273
|
+
|
1274
|
+
printf("Cascade candidate groups :%4d\n", ann->cascade_num_candidate_groups);
|
1275
|
+
printf("Cascade no. of candidates :%4d\n", fann_get_cascade_num_candidates(ann));
|
1276
|
+
|
1277
|
+
/* TODO: dump scale parameters */
|
1278
|
+
#endif
|
1279
|
+
}
|
1280
|
+
|
1281
|
+
FANN_GET(unsigned int, num_input)
|
1282
|
+
FANN_GET(unsigned int, num_output)
|
1283
|
+
|
1284
|
+
FANN_EXTERNAL unsigned int FANN_API fann_get_total_neurons(struct fann *ann)
|
1285
|
+
{
|
1286
|
+
if(ann->network_type)
|
1287
|
+
{
|
1288
|
+
return ann->total_neurons;
|
1289
|
+
}
|
1290
|
+
else
|
1291
|
+
{
|
1292
|
+
/* -1, because there is always an unused bias neuron in the last layer */
|
1293
|
+
return ann->total_neurons - 1;
|
1294
|
+
}
|
1295
|
+
}
|
1296
|
+
|
1297
|
+
FANN_GET(unsigned int, total_connections)

FANN_EXTERNAL enum fann_nettype_enum FANN_API fann_get_network_type(struct fann *ann)
{
    /* Currently two types: LAYER = 0, SHORTCUT = 1 */
    /* Enum network_types must be set to match the return values */
    return ann->network_type;
}

FANN_EXTERNAL float FANN_API fann_get_connection_rate(struct fann *ann)
{
    return ann->connection_rate;
}

FANN_EXTERNAL unsigned int FANN_API fann_get_num_layers(struct fann *ann)
{
    return ann->last_layer - ann->first_layer;
}

FANN_EXTERNAL void FANN_API fann_get_layer_array(struct fann *ann, unsigned int *layers)
{
    struct fann_layer *layer_it;

    for(layer_it = ann->first_layer; layer_it != ann->last_layer; layer_it++) {
        unsigned int count = layer_it->last_neuron - layer_it->first_neuron;
        /* Remove the bias from the count of neurons. */
        switch (fann_get_network_type(ann)) {
            case FANN_NETTYPE_LAYER: {
                --count;
                break;
            }
            case FANN_NETTYPE_SHORTCUT: {
                /* The bias in the first layer is reused for all layers */
                if(layer_it == ann->first_layer)
                    --count;
                break;
            }
            default: {
                /* Unknown network type, assume no bias present */
                break;
            }
        }
        *layers++ = count;
    }
}

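/* Usage sketch (illustrative, not part of the library source): print the
   per-layer neuron counts, excluding bias neurons as computed above. */
static void example_dump_layers(struct fann *net)
{
    unsigned int j;
    unsigned int num_layers = fann_get_num_layers(net);
    unsigned int *layers = (unsigned int *) malloc(num_layers * sizeof(unsigned int));

    if(layers == NULL)
        return;
    fann_get_layer_array(net, layers);
    for(j = 0; j < num_layers; j++)
        printf("layer %u: %u neurons\n", j, layers[j]);
    free(layers);
}
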
FANN_EXTERNAL void FANN_API fann_get_bias_array(struct fann *ann, unsigned int *bias)
{
    struct fann_layer *layer_it;

    for(layer_it = ann->first_layer; layer_it != ann->last_layer; ++layer_it, ++bias) {
        switch (fann_get_network_type(ann)) {
            case FANN_NETTYPE_LAYER: {
                /* Report one bias in each layer except the last */
                if(layer_it != ann->last_layer - 1)
                    *bias = 1;
                else
                    *bias = 0;
                break;
            }
            case FANN_NETTYPE_SHORTCUT: {
                /* The bias in the first layer is reused for all layers */
                if(layer_it == ann->first_layer)
                    *bias = 1;
                else
                    *bias = 0;
                break;
            }
            default: {
                /* Unknown network type, assume no bias present */
                *bias = 0;
                break;
            }
        }
    }
}

FANN_EXTERNAL void FANN_API fann_get_connection_array(struct fann *ann, struct fann_connection *connections)
{
    struct fann_neuron *first_neuron;
    struct fann_layer *layer_it;
    struct fann_neuron *neuron_it;
    unsigned int idx;
    unsigned int source_index;
    unsigned int destination_index;

    first_neuron = ann->first_layer->first_neuron;

    source_index = 0;
    destination_index = 0;

    /* The following assumes that the last unused bias has no connections */

    /* for each layer */
    for(layer_it = ann->first_layer; layer_it != ann->last_layer; layer_it++) {
        /* for each neuron */
        for(neuron_it = layer_it->first_neuron; neuron_it != layer_it->last_neuron; neuron_it++) {
            /* for each connection */
            for(idx = neuron_it->first_con; idx < neuron_it->last_con; idx++) {
                /* Assign the source, destination and weight */
                connections->from_neuron = ann->connections[source_index] - first_neuron;
                connections->to_neuron = destination_index;
                connections->weight = ann->weights[source_index];

                connections++;
                source_index++;
            }
            destination_index++;
        }
    }
}

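/* Usage sketch (illustrative, not part of the library source): dump every
   connection as (from, to, weight). The caller sizes the array with
   fann_get_total_connections(). */
static void example_dump_connections(struct fann *net)
{
    unsigned int j;
    unsigned int num_conn = fann_get_total_connections(net);
    struct fann_connection *conn =
        (struct fann_connection *) malloc(num_conn * sizeof(struct fann_connection));

    if(conn == NULL)
        return;
    fann_get_connection_array(net, conn);
    for(j = 0; j < num_conn; j++)
        printf("%u -> %u : %f\n", conn[j].from_neuron, conn[j].to_neuron,
            (double) conn[j].weight);
    free(conn);
}
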
FANN_EXTERNAL void FANN_API fann_set_weight_array(struct fann *ann,
    struct fann_connection *connections, unsigned int num_connections)
{
    unsigned int idx;

    for(idx = 0; idx < num_connections; idx++) {
        fann_set_weight(ann, connections[idx].from_neuron,
            connections[idx].to_neuron, connections[idx].weight);
    }
}

FANN_EXTERNAL void FANN_API fann_set_weight(struct fann *ann,
    unsigned int from_neuron, unsigned int to_neuron, fann_type weight)
{
    struct fann_neuron *first_neuron;
    struct fann_layer *layer_it;
    struct fann_neuron *neuron_it;
    unsigned int idx;
    unsigned int source_index;
    unsigned int destination_index;

    first_neuron = ann->first_layer->first_neuron;

    source_index = 0;
    destination_index = 0;

    /* Find the connection with a simple brute-force search through the whole
       network, so that this function does not depend on the connection data
       structures. Nothing is done if the connection does not already exist. */

    /* for each layer */
    for(layer_it = ann->first_layer; layer_it != ann->last_layer; layer_it++) {
        /* for each neuron */
        for(neuron_it = layer_it->first_neuron; neuron_it != layer_it->last_neuron; neuron_it++) {
            /* for each connection */
            for(idx = neuron_it->first_con; idx < neuron_it->last_con; idx++) {
                /* If the source and destination neurons match, assign the weight */
                if(((int) from_neuron == ann->connections[source_index] - first_neuron) &&
                    (to_neuron == destination_index))
                {
                    ann->weights[source_index] = weight;
                }
                source_index++;
            }
            destination_index++;
        }
    }
}

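/* Note: fann_set_weight() is a brute-force O(total_connections) scan, so
   setting many weights one by one is quadratic; bulk updates are cheaper via
   fann_set_weight_array() with connections taken from
   fann_get_connection_array(). Usage sketch (the neuron indices are arbitrary
   example values, not part of the library source): */
static void example_nudge_weight(struct fann *net)
{
    fann_set_weight(net, 0 /* from */, 3 /* to */, (fann_type) 0.25);
}
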
FANN_GET_SET(void *, user_data)

#ifdef FIXEDFANN

FANN_GET(unsigned int, decimal_point)
FANN_GET(unsigned int, multiplier)

/* INTERNAL FUNCTION
   Adjust the stepwise functions (if used)
*/
void fann_update_stepwise(struct fann *ann)
{
    unsigned int i = 0;

    /* Calculate the parameters for the stepwise linear
     * sigmoid function fixed point.
     * Using a rewritten sigmoid function.
     * results 0.005, 0.05, 0.25, 0.75, 0.95, 0.995
     */
    ann->sigmoid_results[0] = fann_max((fann_type) (ann->multiplier / 200.0 + 0.5), 1);
    ann->sigmoid_results[1] = fann_max((fann_type) (ann->multiplier / 20.0 + 0.5), 1);
    ann->sigmoid_results[2] = fann_max((fann_type) (ann->multiplier / 4.0 + 0.5), 1);
    ann->sigmoid_results[3] = fann_min(ann->multiplier - (fann_type) (ann->multiplier / 4.0 + 0.5), ann->multiplier - 1);
    ann->sigmoid_results[4] = fann_min(ann->multiplier - (fann_type) (ann->multiplier / 20.0 + 0.5), ann->multiplier - 1);
    ann->sigmoid_results[5] = fann_min(ann->multiplier - (fann_type) (ann->multiplier / 200.0 + 0.5), ann->multiplier - 1);

    ann->sigmoid_symmetric_results[0] = fann_max((fann_type) ((ann->multiplier / 100.0) - ann->multiplier - 0.5),
        (fann_type) (1 - (fann_type) ann->multiplier));
    ann->sigmoid_symmetric_results[1] = fann_max((fann_type) ((ann->multiplier / 10.0) - ann->multiplier - 0.5),
        (fann_type) (1 - (fann_type) ann->multiplier));
    ann->sigmoid_symmetric_results[2] = fann_max((fann_type) ((ann->multiplier / 2.0) - ann->multiplier - 0.5),
        (fann_type) (1 - (fann_type) ann->multiplier));
    ann->sigmoid_symmetric_results[3] = fann_min(ann->multiplier - (fann_type) (ann->multiplier / 2.0 + 0.5),
        ann->multiplier - 1);
    ann->sigmoid_symmetric_results[4] = fann_min(ann->multiplier - (fann_type) (ann->multiplier / 10.0 + 0.5),
        ann->multiplier - 1);
    ann->sigmoid_symmetric_results[5] = fann_min(ann->multiplier - (fann_type) (ann->multiplier / 100.0 + 1.0),
        ann->multiplier - 1);

    for(i = 0; i < 6; i++)
    {
        ann->sigmoid_values[i] =
            (fann_type) (((log(ann->multiplier / (float) ann->sigmoid_results[i] - 1) *
                (float) ann->multiplier) / -2.0) * (float) ann->multiplier);
        ann->sigmoid_symmetric_values[i] =
            (fann_type) (((log((ann->multiplier - (float) ann->sigmoid_symmetric_results[i]) /
                ((float) ann->sigmoid_symmetric_results[i] + ann->multiplier)) *
                (float) ann->multiplier) / -2.0) * (float) ann->multiplier);
    }
}
#endif


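/* For intuition, the fixed-point breakpoints above correspond to inverting
   FANN's sigmoid f(x) = 1 / (1 + exp(-2 * s * x)): the input at which f
   reaches y satisfies x * s = ln(y / (1 - y)) / 2. A standalone floating-point
   sketch (not part of the library source): */
/*
#include <math.h>
#include <stdio.h>

int main(void)
{
    const double results[6] = { 0.005, 0.05, 0.25, 0.75, 0.95, 0.995 };
    int i;

    for(i = 0; i < 6; i++)
        printf("f = %.3f at x*s = %f\n", results[i],
            log(results[i] / (1.0 - results[i])) / 2.0);
    return 0;
}
*/
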
/* INTERNAL FUNCTION
   Allocates the main structure and sets some default values.
*/
struct fann *fann_allocate_structure(unsigned int num_layers)
{
    struct fann *ann;

    if(num_layers < 2)
    {
#ifdef DEBUG
        printf("less than 2 layers - ABORTING.\n");
#endif
        return NULL;
    }

    /* allocate and initialize the main network structure */
    ann = (struct fann *) malloc(sizeof(struct fann));
    if(ann == NULL)
    {
        fann_error(NULL, FANN_E_CANT_ALLOCATE_MEM);
        return NULL;
    }

    ann->errno_f = FANN_E_NO_ERROR;
    ann->error_log = fann_default_error_log;
    ann->errstr = NULL;
    ann->learning_rate = 0.7f;
    ann->learning_momentum = 0.0;
    ann->total_neurons = 0;
    ann->total_connections = 0;
    ann->num_input = 0;
    ann->num_output = 0;
    ann->train_errors = NULL;
    ann->train_slopes = NULL;
    ann->prev_steps = NULL;
    ann->prev_train_slopes = NULL;
    ann->prev_weights_deltas = NULL;
    ann->training_algorithm = FANN_TRAIN_RPROP;
    ann->num_MSE = 0;
    ann->MSE_value = 0;
    ann->num_bit_fail = 0;
    ann->bit_fail_limit = (fann_type) 0.35;
    ann->network_type = FANN_NETTYPE_LAYER;
    ann->train_error_function = FANN_ERRORFUNC_TANH;
    ann->train_stop_function = FANN_STOPFUNC_MSE;
    ann->callback = NULL;
    ann->user_data = NULL; /* User is responsible for deallocation */
    ann->weights = NULL;
    ann->connections = NULL;
    ann->output = NULL;
#ifndef FIXEDFANN
    ann->scale_mean_in = NULL;
    ann->scale_deviation_in = NULL;
    ann->scale_new_min_in = NULL;
    ann->scale_factor_in = NULL;
    ann->scale_mean_out = NULL;
    ann->scale_deviation_out = NULL;
    ann->scale_new_min_out = NULL;
    ann->scale_factor_out = NULL;
#endif

    /* variables used for cascade correlation (reasonable defaults) */
    ann->cascade_output_change_fraction = 0.01f;
    ann->cascade_candidate_change_fraction = 0.01f;
    ann->cascade_output_stagnation_epochs = 12;
    ann->cascade_candidate_stagnation_epochs = 12;
    ann->cascade_num_candidate_groups = 2;
    ann->cascade_weight_multiplier = (fann_type) 0.4;
    ann->cascade_candidate_limit = (fann_type) 1000.0;
    ann->cascade_max_out_epochs = 150;
    ann->cascade_max_cand_epochs = 150;
    ann->cascade_min_out_epochs = 50;
    ann->cascade_min_cand_epochs = 50;
    ann->cascade_candidate_scores = NULL;
    ann->cascade_activation_functions_count = 10;
    ann->cascade_activation_functions =
        (enum fann_activationfunc_enum *) calloc(ann->cascade_activation_functions_count,
            sizeof(enum fann_activationfunc_enum));
    if(ann->cascade_activation_functions == NULL)
    {
        fann_error(NULL, FANN_E_CANT_ALLOCATE_MEM);
        free(ann);
        return NULL;
    }

    ann->cascade_activation_functions[0] = FANN_SIGMOID;
    ann->cascade_activation_functions[1] = FANN_SIGMOID_SYMMETRIC;
    ann->cascade_activation_functions[2] = FANN_GAUSSIAN;
    ann->cascade_activation_functions[3] = FANN_GAUSSIAN_SYMMETRIC;
    ann->cascade_activation_functions[4] = FANN_ELLIOT;
    ann->cascade_activation_functions[5] = FANN_ELLIOT_SYMMETRIC;
    ann->cascade_activation_functions[6] = FANN_SIN_SYMMETRIC;
    ann->cascade_activation_functions[7] = FANN_COS_SYMMETRIC;
    ann->cascade_activation_functions[8] = FANN_SIN;
    ann->cascade_activation_functions[9] = FANN_COS;

    ann->cascade_activation_steepnesses_count = 4;
    ann->cascade_activation_steepnesses =
        (fann_type *) calloc(ann->cascade_activation_steepnesses_count, sizeof(fann_type));
    if(ann->cascade_activation_steepnesses == NULL)
    {
        fann_safe_free(ann->cascade_activation_functions);
        fann_error(NULL, FANN_E_CANT_ALLOCATE_MEM);
        free(ann);
        return NULL;
    }

    ann->cascade_activation_steepnesses[0] = (fann_type) 0.25;
    ann->cascade_activation_steepnesses[1] = (fann_type) 0.5;
    ann->cascade_activation_steepnesses[2] = (fann_type) 0.75;
    ann->cascade_activation_steepnesses[3] = (fann_type) 1.0;

    /* Variables for use with Quickprop training (reasonable defaults) */
    ann->quickprop_decay = -0.0001f;
    ann->quickprop_mu = 1.75;

    /* Variables for use with RPROP training (reasonable defaults) */
    ann->rprop_increase_factor = 1.2f;
    ann->rprop_decrease_factor = 0.5;
    ann->rprop_delta_min = 0.0;
    ann->rprop_delta_max = 50.0;
    ann->rprop_delta_zero = 0.1f;

    /* Variables for use with SARPROP training (reasonable defaults) */
    ann->sarprop_weight_decay_shift = -6.644f;
    ann->sarprop_step_error_threshold_factor = 0.1f;
    ann->sarprop_step_error_shift = 1.385f;
    ann->sarprop_temperature = 0.015f;
    ann->sarprop_epoch = 0;

    fann_init_error_data((struct fann_error *) ann);

#ifdef FIXEDFANN
    /* these values are only boring defaults, and should really
     * never be used, since the real values are always loaded from a file. */
    ann->decimal_point = 8;
    ann->multiplier = 256;
#endif

    /* allocate room for the layers */
    ann->first_layer = (struct fann_layer *) calloc(num_layers, sizeof(struct fann_layer));
    if(ann->first_layer == NULL)
    {
        fann_error(NULL, FANN_E_CANT_ALLOCATE_MEM);
        free(ann);
        return NULL;
    }

    ann->last_layer = ann->first_layer + num_layers;

    return ann;
}

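/* The defaults set above can be overridden after creation through the public
   API. Usage sketch (illustrative values, not part of the library source): */
static void example_override_defaults(void)
{
    struct fann *net = fann_create_standard(3, 2, 3, 1);

    if(net != NULL)
    {
        fann_set_learning_rate(net, 0.5f);                       /* default 0.7 */
        fann_set_training_algorithm(net, FANN_TRAIN_QUICKPROP);  /* default FANN_TRAIN_RPROP */
        fann_set_bit_fail_limit(net, (fann_type) 0.1);           /* default 0.35 */
        fann_destroy(net);
    }
}
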
/* INTERNAL FUNCTION
   Allocates room for the scaling parameters.
*/
int fann_allocate_scale(struct fann *ann)
{
    /* TODO: this should only be allocated when needed */
#ifndef FIXEDFANN
    unsigned int i = 0;
#define SCALE_ALLOCATE( what, where, default_value )    \
    ann->what##_##where = (float *) calloc(             \
        ann->num_##where##put,                          \
        sizeof( float )                                 \
    );                                                  \
    if( ann->what##_##where == NULL )                   \
    {                                                   \
        fann_error( NULL, FANN_E_CANT_ALLOCATE_MEM );   \
        fann_destroy( ann );                            \
        return 1;                                       \
    }                                                   \
    for( i = 0; i < ann->num_##where##put; i++ )        \
        ann->what##_##where[ i ] = ( default_value );

    SCALE_ALLOCATE( scale_mean,      in,  0.0 )
    SCALE_ALLOCATE( scale_deviation, in,  1.0 )
    SCALE_ALLOCATE( scale_new_min,   in, -1.0 )
    SCALE_ALLOCATE( scale_factor,    in,  1.0 )

    SCALE_ALLOCATE( scale_mean,      out,  0.0 )
    SCALE_ALLOCATE( scale_deviation, out,  1.0 )
    SCALE_ALLOCATE( scale_new_min,   out, -1.0 )
    SCALE_ALLOCATE( scale_factor,    out,  1.0 )
#undef SCALE_ALLOCATE
#endif
    return 0;
}

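/* Written out for readability, a single invocation of the macro above, e.g.
   SCALE_ALLOCATE( scale_mean, in, 0.0 ), expands (via token pasting of
   what=scale_mean and where=in, so num_##where##put becomes num_input) to: */
/*
    ann->scale_mean_in = (float *) calloc(ann->num_input, sizeof(float));
    if(ann->scale_mean_in == NULL)
    {
        fann_error(NULL, FANN_E_CANT_ALLOCATE_MEM);
        fann_destroy(ann);
        return 1;
    }
    for(i = 0; i < ann->num_input; i++)
        ann->scale_mean_in[i] = 0.0;
*/
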
/* INTERNAL FUNCTION
   Allocates room for the neurons.
*/
void fann_allocate_neurons(struct fann *ann)
{
    struct fann_layer *layer_it;
    struct fann_neuron *neurons;
    unsigned int num_neurons_so_far = 0;
    unsigned int num_neurons = 0;

    /* all the neurons are allocated in one long array (calloc clears mem) */
    neurons = (struct fann_neuron *) calloc(ann->total_neurons, sizeof(struct fann_neuron));
    ann->total_neurons_allocated = ann->total_neurons;

    if(neurons == NULL)
    {
        fann_error((struct fann_error *) ann, FANN_E_CANT_ALLOCATE_MEM);
        return;
    }

    for(layer_it = ann->first_layer; layer_it != ann->last_layer; layer_it++)
    {
        num_neurons = layer_it->last_neuron - layer_it->first_neuron;
        layer_it->first_neuron = neurons + num_neurons_so_far;
        layer_it->last_neuron = layer_it->first_neuron + num_neurons;
        num_neurons_so_far += num_neurons;
    }

    /* num_neurons still holds the size of the last (output) layer here */
    ann->output = (fann_type *) calloc(num_neurons, sizeof(fann_type));
    if(ann->output == NULL)
    {
        fann_error((struct fann_error *) ann, FANN_E_CANT_ALLOCATE_MEM);
        return;
    }
}

/* INTERNAL FUNCTION
   Allocate room for the connections.
*/
void fann_allocate_connections(struct fann *ann)
{
    ann->weights = (fann_type *) calloc(ann->total_connections, sizeof(fann_type));
    if(ann->weights == NULL)
    {
        fann_error((struct fann_error *) ann, FANN_E_CANT_ALLOCATE_MEM);
        return;
    }
    ann->total_connections_allocated = ann->total_connections;

    /* TODO: make special cases for all places where the connections array
     * is used, so that it is not needed for fully connected networks.
     */
    ann->connections =
        (struct fann_neuron **) calloc(ann->total_connections_allocated,
            sizeof(struct fann_neuron *));
    if(ann->connections == NULL)
    {
        fann_error((struct fann_error *) ann, FANN_E_CANT_ALLOCATE_MEM);
        return;
    }
}


/* INTERNAL FUNCTION
   Seed the random function.
*/
void fann_seed_rand()
{
#ifndef _WIN32
    FILE *fp = fopen("/dev/urandom", "r");
    unsigned int foo;
    struct timeval t;

    if(!fp)
    {
        gettimeofday(&t, NULL);
        foo = t.tv_usec;
#ifdef DEBUG
        printf("unable to open /dev/urandom\n");
#endif
    }
    else
    {
        if(fread(&foo, sizeof(foo), 1, fp) != 1)
        {
            gettimeofday(&t, NULL);
            foo = t.tv_usec;
#ifdef DEBUG
            printf("unable to read from /dev/urandom\n");
#endif
        }
        fclose(fp);
    }
    srand(foo);
#else
    /* COMPAT_TIME REPLACEMENT */
    srand(GetTickCount());
#endif
}
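
/* On POSIX systems the seed comes from /dev/urandom with a gettimeofday()
   fallback; on Windows it is GetTickCount(). Since the library draws its
   random weights through rand(), reproducible experiments can re-seed with a
   fixed value before randomizing. Usage sketch (illustrative, not part of the
   library source): */
static void example_reproducible_weights(struct fann *net)
{
    srand(12345); /* fixed seed for repeatable runs */
    fann_randomize_weights(net, (fann_type) -0.1, (fann_type) 0.1);
}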