tomz-liblinear-ruby-swig 0.1.0
Sign up to get free protection for your applications and to get access to all the features.
- data/AUTHORS +2 -0
- data/COPYING +24 -0
- data/History.txt +3 -0
- data/Manifest.txt +20 -0
- data/README.rdoc +74 -0
- data/Rakefile +46 -0
- data/ext/blas.h +25 -0
- data/ext/blasp.h +430 -0
- data/ext/daxpy.c +49 -0
- data/ext/ddot.c +50 -0
- data/ext/dnrm2.c +62 -0
- data/ext/dscal.c +44 -0
- data/ext/extconf.rb +17 -0
- data/ext/liblinear_wrap.cxx +4525 -0
- data/ext/linear.cpp +1450 -0
- data/ext/linear.h +69 -0
- data/ext/tron.cpp +214 -0
- data/ext/tron.h +32 -0
- data/lib/linear.rb +349 -0
- data/lib/linear_cv.rb +36 -0
- metadata +85 -0
data/ext/linear.cpp
ADDED
@@ -0,0 +1,1450 @@
|
|
1
|
+
#include <math.h>
|
2
|
+
#include <stdio.h>
|
3
|
+
#include <stdlib.h>
|
4
|
+
#include <string.h>
|
5
|
+
#include <stdarg.h>
|
6
|
+
#include "linear.h"
|
7
|
+
#include "tron.h"
|
8
|
+
// Shorthand used for compact +1/-1 label storage.
typedef signed char schar;
// Generic swap/min/max helpers; min/max are guarded so platform macros win.
template <class T> inline void swap(T& x, T& y) { T t=x; x=y; y=t; }
#ifndef min
template <class T> inline T min(T x,T y) { return (x<y)?x:y; }
#endif
#ifndef max
template <class T> inline T max(T x,T y) { return (x>y)?x:y; }
#endif
// Allocate a new T[n] and bitwise-copy n elements from src into it.
// NOTE(review): memcpy uses sizeof(T)*n, so this is only safe when S and T
// have identical size/layout (all call sites here use double/double) — confirm
// before reusing with mixed types.
template <class S, class T> inline void clone(T*& dst, S* src, int n)
{
	dst = new T[n];
	memcpy((void *)dst,(void *)src,sizeof(T)*n);
}
// malloc wrapper used throughout the C-style allocation paths.
#define Malloc(type,n) (type *)malloc((n)*sizeof(type))
#define INF HUGE_VAL
|
23
|
+
|
24
|
+
#if 1
// Runtime switch for progress output; 0 silences info()/info_flush().
int info_on = 0;
// printf-style progress logger, active only when info_on == 1.
static void info(const char *fmt,...)
{
	va_list ap;
	if (info_on==1) {
		va_start(ap,fmt);
		vprintf(fmt,ap);
		va_end(ap);
	}
}
// Flush stdout so dots/stars appear promptly during long optimizations.
static void info_flush()
{
	if (info_on==1) fflush(stdout);
}
#else
// Fix: take const char* like the active branch above — the previous
// `char *fmt` signature is inconsistent and rejects string literals in
// standard C++ (conversion removed in C++11).
static void info(const char *fmt,...) {}
static void info_flush() {}
#endif
|
43
|
+
|
44
|
+
// L2-regularized logistic regression objective for the TRON trust-region
// Newton solver: provides function value, gradient, and Hessian-vector
// products over the sparse design matrix stored in prob.
class l2_lr_fun : public function
{
public:
	// Cp/Cn: penalty for positive/negative examples respectively.
	l2_lr_fun(const problem *prob, double Cp, double Cn);
	~l2_lr_fun();

	double fun(double *w);
	// NOTE: grad() reuses z computed by the preceding fun() call.
	void grad(double *w, double *g);
	void Hv(double *s, double *Hs);

	int get_nr_variable(void);

private:
	void Xv(double *v, double *Xv);
	void XTv(double *v, double *XTv);

	double *C;  // per-example penalty (Cp or Cn), length l
	double *z;  // scratch: holds X*w, then gradient coefficients
	double *D;  // scratch: sigma*(1-sigma) diagonal for the Hessian
	const problem *prob;  // training data, not owned
};
|
65
|
+
|
66
|
+
l2_lr_fun::l2_lr_fun(const problem *prob, double Cp, double Cn)
{
	// Keep a handle on the training data and size the scratch buffers.
	this->prob = prob;

	const int l = prob->l;
	int *y = prob->y;

	z = new double[l];
	D = new double[l];
	C = new double[l];

	// Per-example penalty: positives get Cp, everything else Cn.
	for (int idx = 0; idx < l; idx++)
		C[idx] = (y[idx] == 1) ? Cp : Cn;
}
|
86
|
+
|
87
|
+
l2_lr_fun::~l2_lr_fun()
{
	// Release the scratch buffers allocated by the constructor.
	delete[] C;
	delete[] D;
	delete[] z;
}
|
93
|
+
|
94
|
+
|
95
|
+
// Objective value: 0.5*||w||^2 + sum_i C_i * log(1 + exp(-y_i * w^T x_i)).
// Side effect: z is left holding X*w, which grad() relies on.
double l2_lr_fun::fun(double *w)
{
	int i;
	double f=0;
	int *y=prob->y;
	int l=prob->l;
	int n=prob->n;

	Xv(w, z);
	for(i=0;i<l;i++)
	{
		double yz = y[i]*z[i];
		// Two algebraically-equal forms chosen by sign so exp() never
		// overflows (its argument is always <= 0).
		if (yz >= 0)
			f += C[i]*log(1 + exp(-yz));
		else
			f += C[i]*(-yz+log(1 + exp(yz)));
	}
	// Scale loss by 2, add ||w||^2, then halve — yields 0.5*||w||^2 + loss.
	f = 2*f;
	for(i=0;i<n;i++)
		f += w[i]*w[i];
	f /= 2.0;

	return(f);
}
|
119
|
+
|
120
|
+
// Gradient: g = w + X^T * (C .* (sigma(y.*Xw) - 1) .* y).
// PRECONDITION: fun(w) must have been called with the same w first — z is
// expected to contain X*w on entry (TRON guarantees this call order).
// Side effect: fills D with sigma*(1-sigma) for the later Hv() calls.
void l2_lr_fun::grad(double *w, double *g)
{
	int i;
	int *y=prob->y;
	int l=prob->l;
	int n=prob->n;

	for(i=0;i<l;i++)
	{
		z[i] = 1/(1 + exp(-y[i]*z[i]));  // sigmoid of the margin
		D[i] = z[i]*(1-z[i]);            // Hessian diagonal term
		z[i] = C[i]*(z[i]-1)*y[i];       // per-example gradient coefficient
	}
	XTv(z, g);

	// Add the regularization term's gradient, w.
	for(i=0;i<n;i++)
		g[i] = w[i] + g[i];
}
|
138
|
+
|
139
|
+
int l2_lr_fun::get_nr_variable(void)
|
140
|
+
{
|
141
|
+
return prob->n;
|
142
|
+
}
|
143
|
+
|
144
|
+
// Hessian-vector product: Hs = s + X^T * (C .* D .* (X*s)).
// Relies on D being populated by the most recent grad() call.
void l2_lr_fun::Hv(double *s, double *Hs)
{
	int i;
	int l=prob->l;
	int n=prob->n;
	double *wa = new double[l];

	Xv(s, wa);
	for(i=0;i<l;i++)
		wa[i] = C[i]*D[i]*wa[i];

	XTv(wa, Hs);
	// Identity part comes from the L2 regularizer.
	for(i=0;i<n;i++)
		Hs[i] = s[i] + Hs[i];
	delete[] wa;
}
|
160
|
+
|
161
|
+
void l2_lr_fun::Xv(double *v, double *Xv)
|
162
|
+
{
|
163
|
+
int i;
|
164
|
+
int l=prob->l;
|
165
|
+
feature_node **x=prob->x;
|
166
|
+
|
167
|
+
for(i=0;i<l;i++)
|
168
|
+
{
|
169
|
+
feature_node *s=x[i];
|
170
|
+
Xv[i]=0;
|
171
|
+
while(s->index!=-1)
|
172
|
+
{
|
173
|
+
Xv[i]+=v[s->index-1]*s->value;
|
174
|
+
s++;
|
175
|
+
}
|
176
|
+
}
|
177
|
+
}
|
178
|
+
|
179
|
+
// Transposed product: XTv = X^T v, accumulating each row's contribution
// into the (1-based) feature slots. Output is zeroed first.
void l2_lr_fun::XTv(double *v, double *XTv)
{
	int i;
	int l=prob->l;
	int n=prob->n;
	feature_node **x=prob->x;

	for(i=0;i<n;i++)
		XTv[i]=0;
	for(i=0;i<l;i++)
	{
		feature_node *s=x[i];
		while(s->index!=-1)
		{
			XTv[s->index-1]+=v[i]*s->value;
			s++;
		}
	}
}
|
198
|
+
|
199
|
+
// L2-regularized L2-loss (squared hinge) SVM objective for TRON.
// Only examples inside the margin (index set I) contribute to the gradient
// and the generalized Hessian, hence the sub* variants below.
class l2loss_svm_fun : public function
{
public:
	// Cp/Cn: penalty for positive/negative examples respectively.
	l2loss_svm_fun(const problem *prob, double Cp, double Cn);
	~l2loss_svm_fun();

	double fun(double *w);
	// NOTE: grad() reuses z (margins) computed by the preceding fun() call.
	void grad(double *w, double *g);
	void Hv(double *s, double *Hs);

	int get_nr_variable(void);

private:
	void Xv(double *v, double *Xv);
	// Restricted products over the active (margin-violating) rows in I.
	void subXv(double *v, double *Xv);
	void subXTv(double *v, double *XTv);

	double *C;   // per-example penalty (Cp or Cn), length l
	double *z;   // scratch: margins y_i*w^T x_i, then gradient coefficients
	double *D;   // allocated for symmetry with l2_lr_fun (unused here)
	int *I;      // indices of examples with margin < 1
	int sizeI;   // number of valid entries in I
	const problem *prob;  // training data, not owned
};
|
223
|
+
|
224
|
+
l2loss_svm_fun::l2loss_svm_fun(const problem *prob, double Cp, double Cn)
{
	// Remember the training data and allocate per-example scratch space.
	this->prob = prob;

	const int l = prob->l;
	int *y = prob->y;

	z = new double[l];
	D = new double[l];
	C = new double[l];
	I = new int[l];

	// Per-example penalty: positives get Cp, everything else Cn.
	for (int idx = 0; idx < l; idx++)
		C[idx] = (y[idx] == 1) ? Cp : Cn;
}
|
245
|
+
|
246
|
+
l2loss_svm_fun::~l2loss_svm_fun()
{
	// Release all scratch buffers owned by this object.
	delete[] I;
	delete[] C;
	delete[] D;
	delete[] z;
}
|
253
|
+
|
254
|
+
// Objective: 0.5*||w||^2 + sum_i C_i * max(0, 1 - y_i w^T x_i)^2.
// Side effect: z is left holding the margins y.*（X*w), reused by grad().
double l2loss_svm_fun::fun(double *w)
{
	int i;
	double f=0;
	int *y=prob->y;
	int l=prob->l;
	int n=prob->n;

	Xv(w, z);
	for(i=0;i<l;i++)
	{
		z[i] = y[i]*z[i];        // margin of example i
		double d = 1-z[i];
		if (d > 0)               // only margin violations incur loss
			f += C[i]*d*d;
	}
	// Scale loss by 2, add ||w||^2, then halve — yields 0.5*||w||^2 + loss.
	f = 2*f;
	for(i=0;i<n;i++)
		f += w[i]*w[i];
	f /= 2.0;

	return(f);
}
|
277
|
+
|
278
|
+
// Gradient: g = w + 2 * X_I^T * (C_I .* y_I .* (z_I - 1)), where I is the
// set of examples with margin < 1.
// PRECONDITION: z must hold the margins from the preceding fun(w) call.
// The loop compacts coefficients into z[0..sizeI) in place; this is safe
// because sizeI <= i at every step, so no unread entry is overwritten.
void l2loss_svm_fun::grad(double *w, double *g)
{
	int i;
	int *y=prob->y;
	int l=prob->l;
	int n=prob->n;

	sizeI = 0;
	for (i=0;i<l;i++)
		if (z[i] < 1)
		{
			z[sizeI] = C[i]*y[i]*(z[i]-1);
			I[sizeI] = i;
			sizeI++;
		}
	subXTv(z, g);

	// Add regularizer gradient w; factor 2 comes from the squared hinge.
	for(i=0;i<n;i++)
		g[i] = w[i] + 2*g[i];
}
|
298
|
+
|
299
|
+
int l2loss_svm_fun::get_nr_variable(void)
|
300
|
+
{
|
301
|
+
return prob->n;
|
302
|
+
}
|
303
|
+
|
304
|
+
// Generalized Hessian-vector product: Hs = s + 2 * X_I^T (C_I .* (X_I s)),
// restricted to the active set I established by the last grad() call.
void l2loss_svm_fun::Hv(double *s, double *Hs)
{
	int i;
	int l=prob->l;
	int n=prob->n;
	double *wa = new double[l];  // only the first sizeI entries are used

	subXv(s, wa);
	for(i=0;i<sizeI;i++)
		wa[i] = C[I[i]]*wa[i];

	subXTv(wa, Hs);
	// Identity part from the regularizer; factor 2 from the squared hinge.
	for(i=0;i<n;i++)
		Hs[i] = s[i] + 2*Hs[i];
	delete[] wa;
}
|
320
|
+
|
321
|
+
void l2loss_svm_fun::Xv(double *v, double *Xv)
|
322
|
+
{
|
323
|
+
int i;
|
324
|
+
int l=prob->l;
|
325
|
+
feature_node **x=prob->x;
|
326
|
+
|
327
|
+
for(i=0;i<l;i++)
|
328
|
+
{
|
329
|
+
feature_node *s=x[i];
|
330
|
+
Xv[i]=0;
|
331
|
+
while(s->index!=-1)
|
332
|
+
{
|
333
|
+
Xv[i]+=v[s->index-1]*s->value;
|
334
|
+
s++;
|
335
|
+
}
|
336
|
+
}
|
337
|
+
}
|
338
|
+
|
339
|
+
// Like Xv, but only over the sizeI active rows listed in I; Xv[i] receives
// the product for the i-th *active* example (compact indexing).
void l2loss_svm_fun::subXv(double *v, double *Xv)
{
	int i;
	feature_node **x=prob->x;

	for(i=0;i<sizeI;i++)
	{
		feature_node *s=x[I[i]];
		Xv[i]=0;
		while(s->index!=-1)
		{
			Xv[i]+=v[s->index-1]*s->value;
			s++;
		}
	}
}
|
355
|
+
|
356
|
+
// Transposed product restricted to the active set: XTv = X_I^T v, where v
// is compactly indexed (v[i] belongs to row I[i]). Output is zeroed first.
void l2loss_svm_fun::subXTv(double *v, double *XTv)
{
	int i;
	int n=prob->n;
	feature_node **x=prob->x;

	for(i=0;i<n;i++)
		XTv[i]=0;
	for(i=0;i<sizeI;i++)
	{
		feature_node *s=x[I[i]];
		while(s->index!=-1)
		{
			XTv[s->index-1]+=v[i]*s->value;
			s++;
		}
	}
}
|
374
|
+
|
375
|
+
// A coordinate descent algorithm for
// multi-class support vector machines by Crammer and Singer
//
// min_{\alpha} 0.5 \sum_m ||w_m(\alpha)||^2 + \sum_i \sum_m e^m_i alpha^m_i
// s.t. \alpha^m_i <= C^m_i \forall m,i , \sum_m \alpha^m_i=0 \forall i
//
// where e^m_i = 0 if y_i = m,
// e^m_i = 1 if y_i != m,
// C^m_i = C if m = y_i,
// C^m_i = 0 if m != y_i,
// and w_m(\alpha) = \sum_i \alpha^m_i x_i
//
// Given:
// x, y, C
// eps is the stopping tolerance
//
// solution will be put in w
class Solver_MCSVM_CS
{
public:
	// C points to per-class weighted upper bounds; caller retains ownership.
	Solver_MCSVM_CS(const problem *prob, int nr_class, double *C, double eps=0.1, int max_iter=100000);
	~Solver_MCSVM_CS();
	// Runs the dual coordinate descent; w must hold n*nr_class doubles.
	void Solve(double *w);
private:
	// Closed-form solution of the per-example sub-problem over B.
	void solve_sub_problem(double A_i, int yi, double C_yi, int active_i, double *alpha_new);
	// True when variable m of example yi can be removed from the active set.
	bool be_shrunken(int m, int yi, double alpha_i, double minG);
	double *B, *C, *G;  // B/G: per-class scratch (owned); C: bounds (not owned)
	int n, l;           // feature count, example count
	int nr_class;
	int max_iter;
	double eps;         // stopping tolerance
	const problem *prob;
};
|
408
|
+
|
409
|
+
Solver_MCSVM_CS::Solver_MCSVM_CS(const problem *prob, int nr_class, double *C, double eps, int max_iter)
{
	// Cache problem dimensions and solver settings.
	this->prob = prob;
	this->n = prob->n;
	this->l = prob->l;
	this->nr_class = nr_class;
	this->eps = eps;
	this->max_iter = max_iter;
	this->C = C;  // weighted upper bounds, owned by the caller

	// Per-class scratch vectors used by Solve()/solve_sub_problem().
	this->B = new double[nr_class];
	this->G = new double[nr_class];
}
|
421
|
+
|
422
|
+
Solver_MCSVM_CS::~Solver_MCSVM_CS()
{
	// Only B and G are owned; C belongs to the caller.
	delete[] G;
	delete[] B;
}
|
427
|
+
|
428
|
+
// qsort comparator that orders doubles in DESCENDING order (largest first),
// as required by solve_sub_problem's beta computation.
int compare_double(const void *a, const void *b)
{
	const double lhs = *(const double *)a;
	const double rhs = *(const double *)b;
	if (lhs > rhs)
		return -1;
	return (lhs < rhs) ? 1 : 0;
}
|
436
|
+
|
437
|
+
// Solve the single-example sub-problem in closed form (Crammer-Singer):
// given linear terms B over the active_i classes, diagonal A_i, true class
// yi and its bound C_yi, produce the optimal alpha_new for those classes.
void Solver_MCSVM_CS::solve_sub_problem(double A_i, int yi, double C_yi, int active_i, double *alpha_new)
{
	int r;
	double *D;

	// Work on a copy of B so the member buffer survives for the caller.
	clone(D, B, active_i);
	if(yi < active_i)
		D[yi] += A_i*C_yi;
	// Sort descending; compare_double orders largest-first.
	qsort(D, active_i, sizeof(double), compare_double);

	// Find the breakpoint r and threshold beta of the projection.
	double beta = D[0] - A_i*C_yi;
	for(r=1;r<active_i && beta<r*D[r];r++)
		beta += D[r];

	beta /= r;
	// Clip each class at its bound (C_yi for the true class, 0 otherwise).
	for(r=0;r<active_i;r++)
	{
		if(r == yi)
			alpha_new[r] = min(C_yi, (beta-B[r])/A_i);
		else
			alpha_new[r] = min((double)0, (beta - B[r])/A_i);
	}
	delete[] D;
}
|
461
|
+
|
462
|
+
bool Solver_MCSVM_CS::be_shrunken(int m, int yi, double alpha_i, double minG)
|
463
|
+
{
|
464
|
+
double bound = 0;
|
465
|
+
if(m == yi)
|
466
|
+
bound = C[yi];
|
467
|
+
if(alpha_i == bound && G[m] < minG)
|
468
|
+
return true;
|
469
|
+
return false;
|
470
|
+
}
|
471
|
+
|
472
|
+
// Dual coordinate descent for the Crammer-Singer multi-class SVM.
// On exit w holds the nr_class weight vectors interleaved per feature
// (w[(feat)*nr_class + class]). Uses two-level shrinking: a per-example
// active class set (active_size_i/alpha_index) and a global active example
// set (active_size/index), with eps_shrink relaxing toward eps.
void Solver_MCSVM_CS::Solve(double *w)
{
	int i, m, s;
	int iter = 0;
	double *alpha = new double[l*nr_class];       // dual variables, row-major per example
	double *alpha_new = new double[nr_class];     // sub-problem solution buffer
	int *index = new int[l];                      // permutation of active examples
	double *QD = new double[l];                   // squared norms ||x_i||^2
	int *d_ind = new int[nr_class];               // classes whose alpha changed
	double *d_val = new double[nr_class];         // corresponding deltas
	int *alpha_index = new int[nr_class*l];       // per-example active class list
	int *y_index = new int[l];                    // position of true class in that list
	int active_size = l;
	int *active_size_i = new int[l];              // active class count per example
	double eps_shrink = max(10.0*eps, 1.0); // stopping tolerance for shrinking
	bool start_from_all = true;
	// initial
	for(i=0;i<l*nr_class;i++)
		alpha[i] = 0;
	for(i=0;i<n*nr_class;i++)
		w[i] = 0;
	for(i=0;i<l;i++)
	{
		for(m=0;m<nr_class;m++)
			alpha_index[i*nr_class+m] = m;
		feature_node *xi = prob->x[i];
		QD[i] = 0;
		while(xi->index != -1)
		{
			QD[i] += (xi->value)*(xi->value);
			xi++;
		}
		active_size_i[i] = nr_class;
		y_index[i] = prob->y[i];
		index[i] = i;
	}

	while(iter < max_iter)
	{
		double stopping = -INF;
		// Shuffle the active examples (Fisher-Yates) for randomized sweeps.
		for(i=0;i<active_size;i++)
		{
			int j = i+rand()%(active_size-i);
			swap(index[i], index[j]);
		}
		for(s=0;s<active_size;s++)
		{
			i = index[s];
			double Ai = QD[i];
			double *alpha_i = &alpha[i*nr_class];
			int *alpha_index_i = &alpha_index[i*nr_class];

			if(Ai > 0)   // skip all-zero rows (no update possible)
			{
				// Gradient over active classes: e^m_i + w_m^T x_i.
				for(m=0;m<active_size_i[i];m++)
					G[m] = 1;
				if(y_index[i] < active_size_i[i])
					G[y_index[i]] = 0;

				feature_node *xi = prob->x[i];
				while(xi->index!= -1)
				{
					double *w_i = &w[(xi->index-1)*nr_class];
					for(m=0;m<active_size_i[i];m++)
						G[m] += w_i[alpha_index_i[m]]*(xi->value);
					xi++;
				}

				// Track min/max gradient for shrinking and stopping.
				double minG = INF;
				double maxG = -INF;
				for(m=0;m<active_size_i[i];m++)
				{
					if(alpha_i[alpha_index_i[m]] < 0 && G[m] < minG)
						minG = G[m];
					if(G[m] > maxG)
						maxG = G[m];
				}
				if(y_index[i] < active_size_i[i])
					if(alpha_i[prob->y[i]] < C[prob->y[i]] && G[y_index[i]] < minG)
						minG = G[y_index[i]];

				// Shrink bounded classes with small gradient, compacting the
				// active list by swapping from its tail.
				for(m=0;m<active_size_i[i];m++)
				{
					if(be_shrunken(m, y_index[i], alpha_i[alpha_index_i[m]], minG))
					{
						active_size_i[i]--;
						while(active_size_i[i]>m)
						{
							if(!be_shrunken(active_size_i[i], y_index[i],
											alpha_i[alpha_index_i[active_size_i[i]]], minG))
							{
								swap(alpha_index_i[m], alpha_index_i[active_size_i[i]]);
								swap(G[m], G[active_size_i[i]]);
								if(y_index[i] == active_size_i[i])
									y_index[i] = m;
								else if(y_index[i] == m)
									y_index[i] = active_size_i[i];
								break;
							}
							active_size_i[i]--;
						}
					}
				}

				// With <= 1 active class the example cannot move: deactivate it.
				if(active_size_i[i] <= 1)
				{
					active_size--;
					swap(index[s], index[active_size]);
					s--;
					continue;
				}

				if(maxG-minG <= 1e-12)
					continue;
				else
					stopping = max(maxG - minG, stopping);

				// Linear terms for the closed-form sub-problem.
				for(m=0;m<active_size_i[i];m++)
					B[m] = G[m] - Ai*alpha_i[alpha_index_i[m]] ;

				solve_sub_problem(Ai, y_index[i], C[prob->y[i]], active_size_i[i], alpha_new);
				// Record nonzero deltas, then apply them to w in one sparse pass.
				int nz_d = 0;
				for(m=0;m<active_size_i[i];m++)
				{
					double d = alpha_new[m] - alpha_i[alpha_index_i[m]];
					alpha_i[alpha_index_i[m]] = alpha_new[m];
					if(fabs(d) >= 1e-12)
					{
						d_ind[nz_d] = alpha_index_i[m];
						d_val[nz_d] = d;
						nz_d++;
					}
				}

				xi = prob->x[i];
				while(xi->index != -1)
				{
					double *w_i = &w[(xi->index-1)*nr_class];
					for(m=0;m<nz_d;m++)
						w_i[d_ind[m]] += d_val[m]*xi->value;
					xi++;
				}
			}
		}

		iter++;
		if(iter % 10 == 0)
		{
			info(".");
			info_flush();
		}

		// Converged on the shrunken problem: either finish (if we already
		// checked the full problem) or reactivate everything and tighten.
		if(stopping < eps_shrink)
		{
			if(stopping < eps && start_from_all == true)
				break;
			else
			{
				active_size = l;
				for(i=0;i<l;i++)
					active_size_i[i] = nr_class;
				info("*"); info_flush();
				eps_shrink = max(eps_shrink/2, eps);
				start_from_all = true;
			}
		}
		else
			start_from_all = false;
	}

	info("\noptimization finished, #iter = %d\n",iter);
	if (iter >= max_iter)
		info("Warning: reaching max number of iterations\n");

	// calculate objective value
	double v = 0;
	int nSV = 0;
	for(i=0;i<n*nr_class;i++)
		v += w[i]*w[i];
	v = 0.5*v;
	for(i=0;i<l*nr_class;i++)
	{
		v += alpha[i];
		if(fabs(alpha[i]) > 0)
			nSV++;
	}
	for(i=0;i<l;i++)
		v -= alpha[i*nr_class+prob->y[i]];
	info("Objective value = %lf\n",v);
	info("nSV = %d\n",nSV);

	delete [] alpha;
	delete [] alpha_new;
	delete [] index;
	delete [] QD;
	delete [] d_ind;
	delete [] d_val;
	delete [] alpha_index;
	delete [] y_index;
	delete [] active_size_i;
}
|
673
|
+
|
674
|
+
// A coordinate descent algorithm for
|
675
|
+
// L1-loss and L2-loss SVM dual problems
|
676
|
+
//
|
677
|
+
// min_\alpha 0.5(\alpha^T (Q + D)\alpha) - e^T \alpha,
|
678
|
+
// s.t. 0 <= alpha_i <= upper_bound_i,
|
679
|
+
//
|
680
|
+
// where Qij = yi yj xi^T xj and
|
681
|
+
// D is a diagonal matrix
|
682
|
+
//
|
683
|
+
// In L1-SVM case:
|
684
|
+
// upper_bound_i = Cp if y_i = 1
|
685
|
+
// upper_bound_i = Cn if y_i = -1
|
686
|
+
// D_ii = 0
|
687
|
+
// In L2-Svm case:
|
688
|
+
// upper_bound_i = INF
|
689
|
+
// D_ii = 1/(2*Cp) if y_i = 1
|
690
|
+
// D_ii = 1/(2*Cn) if y_i = -1
|
691
|
+
//
|
692
|
+
// Given:
|
693
|
+
// x, y, Cp, Cn
|
694
|
+
// eps is the stopping tolerance
|
695
|
+
//
|
696
|
+
// solution will be put in w
|
697
|
+
|
698
|
+
// Dual coordinate descent for L1-loss / L2-loss linear SVC (see the block
// comment above). Shrinks bounded variables using the previous sweep's
// projected-gradient extrema (PGmax_old/PGmin_old); on convergence of the
// shrunken problem it reactivates all variables for a final full check.
// Solution is written into w (length n).
static void solve_linear_c_svc(
	const problem *prob, double *w, double eps,
	double Cp, double Cn, int solver_type)
{
	int l = prob->l;
	int n = prob->n;
	int i, s, iter = 0;
	double C, d, G;
	double *QD = new double[l];   // Q_ii = D_ii + ||x_i||^2
	int max_iter = 20000;
	int *index = new int[l];      // permutation of active variables
	double *alpha = new double[l];
	schar *y = new schar[l];      // labels normalized to +1/-1
	int active_size = l;

	// PG: projected gradient, for shrinking and stopping
	double PG;
	double PGmax_old = INF;
	double PGmin_old = -INF;
	double PGmax_new, PGmin_new;

	// default solver_type: L2LOSS_SVM_DUAL
	double diag_p = 0.5/Cp, diag_n = 0.5/Cn;
	double upper_bound_p = INF, upper_bound_n = INF;
	if(solver_type == L1LOSS_SVM_DUAL)
	{
		// L1 loss: no diagonal term, alpha bounded by C.
		diag_p = 0; diag_n = 0;
		upper_bound_p = Cp; upper_bound_n = Cn;
	}

	for(i=0; i<n; i++)
		w[i] = 0;
	for(i=0; i<l; i++)
	{
		alpha[i] = 0;
		if(prob->y[i] > 0)
		{
			y[i] = +1;
			QD[i] = diag_p;
		}
		else
		{
			y[i] = -1;
			QD[i] = diag_n;
		}

		feature_node *xi = prob->x[i];
		while (xi->index != -1)
		{
			QD[i] += (xi->value)*(xi->value);
			xi++;
		}
		index[i] = i;
	}

	while (iter < max_iter)
	{
		PGmax_new = -INF;
		PGmin_new = INF;

		// Randomize the sweep order (Fisher-Yates shuffle).
		for (i=0; i<active_size; i++)
		{
			int j = i+rand()%(active_size-i);
			swap(index[i], index[j]);
		}

		for (s=0;s<active_size;s++)
		{
			i = index[s];
			G = 0;
			schar yi = y[i];

			// G = y_i * w^T x_i - 1 + D_ii * alpha_i  (dual gradient).
			feature_node *xi = prob->x[i];
			while(xi->index!= -1)
			{
				G += w[xi->index-1]*(xi->value);
				xi++;
			}
			G = G*yi-1;

			if(yi == 1)
			{
				C = upper_bound_p;
				G += alpha[i]*diag_p;
			}
			else
			{
				C = upper_bound_n;
				G += alpha[i]*diag_n;
			}

			// Project the gradient; shrink variables stuck at a bound whose
			// gradient points further outside (relative to last sweep's extrema).
			PG = 0;
			if (alpha[i] == 0)
			{
				if (G > PGmax_old)
				{
					active_size--;
					swap(index[s], index[active_size]);
					s--;
					continue;
				}
				else if (G < 0)
					PG = G;
			}
			else if (alpha[i] == C)
			{
				if (G < PGmin_old)
				{
					active_size--;
					swap(index[s], index[active_size]);
					s--;
					continue;
				}
				else if (G > 0)
					PG = G;
			}
			else
				PG = G;

			PGmax_new = max(PGmax_new, PG);
			PGmin_new = min(PGmin_new, PG);

			if(fabs(PG) > 1.0e-12)
			{
				// One-variable Newton step, clipped to [0, C]; update w
				// incrementally through the sparse row.
				double alpha_old = alpha[i];
				alpha[i] = min(max(alpha[i] - G/QD[i], 0.0), C);
				d = (alpha[i] - alpha_old)*yi;
				xi = prob->x[i];
				while (xi->index != -1)
				{
					w[xi->index-1] += d*xi->value;
					xi++;
				}
			}
		}

		iter++;
		if(iter % 10 == 0)
		{
			info(".");
			info_flush();
		}

		if(PGmax_new - PGmin_new <= eps)
		{
			// Converged: accept only if no variable is shrunk; otherwise
			// reactivate everything and re-verify on the full problem.
			if(active_size == l)
				break;
			else
			{
				active_size = l;
				info("*"); info_flush();
				PGmax_old = INF;
				PGmin_old = -INF;
				continue;
			}
		}
		PGmax_old = PGmax_new;
		PGmin_old = PGmin_new;
		// Keep the shrinking thresholds meaningful (strictly signed).
		if (PGmax_old <= 0)
			PGmax_old = INF;
		if (PGmin_old >= 0)
			PGmin_old = -INF;
	}

	info("\noptimization finished, #iter = %d\n",iter);
	if (iter >= max_iter)
		info("Warning: reaching max number of iterations\n");

	// calculate objective value

	double v = 0;
	int nSV = 0;
	for(i=0; i<n; i++)
		v += w[i]*w[i];
	for(i=0; i<l; i++)
	{
		if (y[i] == 1)
			v += alpha[i]*(alpha[i]*diag_p - 2);
		else
			v += alpha[i]*(alpha[i]*diag_n - 2);
		if(alpha[i] > 0)
			++nSV;
	}
	info("Objective value = %lf\n",v/2);
	info("nSV = %d\n",nSV);

	delete [] QD;
	delete [] alpha;
	delete [] y;
	delete [] index;
}
|
889
|
+
|
890
|
+
// label: label name, start: begin of each class, count: #data of classes, perm: indices to the original data
|
891
|
+
// perm, length l, must be allocated before calling this subroutine
|
892
|
+
// Discover the distinct class labels in prob->y (in first-appearance order)
// and compute a permutation grouping examples by class.
// Outputs (caller must free label/start/count): nr_class_ret, label_ret,
// start_ret (begin offset of each class), count_ret (#examples per class),
// and perm (length l, preallocated by the caller).
void group_classes(const problem *prob, int *nr_class_ret, int **label_ret, int **start_ret, int **count_ret, int *perm)
{
	int l = prob->l;
	int max_nr_class = 16;   // initial capacity; doubled on demand
	int nr_class = 0;
	int *label = Malloc(int,max_nr_class);
	int *count = Malloc(int,max_nr_class);
	int *data_label = Malloc(int,l);   // class index of each example
	int i;

	for(i=0;i<l;i++)
	{
		int this_label = prob->y[i];
		int j;
		// Linear scan — fine for the small class counts expected here.
		for(j=0;j<nr_class;j++)
		{
			if(this_label == label[j])
			{
				++count[j];
				break;
			}
		}
		data_label[i] = j;
		if(j == nr_class)   // unseen label: append, growing arrays if full
		{
			if(nr_class == max_nr_class)
			{
				max_nr_class *= 2;
				// NOTE(review): realloc results are unchecked; on OOM this
				// leaks and dereferences NULL — consider checking, as with
				// every allocation in this file.
				label = (int *)realloc(label,max_nr_class*sizeof(int));
				count = (int *)realloc(count,max_nr_class*sizeof(int));
			}
			label[nr_class] = this_label;
			count[nr_class] = 1;
			++nr_class;
		}
	}

	// Prefix sums give each class's start; fill perm by bucket placement.
	int *start = Malloc(int,nr_class);
	start[0] = 0;
	for(i=1;i<nr_class;i++)
		start[i] = start[i-1]+count[i-1];
	for(i=0;i<l;i++)
	{
		perm[start[data_label[i]]] = i;
		++start[data_label[i]];
	}
	// The placement pass advanced start[]; rebuild the offsets.
	start[0] = 0;
	for(i=1;i<nr_class;i++)
		start[i] = start[i-1]+count[i-1];

	*nr_class_ret = nr_class;
	*label_ret = label;
	*start_ret = start;
	*count_ret = count;
	free(data_label);
}
|
948
|
+
|
949
|
+
// Train a single binary classifier into w, dispatching on solver_type.
// Cp/Cn weight the positive/negative classes. For the TRON-based primal
// solvers the tolerance is scaled by min(#pos,#neg)/l so unbalanced data
// gets an effectively tighter stopping criterion.
void train_one(const problem *prob, const parameter *param, double *w, double Cp, double Cn)
{
	double eps=param->eps;
	int pos = 0;
	int neg = 0;
	for(int i=0;i<prob->l;i++)
		if(prob->y[i]==+1)
			pos++;
	neg = prob->l - pos;

	function *fun_obj=NULL;
	switch(param->solver_type)
	{
		case L2_LR:
		{
			// Primal logistic regression via trust-region Newton.
			fun_obj=new l2_lr_fun(prob, Cp, Cn);
			TRON tron_obj(fun_obj, eps*min(pos,neg)/prob->l);
			tron_obj.tron(w);
			delete fun_obj;
			break;
		}
		case L2LOSS_SVM:
		{
			// Primal L2-loss SVM via trust-region Newton.
			fun_obj=new l2loss_svm_fun(prob, Cp, Cn);
			TRON tron_obj(fun_obj, eps*min(pos,neg)/prob->l);
			tron_obj.tron(w);
			delete fun_obj;
			break;
		}
		case L2LOSS_SVM_DUAL:
			solve_linear_c_svc(prob, w, eps, Cp, Cn, L2LOSS_SVM_DUAL);
			break;
		case L1LOSS_SVM_DUAL:
			solve_linear_c_svc(prob, w, eps, Cp, Cn, L1LOSS_SVM_DUAL);
			break;
		default:
			fprintf(stderr, "Error: unknown solver_type\n");
			break;
	}
}
|
989
|
+
|
990
|
+
//
|
991
|
+
// Interface functions
|
992
|
+
//
|
993
|
+
// Train a model on prob with the given parameters.
// Groups examples by class, applies per-class C weights, then either runs
// the Crammer-Singer multi-class solver, a single binary solver (2 classes),
// or one-vs-rest (k classes, weights interleaved per feature in model_->w).
// Returns a heap-allocated model; free it with destroy_model().
model* train(const problem *prob, const parameter *param)
{
	int i,j;
	int l = prob->l;
	int n = prob->n;
	model *model_ = Malloc(model,1);

	// With a bias term, the last feature column is the bias, not a feature.
	if(prob->bias>=0)
		model_->nr_feature=n-1;
	else
		model_->nr_feature=n;
	model_->param = *param;
	model_->bias = prob->bias;

	int nr_class;
	int *label = NULL;
	int *start = NULL;
	int *count = NULL;
	int *perm = Malloc(int,l);

	// group training data of the same class
	group_classes(prob,&nr_class,&label,&start,&count,perm);

	model_->nr_class=nr_class;
	model_->label = Malloc(int,nr_class);
	for(i=0;i<nr_class;i++)
		model_->label[i] = label[i];

	// calculate weighted C
	double *weighted_C = Malloc(double, nr_class);
	for(i=0;i<nr_class;i++)
		weighted_C[i] = param->C;
	for(i=0;i<param->nr_weight;i++)
	{
		for(j=0;j<nr_class;j++)
			if(param->weight_label[i] == label[j])
				break;
		if(j == nr_class)
			fprintf(stderr,"warning: class label %d specified in weight is not found\n", param->weight_label[i]);
		else
			weighted_C[j] *= param->weight[i];
	}

	// constructing the subproblem: rows reordered so classes are contiguous
	feature_node **x = Malloc(feature_node *,l);
	for(i=0;i<l;i++)
		x[i] = prob->x[perm[i]];

	int k;
	problem sub_prob;
	sub_prob.l = l;
	sub_prob.n = n;
	sub_prob.x = Malloc(feature_node *,sub_prob.l);
	sub_prob.y = Malloc(int,sub_prob.l);

	for(k=0; k<sub_prob.l; k++)
		sub_prob.x[k] = x[k];

	// multi-class svm by Crammer and Singer
	if(param->solver_type == MCSVM_CS)
	{
		model_->w=Malloc(double, n*nr_class);
		// Relabel y to the class indices 0..nr_class-1 expected by the solver.
		for(i=0;i<nr_class;i++)
			for(j=start[i];j<start[i]+count[i];j++)
				sub_prob.y[j] = i;
		Solver_MCSVM_CS Solver(&sub_prob, nr_class, weighted_C, param->eps);
		Solver.Solve(model_->w);
	}
	else
	{
		if(nr_class == 2)
		{
			// Single binary problem: first class +1, second -1.
			model_->w=Malloc(double, n);

			int e0 = start[0]+count[0];
			k=0;
			for(; k<e0; k++)
				sub_prob.y[k] = +1;
			for(; k<sub_prob.l; k++)
				sub_prob.y[k] = -1;

			train_one(&sub_prob, param, &model_->w[0], weighted_C[0], weighted_C[1]);
		}
		else
		{
			// One-vs-rest: train each class against the others, scattering
			// each result into the interleaved layout w[feat*nr_class+class].
			model_->w=Malloc(double, n*nr_class);
			double *w=Malloc(double, n);
			for(i=0;i<nr_class;i++)
			{
				int si = start[i];
				int ei = si+count[i];

				k=0;
				for(; k<si; k++)
					sub_prob.y[k] = -1;
				for(; k<ei; k++)
					sub_prob.y[k] = +1;
				for(; k<sub_prob.l; k++)
					sub_prob.y[k] = -1;

				train_one(&sub_prob, param, w, weighted_C[i], param->C);

				for(int j=0;j<n;j++)
					model_->w[j*nr_class+i] = w[j];
			}
			free(w);
		}

	}

	free(x);
	free(label);
	free(start);
	free(count);
	free(perm);
	free(sub_prob.x);
	free(sub_prob.y);
	free(weighted_C);
	return model_;
}
|
1113
|
+
|
1114
|
+
void destroy_model(struct model *model_)
|
1115
|
+
{
|
1116
|
+
if(model_->w != NULL)
|
1117
|
+
free(model_->w);
|
1118
|
+
if(model_->label != NULL)
|
1119
|
+
free(model_->label);
|
1120
|
+
free(model_);
|
1121
|
+
}
|
1122
|
+
|
1123
|
+
// Names written to / parsed from model files. The order MUST match the
// solver_type enum in linear.h (indices are used directly); the trailing
// NULL terminates load_model's scan.
const char *solver_type_table[]=
{
	"L2_LR", "L2LOSS_SVM_DUAL", "L2LOSS_SVM","L1LOSS_SVM_DUAL","MCSVM_CS", NULL
};
|
1127
|
+
|
1128
|
+
// Write model_ to a text file readable by load_model.
// Weights are emitted one feature per line, nr_w values each (1 for binary
// non-MCSVM models, nr_class otherwise). Returns 0 on success, -1 on any
// open/write/close failure.
// NOTE(review): "%.16g" is locale-dependent (decimal separator); confirm a
// "C"-locale environment when interoperating across locales.
int save_model(const char *model_file_name, const struct model *model_)
{
	int i;
	int nr_feature=model_->nr_feature;
	int n;
	const parameter& param = model_->param;

	// A non-negative bias means an extra appended weight per class.
	if(model_->bias>=0)
		n=nr_feature+1;
	else
		n=nr_feature;
	FILE *fp = fopen(model_file_name,"w");
	if(fp==NULL) return -1;

	int nr_w;
	if(model_->nr_class==2 && model_->param.solver_type != MCSVM_CS)
		nr_w=1;   // binary one-vs-one stores a single weight vector
	else
		nr_w=model_->nr_class;

	fprintf(fp, "solver_type %s\n", solver_type_table[param.solver_type]);
	fprintf(fp, "nr_class %d\n", model_->nr_class);
	fprintf(fp, "label");
	for(i=0; i<model_->nr_class; i++)
		fprintf(fp, " %d", model_->label[i]);
	fprintf(fp, "\n");

	fprintf(fp, "nr_feature %d\n", nr_feature);

	fprintf(fp, "bias %.16g\n", model_->bias);

	fprintf(fp, "w\n");
	for(i=0; i<n; i++)
	{
		int j;
		for(j=0; j<nr_w; j++)
			fprintf(fp, "%.16g ", model_->w[i*nr_w+j]);
		fprintf(fp, "\n");
	}

	// Report failure if any buffered write failed or the close fails.
	if (ferror(fp) != 0 || fclose(fp) != 0) return -1;
	else return 0;
}
|
1171
|
+
|
1172
|
+
struct model *load_model(const char *model_file_name)
|
1173
|
+
{
|
1174
|
+
FILE *fp = fopen(model_file_name,"r");
|
1175
|
+
if(fp==NULL) return NULL;
|
1176
|
+
|
1177
|
+
int i;
|
1178
|
+
int nr_feature;
|
1179
|
+
int n;
|
1180
|
+
int nr_class;
|
1181
|
+
double bias;
|
1182
|
+
model *model_ = Malloc(model,1);
|
1183
|
+
parameter& param = model_->param;
|
1184
|
+
|
1185
|
+
model_->label = NULL;
|
1186
|
+
|
1187
|
+
char cmd[81];
|
1188
|
+
while(1)
|
1189
|
+
{
|
1190
|
+
fscanf(fp,"%80s",cmd);
|
1191
|
+
if(strcmp(cmd,"solver_type")==0)
|
1192
|
+
{
|
1193
|
+
fscanf(fp,"%80s",cmd);
|
1194
|
+
int i;
|
1195
|
+
for(i=0;solver_type_table[i];i++)
|
1196
|
+
{
|
1197
|
+
if(strcmp(solver_type_table[i],cmd)==0)
|
1198
|
+
{
|
1199
|
+
param.solver_type=i;
|
1200
|
+
break;
|
1201
|
+
}
|
1202
|
+
}
|
1203
|
+
if(solver_type_table[i] == NULL)
|
1204
|
+
{
|
1205
|
+
fprintf(stderr,"unknown solver type.\n");
|
1206
|
+
free(model_->label);
|
1207
|
+
free(model_);
|
1208
|
+
return NULL;
|
1209
|
+
}
|
1210
|
+
}
|
1211
|
+
else if(strcmp(cmd,"nr_class")==0)
|
1212
|
+
{
|
1213
|
+
fscanf(fp,"%d",&nr_class);
|
1214
|
+
model_->nr_class=nr_class;
|
1215
|
+
}
|
1216
|
+
else if(strcmp(cmd,"nr_feature")==0)
|
1217
|
+
{
|
1218
|
+
fscanf(fp,"%d",&nr_feature);
|
1219
|
+
model_->nr_feature=nr_feature;
|
1220
|
+
}
|
1221
|
+
else if(strcmp(cmd,"bias")==0)
|
1222
|
+
{
|
1223
|
+
fscanf(fp,"%lf",&bias);
|
1224
|
+
model_->bias=bias;
|
1225
|
+
}
|
1226
|
+
else if(strcmp(cmd,"w")==0)
|
1227
|
+
{
|
1228
|
+
break;
|
1229
|
+
}
|
1230
|
+
else if(strcmp(cmd,"label")==0)
|
1231
|
+
{
|
1232
|
+
int nr_class = model_->nr_class;
|
1233
|
+
model_->label = Malloc(int,nr_class);
|
1234
|
+
for(int i=0;i<nr_class;i++)
|
1235
|
+
fscanf(fp,"%d",&model_->label[i]);
|
1236
|
+
}
|
1237
|
+
else
|
1238
|
+
{
|
1239
|
+
fprintf(stderr,"unknown text in model file: [%s]\n",cmd);
|
1240
|
+
free(model_);
|
1241
|
+
return NULL;
|
1242
|
+
}
|
1243
|
+
}
|
1244
|
+
|
1245
|
+
nr_feature=model_->nr_feature;
|
1246
|
+
if(model_->bias>=0)
|
1247
|
+
n=nr_feature+1;
|
1248
|
+
else
|
1249
|
+
n=nr_feature;
|
1250
|
+
|
1251
|
+
int nr_w;
|
1252
|
+
if(nr_class==2 && param.solver_type != MCSVM_CS)
|
1253
|
+
nr_w = 1;
|
1254
|
+
else
|
1255
|
+
nr_w = nr_class;
|
1256
|
+
|
1257
|
+
model_->w=Malloc(double, n*nr_w);
|
1258
|
+
for(i=0; i<n; i++)
|
1259
|
+
{
|
1260
|
+
int j;
|
1261
|
+
for(j=0; j<nr_w; j++)
|
1262
|
+
fscanf(fp, "%lf ", &model_->w[i*nr_w+j]);
|
1263
|
+
fscanf(fp, "\n");
|
1264
|
+
}
|
1265
|
+
if (ferror(fp) != 0 || fclose(fp) != 0) return NULL;
|
1266
|
+
|
1267
|
+
return model_;
|
1268
|
+
}
|
1269
|
+
|
1270
|
+
int predict_values(const struct model *model_, const struct feature_node *x, double *dec_values)
|
1271
|
+
{
|
1272
|
+
int idx;
|
1273
|
+
int n;
|
1274
|
+
if(model_->bias>=0)
|
1275
|
+
n=model_->nr_feature+1;
|
1276
|
+
else
|
1277
|
+
n=model_->nr_feature;
|
1278
|
+
double *w=model_->w;
|
1279
|
+
int nr_class=model_->nr_class;
|
1280
|
+
int i;
|
1281
|
+
int nr_w;
|
1282
|
+
if(nr_class==2 && model_->param.solver_type != MCSVM_CS)
|
1283
|
+
nr_w = 1;
|
1284
|
+
else
|
1285
|
+
nr_w = nr_class;
|
1286
|
+
|
1287
|
+
const feature_node *lx=x;
|
1288
|
+
for(i=0;i<nr_w;i++)
|
1289
|
+
dec_values[i] = 0;
|
1290
|
+
for(; (idx=lx->index)!=-1; lx++)
|
1291
|
+
{
|
1292
|
+
// the dimension of testing data may exceed that of training
|
1293
|
+
if(idx<=n)
|
1294
|
+
for(i=0;i<nr_w;i++)
|
1295
|
+
dec_values[i] += w[(idx-1)*nr_w+i]*lx->value;
|
1296
|
+
}
|
1297
|
+
|
1298
|
+
if(nr_class==2)
|
1299
|
+
return (dec_values[0]>0)?model_->label[0]:model_->label[1];
|
1300
|
+
else
|
1301
|
+
{
|
1302
|
+
int dec_max_idx = 0;
|
1303
|
+
for(i=1;i<nr_class;i++)
|
1304
|
+
{
|
1305
|
+
if(dec_values[i] > dec_values[dec_max_idx])
|
1306
|
+
dec_max_idx = i;
|
1307
|
+
}
|
1308
|
+
return model_->label[dec_max_idx];
|
1309
|
+
}
|
1310
|
+
}
|
1311
|
+
|
1312
|
+
int predict(const model *model_, const feature_node *x)
|
1313
|
+
{
|
1314
|
+
double *dec_values = Malloc(double, model_->nr_class);
|
1315
|
+
int label=predict_values(model_, x, dec_values);
|
1316
|
+
free(dec_values);
|
1317
|
+
return label;
|
1318
|
+
}
|
1319
|
+
|
1320
|
+
int predict_probability(const struct model *model_, const struct feature_node *x, double* prob_estimates)
|
1321
|
+
{
|
1322
|
+
if(model_->param.solver_type==L2_LR)
|
1323
|
+
{
|
1324
|
+
int i;
|
1325
|
+
int nr_class=model_->nr_class;
|
1326
|
+
int nr_w;
|
1327
|
+
if(nr_class==2)
|
1328
|
+
nr_w = 1;
|
1329
|
+
else
|
1330
|
+
nr_w = nr_class;
|
1331
|
+
|
1332
|
+
int label=predict_values(model_, x, prob_estimates);
|
1333
|
+
for(i=0;i<nr_w;i++)
|
1334
|
+
prob_estimates[i]=1/(1+exp(-prob_estimates[i]));
|
1335
|
+
|
1336
|
+
if(nr_class==2) // for binary classification
|
1337
|
+
prob_estimates[1]=1.-prob_estimates[0];
|
1338
|
+
else
|
1339
|
+
{
|
1340
|
+
double sum=0;
|
1341
|
+
for(i=0; i<nr_class; i++)
|
1342
|
+
sum+=prob_estimates[i];
|
1343
|
+
|
1344
|
+
for(i=0; i<nr_class; i++)
|
1345
|
+
prob_estimates[i]=prob_estimates[i]/sum;
|
1346
|
+
}
|
1347
|
+
|
1348
|
+
return label;
|
1349
|
+
}
|
1350
|
+
else
|
1351
|
+
return 0;
|
1352
|
+
}
|
1353
|
+
|
1354
|
+
// Free the weight arrays owned by a parameter struct.  The struct itself
// belongs to the caller and is not freed.
void destroy_param(parameter* param)
{
	// free(NULL) is a no-op, so no guards are needed.
	free(param->weight_label);
	free(param->weight);
}
|
1361
|
+
|
1362
|
+
const char *check_parameter(const problem *prob, const parameter *param)
|
1363
|
+
{
|
1364
|
+
if(param->eps <= 0)
|
1365
|
+
return "eps <= 0";
|
1366
|
+
|
1367
|
+
if(param->C <= 0)
|
1368
|
+
return "C <= 0";
|
1369
|
+
|
1370
|
+
if(param->solver_type != L2_LR
|
1371
|
+
&& param->solver_type != L2LOSS_SVM_DUAL
|
1372
|
+
&& param->solver_type != L2LOSS_SVM
|
1373
|
+
&& param->solver_type != L1LOSS_SVM_DUAL
|
1374
|
+
&& param->solver_type != MCSVM_CS)
|
1375
|
+
return "unknown solver type";
|
1376
|
+
|
1377
|
+
return NULL;
|
1378
|
+
}
|
1379
|
+
|
1380
|
+
void cross_validation(const problem *prob, const parameter *param, int nr_fold, int *target)
|
1381
|
+
{
|
1382
|
+
int i;
|
1383
|
+
int *fold_start = Malloc(int,nr_fold+1);
|
1384
|
+
int l = prob->l;
|
1385
|
+
int *perm = Malloc(int,l);
|
1386
|
+
|
1387
|
+
for(i=0;i<l;i++) perm[i]=i;
|
1388
|
+
for(i=0;i<l;i++)
|
1389
|
+
{
|
1390
|
+
int j = i+rand()%(l-i);
|
1391
|
+
swap(perm[i],perm[j]);
|
1392
|
+
}
|
1393
|
+
for(i=0;i<=nr_fold;i++)
|
1394
|
+
fold_start[i]=i*l/nr_fold;
|
1395
|
+
|
1396
|
+
for(i=0;i<nr_fold;i++)
|
1397
|
+
{
|
1398
|
+
int begin = fold_start[i];
|
1399
|
+
int end = fold_start[i+1];
|
1400
|
+
int j,k;
|
1401
|
+
struct problem subprob;
|
1402
|
+
|
1403
|
+
subprob.bias = prob->bias;
|
1404
|
+
subprob.n = prob->n;
|
1405
|
+
subprob.l = l-(end-begin);
|
1406
|
+
subprob.x = Malloc(struct feature_node*,subprob.l);
|
1407
|
+
subprob.y = Malloc(int,subprob.l);
|
1408
|
+
|
1409
|
+
k=0;
|
1410
|
+
for(j=0;j<begin;j++)
|
1411
|
+
{
|
1412
|
+
subprob.x[k] = prob->x[perm[j]];
|
1413
|
+
subprob.y[k] = prob->y[perm[j]];
|
1414
|
+
++k;
|
1415
|
+
}
|
1416
|
+
for(j=end;j<l;j++)
|
1417
|
+
{
|
1418
|
+
subprob.x[k] = prob->x[perm[j]];
|
1419
|
+
subprob.y[k] = prob->y[perm[j]];
|
1420
|
+
++k;
|
1421
|
+
}
|
1422
|
+
struct model *submodel = train(&subprob,param);
|
1423
|
+
for(j=begin;j<end;j++)
|
1424
|
+
target[perm[j]] = predict(submodel,prob->x[perm[j]]);
|
1425
|
+
destroy_model(submodel);
|
1426
|
+
free(subprob.x);
|
1427
|
+
free(subprob.y);
|
1428
|
+
}
|
1429
|
+
free(fold_start);
|
1430
|
+
free(perm);
|
1431
|
+
}
|
1432
|
+
|
1433
|
+
int get_nr_feature(const model *model_)
|
1434
|
+
{
|
1435
|
+
return model_->nr_feature;
|
1436
|
+
}
|
1437
|
+
|
1438
|
+
int get_nr_class(const model *model_)
|
1439
|
+
{
|
1440
|
+
return model_->nr_class;
|
1441
|
+
}
|
1442
|
+
|
1443
|
+
void get_labels(const model *model_, int* label)
|
1444
|
+
{
|
1445
|
+
if (model_->label != NULL)
|
1446
|
+
for(int i=0;i<model_->nr_class;i++)
|
1447
|
+
label[i] = model_->label[i];
|
1448
|
+
}
|
1449
|
+
|
1450
|
+
|