liblinear-ruby 0.0.6 → 0.0.7
- checksums.yaml +4 -4
- data/ext/blasp.h +8 -0
- data/ext/daxpy.c +8 -0
- data/ext/ddot.c +8 -0
- data/ext/dnrm2.c +8 -0
- data/ext/dscal.c +8 -0
- data/ext/liblinear_wrap.cxx +128 -3
- data/ext/linear.cpp +344 -175
- data/ext/linear.h +2 -0
- data/ext/tron.cpp +14 -8
- data/ext/tron.h +2 -1
- data/lib/liblinear/version.rb +1 -1
- data/{liblinear-1.95 → liblinear-2.1}/COPYRIGHT +1 -1
- data/{liblinear-1.95 → liblinear-2.1}/Makefile +1 -1
- data/{liblinear-1.95 → liblinear-2.1}/Makefile.win +3 -9
- data/{liblinear-1.95 → liblinear-2.1}/README +45 -7
- data/{liblinear-1.95 → liblinear-2.1}/blas/Makefile +0 -0
- data/{liblinear-1.95 → liblinear-2.1}/blas/blas.h +0 -0
- data/{liblinear-1.95 → liblinear-2.1}/blas/blasp.h +0 -0
- data/{liblinear-1.95 → liblinear-2.1}/blas/daxpy.c +0 -0
- data/{liblinear-1.95 → liblinear-2.1}/blas/ddot.c +0 -0
- data/{liblinear-1.95 → liblinear-2.1}/blas/dnrm2.c +0 -0
- data/{liblinear-1.95 → liblinear-2.1}/blas/dscal.c +0 -0
- data/{liblinear-1.95 → liblinear-2.1}/heart_scale +0 -0
- data/{liblinear-1.95 → liblinear-2.1}/linear.cpp +344 -175
- data/{liblinear-1.95 → liblinear-2.1}/linear.def +1 -0
- data/{liblinear-1.95 → liblinear-2.1}/linear.h +2 -0
- data/{liblinear-1.95 → liblinear-2.1}/matlab/Makefile +0 -0
- data/{liblinear-1.95 → liblinear-2.1}/matlab/README +12 -2
- data/{liblinear-1.95 → liblinear-2.1}/matlab/libsvmread.c +0 -0
- data/{liblinear-1.95 → liblinear-2.1}/matlab/libsvmwrite.c +1 -1
- data/{liblinear-1.95 → liblinear-2.1}/matlab/linear_model_matlab.c +1 -1
- data/{liblinear-1.95 → liblinear-2.1}/matlab/linear_model_matlab.h +0 -0
- data/liblinear-2.1/matlab/make.m +22 -0
- data/{liblinear-1.95 → liblinear-2.1}/matlab/predict.c +1 -1
- data/{liblinear-1.95 → liblinear-2.1}/matlab/train.c +65 -10
- data/{liblinear-1.95 → liblinear-2.1}/predict.c +0 -0
- data/{liblinear-1.95 → liblinear-2.1}/python/Makefile +0 -0
- data/{liblinear-1.95 → liblinear-2.1}/python/README +7 -0
- data/{liblinear-1.95 → liblinear-2.1}/python/liblinear.py +27 -8
- data/{liblinear-1.95 → liblinear-2.1}/python/liblinearutil.py +16 -2
- data/{liblinear-1.95 → liblinear-2.1}/train.c +51 -1
- data/{liblinear-1.95 → liblinear-2.1}/tron.cpp +14 -8
- data/{liblinear-1.95 → liblinear-2.1}/tron.h +2 -1
- data/liblinear-2.1/windows/liblinear.dll +0 -0
- data/{liblinear-1.95 → liblinear-2.1}/windows/libsvmread.mexw64 +0 -0
- data/{liblinear-1.95 → liblinear-2.1}/windows/libsvmwrite.mexw64 +0 -0
- data/liblinear-2.1/windows/predict.exe +0 -0
- data/{liblinear-1.95 → liblinear-2.1}/windows/predict.mexw64 +0 -0
- data/liblinear-2.1/windows/train.exe +0 -0
- data/liblinear-2.1/windows/train.mexw64 +0 -0
- data/liblinear-ruby.gemspec +9 -10
- metadata +49 -50
- data/liblinear-1.95/matlab/make.m +0 -21
- data/liblinear-1.95/windows/liblinear.dll +0 -0
- data/liblinear-1.95/windows/predict.exe +0 -0
- data/liblinear-1.95/windows/train.exe +0 -0
- data/liblinear-1.95/windows/train.mexw64 +0 -0
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: a7d1e2c1eeff706b5cd0494d4fc720dec937f710
+  data.tar.gz: 3d436efa057c9fa1a68e0e4e884322b4c07dfb10
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 59d78c950c0d15db0213b6925a56a7aa56b567126fa184ceea744e13c418bedd4cd00638182ac0635f40075041cd96cde6bb1ba1c788440f0fbe70a9e46b128f
+  data.tar.gz: fbeaad15badd01d2fea3ebf2ab2a4cf594e7f4b90975e5c88c73f13433f9c3c3282c5d7939dd347355543e2a94216e5e9274d3089a823650cb616507c089bd8b
data/ext/blasp.h
CHANGED
@@ -3,6 +3,10 @@
 
 /* Functions listed in alphabetical order */
 
+#ifdef __cplusplus
+extern "C" {
+#endif
+
 #ifdef F2C_COMPAT
 
 void cdotc_(fcomplex *dotval, int *n, fcomplex *cx, int *incx,
@@ -428,3 +432,7 @@ int ztrsm_(char *side, char *uplo, char *transa, char *diag, int *m,
 
 int ztrsv_(char *uplo, char *trans, char *diag, int *n, dcomplex *a,
            int *lda, dcomplex *x, int *incx);
+
+#ifdef __cplusplus
+}
+#endif
data/ext/daxpy.c
CHANGED
@@ -1,5 +1,9 @@
 #include "blas.h"
 
+#ifdef __cplusplus
+extern "C" {
+#endif
+
 int daxpy_(int *n, double *sa, double *sx, int *incx, double *sy,
            int *incy)
 {
@@ -47,3 +51,7 @@ int daxpy_(int *n, double *sa, double *sx, int *incx, double *sy,
 
     return 0;
 } /* daxpy_ */
+
+#ifdef __cplusplus
+}
+#endif
data/ext/ddot.c
CHANGED
@@ -1,5 +1,9 @@
 #include "blas.h"
 
+#ifdef __cplusplus
+extern "C" {
+#endif
+
 double ddot_(int *n, double *sx, int *incx, double *sy, int *incy)
 {
     long int i, m, nn, iincx, iincy;
@@ -48,3 +52,7 @@ double ddot_(int *n, double *sx, int *incx, double *sy, int *incy)
 
     return stemp;
 } /* ddot_ */
+
+#ifdef __cplusplus
+}
+#endif
data/ext/dnrm2.c
CHANGED
@@ -1,6 +1,10 @@
 #include <math.h>  /* Needed for fabs() and sqrt() */
 #include "blas.h"
 
+#ifdef __cplusplus
+extern "C" {
+#endif
+
 double dnrm2_(int *n, double *x, int *incx)
 {
     long int ix, nn, iincx;
@@ -60,3 +64,7 @@ double dnrm2_(int *n, double *x, int *incx)
     return norm;
 
 } /* dnrm2_ */
+
+#ifdef __cplusplus
+}
+#endif
data/ext/dscal.c
CHANGED
@@ -1,5 +1,9 @@
 #include "blas.h"
 
+#ifdef __cplusplus
+extern "C" {
+#endif
+
 int dscal_(int *n, double *sa, double *sx, int *incx)
 {
     long int i, m, nincx, nn, iincx;
@@ -42,3 +46,7 @@ int dscal_(int *n, double *sa, double *sx, int *incx)
 
     return 0;
 } /* dscal_ */
+
+#ifdef __cplusplus
+}
+#endif
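The extern "C" guards added to the bundled BLAS sources give daxpy_/ddot_/dnrm2_/dscal_ C linkage even when their prototypes are pulled into C++ translation units such as linear.cpp and tron.cpp, so the symbols resolve without C++ name mangling. A minimal sketch of the pattern (illustrative only, not the gem's actual build; assumes the object compiled from ddot.c is linked in):

    // sketch.cpp -- declare the C routine with C linkage, then call it from C++
    #include <cstdio>

    #ifdef __cplusplus
    extern "C" {            // these symbols come from a C object file
    #endif
    double ddot_(int *n, double *sx, int *incx, double *sy, int *incy);
    #ifdef __cplusplus
    }
    #endif

    int main() {
        double x[] = {1.0, 2.0, 3.0};
        double y[] = {4.0, 5.0, 6.0};
        int n = 3, inc = 1;
        // 1*4 + 2*5 + 3*6 = 32
        std::printf("dot = %f\n", ddot_(&n, x, &inc, y, &inc));
        return 0;
    }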
data/ext/liblinear_wrap.cxx
CHANGED
@@ -1870,7 +1870,7 @@ SWIG_ruby_failed(void)
 }
 
 
-/*@SWIG:/usr/share/
+/*@SWIG:/usr/local/share/swig/2.0.4/ruby/rubyprimtypes.swg,19,%ruby_aux_method@*/
 SWIGINTERN VALUE SWIG_AUX_NUM2LONG(VALUE *args)
 {
   VALUE obj = args[0];
@@ -1925,7 +1925,7 @@ SWIG_From_int (int value)
 }
 
 
-/*@SWIG:/usr/share/
+/*@SWIG:/usr/local/share/swig/2.0.4/ruby/rubyprimtypes.swg,19,%ruby_aux_method@*/
 SWIGINTERN VALUE SWIG_AUX_NUM2DBL(VALUE *args)
 {
   VALUE obj = args[0];
@@ -2051,7 +2051,7 @@ SWIG_FromCharPtr(const char *cptr)
 }
 
 
-/*@SWIG:/usr/share/
+/*@SWIG:/usr/local/share/swig/2.0.4/ruby/rubyprimtypes.swg,19,%ruby_aux_method@*/
 SWIGINTERN VALUE SWIG_AUX_NUM2ULONG(VALUE *args)
 {
   VALUE obj = args[0];
@@ -2959,6 +2959,59 @@ fail:
 }
 
 
+SWIGINTERN VALUE
+_wrap_parameter_init_sol_set(int argc, VALUE *argv, VALUE self) {
+  parameter *arg1 = (parameter *) 0 ;
+  double *arg2 = (double *) 0 ;
+  void *argp1 = 0 ;
+  int res1 = 0 ;
+  void *argp2 = 0 ;
+  int res2 = 0 ;
+  
+  if ((argc < 1) || (argc > 1)) {
+    rb_raise(rb_eArgError, "wrong # of arguments(%d for 1)",argc); SWIG_fail;
+  }
+  res1 = SWIG_ConvertPtr(self, &argp1,SWIGTYPE_p_parameter, 0 |  0 );
+  if (!SWIG_IsOK(res1)) {
+    SWIG_exception_fail(SWIG_ArgError(res1), Ruby_Format_TypeError( "", "parameter *","init_sol", 1, self ));
+  }
+  arg1 = reinterpret_cast< parameter * >(argp1);
+  res2 = SWIG_ConvertPtr(argv[0], &argp2,SWIGTYPE_p_double, SWIG_POINTER_DISOWN |  0 );
+  if (!SWIG_IsOK(res2)) {
+    SWIG_exception_fail(SWIG_ArgError(res2), Ruby_Format_TypeError( "", "double *","init_sol", 2, argv[0] ));
+  }
+  arg2 = reinterpret_cast< double * >(argp2);
+  if (arg1) (arg1)->init_sol = arg2;
+  return Qnil;
+fail:
+  return Qnil;
+}
+
+
+SWIGINTERN VALUE
+_wrap_parameter_init_sol_get(int argc, VALUE *argv, VALUE self) {
+  parameter *arg1 = (parameter *) 0 ;
+  void *argp1 = 0 ;
+  int res1 = 0 ;
+  double *result = 0 ;
+  VALUE vresult = Qnil;
+  
+  if ((argc < 0) || (argc > 0)) {
+    rb_raise(rb_eArgError, "wrong # of arguments(%d for 0)",argc); SWIG_fail;
+  }
+  res1 = SWIG_ConvertPtr(self, &argp1,SWIGTYPE_p_parameter, 0 |  0 );
+  if (!SWIG_IsOK(res1)) {
+    SWIG_exception_fail(SWIG_ArgError(res1), Ruby_Format_TypeError( "", "parameter *","init_sol", 1, self ));
+  }
+  arg1 = reinterpret_cast< parameter * >(argp1);
+  result = (double *) ((arg1)->init_sol);
+  vresult = SWIG_NewPointerObj(SWIG_as_voidptr(result), SWIGTYPE_p_double, 0 |  0 );
+  return vresult;
+fail:
+  return Qnil;
+}
+
+
 #ifdef HAVE_RB_DEFINE_ALLOC_FUNC
 SWIGINTERN VALUE
 _wrap_parameter_allocate(VALUE self) {
@@ -3430,6 +3483,75 @@ fail:
 }
 
 
+SWIGINTERN VALUE
+_wrap_find_parameter_C(int argc, VALUE *argv, VALUE self) {
+  problem *arg1 = (problem *) 0 ;
+  parameter *arg2 = (parameter *) 0 ;
+  int arg3 ;
+  double arg4 ;
+  double arg5 ;
+  double *arg6 = (double *) 0 ;
+  double *arg7 = (double *) 0 ;
+  void *argp1 = 0 ;
+  int res1 = 0 ;
+  void *argp2 = 0 ;
+  int res2 = 0 ;
+  int val3 ;
+  int ecode3 = 0 ;
+  double val4 ;
+  int ecode4 = 0 ;
+  double val5 ;
+  int ecode5 = 0 ;
+  void *argp6 = 0 ;
+  int res6 = 0 ;
+  void *argp7 = 0 ;
+  int res7 = 0 ;
+  
+  if ((argc < 7) || (argc > 7)) {
+    rb_raise(rb_eArgError, "wrong # of arguments(%d for 7)",argc); SWIG_fail;
+  }
+  res1 = SWIG_ConvertPtr(argv[0], &argp1,SWIGTYPE_p_problem, 0 |  0 );
+  if (!SWIG_IsOK(res1)) {
+    SWIG_exception_fail(SWIG_ArgError(res1), Ruby_Format_TypeError( "", "problem const *","find_parameter_C", 1, argv[0] ));
+  }
+  arg1 = reinterpret_cast< problem * >(argp1);
+  res2 = SWIG_ConvertPtr(argv[1], &argp2,SWIGTYPE_p_parameter, 0 |  0 );
+  if (!SWIG_IsOK(res2)) {
+    SWIG_exception_fail(SWIG_ArgError(res2), Ruby_Format_TypeError( "", "parameter const *","find_parameter_C", 2, argv[1] ));
+  }
+  arg2 = reinterpret_cast< parameter * >(argp2);
+  ecode3 = SWIG_AsVal_int(argv[2], &val3);
+  if (!SWIG_IsOK(ecode3)) {
+    SWIG_exception_fail(SWIG_ArgError(ecode3), Ruby_Format_TypeError( "", "int","find_parameter_C", 3, argv[2] ));
+  }
+  arg3 = static_cast< int >(val3);
+  ecode4 = SWIG_AsVal_double(argv[3], &val4);
+  if (!SWIG_IsOK(ecode4)) {
+    SWIG_exception_fail(SWIG_ArgError(ecode4), Ruby_Format_TypeError( "", "double","find_parameter_C", 4, argv[3] ));
+  }
+  arg4 = static_cast< double >(val4);
+  ecode5 = SWIG_AsVal_double(argv[4], &val5);
+  if (!SWIG_IsOK(ecode5)) {
+    SWIG_exception_fail(SWIG_ArgError(ecode5), Ruby_Format_TypeError( "", "double","find_parameter_C", 5, argv[4] ));
+  }
+  arg5 = static_cast< double >(val5);
+  res6 = SWIG_ConvertPtr(argv[5], &argp6,SWIGTYPE_p_double, 0 |  0 );
+  if (!SWIG_IsOK(res6)) {
+    SWIG_exception_fail(SWIG_ArgError(res6), Ruby_Format_TypeError( "", "double *","find_parameter_C", 6, argv[5] ));
+  }
+  arg6 = reinterpret_cast< double * >(argp6);
+  res7 = SWIG_ConvertPtr(argv[6], &argp7,SWIGTYPE_p_double, 0 |  0 );
+  if (!SWIG_IsOK(res7)) {
+    SWIG_exception_fail(SWIG_ArgError(res7), Ruby_Format_TypeError( "", "double *","find_parameter_C", 7, argv[6] ));
+  }
+  arg7 = reinterpret_cast< double * >(argp7);
+  find_parameter_C((problem const *)arg1,(parameter const *)arg2,arg3,arg4,arg5,arg6,arg7);
+  return Qnil;
+fail:
+  return Qnil;
+}
+
+
 SWIGINTERN VALUE
 _wrap_predict_values(int argc, VALUE *argv, VALUE self) {
   model *arg1 = (model *) 0 ;
@@ -4685,6 +4807,8 @@ SWIGEXPORT void Init_liblinearswig(void) {
   rb_define_method(SwigClassParameter.klass, "weight", VALUEFUNC(_wrap_parameter_weight_get), -1);
   rb_define_method(SwigClassParameter.klass, "p=", VALUEFUNC(_wrap_parameter_p_set), -1);
   rb_define_method(SwigClassParameter.klass, "p", VALUEFUNC(_wrap_parameter_p_get), -1);
+  rb_define_method(SwigClassParameter.klass, "init_sol=", VALUEFUNC(_wrap_parameter_init_sol_set), -1);
+  rb_define_method(SwigClassParameter.klass, "init_sol", VALUEFUNC(_wrap_parameter_init_sol_get), -1);
   SwigClassParameter.mark = 0;
   SwigClassParameter.destroy = (void (*)(void *)) free_parameter;
   SwigClassParameter.trackObjects = 0;
@@ -4710,6 +4834,7 @@ SWIGEXPORT void Init_liblinearswig(void) {
   SwigClassModel.trackObjects = 0;
   rb_define_module_function(mLiblinearswig, "train", VALUEFUNC(_wrap_train), -1);
   rb_define_module_function(mLiblinearswig, "cross_validation", VALUEFUNC(_wrap_cross_validation), -1);
+  rb_define_module_function(mLiblinearswig, "find_parameter_C", VALUEFUNC(_wrap_find_parameter_C), -1);
  rb_define_module_function(mLiblinearswig, "predict_values", VALUEFUNC(_wrap_predict_values), -1);
  rb_define_module_function(mLiblinearswig, "predict", VALUEFUNC(_wrap_predict), -1);
  rb_define_module_function(mLiblinearswig, "predict_probability", VALUEFUNC(_wrap_predict_probability), -1);
data/ext/linear.cpp
CHANGED
@@ -27,6 +27,7 @@ static void print_string_stdout(const char *s)
     fputs(s,stdout);
     fflush(stdout);
 }
+static void print_null(const char *s) {}
 
 static void (*liblinear_print_string) (const char *) = &print_string_stdout;
 
@@ -43,6 +44,40 @@ static void info(const char *fmt,...)
 #else
 static void info(const char *fmt,...) {}
 #endif
+class sparse_operator
+{
+public:
+    static double nrm2_sq(const feature_node *x)
+    {
+        double ret = 0;
+        while(x->index != -1)
+        {
+            ret += x->value*x->value;
+            x++;
+        }
+        return (ret);
+    }
+
+    static double dot(const double *s, const feature_node *x)
+    {
+        double ret = 0;
+        while(x->index != -1)
+        {
+            ret += s[x->index-1]*x->value;
+            x++;
+        }
+        return (ret);
+    }
+
+    static void axpy(const double a, const feature_node *x, double *y)
+    {
+        while(x->index != -1)
+        {
+            y[x->index-1] += a*x->value;
+            x++;
+        }
+    }
+};
 
 class l2r_lr_fun: public function
 {
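The new sparse_operator helpers centralize the three loops that the rest of this diff repeatedly replaces: squared norm of a sparse row, dot product against a dense vector, and a sparse axpy update. A small self-contained sketch of the same idea (the struct layout and values here are illustrative, not taken from the library):

    #include <cstdio>

    // Minimal stand-in for LIBLINEAR's sparse term: 1-based index, -1 terminates.
    struct feature_node { int index; double value; };

    struct sparse_operator {
        static double nrm2_sq(const feature_node *x) {              // ||x||^2
            double ret = 0;
            for (; x->index != -1; ++x) ret += x->value * x->value;
            return ret;
        }
        static double dot(const double *s, const feature_node *x) { // s' * x
            double ret = 0;
            for (; x->index != -1; ++x) ret += s[x->index - 1] * x->value;
            return ret;
        }
        static void axpy(double a, const feature_node *x, double *y) { // y += a*x
            for (; x->index != -1; ++x) y[x->index - 1] += a * x->value;
        }
    };

    int main() {
        feature_node xi[] = {{1, 2.0}, {3, -1.0}, {-1, 0.0}};  // sparse vector (2, 0, -1)
        double w[3] = {0.5, 0.0, 4.0};
        std::printf("nrm2_sq = %g\n", sparse_operator::nrm2_sq(xi)); // 5
        std::printf("dot     = %g\n", sparse_operator::dot(w, xi));  // -3
        sparse_operator::axpy(2.0, xi, w);                           // w = (4.5, 0, 2)
        std::printf("w = (%g, %g, %g)\n", w[0], w[1], w[2]);
        return 0;
    }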
@@ -139,12 +174,19 @@ void l2r_lr_fun::Hv(double *s, double *Hs)
     int l=prob->l;
     int w_size=get_nr_variable();
     double *wa = new double[l];
+    feature_node **x=prob->x;
 
-
+    for(i=0;i<w_size;i++)
+        Hs[i] = 0;
     for(i=0;i<l;i++)
+    {
+        feature_node * const xi=x[i];
+        wa[i] = sparse_operator::dot(s, xi);
+
         wa[i] = C[i]*D[i]*wa[i];
 
-
+        sparse_operator::axpy(wa[i], xi, Hs);
+    }
     for(i=0;i<w_size;i++)
         Hs[i] = s[i] + Hs[i];
     delete[] wa;
@@ -157,15 +199,7 @@ void l2r_lr_fun::Xv(double *v, double *Xv)
     feature_node **x=prob->x;
 
     for(i=0;i<l;i++)
-
-        feature_node *s=x[i];
-        Xv[i]=0;
-        while(s->index!=-1)
-        {
-            Xv[i]+=v[s->index-1]*s->value;
-            s++;
-        }
-    }
+        Xv[i]=sparse_operator::dot(v, x[i]);
 }
 
 void l2r_lr_fun::XTv(double *v, double *XTv)
@@ -178,14 +212,7 @@ void l2r_lr_fun::XTv(double *v, double *XTv)
     for(i=0;i<w_size;i++)
         XTv[i]=0;
     for(i=0;i<l;i++)
-
-        feature_node *s=x[i];
-        while(s->index!=-1)
-        {
-            XTv[s->index-1]+=v[i]*s->value;
-            s++;
-        }
-    }
+        sparse_operator::axpy(v[i], x[i], XTv);
 }
 
 class l2r_l2_svc_fun: public function
@@ -202,7 +229,6 @@ public:
 
 protected:
     void Xv(double *v, double *Xv);
-    void subXv(double *v, double *Xv);
     void subXTv(double *v, double *XTv);
 
     double *C;
@@ -287,12 +313,19 @@ void l2r_l2_svc_fun::Hv(double *s, double *Hs)
     int i;
     int w_size=get_nr_variable();
     double *wa = new double[sizeI];
+    feature_node **x=prob->x;
 
-
+    for(i=0;i<w_size;i++)
+        Hs[i]=0;
     for(i=0;i<sizeI;i++)
+    {
+        feature_node * const xi=x[I[i]];
+        wa[i] = sparse_operator::dot(s, xi);
+
         wa[i] = C[I[i]]*wa[i];
 
-
+        sparse_operator::axpy(wa[i], xi, Hs);
+    }
     for(i=0;i<w_size;i++)
         Hs[i] = s[i] + 2*Hs[i];
     delete[] wa;
@@ -305,32 +338,7 @@ void l2r_l2_svc_fun::Xv(double *v, double *Xv)
     feature_node **x=prob->x;
 
     for(i=0;i<l;i++)
-
-        feature_node *s=x[i];
-        Xv[i]=0;
-        while(s->index!=-1)
-        {
-            Xv[i]+=v[s->index-1]*s->value;
-            s++;
-        }
-    }
-}
-
-void l2r_l2_svc_fun::subXv(double *v, double *Xv)
-{
-    int i;
-    feature_node **x=prob->x;
-
-    for(i=0;i<sizeI;i++)
-    {
-        feature_node *s=x[I[i]];
-        Xv[i]=0;
-        while(s->index!=-1)
-        {
-            Xv[i]+=v[s->index-1]*s->value;
-            s++;
-        }
-    }
+        Xv[i]=sparse_operator::dot(v, x[i]);
 }
 
 void l2r_l2_svc_fun::subXTv(double *v, double *XTv)
@@ -342,14 +350,7 @@ void l2r_l2_svc_fun::subXTv(double *v, double *XTv)
     for(i=0;i<w_size;i++)
         XTv[i]=0;
     for(i=0;i<sizeI;i++)
-
-        feature_node *s=x[I[i]];
-        while(s->index!=-1)
-        {
-            XTv[s->index-1]+=v[i]*s->value;
-            s++;
-        }
-    }
+        sparse_operator::axpy(v[i], x[I[i]], XTv);
 }
 
 class l2r_l2_svr_fun: public l2r_l2_svc_fun
@@ -830,14 +831,10 @@ static void solve_l2r_l1l2_svc(
     {
         QD[i] = diag[GETI(i)];
 
-        feature_node *xi = prob->x[i];
-
-
-
-            QD[i] += val*val;
-            w[xi->index-1] += y[i]*alpha[i]*val;
-            xi++;
-        }
+        feature_node * const xi = prob->x[i];
+        QD[i] += sparse_operator::nrm2_sq(xi);
+        sparse_operator::axpy(y[i]*alpha[i], xi, w);
+
         index[i] = i;
     }
 
@@ -855,16 +852,10 @@
         for (s=0; s<active_size; s++)
         {
             i = index[s];
-
-
+            const schar yi = y[i];
+            feature_node * const xi = prob->x[i];
 
-
-            while(xi->index!= -1)
-            {
-                G += w[xi->index-1]*(xi->value);
-                xi++;
-            }
-            G = G*yi-1;
+            G = yi*sparse_operator::dot(w, xi)-1;
 
             C = upper_bound[GETI(i)];
             G += alpha[i]*diag[GETI(i)];
@@ -905,12 +896,7 @@
                 double alpha_old = alpha[i];
                 alpha[i] = min(max(alpha[i] - G/QD[i], 0.0), C);
                 d = (alpha[i] - alpha_old)*yi;
-                xi
-                while (xi->index != -1)
-                {
-                    w[xi->index-1] += d*xi->value;
-                    xi++;
-                }
+                sparse_operator::axpy(d, xi, w);
             }
         }
 
@@ -1035,15 +1021,9 @@ static void solve_l2r_l1l2_svr(
         w[i] = 0;
     for(i=0; i<l; i++)
     {
-
-
-
-        {
-            double val = xi->value;
-            QD[i] += val*val;
-            w[xi->index-1] += beta[i]*val;
-            xi++;
-        }
+        feature_node * const xi = prob->x[i];
+        QD[i] = sparse_operator::nrm2_sq(xi);
+        sparse_operator::axpy(beta[i], xi, w);
 
         index[i] = i;
     }
@@ -1066,14 +1046,8 @@
         G = -y[i] + lambda[GETI(i)]*beta[i];
         H = QD[i] + lambda[GETI(i)];
 
-        feature_node *xi = prob->x[i];
-
-        {
-            int ind = xi->index-1;
-            double val = xi->value;
-            G += val*w[ind];
-            xi++;
-        }
+        feature_node * const xi = prob->x[i];
+        G += sparse_operator::dot(w, xi);
 
         double Gp = G+p;
         double Gn = G-p;
@@ -1140,14 +1114,7 @@
         d = beta[i]-beta_old;
 
         if(d != 0)
-
-            xi = prob->x[i];
-            while(xi->index != -1)
-            {
-                w[xi->index-1] += d*xi->value;
-                xi++;
-            }
-        }
+            sparse_operator::axpy(d, xi, w);
     }
 
     if(iter == 0)
@@ -1260,15 +1227,9 @@ void solve_l2r_lr_dual(const problem *prob, double *w, double eps, double Cp, do
         w[i] = 0;
     for(i=0; i<l; i++)
     {
-
-
-
-        {
-            double val = xi->value;
-            xTx[i] += val*val;
-            w[xi->index-1] += y[i]*alpha[2*i]*val;
-            xi++;
-        }
+        feature_node * const xi = prob->x[i];
+        xTx[i] = sparse_operator::nrm2_sq(xi);
+        sparse_operator::axpy(y[i]*alpha[2*i], xi, w);
         index[i] = i;
     }
 
@@ -1284,16 +1245,11 @@
         for (s=0; s<l; s++)
         {
             i = index[s];
-            schar yi = y[i];
+            const schar yi = y[i];
             double C = upper_bound[GETI(i)];
             double ywTx = 0, xisq = xTx[i];
-            feature_node *xi = prob->x[i];
-
-            {
-                ywTx += w[xi->index-1]*xi->value;
-                xi++;
-            }
-            ywTx *= y[i];
+            feature_node * const xi = prob->x[i];
+            ywTx = yi*sparse_operator::dot(w, xi);
             double a = xisq, b = ywTx;
 
             // Decide to minimize g_1(z) or g_2(z)
@@ -1335,12 +1291,7 @@
             {
                 alpha[ind1] = z;
                 alpha[ind2] = C-z;
-                xi
-                while (xi->index != -1)
-                {
-                    w[xi->index-1] += sign*(z-alpha_old)*yi*xi->value;
-                    xi++;
-                }
+                sparse_operator::axpy(sign*(z-alpha_old)*yi, xi, w);
             }
         }
 
@@ -1534,11 +1485,7 @@
                 if(appxcond <= 0)
                 {
                     x = prob_col->x[j];
-
-                    {
-                        b[x->index-1] += d_diff*x->value;
-                        x++;
-                    }
+                    sparse_operator::axpy(d_diff, x, b);
                     break;
                 }
 
@@ -1598,11 +1545,7 @@
             {
                 if(w[i]==0) continue;
                 x = prob_col->x[i];
-
-                {
-                    b[x->index-1] -= w[i]*x->value;
-                    x++;
-                }
+                sparse_operator::axpy(-w[i], x, b);
             }
         }
     }
@@ -1891,12 +1834,7 @@ static void solve_l1r_lr(
             wpd[j] += z;
 
             x = prob_col->x[j];
-
-            {
-                int ind = x->index-1;
-                xTd[ind] += x->value*z;
-                x++;
-            }
+            sparse_operator::axpy(z, x, xTd);
         }
 
         iter++;
@@ -1988,11 +1926,7 @@
             {
                 if(w[i]==0) continue;
                 x = prob_col->x[i];
-
-                {
-                    exp_wTx[x->index-1] += w[i]*x->value;
-                    x++;
-                }
+                sparse_operator::axpy(w[i], x, exp_wTx);
             }
 
             for(int i=0; i<l; i++)
@@ -2180,14 +2114,18 @@ static void group_classes(const problem *prob, int *nr_class_ret, int **label_re
 
 static void train_one(const problem *prob, const parameter *param, double *w, double Cp, double Cn)
 {
-
+    //inner and outer tolerances for TRON
+    double eps = param->eps;
+    double eps_cg = 0.1;
+    if(param->init_sol != NULL)
+        eps_cg = 0.5;
+
     int pos = 0;
     int neg = 0;
     for(int i=0;i<prob->l;i++)
         if(prob->y[i] > 0)
             pos++;
     neg = prob->l - pos;
-
     double primal_solver_tol = eps*max(min(pos,neg), 1)/prob->l;
 
     function *fun_obj=NULL;
@@ -2204,7 +2142,7 @@ static void train_one(const problem *prob, const parameter *param, double *w, do
                 C[i] = Cn;
             }
             fun_obj=new l2r_lr_fun(prob, C);
-            TRON tron_obj(fun_obj, primal_solver_tol);
+            TRON tron_obj(fun_obj, primal_solver_tol, eps_cg);
             tron_obj.set_print_string(liblinear_print_string);
             tron_obj.tron(w);
             delete fun_obj;
@@ -2222,7 +2160,7 @@ static void train_one(const problem *prob, const parameter *param, double *w, do
                 C[i] = Cn;
             }
             fun_obj=new l2r_l2_svc_fun(prob, C);
-            TRON tron_obj(fun_obj, primal_solver_tol);
+            TRON tron_obj(fun_obj, primal_solver_tol, eps_cg);
             tron_obj.set_print_string(liblinear_print_string);
             tron_obj.tron(w);
             delete fun_obj;
@@ -2287,6 +2225,36 @@ static void train_one(const problem *prob, const parameter *param, double *w, do
         }
     }
 
+// Calculate the initial C for parameter selection
+static double calc_start_C(const problem *prob, const parameter *param)
+{
+    int i;
+    double xTx,max_xTx;
+    max_xTx = 0;
+    for(i=0; i<prob->l; i++)
+    {
+        xTx = 0;
+        feature_node *xi=prob->x[i];
+        while(xi->index != -1)
+        {
+            double val = xi->value;
+            xTx += val*val;
+            xi++;
+        }
+        if(xTx > max_xTx)
+            max_xTx = xTx;
+    }
+
+    double min_C = 1.0;
+    if(param->solver_type == L2R_LR)
+        min_C = 1.0 / (prob->l * max_xTx);
+    else if(param->solver_type == L2R_L2LOSS_SVC)
+        min_C = 1.0 / (2 * prob->l * max_xTx);
+
+    return pow( 2, floor(log(min_C) / log(2.0)) );
+}
+
+
 //
 // Interface functions
 //
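Restating calc_start_C above as a formula: with l training instances and max_i ||x_i||^2 the largest squared instance norm, the starting C (rounded down to a power of two) is

    C_min = \begin{cases}
        \dfrac{1}{\,l \cdot \max_i \|x_i\|^2\,} & \text{for L2R\_LR} \\[1ex]
        \dfrac{1}{\,2\,l \cdot \max_i \|x_i\|^2\,} & \text{for L2R\_L2LOSS\_SVC}
    \end{cases}
    \qquad
    C_{\text{start}} = 2^{\lfloor \log_2 C_{\min} \rfloor}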
@@ -2308,9 +2276,11 @@ model* train(const problem *prob, const parameter *param)
     if(check_regression_model(model_))
     {
         model_->w = Malloc(double, w_size);
+        for(i=0; i<w_size; i++)
+            model_->w[i] = 0;
         model_->nr_class = 2;
         model_->label = NULL;
-        train_one(prob, param,
+        train_one(prob, param, model_->w, 0, 0);
     }
     else
     {
@@ -2380,8 +2350,15 @@
                 sub_prob.y[k] = +1;
             for(; k<sub_prob.l; k++)
                 sub_prob.y[k] = -1;
+
+            if(param->init_sol != NULL)
+                for(i=0;i<w_size;i++)
+                    model_->w[i] = param->init_sol[i];
+            else
+                for(i=0;i<w_size;i++)
+                    model_->w[i] = 0;
 
-            train_one(&sub_prob, param,
+            train_one(&sub_prob, param, model_->w, weighted_C[0], weighted_C[1]);
         }
         else
         {
@@ -2400,6 +2377,13 @@
                 for(; k<sub_prob.l; k++)
                     sub_prob.y[k] = -1;
 
+                if(param->init_sol != NULL)
+                    for(j=0;j<w_size;j++)
+                        w[j] = param->init_sol[j*nr_class+i];
+                else
+                    for(j=0;j<w_size;j++)
+                        w[j] = 0;
+
                 train_one(&sub_prob, param, w, weighted_C[i], param->C);
 
                 for(int j=0;j<w_size;j++)
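The init_sol field added to struct parameter lets a caller seed train() with an existing weight vector (a warm start); per check_parameter further down, it is only honored for the L2R_LR and L2R_L2LOSS_SVC solvers. A hedged C++ sketch of the calling pattern (problem/parameter setup elided; prob, param, prev_model, and w_size are placeholder names supplied by the caller):

    // Assumes a filled-in `problem prob` and `parameter param` with
    // param.solver_type == L2R_LR, and a previously trained binary `model *prev_model`
    // whose weight vector has the same layout (w_size doubles).
    #include <cstdlib>
    #include <cstring>
    #include "linear.h"   // LIBLINEAR 2.1 header

    void warm_start_example(problem &prob, parameter &param, model *prev_model, int w_size)
    {
        double *w0 = (double *)malloc(sizeof(double) * w_size);
        std::memcpy(w0, prev_model->w, sizeof(double) * w_size);

        param.init_sol = w0;              // train() copies this into the new model's w
        model *m = train(&prob, &param);  // warm-started solve

        free_and_destroy_model(&m);
        // Note: destroy_param() frees init_sol, per the destroy_param hunk below,
        // so do not free w0 twice.
    }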
@@ -2480,6 +2464,158 @@ void cross_validation(const problem *prob, const parameter *param, int nr_fold,
     free(perm);
 }
 
+void find_parameter_C(const problem *prob, const parameter *param, int nr_fold, double start_C, double max_C, double *best_C, double *best_rate)
+{
+    // variables for CV
+    int i;
+    int *fold_start;
+    int l = prob->l;
+    int *perm = Malloc(int, l);
+    double *target = Malloc(double, prob->l);
+    struct problem *subprob = Malloc(problem,nr_fold);
+
+    // variables for warm start
+    double ratio = 2;
+    double **prev_w = Malloc(double*, nr_fold);
+    for(i = 0; i < nr_fold; i++)
+        prev_w[i] = NULL;
+    int num_unchanged_w = 0;
+    struct parameter param1 = *param;
+    void (*default_print_string) (const char *) = liblinear_print_string;
+
+    if (nr_fold > l)
+    {
+        nr_fold = l;
+        fprintf(stderr,"WARNING: # folds > # data. Will use # folds = # data instead (i.e., leave-one-out cross validation)\n");
+    }
+    fold_start = Malloc(int,nr_fold+1);
+    for(i=0;i<l;i++) perm[i]=i;
+    for(i=0;i<l;i++)
+    {
+        int j = i+rand()%(l-i);
+        swap(perm[i],perm[j]);
+    }
+    for(i=0;i<=nr_fold;i++)
+        fold_start[i]=i*l/nr_fold;
+
+    for(i=0;i<nr_fold;i++)
+    {
+        int begin = fold_start[i];
+        int end = fold_start[i+1];
+        int j,k;
+
+        subprob[i].bias = prob->bias;
+        subprob[i].n = prob->n;
+        subprob[i].l = l-(end-begin);
+        subprob[i].x = Malloc(struct feature_node*,subprob[i].l);
+        subprob[i].y = Malloc(double,subprob[i].l);
+
+        k=0;
+        for(j=0;j<begin;j++)
+        {
+            subprob[i].x[k] = prob->x[perm[j]];
+            subprob[i].y[k] = prob->y[perm[j]];
+            ++k;
+        }
+        for(j=end;j<l;j++)
+        {
+            subprob[i].x[k] = prob->x[perm[j]];
+            subprob[i].y[k] = prob->y[perm[j]];
+            ++k;
+        }
+
+    }
+
+    *best_rate = 0;
+    if(start_C <= 0)
+        start_C = calc_start_C(prob,param);
+    param1.C = start_C;
+
+    while(param1.C <= max_C)
+    {
+        //Output disabled for running CV at a particular C
+        set_print_string_function(&print_null);
+
+        for(i=0; i<nr_fold; i++)
+        {
+            int j;
+            int begin = fold_start[i];
+            int end = fold_start[i+1];
+
+            param1.init_sol = prev_w[i];
+            struct model *submodel = train(&subprob[i],&param1);
+
+            int total_w_size;
+            if(submodel->nr_class == 2)
+                total_w_size = subprob[i].n;
+            else
+                total_w_size = subprob[i].n * submodel->nr_class;
+
+            if(prev_w[i] == NULL)
+            {
+                prev_w[i] = Malloc(double, total_w_size);
+                for(j=0; j<total_w_size; j++)
+                    prev_w[i][j] = submodel->w[j];
+            }
+            else if(num_unchanged_w >= 0)
+            {
+                double norm_w_diff = 0;
+                for(j=0; j<total_w_size; j++)
+                {
+                    norm_w_diff += (submodel->w[j] - prev_w[i][j])*(submodel->w[j] - prev_w[i][j]);
+                    prev_w[i][j] = submodel->w[j];
+                }
+                norm_w_diff = sqrt(norm_w_diff);
+
+                if(norm_w_diff > 1e-15)
+                    num_unchanged_w = -1;
+            }
+            else
+            {
+                for(j=0; j<total_w_size; j++)
+                    prev_w[i][j] = submodel->w[j];
+            }
+
+            for(j=begin; j<end; j++)
+                target[perm[j]] = predict(submodel,prob->x[perm[j]]);
+
+            free_and_destroy_model(&submodel);
+        }
+        set_print_string_function(default_print_string);
+
+        int total_correct = 0;
+        for(i=0; i<prob->l; i++)
+            if(target[i] == prob->y[i])
+                ++total_correct;
+        double current_rate = (double)total_correct/prob->l;
+        if(current_rate > *best_rate)
+        {
+            *best_C = param1.C;
+            *best_rate = current_rate;
+        }
+
+        info("log2c=%7.2f\trate=%g\n",log(param1.C)/log(2.0),100.0*current_rate);
+        num_unchanged_w++;
+        if(num_unchanged_w == 3)
+            break;
+        param1.C = param1.C*ratio;
+    }
+
+    if(param1.C > max_C && max_C > start_C)
+        info("warning: maximum C reached.\n");
+    free(fold_start);
+    free(perm);
+    free(target);
+    for(i=0; i<nr_fold; i++)
+    {
+        free(subprob[i].x);
+        free(subprob[i].y);
+        free(prev_w[i]);
+    }
+    free(prev_w);
+    free(subprob);
+}
+
 double predict_values(const struct model *model_, const struct feature_node *x, double *dec_values)
 {
     int idx;
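find_parameter_C is the C-level routine behind LIBLINEAR 2.1's C-parameter search: it runs nr_fold-fold cross-validation at C = start_C, 2*start_C, 4*start_C, ... up to max_C, warm-starting each fold from the previous C via init_sol, and stops early once the per-fold weights have stopped changing for three consecutive steps. A hedged C++ sketch of a call (prob and param are assumed to be already populated by the caller; choose_C is an illustrative name):

    #include <cstdio>
    #include "linear.h"   // LIBLINEAR 2.1 header declaring find_parameter_C()

    // Assumes param.solver_type is L2R_LR or L2R_L2LOSS_SVC and param.init_sol == NULL.
    void choose_C(const problem *prob, const parameter *param)
    {
        double best_C = 0, best_rate = 0;
        // Passing start_C <= 0 lets the library pick the first C via calc_start_C().
        find_parameter_C(prob, param, 5 /* nr_fold */, -1.0 /* start_C */,
                         1024.0 /* max_C */, &best_C, &best_rate);
        std::printf("best C = %g (cross-validation accuracy %.1f%%)\n",
                    best_C, 100.0 * best_rate);
    }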
@@ -2592,7 +2728,11 @@ int save_model(const char *model_file_name, const struct model *model_)
     FILE *fp = fopen(model_file_name,"w");
     if(fp==NULL) return -1;
 
-    char *old_locale =
+    char *old_locale = setlocale(LC_ALL, NULL);
+    if (old_locale)
+    {
+        old_locale = strdup(old_locale);
+    }
     setlocale(LC_ALL, "C");
 
     int nr_w;
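The locale handling in save_model (and in load_model below) now copies the string returned by setlocale before switching to the "C" locale: the buffer setlocale returns may be overwritten by the next setlocale call, so the original locale name is duplicated with strdup, restored afterwards, and then freed. A minimal standalone sketch of that save/restore pattern:

    #include <clocale>
    #include <cstdio>
    #include <cstdlib>
    #include <cstring>

    int main()
    {
        // Remember the current locale; the returned buffer is not ours to keep,
        // so duplicate it before changing the locale.
        char *old_locale = setlocale(LC_ALL, NULL);
        if (old_locale)
            old_locale = strdup(old_locale);

        setlocale(LC_ALL, "C");         // make "%lf" printing/parsing locale-independent
        std::printf("%f\n", 3.14);      // always uses '.' as the decimal point

        setlocale(LC_ALL, old_locale);  // restore whatever was active before
        free(old_locale);
        return 0;
    }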
@@ -2632,6 +2772,30 @@ int save_model(const char *model_file_name, const struct model *model_)
     else return 0;
 }
 
+//
+// FSCANF helps to handle fscanf failures.
+// Its do-while block avoids the ambiguity when
+// if (...)
+//     FSCANF();
+// is used
+//
+#define FSCANF(_stream, _format, _var)do\
+{\
+    if (fscanf(_stream, _format, _var) != 1)\
+    {\
+        fprintf(stderr, "ERROR: fscanf failed to read the model\n");\
+        EXIT_LOAD_MODEL()\
+    }\
+}while(0)
+// EXIT_LOAD_MODEL should NOT end with a semicolon.
+#define EXIT_LOAD_MODEL()\
+{\
+    setlocale(LC_ALL, old_locale);\
+    free(model_->label);\
+    free(model_);\
+    free(old_locale);\
+    return NULL;\
+}
 struct model *load_model(const char *model_file_name)
 {
     FILE *fp = fopen(model_file_name,"r");
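The do { ... } while(0) wrapper on FSCANF makes the macro behave as a single statement, which is exactly the ambiguity the comment above refers to: an unwrapped brace-block macro followed by ';' breaks an if/else. A small illustration of the pitfall (BAD_CHECK and GOOD_CHECK are hypothetical names used only for contrast):

    #include <cstdio>

    // Unwrapped multi-statement macro: expands to a block, so "BAD_CHECK(x);"
    // becomes "{ ... } ;" and the stray ';' ends the if -- the else no longer parses.
    #define BAD_CHECK(cond)  { if (!(cond)) std::puts("failed"); }

    // do/while(0) makes the expansion one statement that expects the trailing ';'.
    #define GOOD_CHECK(cond) do { if (!(cond)) std::puts("failed"); } while (0)

    int main()
    {
        int ok = 1;
        // With BAD_CHECK the next line would be a compile error ("else without if"):
        //   if (ok) BAD_CHECK(ok); else std::puts("other branch");
        if (ok) GOOD_CHECK(ok); else std::puts("other branch");  // fine
        return 0;
    }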
@@ -2647,16 +2811,20 @@ struct model *load_model(const char *model_file_name)
 
     model_->label = NULL;
 
-    char *old_locale =
+    char *old_locale = setlocale(LC_ALL, NULL);
+    if (old_locale)
+    {
+        old_locale = strdup(old_locale);
+    }
     setlocale(LC_ALL, "C");
 
     char cmd[81];
     while(1)
     {
-
+        FSCANF(fp,"%80s",cmd);
         if(strcmp(cmd,"solver_type")==0)
         {
-
+            FSCANF(fp,"%80s",cmd);
             int i;
             for(i=0;solver_type_table[i];i++)
             {
@@ -2669,27 +2837,22 @@ struct model *load_model(const char *model_file_name)
             if(solver_type_table[i] == NULL)
             {
                 fprintf(stderr,"unknown solver type.\n");
-
-                setlocale(LC_ALL, old_locale);
-                free(model_->label);
-                free(model_);
-                free(old_locale);
-                return NULL;
+                EXIT_LOAD_MODEL()
             }
         }
         else if(strcmp(cmd,"nr_class")==0)
         {
-
+            FSCANF(fp,"%d",&nr_class);
             model_->nr_class=nr_class;
         }
         else if(strcmp(cmd,"nr_feature")==0)
        {
-
+            FSCANF(fp,"%d",&nr_feature);
             model_->nr_feature=nr_feature;
         }
         else if(strcmp(cmd,"bias")==0)
         {
-
+            FSCANF(fp,"%lf",&bias);
             model_->bias=bias;
         }
         else if(strcmp(cmd,"w")==0)
@@ -2701,16 +2864,12 @@ struct model *load_model(const char *model_file_name)
             int nr_class = model_->nr_class;
             model_->label = Malloc(int,nr_class);
             for(int i=0;i<nr_class;i++)
-
+                FSCANF(fp,"%d",&model_->label[i]);
         }
         else
         {
             fprintf(stderr,"unknown text in model file: [%s]\n",cmd);
-
-            free(model_->label);
-            free(model_);
-            free(old_locale);
-            return NULL;
+            EXIT_LOAD_MODEL()
         }
     }
 
@@ -2731,8 +2890,12 @@ struct model *load_model(const char *model_file_name)
     {
         int j;
         for(j=0; j<nr_w; j++)
-
-            fscanf(fp, "\n")
+            FSCANF(fp, "%lf ", &model_->w[i*nr_w+j]);
+        if (fscanf(fp, "\n") !=0)
+        {
+            fprintf(stderr, "ERROR: fscanf failed to read the model\n");
+            EXIT_LOAD_MODEL()
+        }
     }
 
     setlocale(LC_ALL, old_locale);
@@ -2831,6 +2994,8 @@ void destroy_param(parameter* param)
         free(param->weight_label);
     if(param->weight != NULL)
         free(param->weight);
+    if(param->init_sol != NULL)
+        free(param->init_sol);
 }
 
 const char *check_parameter(const problem *prob, const parameter *param)
@@ -2857,6 +3022,10 @@ const char *check_parameter(const problem *prob, const parameter *param)
         && param->solver_type != L2R_L1LOSS_SVR_DUAL)
         return "unknown solver type";
 
+    if(param->init_sol != NULL
+        && param->solver_type != L2R_LR && param->solver_type != L2R_L2LOSS_SVC)
+        return "Initial-solution specification supported only for solver L2R_LR and L2R_L2LOSS_SVC";
+
     return NULL;
 }
 