liblinear-ruby 0.0.4 → 0.0.5
- checksums.yaml +4 -4
- data/README.md +6 -3
- data/ext/liblinear_wrap.cxx +99 -0
- data/ext/linear.cpp +61 -11
- data/ext/linear.h +3 -0
- data/lib/liblinear/model.rb +37 -6
- data/lib/liblinear/version.rb +1 -1
- data/{liblinear-1.94 → liblinear-1.95}/COPYRIGHT +1 -1
- data/{liblinear-1.94 → liblinear-1.95}/Makefile +1 -1
- data/{liblinear-1.94 → liblinear-1.95}/Makefile.win +1 -1
- data/{liblinear-1.94 → liblinear-1.95}/README +32 -1
- data/{liblinear-1.94 → liblinear-1.95}/blas/Makefile +0 -0
- data/{liblinear-1.94 → liblinear-1.95}/blas/blas.h +0 -0
- data/{liblinear-1.94 → liblinear-1.95}/blas/blasp.h +8 -0
- data/{liblinear-1.94 → liblinear-1.95}/blas/daxpy.c +8 -0
- data/{liblinear-1.94 → liblinear-1.95}/blas/ddot.c +8 -0
- data/{liblinear-1.94 → liblinear-1.95}/blas/dnrm2.c +8 -0
- data/{liblinear-1.94 → liblinear-1.95}/blas/dscal.c +8 -0
- data/{liblinear-1.94 → liblinear-1.95}/heart_scale +0 -0
- data/{liblinear-1.94 → liblinear-1.95}/linear.cpp +61 -11
- data/{liblinear-1.94 → liblinear-1.95}/linear.def +3 -0
- data/{liblinear-1.94 → liblinear-1.95}/linear.h +3 -0
- data/{liblinear-1.94 → liblinear-1.95}/matlab/Makefile +2 -11
- data/{liblinear-1.94 → liblinear-1.95}/matlab/README +0 -0
- data/{liblinear-1.94 → liblinear-1.95}/matlab/libsvmread.c +3 -4
- data/{liblinear-1.94 → liblinear-1.95}/matlab/libsvmwrite.c +6 -7
- data/{liblinear-1.94 → liblinear-1.95}/matlab/linear_model_matlab.c +1 -1
- data/{liblinear-1.94 → liblinear-1.95}/matlab/linear_model_matlab.h +0 -0
- data/{liblinear-1.94 → liblinear-1.95}/matlab/make.m +21 -21
- data/{liblinear-1.94 → liblinear-1.95}/matlab/predict.c +4 -6
- data/{liblinear-1.94 → liblinear-1.95}/matlab/train.c +27 -17
- data/{liblinear-1.94 → liblinear-1.95}/predict.c +1 -3
- data/{liblinear-1.94 → liblinear-1.95}/python/Makefile +0 -0
- data/{liblinear-1.94 → liblinear-1.95}/python/README +30 -0
- data/{liblinear-1.94 → liblinear-1.95}/python/liblinear.py +35 -8
- data/{liblinear-1.94 → liblinear-1.95}/python/liblinearutil.py +7 -1
- data/{liblinear-1.94 → liblinear-1.95}/train.c +1 -1
- data/{liblinear-1.94 → liblinear-1.95}/tron.cpp +0 -0
- data/{liblinear-1.94 → liblinear-1.95}/tron.h +0 -0
- data/{liblinear-1.94 → liblinear-1.95}/windows/liblinear.dll +0 -0
- data/liblinear-1.95/windows/libsvmread.mexw64 +0 -0
- data/liblinear-1.95/windows/libsvmwrite.mexw64 +0 -0
- data/{liblinear-1.94 → liblinear-1.95}/windows/predict.exe +0 -0
- data/liblinear-1.95/windows/predict.mexw64 +0 -0
- data/{liblinear-1.94 → liblinear-1.95}/windows/train.exe +0 -0
- data/liblinear-1.95/windows/train.mexw64 +0 -0
- data/spec/liblinear/model_spec.rb +50 -11
- metadata +43 -42
- data/liblinear-1.94/windows/libsvmread.mexw64 +0 -0
- data/liblinear-1.94/windows/libsvmwrite.mexw64 +0 -0
- data/liblinear-1.94/windows/predict.mexw64 +0 -0
- data/liblinear-1.94/windows/train.mexw64 +0 -0
data/{liblinear-1.94 → liblinear-1.95}/blas/Makefile: file without changes
data/{liblinear-1.94 → liblinear-1.95}/blas/blas.h: file without changes
data/{liblinear-1.94 → liblinear-1.95}/blas/blasp.h
@@ -3,6 +3,10 @@
 
 /* Functions listed in alphabetical order */
 
+#ifdef __cplusplus
+extern "C" {
+#endif
+
 #ifdef F2C_COMPAT
 
 void cdotc_(fcomplex *dotval, int *n, fcomplex *cx, int *incx,
@@ -428,3 +432,7 @@ int ztrsm_(char *side, char *uplo, char *transa, char *diag, int *m,
 
 int ztrsv_(char *uplo, char *trans, char *diag, int *n, dcomplex *a,
 	int *lda, dcomplex *x, int *incx);
+
+#ifdef __cplusplus
+}
+#endif
data/{liblinear-1.94 → liblinear-1.95}/blas/daxpy.c
@@ -1,5 +1,9 @@
 #include "blas.h"
 
+#ifdef __cplusplus
+extern "C" {
+#endif
+
 int daxpy_(int *n, double *sa, double *sx, int *incx, double *sy,
 	int *incy)
 {
@@ -47,3 +51,7 @@ int daxpy_(int *n, double *sa, double *sx, int *incx, double *sy,
 
 	return 0;
 } /* daxpy_ */
+
+#ifdef __cplusplus
+}
+#endif
data/{liblinear-1.94 → liblinear-1.95}/blas/ddot.c
@@ -1,5 +1,9 @@
 #include "blas.h"
 
+#ifdef __cplusplus
+extern "C" {
+#endif
+
 double ddot_(int *n, double *sx, int *incx, double *sy, int *incy)
 {
 	long int i, m, nn, iincx, iincy;
@@ -48,3 +52,7 @@ double ddot_(int *n, double *sx, int *incx, double *sy, int *incy)
 
 	return stemp;
 } /* ddot_ */
+
+#ifdef __cplusplus
+}
+#endif
data/{liblinear-1.94 → liblinear-1.95}/blas/dnrm2.c
@@ -1,6 +1,10 @@
 #include <math.h> /* Needed for fabs() and sqrt() */
 #include "blas.h"
 
+#ifdef __cplusplus
+extern "C" {
+#endif
+
 double dnrm2_(int *n, double *x, int *incx)
 {
 	long int ix, nn, iincx;
@@ -60,3 +64,7 @@ double dnrm2_(int *n, double *x, int *incx)
 	return norm;
 
 } /* dnrm2_ */
+
+#ifdef __cplusplus
+}
+#endif
data/{liblinear-1.94 → liblinear-1.95}/blas/dscal.c
@@ -1,5 +1,9 @@
 #include "blas.h"
 
+#ifdef __cplusplus
+extern "C" {
+#endif
+
 int dscal_(int *n, double *sa, double *sx, int *incx)
 {
 	long int i, m, nincx, nn, iincx;
@@ -42,3 +46,7 @@ int dscal_(int *n, double *sa, double *sx, int *incx)
 
 	return 0;
 } /* dscal_ */
+
+#ifdef __cplusplus
+}
+#endif
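Note: the four BLAS hunks above apply one pattern. Each source file is wrapped in extern "C" guards so that daxpy_, ddot_, dnrm2_ and dscal_ keep unmangled C linkage even when the files are fed to a C++ compiler, as the MEX builds further down now do. A minimal sketch of the failure mode the guard prevents (the caller below is hypothetical, not part of the distribution):

// caller.cpp -- compiled as C++, linked against a C-compiled daxpy.o.
// Without extern "C", the compiler would look for a mangled symbol and
// the link would fail with "undefined reference" to the mangled name.
extern "C" int daxpy_(int *n, double *sa, double *sx, int *incx,
                      double *sy, int *incy);

int main()
{
    int n = 3, inc = 1;
    double a = 2.0;
    double x[3] = {1, 2, 3};
    double y[3] = {4, 5, 6};
    daxpy_(&n, &a, x, &inc, y, &inc); // y := a*x + y
    return 0;
}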
data/{liblinear-1.94 → liblinear-1.95}/heart_scale: file without changes
data/{liblinear-1.94 → liblinear-1.95}/linear.cpp
@@ -1010,7 +1010,7 @@ static void solve_l2r_l1l2_svr(
 	double d, G, H;
 	double Gmax_old = INF;
 	double Gmax_new, Gnorm1_new;
-	double Gnorm1_init;
+	double Gnorm1_init = -1.0; // Gnorm1_init is initialized at the first iteration
 	double *beta = new double[l];
 	double *QD = new double[l];
 	double *y = prob->y;
@@ -1409,7 +1409,7 @@ static void solve_l1r_l2_svc(
 	double d, G_loss, G, H;
 	double Gmax_old = INF;
 	double Gmax_new, Gnorm1_new;
-	double Gnorm1_init;
+	double Gnorm1_init = -1.0; // Gnorm1_init is initialized at the first iteration
 	double d_old, d_diff;
 	double loss_old, loss_new;
 	double appxcond, cond;
@@ -1699,7 +1699,7 @@ static void solve_l1r_lr(
 	double sigma = 0.01;
 	double w_norm, w_norm_new;
 	double z, G, H;
-	double Gnorm1_init;
+	double Gnorm1_init = -1.0; // Gnorm1_init is initialized at the first iteration
 	double Gmax_old = INF;
 	double Gmax_new, Gnorm1_new;
 	double QP_Gmax_old = INF;
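Note: the three solver hunks above fix the same latent issue. Gnorm1_init was declared uninitialized and only assigned inside the first loop pass, which compilers cannot always prove, so the convergence test could in principle read an indeterminate value. The -1.0 sentinel makes the state explicit. A self-contained sketch of the pattern (compute_gradient_norm, eps and max_iter are stand-ins, not liblinear's exact code):

#include <cstdio>

// Stand-in for the solvers' per-pass gradient-norm computation.
static double compute_gradient_norm(int iter) { return 10.0/(1 + iter); }

int main()
{
    const double eps = 0.01;
    const int max_iter = 1000;
    double Gnorm1_init = -1.0; // sentinel; assigned for real on iteration 0
    for(int iter = 0; iter < max_iter; iter++)
    {
        double Gnorm1_new = compute_gradient_norm(iter);
        if(iter == 0)
            Gnorm1_init = Gnorm1_new; // the "first iteration" the new comment refers to
        if(Gnorm1_new <= eps*Gnorm1_init)
        {
            std::printf("converged after %d passes\n", iter);
            break;
        }
    }
    return 0;
}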
@@ -2051,8 +2051,8 @@ static void transpose(const problem *prob, feature_node **x_space_ret, problem *prob_col)
 	int i;
 	int l = prob->l;
 	int n = prob->n;
-	int nnz = 0;
-	int *col_ptr = new int [n+1];
+	size_t nnz = 0;
+	size_t *col_ptr = new size_t [n+1];
 	feature_node *x_space;
 	prob_col->l = l;
 	prob_col->n = n;
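Note: transpose() builds a column-major copy of the whole problem, so nnz counts every stored feature value; as a 32-bit int it wraps past roughly 2.1e9 nonzeros. Widening it and the column-pointer array to size_t removes that ceiling. A hypothetical stand-in for the counting step (row_lengths/l mimic the prob->x traversal, they are not liblinear's real API):

#include <cstddef>

// nnz and the column pointers are size_t so totals past INT_MAX don't wrap.
size_t count_nnz(const int *row_lengths, int l)
{
    size_t nnz = 0; // was int in liblinear 1.94
    for(int i = 0; i < l; i++)
        nnz += (size_t) row_lengths[i];
    return nnz;
}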
@@ -2305,9 +2305,7 @@ model* train(const problem *prob, const parameter *param)
 	model_->param = *param;
 	model_->bias = prob->bias;
 
-	if(param->solver_type == L2R_L2LOSS_SVR ||
-		param->solver_type == L2R_L1LOSS_SVR_DUAL ||
-		param->solver_type == L2R_L2LOSS_SVR_DUAL)
+	if(check_regression_model(model_))
 	{
 		model_->w = Malloc(double, w_size);
 		model_->nr_class = 2;
@@ -2512,9 +2510,7 @@ double predict_values(const struct model *model_, const struct feature_node *x, double *dec_values)
 
 	if(nr_class==2)
 	{
-		if(model_->param.solver_type == L2R_L2LOSS_SVR ||
-			model_->param.solver_type == L2R_L1LOSS_SVR_DUAL ||
-			model_->param.solver_type == L2R_L2LOSS_SVR_DUAL)
+		if(check_regression_model(model_))
 			return dec_values[0];
 		else
 			return (dec_values[0]>0)?model_->label[0]:model_->label[1];
@@ -2764,6 +2760,53 @@ void get_labels(const model *model_, int* label)
 		label[i] = model_->label[i];
 }
 
+// use inline here for better performance (around 20% faster than the non-inline one)
+static inline double get_w_value(const struct model *model_, int idx, int label_idx)
+{
+	int nr_class = model_->nr_class;
+	int solver_type = model_->param.solver_type;
+	const double *w = model_->w;
+
+	if(idx < 0 || idx > model_->nr_feature)
+		return 0;
+	if(check_regression_model(model_))
+		return w[idx];
+	else
+	{
+		if(label_idx < 0 || label_idx >= nr_class)
+			return 0;
+		if(nr_class == 2 && solver_type != MCSVM_CS)
+		{
+			if(label_idx == 0)
+				return w[idx];
+			else
+				return -w[idx];
+		}
+		else
+			return w[idx*nr_class+label_idx];
+	}
+}
+
+// feat_idx: starting from 1 to nr_feature
+// label_idx: starting from 0 to nr_class-1 for classification models;
+// for regression models, label_idx is ignored.
+double get_decfun_coef(const struct model *model_, int feat_idx, int label_idx)
+{
+	if(feat_idx > model_->nr_feature)
+		return 0;
+	return get_w_value(model_, feat_idx-1, label_idx);
+}
+
+double get_decfun_bias(const struct model *model_, int label_idx)
+{
+	int bias_idx = model_->nr_feature;
+	double bias = model_->bias;
+	if(bias <= 0)
+		return 0;
+	else
+		return bias*get_w_value(model_, bias_idx, label_idx);
+}
+
 void free_model_content(struct model *model_ptr)
 {
 	if(model_ptr->w != NULL)
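Note: get_decfun_coef and get_decfun_bias are the user-facing additions of 1.95. They expose the decision function w·x + b without reaching into model_->w, whose layout depends on nr_class and the solver. A hedged sketch of reconstructing a binary model's decision value through the new accessors (decision_value and its inputs are illustrative, not from the distribution):

#include "linear.h"

// x follows liblinear's convention: 1-based indices, terminated by index == -1.
double decision_value(const struct model *m, const struct feature_node *x)
{
	double s = get_decfun_bias(m, 0); // 0 unless the model was trained with bias >= 0
	for(; x->index != -1; x++)
		s += get_decfun_coef(m, x->index, 0)*x->value;
	return s; // s > 0 predicts m->label[0], otherwise m->label[1]
}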
@@ -2824,6 +2867,13 @@ int check_probability_model(const struct model *model_)
 			model_->param.solver_type==L1R_LR);
 }
 
+int check_regression_model(const struct model *model_)
+{
+	return (model_->param.solver_type==L2R_L2LOSS_SVR ||
+		model_->param.solver_type==L2R_L1LOSS_SVR_DUAL ||
+		model_->param.solver_type==L2R_L2LOSS_SVR_DUAL);
+}
+
 void set_print_string_function(void (*print_func)(const char*))
 {
 	if (print_func == NULL)
data/{liblinear-1.94 → liblinear-1.95}/linear.h
@@ -57,6 +57,8 @@ struct model *load_model(const char *model_file_name);
 int get_nr_feature(const struct model *model_);
 int get_nr_class(const struct model *model_);
 void get_labels(const struct model *model_, int* label);
+double get_decfun_coef(const struct model *model_, int feat_idx, int label_idx);
+double get_decfun_bias(const struct model *model_, int label_idx);
 
 void free_model_content(struct model *model_ptr);
 void free_and_destroy_model(struct model **model_ptr_ptr);
@@ -64,6 +66,7 @@ void destroy_param(struct parameter *param);
 
 const char *check_parameter(const struct problem *prob, const struct parameter *param);
 int check_probability_model(const struct model *model);
+int check_regression_model(const struct model *model);
 void set_print_string_function(void (*print_func) (const char*));
 
 #ifdef __cplusplus
data/{liblinear-1.94 → liblinear-1.95}/matlab/Makefile
@@ -7,26 +7,17 @@ CC ?= gcc
 CFLAGS = -Wall -Wconversion -O3 -fPIC -I$(MATLABDIR)/extern/include -I..
 
 MEX = $(MATLABDIR)/bin/mex
-MEX_OPTION = CC=$(CXX) CXX=$(CXX) CFLAGS="$(CFLAGS)" CXXFLAGS="$(CFLAGS)"
+MEX_OPTION = CC="$(CXX)" CXX="$(CXX)" CFLAGS="$(CFLAGS)" CXXFLAGS="$(CFLAGS)"
 # comment the following line if you use MATLAB on a 32-bit computer
 MEX_OPTION += -largeArrayDims
 MEX_EXT = $(shell $(MATLABDIR)/bin/mexext)
 
-OCTAVEDIR ?= /usr/include/octave
-OCTAVE_MEX = env CC=$(CXX) mkoctfile
-OCTAVE_MEX_OPTION = --mex
-OCTAVE_MEX_EXT = mex
-OCTAVE_CFLAGS = -Wall -O3 -fPIC -I$(OCTAVEDIR) -I..
-
 all: matlab
 
 matlab: binary
 
 octave:
-	@make MEX="$(OCTAVE_MEX)" MEX_OPTION="$(OCTAVE_MEX_OPTION)" \
-	MEX_EXT="$(OCTAVE_MEX_EXT)" CFLAGS="$(OCTAVE_CFLAGS)" \
-	binary
-
+	@echo "please type make under Octave"
 binary: train.$(MEX_EXT) predict.$(MEX_EXT) libsvmread.$(MEX_EXT) libsvmwrite.$(MEX_EXT)
 
 train.$(MEX_EXT): train.c ../linear.h ../tron.o ../linear.o linear_model_matlab.o ../blas/blas.a
data/{liblinear-1.94 → liblinear-1.95}/matlab/README: file without changes
data/{liblinear-1.94 → liblinear-1.95}/matlab/libsvmread.c
@@ -56,14 +56,13 @@ static char* readline(FILE *input)
 // read in a problem (in libsvm format)
 void read_problem(const char *filename, int nlhs, mxArray *plhs[])
 {
-	int max_index, min_index, inst_max_index, i;
-	long elements, k;
+	int max_index, min_index, inst_max_index;
+	size_t elements, k, i, l=0;
 	FILE *fp = fopen(filename,"r");
-	int l = 0;
 	char *endptr;
 	mwIndex *ir, *jc;
 	double *labels, *samples;
-	
+
 	if(fp == NULL)
 	{
 		mexPrintf("can't open input file %s\n",filename);
data/{liblinear-1.94 → liblinear-1.95}/matlab/libsvmwrite.c
@@ -26,9 +26,8 @@ static void fake_answer(int nlhs, mxArray *plhs[])
 void libsvmwrite(const char *filename, const mxArray *label_vec, const mxArray *instance_mat)
 {
 	FILE *fp = fopen(filename,"w");
-	int i, k, low, high, l;
-	mwIndex *ir, *jc;
-	int label_vector_row_num;
+	mwIndex *ir, *jc, k, low, high;
+	size_t i, l, label_vector_row_num;
 	double *samples, *labels;
 	mxArray *instance_mat_col; // instance sparse matrix in column format
 
@@ -52,8 +51,8 @@ void libsvmwrite(const char *filename, const mxArray *label_vec, const mxArray *instance_mat)
 	}
 
 	// the number of instance
-	l = (int) mxGetN(instance_mat_col);
-	label_vector_row_num = (int) mxGetM(label_vec);
+	l = mxGetN(instance_mat_col);
+	label_vector_row_num = mxGetM(label_vec);
 
 	if(label_vector_row_num!=l)
 	{
@@ -71,9 +70,9 @@ void libsvmwrite(const char *filename, const mxArray *label_vec, const mxArray *instance_mat)
 	{
 		fprintf(fp,"%g", labels[i]);
 
-		low = (int) jc[i], high = (int) jc[i+1];
+		low = jc[i], high = jc[i+1];
 		for(k=low;k<high;k++)
-			fprintf(fp," %d:%g", (int) ir[k]+1, samples[k]);
+			fprintf(fp," %zu:%g", (size_t)ir[k]+1, samples[k]);
 
 		fprintf(fp,"\n");
 	}
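Note: the last libsvmwrite.c hunk is the visible half of the 64-bit fix: mwIndex is 64 bits wide on 64-bit MATLAB, so the index must be printed through a matching conversion. A standalone illustration of the format pairing (not taken from the distribution):

#include <cstdio>
#include <cstddef>

int main()
{
    size_t idx = 5000000000u; // does not fit in a 32-bit int
    // "%zu" consumes exactly one size_t argument; passing the value to
    // "%d" would be undefined behavior and typically truncates it.
    std::printf("%zu:%g\n", idx, 0.25);
    return 0;
}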
data/{liblinear-1.94 → liblinear-1.95}/matlab/linear_model_matlab.h: file without changes
data/{liblinear-1.94 → liblinear-1.95}/matlab/make.m
@@ -1,21 +1,21 @@
-% This make.m is for MATLAB and OCTAVE under Windows, Mac, and Unix
-
-try
-	Type = ver;
-	% This part is for OCTAVE
-	if(strcmp(Type(1).Name, 'Octave') == 1)
-		mex libsvmread.c
-		mex libsvmwrite.c
-		mex train.c linear_model_matlab.c ../linear.cpp ../tron.cpp ../blas
-		mex predict.c linear_model_matlab.c ../linear.cpp ../tron.cpp ../blas
-	% This part is for MATLAB
-	% Add -largeArrayDims on 64-bit machines of MATLAB
-	else
-		mex CFLAGS="\$CFLAGS -std=c99" -largeArrayDims libsvmread.c
-		mex CFLAGS="\$CFLAGS -std=c99" -largeArrayDims libsvmwrite.c
-		mex CFLAGS="\$CFLAGS -std=c99" -largeArrayDims train.c linear_model_matlab.c ../linear.cpp ../tron.cpp
-		mex CFLAGS="\$CFLAGS -std=c99" -largeArrayDims predict.c linear_model_matlab.c ../linear.cpp ../tron.cpp
-	end
-catch
-	fprintf('If make.m fails, please check README about detailed instructions.\n');
-end
+% This make.m is for MATLAB and OCTAVE under Windows, Mac, and Unix
+
+try
+	Type = ver;
+	% This part is for OCTAVE
+	if(strcmp(Type(1).Name, 'Octave') == 1)
+		mex libsvmread.c
+		mex libsvmwrite.c
+		mex train.c linear_model_matlab.c ../linear.cpp ../tron.cpp ../blas/daxpy.c ../blas/ddot.c ../blas/dnrm2.c ../blas/dscal.c
+		mex predict.c linear_model_matlab.c ../linear.cpp ../tron.cpp ../blas/daxpy.c ../blas/ddot.c ../blas/dnrm2.c ../blas/dscal.c
+	% This part is for MATLAB
+	% Add -largeArrayDims on 64-bit machines of MATLAB
+	else
+		mex CFLAGS="\$CFLAGS -std=c99" -largeArrayDims libsvmread.c
+		mex CFLAGS="\$CFLAGS -std=c99" -largeArrayDims libsvmwrite.c
+		mex CFLAGS="\$CFLAGS -std=c99" -largeArrayDims train.c linear_model_matlab.c ../linear.cpp ../tron.cpp ../blas/daxpy.c ../blas/ddot.c ../blas/dnrm2.c ../blas/dscal.c
+		mex CFLAGS="\$CFLAGS -std=c99" -largeArrayDims predict.c linear_model_matlab.c ../linear.cpp ../tron.cpp ../blas/daxpy.c ../blas/ddot.c ../blas/dnrm2.c ../blas/dscal.c
+	end
+catch
+	fprintf('If make.m fails, please check README about detailed instructions.\n');
+end
data/{liblinear-1.94 → liblinear-1.95}/matlab/predict.c
@@ -23,8 +23,8 @@ int col_format_flag;
 
 void read_sparse_instance(const mxArray *prhs, int index, struct feature_node *x, int feature_number, double bias)
 {
-	int i, j, low, high;
-	mwIndex *ir, *jc;
+	int j;
+	mwIndex *ir, *jc, low, high, i;
 	double *samples;
 
 	ir = mxGetIr(prhs);
@@ -33,7 +33,7 @@ void read_sparse_instance(const mxArray *prhs, int index, struct feature_node *x, int feature_number, double bias)
 
 	// each column is one instance
 	j = 0;
-	low = (int) jc[index], high = (int) jc[index+1];
+	low = jc[index], high = jc[index+1];
 	for(i=low; i<high && (int) (ir[i])<feature_number; i++)
 	{
 		x[j].index = (int) ir[i]+1;
@@ -176,9 +176,7 @@ void do_predict(int nlhs, mxArray *plhs[], const mxArray *prhs[], struct model *model_)
 		++total;
 	}
 
-	if(model_->param.solver_type==L2R_L2LOSS_SVR ||
-		model_->param.solver_type==L2R_L1LOSS_SVR_DUAL ||
-		model_->param.solver_type==L2R_L2LOSS_SVR_DUAL)
+	if(check_regression_model(model_))
 	{
 		info("Mean squared error = %g (regression)\n",error/total);
 		info("Squared correlation coefficient = %g (regression)\n",
data/{liblinear-1.94 → liblinear-1.95}/matlab/train.c
@@ -252,9 +252,10 @@ static void fake_answer(int nlhs, mxArray *plhs[])
 
 int read_problem_sparse(const mxArray *label_vec, const mxArray *instance_mat)
 {
-	int i, j, k, low, high;
-	mwIndex *ir, *jc;
-	int elements, max_index, num_samples, label_vector_row_num;
+	mwIndex *ir, *jc, low, high, k;
+	// using size_t due to the output type of matlab functions
+	size_t i, j, l, elements, max_index, label_vector_row_num;
+	mwSize num_samples;
 	double *samples, *labels;
 	mxArray *instance_mat_col; // instance sparse matrix in column format
 
@@ -279,10 +280,11 @@ int read_problem_sparse(const mxArray *label_vec, const mxArray *instance_mat)
 	}
 
 	// the number of instance
-	prob.l = (int) mxGetN(instance_mat_col);
-	label_vector_row_num = (int) mxGetM(label_vec);
+	l = mxGetN(instance_mat_col);
+	label_vector_row_num = mxGetM(label_vec);
+	prob.l = (int) l;
 
-	if(label_vector_row_num!=prob.l)
+	if(label_vector_row_num!=l)
 	{
 		mexPrintf("Length of label vector does not match # of instances.\n");
 		return -1;
@@ -294,23 +296,23 @@ int read_problem_sparse(const mxArray *label_vec, const mxArray *instance_mat)
 	ir = mxGetIr(instance_mat_col);
 	jc = mxGetJc(instance_mat_col);
 
-	num_samples = (int) mxGetNzmax(instance_mat_col);
+	num_samples = mxGetNzmax(instance_mat_col);
 
-	elements = num_samples + prob.l*2;
-	max_index = (int) mxGetM(instance_mat_col);
+	elements = num_samples + l*2;
+	max_index = mxGetM(instance_mat_col);
 
-	prob.y = Malloc(double, prob.l);
-	prob.x = Malloc(struct feature_node*, prob.l);
+	prob.y = Malloc(double, l);
+	prob.x = Malloc(struct feature_node*, l);
 	x_space = Malloc(struct feature_node, elements);
 
 	prob.bias=bias;
 
 	j = 0;
-	for(i=0;i<prob.l;i++)
+	for(i=0;i<l;i++)
 	{
 		prob.x[i] = &x_space[j];
 		prob.y[i] = labels[i];
-		low = (int) jc[i], high = (int) jc[i+1];
+		low = jc[i], high = jc[i+1];
 		for(k=low;k<high;k++)
 		{
 			x_space[j].index = (int) ir[k]+1;
@@ -319,7 +321,7 @@ int read_problem_sparse(const mxArray *label_vec, const mxArray *instance_mat)
 		}
 		if(prob.bias>=0)
 		{
-			x_space[j].index = max_index+1;
+			x_space[j].index = (int) max_index+1;
 			x_space[j].value = prob.bias;
 			j++;
 		}
@@ -327,9 +329,9 @@ int read_problem_sparse(const mxArray *label_vec, const mxArray *instance_mat)
 	}
 
 	if(prob.bias>=0)
-		prob.n = max_index+1;
+		prob.n = (int) max_index+1;
 	else
-		prob.n = max_index;
+		prob.n = (int) max_index;
 
 	return 0;
 }
@@ -356,12 +358,20 @@ void mexFunction( int nlhs, mxArray *plhs[],
 {
 	int err=0;
 
-	if(!mxIsDouble(prhs[0]) || !mxIsDouble(prhs[1])) {
+	if(!mxIsDouble(prhs[0]) || !mxIsDouble(prhs[1]))
+	{
 		mexPrintf("Error: label vector and instance matrix must be double\n");
 		fake_answer(nlhs, plhs);
 		return;
 	}
 
+	if(mxIsSparse(prhs[0]))
+	{
+		mexPrintf("Error: label vector should not be in sparse format");
+		fake_answer(nlhs, plhs);
+		return;
+	}
+
 	if(parse_command_line(nrhs, prhs, NULL))
 	{
 		exit_with_help();