liblinear-ruby 1.0.1 → 1.0.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/README.md +1 -1
- data/ext/blasp.h +8 -8
- data/ext/daxpy.c +3 -3
- data/ext/ddot.c +3 -3
- data/ext/dnrm2.c +7 -7
- data/ext/dscal.c +4 -4
- data/ext/liblinear_wrap.cxx +382 -382
- data/ext/linear.cpp +44 -55
- data/ext/linear.h +5 -1
- data/ext/tron.cpp +13 -5
- data/ext/tron.h +1 -1
- data/lib/liblinear.rb +2 -0
- data/lib/liblinear/version.rb +1 -1
- metadata +2 -41
- data/liblinear-2.1/COPYRIGHT +0 -31
- data/liblinear-2.1/Makefile +0 -37
- data/liblinear-2.1/Makefile.win +0 -24
- data/liblinear-2.1/README +0 -600
- data/liblinear-2.1/blas/Makefile +0 -22
- data/liblinear-2.1/blas/blas.h +0 -25
- data/liblinear-2.1/blas/blasp.h +0 -438
- data/liblinear-2.1/blas/daxpy.c +0 -57
- data/liblinear-2.1/blas/ddot.c +0 -58
- data/liblinear-2.1/blas/dnrm2.c +0 -70
- data/liblinear-2.1/blas/dscal.c +0 -52
- data/liblinear-2.1/heart_scale +0 -270
- data/liblinear-2.1/linear.cpp +0 -3053
- data/liblinear-2.1/linear.def +0 -22
- data/liblinear-2.1/linear.h +0 -79
- data/liblinear-2.1/matlab/Makefile +0 -49
- data/liblinear-2.1/matlab/README +0 -208
- data/liblinear-2.1/matlab/libsvmread.c +0 -212
- data/liblinear-2.1/matlab/libsvmwrite.c +0 -119
- data/liblinear-2.1/matlab/linear_model_matlab.c +0 -176
- data/liblinear-2.1/matlab/linear_model_matlab.h +0 -2
- data/liblinear-2.1/matlab/make.m +0 -22
- data/liblinear-2.1/matlab/predict.c +0 -341
- data/liblinear-2.1/matlab/train.c +0 -492
- data/liblinear-2.1/predict.c +0 -243
- data/liblinear-2.1/python/Makefile +0 -4
- data/liblinear-2.1/python/README +0 -380
- data/liblinear-2.1/python/liblinear.py +0 -323
- data/liblinear-2.1/python/liblinearutil.py +0 -270
- data/liblinear-2.1/train.c +0 -449
- data/liblinear-2.1/tron.cpp +0 -241
- data/liblinear-2.1/tron.h +0 -35
- data/liblinear-2.1/windows/liblinear.dll +0 -0
- data/liblinear-2.1/windows/libsvmread.mexw64 +0 -0
- data/liblinear-2.1/windows/libsvmwrite.mexw64 +0 -0
- data/liblinear-2.1/windows/predict.exe +0 -0
- data/liblinear-2.1/windows/predict.mexw64 +0 -0
- data/liblinear-2.1/windows/train.exe +0 -0
- data/liblinear-2.1/windows/train.mexw64 +0 -0
data/liblinear-2.1/linear.def
DELETED
@@ -1,22 +0,0 @@
- LIBRARY liblinear
- EXPORTS
- 	train @1
- 	cross_validation @2
- 	save_model @3
- 	load_model @4
- 	get_nr_feature @5
- 	get_nr_class @6
- 	get_labels @7
- 	predict_values @8
- 	predict @9
- 	predict_probability @10
- 	free_and_destroy_model @11
- 	free_model_content @12
- 	destroy_param @13
- 	check_parameter @14
- 	check_probability_model @15
- 	set_print_string_function @16
- 	get_decfun_coef @17
- 	get_decfun_bias @18
- 	check_regression_model @19
- 	find_parameter_C @20
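The removed linear.def is the export table for the prebuilt Windows DLL that also disappears in this release (data/liblinear-2.1/windows/liblinear.dll); each exported name corresponds to a declaration in linear.h, shown next. As a rough sketch of how such an export table is consumed, a Windows caller could resolve a couple of these exports at runtime along the following lines. This is illustrative only, not code from the package: the DLL name on the search path, the "model.txt" file, and the hand-written typedefs are assumptions.

#include <windows.h>
#include <stdio.h>

/* Local mirrors of the declarations in linear.h; struct model stays opaque here. */
struct feature_node { int index; double value; };
struct model;

typedef struct model *(*load_model_fn)(const char *);
typedef double (*predict_fn)(const struct model *, const struct feature_node *);

int main(void)
{
	HMODULE dll = LoadLibraryA("liblinear.dll");   /* assumed to be on the DLL search path */
	if (!dll) { fprintf(stderr, "liblinear.dll not found\n"); return 1; }

	/* The names passed to GetProcAddress match the EXPORTS list above. */
	load_model_fn load_model = (load_model_fn)GetProcAddress(dll, "load_model");
	predict_fn    predict    = (predict_fn)GetProcAddress(dll, "predict");
	if (!load_model || !predict) { fprintf(stderr, "missing export\n"); return 1; }

	struct model *m = load_model("model.txt");     /* placeholder model file */
	struct feature_node x[] = { {1, 0.5}, {3, -1.2}, {-1, 0.0} };  /* index -1 terminates */
	if (m) printf("predicted label: %g\n", predict(m, x));

	FreeLibrary(dll);
	return 0;
}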
data/liblinear-2.1/linear.h
DELETED
@@ -1,79 +0,0 @@
- #ifndef _LIBLINEAR_H
- #define _LIBLINEAR_H
-
- #ifdef __cplusplus
- extern "C" {
- #endif
-
- struct feature_node
- {
- 	int index;
- 	double value;
- };
-
- struct problem
- {
- 	int l, n;
- 	double *y;
- 	struct feature_node **x;
- 	double bias;            /* < 0 if no bias term */
- };
-
- enum { L2R_LR, L2R_L2LOSS_SVC_DUAL, L2R_L2LOSS_SVC, L2R_L1LOSS_SVC_DUAL, MCSVM_CS, L1R_L2LOSS_SVC, L1R_LR, L2R_LR_DUAL, L2R_L2LOSS_SVR = 11, L2R_L2LOSS_SVR_DUAL, L2R_L1LOSS_SVR_DUAL }; /* solver_type */
-
- struct parameter
- {
- 	int solver_type;
-
- 	/* these are for training only */
- 	double eps;             /* stopping criteria */
- 	double C;
- 	int nr_weight;
- 	int *weight_label;
- 	double* weight;
- 	double p;
- 	double *init_sol;
- };
-
- struct model
- {
- 	struct parameter param;
- 	int nr_class;           /* number of classes */
- 	int nr_feature;
- 	double *w;
- 	int *label;             /* label of each class */
- 	double bias;
- };
-
- struct model* train(const struct problem *prob, const struct parameter *param);
- void cross_validation(const struct problem *prob, const struct parameter *param, int nr_fold, double *target);
- void find_parameter_C(const struct problem *prob, const struct parameter *param, int nr_fold, double start_C, double max_C, double *best_C, double *best_rate);
-
- double predict_values(const struct model *model_, const struct feature_node *x, double* dec_values);
- double predict(const struct model *model_, const struct feature_node *x);
- double predict_probability(const struct model *model_, const struct feature_node *x, double* prob_estimates);
-
- int save_model(const char *model_file_name, const struct model *model_);
- struct model *load_model(const char *model_file_name);
-
- int get_nr_feature(const struct model *model_);
- int get_nr_class(const struct model *model_);
- void get_labels(const struct model *model_, int* label);
- double get_decfun_coef(const struct model *model_, int feat_idx, int label_idx);
- double get_decfun_bias(const struct model *model_, int label_idx);
-
- void free_model_content(struct model *model_ptr);
- void free_and_destroy_model(struct model **model_ptr_ptr);
- void destroy_param(struct parameter *param);
-
- const char *check_parameter(const struct problem *prob, const struct parameter *param);
- int check_probability_model(const struct model *model);
- int check_regression_model(const struct model *model);
- void set_print_string_function(void (*print_func) (const char*));
-
- #ifdef __cplusplus
- }
- #endif
-
- #endif /* _LIBLINEAR_H */
-
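The linear.h removed above declares the complete training and prediction API (essentially the same header that remains, with small changes, under data/ext/linear.h in this release). To make those declarations concrete, training on a toy two-instance problem and predicting with the result would look roughly like the following. This is a minimal sketch written against the header above; the solver choice, C, eps, and toy data are illustrative, not taken from the package.

#include <stdio.h>
#include "linear.h"   /* the header shown above */

int main(void)
{
	/* Two training instances with two features each; every sparse
	 * feature vector is terminated by a node with index == -1. */
	struct feature_node x0[] = { {1, 1.0}, {2, 0.0}, {-1, 0.0} };
	struct feature_node x1[] = { {1, 0.0}, {2, 1.0}, {-1, 0.0} };
	struct feature_node *rows[] = { x0, x1 };
	double y[] = { +1, -1 };

	struct problem prob = { 2, 2, y, rows, -1 };  /* l, n, y, x, bias (< 0: no bias term) */

	struct parameter param = { 0 };
	param.solver_type = L2R_L2LOSS_SVC_DUAL;      /* illustrative solver choice */
	param.C = 1;
	param.eps = 0.1;

	const char *err = check_parameter(&prob, &param);
	if (err) { fprintf(stderr, "%s\n", err); return 1; }

	struct model *m = train(&prob, &param);
	printf("predicted label of x0: %g\n", predict(m, x0));

	free_and_destroy_model(&m);
	destroy_param(&param);
	return 0;
}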
data/liblinear-2.1/matlab/Makefile
DELETED
@@ -1,49 +0,0 @@
- # This Makefile is used under Linux
-
- MATLABDIR ?= /usr/local/matlab
- CXX ?= g++
- #CXX = g++-3.3
- CC ?= gcc
- CFLAGS = -Wall -Wconversion -O3 -fPIC -I$(MATLABDIR)/extern/include -I..
-
- MEX = $(MATLABDIR)/bin/mex
- MEX_OPTION = CC="$(CXX)" CXX="$(CXX)" CFLAGS="$(CFLAGS)" CXXFLAGS="$(CFLAGS)"
- # comment the following line if you use MATLAB on a 32-bit computer
- MEX_OPTION += -largeArrayDims
- MEX_EXT = $(shell $(MATLABDIR)/bin/mexext)
-
- all: matlab
-
- matlab: binary
-
- octave:
- 	@echo "please type make under Octave"
- binary: train.$(MEX_EXT) predict.$(MEX_EXT) libsvmread.$(MEX_EXT) libsvmwrite.$(MEX_EXT)
-
- train.$(MEX_EXT): train.c ../linear.h ../tron.o ../linear.o linear_model_matlab.o ../blas/blas.a
- 	$(MEX) $(MEX_OPTION) train.c ../tron.o ../linear.o linear_model_matlab.o ../blas/blas.a
-
- predict.$(MEX_EXT): predict.c ../linear.h ../tron.o ../linear.o linear_model_matlab.o ../blas/blas.a
- 	$(MEX) $(MEX_OPTION) predict.c ../tron.o ../linear.o linear_model_matlab.o ../blas/blas.a
-
- libsvmread.$(MEX_EXT): libsvmread.c
- 	$(MEX) $(MEX_OPTION) libsvmread.c
-
- libsvmwrite.$(MEX_EXT): libsvmwrite.c
- 	$(MEX) $(MEX_OPTION) libsvmwrite.c
-
- linear_model_matlab.o: linear_model_matlab.c ../linear.h
- 	$(CXX) $(CFLAGS) -c linear_model_matlab.c
-
- ../linear.o: ../linear.cpp ../linear.h
- 	make -C .. linear.o
-
- ../tron.o: ../tron.cpp ../tron.h
- 	make -C .. tron.o
-
- ../blas/blas.a: ../blas/*.c ../blas/*.h
- 	make -C ../blas OPTFLAGS='$(CFLAGS)' CC='$(CC)';
-
- clean:
- 	make -C ../blas clean
- 	rm -f *~ *.o *.mex* *.obj ../linear.o ../tron.o
data/liblinear-2.1/matlab/README
DELETED
@@ -1,208 +0,0 @@
- --------------------------------------------
- --- MATLAB/OCTAVE interface of LIBLINEAR ---
- --------------------------------------------
-
- Table of Contents
- =================
-
- - Introduction
- - Installation
- - Usage
- - Returned Model Structure
- - Other Utilities
- - Examples
- - Additional Information
-
-
- Introduction
- ============
-
- This tool provides a simple interface to LIBLINEAR, a library for
- large-scale regularized linear classification and regression
- (http://www.csie.ntu.edu.tw/~cjlin/liblinear). It is very easy to use
- as the usage and the way of specifying parameters are the same as that
- of LIBLINEAR.
-
- Installation
- ============
-
- On Windows systems, pre-built binary files are already in the
- directory '..\windows', so no need to conduct installation. Now we
- provide binary files only for 64bit MATLAB on Windows. If you would
- like to re-build the package, please rely on the following steps.
-
- We recommend using make.m on both MATLAB and OCTAVE. Just type 'make'
- to build 'libsvmread.mex', 'libsvmwrite.mex', 'train.mex', and
- 'predict.mex'.
-
- On MATLAB or Octave:
-
- >> make
-
- If make.m does not work on MATLAB (especially for Windows), try 'mex
- -setup' to choose a suitable compiler for mex. Make sure your compiler
- is accessible and workable. Then type 'make' to start the
- installation.
-
- Example:
-
- matlab>> mex -setup
- (ps: MATLAB will show the following messages to setup default compiler.)
- Please choose your compiler for building external interface (MEX) files:
- Would you like mex to locate installed compilers [y]/n? y
- Select a compiler:
- [1] Microsoft Visual C/C++ version 7.1 in C:\Program Files\Microsoft Visual Studio
- [0] None
- Compiler: 1
- Please verify your choices:
- Compiler: Microsoft Visual C/C++ 7.1
- Location: C:\Program Files\Microsoft Visual Studio
- Are these correct?([y]/n): y
-
- matlab>> make
-
- On Unix systems, if neither make.m nor 'mex -setup' works, please use
- Makefile and type 'make' in a command window. Note that we assume
- your MATLAB is installed in '/usr/local/matlab'. If not, please change
- MATLABDIR in Makefile.
-
- Example:
- linux> make
-
- To use octave, type 'make octave':
-
- Example:
- linux> make octave
-
- For a list of supported/compatible compilers for MATLAB, please check
- the following page:
-
- http://www.mathworks.com/support/compilers/current_release/
-
- Usage
- =====
-
- matlab> model = train(training_label_vector, training_instance_matrix [,'liblinear_options', 'col']);
-
- -training_label_vector:
-     An m by 1 vector of training labels. (type must be double)
- -training_instance_matrix:
-     An m by n matrix of m training instances with n features.
-     It must be a sparse matrix. (type must be double)
- -liblinear_options:
-     A string of training options in the same format as that of LIBLINEAR.
- -col:
-     if 'col' is set, each column of training_instance_matrix is a data instance. Otherwise each row is a data instance.
-
- matlab> [predicted_label, accuracy, decision_values/prob_estimates] = predict(testing_label_vector, testing_instance_matrix, model [, 'liblinear_options', 'col']);
- matlab> [predicted_label] = predict(testing_label_vector, testing_instance_matrix, model [, 'liblinear_options', 'col']);
-
- -testing_label_vector:
-     An m by 1 vector of prediction labels. If labels of test
-     data are unknown, simply use any random values. (type must be double)
- -testing_instance_matrix:
-     An m by n matrix of m testing instances with n features.
-     It must be a sparse matrix. (type must be double)
- -model:
-     The output of train.
- -liblinear_options:
-     A string of testing options in the same format as that of LIBLINEAR.
- -col:
-     if 'col' is set, each column of testing_instance_matrix is a data instance. Otherwise each row is a data instance.
-
- Returned Model Structure
- ========================
-
- The 'train' function returns a model which can be used for future
- prediction. It is a structure and is organized as [Parameters, nr_class,
- nr_feature, bias, Label, w]:
-
- -Parameters: Parameters (now only solver type is provided)
- -nr_class: number of classes; = 2 for regression
- -nr_feature: number of features in training data (without including the bias term)
- -bias: If >= 0, we assume one additional feature is added to the end
-     of each data instance.
- -Label: label of each class; empty for regression
- -w: a nr_w-by-n matrix for the weights, where n is nr_feature
-     or nr_feature+1 depending on the existence of the bias term.
-     nr_w is 1 if nr_class=2 and -s is not 4 (i.e., not
-     multi-class svm by Crammer and Singer). It is
-     nr_class otherwise.
-
- If the '-v' option is specified, cross validation is conducted and the
- returned model is just a scalar: cross-validation accuracy for
- classification and mean-squared error for regression. If the '-C' option
- is specified, the best parameter C is found by cross validation. The
- returned model is a two dimensional vector, where the first value is
- the best C and the second value is the corresponding cross-validation
- accuracy. The parameter selection utility is supported by only -s 0
- and -s 2.
-
- Result of Prediction
- ====================
-
- The function 'predict' has three outputs. The first one,
- predicted_label, is a vector of predicted labels. The second output,
- accuracy, is a vector including accuracy (for classification), mean
- squared error, and squared correlation coefficient (for regression).
- The third is a matrix containing decision values or probability
- estimates (if '-b 1' is specified). If k is the number of classes
- and k' is the number of classifiers (k'=1 if k=2, otherwise k'=k), for decision values,
- each row includes results of k' binary linear classifiers. For probabilities,
- each row contains k values indicating the probability that the testing instance is in
- each class. Note that the order of classes here is the same as 'Label'
- field in the model structure.
-
- Other Utilities
- ===============
-
- A matlab function libsvmread reads files in LIBSVM format:
-
- [label_vector, instance_matrix] = libsvmread('data.txt');
-
- Two outputs are labels and instances, which can then be used as inputs
- of svmtrain or svmpredict.
-
- A matlab function libsvmwrite writes Matlab matrix to a file in LIBSVM format:
-
- libsvmwrite('data.txt', label_vector, instance_matrix]
-
- The instance_matrix must be a sparse matrix. (type must be double)
- For windows, `libsvmread.mexw64' and `libsvmwrite.mexw64' are ready in
- the directory `..\windows'.
-
- These codes are prepared by Rong-En Fan and Kai-Wei Chang from National
- Taiwan University.
-
- Examples
- ========
-
- Train and test on the provided data heart_scale:
-
- matlab> [heart_scale_label, heart_scale_inst] = libsvmread('../heart_scale');
- matlab> model = train(heart_scale_label, heart_scale_inst, '-c 1');
- matlab> [predict_label, accuracy, dec_values] = predict(heart_scale_label, heart_scale_inst, model); % test the training data
-
- Note that for testing, you can put anything in the testing_label_vector.
-
- For probability estimates, you need '-b 1' only in the testing phase:
-
- matlab> [predict_label, accuracy, prob_estimates] = predict(heart_scale_label, heart_scale_inst, model, '-b 1');
-
- Use the best parameter to train (only supported by -s 0 and -s 2):
-
- matlab> best = train(heart_scale_label, heart_scale_inst, '-C -s 0');
- matlab> model = train(heart_scale_label, heart_scale_inst, sprintf('-c %f -s 0', best(1))); % use the same solver: -s 0
-
- Additional Information
- ======================
-
- Please cite LIBLINEAR as follows
-
- R.-E. Fan, K.-W. Chang, C.-J. Hsieh, X.-R. Wang, and C.-J. Lin.
- LIBLINEAR: A Library for Large Linear Classification, Journal of
- Machine Learning Research 9(2008), 1871-1874. Software available at
- http://www.csie.ntu.edu.tw/~cjlin/liblinear
-
- For any question, please contact Chih-Jen Lin <cjlin@csie.ntu.edu.tw>.
-
data/liblinear-2.1/matlab/libsvmread.c
DELETED
@@ -1,212 +0,0 @@
- #include <stdio.h>
- #include <string.h>
- #include <stdlib.h>
- #include <ctype.h>
- #include <errno.h>
-
- #include "mex.h"
-
- #ifdef MX_API_VER
- #if MX_API_VER < 0x07030000
- typedef int mwIndex;
- #endif
- #endif
- #ifndef max
- #define max(x,y) (((x)>(y))?(x):(y))
- #endif
- #ifndef min
- #define min(x,y) (((x)<(y))?(x):(y))
- #endif
-
- void exit_with_help()
- {
- 	mexPrintf(
- 	"Usage: [label_vector, instance_matrix] = libsvmread('filename');\n"
- 	);
- }
-
- static void fake_answer(int nlhs, mxArray *plhs[])
- {
- 	int i;
- 	for(i=0;i<nlhs;i++)
- 		plhs[i] = mxCreateDoubleMatrix(0, 0, mxREAL);
- }
-
- static char *line;
- static int max_line_len;
-
- static char* readline(FILE *input)
- {
- 	int len;
-
- 	if(fgets(line,max_line_len,input) == NULL)
- 		return NULL;
-
- 	while(strrchr(line,'\n') == NULL)
- 	{
- 		max_line_len *= 2;
- 		line = (char *) realloc(line, max_line_len);
- 		len = (int) strlen(line);
- 		if(fgets(line+len,max_line_len-len,input) == NULL)
- 			break;
- 	}
- 	return line;
- }
-
- // read in a problem (in libsvm format)
- void read_problem(const char *filename, int nlhs, mxArray *plhs[])
- {
- 	int max_index, min_index, inst_max_index;
- 	size_t elements, k, i, l=0;
- 	FILE *fp = fopen(filename,"r");
- 	char *endptr;
- 	mwIndex *ir, *jc;
- 	double *labels, *samples;
-
- 	if(fp == NULL)
- 	{
- 		mexPrintf("can't open input file %s\n",filename);
- 		fake_answer(nlhs, plhs);
- 		return;
- 	}
-
- 	max_line_len = 1024;
- 	line = (char *) malloc(max_line_len*sizeof(char));
-
- 	max_index = 0;
- 	min_index = 1; // our index starts from 1
- 	elements = 0;
- 	while(readline(fp) != NULL)
- 	{
- 		char *idx, *val;
- 		// features
- 		int index = 0;
-
- 		inst_max_index = -1; // strtol gives 0 if wrong format, and precomputed kernel has <index> start from 0
- 		strtok(line," \t"); // label
- 		while (1)
- 		{
- 			idx = strtok(NULL,":"); // index:value
- 			val = strtok(NULL," \t");
- 			if(val == NULL)
- 				break;
-
- 			errno = 0;
- 			index = (int) strtol(idx,&endptr,10);
- 			if(endptr == idx || errno != 0 || *endptr != '\0' || index <= inst_max_index)
- 			{
- 				mexPrintf("Wrong input format at line %d\n",l+1);
- 				fake_answer(nlhs, plhs);
- 				return;
- 			}
- 			else
- 				inst_max_index = index;
-
- 			min_index = min(min_index, index);
- 			elements++;
- 		}
- 		max_index = max(max_index, inst_max_index);
- 		l++;
- 	}
- 	rewind(fp);
-
- 	// y
- 	plhs[0] = mxCreateDoubleMatrix(l, 1, mxREAL);
- 	// x^T
- 	if (min_index <= 0)
- 		plhs[1] = mxCreateSparse(max_index-min_index+1, l, elements, mxREAL);
- 	else
- 		plhs[1] = mxCreateSparse(max_index, l, elements, mxREAL);
-
- 	labels = mxGetPr(plhs[0]);
- 	samples = mxGetPr(plhs[1]);
- 	ir = mxGetIr(plhs[1]);
- 	jc = mxGetJc(plhs[1]);
-
- 	k=0;
- 	for(i=0;i<l;i++)
- 	{
- 		char *idx, *val, *label;
- 		jc[i] = k;
-
- 		readline(fp);
-
- 		label = strtok(line," \t\n");
- 		if(label == NULL)
- 		{
- 			mexPrintf("Empty line at line %d\n",i+1);
- 			fake_answer(nlhs, plhs);
- 			return;
- 		}
- 		labels[i] = strtod(label,&endptr);
- 		if(endptr == label || *endptr != '\0')
- 		{
- 			mexPrintf("Wrong input format at line %d\n",i+1);
- 			fake_answer(nlhs, plhs);
- 			return;
- 		}
-
- 		// features
- 		while(1)
- 		{
- 			idx = strtok(NULL,":");
- 			val = strtok(NULL," \t");
- 			if(val == NULL)
- 				break;
-
- 			ir[k] = (mwIndex) (strtol(idx,&endptr,10) - min_index); // precomputed kernel has <index> start from 0
-
- 			errno = 0;
- 			samples[k] = strtod(val,&endptr);
- 			if (endptr == val || errno != 0 || (*endptr != '\0' && !isspace(*endptr)))
- 			{
- 				mexPrintf("Wrong input format at line %d\n",i+1);
- 				fake_answer(nlhs, plhs);
- 				return;
- 			}
- 			++k;
- 		}
- 	}
- 	jc[l] = k;
-
- 	fclose(fp);
- 	free(line);
-
- 	{
- 		mxArray *rhs[1], *lhs[1];
- 		rhs[0] = plhs[1];
- 		if(mexCallMATLAB(1, lhs, 1, rhs, "transpose"))
- 		{
- 			mexPrintf("Error: cannot transpose problem\n");
- 			fake_answer(nlhs, plhs);
- 			return;
- 		}
- 		plhs[1] = lhs[0];
- 	}
- }
-
- void mexFunction( int nlhs, mxArray *plhs[],
- 		int nrhs, const mxArray *prhs[] )
- {
- 	char filename[256];
-
- 	if(nrhs != 1 || nlhs != 2)
- 	{
- 		exit_with_help();
- 		fake_answer(nlhs, plhs);
- 		return;
- 	}
-
- 	mxGetString(prhs[0], filename, mxGetN(prhs[0]) + 1);
-
- 	if(filename == NULL)
- 	{
- 		mexPrintf("Error: filename is NULL\n");
- 		return;
- 	}
-
- 	read_problem(filename, nlhs, plhs);
-
- 	return;
- }
-
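For reference, read_problem above parses the plain-text LIBSVM/LIBLINEAR data format: each line is a label followed by whitespace-separated index:value pairs with increasing indices (normally starting from 1), which is exactly what the two strtok loops enforce before the data is packed into a sparse mxArray and transposed. A file it accepts looks like this (the values are made up for illustration, in the style of the bundled heart_scale file):

    +1 1:0.583 3:1 10:-0.25
    -1 2:0.125 3:-1 9:0.5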