liblinear-ruby 0.0.1

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
Files changed (80)
  1. checksums.yaml +7 -0
  2. data/.gitignore +19 -0
  3. data/Gemfile +4 -0
  4. data/LICENSE.txt +22 -0
  5. data/README.md +46 -0
  6. data/Rakefile +1 -0
  7. data/ext/Makefile +237 -0
  8. data/ext/blas.h +25 -0
  9. data/ext/blasp.h +430 -0
  10. data/ext/daxpy.c +49 -0
  11. data/ext/ddot.c +50 -0
  12. data/ext/dnrm2.c +62 -0
  13. data/ext/dscal.c +44 -0
  14. data/ext/extconf.rb +12 -0
  15. data/ext/liblinear_wrap.cxx +4646 -0
  16. data/ext/linear.cpp +2811 -0
  17. data/ext/linear.h +74 -0
  18. data/ext/linear.rb +357 -0
  19. data/ext/tron.cpp +235 -0
  20. data/ext/tron.h +34 -0
  21. data/lib/liblinear.rb +89 -0
  22. data/lib/liblinear/error.rb +4 -0
  23. data/lib/liblinear/model.rb +66 -0
  24. data/lib/liblinear/parameter.rb +42 -0
  25. data/lib/liblinear/problem.rb +55 -0
  26. data/lib/liblinear/version.rb +3 -0
  27. data/liblinear-1.93/COPYRIGHT +31 -0
  28. data/liblinear-1.93/Makefile +37 -0
  29. data/liblinear-1.93/Makefile.win +30 -0
  30. data/liblinear-1.93/README +531 -0
  31. data/liblinear-1.93/blas/Makefile +22 -0
  32. data/liblinear-1.93/blas/blas.a +0 -0
  33. data/liblinear-1.93/blas/blas.h +25 -0
  34. data/liblinear-1.93/blas/blasp.h +430 -0
  35. data/liblinear-1.93/blas/daxpy.c +49 -0
  36. data/liblinear-1.93/blas/daxpy.o +0 -0
  37. data/liblinear-1.93/blas/ddot.c +50 -0
  38. data/liblinear-1.93/blas/ddot.o +0 -0
  39. data/liblinear-1.93/blas/dnrm2.c +62 -0
  40. data/liblinear-1.93/blas/dnrm2.o +0 -0
  41. data/liblinear-1.93/blas/dscal.c +44 -0
  42. data/liblinear-1.93/blas/dscal.o +0 -0
  43. data/liblinear-1.93/heart_scale +270 -0
  44. data/liblinear-1.93/linear.cpp +2811 -0
  45. data/liblinear-1.93/linear.def +18 -0
  46. data/liblinear-1.93/linear.h +74 -0
  47. data/liblinear-1.93/linear.o +0 -0
  48. data/liblinear-1.93/matlab/Makefile +58 -0
  49. data/liblinear-1.93/matlab/README +197 -0
  50. data/liblinear-1.93/matlab/libsvmread.c +212 -0
  51. data/liblinear-1.93/matlab/libsvmwrite.c +106 -0
  52. data/liblinear-1.93/matlab/linear_model_matlab.c +176 -0
  53. data/liblinear-1.93/matlab/linear_model_matlab.h +2 -0
  54. data/liblinear-1.93/matlab/make.m +21 -0
  55. data/liblinear-1.93/matlab/predict.c +331 -0
  56. data/liblinear-1.93/matlab/train.c +418 -0
  57. data/liblinear-1.93/predict +0 -0
  58. data/liblinear-1.93/predict.c +245 -0
  59. data/liblinear-1.93/python/Makefile +4 -0
  60. data/liblinear-1.93/python/README +343 -0
  61. data/liblinear-1.93/python/liblinear.py +277 -0
  62. data/liblinear-1.93/python/liblinearutil.py +250 -0
  63. data/liblinear-1.93/ruby/liblinear.i +41 -0
  64. data/liblinear-1.93/ruby/liblinear_wrap.cxx +4646 -0
  65. data/liblinear-1.93/ruby/linear.h +74 -0
  66. data/liblinear-1.93/ruby/linear.o +0 -0
  67. data/liblinear-1.93/train +0 -0
  68. data/liblinear-1.93/train.c +399 -0
  69. data/liblinear-1.93/tron.cpp +235 -0
  70. data/liblinear-1.93/tron.h +34 -0
  71. data/liblinear-1.93/tron.o +0 -0
  72. data/liblinear-1.93/windows/liblinear.dll +0 -0
  73. data/liblinear-1.93/windows/libsvmread.mexw64 +0 -0
  74. data/liblinear-1.93/windows/libsvmwrite.mexw64 +0 -0
  75. data/liblinear-1.93/windows/predict.exe +0 -0
  76. data/liblinear-1.93/windows/predict.mexw64 +0 -0
  77. data/liblinear-1.93/windows/train.exe +0 -0
  78. data/liblinear-1.93/windows/train.mexw64 +0 -0
  79. data/liblinear-ruby.gemspec +24 -0
  80. metadata +152 -0
data/liblinear-1.93/linear.def
@@ -0,0 +1,18 @@
+ LIBRARY liblinear
+ EXPORTS
+ train @1
+ cross_validation @2
+ save_model @3
+ load_model @4
+ get_nr_feature @5
+ get_nr_class @6
+ get_labels @7
+ predict_values @8
+ predict @9
+ predict_probability @10
+ free_and_destroy_model @11
+ free_model_content @12
+ destroy_param @13
+ check_parameter @14
+ check_probability_model @15
+ set_print_string_function @16
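The .def file above lists the symbols exported by the prebuilt windows/liblinear.dll shipped in this package. As a rough illustration only (not part of the package), a Windows client could resolve one of these exports at run time roughly as follows; the DLL path and the train_fn typedef are assumptions made for this sketch.

    /* Sketch: resolving the exported `train` symbol from liblinear.dll at run time. */
    #include <windows.h>
    #include <stdio.h>
    #include "linear.h"   /* struct problem, struct parameter, struct model */

    /* Signature mirrors the `train` declaration in linear.h. */
    typedef struct model* (*train_fn)(const struct problem*, const struct parameter*);

    int main(void)
    {
        HMODULE dll = LoadLibraryA("liblinear.dll");   /* DLL location is an assumption */
        if (dll == NULL) { fprintf(stderr, "cannot load liblinear.dll\n"); return 1; }

        /* `train` is exported (ordinal @1 in linear.def); look it up by name. */
        train_fn train_ptr = (train_fn) GetProcAddress(dll, "train");
        if (train_ptr == NULL) { fprintf(stderr, "export `train` not found\n"); return 1; }

        /* ... fill a struct problem and struct parameter, then call train_ptr(...) ... */
        FreeLibrary(dll);
        return 0;
    }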
data/liblinear-1.93/linear.h
@@ -0,0 +1,74 @@
+ #ifndef _LIBLINEAR_H
+ #define _LIBLINEAR_H
+
+ #ifdef __cplusplus
+ extern "C" {
+ #endif
+
+ struct feature_node
+ {
+     int index;
+     double value;
+ };
+
+ struct problem
+ {
+     int l, n;
+     double *y;
+     struct feature_node **x;
+     double bias;            /* < 0 if no bias term */
+ };
+
+ enum { L2R_LR, L2R_L2LOSS_SVC_DUAL, L2R_L2LOSS_SVC, L2R_L1LOSS_SVC_DUAL, MCSVM_CS, L1R_L2LOSS_SVC, L1R_LR, L2R_LR_DUAL, L2R_L2LOSS_SVR = 11, L2R_L2LOSS_SVR_DUAL, L2R_L1LOSS_SVR_DUAL }; /* solver_type */
+
+ struct parameter
+ {
+     int solver_type;
+
+     /* these are for training only */
+     double eps;             /* stopping criteria */
+     double C;
+     int nr_weight;
+     int *weight_label;
+     double* weight;
+     double p;
+ };
+
+ struct model
+ {
+     struct parameter param;
+     int nr_class;           /* number of classes */
+     int nr_feature;
+     double *w;
+     int *label;             /* label of each class */
+     double bias;
+ };
+
+ struct model* train(const struct problem *prob, const struct parameter *param);
+ void cross_validation(const struct problem *prob, const struct parameter *param, int nr_fold, double *target);
+
+ double predict_values(const struct model *model_, const struct feature_node *x, double* dec_values);
+ double predict(const struct model *model_, const struct feature_node *x);
+ double predict_probability(const struct model *model_, const struct feature_node *x, double* prob_estimates);
+
+ int save_model(const char *model_file_name, const struct model *model_);
+ struct model *load_model(const char *model_file_name);
+
+ int get_nr_feature(const struct model *model_);
+ int get_nr_class(const struct model *model_);
+ void get_labels(const struct model *model_, int* label);
+
+ void free_model_content(struct model *model_ptr);
+ void free_and_destroy_model(struct model **model_ptr_ptr);
+ void destroy_param(struct parameter *param);
+
+ const char *check_parameter(const struct problem *prob, const struct parameter *param);
+ int check_probability_model(const struct model *model);
+ void set_print_string_function(void (*print_func) (const char*));
+
+ #ifdef __cplusplus
+ }
+ #endif
+
+ #endif /* _LIBLINEAR_H */
+
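The header above declares the complete training and prediction API that the wrapper exposes. A minimal usage sketch in C follows; the toy data, the solver/parameter values, and the model file name are illustrative assumptions, not defaults of this package.

    /* Minimal sketch using the API declared above (toy data is an assumption). */
    #include <stdio.h>
    #include "linear.h"

    int main(void)
    {
        /* Two 2-feature training instances; each row is terminated by index = -1. */
        struct feature_node x0[] = {{1, 0.0}, {2, 1.0}, {-1, 0.0}};
        struct feature_node x1[] = {{1, 1.0}, {2, 0.0}, {-1, 0.0}};
        struct feature_node *x[] = {x0, x1};
        double y[] = {+1, -1};

        struct problem prob = {0};
        prob.l = 2;        /* number of instances */
        prob.n = 2;        /* number of features  */
        prob.y = y;
        prob.x = x;
        prob.bias = -1;    /* < 0: no bias term */

        struct parameter param = {0};
        param.solver_type = L2R_L2LOSS_SVC_DUAL;
        param.C = 1;
        param.eps = 0.1;

        const char *err = check_parameter(&prob, &param);
        if (err) { fprintf(stderr, "%s\n", err); return 1; }

        struct model *m = train(&prob, &param);
        printf("predicted label: %g\n", predict(m, x0));

        save_model("toy.model", m);       /* file name is an assumption */
        free_and_destroy_model(&m);
        destroy_param(&param);
        return 0;
    }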
data/liblinear-1.93/linear.o
Binary file
data/liblinear-1.93/matlab/Makefile
@@ -0,0 +1,58 @@
+ # This Makefile is used under Linux
+
+ MATLABDIR ?= /usr/local/matlab
+ CXX ?= g++
+ #CXX = g++-3.3
+ CC ?= gcc
+ CFLAGS = -Wall -Wconversion -O3 -fPIC -I$(MATLABDIR)/extern/include -I..
+
+ MEX = $(MATLABDIR)/bin/mex
+ MEX_OPTION = CC\#$(CXX) CXX\#$(CXX) CFLAGS\#"$(CFLAGS)" CXXFLAGS\#"$(CFLAGS)"
+ # comment the following line if you use MATLAB on a 32-bit computer
+ MEX_OPTION += -largeArrayDims
+ MEX_EXT = $(shell $(MATLABDIR)/bin/mexext)
+
+ OCTAVEDIR ?= /usr/include/octave
+ OCTAVE_MEX = env CC=$(CXX) mkoctfile
+ OCTAVE_MEX_OPTION = --mex
+ OCTAVE_MEX_EXT = mex
+ OCTAVE_CFLAGS = -Wall -O3 -fPIC -I$(OCTAVEDIR) -I..
+
+ all: matlab
+
+ matlab: binary
+
+ octave:
+     @make MEX="$(OCTAVE_MEX)" MEX_OPTION="$(OCTAVE_MEX_OPTION)" \
+     MEX_EXT="$(OCTAVE_MEX_EXT)" CFLAGS="$(OCTAVE_CFLAGS)" \
+     binary
+
+ binary: train.$(MEX_EXT) predict.$(MEX_EXT) libsvmread.$(MEX_EXT) libsvmwrite.$(MEX_EXT)
+
+ train.$(MEX_EXT): train.c ../linear.h ../tron.o ../linear.o linear_model_matlab.o ../blas/blas.a
+     $(MEX) $(MEX_OPTION) train.c ../tron.o ../linear.o linear_model_matlab.o ../blas/blas.a
+
+ predict.$(MEX_EXT): predict.c ../linear.h ../tron.o ../linear.o linear_model_matlab.o ../blas/blas.a
+     $(MEX) $(MEX_OPTION) predict.c ../tron.o ../linear.o linear_model_matlab.o ../blas/blas.a
+
+ libsvmread.$(MEX_EXT): libsvmread.c
+     $(MEX) $(MEX_OPTION) libsvmread.c
+
+ libsvmwrite.$(MEX_EXT): libsvmwrite.c
+     $(MEX) $(MEX_OPTION) libsvmwrite.c
+
+ linear_model_matlab.o: linear_model_matlab.c ../linear.h
+     $(CXX) $(CFLAGS) -c linear_model_matlab.c
+
+ ../linear.o: ../linear.cpp ../linear.h
+     make -C .. linear.o
+
+ ../tron.o: ../tron.cpp ../tron.h
+     make -C .. tron.o
+
+ ../blas/blas.a: ../blas/*.c ../blas/*.h
+     make -C ../blas OPTFLAGS='$(CFLAGS)' CC='$(CC)';
+
+ clean:
+     make -C ../blas clean
+     rm -f *~ *.o *.mex* *.obj ../linear.o ../tron.o
data/liblinear-1.93/matlab/README
@@ -0,0 +1,197 @@
+ --------------------------------------------
+ --- MATLAB/OCTAVE interface of LIBLINEAR ---
+ --------------------------------------------
+
+ Table of Contents
+ =================
+
+ - Introduction
+ - Installation
+ - Usage
+ - Returned Model Structure
+ - Other Utilities
+ - Examples
+ - Additional Information
+
+
+ Introduction
+ ============
+
+ This tool provides a simple interface to LIBLINEAR, a library for
+ large-scale regularized linear classification and regression
+ (http://www.csie.ntu.edu.tw/~cjlin/liblinear). It is very easy to use
+ as the usage and the way of specifying parameters are the same as that
+ of LIBLINEAR.
+
+ Installation
+ ============
+
+ On Windows systems, pre-built binary files are already in the
+ directory '..\windows', so no need to conduct installation. Now we
+ provide binary files only for 64bit MATLAB on Windows. If you would
+ like to re-build the package, please rely on the following steps.
+
+ We recommend using make.m on both MATLAB and OCTAVE. Just type 'make'
+ to build 'libsvmread.mex', 'libsvmwrite.mex', 'train.mex', and
+ 'predict.mex'.
+
+ On MATLAB or Octave:
+
+ >> make
+
+ If make.m does not work on MATLAB (especially for Windows), try 'mex
+ -setup' to choose a suitable compiler for mex. Make sure your compiler
+ is accessible and workable. Then type 'make' to start the
+ installation.
+
+ Example:
+
+ matlab>> mex -setup
+ (ps: MATLAB will show the following messages to setup default compiler.)
+ Please choose your compiler for building external interface (MEX) files:
+ Would you like mex to locate installed compilers [y]/n? y
+ Select a compiler:
+ [1] Microsoft Visual C/C++ version 7.1 in C:\Program Files\Microsoft Visual Studio
+ [0] None
+ Compiler: 1
+ Please verify your choices:
+ Compiler: Microsoft Visual C/C++ 7.1
+ Location: C:\Program Files\Microsoft Visual Studio
+ Are these correct?([y]/n): y
+
+ matlab>> make
+
+ On Unix systems, if neither make.m nor 'mex -setup' works, please use
+ Makefile and type 'make' in a command window. Note that we assume
+ your MATLAB is installed in '/usr/local/matlab'. If not, please change
+ MATLABDIR in Makefile.
+
+ Example:
+ linux> make
+
+ To use octave, type 'make octave':
+
+ Example:
+ linux> make octave
+
+ For a list of supported/compatible compilers for MATLAB, please check
+ the following page:
+
+ http://www.mathworks.com/support/compilers/current_release/
+
+ Usage
+ =====
+
+ matlab> model = train(training_label_vector, training_instance_matrix [,'liblinear_options', 'col']);
+
+ -training_label_vector:
+         An m by 1 vector of training labels. (type must be double)
+ -training_instance_matrix:
+         An m by n matrix of m training instances with n features.
+         It must be a sparse matrix. (type must be double)
+ -liblinear_options:
+         A string of training options in the same format as that of LIBLINEAR.
+ -col:
+         if 'col' is set, each column of training_instance_matrix is a data instance. Otherwise each row is a data instance.
+
+ matlab> [predicted_label, accuracy, decision_values/prob_estimates] = predict(testing_label_vector, testing_instance_matrix, model [, 'liblinear_options', 'col']);
+
+ -testing_label_vector:
+         An m by 1 vector of prediction labels. If labels of test
+         data are unknown, simply use any random values. (type must be double)
+ -testing_instance_matrix:
+         An m by n matrix of m testing instances with n features.
+         It must be a sparse matrix. (type must be double)
+ -model:
+         The output of train.
+ -liblinear_options:
+         A string of testing options in the same format as that of LIBLINEAR.
+ -col:
+         if 'col' is set, each column of testing_instance_matrix is a data instance. Otherwise each row is a data instance.
+
+ Returned Model Structure
+ ========================
+
+ The 'train' function returns a model which can be used for future
+ prediction. It is a structure and is organized as [Parameters, nr_class,
+ nr_feature, bias, Label, w]:
+
+ -Parameters: Parameters
+ -nr_class: number of classes; = 2 for regression
+ -nr_feature: number of features in training data (without including the bias term)
+ -bias: If >= 0, we assume one additional feature is added to the end
+         of each data instance.
+ -Label: label of each class; empty for regression
+ -w: a nr_w-by-n matrix for the weights, where n is nr_feature
+         or nr_feature+1 depending on the existence of the bias term.
+         nr_w is 1 if nr_class=2 and -s is not 4 (i.e., not
+         multi-class svm by Crammer and Singer). It is
+         nr_class otherwise.
+
+ If the '-v' option is specified, cross validation is conducted and the
+ returned model is just a scalar: cross-validation accuracy for
+ classification and mean-squared error for regression.
+
+ Result of Prediction
+ ====================
+
+ The function 'predict' has three outputs. The first one,
+ predicted_label, is a vector of predicted labels. The second output,
+ accuracy, is a vector including accuracy (for classification), mean
+ squared error, and squared correlation coefficient (for regression).
+ The third is a matrix containing decision values or probability
+ estimates (if '-b 1' is specified). If k is the number of classes
+ and k' is the number of classifiers (k'=1 if k=2, otherwise k'=k), for decision values,
+ each row includes results of k' binary linear classifiers. For probabilities,
+ each row contains k values indicating the probability that the testing instance is in
+ each class. Note that the order of classes here is the same as 'Label'
+ field in the model structure.
+
+ Other Utilities
+ ===============
+
+ A matlab function libsvmread reads files in LIBSVM format:
+
+ [label_vector, instance_matrix] = libsvmread('data.txt');
+
+ Two outputs are labels and instances, which can then be used as inputs
+ of train or predict.
+
+ A matlab function libsvmwrite writes a Matlab matrix to a file in LIBSVM format:
+
+ libsvmwrite('data.txt', label_vector, instance_matrix);
+
+ The instance_matrix must be a sparse matrix. (type must be double)
+ For windows, `libsvmread.mexw64' and `libsvmwrite.mexw64' are ready in
+ the directory `..\windows'.
+
+ These codes are prepared by Rong-En Fan and Kai-Wei Chang from National
+ Taiwan University.
+
+ Examples
+ ========
+
+ Train and test on the provided data heart_scale:
+
+ matlab> [heart_scale_label, heart_scale_inst] = libsvmread('../heart_scale');
+ matlab> model = train(heart_scale_label, heart_scale_inst, '-c 1');
+ matlab> [predict_label, accuracy, dec_values] = predict(heart_scale_label, heart_scale_inst, model); % test the training data
+
+ Note that for testing, you can put anything in the testing_label_vector.
+
+ For probability estimates, you need '-b 1' only in the testing phase:
+
+ matlab> [predict_label, accuracy, prob_estimates] = predict(heart_scale_label, heart_scale_inst, model, '-b 1');
+
+ Additional Information
+ ======================
+
+ Please cite LIBLINEAR as follows:
+
+ R.-E. Fan, K.-W. Chang, C.-J. Hsieh, X.-R. Wang, and C.-J. Lin.
+ LIBLINEAR: A Library for Large Linear Classification, Journal of
+ Machine Learning Research 9 (2008), 1871-1874. Software available at
+ http://www.csie.ntu.edu.tw/~cjlin/liblinear
+
+ For any question, please contact Chih-Jen Lin <cjlin@csie.ntu.edu.tw>.
+
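The 'Result of Prediction' section above maps directly onto the C-level calls predict_values and predict_probability declared in linear.h. A hedged sketch follows; the helper name report_prediction is hypothetical, and it assumes a trained model m and an index-terminated test instance x are supplied from elsewhere.

    /* Sketch of the C-level counterparts of the MATLAB predict outputs. */
    #include <stdio.h>
    #include <stdlib.h>
    #include "linear.h"

    static void report_prediction(const struct model *m, const struct feature_node *x)
    {
        int k = get_nr_class(m);                      /* number of classes */

        /* Decision values: k' = 1 binary classifier if k = 2, otherwise k. */
        double *dec = malloc(sizeof(double) * (size_t)k);
        double label = predict_values(m, x, dec);
        printf("predicted label %g, first decision value %g\n", label, dec[0]);
        free(dec);

        /* Probability estimates, ordered like the model's label array. */
        if (check_probability_model(m)) {
            double *prob = malloc(sizeof(double) * (size_t)k);
            int *labels = malloc(sizeof(int) * (size_t)k);
            predict_probability(m, x, prob);
            get_labels(m, labels);
            for (int i = 0; i < k; i++)
                printf("P(label %d) = %g\n", labels[i], prob[i]);
            free(labels);
            free(prob);
        }
    }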
data/liblinear-1.93/matlab/libsvmread.c
@@ -0,0 +1,212 @@
+ #include <stdio.h>
+ #include <string.h>
+ #include <stdlib.h>
+ #include <ctype.h>
+ #include <errno.h>
+
+ #include "mex.h"
+
+ #ifdef MX_API_VER
+ #if MX_API_VER < 0x07030000
+ typedef int mwIndex;
+ #endif
+ #endif
+ #ifndef max
+ #define max(x,y) (((x)>(y))?(x):(y))
+ #endif
+ #ifndef min
+ #define min(x,y) (((x)<(y))?(x):(y))
+ #endif
+
+ void exit_with_help()
+ {
+     mexPrintf(
+     "Usage: [label_vector, instance_matrix] = libsvmread('filename');\n"
+     );
+ }
+
+ static void fake_answer(mxArray *plhs[])
+ {
+     plhs[0] = mxCreateDoubleMatrix(0, 0, mxREAL);
+     plhs[1] = mxCreateDoubleMatrix(0, 0, mxREAL);
+ }
+
+ static char *line;
+ static int max_line_len;
+
+ static char* readline(FILE *input)
+ {
+     int len;
+
+     if(fgets(line,max_line_len,input) == NULL)
+         return NULL;
+
+     while(strrchr(line,'\n') == NULL)
+     {
+         max_line_len *= 2;
+         line = (char *) realloc(line, max_line_len);
+         len = (int) strlen(line);
+         if(fgets(line+len,max_line_len-len,input) == NULL)
+             break;
+     }
+     return line;
+ }
+
+ // read in a problem (in libsvm format)
+ void read_problem(const char *filename, mxArray *plhs[])
+ {
+     int max_index, min_index, inst_max_index, i;
+     long elements, k;
+     FILE *fp = fopen(filename,"r");
+     int l = 0;
+     char *endptr;
+     mwIndex *ir, *jc;
+     double *labels, *samples;
+
+     if(fp == NULL)
+     {
+         mexPrintf("can't open input file %s\n",filename);
+         fake_answer(plhs);
+         return;
+     }
+
+     max_line_len = 1024;
+     line = (char *) malloc(max_line_len*sizeof(char));
+
+     max_index = 0;
+     min_index = 1; // our index starts from 1
+     elements = 0;
+     while(readline(fp) != NULL)
+     {
+         char *idx, *val;
+         // features
+         int index = 0;
+
+         inst_max_index = -1; // strtol gives 0 if wrong format, and precomputed kernel has <index> start from 0
+         strtok(line," \t"); // label
+         while (1)
+         {
+             idx = strtok(NULL,":"); // index:value
+             val = strtok(NULL," \t");
+             if(val == NULL)
+                 break;
+
+             errno = 0;
+             index = (int) strtol(idx,&endptr,10);
+             if(endptr == idx || errno != 0 || *endptr != '\0' || index <= inst_max_index)
+             {
+                 mexPrintf("Wrong input format at line %d\n",l+1);
+                 fake_answer(plhs);
+                 return;
+             }
+             else
+                 inst_max_index = index;
+
+             min_index = min(min_index, index);
+             elements++;
+         }
+         max_index = max(max_index, inst_max_index);
+         l++;
+     }
+     rewind(fp);
+
+     // y
+     plhs[0] = mxCreateDoubleMatrix(l, 1, mxREAL);
+     // x^T
+     if (min_index <= 0)
+         plhs[1] = mxCreateSparse(max_index-min_index+1, l, elements, mxREAL);
+     else
+         plhs[1] = mxCreateSparse(max_index, l, elements, mxREAL);
+
+     labels = mxGetPr(plhs[0]);
+     samples = mxGetPr(plhs[1]);
+     ir = mxGetIr(plhs[1]);
+     jc = mxGetJc(plhs[1]);
+
+     k=0;
+     for(i=0;i<l;i++)
+     {
+         char *idx, *val, *label;
+         jc[i] = k;
+
+         readline(fp);
+
+         label = strtok(line," \t\n");
+         if(label == NULL)
+         {
+             mexPrintf("Empty line at line %d\n",i+1);
+             fake_answer(plhs);
+             return;
+         }
+         labels[i] = strtod(label,&endptr);
+         if(endptr == label || *endptr != '\0')
+         {
+             mexPrintf("Wrong input format at line %d\n",i+1);
+             fake_answer(plhs);
+             return;
+         }
+
+         // features
+         while(1)
+         {
+             idx = strtok(NULL,":");
+             val = strtok(NULL," \t");
+             if(val == NULL)
+                 break;
+
+             ir[k] = (mwIndex) (strtol(idx,&endptr,10) - min_index); // precomputed kernel has <index> start from 0
+
+             errno = 0;
+             samples[k] = strtod(val,&endptr);
+             if (endptr == val || errno != 0 || (*endptr != '\0' && !isspace(*endptr)))
+             {
+                 mexPrintf("Wrong input format at line %d\n",i+1);
+                 fake_answer(plhs);
+                 return;
+             }
+             ++k;
+         }
+     }
+     jc[l] = k;
+
+     fclose(fp);
+     free(line);
+
+     {
+         mxArray *rhs[1], *lhs[1];
+         rhs[0] = plhs[1];
+         if(mexCallMATLAB(1, lhs, 1, rhs, "transpose"))
+         {
+             mexPrintf("Error: cannot transpose problem\n");
+             fake_answer(plhs);
+             return;
+         }
+         plhs[1] = lhs[0];
+     }
+ }
+
+ void mexFunction( int nlhs, mxArray *plhs[],
+         int nrhs, const mxArray *prhs[] )
+ {
+     if(nrhs == 1)
+     {
+         char filename[256];
+
+         mxGetString(prhs[0], filename, mxGetN(prhs[0]) + 1);
+
+         if(filename == NULL)
+         {
+             mexPrintf("Error: filename is NULL\n");
+             return;
+         }
+
+         read_problem(filename, plhs);
+     }
+     else
+     {
+         exit_with_help();
+         fake_answer(plhs);
+         return;
+     }
+ }
+