liblinear-ruby 0.0.1
- checksums.yaml +7 -0
- data/.gitignore +19 -0
- data/Gemfile +4 -0
- data/LICENSE.txt +22 -0
- data/README.md +46 -0
- data/Rakefile +1 -0
- data/ext/Makefile +237 -0
- data/ext/blas.h +25 -0
- data/ext/blasp.h +430 -0
- data/ext/daxpy.c +49 -0
- data/ext/ddot.c +50 -0
- data/ext/dnrm2.c +62 -0
- data/ext/dscal.c +44 -0
- data/ext/extconf.rb +12 -0
- data/ext/liblinear_wrap.cxx +4646 -0
- data/ext/linear.cpp +2811 -0
- data/ext/linear.h +74 -0
- data/ext/linear.rb +357 -0
- data/ext/tron.cpp +235 -0
- data/ext/tron.h +34 -0
- data/lib/liblinear.rb +89 -0
- data/lib/liblinear/error.rb +4 -0
- data/lib/liblinear/model.rb +66 -0
- data/lib/liblinear/parameter.rb +42 -0
- data/lib/liblinear/problem.rb +55 -0
- data/lib/liblinear/version.rb +3 -0
- data/liblinear-1.93/COPYRIGHT +31 -0
- data/liblinear-1.93/Makefile +37 -0
- data/liblinear-1.93/Makefile.win +30 -0
- data/liblinear-1.93/README +531 -0
- data/liblinear-1.93/blas/Makefile +22 -0
- data/liblinear-1.93/blas/blas.a +0 -0
- data/liblinear-1.93/blas/blas.h +25 -0
- data/liblinear-1.93/blas/blasp.h +430 -0
- data/liblinear-1.93/blas/daxpy.c +49 -0
- data/liblinear-1.93/blas/daxpy.o +0 -0
- data/liblinear-1.93/blas/ddot.c +50 -0
- data/liblinear-1.93/blas/ddot.o +0 -0
- data/liblinear-1.93/blas/dnrm2.c +62 -0
- data/liblinear-1.93/blas/dnrm2.o +0 -0
- data/liblinear-1.93/blas/dscal.c +44 -0
- data/liblinear-1.93/blas/dscal.o +0 -0
- data/liblinear-1.93/heart_scale +270 -0
- data/liblinear-1.93/linear.cpp +2811 -0
- data/liblinear-1.93/linear.def +18 -0
- data/liblinear-1.93/linear.h +74 -0
- data/liblinear-1.93/linear.o +0 -0
- data/liblinear-1.93/matlab/Makefile +58 -0
- data/liblinear-1.93/matlab/README +197 -0
- data/liblinear-1.93/matlab/libsvmread.c +212 -0
- data/liblinear-1.93/matlab/libsvmwrite.c +106 -0
- data/liblinear-1.93/matlab/linear_model_matlab.c +176 -0
- data/liblinear-1.93/matlab/linear_model_matlab.h +2 -0
- data/liblinear-1.93/matlab/make.m +21 -0
- data/liblinear-1.93/matlab/predict.c +331 -0
- data/liblinear-1.93/matlab/train.c +418 -0
- data/liblinear-1.93/predict +0 -0
- data/liblinear-1.93/predict.c +245 -0
- data/liblinear-1.93/python/Makefile +4 -0
- data/liblinear-1.93/python/README +343 -0
- data/liblinear-1.93/python/liblinear.py +277 -0
- data/liblinear-1.93/python/liblinearutil.py +250 -0
- data/liblinear-1.93/ruby/liblinear.i +41 -0
- data/liblinear-1.93/ruby/liblinear_wrap.cxx +4646 -0
- data/liblinear-1.93/ruby/linear.h +74 -0
- data/liblinear-1.93/ruby/linear.o +0 -0
- data/liblinear-1.93/train +0 -0
- data/liblinear-1.93/train.c +399 -0
- data/liblinear-1.93/tron.cpp +235 -0
- data/liblinear-1.93/tron.h +34 -0
- data/liblinear-1.93/tron.o +0 -0
- data/liblinear-1.93/windows/liblinear.dll +0 -0
- data/liblinear-1.93/windows/libsvmread.mexw64 +0 -0
- data/liblinear-1.93/windows/libsvmwrite.mexw64 +0 -0
- data/liblinear-1.93/windows/predict.exe +0 -0
- data/liblinear-1.93/windows/predict.mexw64 +0 -0
- data/liblinear-1.93/windows/train.exe +0 -0
- data/liblinear-1.93/windows/train.mexw64 +0 -0
- data/liblinear-ruby.gemspec +24 -0
- metadata +152 -0
data/liblinear-1.93/python/README
@@ -0,0 +1,343 @@
-------------------------------------
--- Python interface of LIBLINEAR ---
-------------------------------------

Table of Contents
=================

- Introduction
- Installation
- Quick Start
- Design Description
- Data Structures
- Utility Functions
- Additional Information

Introduction
============

Python (http://www.python.org/) is a programming language suitable for rapid
development. This tool provides a simple Python interface to LIBLINEAR, a library
for support vector machines (http://www.csie.ntu.edu.tw/~cjlin/liblinear). The
interface is very easy to use as the usage is the same as that of LIBLINEAR. The
interface is developed with the built-in Python library "ctypes."

Installation
============

On Unix systems, type

> make

The interface needs only the LIBLINEAR shared library, which is generated by
the above command. We assume that the shared library is in the LIBLINEAR
main directory or in the system path.

For Windows, the shared library liblinear.dll is ready in the directory
`..\windows'. You can also copy it to the system directory (e.g.,
`C:\WINDOWS\system32\' for Windows XP). To regenerate the shared library,
please follow the instructions for building Windows binaries in the LIBLINEAR README.

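As a quick sanity check (not part of the original instructions), you can confirm
from a Python prompt that ctypes is able to locate the freshly built library;
the file name liblinear.so.1 is the default Unix build output referenced by liblinear.py:

>>> from ctypes import CDLL
>>> from ctypes.util import find_library
>>> CDLL('./liblinear.so.1')   # run from the LIBLINEAR main directory; raises OSError if the build failed
>>> find_library('linear')     # non-None only if a copy is installed on the system search path
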
Quick Start
===========

There are two levels of usage. The high-level one uses utility functions
in liblinearutil.py and the usage is the same as the LIBLINEAR MATLAB interface.

>>> from liblinearutil import *
# Read data in LIBSVM format
>>> y, x = svm_read_problem('../heart_scale')
>>> m = train(y[:200], x[:200], '-c 4')
>>> p_label, p_acc, p_val = predict(y[200:], x[200:], m)

# Construct problem in python format
# Dense data
>>> y, x = [1,-1], [[1,0,1], [-1,0,-1]]
# Sparse data
>>> y, x = [1,-1], [{1:1, 3:1}, {1:-1,3:-1}]
>>> prob = problem(y, x)
>>> param = parameter('-c 4 -B 1')
>>> m = train(prob, param)

# Other utility functions
>>> save_model('heart_scale.model', m)
>>> m = load_model('heart_scale.model')
>>> p_label, p_acc, p_val = predict(y, x, m, '-b 1')
>>> ACC, MSE, SCC = evaluations(y, p_label)

# Getting online help
>>> help(train)

The low-level usage directly calls C interfaces imported by liblinear.py. Note that
all arguments and return values are in ctypes format. You need to handle them
carefully.

>>> from liblinear import *
>>> prob = problem([1,-1], [{1:1, 3:1}, {1:-1,3:-1}])
>>> param = parameter('-c 4')
>>> m = liblinear.train(prob, param) # m is a ctypes pointer to a model
# Convert a Python-format instance to feature_nodearray, a ctypes structure
>>> x0, max_idx = gen_feature_nodearray({1:1, 3:1})
>>> label = liblinear.predict(m, x0)

Design Description
==================

There are two files, liblinear.py and liblinearutil.py, which respectively correspond to
low-level and high-level use of the interface.

In liblinear.py, we adopt the Python built-in library "ctypes," so that
Python can directly access C structures and interface functions defined
in linear.h.

While advanced users can use structures/functions in liblinear.py, to
avoid handling ctypes structures, in liblinearutil.py we provide some easy-to-use
functions. The usage is similar to the LIBLINEAR MATLAB interface.

Data Structures
===============

Three data structures derived from linear.h are node, problem, and
parameter. They all contain fields with the same names as in
linear.h. Access these fields carefully because you directly use a C structure
instead of a Python object. The following description introduces additional
fields and methods.

Before using the data structures, execute the following command to load the
LIBLINEAR shared library:

>>> from liblinear import *

- class feature_node:

Construct a feature_node.

>>> node = feature_node(idx, val)

idx: an integer indicating the feature index.

val: a float indicating the feature value.

Show the index and the value of a node.

>>> print(node)

- Function: gen_feature_nodearray(xi [,feature_max=None [,issparse=True]])

Generate a feature vector from a Python list/tuple or a dictionary:

>>> xi, max_idx = gen_feature_nodearray({1:1, 3:1, 5:-2})

xi: the returned feature_nodearray (a ctypes structure)

max_idx: the maximal feature index of xi

issparse: if issparse == True, zero feature values are removed. The default
value is True for sparsity.

feature_max: if feature_max is assigned, features with indices larger than
feature_max are removed.

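For example (an illustrative call, not from the original README), a dense list is
indexed starting from 1, and zero-valued entries are dropped when issparse is True:

>>> xi, max_idx = gen_feature_nodearray([0.5, 0, -1])
>>> max_idx    # features 1 and 3 are kept; feature 2 is zero and removed
3
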
- class problem:

Construct a problem instance

>>> prob = problem(y, x [,bias=-1])

y: a Python list/tuple of l labels (type must be int/double).

x: a Python list/tuple of l data instances. Each element of x must be
an instance of list/tuple/dictionary type.

bias: if bias >= 0, instance x becomes [x; bias]; if < 0, no bias term is
added (default -1).

You can also modify the bias value by

>>> prob.set_bias(1)

Note that if your x contains sparse data (i.e., dictionary), the internal
ctypes data format is still sparse.

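A small illustration (the data values are arbitrary) of constructing a problem
with a bias term and removing it again via set_bias:

>>> prob = problem([1,-1], [{1:1, 3:1}, {1:-1,3:-1}], bias=1)
>>> prob.set_bias(-1)   # drops the bias term; prob.n decreases by one
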
- class parameter:

Construct a parameter instance

>>> param = parameter('training_options')

If 'training_options' is empty, LIBLINEAR default values are applied.

Set param to LIBLINEAR default values.

>>> param.set_to_default_values()

Parse a string of options.

>>> param.parse_options('training_options')

Show values of parameters.

>>> print(param)

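Note that parse_options first resets every field to its default value and only then
applies the given string, so options from an earlier call do not carry over
(a small illustration):

>>> param = parameter('-s 0 -c 10')
>>> param.parse_options('-e 0.01')   # -s and -c are back to their defaults after this call
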
- class model:

There are two ways to obtain an instance of model:

>>> model_ = train(y, x)
>>> model_ = load_model('model_file_name')

Note that the returned structure of the interface functions
liblinear.train and liblinear.load_model is a ctypes pointer of
model, which is different from the model object returned
by train and load_model in liblinearutil.py. We provide a
function toPyModel for the conversion:

>>> model_ptr = liblinear.train(prob, param)
>>> model_ = toPyModel(model_ptr)

If you obtain a model in a way other than the above approaches,
handle it carefully to avoid memory leaks or segmentation faults.

Some interface functions to access LIBLINEAR models are wrapped as
members of the class model:

>>> nr_feature = model_.get_nr_feature()
>>> nr_class = model_.get_nr_class()
>>> class_labels = model_.get_labels()
>>> is_prob_model = model_.is_probability_model()

Utility Functions
=================

To use utility functions, type

>>> from liblinearutil import *

The above command loads
train()            : train a linear model
predict()          : predict testing data
svm_read_problem() : read the data from a LIBSVM-format file.
load_model()       : load a LIBLINEAR model.
save_model()       : save a model to a file.
evaluations()      : evaluate prediction results.

- Function: train

There are three ways to call train():

>>> model = train(y, x [, 'training_options'])
>>> model = train(prob [, 'training_options'])
>>> model = train(prob, param)

y: a list/tuple of l training labels (type must be int/double).

x: a list/tuple of l training instances. The feature vector of
each training instance is an instance of list/tuple or dictionary.

training_options: a string in the same form as that for LIBLINEAR
command-line mode.

prob: a problem instance generated by calling
problem(y, x).

param: a parameter instance generated by calling
parameter('training_options')

model: the returned model instance. See linear.h for details of this
structure. If '-v' is specified, cross validation is
conducted and the returned model is just a scalar: cross-validation
accuracy for classification and mean-squared error for regression.

To train the same data many times with different
parameters, the second and the third ways should be faster.

Examples:

>>> y, x = svm_read_problem('../heart_scale')
>>> prob = problem(y, x)
>>> param = parameter('-s 3 -c 5 -q')
>>> m = train(y, x, '-c 5')
>>> m = train(prob, '-w1 5 -c 5')
>>> m = train(prob, param)
>>> CV_ACC = train(y, x, '-v 3')

- Function: predict

To predict testing data with a model, use

>>> p_labs, p_acc, p_vals = predict(y, x, model [,'predicting_options'])

y: a list/tuple of l true labels (type must be int/double). It is used
for calculating the accuracy. Use [] if true labels are
unavailable.

x: a list/tuple of l predicting instances. The feature vector of
each predicting instance is an instance of list/tuple or dictionary.

predicting_options: a string of predicting options in the same format as
that of LIBLINEAR.

model: a model instance.

p_labels: a list of predicted labels

p_acc: a tuple including accuracy (for classification), mean
squared error, and squared correlation coefficient (for
regression).

p_vals: a list of decision values or probability estimates (if '-b 1'
is specified). If k is the number of classes, for decision values,
each element includes results of predicting k binary-class
SVMs. If k = 2 and the solver is not MCSVM_CS, only one decision value
is returned. For probabilities, each element contains k values
indicating the probability that the testing instance is in each class.
Note that the order of classes here is the same as the 'model.label'
field in the model structure.

Example:

>>> m = train(y, x, '-c 5')
>>> p_labels, p_acc, p_vals = predict(y, x, m)

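To obtain probability estimates, '-b 1' should be combined with a logistic
regression solver; a brief sketch, assuming y and x are loaded as above:

>>> m = train(y, x, '-s 0')   # -s 0 selects L2R_LR (logistic regression)
>>> p_labels, p_acc, p_vals = predict(y, x, m, '-b 1')
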
- Functions: svm_read_problem/load_model/save_model

See the usage by examples:

>>> y, x = svm_read_problem('data.txt')
>>> m = load_model('model_file')
>>> save_model('model_file', m)

- Function: evaluations

Calculate some evaluations using the true values (ty) and predicted
values (pv):

>>> (ACC, MSE, SCC) = evaluations(ty, pv)

ty: a list of true values.

pv: a list of predicted values.

ACC: accuracy.

MSE: mean squared error.

SCC: squared correlation coefficient.

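As a rough reference for what these quantities measure (not the exact code in
liblinearutil.py), ACC and MSE can be sketched in plain Python as below, and SCC
is the squared correlation coefficient between ty and pv:

>>> ty, pv = [1.0, -1.0, 1.0], [0.5, -1.0, 1.0]
>>> ACC = 100.0 * sum(t == p for t, p in zip(ty, pv)) / len(ty)        # exact-match rate, in percent
>>> MSE = sum((t - p) ** 2 for t, p in zip(ty, pv)) / float(len(ty))   # mean of squared differences
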
Additional Information
======================

This interface was written by Hsiang-Fu Yu from the Department of Computer
Science, National Taiwan University. If you find this tool useful, please
cite LIBLINEAR as follows:

R.-E. Fan, K.-W. Chang, C.-J. Hsieh, X.-R. Wang, and C.-J. Lin.
LIBLINEAR: A Library for Large Linear Classification, Journal of
Machine Learning Research 9(2008), 1871-1874. Software available at
http://www.csie.ntu.edu.tw/~cjlin/liblinear

For any questions, please contact Chih-Jen Lin <cjlin@csie.ntu.edu.tw>,
or check the FAQ page:

http://www.csie.ntu.edu.tw/~cjlin/liblinear/faq.html
data/liblinear-1.93/python/liblinear.py
@@ -0,0 +1,277 @@
#!/usr/bin/env python

from ctypes import *
from ctypes.util import find_library
from os import path
import sys

try:
    dirname = path.dirname(path.abspath(__file__))
    if sys.platform == 'win32':
        liblinear = CDLL(path.join(dirname, r'..\windows\liblinear.dll'))
    else:
        liblinear = CDLL(path.join(dirname, '../liblinear.so.1'))
except:
    # For unix the prefix 'lib' is not considered.
    if find_library('linear'):
        liblinear = CDLL(find_library('linear'))
    elif find_library('liblinear'):
        liblinear = CDLL(find_library('liblinear'))
    else:
        raise Exception('LIBLINEAR library not found.')

# Construct constants
SOLVER_TYPE = ['L2R_LR', 'L2R_L2LOSS_SVC_DUAL', 'L2R_L2LOSS_SVC', 'L2R_L1LOSS_SVC_DUAL',\
        'MCSVM_CS', 'L1R_L2LOSS_SVC', 'L1R_LR', 'L2R_LR_DUAL', \
        None, None, None, \
        'L2R_L2LOSS_SVR', 'L2R_L2LOSS_SVR_DUAL', 'L2R_L1LOSS_SVR_DUAL']
for i, s in enumerate(SOLVER_TYPE):
    if s is not None: exec("%s = %d" % (s , i))

PRINT_STRING_FUN = CFUNCTYPE(None, c_char_p)
def print_null(s):
    return

def genFields(names, types):
    return list(zip(names, types))

def fillprototype(f, restype, argtypes):
    f.restype = restype
    f.argtypes = argtypes

class feature_node(Structure):
    _names = ["index", "value"]
    _types = [c_int, c_double]
    _fields_ = genFields(_names, _types)

    def __str__(self):
        return '%d:%g' % (self.index, self.value)

def gen_feature_nodearray(xi, feature_max=None, issparse=True):
    if isinstance(xi, dict):
        index_range = xi.keys()
    elif isinstance(xi, (list, tuple)):
        xi = [0] + xi  # idx should start from 1
        index_range = range(1, len(xi))
    else:
        raise TypeError('xi should be a dictionary, list or tuple')

    if feature_max:
        assert(isinstance(feature_max, int))
        index_range = filter(lambda j: j <= feature_max, index_range)
    if issparse:
        index_range = filter(lambda j:xi[j] != 0, index_range)

    index_range = sorted(index_range)
    ret = (feature_node * (len(index_range)+2))()
    ret[-1].index = -1 # for bias term
    ret[-2].index = -1
    for idx, j in enumerate(index_range):
        ret[idx].index = j
        ret[idx].value = xi[j]
    max_idx = 0
    if index_range:
        max_idx = index_range[-1]
    return ret, max_idx

class problem(Structure):
    _names = ["l", "n", "y", "x", "bias"]
    _types = [c_int, c_int, POINTER(c_double), POINTER(POINTER(feature_node)), c_double]
    _fields_ = genFields(_names, _types)

    def __init__(self, y, x, bias = -1):
        if len(y) != len(x):
            raise ValueError("len(y) != len(x)")
        self.l = l = len(y)
        self.bias = -1

        max_idx = 0
        x_space = self.x_space = []
        for i, xi in enumerate(x):
            tmp_xi, tmp_idx = gen_feature_nodearray(xi)
            x_space += [tmp_xi]
            max_idx = max(max_idx, tmp_idx)
        self.n = max_idx

        self.y = (c_double * l)()
        for i, yi in enumerate(y): self.y[i] = y[i]

        self.x = (POINTER(feature_node) * l)()
        for i, xi in enumerate(self.x_space): self.x[i] = xi

        self.set_bias(bias)

    def set_bias(self, bias):
        if self.bias == bias:
            return
        if bias >= 0 and self.bias < 0:
            self.n += 1
            node = feature_node(self.n, bias)
        if bias < 0 and self.bias >= 0:
            self.n -= 1
            node = feature_node(-1, bias)

        for xi in self.x_space:
            xi[-2] = node
        self.bias = bias


class parameter(Structure):
    _names = ["solver_type", "eps", "C", "nr_weight", "weight_label", "weight", "p"]
    _types = [c_int, c_double, c_double, c_int, POINTER(c_int), POINTER(c_double), c_double]
    _fields_ = genFields(_names, _types)

    def __init__(self, options = None):
        if options == None:
            options = ''
        self.parse_options(options)

    def __str__(self):
        s = ''
        attrs = parameter._names + list(self.__dict__.keys())
        values = map(lambda attr: getattr(self, attr), attrs)
        for attr, val in zip(attrs, values):
            s += (' %s: %s\n' % (attr, val))
        s = s.strip()

        return s

    def set_to_default_values(self):
        self.solver_type = L2R_L2LOSS_SVC_DUAL
        self.eps = float('inf')
        self.C = 1
        self.p = 0.1
        self.nr_weight = 0
        self.weight_label = (c_int * 0)()
        self.weight = (c_double * 0)()
        self.bias = -1
        self.cross_validation = False
        self.nr_fold = 0
        self.print_func = None

    def parse_options(self, options):
        if isinstance(options, list):
            argv = options
        elif isinstance(options, str):
            argv = options.split()
        else:
            raise TypeError("arg 1 should be a list or a str.")
        self.set_to_default_values()
        self.print_func = cast(None, PRINT_STRING_FUN)
        weight_label = []
        weight = []

        i = 0
        while i < len(argv):
            if argv[i] == "-s":
                i = i + 1
                self.solver_type = int(argv[i])
            elif argv[i] == "-c":
                i = i + 1
                self.C = float(argv[i])
            elif argv[i] == "-p":
                i = i + 1
                self.p = float(argv[i])
            elif argv[i] == "-e":
                i = i + 1
                self.eps = float(argv[i])
            elif argv[i] == "-B":
                i = i + 1
                self.bias = float(argv[i])
            elif argv[i] == "-v":
                i = i + 1
                self.cross_validation = 1
                self.nr_fold = int(argv[i])
                if self.nr_fold < 2:
                    raise ValueError("n-fold cross validation: n must >= 2")
            elif argv[i].startswith("-w"):
                i = i + 1
                self.nr_weight += 1
                nr_weight = self.nr_weight
                weight_label += [int(argv[i-1][2:])]
                weight += [float(argv[i])]
            elif argv[i] == "-q":
                self.print_func = PRINT_STRING_FUN(print_null)
            else:
                raise ValueError("Wrong options")
            i += 1

        liblinear.set_print_string_function(self.print_func)
        self.weight_label = (c_int*self.nr_weight)()
        self.weight = (c_double*self.nr_weight)()
        for i in range(self.nr_weight):
            self.weight[i] = weight[i]
            self.weight_label[i] = weight_label[i]

        if self.eps == float('inf'):
            if self.solver_type in [L2R_LR, L2R_L2LOSS_SVC]:
                self.eps = 0.01
            elif self.solver_type in [L2R_L2LOSS_SVR]:
                self.eps = 0.001
            elif self.solver_type in [L2R_L2LOSS_SVC_DUAL, L2R_L1LOSS_SVC_DUAL, MCSVM_CS, L2R_LR_DUAL]:
                self.eps = 0.1
            elif self.solver_type in [L1R_L2LOSS_SVC, L1R_LR]:
                self.eps = 0.01
            elif self.solver_type in [L2R_L2LOSS_SVR_DUAL, L2R_L1LOSS_SVR_DUAL]:
                self.eps = 0.1

class model(Structure):
    _names = ["param", "nr_class", "nr_feature", "w", "label", "bias"]
    _types = [parameter, c_int, c_int, POINTER(c_double), POINTER(c_int), c_double]
    _fields_ = genFields(_names, _types)

    def __init__(self):
        self.__createfrom__ = 'python'

    def __del__(self):
        # free memory created by C to avoid memory leak
        if hasattr(self, '__createfrom__') and self.__createfrom__ == 'C':
            liblinear.free_and_destroy_model(pointer(self))

    def get_nr_feature(self):
        return liblinear.get_nr_feature(self)

    def get_nr_class(self):
        return liblinear.get_nr_class(self)

    def get_labels(self):
        nr_class = self.get_nr_class()
        labels = (c_int * nr_class)()
        liblinear.get_labels(self, labels)
        return labels[:nr_class]

    def is_probability_model(self):
        return (liblinear.check_probability_model(self) == 1)

def toPyModel(model_ptr):
    """
    toPyModel(model_ptr) -> model

    Convert a ctypes POINTER(model) to a Python model
    """
    if bool(model_ptr) == False:
        raise ValueError("Null pointer")
    m = model_ptr.contents
    m.__createfrom__ = 'C'
    return m

fillprototype(liblinear.train, POINTER(model), [POINTER(problem), POINTER(parameter)])
fillprototype(liblinear.cross_validation, None, [POINTER(problem), POINTER(parameter), c_int, POINTER(c_double)])

fillprototype(liblinear.predict_values, c_double, [POINTER(model), POINTER(feature_node), POINTER(c_double)])
fillprototype(liblinear.predict, c_double, [POINTER(model), POINTER(feature_node)])
fillprototype(liblinear.predict_probability, c_double, [POINTER(model), POINTER(feature_node), POINTER(c_double)])

fillprototype(liblinear.save_model, c_int, [c_char_p, POINTER(model)])
fillprototype(liblinear.load_model, POINTER(model), [c_char_p])

fillprototype(liblinear.get_nr_feature, c_int, [POINTER(model)])
fillprototype(liblinear.get_nr_class, c_int, [POINTER(model)])
fillprototype(liblinear.get_labels, None, [POINTER(model), POINTER(c_int)])

fillprototype(liblinear.free_model_content, None, [POINTER(model)])
fillprototype(liblinear.free_and_destroy_model, None, [POINTER(POINTER(model))])
fillprototype(liblinear.destroy_param, None, [POINTER(parameter)])
fillprototype(liblinear.check_parameter, c_char_p, [POINTER(problem), POINTER(parameter)])
fillprototype(liblinear.check_probability_model, c_int, [POINTER(model)])
fillprototype(liblinear.set_print_string_function, None, [CFUNCTYPE(None, c_char_p)])