liblinear-ruby 0.0.4 → 0.0.5

Files changed (52)
  1. checksums.yaml +4 -4
  2. data/README.md +6 -3
  3. data/ext/liblinear_wrap.cxx +99 -0
  4. data/ext/linear.cpp +61 -11
  5. data/ext/linear.h +3 -0
  6. data/lib/liblinear/model.rb +37 -6
  7. data/lib/liblinear/version.rb +1 -1
  8. data/{liblinear-1.94 → liblinear-1.95}/COPYRIGHT +1 -1
  9. data/{liblinear-1.94 → liblinear-1.95}/Makefile +1 -1
  10. data/{liblinear-1.94 → liblinear-1.95}/Makefile.win +1 -1
  11. data/{liblinear-1.94 → liblinear-1.95}/README +32 -1
  12. data/{liblinear-1.94 → liblinear-1.95}/blas/Makefile +0 -0
  13. data/{liblinear-1.94 → liblinear-1.95}/blas/blas.h +0 -0
  14. data/{liblinear-1.94 → liblinear-1.95}/blas/blasp.h +8 -0
  15. data/{liblinear-1.94 → liblinear-1.95}/blas/daxpy.c +8 -0
  16. data/{liblinear-1.94 → liblinear-1.95}/blas/ddot.c +8 -0
  17. data/{liblinear-1.94 → liblinear-1.95}/blas/dnrm2.c +8 -0
  18. data/{liblinear-1.94 → liblinear-1.95}/blas/dscal.c +8 -0
  19. data/{liblinear-1.94 → liblinear-1.95}/heart_scale +0 -0
  20. data/{liblinear-1.94 → liblinear-1.95}/linear.cpp +61 -11
  21. data/{liblinear-1.94 → liblinear-1.95}/linear.def +3 -0
  22. data/{liblinear-1.94 → liblinear-1.95}/linear.h +3 -0
  23. data/{liblinear-1.94 → liblinear-1.95}/matlab/Makefile +2 -11
  24. data/{liblinear-1.94 → liblinear-1.95}/matlab/README +0 -0
  25. data/{liblinear-1.94 → liblinear-1.95}/matlab/libsvmread.c +3 -4
  26. data/{liblinear-1.94 → liblinear-1.95}/matlab/libsvmwrite.c +6 -7
  27. data/{liblinear-1.94 → liblinear-1.95}/matlab/linear_model_matlab.c +1 -1
  28. data/{liblinear-1.94 → liblinear-1.95}/matlab/linear_model_matlab.h +0 -0
  29. data/{liblinear-1.94 → liblinear-1.95}/matlab/make.m +21 -21
  30. data/{liblinear-1.94 → liblinear-1.95}/matlab/predict.c +4 -6
  31. data/{liblinear-1.94 → liblinear-1.95}/matlab/train.c +27 -17
  32. data/{liblinear-1.94 → liblinear-1.95}/predict.c +1 -3
  33. data/{liblinear-1.94 → liblinear-1.95}/python/Makefile +0 -0
  34. data/{liblinear-1.94 → liblinear-1.95}/python/README +30 -0
  35. data/{liblinear-1.94 → liblinear-1.95}/python/liblinear.py +35 -8
  36. data/{liblinear-1.94 → liblinear-1.95}/python/liblinearutil.py +7 -1
  37. data/{liblinear-1.94 → liblinear-1.95}/train.c +1 -1
  38. data/{liblinear-1.94 → liblinear-1.95}/tron.cpp +0 -0
  39. data/{liblinear-1.94 → liblinear-1.95}/tron.h +0 -0
  40. data/{liblinear-1.94 → liblinear-1.95}/windows/liblinear.dll +0 -0
  41. data/liblinear-1.95/windows/libsvmread.mexw64 +0 -0
  42. data/liblinear-1.95/windows/libsvmwrite.mexw64 +0 -0
  43. data/{liblinear-1.94 → liblinear-1.95}/windows/predict.exe +0 -0
  44. data/liblinear-1.95/windows/predict.mexw64 +0 -0
  45. data/{liblinear-1.94 → liblinear-1.95}/windows/train.exe +0 -0
  46. data/liblinear-1.95/windows/train.mexw64 +0 -0
  47. data/spec/liblinear/model_spec.rb +50 -11
  48. metadata +43 -42
  49. data/liblinear-1.94/windows/libsvmread.mexw64 +0 -0
  50. data/liblinear-1.94/windows/libsvmwrite.mexw64 +0 -0
  51. data/liblinear-1.94/windows/predict.mexw64 +0 -0
  52. data/liblinear-1.94/windows/train.mexw64 +0 -0
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz: e70466e2b89a0daa504f9dd8a655d759beb40260
-  data.tar.gz: e414010bb3853f7d2609e9e5a2fe34474d86e784
+  metadata.gz: ac700b40f9f8bc42fbb319f6d510b150740556e7
+  data.tar.gz: 1027bf69f4ab2a882c12b0d569ce952a2729c350
 SHA512:
-  metadata.gz: 208a81ca1d8ebe5aaf221797417fbb3501a88f3a3d40310c65691980e3808463f5dc67c04cdea3ecf676b46b824d6506d9bfe0fb0f0d123094f39ae391454ec3
-  data.tar.gz: 848111ae60b0ae8422550f719fdf60d2ad701ee07a723da1c9e4c58e50dbce73f977a52eb1dc5ef8b981102d65d606e6333a15fa84fe1e5759f8852a35066ff9
+  metadata.gz: 4ea3e62ea0b3977d0a49f5ed700f7e40079c99b144eda439a384cb04de644d21355db4e032f150430c3abc9df4cfea1fdd3828409f119a636dd01fbcc63b254e
+  data.tar.gz: 610cf9eab7d391d5c4ee1767e28accc957c158402a15d5bd637c55eaa1bf960218f5d52581752499ffbb77dab3bd5e65a0e8d14a12ee9772047d14ac76077269
data/README.md CHANGED
@@ -1,8 +1,8 @@
 # Liblinear-Ruby
 [![Gem Version](https://badge.fury.io/rb/liblinear-ruby.png)](http://badge.fury.io/rb/liblinear-ruby)
 
-Liblinear-Ruby is Ruby interface to LIBLINEAR using SWIG.
-Now, this interface is supporting LIBLINEAR 1.94.
+Liblinear-Ruby is Ruby interface to LIBLINEAR using SWIG.
+Now, this interface is supporting LIBLINEAR 1.95.
 
 ## Installation
 
@@ -39,6 +39,10 @@ model = Liblinear::Model.new(prob, param)
 
 # Predicting phase
 puts model.predict({1=>1, 2=>1, 3=>1, 4=>1, 5=>1}) # => -1.0
+
+# Analyzing phase
+puts model.coefficient
+puts model.bias
 ```
 ## Usage
 
@@ -147,7 +151,6 @@ model = Liblinear::Model.new(prob, param)
 ```
 If you have already had a model file, you can load it as:
 ```ruby
-prob = Liblinear::Problem.new(labels, examples, bias)
 model = Liblinear::Model.new(model_file)
 ```
 In this phase, you can save model as:
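The new "Analyzing phase" lines above are the headline user-facing change in 0.0.5. A minimal sketch of what those calls return, assuming `model` is the trained binary classifier from the README's quick-start (implementation in the model.rb diff below):

```ruby
model.coefficient    # => Array of Float: one weight per feature, for label index 0
model.coefficient(1) # => Float: weight of feature 1 (feature indices start at 1)
model.bias           # => Float: bias term of the decision function (0.0 if trained with bias <= 0)
```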
data/ext/liblinear_wrap.cxx CHANGED
@@ -3681,6 +3681,78 @@ fail:
 }
 
 
+SWIGINTERN VALUE
+_wrap_get_decfun_coef(int argc, VALUE *argv, VALUE self) {
+  model *arg1 = (model *) 0 ;
+  int arg2 ;
+  int arg3 ;
+  void *argp1 = 0 ;
+  int res1 = 0 ;
+  int val2 ;
+  int ecode2 = 0 ;
+  int val3 ;
+  int ecode3 = 0 ;
+  double result;
+  VALUE vresult = Qnil;
+
+  if ((argc < 3) || (argc > 3)) {
+    rb_raise(rb_eArgError, "wrong # of arguments(%d for 3)",argc); SWIG_fail;
+  }
+  res1 = SWIG_ConvertPtr(argv[0], &argp1, SWIGTYPE_p_model, 0 | 0 );
+  if (!SWIG_IsOK(res1)) {
+    SWIG_exception_fail(SWIG_ArgError(res1), Ruby_Format_TypeError( "", "model const *","get_decfun_coef", 1, argv[0] ));
+  }
+  arg1 = reinterpret_cast< model * >(argp1);
+  ecode2 = SWIG_AsVal_int(argv[1], &val2);
+  if (!SWIG_IsOK(ecode2)) {
+    SWIG_exception_fail(SWIG_ArgError(ecode2), Ruby_Format_TypeError( "", "int","get_decfun_coef", 2, argv[1] ));
+  }
+  arg2 = static_cast< int >(val2);
+  ecode3 = SWIG_AsVal_int(argv[2], &val3);
+  if (!SWIG_IsOK(ecode3)) {
+    SWIG_exception_fail(SWIG_ArgError(ecode3), Ruby_Format_TypeError( "", "int","get_decfun_coef", 3, argv[2] ));
+  }
+  arg3 = static_cast< int >(val3);
+  result = (double)get_decfun_coef((model const *)arg1,arg2,arg3);
+  vresult = SWIG_From_double(static_cast< double >(result));
+  return vresult;
+fail:
+  return Qnil;
+}
+
+
+SWIGINTERN VALUE
+_wrap_get_decfun_bias(int argc, VALUE *argv, VALUE self) {
+  model *arg1 = (model *) 0 ;
+  int arg2 ;
+  void *argp1 = 0 ;
+  int res1 = 0 ;
+  int val2 ;
+  int ecode2 = 0 ;
+  double result;
+  VALUE vresult = Qnil;
+
+  if ((argc < 2) || (argc > 2)) {
+    rb_raise(rb_eArgError, "wrong # of arguments(%d for 2)",argc); SWIG_fail;
+  }
+  res1 = SWIG_ConvertPtr(argv[0], &argp1, SWIGTYPE_p_model, 0 | 0 );
+  if (!SWIG_IsOK(res1)) {
+    SWIG_exception_fail(SWIG_ArgError(res1), Ruby_Format_TypeError( "", "model const *","get_decfun_bias", 1, argv[0] ));
+  }
+  arg1 = reinterpret_cast< model * >(argp1);
+  ecode2 = SWIG_AsVal_int(argv[1], &val2);
+  if (!SWIG_IsOK(ecode2)) {
+    SWIG_exception_fail(SWIG_ArgError(ecode2), Ruby_Format_TypeError( "", "int","get_decfun_bias", 2, argv[1] ));
+  }
+  arg2 = static_cast< int >(val2);
+  result = (double)get_decfun_bias((model const *)arg1,arg2);
+  vresult = SWIG_From_double(static_cast< double >(result));
+  return vresult;
+fail:
+  return Qnil;
+}
+
+
 SWIGINTERN VALUE
 _wrap_free_model_content(int argc, VALUE *argv, VALUE self) {
   model *arg1 = (model *) 0 ;
@@ -3800,6 +3872,30 @@ fail:
 }
 
 
+SWIGINTERN VALUE
+_wrap_check_regression_model(int argc, VALUE *argv, VALUE self) {
+  model *arg1 = (model *) 0 ;
+  void *argp1 = 0 ;
+  int res1 = 0 ;
+  int result;
+  VALUE vresult = Qnil;
+
+  if ((argc < 1) || (argc > 1)) {
+    rb_raise(rb_eArgError, "wrong # of arguments(%d for 1)",argc); SWIG_fail;
+  }
+  res1 = SWIG_ConvertPtr(argv[0], &argp1, SWIGTYPE_p_model, 0 | 0 );
+  if (!SWIG_IsOK(res1)) {
+    SWIG_exception_fail(SWIG_ArgError(res1), Ruby_Format_TypeError( "", "model const *","check_regression_model", 1, argv[0] ));
+  }
+  arg1 = reinterpret_cast< model * >(argp1);
+  result = (int)check_regression_model((model const *)arg1);
+  vresult = SWIG_From_int(static_cast< int >(result));
+  return vresult;
+fail:
+  return Qnil;
+}
+
+
 SWIGINTERN VALUE
 _wrap_set_print_string_function(int argc, VALUE *argv, VALUE self) {
   void (*arg1)(char const *) = (void (*)(char const *)) 0 ;
@@ -4622,11 +4718,14 @@ SWIGEXPORT void Init_liblinearswig(void) {
   rb_define_module_function(mLiblinearswig, "get_nr_feature", VALUEFUNC(_wrap_get_nr_feature), -1);
   rb_define_module_function(mLiblinearswig, "get_nr_class", VALUEFUNC(_wrap_get_nr_class), -1);
   rb_define_module_function(mLiblinearswig, "get_labels", VALUEFUNC(_wrap_get_labels), -1);
+  rb_define_module_function(mLiblinearswig, "get_decfun_coef", VALUEFUNC(_wrap_get_decfun_coef), -1);
+  rb_define_module_function(mLiblinearswig, "get_decfun_bias", VALUEFUNC(_wrap_get_decfun_bias), -1);
   rb_define_module_function(mLiblinearswig, "free_model_content", VALUEFUNC(_wrap_free_model_content), -1);
   rb_define_module_function(mLiblinearswig, "free_and_destroy_model", VALUEFUNC(_wrap_free_and_destroy_model), -1);
   rb_define_module_function(mLiblinearswig, "destroy_param", VALUEFUNC(_wrap_destroy_param), -1);
   rb_define_module_function(mLiblinearswig, "check_parameter", VALUEFUNC(_wrap_check_parameter), -1);
   rb_define_module_function(mLiblinearswig, "check_probability_model", VALUEFUNC(_wrap_check_probability_model), -1);
+  rb_define_module_function(mLiblinearswig, "check_regression_model", VALUEFUNC(_wrap_check_regression_model), -1);
   rb_define_module_function(mLiblinearswig, "set_print_string_function", VALUEFUNC(_wrap_set_print_string_function), -1);
   rb_define_module_function(mLiblinearswig, "new_int", VALUEFUNC(_wrap_new_int), -1);
   rb_define_module_function(mLiblinearswig, "delete_int", VALUEFUNC(_wrap_delete_int), -1);
data/ext/linear.cpp CHANGED
@@ -1010,7 +1010,7 @@ static void solve_l2r_l1l2_svr(
     double d, G, H;
     double Gmax_old = INF;
     double Gmax_new, Gnorm1_new;
-    double Gnorm1_init;
+    double Gnorm1_init = -1.0; // Gnorm1_init is initialized at the first iteration
     double *beta = new double[l];
     double *QD = new double[l];
     double *y = prob->y;
@@ -1409,7 +1409,7 @@ static void solve_l1r_l2_svc(
     double d, G_loss, G, H;
     double Gmax_old = INF;
     double Gmax_new, Gnorm1_new;
-    double Gnorm1_init;
+    double Gnorm1_init = -1.0; // Gnorm1_init is initialized at the first iteration
     double d_old, d_diff;
     double loss_old, loss_new;
     double appxcond, cond;
@@ -1699,7 +1699,7 @@ static void solve_l1r_lr(
     double sigma = 0.01;
     double w_norm, w_norm_new;
     double z, G, H;
-    double Gnorm1_init;
+    double Gnorm1_init = -1.0; // Gnorm1_init is initialized at the first iteration
     double Gmax_old = INF;
     double Gmax_new, Gnorm1_new;
     double QP_Gmax_old = INF;
@@ -2051,8 +2051,8 @@ static void transpose(const problem *prob, feature_node **x_space_ret, problem *
     int i;
     int l = prob->l;
     int n = prob->n;
-    long int nnz = 0;
-    long int *col_ptr = new long int [n+1];
+    size_t nnz = 0;
+    size_t *col_ptr = new size_t [n+1];
     feature_node *x_space;
     prob_col->l = l;
     prob_col->n = n;
@@ -2305,9 +2305,7 @@ model* train(const problem *prob, const parameter *param)
     model_->param = *param;
     model_->bias = prob->bias;
 
-    if(param->solver_type == L2R_L2LOSS_SVR ||
-       param->solver_type == L2R_L1LOSS_SVR_DUAL ||
-       param->solver_type == L2R_L2LOSS_SVR_DUAL)
+    if(check_regression_model(model_))
     {
         model_->w = Malloc(double, w_size);
         model_->nr_class = 2;
@@ -2512,9 +2510,7 @@ double predict_values(const struct model *model_, const struct feature_node *x,
 
     if(nr_class==2)
     {
-        if(model_->param.solver_type == L2R_L2LOSS_SVR ||
-           model_->param.solver_type == L2R_L1LOSS_SVR_DUAL ||
-           model_->param.solver_type == L2R_L2LOSS_SVR_DUAL)
+        if(check_regression_model(model_))
             return dec_values[0];
         else
             return (dec_values[0]>0)?model_->label[0]:model_->label[1];
@@ -2764,6 +2760,53 @@ void get_labels(const model *model_, int* label)
         label[i] = model_->label[i];
 }
 
+// use inline here for better performance (around 20% faster than the non-inline one)
+static inline double get_w_value(const struct model *model_, int idx, int label_idx)
+{
+    int nr_class = model_->nr_class;
+    int solver_type = model_->param.solver_type;
+    const double *w = model_->w;
+
+    if(idx < 0 || idx > model_->nr_feature)
+        return 0;
+    if(check_regression_model(model_))
+        return w[idx];
+    else
+    {
+        if(label_idx < 0 || label_idx >= nr_class)
+            return 0;
+        if(nr_class == 2 && solver_type != MCSVM_CS)
+        {
+            if(label_idx == 0)
+                return w[idx];
+            else
+                return -w[idx];
+        }
+        else
+            return w[idx*nr_class+label_idx];
+    }
+}
+
+// feat_idx: starting from 1 to nr_feature
+// label_idx: starting from 0 to nr_class-1 for classification models;
+// for regression models, label_idx is ignored.
+double get_decfun_coef(const struct model *model_, int feat_idx, int label_idx)
+{
+    if(feat_idx > model_->nr_feature)
+        return 0;
+    return get_w_value(model_, feat_idx-1, label_idx);
+}
+
+double get_decfun_bias(const struct model *model_, int label_idx)
+{
+    int bias_idx = model_->nr_feature;
+    double bias = model_->bias;
+    if(bias <= 0)
+        return 0;
+    else
+        return bias*get_w_value(model_, bias_idx, label_idx);
+}
+
 void free_model_content(struct model *model_ptr)
 {
     if(model_ptr->w != NULL)
@@ -2824,6 +2867,13 @@ int check_probability_model(const struct model *model_)
             model_->param.solver_type==L1R_LR);
 }
 
+int check_regression_model(const struct model *model_)
+{
+    return (model_->param.solver_type==L2R_L2LOSS_SVR ||
+        model_->param.solver_type==L2R_L1LOSS_SVR_DUAL ||
+        model_->param.solver_type==L2R_L2LOSS_SVR_DUAL);
+}
+
 void set_print_string_function(void (*print_func)(const char*))
 {
     if (print_func == NULL)
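Note the two-class shortcut in get_w_value above: a binary classifier trained with any solver except MCSVM_CS stores a single weight vector, and the coefficient for label index 1 is simply the negation of the one for label index 0. Through the gem's Model API (see the model.rb diff below), a hedged illustration, assuming `model` is a trained two-class classifier:

```ruby
w = model.coefficient(1, 0)   # w[0] in the C code above
model.coefficient(1, 1) == -w # => true: sign-flipped for the second label
```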
data/ext/linear.h CHANGED
@@ -57,6 +57,8 @@ struct model *load_model(const char *model_file_name);
 int get_nr_feature(const struct model *model_);
 int get_nr_class(const struct model *model_);
 void get_labels(const struct model *model_, int* label);
+double get_decfun_coef(const struct model *model_, int feat_idx, int label_idx);
+double get_decfun_bias(const struct model *model_, int label_idx);
 
 void free_model_content(struct model *model_ptr);
 void free_and_destroy_model(struct model **model_ptr_ptr);
@@ -64,6 +66,7 @@ void destroy_param(struct parameter *param);
 
 const char *check_parameter(const struct problem *prob, const struct parameter *param);
 int check_probability_model(const struct model *model);
+int check_regression_model(const struct model *model);
 void set_print_string_function(void (*print_func) (const char*));
 
 #ifdef __cplusplus
data/lib/liblinear/model.rb CHANGED
@@ -23,15 +23,26 @@ module Liblinear
     end
 
     # @return [Integer]
-    def nr_class
+    def class_size
       get_nr_class(@model)
     end
 
+    # @return [Integer]
+    def nr_class
+      warn "'nr_class' is deprecated. Please use 'class_size' instead."
+      class_size
+    end
+
+    # @return [Integer]
+    def feature_size
+      get_nr_feature(@model)
+    end
+
     # @return [Array <Integer>]
     def labels
-      c_int_array = new_int(nr_class)
+      c_int_array = new_int(class_size)
       get_labels(@model, c_int_array)
-      labels = int_array_c_to_ruby(c_int_array, nr_class)
+      labels = int_array_c_to_ruby(c_int_array, class_size)
       delete_int(c_int_array)
       labels
     end
@@ -62,14 +73,34 @@ module Liblinear
       save_model(filename, @model)
     end
 
-    private
+    # @param feature_index [Integer]
+    # @param label_index [Integer]
+    # @return [Double, Array <Double>]
+    def coefficient(feature_index = nil, label_index = 0)
+      return get_decfun_coef(@model, feature_index, label_index) if feature_index
+      coefficients = []
+      feature_size.times.map {|feature_index| get_decfun_coef(@model, feature_index + 1, label_index)}
+    end
+
+    # @param label_index [Integer]
+    # @return [Double]
+    def bias(label_index = 0)
+      get_decfun_bias(@model, label_index)
+    end
+
+    # @return [Boolean]
+    def regression_model?
+      check_regression_model(@model) == 1 ? true : false
+    end
+
+    private
     # @param example [Array, Hash]
     # @return [Hash]
     def predict_prob_val(example, liblinear_func)
       feature_nodes = convert_to_feature_node_array(example, @model.nr_feature, @model.bias)
-      c_double_array = Liblinearswig.new_double(nr_class)
+      c_double_array = new_double(class_size)
       Liblinearswig.send(liblinear_func, @model, feature_nodes, c_double_array)
-      values = double_array_c_to_ruby(c_double_array, nr_class)
+      values = double_array_c_to_ruby(c_double_array, class_size)
       delete_double(c_double_array)
       feature_node_array_destroy(feature_nodes)
       value_list = {}
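Taken together, the additions to Liblinear::Model look like this in use; a minimal sketch, assuming `model` was trained as in the README quick-start:

```ruby
model.class_size         # => number of classes (nr_class still works but warns)
model.feature_size       # => number of features
model.labels             # => e.g. [-1, 1]
model.coefficient        # => weights of all features for label index 0
model.coefficient(3, 0)  # => weight of feature 3 for label index 0
model.bias               # => bias term (0.0 when the model was trained with bias <= 0)
model.regression_model?  # => false for classifiers, true for the SVR solvers
```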
data/lib/liblinear/version.rb CHANGED
@@ -1,3 +1,3 @@
 module Liblinear
-  VERSION = '0.0.4'
+  VERSION = '0.0.5'
 end
data/{liblinear-1.94 → liblinear-1.95}/COPYRIGHT CHANGED
@@ -1,5 +1,5 @@
 
-Copyright (c) 2007-2013 The LIBLINEAR Project.
+Copyright (c) 2007-2014 The LIBLINEAR Project.
 All rights reserved.
 
 Redistribution and use in source and binary forms, with or without
data/{liblinear-1.94 → liblinear-1.95}/Makefile CHANGED
@@ -2,7 +2,7 @@ CXX ?= g++
 CC ?= gcc
 CFLAGS = -Wall -Wconversion -O3 -fPIC
 LIBS = blas/blas.a
-SHVER = 1
+SHVER = 2
 OS = $(shell uname)
 #LIBS = -lblas
 
data/{liblinear-1.94 → liblinear-1.95}/Makefile.win CHANGED
@@ -5,7 +5,7 @@
 
 ##########################################
 CXX = cl.exe
-CFLAGS = -nologo -O2 -EHsc -I. -D __WIN32__ -D _CRT_SECURE_NO_DEPRECATE
+CFLAGS = /nologo /O2 /EHsc /I. /D _WIN32 /D _CRT_SECURE_NO_DEPRECATE
 TARGET = windows
 
 all: $(TARGET)\train.exe $(TARGET)\predict.exe
data/{liblinear-1.94 → liblinear-1.95}/README CHANGED
@@ -123,7 +123,7 @@ options:
 -s 1, 3, 4 and 7
     Dual maximal violation <= eps; similar to libsvm (default 0.1)
 -s 5 and 6
-    |f'(w)|_inf <= eps*min(pos,neg)/l*|f'(w0)|_inf,
+    |f'(w)|_1 <= eps*min(pos,neg)/l*|f'(w0)|_1,
     where f is the primal function (default 0.01)
 -s 12 and 13\n"
     |f'(alpha)|_1 <= eps |f'(alpha0)|,
@@ -448,6 +448,24 @@ Library Usage
     This function outputs the name of labels into an array called label.
     For a regression model, label is unchanged.
 
+- Function: double get_decfun_coef(const struct model *model_, int feat_idx,
+            int label_idx);
+
+    This function gives the coefficient for the feature with feature index =
+    feat_idx and the class with label index = label_idx. Note that feat_idx
+    starts from 1, while label_idx starts from 0. If feat_idx is not in the
+    valid range (1 to nr_feature), then a zero value will be returned. For
+    classification models, if label_idx is not in the valid range (0 to
+    nr_class-1), then a zero value will be returned; for regression models,
+    label_idx is ignored.
+
+- Function: double get_decfun_bias(const struct model *model_, int label_idx);
+
+    This function gives the bias term corresponding to the class with the
+    label_idx. For classification models, if label_idx is not in a valid range
+    (0 to nr_class-1), then a zero value will be returned; for regression
+    models, label_idx is ignored.
+
 - Function: const char *check_parameter(const struct problem *prob,
             const struct parameter *param);
 
@@ -456,6 +474,16 @@ Library Usage
     train() and cross_validation(). It returns NULL if the
     parameters are feasible, otherwise an error message is returned.
 
+- Function: int check_probability_model(const struct model *model);
+
+    This function returns 1 if the model supports probability output;
+    otherwise, it returns 0.
+
+- Function: int check_regression_model(const struct model *model);
+
+    This function returns 1 if the model is a regression model; otherwise
+    it returns 0.
+
 - Function: int save_model(const char *model_file_name,
             const struct model *model_);
 
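The out-of-range behaviour documented above passes through the Ruby layer unchanged, since Liblinear::Model#coefficient delegates straight to get_decfun_coef. A hedged sketch, assuming `model` is a trained classification model:

```ruby
model.coefficient(model.feature_size + 1) # => 0.0: feat_idx outside 1..nr_feature
model.coefficient(1, model.class_size)    # => 0.0: label_idx outside 0..nr_class-1
```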
@@ -504,6 +532,9 @@ VC++ or where it is installed.
 
 nmake -f Makefile.win clean all
 
+2. (Optional) To build 64-bit windows binaries, you must
+    (1) Setup vcvars64.bat instead of vcvars32.bat
+    (2) Change CFLAGS in Makefile.win: /D _WIN32 to /D _WIN64
 
 MATLAB/OCTAVE Interface
 =======================