rb-libsvm 1.4.2 → 1.4.3

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz: a4d86f3b24ca149a3d67f9ca37445ae179e48898
-  data.tar.gz: f66c353522007be14a7bdf1ec4a78689fc4eb52c
+  metadata.gz: 0cb9d00d4396bbc79e1d5aff34ab0d45f141437e
+  data.tar.gz: b86a3e1c9f2656e0e0cdfab7942458578584d263
 SHA512:
-  metadata.gz: 4facd54fee43073903cbcbd75e7ac46c8f12e468fa774994f923a1e367e421e8faaee88523e51a1077d6491082a4a7a4ddeb7be9433c3c416341a2809c40e052
-  data.tar.gz: 9c7bed37d221ca4dfff72835e73c5cb994e871f366bcb2b98ec3a440129f58e9d2ca54758f9b68e7c8ab68e86cb95fbd48ecf73b0d9eb5794a106e8b50cc7b5a
+  metadata.gz: c09e012e812f851cfc18ad75eea8292298f5723c12ab1b36f23adbc1ef1f7c7a58ea63846d16596865ae4dec7ae5eaf2772be35b480ef83c0f7a88d7a5c92d03
+  data.tar.gz: 6cd24bba79c2aed7a106d5d8c9d043f6a3ee59ce2ba8e7b204d04168154c2f11d3795ec181889f709548eee9bd19011bb846753f91ecd0278a44dfbb99b44749
data/CHANGELOG.md CHANGED
@@ -2,6 +2,16 @@
 All notable changes to this project will be documented in this file.
 This project adheres to [Semantic Versioning](http://semver.org/).
 
+## [1.4.3] - 2018-09-15
+### Changed
+- Upgrade to LIBSVM 3.23
+
+## [1.4.2] - 2018-09-02
+### Changed
+- bundler requires automatically
+
+## [1.4.1] - 2018-06-15
+
 ## [1.4.0] - 2015-09-18
 ### Changed
 - Complete transition to `expect` specs.
data/README.md CHANGED
@@ -3,7 +3,7 @@
 [![Gem Version](https://badge.fury.io/rb/rb-libsvm.png)](http://badge.fury.io/rb/rb-libsvm)
 [![Build Status](https://secure.travis-ci.org/febeling/rb-libsvm.png)](http://travis-ci.org/febeling/rb-libsvm)
 
-This package provides a Ruby bindings to the [LIBSVM][] library. SVM
+This package provides Ruby bindings to the [LIBSVM][] library. SVM
 is a machine learning and classification algorithm, and LIBSVM is a
 popular free implementation of it, written by Chih-Chung Chang and
 Chih-Jen Lin, of National Taiwan University, Taipei. See the book ["Programming
@@ -25,7 +25,7 @@ this gem. You should install the original package if you need them.
 It is helpful to consult the [README of the LIBSVM][README] package for
 reference when configuring the training parameters.
 
-Currently this package includes libsvm version 3.20.
+Currently this package includes libsvm version 3.23.
 
 ## Dependencies
 
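
For context, the gem's README documents usage along these lines. A minimal sketch, assuming the `Libsvm::Problem`, `Libsvm::SvmParameter`, `Libsvm::Node`, and `Libsvm::Model` API described there (the API itself is not part of this diff):

```ruby
require 'libsvm'

# Configure training parameters; the LIBSVM README explains their meaning.
parameter = Libsvm::SvmParameter.new
parameter.cache_size = 1    # in megabytes
parameter.eps = 0.001
parameter.c = 10

# Two toy training examples with labels 1 and -1.
examples = [[1, 0, 1], [-1, 0, -1]].map { |ary| Libsvm::Node.features(ary) }
problem = Libsvm::Problem.new
problem.set_examples([1, -1], examples)

model = Libsvm::Model.train(problem, parameter)
puts model.predict(Libsvm::Node.features(1, 1, 1))   # => 1.0 or -1.0
```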
data/ext/libsvm/svm.cpp CHANGED
@@ -308,7 +308,7 @@ double Kernel::dot(const svm_node *px, const svm_node *py)
 				++py;
 			else
 				++px;
-		}
+		}
 	}
 	return sum;
 }
@@ -337,7 +337,7 @@ double Kernel::k_function(const svm_node *x, const svm_node *y,
 				else
 				{
 					if(x->index > y->index)
-					{
+					{
 						sum += y->value * y->value;
 						++y;
 					}
@@ -360,7 +360,7 @@ double Kernel::k_function(const svm_node *x, const svm_node *y,
 				sum += y->value * y->value;
 				++y;
 			}
-
+
 			return exp(-param.gamma*sum);
 		}
 		case SIGMOID:
@@ -368,7 +368,7 @@ double Kernel::k_function(const svm_node *x, const svm_node *y,
 		case PRECOMPUTED:  //x: test (validation), y: SV
 			return x[(int)(y->value)].value;
 		default:
-			return 0;  // Unreachable
+			return 0;  // Unreachable
 	}
 }
 
@@ -560,7 +560,7 @@ void Solver::Solve(int l, const QMatrix& Q, const double *p_, const schar *y_,
 	int iter = 0;
 	int max_iter = max(10000000, l>INT_MAX/100 ? INT_MAX : 100*l);
 	int counter = min(l,1000)+1;
-
+
 	while(iter < max_iter)
 	{
 		// show progress and do shrinking
@@ -585,11 +585,11 @@ void Solver::Solve(int l, const QMatrix& Q, const double *p_, const schar *y_,
 			else
 				counter = 1;	// do shrinking next iteration
 		}
-
+
 		++iter;
 
 		// update alpha[i] and alpha[j], handle bounds carefully
-
+
 		const Qfloat *Q_i = Q.get_Q(i,active_size);
 		const Qfloat *Q_j = Q.get_Q(j,active_size);
 
@@ -608,7 +608,7 @@ void Solver::Solve(int l, const QMatrix& Q, const double *p_, const schar *y_,
 			double diff = alpha[i] - alpha[j];
 			alpha[i] += delta;
 			alpha[j] += delta;
-
+
 			if(diff > 0)
 			{
 				if(alpha[j] < 0)
@@ -690,7 +690,7 @@ void Solver::Solve(int l, const QMatrix& Q, const double *p_, const schar *y_,
 
 		double delta_alpha_i = alpha[i] - old_alpha_i;
 		double delta_alpha_j = alpha[j] - old_alpha_j;
-
+
 		for(int k=0;k<active_size;k++)
 		{
 			G[k] += Q_i[k]*delta_alpha_i + Q_j[k]*delta_alpha_j;
@@ -790,7 +790,7 @@ int Solver::select_working_set(int &out_i, int &out_j)
 	// j: minimizes the decrease of obj value
 	//    (if quadratic coefficeint <= 0, replace it with tau)
 	//    -y_j*grad(f)_j < -y_i*grad(f)_i, j in I_low(\alpha)
-
+
 	double Gmax = -INF;
 	double Gmax2 = -INF;
 	int Gmax_idx = -1;
@@ -798,7 +798,7 @@ int Solver::select_working_set(int &out_i, int &out_j)
 	double obj_diff_min = INF;
 
 	for(int t=0;t<active_size;t++)
-		if(y[t]==+1)
+		if(y[t]==+1)
 		{
 			if(!is_upper_bound(t))
 				if(-G[t] >= Gmax)
@@ -874,7 +874,7 @@ int Solver::select_working_set(int &out_i, int &out_j)
 		}
 	}
 
-	if(Gmax+Gmax2 < eps)
+	if(Gmax+Gmax2 < eps || Gmin_idx == -1)
 		return 1;
 
 	out_i = Gmax_idx;
@@ -895,7 +895,7 @@ bool Solver::be_shrunk(int i, double Gmax1, double Gmax2)
 	{
 		if(y[i]==+1)
 			return(G[i] > Gmax2);
-		else
+		else
 			return(G[i] > Gmax1);
 	}
 	else
@@ -911,27 +911,27 @@ void Solver::do_shrinking()
 	// find maximal violating pair first
 	for(i=0;i<active_size;i++)
 	{
-		if(y[i]==+1)
+		if(y[i]==+1)
 		{
-			if(!is_upper_bound(i))
+			if(!is_upper_bound(i))
 			{
 				if(-G[i] >= Gmax1)
 					Gmax1 = -G[i];
 			}
-			if(!is_lower_bound(i))
+			if(!is_lower_bound(i))
 			{
 				if(G[i] >= Gmax2)
 					Gmax2 = G[i];
 			}
 		}
-		else
+		else
 		{
-			if(!is_upper_bound(i))
+			if(!is_upper_bound(i))
 			{
 				if(-G[i] >= Gmax2)
 					Gmax2 = -G[i];
 			}
-			if(!is_lower_bound(i))
+			if(!is_lower_bound(i))
 			{
 				if(G[i] >= Gmax1)
 					Gmax1 = G[i];
@@ -939,7 +939,7 @@ void Solver::do_shrinking()
 		}
 	}
 
-	if(unshrink == false && Gmax1 + Gmax2 <= eps*10)
+	if(unshrink == false && Gmax1 + Gmax2 <= eps*10)
 	{
 		unshrink = true;
 		reconstruct_gradient();
@@ -1078,7 +1078,7 @@ int Solver_NU::select_working_set(int &out_i, int &out_j)
 	{
 		if(y[j]==+1)
 		{
-			if (!is_lower_bound(j))
+			if (!is_lower_bound(j))
 			{
 				double grad_diff=Gmaxp+G[j];
 				if (G[j] >= Gmaxp2)
@@ -1126,7 +1126,7 @@ int Solver_NU::select_working_set(int &out_i, int &out_j)
 		}
 	}
 
-	if(max(Gmaxp+Gmaxp2,Gmaxn+Gmaxn2) < eps)
+	if(max(Gmaxp+Gmaxp2,Gmaxn+Gmaxn2) < eps || Gmin_idx == -1)
 		return 1;
 
 	if (y[Gmin_idx] == +1)
@@ -1144,14 +1144,14 @@ bool Solver_NU::be_shrunk(int i, double Gmax1, double Gmax2, double Gmax3, doubl
 	{
 		if(y[i]==+1)
 			return(-G[i] > Gmax1);
-		else
+		else
 			return(-G[i] > Gmax4);
 	}
 	else if(is_lower_bound(i))
 	{
 		if(y[i]==+1)
 			return(G[i] > Gmax2);
-		else
+		else
 			return(G[i] > Gmax3);
 	}
 	else
@@ -1180,14 +1180,14 @@ void Solver_NU::do_shrinking()
 		if(!is_lower_bound(i))
 		{
 			if(y[i]==+1)
-			{
+			{
 				if(G[i] > Gmax2) Gmax2 = G[i];
 			}
 			else if(G[i] > Gmax3) Gmax3 = G[i];
 		}
 	}
 
-	if(unshrink == false && max(Gmax1+Gmax2,Gmax3+Gmax4) <= eps*10)
+	if(unshrink == false && max(Gmax1+Gmax2,Gmax3+Gmax4) <= eps*10)
 	{
 		unshrink = true;
 		reconstruct_gradient();
@@ -1250,12 +1250,12 @@ double Solver_NU::calculate_rho()
 		r1 = sum_free1/nr_free1;
 	else
 		r1 = (ub1+lb1)/2;
-
+
 	if(nr_free2 > 0)
 		r2 = sum_free2/nr_free2;
 	else
 		r2 = (ub2+lb2)/2;
-
+
 	si->r = (r1+r2)/2;
 	return (r1-r2)/2;
 }
@@ -1264,7 +1264,7 @@ double Solver_NU::calculate_rho()
 // Q matrices for various formulations
 //
 class SVC_Q: public Kernel
-{
+{
 public:
 	SVC_Q(const svm_problem& prob, const svm_parameter& param, const schar *y_)
 	:Kernel(prob.l, prob.x, param)
@@ -1275,7 +1275,7 @@ public:
 		for(int i=0;i<prob.l;i++)
 			QD[i] = (this->*kernel_function)(i,i);
 	}
-
+
 	Qfloat *get_Q(int i, int len) const
 	{
 		Qfloat *data;
@@ -1324,7 +1324,7 @@ public:
 		for(int i=0;i<prob.l;i++)
 			QD[i] = (this->*kernel_function)(i,i);
 	}
-
+
 	Qfloat *get_Q(int i, int len) const
 	{
 		Qfloat *data;
@@ -1360,7 +1360,7 @@ private:
 };
 
 class SVR_Q: public Kernel
-{
+{
 public:
 	SVR_Q(const svm_problem& prob, const svm_parameter& param)
 	:Kernel(prob.l, prob.x, param)
@@ -1390,7 +1390,7 @@ public:
 		swap(index[i],index[j]);
 		swap(QD[i],QD[j]);
 	}
-
+
 	Qfloat *get_Q(int i, int len) const
 	{
 		Qfloat *data;
@@ -1703,7 +1703,7 @@ static decision_function svm_train_one(
 
 // Platt's binary SVM Probablistic Output: an improvement from Lin et al.
 static void sigmoid_train(
-	int l, const double *dec_values, const double *labels,
+	int l, const double *dec_values, const double *labels,
 	double& A, double& B)
 {
 	double prior1=0, prior0 = 0;
@@ -1712,7 +1712,7 @@ static void sigmoid_train(
 	for (i=0;i<l;i++)
 		if (labels[i] > 0) prior1+=1;
 		else prior0+=1;
-
+
 	int max_iter=100;	// Maximal number of iterations
 	double min_step=1e-10;	// Minimal step taken in line search
 	double sigma=1e-12;	// For numerically strict PD of Hessian
@@ -1723,7 +1723,7 @@ static void sigmoid_train(
 	double fApB,p,q,h11,h22,h21,g1,g2,det,dA,dB,gd,stepsize;
 	double newA,newB,newf,d1,d2;
 	int iter;
-
+
 	// Initial Point and Initial Fun Value
 	A=0.0; B=log((prior0+1.0)/(prior1+1.0));
 	double fval = 0.0;
@@ -1833,7 +1833,7 @@ static void multiclass_probability(int k, double **r, double *p)
 	double **Q=Malloc(double *,k);
 	double *Qp=Malloc(double,k);
 	double pQp, eps=0.005/k;
-
+
 	for (t=0;t<k;t++)
 	{
 		p[t]=1.0/k;  // Valid if k = 1
@@ -1869,7 +1869,7 @@ static void multiclass_probability(int k, double **r, double *p)
 				max_error=error;
 		}
 		if (max_error<eps) break;
-
+
 		for (t=0;t<k;t++)
 		{
 			double diff=(-Qp[t]+pQp)/Q[t][t];
@@ -1916,7 +1916,7 @@ static void svm_binary_svc_probability(
 		subprob.l = prob->l-(end-begin);
 		subprob.x = Malloc(struct svm_node*,subprob.l);
 		subprob.y = Malloc(double,subprob.l);
-
+
 		k=0;
 		for(j=0;j<begin;j++)
 		{
@@ -1964,19 +1964,19 @@ static void svm_binary_svc_probability(
 				svm_predict_values(submodel,prob->x[perm[j]],&(dec_values[perm[j]]));
 				// ensure +1 -1 order; reason not using CV subroutine
 				dec_values[perm[j]] *= submodel->label[0];
-			}
+			}
 			svm_free_and_destroy_model(&submodel);
 			svm_destroy_param(&subparam);
 		}
 		free(subprob.x);
 		free(subprob.y);
-	}
+	}
 	sigmoid_train(prob->l,dec_values,prob->y,probA,probB);
 	free(dec_values);
 	free(perm);
 }
 
-// Return parameter of a Laplace distribution
+// Return parameter of a Laplace distribution
 static double svm_svr_probability(
 	const svm_problem *prob, const svm_parameter *param)
 {
@@ -1992,15 +1992,15 @@ static double svm_svr_probability(
 	{
 		ymv[i]=prob->y[i]-ymv[i];
 		mae += fabs(ymv[i]);
-	}
+	}
 	mae /= prob->l;
 	double std=sqrt(2*mae*mae);
 	int count=0;
 	mae=0;
 	for(i=0;i<prob->l;i++)
-		if (fabs(ymv[i]) > 5*std)
+		if (fabs(ymv[i]) > 5*std)
 			count=count+1;
-		else
+		else
 			mae+=fabs(ymv[i]);
 	mae /= (prob->l-count);
 	info("Prob. model for test data: target value = predicted value + z,\nz: Laplace distribution e^(-|z|/sigma)/(2sigma),sigma= %g\n",mae);
@@ -2049,8 +2049,8 @@ static void svm_group_classes(const svm_problem *prob, int *nr_class_ret, int **
 	}
 
 	//
-	// Labels are ordered by their first occurrence in the training set.
-	// However, for two-class sets with -1/+1 labels and -1 appears first,
+	// Labels are ordered by their first occurrence in the training set.
+	// However, for two-class sets with -1/+1 labels and -1 appears first,
 	// we swap labels to ensure that internally the binary SVM has positive data corresponding to the +1 instances.
 	//
 	if (nr_class == 2 && label[0] == -1 && label[1] == 1)
@@ -2106,7 +2106,7 @@ svm_model *svm_train(const svm_problem *prob, const svm_parameter *param)
 		model->probA = NULL; model->probB = NULL;
 		model->sv_coef = Malloc(double *,1);
 
-		if(param->probability &&
+		if(param->probability &&
 		   (param->svm_type == EPSILON_SVR ||
 		    param->svm_type == NU_SVR))
 		{
@@ -2134,7 +2134,7 @@ svm_model *svm_train(const svm_problem *prob, const svm_parameter *param)
 				model->sv_coef[0][j] = f.alpha[i];
 				model->sv_indices[j] = i+1;
 				++j;
-			}
+			}
 
 		free(f.alpha);
 	}
@@ -2150,9 +2150,9 @@ svm_model *svm_train(const svm_problem *prob, const svm_parameter *param)
 
 		// group training data of the same class
 		svm_group_classes(prob,&nr_class,&label,&start,&count,perm);
-		if(nr_class == 1)
+		if(nr_class == 1)
 			info("WARNING: training data in only one class. See README for details.\n");
-
+
 		svm_node **x = Malloc(svm_node *,l);
 		int i;
 		for(i=0;i<l;i++)
@@ -2164,7 +2164,7 @@ svm_model *svm_train(const svm_problem *prob, const svm_parameter *param)
 		for(i=0;i<nr_class;i++)
 			weighted_C[i] = param->C;
 		for(i=0;i<param->nr_weight;i++)
-		{
+		{
 			int j;
 			for(j=0;j<nr_class;j++)
 				if(param->weight_label[i] == label[j])
@@ -2176,7 +2176,7 @@ svm_model *svm_train(const svm_problem *prob, const svm_parameter *param)
 		}
 
 		// train k*(k-1)/2 models
-
+
 		bool *nonzero = Malloc(bool,l);
 		for(i=0;i<l;i++)
 			nonzero[i] = false;
@@ -2229,11 +2229,11 @@ svm_model *svm_train(const svm_problem *prob, const svm_parameter *param)
 		// build output
 
 		model->nr_class = nr_class;
-
+
 		model->label = Malloc(int,nr_class);
 		for(i=0;i<nr_class;i++)
 			model->label[i] = label[i];
-
+
 		model->rho = Malloc(double,nr_class*(nr_class-1)/2);
 		for(i=0;i<nr_class*(nr_class-1)/2;i++)
 			model->rho[i] = f[i].rho;
@@ -2262,14 +2262,14 @@ svm_model *svm_train(const svm_problem *prob, const svm_parameter *param)
 			int nSV = 0;
 			for(int j=0;j<count[i];j++)
 				if(nonzero[start[i]+j])
-				{
+				{
 					++nSV;
 					++total_sv;
 				}
 			model->nSV[i] = nSV;
 			nz_count[i] = nSV;
 		}
-
+
 		info("Total nSV = %d\n",total_sv);
 
 		model->l = total_sv;
@@ -2304,7 +2304,7 @@ svm_model *svm_train(const svm_problem *prob, const svm_parameter *param)
 				int sj = start[j];
 				int ci = count[i];
 				int cj = count[j];
-
+
 				int q = nz_start[i];
 				int k;
 				for(k=0;k<ci;k++)
@@ -2316,7 +2316,7 @@ svm_model *svm_train(const svm_problem *prob, const svm_parameter *param)
 					model->sv_coef[i][q++] = f[p].alpha[ci+k];
 				++p;
 			}
-
+
 		free(label);
 		free(probA);
 		free(probB);
@@ -2365,7 +2365,7 @@ void svm_cross_validation(const svm_problem *prob, const svm_parameter *param, i
 		int *index = Malloc(int,l);
 		for(i=0;i<l;i++)
 			index[i]=perm[i];
-		for (c=0; c<nr_class; c++)
+		for (c=0; c<nr_class; c++)
 			for(i=0;i<count[c];i++)
 			{
 				int j = i+rand()%(count[c]-i);
@@ -2422,7 +2422,7 @@ void svm_cross_validation(const svm_problem *prob, const svm_parameter *param, i
 		subprob.l = l-(end-begin);
 		subprob.x = Malloc(struct svm_node*,subprob.l);
 		subprob.y = Malloc(double,subprob.l);
-
+
 		k=0;
 		for(j=0;j<begin;j++)
 		{
@@ -2437,7 +2437,7 @@ void svm_cross_validation(const svm_problem *prob, const svm_parameter *param, i
 			++k;
 		}
 		struct svm_model *submodel = svm_train(&subprob,param);
-		if(param->probability &&
+		if(param->probability &&
 		   (param->svm_type == C_SVC || param->svm_type == NU_SVC))
 		{
 			double *prob_estimates=Malloc(double,svm_get_nr_class(submodel));
@@ -2451,7 +2451,7 @@ void svm_cross_validation(const svm_problem *prob, const svm_parameter *param, i
 		svm_free_and_destroy_model(&submodel);
 		free(subprob.x);
 		free(subprob.y);
-	}
+	}
 	free(fold_start);
 	free(perm);
 }
@@ -2521,7 +2521,7 @@ double svm_predict_values(const svm_model *model, const svm_node *x, double* dec
 	{
 		int nr_class = model->nr_class;
 		int l = model->l;
-
+
 		double *kvalue = Malloc(double,l);
 		for(i=0;i<l;i++)
 			kvalue[i] = Kernel::k_function(x,model->SV[i],model->param);
@@ -2544,7 +2544,7 @@ double svm_predict_values(const svm_model *model, const svm_node *x, double* dec
 				int sj = start[j];
 				int ci = model->nSV[i];
 				int cj = model->nSV[j];
-
+
 				int k;
 				double *coef1 = model->sv_coef[j-1];
 				double *coef2 = model->sv_coef[i];
@@ -2582,7 +2582,7 @@ double svm_predict(const svm_model *model, const svm_node *x)
 	   model->param.svm_type == EPSILON_SVR ||
 	   model->param.svm_type == NU_SVR)
 		dec_values = Malloc(double, 1);
-	else
+	else
 		dec_values = Malloc(double, nr_class*(nr_class-1)/2);
 	double pred_result = svm_predict_values(model, x, dec_values);
 	free(dec_values);
@@ -2612,7 +2612,13 @@ double svm_predict_probability(
 				pairwise_prob[j][i]=1-pairwise_prob[i][j];
 				k++;
 			}
-		multiclass_probability(nr_class,pairwise_prob,prob_estimates);
+		if (nr_class == 2)
+		{
+			prob_estimates[0] = pairwise_prob[0][1];
+			prob_estimates[1] = pairwise_prob[1][0];
+		}
+		else
+			multiclass_probability(nr_class,pairwise_prob,prob_estimates);
 
 		int prob_max_idx = 0;
 		for(i=1;i<nr_class;i++)
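
The hunk above is a behavioral change from the LIBSVM upgrade: for two-class models, `svm_predict_probability` now uses the pairwise probability directly instead of running the multiclass coupling step, so binary probability estimates are no longer perturbed by that iteration. From Ruby this surfaces through probability predictions; a hedged sketch, continuing the README-style example earlier and assuming the gem's `probability` parameter flag and `Model#predict_probability` wrapper (neither is shown in this diff):

```ruby
# Train with probability estimates enabled, then ask the model for
# per-class probabilities instead of a bare label.
parameter.probability = 1
model = Libsvm::Model.train(problem, parameter)

label, estimates = model.predict_probability(Libsvm::Node.features(1, 1, 1))
# With exactly two classes, `estimates` now comes straight from the
# pairwise sigmoid fit rather than the multiclass coupling loop.
```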
@@ -2624,7 +2630,7 @@ double svm_predict_probability(
 		free(pairwise_prob);
 		return model->label[prob_max_idx];
 	}
-	else
+	else
 		return svm_predict(model, x);
 }
 
@@ -2643,7 +2649,10 @@ int svm_save_model(const char *model_file_name, const svm_model *model)
 	FILE *fp = fopen(model_file_name,"w");
 	if(fp==NULL) return -1;
 
-	char *old_locale = strdup(setlocale(LC_ALL, NULL));
+	char *old_locale = setlocale(LC_ALL, NULL);
+	if (old_locale) {
+		old_locale = strdup(old_locale);
+	}
 	setlocale(LC_ALL, "C");
 
 	const svm_parameter& param = model->param;
@@ -2655,23 +2664,23 @@ int svm_save_model(const char *model_file_name, const svm_model *model)
 		fprintf(fp,"degree %d\n", param.degree);
 
 	if(param.kernel_type == POLY || param.kernel_type == RBF || param.kernel_type == SIGMOID)
-		fprintf(fp,"gamma %g\n", param.gamma);
+		fprintf(fp,"gamma %.17g\n", param.gamma);
 
 	if(param.kernel_type == POLY || param.kernel_type == SIGMOID)
-		fprintf(fp,"coef0 %g\n", param.coef0);
+		fprintf(fp,"coef0 %.17g\n", param.coef0);
 
 	int nr_class = model->nr_class;
 	int l = model->l;
 	fprintf(fp, "nr_class %d\n", nr_class);
 	fprintf(fp, "total_sv %d\n",l);
-
+
 	{
 		fprintf(fp, "rho");
 		for(int i=0;i<nr_class*(nr_class-1)/2;i++)
-			fprintf(fp," %g",model->rho[i]);
+			fprintf(fp," %.17g",model->rho[i]);
 		fprintf(fp, "\n");
 	}
-
+
 	if(model->label)
 	{
 		fprintf(fp, "label");
@@ -2684,14 +2693,14 @@ int svm_save_model(const char *model_file_name, const svm_model *model)
 	{
 		fprintf(fp, "probA");
 		for(int i=0;i<nr_class*(nr_class-1)/2;i++)
-			fprintf(fp," %g",model->probA[i]);
+			fprintf(fp," %.17g",model->probA[i]);
 		fprintf(fp, "\n");
 	}
 	if(model->probB)
 	{
 		fprintf(fp, "probB");
 		for(int i=0;i<nr_class*(nr_class-1)/2;i++)
-			fprintf(fp," %g",model->probB[i]);
+			fprintf(fp," %.17g",model->probB[i]);
 		fprintf(fp, "\n");
 	}
 
@@ -2710,7 +2719,7 @@ int svm_save_model(const char *model_file_name, const svm_model *model)
 	for(int i=0;i<l;i++)
 	{
 		for(int j=0;j<nr_class-1;j++)
-			fprintf(fp, "%.16g ",sv_coef[j][i]);
+			fprintf(fp, "%.17g ",sv_coef[j][i]);
 
 		const svm_node *p = SV[i];
 
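The `%g` → `%.17g` changes above (gamma, coef0, rho, probA, probB) and the `%.16g` → `%.17g` change for the SV coefficients widen the precision used when writing model files: 17 significant decimal digits are the minimum that guarantee an IEEE 754 double survives a text round trip, while 16 can lose the last bit. A quick illustration (Ruby here for brevity; the C code relies on the same printf semantics):

```ruby
x = 0.1 + 0.2                # => 0.30000000000000004
Float("%.16g" % x) == x      # => false: prints as "0.3", which parses to a different double
Float("%.17g" % x) == x      # => true:  "0.30000000000000004" round-trips exactly
```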
@@ -2764,6 +2773,11 @@ static char* readline(FILE *input)
 bool read_model_header(FILE *fp, svm_model* model)
 {
 	svm_parameter& param = model->param;
+	// parameters for training only won't be assigned, but arrays are assigned as NULL for safety
+	param.nr_weight = 0;
+	param.weight_label = NULL;
+	param.weight = NULL;
+
 	char cmd[81];
 	while(1)
 	{
@@ -2788,7 +2802,7 @@ bool read_model_header(FILE *fp, svm_model* model)
 		}
 	}
 	else if(strcmp(cmd,"kernel_type")==0)
-	{
+	{
 		FSCANF(fp,"%80s",cmd);
 		int i;
 		for(i=0;kernel_type_table[i];i++)
@@ -2801,7 +2815,7 @@ bool read_model_header(FILE *fp, svm_model* model)
 		}
 		if(kernel_type_table[i] == NULL)
 		{
-			fprintf(stderr,"unknown kernel function.\n");
+			fprintf(stderr,"unknown kernel function.\n");
 			return false;
 		}
 	}
@@ -2875,7 +2889,10 @@ svm_model *svm_load_model(const char *model_file_name)
 	FILE *fp = fopen(model_file_name,"rb");
 	if(fp==NULL) return NULL;
 
-	char *old_locale = strdup(setlocale(LC_ALL, NULL));
+	char *old_locale = setlocale(LC_ALL, NULL);
+	if (old_locale) {
+		old_locale = strdup(old_locale);
+	}
 	setlocale(LC_ALL, "C");
 
 	// read parameters
@@ -2887,7 +2904,7 @@ svm_model *svm_load_model(const char *model_file_name)
 	model->sv_indices = NULL;
 	model->label = NULL;
 	model->nSV = NULL;
-
+
 	// read header
 	if (!read_model_header(fp, model))
 	{
@@ -2900,7 +2917,7 @@ svm_model *svm_load_model(const char *model_file_name)
 		free(model);
 		return NULL;
 	}
-
+
 	// read sv_coef and SV
 
 	int elements = 0;
@@ -3037,9 +3054,9 @@ const char *svm_check_parameter(const svm_problem *prob, const svm_parameter *pa
 	   svm_type != EPSILON_SVR &&
 	   svm_type != NU_SVR)
 		return "unknown svm type";
-
+
 	// kernel_type, degree
-
+
 	int kernel_type = param->kernel_type;
 	if(kernel_type != LINEAR &&
 	   kernel_type != POLY &&
@@ -3092,7 +3109,7 @@ const char *svm_check_parameter(const svm_problem *prob, const svm_parameter *pa
 
 
 	// check whether nu-svc is feasible
-
+
 	if(svm_type == NU_SVC)
 	{
 		int l = prob->l;
@@ -3125,7 +3142,7 @@ const char *svm_check_parameter(const svm_problem *prob, const svm_parameter *pa
 				++nr_class;
 			}
 		}
-
+
 		for(i=0;i<nr_class;i++)
 		{
 			int n1 = count[i];
data/ext/libsvm/svm.h CHANGED
@@ -1,7 +1,7 @@
 #ifndef _LIBSVM_H
 #define _LIBSVM_H
 
-#define LIBSVM_VERSION 320
+#define LIBSVM_VERSION 323
 
 #ifdef __cplusplus
 extern "C" {
@@ -48,7 +48,7 @@ struct svm_parameter
 
 //
 // svm_model
-//
+//
 struct svm_model
 {
 	struct svm_parameter param;	/* parameter */
data/lib/libsvm/version.rb CHANGED
@@ -1,3 +1,3 @@
 module Libsvm
-  VERSION = "1.4.2"
+  VERSION = "1.4.3"
 end
metadata CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: rb-libsvm
 version: !ruby/object:Gem::Version
-  version: 1.4.2
+  version: 1.4.3
 platform: ruby
 authors:
 - C. Florian Ebeling
@@ -9,7 +9,7 @@ authors:
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2018-09-02 00:00:00.000000000 Z
+date: 2018-09-15 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: rake-compiler