numo-liblinear 2.0.0 → 2.1.0

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: f2be7c6e622882f6e9bde188d859e85cffff521041ec085a124925e356e33b3b
- data.tar.gz: ba6ca472a2e81e4ce119f853d8178e14349690ded3dab957c93d14edaef077a4
+ metadata.gz: 8ea54dc3ead49a3edb3d55b2bf46c673fca3a11b8b29001658fc453d8bfd8831
+ data.tar.gz: 3c4751dce386d1127ab4c1570807671a7b8ab73f632899e63655096cead86ffe
  SHA512:
- metadata.gz: 703f37ebac8b88194070e13199452669fb5c38415e4b50f4ab310abf685249837aa20af27865d246f4683ccf871ed3aeb55639fca78909ff74e3ef30a84f3ce4
- data.tar.gz: 4739ef305741e787801ad03f6fd24c4e6ca91ea28687f72683a24136f2c614766fc506a709b947df96f8e21a33b2f9ebb654af82964a75dc4df158f700548251
+ metadata.gz: 1f016fa4a2a372fb7948eea6418973ecf9228f3e8a8b58b23b7da9360f0dc3f0facd85a45d029b259bfd5d52ffb4a9b42565ebd1d163c9ac881f37c978a3f22b
+ data.tar.gz: 7119f78866b27df545177c6b6d24b2df261caade7a8437721df5b99c616af16ef2a039919ffc9ed35b5ee98760cc77520e9d708edeb9a851d822232b68c43806
data/CHANGELOG.md CHANGED
@@ -1,3 +1,6 @@
+ # 2.1.0
+ - Update bundled LIBLINEAR to 2.44
+
  # 2.0.0
  - Redesign native extension codes.
  - Change not ot use git submodule for LIBLINEAR codes bundle.
data/README.md CHANGED
@@ -172,3 +172,7 @@ param = {
 
  Bug reports and pull requests are welcome on GitHub at https://github.com/yoshoku/numo-liblinear.
  This project is intended to be a safe, welcoming space for collaboration, and contributors are expected to adhere to the [Contributor Covenant](http://contributor-covenant.org) code of conduct.
+
+ ## License
+
+ The gem is available as open source under the terms of the [BSD-3-Clause License](https://opensource.org/licenses/BSD-3-Clause).
@@ -1,5 +1,5 @@
 
- Copyright (c) 2007-2019 The LIBLINEAR Project.
+ Copyright (c) 2007-2022 The LIBLINEAR Project.
  All rights reserved.
 
  Redistribution and use in source and binary forms, with or without
@@ -56,7 +56,7 @@ public:
  ret += x->value*x->value;
  x++;
  }
- return (ret);
+ return ret;
  }
 
  static double dot(const double *s, const feature_node *x)
@@ -67,7 +67,7 @@ public:
  ret += s[x->index-1]*x->value;
  x++;
  }
- return (ret);
+ return ret;
  }
 
  static double sparse_dot(const feature_node *x1, const feature_node *x2)
@@ -89,7 +89,7 @@ public:
  ++x1;
  }
  }
- return (ret);
+ return ret;
  }
 
  static void axpy(const double a, const feature_node *x, double *y)
@@ -164,7 +164,7 @@ double l2r_erm_fun::fun(double *w)
  f += C_times_loss(i, wx[i]);
  f = f + 0.5 * wTw;
 
- return(f);
+ return f;
  }
 
  int l2r_erm_fun::get_nr_variable(void)
@@ -876,13 +876,13 @@ void Solver_MCSVM_CS::Solve(double *w)
  // D is a diagonal matrix
  //
  // In L1-SVM case:
- // upper_bound_i = Cp if y_i = 1
- // upper_bound_i = Cn if y_i = -1
- // D_ii = 0
+ // upper_bound_i = Cp if y_i = 1
+ // upper_bound_i = Cn if y_i = -1
+ // D_ii = 0
  // In L2-SVM case:
- // upper_bound_i = INF
- // D_ii = 1/(2*Cp) if y_i = 1
- // D_ii = 1/(2*Cn) if y_i = -1
+ // upper_bound_i = INF
+ // D_ii = 1/(2*Cp) if y_i = 1
+ // D_ii = 1/(2*Cn) if y_i = -1
  //
  // Given:
  // x, y, Cp, Cn
@@ -890,22 +890,23 @@ void Solver_MCSVM_CS::Solve(double *w)
  //
  // solution will be put in w
  //
+ // this function returns the number of iterations
+ //
  // See Algorithm 3 of Hsieh et al., ICML 2008
 
  #undef GETI
  #define GETI(i) (y[i]+1)
  // To support weights for instances, use GETI(i) (i)
 
- static void solve_l2r_l1l2_svc(
- const problem *prob, double *w, double eps,
- double Cp, double Cn, int solver_type)
+ static int solve_l2r_l1l2_svc(const problem *prob, const parameter *param, double *w, double Cp, double Cn, int max_iter=300)
  {
  int l = prob->l;
  int w_size = prob->n;
+ double eps = param->eps;
+ int solver_type = param->solver_type;
  int i, s, iter = 0;
  double C, d, G;
  double *QD = new double[l];
- int max_iter = 1000;
  int *index = new int[l];
  double *alpha = new double[l];
  schar *y = new schar[l];
@@ -1024,7 +1025,8 @@ static void solve_l2r_l1l2_svc(
  if(iter % 10 == 0)
  info(".");
 
- if(PGmax_new - PGmin_new <= eps)
+ if(PGmax_new - PGmin_new <= eps &&
+ fabs(PGmax_new) <= eps && fabs(PGmin_new) <= eps)
  {
  if(active_size == l)
  break;
@@ -1046,8 +1048,6 @@ static void solve_l2r_l1l2_svc(
  }
 
  info("\noptimization finished, #iter = %d\n",iter);
- if (iter >= max_iter)
- info("\nWARNING: reaching max number of iterations\nUsing -s 2 may be faster (also see FAQ)\n\n");
 
  // calculate objective value
 
@@ -1068,6 +1068,8 @@ static void solve_l2r_l1l2_svc(
  delete [] alpha;
  delete [] y;
  delete [] index;
+
+ return iter;
  }
 
 
@@ -1081,11 +1083,11 @@ static void solve_l2r_l1l2_svc(
  // D is a diagonal matrix
  //
  // In L1-SVM case:
- // upper_bound_i = C
- // lambda_i = 0
+ // upper_bound_i = C
+ // lambda_i = 0
  // In L2-SVM case:
- // upper_bound_i = INF
- // lambda_i = 1/(2*C)
+ // upper_bound_i = INF
+ // lambda_i = 1/(2*C)
  //
  // Given:
  // x, y, p, C
@@ -1093,23 +1095,23 @@ static void solve_l2r_l1l2_svc(
  //
  // solution will be put in w
  //
+ // this function returns the number of iterations
+ //
  // See Algorithm 4 of Ho and Lin, 2012
 
  #undef GETI
  #define GETI(i) (0)
  // To support weights for instances, use GETI(i) (i)
 
- static void solve_l2r_l1l2_svr(
- const problem *prob, double *w, const parameter *param,
- int solver_type)
+ static int solve_l2r_l1l2_svr(const problem *prob, const parameter *param, double *w, int max_iter=300)
  {
+ const int solver_type = param->solver_type;
  int l = prob->l;
  double C = param->C;
  double p = param->p;
  int w_size = prob->n;
  double eps = param->eps;
  int i, s, iter = 0;
- int max_iter = 1000;
  int active_size = l;
  int *index = new int[l];
 
@@ -1260,8 +1262,6 @@ static void solve_l2r_l1l2_svr(
  }
 
  info("\noptimization finished, #iter = %d\n", iter);
- if(iter >= max_iter)
- info("\nWARNING: reaching max number of iterations\nUsing -s 11 may be faster\n\n");
 
  // calculate objective value
  double v = 0;
@@ -1282,6 +1282,8 @@ static void solve_l2r_l1l2_svr(
  delete [] beta;
  delete [] QD;
  delete [] index;
+
+ return iter;
  }
 
 
@@ -1301,19 +1303,21 @@ static void solve_l2r_l1l2_svr(
  //
  // solution will be put in w
  //
+ // this function returns the number of iterations
+ //
  // See Algorithm 5 of Yu et al., MLJ 2010
 
  #undef GETI
  #define GETI(i) (y[i]+1)
  // To support weights for instances, use GETI(i) (i)
 
- void solve_l2r_lr_dual(const problem *prob, double *w, double eps, double Cp, double Cn)
+ static int solve_l2r_lr_dual(const problem *prob, const parameter *param, double *w, double Cp, double Cn, int max_iter=300)
  {
  int l = prob->l;
  int w_size = prob->n;
+ double eps = param->eps;
  int i, s, iter = 0;
  double *xTx = new double[l];
- int max_iter = 1000;
  int *index = new int[l];
  double *alpha = new double[2*l]; // store alpha and C - alpha
  schar *y = new schar[l];
@@ -1428,8 +1432,6 @@ void solve_l2r_lr_dual(const problem *prob, double *w, double eps, double Cp, do
  }
 
  info("\noptimization finished, #iter = %d\n",iter);
- if (iter >= max_iter)
- info("\nWARNING: reaching max number of iterations\nUsing -s 0 may be faster (also see FAQ)\n\n");
 
  // calculate objective value
 
@@ -1446,6 +1448,8 @@ void solve_l2r_lr_dual(const problem *prob, double *w, double eps, double Cp, do
  delete [] alpha;
  delete [] y;
  delete [] index;
+
+ return iter;
  }
 
  // A coordinate descent algorithm for
@@ -1459,6 +1463,8 @@ void solve_l2r_lr_dual(const problem *prob, double *w, double eps, double Cp, do
  //
  // solution will be put in w
  //
+ // this function returns the number of iterations
+ //
  // See Yuan et al. (2010) and appendix of LIBLINEAR paper, Fan et al. (2008)
  //
  // To not regularize the bias (i.e., regularize_bias = 0), a constant feature = 1
@@ -1468,12 +1474,11 @@ void solve_l2r_lr_dual(const problem *prob, double *w, double eps, double Cp, do
  #define GETI(i) (y[i]+1)
  // To support weights for instances, use GETI(i) (i)
 
- static void solve_l1r_l2_svc(
- problem *prob_col, double *w, double eps,
- double Cp, double Cn, int regularize_bias)
+ static int solve_l1r_l2_svc(const problem *prob_col, const parameter* param, double *w, double Cp, double Cn, double eps)
  {
  int l = prob_col->l;
  int w_size = prob_col->n;
+ int regularize_bias = param->regularize_bias;
  int j, s, iter = 0;
  int max_iter = 1000;
  int active_size = w_size;
@@ -1747,6 +1752,8 @@ static void solve_l1r_l2_svc(
  delete [] y;
  delete [] b;
  delete [] xj_sq;
+
+ return iter;
  }
 
  // A coordinate descent algorithm for
@@ -1760,6 +1767,8 @@ static void solve_l1r_l2_svc(
  //
  // solution will be put in w
  //
+ // this function returns the number of iterations
+ //
  // See Yuan et al. (2011) and appendix of LIBLINEAR paper, Fan et al. (2008)
  //
  // To not regularize the bias (i.e., regularize_bias = 0), a constant feature = 1
@@ -1769,12 +1778,11 @@ static void solve_l1r_l2_svc(
  #define GETI(i) (y[i]+1)
  // To support weights for instances, use GETI(i) (i)
 
- static void solve_l1r_lr(
- const problem *prob_col, double *w, double eps,
- double Cp, double Cn, int regularize_bias)
+ static int solve_l1r_lr(const problem *prob_col, const parameter *param, double *w, double Cp, double Cn, double eps)
  {
  int l = prob_col->l;
  int w_size = prob_col->n;
+ int regularize_bias = param->regularize_bias;
  int j, s, newton_iter=0, iter=0;
  int max_newton_iter = 100;
  int max_iter = 1000;
@@ -2143,6 +2151,8 @@ static void solve_l1r_lr(
  delete [] exp_wTx_new;
  delete [] tau;
  delete [] D;
+
+ return newton_iter;
  }
 
  struct heap {
@@ -2230,12 +2240,16 @@ struct heap {
  //
  // solution will be put in w and rho
  //
+ // this function returns the number of iterations
+ //
  // See Algorithm 7 in supplementary materials of Chou et al., SDM 2020.
 
- static void solve_oneclass_svm(const problem *prob, double *w, double *rho, double eps, double nu)
+ static int solve_oneclass_svm(const problem *prob, const parameter *param, double *w, double *rho)
  {
  int l = prob->l;
  int w_size = prob->n;
+ double eps = param->eps;
+ double nu = param->nu;
  int i, j, s, iter = 0;
  double Gi, Gj;
  double Qij, quad_coef, delta, sum;
@@ -2248,13 +2262,13 @@ static void solve_oneclass_svm(const problem *prob, double *w, double *rho, doub
  int max_iter = 1000;
  int active_size = l;
 
- double negGmax; // max { -grad(f)_i | alpha_i < 1 }
- double negGmin; // min { -grad(f)_i | alpha_i > 0 }
+ double negGmax; // max { -grad(f)_i | alpha_i < 1 }
+ double negGmin; // min { -grad(f)_i | alpha_i > 0 }
 
  int *most_violating_i = new int[l];
  int *most_violating_j = new int[l];
 
- int n = (int)(nu*l); // # of alpha's at upper bound
+ int n = (int)(nu*l); // # of alpha's at upper bound
  for(i=0; i<n; i++)
  alpha[i] = 1;
  if (n<l)
@@ -2479,6 +2493,8 @@ static void solve_oneclass_svm(const problem *prob, double *w, double *rho, doub
  delete [] alpha;
  delete [] most_violating_i;
  delete [] most_violating_j;
+
+ return iter;
  }
 
  // transpose matrix X from row format to column format
@@ -2616,67 +2632,85 @@ static void group_classes(const problem *prob, int *nr_class_ret, int **label_re
 
  static void train_one(const problem *prob, const parameter *param, double *w, double Cp, double Cn)
  {
- double eps = param->eps;
+ int solver_type = param->solver_type;
+ int dual_solver_max_iter = 300;
+ int iter;
 
- int pos = 0;
- int neg = 0;
- for(int i=0;i<prob->l;i++)
- if(prob->y[i] > 0)
- pos++;
- neg = prob->l - pos;
- double primal_solver_tol = eps*max(min(pos,neg), 1)/prob->l;
+ bool is_regression = (solver_type==L2R_L2LOSS_SVR ||
+ solver_type==L2R_L1LOSS_SVR_DUAL ||
+ solver_type==L2R_L2LOSS_SVR_DUAL);
 
- function *fun_obj=NULL;
- switch(param->solver_type)
+ // Some solvers use Cp,Cn but not C array; extensions possible but no plan for now
+ double *C = new double[prob->l];
+ double primal_solver_tol = param->eps;
+ if(is_regression)
  {
- case L2R_LR:
+ for(int i=0;i<prob->l;i++)
+ C[i] = param->C;
+ }
+ else
+ {
+ int pos = 0;
+ for(int i=0;i<prob->l;i++)
  {
- double *C = new double[prob->l];
- for(int i = 0; i < prob->l; i++)
+ if(prob->y[i] > 0)
  {
- if(prob->y[i] > 0)
- C[i] = Cp;
- else
- C[i] = Cn;
+ pos++;
+ C[i] = Cp;
  }
- fun_obj=new l2r_lr_fun(prob, param, C);
- NEWTON newton_obj(fun_obj, primal_solver_tol);
+ else
+ C[i] = Cn;
+ }
+ int neg = prob->l - pos;
+ primal_solver_tol = param->eps*max(min(pos,neg), 1)/prob->l;
+ }
+
+ switch(solver_type)
+ {
+ case L2R_LR:
+ {
+ l2r_lr_fun fun_obj(prob, param, C);
+ NEWTON newton_obj(&fun_obj, primal_solver_tol);
  newton_obj.set_print_string(liblinear_print_string);
  newton_obj.newton(w);
- delete fun_obj;
- delete[] C;
  break;
  }
  case L2R_L2LOSS_SVC:
  {
- double *C = new double[prob->l];
- for(int i = 0; i < prob->l; i++)
- {
- if(prob->y[i] > 0)
- C[i] = Cp;
- else
- C[i] = Cn;
- }
- fun_obj=new l2r_l2_svc_fun(prob, param, C);
- NEWTON newton_obj(fun_obj, primal_solver_tol);
+ l2r_l2_svc_fun fun_obj(prob, param, C);
+ NEWTON newton_obj(&fun_obj, primal_solver_tol);
  newton_obj.set_print_string(liblinear_print_string);
  newton_obj.newton(w);
- delete fun_obj;
- delete[] C;
  break;
  }
  case L2R_L2LOSS_SVC_DUAL:
- solve_l2r_l1l2_svc(prob, w, eps, Cp, Cn, L2R_L2LOSS_SVC_DUAL);
+ {
+ iter = solve_l2r_l1l2_svc(prob, param, w, Cp, Cn, dual_solver_max_iter);
+ if(iter >= dual_solver_max_iter)
+ {
+ info("\nWARNING: reaching max number of iterations\nSwitching to use -s 2\n\n");
+ // primal_solver_tol obtained from eps for dual may be too loose
+ primal_solver_tol *= 0.1;
+ l2r_l2_svc_fun fun_obj(prob, param, C);
+ NEWTON newton_obj(&fun_obj, primal_solver_tol);
+ newton_obj.set_print_string(liblinear_print_string);
+ newton_obj.newton(w);
+ }
  break;
+ }
  case L2R_L1LOSS_SVC_DUAL:
- solve_l2r_l1l2_svc(prob, w, eps, Cp, Cn, L2R_L1LOSS_SVC_DUAL);
+ {
+ iter = solve_l2r_l1l2_svc(prob, param, w, Cp, Cn, dual_solver_max_iter);
+ if(iter >= dual_solver_max_iter)
+ info("\nWARNING: reaching max number of iterations\nUsing -s 2 may be faster (also see FAQ)\n\n");
  break;
+ }
  case L1R_L2LOSS_SVC:
  {
  problem prob_col;
  feature_node *x_space = NULL;
  transpose(prob, &x_space ,&prob_col);
- solve_l1r_l2_svc(&prob_col, w, primal_solver_tol, Cp, Cn, param->regularize_bias);
+ solve_l1r_l2_svc(&prob_col, param, w, Cp, Cn, primal_solver_tol);
  delete [] prob_col.y;
  delete [] prob_col.x;
  delete [] x_space;
@@ -2687,40 +2721,64 @@ static void train_one(const problem *prob, const parameter *param, double *w, do
  problem prob_col;
  feature_node *x_space = NULL;
  transpose(prob, &x_space ,&prob_col);
- solve_l1r_lr(&prob_col, w, primal_solver_tol, Cp, Cn, param->regularize_bias);
+ solve_l1r_lr(&prob_col, param, w, Cp, Cn, primal_solver_tol);
  delete [] prob_col.y;
  delete [] prob_col.x;
  delete [] x_space;
  break;
  }
  case L2R_LR_DUAL:
- solve_l2r_lr_dual(prob, w, eps, Cp, Cn);
+ {
+ iter = solve_l2r_lr_dual(prob, param, w, Cp, Cn, dual_solver_max_iter);
+ if(iter >= dual_solver_max_iter)
+ {
+ info("\nWARNING: reaching max number of iterations\nSwitching to use -s 0\n\n");
+ // primal_solver_tol obtained from eps for dual may be too loose
+ primal_solver_tol *= 0.1;
+ l2r_lr_fun fun_obj(prob, param, C);
+ NEWTON newton_obj(&fun_obj, primal_solver_tol);
+ newton_obj.set_print_string(liblinear_print_string);
+ newton_obj.newton(w);
+ }
  break;
+ }
  case L2R_L2LOSS_SVR:
  {
- double *C = new double[prob->l];
- for(int i = 0; i < prob->l; i++)
- C[i] = param->C;
-
- fun_obj=new l2r_l2_svr_fun(prob, param, C);
- NEWTON newton_obj(fun_obj, param->eps);
+ l2r_l2_svr_fun fun_obj(prob, param, C);
+ NEWTON newton_obj(&fun_obj, primal_solver_tol);
  newton_obj.set_print_string(liblinear_print_string);
  newton_obj.newton(w);
- delete fun_obj;
- delete[] C;
  break;
-
  }
  case L2R_L1LOSS_SVR_DUAL:
- solve_l2r_l1l2_svr(prob, w, param, L2R_L1LOSS_SVR_DUAL);
+ {
+ iter = solve_l2r_l1l2_svr(prob, param, w, dual_solver_max_iter);
+ if(iter >= dual_solver_max_iter)
+ info("\nWARNING: reaching max number of iterations\nUsing -s 11 may be faster (also see FAQ)\n\n");
+
  break;
+ }
  case L2R_L2LOSS_SVR_DUAL:
- solve_l2r_l1l2_svr(prob, w, param, L2R_L2LOSS_SVR_DUAL);
+ {
+ iter = solve_l2r_l1l2_svr(prob, param, w, dual_solver_max_iter);
+ if(iter >= dual_solver_max_iter)
+ {
+ info("\nWARNING: reaching max number of iterations\nSwitching to use -s 11\n\n");
+ // primal_solver_tol obtained from eps for dual may be too loose
+ primal_solver_tol *= 0.001;
+ l2r_l2_svr_fun fun_obj(prob, param, C);
+ NEWTON newton_obj(&fun_obj, primal_solver_tol);
+ newton_obj.set_print_string(liblinear_print_string);
+ newton_obj.newton(w);
+ }
  break;
+ }
  default:
  fprintf(stderr, "ERROR: unknown solver_type\n");
  break;
  }
+
+ delete[] C;
  }
 
  // Calculate the initial C for parameter selection
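The train_one hunks above capture the main behavioral change in LIBLINEAR 2.44: each dual coordinate-descent solver now reports its iteration count, and train_one falls back to the corresponding primal Newton solver with a tightened tolerance when the 300-iteration cap (dual_solver_max_iter) is reached, instead of only printing a warning. The following is a minimal, self-contained sketch of that control flow with toy solver names; it is not the actual LIBLINEAR code.

```cpp
#include <cstdio>

// Hypothetical stand-in: a dual solver that reports how many iterations it used.
static int solve_dual_toy(double *w, int max_iter)
{
	int iter = 0;
	while (iter < max_iter)   // pretend the dual solver never converges
		iter++;
	*w = 0.5;                 // partially converged solution
	return iter;
}

// Hypothetical stand-in: a primal (Newton-style) solver, slower per step but robust.
static void solve_primal_toy(double *w, double tol)
{
	(void)tol;
	*w = 1.0;                 // pretend we solved to tolerance `tol`
}

int main()
{
	const int dual_solver_max_iter = 300;  // same cap as in the hunks above
	double primal_solver_tol = 0.01;
	double w = 0.0;

	int iter = solve_dual_toy(&w, dual_solver_max_iter);
	if (iter >= dual_solver_max_iter)
	{
		// Mirrors the diff: warn, tighten the tolerance, and switch to the
		// primal solver instead of returning a half-converged w.
		std::printf("WARNING: reaching max number of iterations, switching solver\n");
		primal_solver_tol *= 0.1;
		solve_primal_toy(&w, primal_solver_tol);
	}
	std::printf("w = %g\n", w);
	return 0;
}
```

The design point is that hitting the iteration cap no longer leaves the caller with whatever the dual solver had at that point; the shared C array and primal_solver_tol computed at the top of train_one make the fallback cheap to set up.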
@@ -2768,7 +2826,7 @@ static double calc_start_C(const problem *prob, const parameter *param)
  return pow( 2, floor(log(min_C) / log(2.0)) );
  }
 
- static double calc_max_p(const problem *prob, const parameter *param)
+ static double calc_max_p(const problem *prob)
  {
  int i;
  double max_p = 0.0;
@@ -2938,7 +2996,7 @@ model* train(const problem *prob, const parameter *param)
  model_->w = Malloc(double, w_size);
  model_->nr_class = 2;
  model_->label = NULL;
- solve_oneclass_svm(prob, model_->w, &(model_->rho), param->eps, param->nu);
+ solve_oneclass_svm(prob, param, model_->w, &(model_->rho));
  }
  else
  {
@@ -3173,7 +3231,6 @@ void find_parameters(const problem *prob, const parameter *param, int nr_fold, d
  subprob[i].y[k] = prob->y[perm[j]];
  ++k;
  }
-
  }
 
  struct parameter param_tmp = *param;
@@ -3193,7 +3250,7 @@ void find_parameters(const problem *prob, const parameter *param, int nr_fold, d
  }
  else if(param->solver_type == L2R_L2LOSS_SVR)
  {
- double max_p = calc_max_p(prob, &param_tmp);
+ double max_p = calc_max_p(prob);
  int num_p_steps = 20;
  double max_C = 1048576;
  *best_score = INF;
@@ -3655,7 +3712,7 @@ const char *check_parameter(const problem *prob, const parameter *param)
  if(param->C <= 0)
  return "C <= 0";
 
- if(param->p < 0)
+ if(param->p < 0 && param->solver_type == L2R_L2LOSS_SVR)
  return "p < 0";
 
  if(prob->bias >= 0 && param->solver_type == ONECLASS_SVM)
@@ -1,7 +1,7 @@
  #ifndef _LIBLINEAR_H
  #define _LIBLINEAR_H
 
- #define LIBLINEAR_VERSION 241
+ #define LIBLINEAR_VERSION 244
 
  #ifdef __cplusplus
  extern "C" {
@@ -30,7 +30,7 @@ struct parameter
  int solver_type;
 
  /* these are for training only */
- double eps; /* stopping criteria */
+ double eps; /* stopping tolerance */
  double C;
  int nr_weight;
  int *weight_label;
@@ -44,12 +44,12 @@ struct parameter
  struct model
  {
  struct parameter param;
- int nr_class; /* number of classes */
+ int nr_class; /* number of classes */
  int nr_feature;
  double *w;
- int *label; /* label of each class */
+ int *label; /* label of each class */
  double bias;
- double rho; /* one-class SVM only */
+ double rho; /* one-class SVM only */
  };
 
  struct model* train(const struct problem *prob, const struct parameter *param);
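The eps field whose comment is reworded above is the stopping tolerance passed straight through to the solvers. For reference, here is a minimal sketch of filling struct parameter and calling the bundled LIBLINEAR C API directly. It assumes linear.h and the compiled library are available to compile and link against; fields not visible in the hunk above (weight, p, nu, init_sol, regularize_bias) are assumed from LIBLINEAR 2.4x, and none of this is code from the gem.

```cpp
#include <cstdio>
#include "linear.h"  // assumed to be the bundled LIBLINEAR header

int main()
{
	// Two one-feature training examples: y = +1 for x = 1.0, y = -1 for x = -1.0.
	feature_node x1[] = {{1, 1.0}, {-1, 0.0}};   // index -1 terminates a row
	feature_node x2[] = {{1, -1.0}, {-1, 0.0}};
	feature_node *rows[] = {x1, x2};
	double labels[] = {+1.0, -1.0};

	problem prob;
	prob.l = 2;       // number of training examples
	prob.n = 1;       // number of features
	prob.y = labels;
	prob.x = rows;
	prob.bias = -1;   // no bias term

	parameter param;
	param.solver_type = L2R_L2LOSS_SVC_DUAL;
	param.eps = 0.1;  // stopping tolerance, as the comment above now says
	param.C = 1.0;
	param.nr_weight = 0;
	param.weight_label = nullptr;
	param.weight = nullptr;
	param.p = 0.1;
	param.nu = 0.5;
	param.init_sol = nullptr;
	param.regularize_bias = 1;

	if (check_parameter(&prob, &param) == nullptr)
	{
		model *m = train(&prob, &param);
		std::printf("prediction for x = 1.0: %g\n", predict(m, x1));
		free_and_destroy_model(&m);
	}
	return 0;
}
```

In the gem itself these fields are populated from a Ruby parameter hash, so this sketch is only meant to show what solver_type, eps, and C mean at the C level.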
@@ -117,14 +117,13 @@ void NEWTON::newton(double *w)
  delete [] w0;
 
  f = fun_obj->fun(w);
- info("init f %5.3e\n", f);
  fun_obj->grad(w, g);
  double gnorm = dnrm2_(&n, g, &inc);
+ info("init f %5.3e |g| %5.3e\n", f, gnorm);
 
  if (gnorm <= eps*gnorm0)
  search = 0;
 
- double *w_new = new double[n];
  while (iter <= max_iter && search)
  {
  fun_obj->get_diag_preconditioner(M);
@@ -133,7 +132,7 @@ void NEWTON::newton(double *w)
  cg_iter = pcg(g, M, s, r);
 
  fold = f;
- step_size = fun_obj->linesearch_and_update(w, s, & f, g, init_step_size);
+ step_size = fun_obj->linesearch_and_update(w, s, &f, g, init_step_size);
 
  if (step_size == 0)
  {
@@ -141,14 +140,11 @@ void NEWTON::newton(double *w)
  break;
  }
 
- info("iter %2d f %5.3e |g| %5.3e CG %3d step_size %4.2e \n", iter, f, gnorm, cg_iter, step_size);
-
- actred = fold - f;
- iter++;
-
  fun_obj->grad(w, g);
-
  gnorm = dnrm2_(&n, g, &inc);
+
+ info("iter %2d f %5.3e |g| %5.3e CG %3d step_size %4.2e \n", iter, f, gnorm, cg_iter, step_size);
+
  if (gnorm <= eps*gnorm0)
  break;
  if (f < -1.0e+32)
@@ -156,16 +152,21 @@ void NEWTON::newton(double *w)
  info("WARNING: f < -1.0e+32\n");
  break;
  }
+ actred = fold - f;
  if (fabs(actred) <= 1.0e-12*fabs(f))
  {
  info("WARNING: actred too small\n");
  break;
  }
+
+ iter++;
  }
 
+ if(iter >= max_iter)
+ info("\nWARNING: reaching max number of Newton iterations\n");
+
  delete[] g;
  delete[] r;
- delete[] w_new;
  delete[] s;
  delete[] M;
  }
@@ -177,7 +178,7 @@ int NEWTON::pcg(double *g, double *M, double *s, double *r)
  double one = 1;
  double *d = new double[n];
  double *Hd = new double[n];
- double zTr, znewTrnew, alpha, beta, cgtol;
+ double zTr, znewTrnew, alpha, beta, cgtol, dHd;
  double *z = new double[n];
  double Q = 0, newQ, Qdiff;
 
@@ -198,9 +199,14 @@ int NEWTON::pcg(double *g, double *M, double *s, double *r)
  while (cg_iter < max_cg_iter)
  {
  cg_iter++;
- fun_obj->Hv(d, Hd);
 
- alpha = zTr/ddot_(&n, d, &inc, Hd, &inc);
+ fun_obj->Hv(d, Hd);
+ dHd = ddot_(&n, d, &inc, Hd, &inc);
+ // avoid 0/0 in getting alpha
+ if (dHd <= 1.0e-16)
+ break;
+
+ alpha = zTr/dHd;
  daxpy_(&n, &alpha, d, &inc, s, &inc);
  alpha = -alpha;
  daxpy_(&n, &alpha, Hd, &inc, r, &inc);
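The pcg hunk above introduces dHd and bails out of the CG loop when the curvature term d^T H d is effectively zero, so the step length alpha = z^T r / d^T H d never becomes 0/0. Below is a standalone (unpreconditioned) sketch of the same guard on a tiny 2x2 system, with toy helpers in place of the BLAS-style ddot_/daxpy_ calls; it illustrates the idea only and is not the LIBLINEAR implementation.

```cpp
#include <cstdio>
#include <cmath>

// Toy dot product for length-2 vectors (stand-in for ddot_).
static double dot2(const double a[2], const double b[2]) { return a[0]*b[0] + a[1]*b[1]; }

int main()
{
	// Solve H s = -g for a small SPD matrix H, as pcg() does for the Newton step.
	const double H[2][2] = {{4.0, 1.0}, {1.0, 3.0}};
	const double g[2] = {1.0, 2.0};

	double s[2] = {0.0, 0.0};
	double r[2] = {-g[0], -g[1]};   // residual r = -g - H*s with s = 0
	double d[2] = {r[0], r[1]};
	double rTr = dot2(r, r);

	for (int cg_iter = 0; cg_iter < 10; cg_iter++)
	{
		double Hd[2] = {H[0][0]*d[0] + H[0][1]*d[1], H[1][0]*d[0] + H[1][1]*d[1]};
		double dHd = dot2(d, Hd);
		// Same guard as in the diff above: avoid 0/0 when the curvature term vanishes.
		if (dHd <= 1.0e-16)
			break;
		double alpha = rTr / dHd;
		for (int i = 0; i < 2; i++) { s[i] += alpha * d[i]; r[i] -= alpha * Hd[i]; }
		double rTr_new = dot2(r, r);
		if (std::sqrt(rTr_new) < 1.0e-12)
			break;
		double beta = rTr_new / rTr;
		for (int i = 0; i < 2; i++) d[i] = r[i] + beta * d[i];
		rTr = rTr_new;
	}
	std::printf("step: s = (%g, %g)\n", s[0], s[1]);
	return 0;
}
```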
@@ -236,7 +242,7 @@ int NEWTON::pcg(double *g, double *M, double *s, double *r)
  delete[] Hd;
  delete[] z;
 
- return(cg_iter);
+ return cg_iter;
  }
 
  void NEWTON::set_print_string(void (*print_string) (const char *buf))
@@ -3,6 +3,6 @@
  module Numo
  module Liblinear
  # The version of Numo::Liblienar you are using.
- VERSION = '2.0.0'
+ VERSION = '2.1.0'
  end
  end
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: numo-liblinear
  version: !ruby/object:Gem::Version
- version: 2.0.0
+ version: 2.1.0
  platform: ruby
  authors:
  - yoshoku
  autorequire:
  bindir: exe
  cert_chain: []
- date: 2022-01-09 00:00:00.000000000 Z
+ date: 2022-03-26 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: numo-narray
@@ -77,7 +77,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
  - !ruby/object:Gem::Version
  version: '0'
  requirements: []
- rubygems_version: 3.3.3
+ rubygems_version: 3.3.7
  signing_key:
  specification_version: 4
  summary: Numo::Liblinear is a Ruby gem binding to the LIBLINEAR library. Numo::Liblinear