liblinear-ruby 1.0.1 → 1.0.2

Files changed (54)
  1. checksums.yaml +4 -4
  2. data/README.md +1 -1
  3. data/ext/blasp.h +8 -8
  4. data/ext/daxpy.c +3 -3
  5. data/ext/ddot.c +3 -3
  6. data/ext/dnrm2.c +7 -7
  7. data/ext/dscal.c +4 -4
  8. data/ext/liblinear_wrap.cxx +382 -382
  9. data/ext/linear.cpp +44 -55
  10. data/ext/linear.h +5 -1
  11. data/ext/tron.cpp +13 -5
  12. data/ext/tron.h +1 -1
  13. data/lib/liblinear.rb +2 -0
  14. data/lib/liblinear/version.rb +1 -1
  15. metadata +2 -41
  16. data/liblinear-2.1/COPYRIGHT +0 -31
  17. data/liblinear-2.1/Makefile +0 -37
  18. data/liblinear-2.1/Makefile.win +0 -24
  19. data/liblinear-2.1/README +0 -600
  20. data/liblinear-2.1/blas/Makefile +0 -22
  21. data/liblinear-2.1/blas/blas.h +0 -25
  22. data/liblinear-2.1/blas/blasp.h +0 -438
  23. data/liblinear-2.1/blas/daxpy.c +0 -57
  24. data/liblinear-2.1/blas/ddot.c +0 -58
  25. data/liblinear-2.1/blas/dnrm2.c +0 -70
  26. data/liblinear-2.1/blas/dscal.c +0 -52
  27. data/liblinear-2.1/heart_scale +0 -270
  28. data/liblinear-2.1/linear.cpp +0 -3053
  29. data/liblinear-2.1/linear.def +0 -22
  30. data/liblinear-2.1/linear.h +0 -79
  31. data/liblinear-2.1/matlab/Makefile +0 -49
  32. data/liblinear-2.1/matlab/README +0 -208
  33. data/liblinear-2.1/matlab/libsvmread.c +0 -212
  34. data/liblinear-2.1/matlab/libsvmwrite.c +0 -119
  35. data/liblinear-2.1/matlab/linear_model_matlab.c +0 -176
  36. data/liblinear-2.1/matlab/linear_model_matlab.h +0 -2
  37. data/liblinear-2.1/matlab/make.m +0 -22
  38. data/liblinear-2.1/matlab/predict.c +0 -341
  39. data/liblinear-2.1/matlab/train.c +0 -492
  40. data/liblinear-2.1/predict.c +0 -243
  41. data/liblinear-2.1/python/Makefile +0 -4
  42. data/liblinear-2.1/python/README +0 -380
  43. data/liblinear-2.1/python/liblinear.py +0 -323
  44. data/liblinear-2.1/python/liblinearutil.py +0 -270
  45. data/liblinear-2.1/train.c +0 -449
  46. data/liblinear-2.1/tron.cpp +0 -241
  47. data/liblinear-2.1/tron.h +0 -35
  48. data/liblinear-2.1/windows/liblinear.dll +0 -0
  49. data/liblinear-2.1/windows/libsvmread.mexw64 +0 -0
  50. data/liblinear-2.1/windows/libsvmwrite.mexw64 +0 -0
  51. data/liblinear-2.1/windows/predict.exe +0 -0
  52. data/liblinear-2.1/windows/predict.mexw64 +0 -0
  53. data/liblinear-2.1/windows/train.exe +0 -0
  54. data/liblinear-2.1/windows/train.mexw64 +0 -0
data/ext/linear.cpp CHANGED
@@ -6,6 +6,7 @@
 #include <locale.h>
 #include "linear.h"
 #include "tron.h"
+int liblinear_version = LIBLINEAR_VERSION;
 typedef signed char schar;
 template <class T> static inline void swap(T& x, T& y) { T t=x; x=y; y=t; }
 #ifndef min
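
The hunk above, together with the LIBLINEAR_VERSION macro added to linear.h later in this diff, makes the bundled LIBLINEAR version visible both at compile time and at run time. A minimal sketch (not part of the gem) of client code reading both, assuming the major*100 + minor encoding implied by the wrapper's %.2f formatting:

#include <cstdio>
#include "linear.h"

int main()
{
	// 211 encodes 2.11: compare the headers compiled against
	// with the library actually linked.
	std::printf("compile-time: %.2f, run-time: %.2f\n",
	            LIBLINEAR_VERSION / 100.0, liblinear_version / 100.0);
	return 0;
}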
@@ -173,7 +174,6 @@ void l2r_lr_fun::Hv(double *s, double *Hs)
 	int i;
 	int l=prob->l;
 	int w_size=get_nr_variable();
-	double *wa = new double[l];
 	feature_node **x=prob->x;
 
 	for(i=0;i<w_size;i++)
@@ -181,15 +181,14 @@ void l2r_lr_fun::Hv(double *s, double *Hs)
 	for(i=0;i<l;i++)
 	{
 		feature_node * const xi=x[i];
-		wa[i] = sparse_operator::dot(s, xi);
-
-		wa[i] = C[i]*D[i]*wa[i];
+		double xTs = sparse_operator::dot(s, xi);
 
-		sparse_operator::axpy(wa[i], xi, Hs);
+		xTs = C[i]*D[i]*xTs;
+
+		sparse_operator::axpy(xTs, xi, Hs);
 	}
 	for(i=0;i<w_size;i++)
 		Hs[i] = s[i] + Hs[i];
-	delete[] wa;
 }
 
 void l2r_lr_fun::Xv(double *v, double *Xv)
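
The rewrite above removes a heap allocation from every Hessian-vector product: the 2.1 code buffered each row's coefficient in a length-l array wa even though every entry was written once and read immediately, so 2.11 replaces it with a loop-local scalar (the same change is applied to l2r_l2_svc_fun::Hv below). A self-contained sketch of the resulting pattern; dot and axpy here are stand-ins for sparse_operator, written against LIBLINEAR's 1-based, index == -1 terminated feature_node rows:

struct feature_node { int index; double value; };

// s . x over one sparse row
static double dot(const double *s, const feature_node *x)
{
	double ret = 0;
	for (; x->index != -1; ++x)
		ret += s[x->index - 1] * x->value;
	return ret;
}

// y += a * x over one sparse row
static void axpy(double a, const feature_node *x, double *y)
{
	for (; x->index != -1; ++x)
		y[x->index - 1] += a * x->value;
}

// Hs = s + X^T diag(C .* D) X s: one scalar per row instead of wa[l]
static void hv(const double *s, feature_node **x, const double *C,
               const double *D, int l, int w_size, double *Hs)
{
	for (int i = 0; i < w_size; i++)
		Hs[i] = 0;
	for (int i = 0; i < l; i++)
	{
		double xTs = dot(s, x[i]);  // written once...
		xTs = C[i] * D[i] * xTs;
		axpy(xTs, x[i], Hs);        // ...consumed immediately
	}
	for (int i = 0; i < w_size; i++)
		Hs[i] = s[i] + Hs[i];
}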
@@ -233,7 +232,6 @@ protected:
 
 	double *C;
 	double *z;
-	double *D;
 	int *I;
 	int sizeI;
 	const problem *prob;
@@ -246,7 +244,6 @@ l2r_l2_svc_fun::l2r_l2_svc_fun(const problem *prob, double *C)
 	this->prob = prob;
 
 	z = new double[l];
-	D = new double[l];
 	I = new int[l];
 	this->C = C;
 }
@@ -254,7 +251,6 @@ l2r_l2_svc_fun::l2r_l2_svc_fun(const problem *prob, double *C)
 l2r_l2_svc_fun::~l2r_l2_svc_fun()
 {
 	delete[] z;
-	delete[] D;
 	delete[] I;
 }
 
@@ -312,7 +308,6 @@ void l2r_l2_svc_fun::Hv(double *s, double *Hs)
 {
 	int i;
 	int w_size=get_nr_variable();
-	double *wa = new double[sizeI];
 	feature_node **x=prob->x;
 
 	for(i=0;i<w_size;i++)
@@ -320,15 +315,14 @@ void l2r_l2_svc_fun::Hv(double *s, double *Hs)
 	for(i=0;i<sizeI;i++)
 	{
 		feature_node * const xi=x[I[i]];
-		wa[i] = sparse_operator::dot(s, xi);
-
-		wa[i] = C[I[i]]*wa[i];
+		double xTs = sparse_operator::dot(s, xi);
+
+		xTs = C[I[i]]*xTs;
 
-		sparse_operator::axpy(wa[i], xi, Hs);
+		sparse_operator::axpy(xTs, xi, Hs);
 	}
 	for(i=0;i<w_size;i++)
 		Hs[i] = s[i] + 2*Hs[i];
-	delete[] wa;
 }
 
 void l2r_l2_svc_fun::Xv(double *v, double *Xv)
@@ -431,19 +425,19 @@ void l2r_l2_svr_fun::grad(double *w, double *g)
 		g[i] = w[i] + 2*g[i];
 }
 
-// A coordinate descent algorithm for 
+// A coordinate descent algorithm for
 // multi-class support vector machines by Crammer and Singer
 //
 //  min_{\alpha} 0.5 \sum_m ||w_m(\alpha)||^2 + \sum_i \sum_m e^m_i alpha^m_i
 //    s.t. \alpha^m_i <= C^m_i \forall m,i , \sum_m \alpha^m_i=0 \forall i
-// 
+//
 //  where e^m_i = 0 if y_i = m,
 //        e^m_i = 1 if y_i != m,
-//        C^m_i = C if m = y_i, 
-//        C^m_i = 0 if m != y_i, 
-//  and w_m(\alpha) = \sum_i \alpha^m_i x_i 
+//        C^m_i = C if m = y_i,
+//        C^m_i = 0 if m != y_i,
+//  and w_m(\alpha) = \sum_i \alpha^m_i x_i
 //
-// Given: 
+// Given:
 // x, y, C
 // eps is the stopping tolerance
 //
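
This hunk and the similar comment-only hunks below change nothing but trailing whitespace. For reference, the Crammer and Singer dual that the comment describes, restated in LaTeX:

\min_{\alpha}\ \tfrac{1}{2}\sum_m \lVert w_m(\alpha)\rVert^2 + \sum_i \sum_m e^m_i \alpha^m_i
\quad \text{s.t.}\quad \alpha^m_i \le C^m_i\ \ \forall m,i, \qquad \sum_m \alpha^m_i = 0\ \ \forall i

where \(e^m_i = 0\) if \(y_i = m\) and \(1\) otherwise, \(C^m_i = C\) if \(m = y_i\) and \(0\) otherwise, and \(w_m(\alpha) = \sum_i \alpha^m_i x_i\).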
@@ -551,7 +545,7 @@ void Solver_MCSVM_CS::Solve(double *w)
 	double eps_shrink = max(10.0*eps, 1.0); // stopping tolerance for shrinking
 	bool start_from_all = true;
 
-	// Initial alpha can be set here. Note that 
+	// Initial alpha can be set here. Note that
 	// sum_m alpha[i*nr_class+m] = 0, for all i=1,...,l-1
 	// alpha[i*nr_class+m] <= C[GETI(i)] if prob->y[i] == m
 	// alpha[i*nr_class+m] <= 0 if prob->y[i] != m
@@ -746,14 +740,14 @@ void Solver_MCSVM_CS::Solve(double *w)
 	delete [] active_size_i;
 }
 
-// A coordinate descent algorithm for 
+// A coordinate descent algorithm for
 // L1-loss and L2-loss SVM dual problems
 //
 //  min_\alpha 0.5(\alpha^T (Q + D)\alpha) - e^T \alpha,
 //    s.t. 0 <= \alpha_i <= upper_bound_i,
-// 
+//
 //  where Qij = yi yj xi^T xj and
-//  D is a diagonal matrix 
+//  D is a diagonal matrix
 //
 // In L1-SVM case:
 // 	upper_bound_i = Cp if y_i = 1
@@ -764,12 +758,12 @@ void Solver_MCSVM_CS::Solve(double *w)
 // 	D_ii = 1/(2*Cp) if y_i = 1
 // 	D_ii = 1/(2*Cn) if y_i = -1
 //
-// Given: 
+// Given:
 // x, y, Cp, Cn
 // eps is the stopping tolerance
 //
 // solution will be put in w
-// 
+//
 // See Algorithm 3 of Hsieh et al., ICML 2008
 
 #undef GETI
@@ -951,14 +945,14 @@ static void solve_l2r_l1l2_svc(
 }
 
 
-// A coordinate descent algorithm for 
+// A coordinate descent algorithm for
 // L1-loss and L2-loss epsilon-SVR dual problem
 //
 //  min_\beta 0.5\beta^T (Q + diag(lambda)) \beta - p \sum_{i=1}^l|\beta_i| + \sum_{i=1}^l yi\beta_i,
 //    s.t. -upper_bound_i <= \beta_i <= upper_bound_i,
-// 
+//
 //  where Qij = xi^T xj and
-//  D is a diagonal matrix 
+//  D is a diagonal matrix
 //
 // In L1-SVM case:
 // 	upper_bound_i = C
@@ -967,13 +961,13 @@ static void solve_l2r_l1l2_svc(
 // 	upper_bound_i = INF
 // 	lambda_i = 1/(2*C)
 //
-// Given: 
+// Given:
 // x, y, p, C
 // eps is the stopping tolerance
 //
 // solution will be put in w
 //
-// See Algorithm 4 of Ho and Lin, 2012 
+// See Algorithm 4 of Ho and Lin, 2012
 
 #undef GETI
 #define GETI(i) (0)
@@ -1165,17 +1159,17 @@ static void solve_l2r_l1l2_svr(
 }
 
 
-// A coordinate descent algorithm for 
+// A coordinate descent algorithm for
 // the dual of L2-regularized logistic regression problems
 //
 //  min_\alpha 0.5(\alpha^T Q \alpha) + \sum \alpha_i log (\alpha_i) + (upper_bound_i - \alpha_i) log (upper_bound_i - \alpha_i),
 //    s.t. 0 <= \alpha_i <= upper_bound_i,
-// 
-// where Qij = yi yj xi^T xj and 
+//
+// where Qij = yi yj xi^T xj and
 // 	upper_bound_i = Cp if y_i = 1
 // 	upper_bound_i = Cn if y_i = -1
 //
-// Given: 
+// Given:
 // x, y, Cp, Cn
 // eps is the stopping tolerance
 //
@@ -1194,7 +1188,7 @@ void solve_l2r_lr_dual(const problem *prob, double *w, double eps, double Cp, double Cn)
 	int i, s, iter = 0;
 	double *xTx = new double[l];
 	int max_iter = 1000;
-	int *index = new int[l];	
+	int *index = new int[l];
 	double *alpha = new double[2*l]; // store alpha and C - alpha
 	schar *y = new schar[l];
 	int max_inner_iter = 100; // for inner Newton
@@ -1213,7 +1207,7 @@ void solve_l2r_lr_dual(const problem *prob, double *w, double eps, double Cp, double Cn)
 			y[i] = -1;
 		}
 	}
-	
+
 	// Initial alpha can be set here. Note that
 	// 0 < alpha[i] < upper_bound[GETI(i)]
 	// alpha[2*i] + alpha[2*i+1] = upper_bound[GETI(i)]
@@ -1328,12 +1322,12 @@ void solve_l2r_lr_dual(const problem *prob, double *w, double eps, double Cp, double Cn)
 	delete [] index;
 }
 
-// A coordinate descent algorithm for 
+// A coordinate descent algorithm for
 // L1-regularized L2-loss support vector classification
 //
 //  min_w \sum |wj| + C \sum max(0, 1-yi w^T xi)^2,
 //
-// Given: 
+// Given:
 // x, y, Cp, Cn
 // eps is the stopping tolerance
 //
@@ -1607,12 +1601,12 @@ static void solve_l1r_l2_svc(
 	delete [] xj_sq;
 }
 
-// A coordinate descent algorithm for 
+// A coordinate descent algorithm for
 // L1-regularized logistic regression problems
 //
 //  min_w \sum |wj| + C \sum log(1+exp(-yi w^T xi)),
 //
-// Given: 
+// Given:
 // x, y, Cp, Cn
 // eps is the stopping tolerance
 //
@@ -2075,8 +2069,8 @@ static void group_classes(const problem *prob, int *nr_class_ret, int **label_ret, int **start_ret, int **count_ret, int *perm)
 	}
 
 	//
-	// Labels are ordered by their first occurrence in the training set. 
-	// However, for two-class sets with -1/+1 labels and -1 appears first, 
+	// Labels are ordered by their first occurrence in the training set.
+	// However, for two-class sets with -1/+1 labels and -1 appears first,
 	// we swap labels to ensure that internally the binary SVM has positive data corresponding to the +1 instances.
 	//
 	if (nr_class == 2 && label[0] == -1 && label[1] == 1)
@@ -2350,7 +2344,7 @@ model* train(const problem *prob, const parameter *param)
 				sub_prob.y[k] = +1;
 			for(; k<sub_prob.l; k++)
 				sub_prob.y[k] = -1;
-			
+
 			if(param->init_sol != NULL)
 				for(i=0;i<w_size;i++)
 					model_->w[i] = param->init_sol[i];
@@ -2386,7 +2380,7 @@ model* train(const problem *prob, const parameter *param)
 
 				train_one(&sub_prob, param, w, weighted_C[i], param->C);
 
-				for(int j=0;j<w_size;j++)
+				for(j=0;j<w_size;j++)
 					model_->w[j*nr_class+i] = w[j];
 			}
 			free(w);
@@ -2601,7 +2595,7 @@ void find_parameter_C(const problem *prob, const parameter *param, int nr_fold, double start_C, double max_C, double *best_C, double *best_rate)
 		param1.C = param1.C*ratio;
 	}
 
-	if(param1.C > max_C && max_C > start_C) 
+	if(param1.C > max_C && max_C > start_C)
 		info("warning: maximum C reached.\n");
 	free(fold_start);
 	free(perm);
@@ -2891,11 +2885,6 @@ struct model *load_model(const char *model_file_name)
 		int j;
 		for(j=0; j<nr_w; j++)
 			FSCANF(fp, "%lf ", &model_->w[i*nr_w+j]);
-		if (fscanf(fp, "\n") !=0)
-		{
-			fprintf(stderr, "ERROR: fscanf failed to read the model\n");
-			EXIT_LOAD_MODEL()
-		}
 	}
 
 	setlocale(LC_ALL, old_locale);
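
The dropped block was a per-row newline check in load_model. The weight values themselves are still read through FSCANF, LIBLINEAR's checked wrapper around fscanf; a hypothetical sketch of that wrapper's shape (the real macro is defined elsewhere in linear.cpp) shows why each field is already verified on its own:

#include <cstdio>

// Hypothetical stand-in for LIBLINEAR's FSCANF macro: every field read
// is checked, so a separate per-row "\n" probe adds no extra safety.
#define FSCANF(_stream, _format, _var) \
	do { \
		if (std::fscanf(_stream, _format, _var) != 1) \
		{ \
			std::fprintf(stderr, "ERROR: fscanf failed to read the model\n"); \
			return false; \
		} \
	} while (0)

// Read one nr_w-column weight row; returns false on malformed input.
static bool read_w_row(std::FILE *fp, double *row, int nr_w)
{
	for (int j = 0; j < nr_w; j++)
		FSCANF(fp, "%lf ", &row[j]);
	return true;
}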
@@ -2924,7 +2913,7 @@ void get_labels(const model *model_, int* label)
 }
 
 // use inline here for better performance (around 20% faster than the non-inline one)
-static inline double get_w_value(const struct model *model_, int idx, int label_idx) 
+static inline double get_w_value(const struct model *model_, int idx, int label_idx)
 {
 	int nr_class = model_->nr_class;
 	int solver_type = model_->param.solver_type;
@@ -2934,7 +2923,7 @@ static inline double get_w_value(const struct model *model_, int idx, int label_idx)
 		return 0;
 	if(check_regression_model(model_))
 		return w[idx];
-	else 
+	else
 	{
 		if(label_idx < 0 || label_idx >= nr_class)
 			return 0;
@@ -3022,7 +3011,7 @@ const char *check_parameter(const problem *prob, const parameter *param)
 		&& param->solver_type != L2R_L1LOSS_SVR_DUAL)
 		return "unknown solver type";
 
-	if(param->init_sol != NULL 
+	if(param->init_sol != NULL
 		&& param->solver_type != L2R_LR && param->solver_type != L2R_L2LOSS_SVC)
 		return "Initial-solution specification supported only for solver L2R_LR and L2R_L2LOSS_SVC";
 
data/ext/linear.h CHANGED
@@ -1,10 +1,14 @@
 #ifndef _LIBLINEAR_H
 #define _LIBLINEAR_H
 
+#define LIBLINEAR_VERSION 211
+
 #ifdef __cplusplus
 extern "C" {
 #endif
 
+extern int liblinear_version;
+
 struct feature_node
 {
 	int index;
@@ -16,7 +20,7 @@ struct problem
 	int l, n;
 	double *y;
 	struct feature_node **x;
-	double bias; /* < 0 if no bias term */ 
+	double bias; /* < 0 if no bias term */
 };
 
 enum { L2R_LR, L2R_L2LOSS_SVC_DUAL, L2R_L2LOSS_SVC, L2R_L1LOSS_SVC_DUAL, MCSVM_CS, L1R_L2LOSS_SVC, L1R_LR, L2R_LR_DUAL, L2R_L2LOSS_SVR = 11, L2R_L2LOSS_SVR_DUAL, L2R_L1LOSS_SVR_DUAL }; /* solver_type */
data/ext/tron.cpp CHANGED
@@ -91,9 +91,10 @@ void TRON::tron(double *w)
 	iter = 1;
 
 	double *w_new = new double[n];
+	bool reach_boundary;
 	while (iter <= max_iter && search)
 	{
-		cg_iter = trcg(delta, g, s, r);
+		cg_iter = trcg(delta, g, s, r, &reach_boundary);
 
 		memcpy(w_new, w, sizeof(double)*n);
 		daxpy_(&n, &one, s, &inc, w_new, &inc);
@@ -124,7 +125,12 @@ void TRON::tron(double *w)
 		else if (actred < eta2*prered)
 			delta = max(sigma1*delta, min(alpha*snorm, sigma3*delta));
 		else
-			delta = max(delta, min(alpha*snorm, sigma3*delta));
+		{
+			if (reach_boundary)
+				delta = sigma3*delta;
+			else
+				delta = max(delta, min(alpha*snorm, sigma3*delta));
+		}
 
 		info("iter %2d act %5.3e pre %5.3e delta %5.3e f %5.3e |g| %5.3e CG %3d\n", iter, actred, prered, delta, f, gnorm, cg_iter);
 
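The reach_boundary flag introduced above changes how the trust region grows after a very successful step: when the conjugate-gradient step was clipped to the boundary, the old rule max(delta, min(alpha*snorm, sigma3*delta)) could leave delta unchanged even though the boundary was the binding constraint, so the radius is now expanded by sigma3 outright. A sketch of just that branch, assuming TRON's stock sigma3 = 4:

#include <algorithm>

// Radius growth for a very successful step (the final else branch above).
double grow_delta(double delta, double alpha, double snorm, bool reach_boundary)
{
	const double sigma3 = 4.0;      // assumed; TRON's usual constant
	if (reach_boundary)
		return sigma3 * delta;  // step was clipped: expand decisively
	return std::max(delta, std::min(alpha * snorm, sigma3 * delta));
}
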
@@ -144,9 +150,9 @@ void TRON::tron(double *w)
 			info("WARNING: f < -1.0e+32\n");
 			break;
 		}
-		if (fabs(actred) <= 0 && prered <= 0)
+		if (prered <= 0)
 		{
-			info("WARNING: actred and prered <= 0\n");
+			info("WARNING: prered <= 0\n");
 			break;
 		}
 		if (fabs(actred) <= 1.0e-12*fabs(f) &&
@@ -163,7 +169,7 @@ void TRON::tron(double *w)
 	delete[] s;
 }
 
-int TRON::trcg(double delta, double *g, double *s, double *r)
+int TRON::trcg(double delta, double *g, double *s, double *r, bool *reach_boundary)
 {
 	int i, inc = 1;
 	int n = fun_obj->get_nr_variable();
@@ -172,6 +178,7 @@ int TRON::trcg(double delta, double *g, double *s, double *r)
 	double *Hd = new double[n];
 	double rTr, rnewTrnew, alpha, beta, cgtol;
 
+	*reach_boundary = false;
 	for (i=0; i<n; i++)
 	{
 		s[i] = 0;
@@ -194,6 +201,7 @@ int TRON::trcg(double delta, double *g, double *s, double *r)
 		if (dnrm2_(&n, s, &inc) > delta)
 		{
 			info("cg reaches trust region boundary\n");
+			*reach_boundary = true;
 			alpha = -alpha;
 			daxpy_(&n, &alpha, d, &inc, s, &inc);
 
data/ext/tron.h CHANGED
@@ -22,7 +22,7 @@ public:
 	void set_print_string(void (*i_print) (const char *buf));
 
 private:
-	int trcg(double delta, double *g, double *s, double *r);
+	int trcg(double delta, double *g, double *s, double *r, bool *reach_boundary);
 	double norm_inf(int n, double *x);
 
 	double eps;
data/lib/liblinear.rb CHANGED
@@ -26,6 +26,8 @@ class Liblinear
   L2R_L2LOSS_SVR_DUAL = Liblinearswig::L2R_L2LOSS_SVR_DUAL
   L2R_L1LOSS_SVR_DUAL = Liblinearswig::L2R_L1LOSS_SVR_DUAL
 
+  LIBLINEAR_VERSION = sprintf("%.2f", Liblinearswig::LIBLINEAR_VERSION / 100.0)
+
   class << self
     # @param problem [Liblinear::Problem]
     # @param parameter [Liblinear::Parameter]
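
The new Ruby constant formats the integer version macro as a human-readable string; the same arithmetic in C++ terms (a sketch, not gem code):

#include <cstdio>

int main()
{
	const int version = 211;  // Liblinearswig::LIBLINEAR_VERSION
	char buf[16];
	std::snprintf(buf, sizeof buf, "%.2f", version / 100.0);
	std::printf("%s\n", buf);  // prints 2.11
	return 0;
}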
data/lib/liblinear/version.rb CHANGED
@@ -1,3 +1,3 @@
 class Liblinear
-  VERSION = '1.0.1'
+  VERSION = '1.0.2'
 end
metadata CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: liblinear-ruby
 version: !ruby/object:Gem::Version
-  version: 1.0.1
+  version: 1.0.2
 platform: ruby
 authors:
 - Kei Tsuchiya
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2017-02-08 00:00:00.000000000 Z
+date: 2017-05-22 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: bundler
@@ -76,45 +76,6 @@ files:
 - lib/liblinear/parameter.rb
 - lib/liblinear/problem.rb
 - lib/liblinear/version.rb
-- liblinear-2.1/COPYRIGHT
-- liblinear-2.1/Makefile
-- liblinear-2.1/Makefile.win
-- liblinear-2.1/README
-- liblinear-2.1/blas/Makefile
-- liblinear-2.1/blas/blas.h
-- liblinear-2.1/blas/blasp.h
-- liblinear-2.1/blas/daxpy.c
-- liblinear-2.1/blas/ddot.c
-- liblinear-2.1/blas/dnrm2.c
-- liblinear-2.1/blas/dscal.c
-- liblinear-2.1/heart_scale
-- liblinear-2.1/linear.cpp
-- liblinear-2.1/linear.def
-- liblinear-2.1/linear.h
-- liblinear-2.1/matlab/Makefile
-- liblinear-2.1/matlab/README
-- liblinear-2.1/matlab/libsvmread.c
-- liblinear-2.1/matlab/libsvmwrite.c
-- liblinear-2.1/matlab/linear_model_matlab.c
-- liblinear-2.1/matlab/linear_model_matlab.h
-- liblinear-2.1/matlab/make.m
-- liblinear-2.1/matlab/predict.c
-- liblinear-2.1/matlab/train.c
-- liblinear-2.1/predict.c
-- liblinear-2.1/python/Makefile
-- liblinear-2.1/python/README
-- liblinear-2.1/python/liblinear.py
-- liblinear-2.1/python/liblinearutil.py
-- liblinear-2.1/train.c
-- liblinear-2.1/tron.cpp
-- liblinear-2.1/tron.h
-- liblinear-2.1/windows/liblinear.dll
-- liblinear-2.1/windows/libsvmread.mexw64
-- liblinear-2.1/windows/libsvmwrite.mexw64
-- liblinear-2.1/windows/predict.exe
-- liblinear-2.1/windows/predict.mexw64
-- liblinear-2.1/windows/train.exe
-- liblinear-2.1/windows/train.mexw64
 - liblinear-ruby.gemspec
 - spec/liblinear/array/double_spec.rb
 - spec/liblinear/example_spec.rb