liblinear-ruby 1.0.1 → 1.0.2

Files changed (54)
  1. checksums.yaml +4 -4
  2. data/README.md +1 -1
  3. data/ext/blasp.h +8 -8
  4. data/ext/daxpy.c +3 -3
  5. data/ext/ddot.c +3 -3
  6. data/ext/dnrm2.c +7 -7
  7. data/ext/dscal.c +4 -4
  8. data/ext/liblinear_wrap.cxx +382 -382
  9. data/ext/linear.cpp +44 -55
  10. data/ext/linear.h +5 -1
  11. data/ext/tron.cpp +13 -5
  12. data/ext/tron.h +1 -1
  13. data/lib/liblinear.rb +2 -0
  14. data/lib/liblinear/version.rb +1 -1
  15. metadata +2 -41
  16. data/liblinear-2.1/COPYRIGHT +0 -31
  17. data/liblinear-2.1/Makefile +0 -37
  18. data/liblinear-2.1/Makefile.win +0 -24
  19. data/liblinear-2.1/README +0 -600
  20. data/liblinear-2.1/blas/Makefile +0 -22
  21. data/liblinear-2.1/blas/blas.h +0 -25
  22. data/liblinear-2.1/blas/blasp.h +0 -438
  23. data/liblinear-2.1/blas/daxpy.c +0 -57
  24. data/liblinear-2.1/blas/ddot.c +0 -58
  25. data/liblinear-2.1/blas/dnrm2.c +0 -70
  26. data/liblinear-2.1/blas/dscal.c +0 -52
  27. data/liblinear-2.1/heart_scale +0 -270
  28. data/liblinear-2.1/linear.cpp +0 -3053
  29. data/liblinear-2.1/linear.def +0 -22
  30. data/liblinear-2.1/linear.h +0 -79
  31. data/liblinear-2.1/matlab/Makefile +0 -49
  32. data/liblinear-2.1/matlab/README +0 -208
  33. data/liblinear-2.1/matlab/libsvmread.c +0 -212
  34. data/liblinear-2.1/matlab/libsvmwrite.c +0 -119
  35. data/liblinear-2.1/matlab/linear_model_matlab.c +0 -176
  36. data/liblinear-2.1/matlab/linear_model_matlab.h +0 -2
  37. data/liblinear-2.1/matlab/make.m +0 -22
  38. data/liblinear-2.1/matlab/predict.c +0 -341
  39. data/liblinear-2.1/matlab/train.c +0 -492
  40. data/liblinear-2.1/predict.c +0 -243
  41. data/liblinear-2.1/python/Makefile +0 -4
  42. data/liblinear-2.1/python/README +0 -380
  43. data/liblinear-2.1/python/liblinear.py +0 -323
  44. data/liblinear-2.1/python/liblinearutil.py +0 -270
  45. data/liblinear-2.1/train.c +0 -449
  46. data/liblinear-2.1/tron.cpp +0 -241
  47. data/liblinear-2.1/tron.h +0 -35
  48. data/liblinear-2.1/windows/liblinear.dll +0 -0
  49. data/liblinear-2.1/windows/libsvmread.mexw64 +0 -0
  50. data/liblinear-2.1/windows/libsvmwrite.mexw64 +0 -0
  51. data/liblinear-2.1/windows/predict.exe +0 -0
  52. data/liblinear-2.1/windows/predict.mexw64 +0 -0
  53. data/liblinear-2.1/windows/train.exe +0 -0
  54. data/liblinear-2.1/windows/train.mexw64 +0 -0
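Most of this release is housekeeping: the bundled liblinear-2.1 tree (items 16 to 54 above, including the command-line, MATLAB, Python and Windows artifacts) is dropped from the packaged gem files, while the wrapped sources under data/ext are updated in place. The removed train.c shown below is a thin command-line driver around the liblinear C API declared in linear.h. As a point of reference only, the following sketch (not part of the diff; the tiny in-memory data set and the output file name are made up for illustration) runs the same core call sequence, check_parameter, train, save_model and free_and_destroy_model, that train.c's main() performs:

#include <stdio.h>
#include "linear.h"

int main(void)
{
	/* Two training instances with a single feature; index -1 terminates a row. */
	struct feature_node x1[] = { {1, 1.0}, {-1, 0.0} };
	struct feature_node x2[] = { {1, -1.0}, {-1, 0.0} };
	struct feature_node *x[] = { x1, x2 };
	double y[] = { +1, -1 };

	/* struct problem fields: l (instances), n (features), y, x, bias. */
	struct problem prob = { 2, 1, y, x, -1 };

	/* struct parameter fields: solver_type, eps, C, nr_weight,
	   weight_label, weight, p, init_sol (liblinear 2.1 layout). */
	struct parameter param = { L2R_L2LOSS_SVC_DUAL, 0.1, 1.0, 0, NULL, NULL, 0.1, NULL };

	const char *error_msg = check_parameter(&prob, &param);
	if (error_msg)
	{
		fprintf(stderr, "ERROR: %s\n", error_msg);
		return 1;
	}

	struct model *model_ = train(&prob, &param);
	if (save_model("example.model", model_))
		fprintf(stderr, "can't save model to file example.model\n");

	free_and_destroy_model(&model_);
	destroy_param(&param);
	return 0;
}

Such a program would be compiled against the liblinear sources the gem still ships under data/ext.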
data/liblinear-2.1/train.c (deleted)
@@ -1,449 +0,0 @@
- #include <stdio.h>
- #include <math.h>
- #include <stdlib.h>
- #include <string.h>
- #include <ctype.h>
- #include <errno.h>
- #include "linear.h"
- #define Malloc(type,n) (type *)malloc((n)*sizeof(type))
- #define INF HUGE_VAL
-
- void print_null(const char *s) {}
-
- void exit_with_help()
- {
- printf(
- "Usage: train [options] training_set_file [model_file]\n"
- "options:\n"
- "-s type : set type of solver (default 1)\n"
- " for multi-class classification\n"
- " 0 -- L2-regularized logistic regression (primal)\n"
- " 1 -- L2-regularized L2-loss support vector classification (dual)\n"
- " 2 -- L2-regularized L2-loss support vector classification (primal)\n"
- " 3 -- L2-regularized L1-loss support vector classification (dual)\n"
- " 4 -- support vector classification by Crammer and Singer\n"
- " 5 -- L1-regularized L2-loss support vector classification\n"
- " 6 -- L1-regularized logistic regression\n"
- " 7 -- L2-regularized logistic regression (dual)\n"
- " for regression\n"
- " 11 -- L2-regularized L2-loss support vector regression (primal)\n"
- " 12 -- L2-regularized L2-loss support vector regression (dual)\n"
- " 13 -- L2-regularized L1-loss support vector regression (dual)\n"
- "-c cost : set the parameter C (default 1)\n"
- "-p epsilon : set the epsilon in loss function of SVR (default 0.1)\n"
- "-e epsilon : set tolerance of termination criterion\n"
- " -s 0 and 2\n"
- " |f'(w)|_2 <= eps*min(pos,neg)/l*|f'(w0)|_2,\n"
- " where f is the primal function and pos/neg are # of\n"
- " positive/negative data (default 0.01)\n"
- " -s 11\n"
- " |f'(w)|_2 <= eps*|f'(w0)|_2 (default 0.001)\n"
- " -s 1, 3, 4, and 7\n"
- " Dual maximal violation <= eps; similar to libsvm (default 0.1)\n"
- " -s 5 and 6\n"
- " |f'(w)|_1 <= eps*min(pos,neg)/l*|f'(w0)|_1,\n"
- " where f is the primal function (default 0.01)\n"
- " -s 12 and 13\n"
- " |f'(alpha)|_1 <= eps |f'(alpha0)|,\n"
- " where f is the dual function (default 0.1)\n"
- "-B bias : if bias >= 0, instance x becomes [x; bias]; if < 0, no bias term added (default -1)\n"
- "-wi weight: weights adjust the parameter C of different classes (see README for details)\n"
- "-v n: n-fold cross validation mode\n"
- "-C : find parameter C (only for -s 0 and 2)\n"
- "-q : quiet mode (no outputs)\n"
- );
- exit(1);
- }
-
- void exit_input_error(int line_num)
- {
- fprintf(stderr,"Wrong input format at line %d\n", line_num);
- exit(1);
- }
-
- static char *line = NULL;
- static int max_line_len;
-
- static char* readline(FILE *input)
- {
- int len;
-
- if(fgets(line,max_line_len,input) == NULL)
- return NULL;
-
- while(strrchr(line,'\n') == NULL)
- {
- max_line_len *= 2;
- line = (char *) realloc(line,max_line_len);
- len = (int) strlen(line);
- if(fgets(line+len,max_line_len-len,input) == NULL)
- break;
- }
- return line;
- }
-
- void parse_command_line(int argc, char **argv, char *input_file_name, char *model_file_name);
- void read_problem(const char *filename);
- void do_cross_validation();
- void do_find_parameter_C();
-
- struct feature_node *x_space;
- struct parameter param;
- struct problem prob;
- struct model* model_;
- int flag_cross_validation;
- int flag_find_C;
- int flag_C_specified;
- int flag_solver_specified;
- int nr_fold;
- double bias;
-
- int main(int argc, char **argv)
- {
- char input_file_name[1024];
- char model_file_name[1024];
- const char *error_msg;
-
- parse_command_line(argc, argv, input_file_name, model_file_name);
- read_problem(input_file_name);
- error_msg = check_parameter(&prob,&param);
-
- if(error_msg)
- {
- fprintf(stderr,"ERROR: %s\n",error_msg);
- exit(1);
- }
-
- if (flag_find_C)
- {
- do_find_parameter_C();
- }
- else if(flag_cross_validation)
- {
- do_cross_validation();
- }
- else
- {
- model_=train(&prob, &param);
- if(save_model(model_file_name, model_))
- {
- fprintf(stderr,"can't save model to file %s\n",model_file_name);
- exit(1);
- }
- free_and_destroy_model(&model_);
- }
- destroy_param(&param);
- free(prob.y);
- free(prob.x);
- free(x_space);
- free(line);
-
- return 0;
- }
-
- void do_find_parameter_C()
- {
- double start_C, best_C, best_rate;
- double max_C = 1024;
- if (flag_C_specified)
- start_C = param.C;
- else
- start_C = -1.0;
- printf("Doing parameter search with %d-fold cross validation.\n", nr_fold);
- find_parameter_C(&prob, &param, nr_fold, start_C, max_C, &best_C, &best_rate);
- printf("Best C = %g CV accuracy = %g%%\n", best_C, 100.0*best_rate);
- }
-
- void do_cross_validation()
- {
- int i;
- int total_correct = 0;
- double total_error = 0;
- double sumv = 0, sumy = 0, sumvv = 0, sumyy = 0, sumvy = 0;
- double *target = Malloc(double, prob.l);
-
- cross_validation(&prob,&param,nr_fold,target);
- if(param.solver_type == L2R_L2LOSS_SVR ||
- param.solver_type == L2R_L1LOSS_SVR_DUAL ||
- param.solver_type == L2R_L2LOSS_SVR_DUAL)
- {
- for(i=0;i<prob.l;i++)
- {
- double y = prob.y[i];
- double v = target[i];
- total_error += (v-y)*(v-y);
- sumv += v;
- sumy += y;
- sumvv += v*v;
- sumyy += y*y;
- sumvy += v*y;
- }
- printf("Cross Validation Mean squared error = %g\n",total_error/prob.l);
- printf("Cross Validation Squared correlation coefficient = %g\n",
- ((prob.l*sumvy-sumv*sumy)*(prob.l*sumvy-sumv*sumy))/
- ((prob.l*sumvv-sumv*sumv)*(prob.l*sumyy-sumy*sumy))
- );
- }
- else
- {
- for(i=0;i<prob.l;i++)
- if(target[i] == prob.y[i])
- ++total_correct;
- printf("Cross Validation Accuracy = %g%%\n",100.0*total_correct/prob.l);
- }
-
- free(target);
- }
-
- void parse_command_line(int argc, char **argv, char *input_file_name, char *model_file_name)
- {
- int i;
- void (*print_func)(const char*) = NULL; // default printing to stdout
-
- // default values
- param.solver_type = L2R_L2LOSS_SVC_DUAL;
- param.C = 1;
- param.eps = INF; // see setting below
- param.p = 0.1;
- param.nr_weight = 0;
- param.weight_label = NULL;
- param.weight = NULL;
- param.init_sol = NULL;
- flag_cross_validation = 0;
- flag_C_specified = 0;
- flag_solver_specified = 0;
- flag_find_C = 0;
- bias = -1;
-
- // parse options
- for(i=1;i<argc;i++)
- {
- if(argv[i][0] != '-') break;
- if(++i>=argc)
- exit_with_help();
- switch(argv[i-1][1])
- {
- case 's':
- param.solver_type = atoi(argv[i]);
- flag_solver_specified = 1;
- break;
-
- case 'c':
- param.C = atof(argv[i]);
- flag_C_specified = 1;
- break;
-
- case 'p':
- param.p = atof(argv[i]);
- break;
-
- case 'e':
- param.eps = atof(argv[i]);
- break;
-
- case 'B':
- bias = atof(argv[i]);
- break;
-
- case 'w':
- ++param.nr_weight;
- param.weight_label = (int *) realloc(param.weight_label,sizeof(int)*param.nr_weight);
- param.weight = (double *) realloc(param.weight,sizeof(double)*param.nr_weight);
- param.weight_label[param.nr_weight-1] = atoi(&argv[i-1][2]);
- param.weight[param.nr_weight-1] = atof(argv[i]);
- break;
-
- case 'v':
- flag_cross_validation = 1;
- nr_fold = atoi(argv[i]);
- if(nr_fold < 2)
- {
- fprintf(stderr,"n-fold cross validation: n must >= 2\n");
- exit_with_help();
- }
- break;
-
- case 'q':
- print_func = &print_null;
- i--;
- break;
-
- case 'C':
- flag_find_C = 1;
- i--;
- break;
-
- default:
- fprintf(stderr,"unknown option: -%c\n", argv[i-1][1]);
- exit_with_help();
- break;
- }
- }
-
- set_print_string_function(print_func);
-
- // determine filenames
- if(i>=argc)
- exit_with_help();
-
- strcpy(input_file_name, argv[i]);
-
- if(i<argc-1)
- strcpy(model_file_name,argv[i+1]);
- else
- {
- char *p = strrchr(argv[i],'/');
- if(p==NULL)
- p = argv[i];
- else
- ++p;
- sprintf(model_file_name,"%s.model",p);
- }
-
- // default solver for parameter selection is L2R_L2LOSS_SVC
- if(flag_find_C)
- {
- if(!flag_cross_validation)
- nr_fold = 5;
- if(!flag_solver_specified)
- {
- fprintf(stderr, "Solver not specified. Using -s 2\n");
- param.solver_type = L2R_L2LOSS_SVC;
- }
- else if(param.solver_type != L2R_LR && param.solver_type != L2R_L2LOSS_SVC)
- {
- fprintf(stderr, "Warm-start parameter search only available for -s 0 and -s 2\n");
- exit_with_help();
- }
- }
-
- if(param.eps == INF)
- {
- switch(param.solver_type)
- {
- case L2R_LR:
- case L2R_L2LOSS_SVC:
- param.eps = 0.01;
- break;
- case L2R_L2LOSS_SVR:
- param.eps = 0.001;
- break;
- case L2R_L2LOSS_SVC_DUAL:
- case L2R_L1LOSS_SVC_DUAL:
- case MCSVM_CS:
- case L2R_LR_DUAL:
- param.eps = 0.1;
- break;
- case L1R_L2LOSS_SVC:
- case L1R_LR:
- param.eps = 0.01;
- break;
- case L2R_L1LOSS_SVR_DUAL:
- case L2R_L2LOSS_SVR_DUAL:
- param.eps = 0.1;
- break;
- }
- }
- }
-
- // read in a problem (in libsvm format)
- void read_problem(const char *filename)
- {
- int max_index, inst_max_index, i;
- size_t elements, j;
- FILE *fp = fopen(filename,"r");
- char *endptr;
- char *idx, *val, *label;
-
- if(fp == NULL)
- {
- fprintf(stderr,"can't open input file %s\n",filename);
- exit(1);
- }
-
- prob.l = 0;
- elements = 0;
- max_line_len = 1024;
- line = Malloc(char,max_line_len);
- while(readline(fp)!=NULL)
- {
- char *p = strtok(line," \t"); // label
-
- // features
- while(1)
- {
- p = strtok(NULL," \t");
- if(p == NULL || *p == '\n') // check '\n' as ' ' may be after the last feature
- break;
- elements++;
- }
- elements++; // for bias term
- prob.l++;
- }
- rewind(fp);
-
- prob.bias=bias;
-
- prob.y = Malloc(double,prob.l);
- prob.x = Malloc(struct feature_node *,prob.l);
- x_space = Malloc(struct feature_node,elements+prob.l);
-
- max_index = 0;
- j=0;
- for(i=0;i<prob.l;i++)
- {
- inst_max_index = 0; // strtol gives 0 if wrong format
- readline(fp);
- prob.x[i] = &x_space[j];
- label = strtok(line," \t\n");
- if(label == NULL) // empty line
- exit_input_error(i+1);
-
- prob.y[i] = strtod(label,&endptr);
- if(endptr == label || *endptr != '\0')
- exit_input_error(i+1);
-
- while(1)
- {
- idx = strtok(NULL,":");
- val = strtok(NULL," \t");
-
- if(val == NULL)
- break;
-
- errno = 0;
- x_space[j].index = (int) strtol(idx,&endptr,10);
- if(endptr == idx || errno != 0 || *endptr != '\0' || x_space[j].index <= inst_max_index)
- exit_input_error(i+1);
- else
- inst_max_index = x_space[j].index;
-
- errno = 0;
- x_space[j].value = strtod(val,&endptr);
- if(endptr == val || errno != 0 || (*endptr != '\0' && !isspace(*endptr)))
- exit_input_error(i+1);
-
- ++j;
- }
-
- if(inst_max_index > max_index)
- max_index = inst_max_index;
-
- if(prob.bias >= 0)
- x_space[j++].value = prob.bias;
-
- x_space[j++].index = -1;
- }
-
- if(prob.bias >= 0)
- {
- prob.n=max_index+1;
- for(i=1;i<prob.l;i++)
- (prob.x[i]-2)->index = prob.n;
- x_space[j-2].index = prob.n;
- }
- else
- prob.n=max_index;
-
- fclose(fp);
- }
data/liblinear-2.1/tron.cpp (deleted)
@@ -1,241 +0,0 @@
- #include <math.h>
- #include <stdio.h>
- #include <string.h>
- #include <stdarg.h>
- #include "tron.h"
-
- #ifndef min
- template <class T> static inline T min(T x,T y) { return (x<y)?x:y; }
- #endif
-
- #ifndef max
- template <class T> static inline T max(T x,T y) { return (x>y)?x:y; }
- #endif
-
- #ifdef __cplusplus
- extern "C" {
- #endif
-
- extern double dnrm2_(int *, double *, int *);
- extern double ddot_(int *, double *, int *, double *, int *);
- extern int daxpy_(int *, double *, double *, int *, double *, int *);
- extern int dscal_(int *, double *, double *, int *);
-
- #ifdef __cplusplus
- }
- #endif
-
- static void default_print(const char *buf)
- {
- fputs(buf,stdout);
- fflush(stdout);
- }
-
- void TRON::info(const char *fmt,...)
- {
- char buf[BUFSIZ];
- va_list ap;
- va_start(ap,fmt);
- vsprintf(buf,fmt,ap);
- va_end(ap);
- (*tron_print_string)(buf);
- }
-
- TRON::TRON(const function *fun_obj, double eps, double eps_cg, int max_iter)
- {
- this->fun_obj=const_cast<function *>(fun_obj);
- this->eps=eps;
- this->eps_cg=eps_cg;
- this->max_iter=max_iter;
- tron_print_string = default_print;
- }
-
- TRON::~TRON()
- {
- }
-
- void TRON::tron(double *w)
- {
- // Parameters for updating the iterates.
- double eta0 = 1e-4, eta1 = 0.25, eta2 = 0.75;
-
- // Parameters for updating the trust region size delta.
- double sigma1 = 0.25, sigma2 = 0.5, sigma3 = 4;
-
- int n = fun_obj->get_nr_variable();
- int i, cg_iter;
- double delta, snorm, one=1.0;
- double alpha, f, fnew, prered, actred, gs;
- int search = 1, iter = 1, inc = 1;
- double *s = new double[n];
- double *r = new double[n];
- double *g = new double[n];
-
- // calculate gradient norm at w=0 for stopping condition.
- double *w0 = new double[n];
- for (i=0; i<n; i++)
- w0[i] = 0;
- fun_obj->fun(w0);
- fun_obj->grad(w0, g);
- double gnorm0 = dnrm2_(&n, g, &inc);
- delete [] w0;
-
- f = fun_obj->fun(w);
- fun_obj->grad(w, g);
- delta = dnrm2_(&n, g, &inc);
- double gnorm = delta;
-
- if (gnorm <= eps*gnorm0)
- search = 0;
-
- iter = 1;
-
- double *w_new = new double[n];
- while (iter <= max_iter && search)
- {
- cg_iter = trcg(delta, g, s, r);
-
- memcpy(w_new, w, sizeof(double)*n);
- daxpy_(&n, &one, s, &inc, w_new, &inc);
-
- gs = ddot_(&n, g, &inc, s, &inc);
- prered = -0.5*(gs-ddot_(&n, s, &inc, r, &inc));
- fnew = fun_obj->fun(w_new);
-
- // Compute the actual reduction.
- actred = f - fnew;
-
- // On the first iteration, adjust the initial step bound.
- snorm = dnrm2_(&n, s, &inc);
- if (iter == 1)
- delta = min(delta, snorm);
-
- // Compute prediction alpha*snorm of the step.
- if (fnew - f - gs <= 0)
- alpha = sigma3;
- else
- alpha = max(sigma1, -0.5*(gs/(fnew - f - gs)));
-
- // Update the trust region bound according to the ratio of actual to predicted reduction.
- if (actred < eta0*prered)
- delta = min(max(alpha, sigma1)*snorm, sigma2*delta);
- else if (actred < eta1*prered)
- delta = max(sigma1*delta, min(alpha*snorm, sigma2*delta));
- else if (actred < eta2*prered)
- delta = max(sigma1*delta, min(alpha*snorm, sigma3*delta));
- else
- delta = max(delta, min(alpha*snorm, sigma3*delta));
-
- info("iter %2d act %5.3e pre %5.3e delta %5.3e f %5.3e |g| %5.3e CG %3d\n", iter, actred, prered, delta, f, gnorm, cg_iter);
-
- if (actred > eta0*prered)
- {
- iter++;
- memcpy(w, w_new, sizeof(double)*n);
- f = fnew;
- fun_obj->grad(w, g);
-
- gnorm = dnrm2_(&n, g, &inc);
- if (gnorm <= eps*gnorm0)
- break;
- }
- if (f < -1.0e+32)
- {
- info("WARNING: f < -1.0e+32\n");
- break;
- }
- if (fabs(actred) <= 0 && prered <= 0)
- {
- info("WARNING: actred and prered <= 0\n");
- break;
- }
- if (fabs(actred) <= 1.0e-12*fabs(f) &&
- fabs(prered) <= 1.0e-12*fabs(f))
- {
- info("WARNING: actred and prered too small\n");
- break;
- }
- }
-
- delete[] g;
- delete[] r;
- delete[] w_new;
- delete[] s;
- }
-
- int TRON::trcg(double delta, double *g, double *s, double *r)
- {
- int i, inc = 1;
- int n = fun_obj->get_nr_variable();
- double one = 1;
- double *d = new double[n];
- double *Hd = new double[n];
- double rTr, rnewTrnew, alpha, beta, cgtol;
-
- for (i=0; i<n; i++)
- {
- s[i] = 0;
- r[i] = -g[i];
- d[i] = r[i];
- }
- cgtol = eps_cg*dnrm2_(&n, g, &inc);
-
- int cg_iter = 0;
- rTr = ddot_(&n, r, &inc, r, &inc);
- while (1)
- {
- if (dnrm2_(&n, r, &inc) <= cgtol)
- break;
- cg_iter++;
- fun_obj->Hv(d, Hd);
-
- alpha = rTr/ddot_(&n, d, &inc, Hd, &inc);
- daxpy_(&n, &alpha, d, &inc, s, &inc);
- if (dnrm2_(&n, s, &inc) > delta)
- {
- info("cg reaches trust region boundary\n");
- alpha = -alpha;
- daxpy_(&n, &alpha, d, &inc, s, &inc);
-
- double std = ddot_(&n, s, &inc, d, &inc);
- double sts = ddot_(&n, s, &inc, s, &inc);
- double dtd = ddot_(&n, d, &inc, d, &inc);
- double dsq = delta*delta;
- double rad = sqrt(std*std + dtd*(dsq-sts));
- if (std >= 0)
- alpha = (dsq - sts)/(std + rad);
- else
- alpha = (rad - std)/dtd;
- daxpy_(&n, &alpha, d, &inc, s, &inc);
- alpha = -alpha;
- daxpy_(&n, &alpha, Hd, &inc, r, &inc);
- break;
- }
- alpha = -alpha;
- daxpy_(&n, &alpha, Hd, &inc, r, &inc);
- rnewTrnew = ddot_(&n, r, &inc, r, &inc);
- beta = rnewTrnew/rTr;
- dscal_(&n, &beta, d, &inc);
- daxpy_(&n, &one, r, &inc, d, &inc);
- rTr = rnewTrnew;
- }
-
- delete[] d;
- delete[] Hd;
-
- return(cg_iter);
- }
-
- double TRON::norm_inf(int n, double *x)
- {
- double dmax = fabs(x[0]);
- for (int i=1; i<n; i++)
- if (fabs(x[i]) >= dmax)
- dmax = fabs(x[i]);
- return(dmax);
- }
-
- void TRON::set_print_string(void (*print_string) (const char *buf))
- {
- tron_print_string = print_string;
- }
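For completeness, the removed tron.cpp above is LIBLINEAR's trust-region Newton solver (TRON); the gem keeps its own copy under data/ext/tron.cpp, updated in this release (+13 -5). Restated from the loop in TRON::tron in the code's own symbols (this is only a summary of what is shown above, not new behaviour): with s the conjugate-gradient step returned by trcg, actred = f(w) - f(w+s), prered = -(g^T s + (1/2) s^T H s) (computed via the CG residual r), and alpha the step-length estimate, the step is accepted whenever actred > eta0 * prered, and the trust-region radius delta is resized as

\delta \leftarrow
\begin{cases}
\min\bigl(\max(\alpha,\sigma_1)\,\lVert s\rVert,\ \sigma_2\delta\bigr) & \text{if } actred < \eta_0\,prered,\\
\max\bigl(\sigma_1\delta,\ \min(\alpha\lVert s\rVert,\ \sigma_2\delta)\bigr) & \text{if } \eta_0\,prered \le actred < \eta_1\,prered,\\
\max\bigl(\sigma_1\delta,\ \min(\alpha\lVert s\rVert,\ \sigma_3\delta)\bigr) & \text{if } \eta_1\,prered \le actred < \eta_2\,prered,\\
\max\bigl(\delta,\ \min(\alpha\lVert s\rVert,\ \sigma_3\delta)\bigr) & \text{otherwise,}
\end{cases}

with eta0 = 1e-4, eta1 = 0.25, eta2 = 0.75 and sigma1 = 0.25, sigma2 = 0.5, sigma3 = 4, exactly the constants declared at the top of TRON::tron.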