liblinear-ruby 0.0.1

Files changed (80)
  1. checksums.yaml +7 -0
  2. data/.gitignore +19 -0
  3. data/Gemfile +4 -0
  4. data/LICENSE.txt +22 -0
  5. data/README.md +46 -0
  6. data/Rakefile +1 -0
  7. data/ext/Makefile +237 -0
  8. data/ext/blas.h +25 -0
  9. data/ext/blasp.h +430 -0
  10. data/ext/daxpy.c +49 -0
  11. data/ext/ddot.c +50 -0
  12. data/ext/dnrm2.c +62 -0
  13. data/ext/dscal.c +44 -0
  14. data/ext/extconf.rb +12 -0
  15. data/ext/liblinear_wrap.cxx +4646 -0
  16. data/ext/linear.cpp +2811 -0
  17. data/ext/linear.h +74 -0
  18. data/ext/linear.rb +357 -0
  19. data/ext/tron.cpp +235 -0
  20. data/ext/tron.h +34 -0
  21. data/lib/liblinear.rb +89 -0
  22. data/lib/liblinear/error.rb +4 -0
  23. data/lib/liblinear/model.rb +66 -0
  24. data/lib/liblinear/parameter.rb +42 -0
  25. data/lib/liblinear/problem.rb +55 -0
  26. data/lib/liblinear/version.rb +3 -0
  27. data/liblinear-1.93/COPYRIGHT +31 -0
  28. data/liblinear-1.93/Makefile +37 -0
  29. data/liblinear-1.93/Makefile.win +30 -0
  30. data/liblinear-1.93/README +531 -0
  31. data/liblinear-1.93/blas/Makefile +22 -0
  32. data/liblinear-1.93/blas/blas.a +0 -0
  33. data/liblinear-1.93/blas/blas.h +25 -0
  34. data/liblinear-1.93/blas/blasp.h +430 -0
  35. data/liblinear-1.93/blas/daxpy.c +49 -0
  36. data/liblinear-1.93/blas/daxpy.o +0 -0
  37. data/liblinear-1.93/blas/ddot.c +50 -0
  38. data/liblinear-1.93/blas/ddot.o +0 -0
  39. data/liblinear-1.93/blas/dnrm2.c +62 -0
  40. data/liblinear-1.93/blas/dnrm2.o +0 -0
  41. data/liblinear-1.93/blas/dscal.c +44 -0
  42. data/liblinear-1.93/blas/dscal.o +0 -0
  43. data/liblinear-1.93/heart_scale +270 -0
  44. data/liblinear-1.93/linear.cpp +2811 -0
  45. data/liblinear-1.93/linear.def +18 -0
  46. data/liblinear-1.93/linear.h +74 -0
  47. data/liblinear-1.93/linear.o +0 -0
  48. data/liblinear-1.93/matlab/Makefile +58 -0
  49. data/liblinear-1.93/matlab/README +197 -0
  50. data/liblinear-1.93/matlab/libsvmread.c +212 -0
  51. data/liblinear-1.93/matlab/libsvmwrite.c +106 -0
  52. data/liblinear-1.93/matlab/linear_model_matlab.c +176 -0
  53. data/liblinear-1.93/matlab/linear_model_matlab.h +2 -0
  54. data/liblinear-1.93/matlab/make.m +21 -0
  55. data/liblinear-1.93/matlab/predict.c +331 -0
  56. data/liblinear-1.93/matlab/train.c +418 -0
  57. data/liblinear-1.93/predict +0 -0
  58. data/liblinear-1.93/predict.c +245 -0
  59. data/liblinear-1.93/python/Makefile +4 -0
  60. data/liblinear-1.93/python/README +343 -0
  61. data/liblinear-1.93/python/liblinear.py +277 -0
  62. data/liblinear-1.93/python/liblinearutil.py +250 -0
  63. data/liblinear-1.93/ruby/liblinear.i +41 -0
  64. data/liblinear-1.93/ruby/liblinear_wrap.cxx +4646 -0
  65. data/liblinear-1.93/ruby/linear.h +74 -0
  66. data/liblinear-1.93/ruby/linear.o +0 -0
  67. data/liblinear-1.93/train +0 -0
  68. data/liblinear-1.93/train.c +399 -0
  69. data/liblinear-1.93/tron.cpp +235 -0
  70. data/liblinear-1.93/tron.h +34 -0
  71. data/liblinear-1.93/tron.o +0 -0
  72. data/liblinear-1.93/windows/liblinear.dll +0 -0
  73. data/liblinear-1.93/windows/libsvmread.mexw64 +0 -0
  74. data/liblinear-1.93/windows/libsvmwrite.mexw64 +0 -0
  75. data/liblinear-1.93/windows/predict.exe +0 -0
  76. data/liblinear-1.93/windows/predict.mexw64 +0 -0
  77. data/liblinear-1.93/windows/train.exe +0 -0
  78. data/liblinear-1.93/windows/train.mexw64 +0 -0
  79. data/liblinear-ruby.gemspec +24 -0
  80. metadata +152 -0
data/ext/linear.h ADDED
@@ -0,0 +1,74 @@
+ #ifndef _LIBLINEAR_H
+ #define _LIBLINEAR_H
+
+ #ifdef __cplusplus
+ extern "C" {
+ #endif
+
+ struct feature_node
+ {
+     int index;
+     double value;
+ };
+
+ struct problem
+ {
+     int l, n;
+     double *y;
+     struct feature_node **x;
+     double bias;            /* < 0 if no bias term */
+ };
+
+ enum { L2R_LR, L2R_L2LOSS_SVC_DUAL, L2R_L2LOSS_SVC, L2R_L1LOSS_SVC_DUAL, MCSVM_CS, L1R_L2LOSS_SVC, L1R_LR, L2R_LR_DUAL, L2R_L2LOSS_SVR = 11, L2R_L2LOSS_SVR_DUAL, L2R_L1LOSS_SVR_DUAL }; /* solver_type */
+
+ struct parameter
+ {
+     int solver_type;
+
+     /* these are for training only */
+     double eps;             /* stopping criteria */
+     double C;
+     int nr_weight;
+     int *weight_label;
+     double* weight;
+     double p;
+ };
+
+ struct model
+ {
+     struct parameter param;
+     int nr_class;           /* number of classes */
+     int nr_feature;
+     double *w;
+     int *label;             /* label of each class */
+     double bias;
+ };
+
+ struct model* train(const struct problem *prob, const struct parameter *param);
+ void cross_validation(const struct problem *prob, const struct parameter *param, int nr_fold, double *target);
+
+ double predict_values(const struct model *model_, const struct feature_node *x, double* dec_values);
+ double predict(const struct model *model_, const struct feature_node *x);
+ double predict_probability(const struct model *model_, const struct feature_node *x, double* prob_estimates);
+
+ int save_model(const char *model_file_name, const struct model *model_);
+ struct model *load_model(const char *model_file_name);
+
+ int get_nr_feature(const struct model *model_);
+ int get_nr_class(const struct model *model_);
+ void get_labels(const struct model *model_, int* label);
+
+ void free_model_content(struct model *model_ptr);
+ void free_and_destroy_model(struct model **model_ptr_ptr);
+ void destroy_param(struct parameter *param);
+
+ const char *check_parameter(const struct problem *prob, const struct parameter *param);
+ int check_probability_model(const struct model *model);
+ void set_print_string_function(void (*print_func) (const char*));
+
+ #ifdef __cplusplus
+ }
+ #endif
+
+ #endif /* _LIBLINEAR_H */
+
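For orientation: the solver_type enum above selects which regularized problem train() solves. For example L2R_LR (L2-regularized logistic regression, the default used by the Ruby wrapper below) minimizes

    $$\min_w \; \tfrac{1}{2} w^\top w \;+\; C \sum_{i=1}^{l} \log\!\bigl(1 + e^{-y_i w^\top x_i}\bigr),$$

where C is parameter.C and the l pairs (x_i, y_i) come from struct problem. Prediction then reduces to evaluating w^T x (one weight vector per class in the multiclass case), which is why feature vectors are passed as sparse index/value lists terminated by a node with index -1.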
data/ext/linear.rb ADDED
@@ -0,0 +1,357 @@
+ require 'liblinear'
+ include Liblinear
+
+ def _int_array(seq)
+   size = seq.size
+   array = new_int(size)
+   i = 0
+   for item in seq
+     int_setitem(array, i, item)
+     i = i + 1
+   end
+   return array
+ end
+
+ def _double_array(seq)
+   size = seq.size
+   array = new_double(size)
+   i = 0
+   for item in seq
+     double_setitem(array, i, item)
+     i = i + 1
+   end
+   return array
+ end
+
+ def _free_int_array(x)
+   if !x.nil? # and !x.empty?
+     delete_int(x)
+   end
+ end
+
+ def _free_double_array(x)
+   if !x.nil? # and !x.empty?
+     delete_double(x)
+   end
+ end
+
+ def _int_array_to_list(x, n)
+   list = []
+   (0..n-1).each {|i| list << int_getitem(x, i) }
+   return list
+ end
+
+ def _double_array_to_list(x, n)
+   list = []
+   (0..n-1).each {|i| list << double_getitem(x, i) }
+   return list
+ end
+
+ class LParameter
+   attr_accessor :param
+
+   def initialize(*args)
+     @param = Liblinear::Parameter.new
+     @param.solver_type = L2R_LR
+     @param.C = 1
+     @param.eps = 0.01
+     @param.nr_weight = 0
+     @param.weight_label = _int_array([])
+     @param.weight = _double_array([])
+
+     args[0].each {|k,v|
+       self.send("#{k}=", v)
+     } if !args[0].nil?
+   end
+
+   def method_missing(m, *args)
+     #print m.to_s
+     #puts args.inspect
+     if m.to_s == 'weight_label='
+       @weight_label_len = args[0].size
+       pargs = _int_array(args[0])
+       _free_int_array(@param.weight_label)
+     elsif m.to_s == 'weight='
+       @weight_len = args[0].size
+       pargs = _double_array(args[0])
+       _free_double_array(@param.weight)
+     else
+       pargs = args[0]
+     end
+
+     if m.to_s.index('=')
+       @param.send("#{m}", pargs)
+     else
+       @param.send("#{m}")
+     end
+   end
+
+   def inspect
+     "LParameter: solver_type=#{@param.solver_type} C=#{@param.C} eps=#{@param.eps}"
+   end
+
+   def destroy
+     _free_int_array(@param.weight_label)
+     _free_double_array(@param.weight)
+     delete_parameter(@param)
+     @param = nil
+   end
+ end
+
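Because method_missing forwards reads and assignments to the underlying SWIG struct, parameters can be set either through the constructor hash or afterwards. A minimal usage sketch (values are illustrative):

    # Defaults: L2R_LR solver, C = 1, eps = 0.01.
    param = LParameter.new
    param.C = 10          # forwarded to the C struct via method_missing
    param.eps = 0.001

    # Equivalent one-step form; hash keys name the setters to call:
    param = LParameter.new('C' => 10, 'eps' => 0.001)

    # Class weights: the Ruby arrays are copied into C arrays, and any
    # previously allocated arrays are freed first.
    param.weight_label = [1, -1]
    param.weight = [1.0, 3.0]
    param.nr_weight = 2   # must match the length of the arrays above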
+ def _convert_to_feature_node_array(x, maxlen, bias=-1)
+   # convert a sequence or mapping to a feature_node array
+
+   # Find non-zero elements
+   iter_range = []
+   if x.class == Hash
+     x.each {|k, v|
+       # all zeros kept due to the precomputed kernel; no good solution yet
+       iter_range << k #if v != 0
+     }
+   elsif x.class == Array
+     x.each_index {|j|
+       iter_range << j #if x[j] != 0
+     }
+   else
+     raise TypeError, "data must be a hash or an array"
+   end
+
+   iter_range.sort!
+   if bias >= 0
+     data = feature_node_array(iter_range.size + 2)
+     #puts "bias element (#{iter_range.size},#{bias})"
+     feature_node_array_set(data, iter_range.size, maxlen + 1, bias)
+     feature_node_array_set(data, iter_range.size + 1, -1, 0)
+   else
+     data = feature_node_array(iter_range.size + 1)
+     feature_node_array_set(data, iter_range.size, -1, 0)
+   end
+
+   j = 0
+   for k in iter_range
+     #puts "element #{j}= (#{k},#{x[k]})"
+     feature_node_array_set(data, j, k, x[k])
+     j = j + 1
+   end
+   return data
+ end
+
+
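Both dense arrays and sparse hashes are accepted; either way the result is an index/value list closed by a sentinel node with index -1, which is what predict() in linear.h expects. An illustrative sketch:

    # Dense form: array positions become feature indices 0..n-1.
    dense = _convert_to_feature_node_array([0.5, 0.0, 1.2], 3)

    # Sparse form: hash keys are feature indices (as in LIBSVM files).
    sparse = _convert_to_feature_node_array({1 => 0.5, 3 => 1.2}, 3)

    # With bias >= 0, one extra node (index maxlen+1, value bias) is
    # appended before the terminator, mirroring prob.n += 1 below.
    biased = _convert_to_feature_node_array({1 => 0.5}, 3, 1.0)

    feature_node_array_destroy(dense)    # the caller owns each allocation
    feature_node_array_destroy(sparse)
    feature_node_array_destroy(biased)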
+ class LProblem
+   attr_accessor :prob, :maxlen, :size
+
+   def initialize(y, x, bias)
+     # assert_equal(y.size, x.size)
+     @prob = prob = Liblinear::Problem.new
+     @size = size = y.size
+
+     @y_array = y_array = new_double(size)
+     for i in (0..size-1)
+       double_setitem(@y_array, i, y[i])
+     end
+
+     @x_matrix = x_matrix = feature_node_matrix(size)
+     @data = []
+     @maxlen = 0 # max number of features
+     len_array = []
+
+     for i in (0..size-1)
+       data = _convert_to_feature_node_array(x[i], @maxlen, bias)
+       @data << data
+       feature_node_matrix_set(x_matrix, i, data)
+
+       if x[i].class == Hash
+         if x[i].size > 0
+           @maxlen = [@maxlen, x[i].keys.max].max
+         end
+       else
+         @maxlen = [@maxlen, x[i].size].max
+       end
+       len_array << x[i].size
+     end
+
+     if bias >= 0
+       set_bias_index(x_matrix, size, @maxlen, _int_array(len_array))
+     end
+
+     prob.y = y_array
+     prob.x = x_matrix
+     prob.bias = bias
+     prob.l = size
+     prob.n = @maxlen
+     if bias >= 0
+       prob.n += 1
+     end
+   end
+
+   def inspect
+     "LProblem: size = #{size} n=#{prob.n} bias=#{prob.bias} maxlen=#{@maxlen}"
+   end
+
+   def destroy
+     delete_problem(@prob)
+     delete_double(@y_array) # allocated with new_double, so freed with delete_double
+     for i in (0..size-1)
+       feature_node_array_destroy(@data[i])
+     end
+     feature_node_matrix_destroy(@x_matrix)
+   end
+ end
+
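Constructing a problem only needs parallel label and sample collections; a minimal sketch with illustrative data:

    # Labels and samples in the shape produced by read_file below.
    # bias = -1 disables the bias term (see "< 0 if no bias term" in linear.h).
    labels  = [1, -1, 1]
    samples = [{1 => 0.7}, {1 => -0.2, 2 => 1.0}, {2 => 0.4}]

    prob = LProblem.new(labels, samples, -1)
    puts prob.inspect   # e.g. "LProblem: size = 3 n=2 bias=-1.0 maxlen=2"
    prob.destroy        # frees the C-side arrays when done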
+ class LModel
+   attr_accessor :model, :probability
+
+   def initialize(arg1, arg2=nil)
+     if arg2 == nil
+       # create model from file
+       filename = arg1
+       @model = load_model(filename)
+     else
+       # create model from problem and parameter
+       prob, param = arg1, arg2
+       @prob = prob
+       msg = check_parameter(prob.prob, param.param)
+       raise ArgumentError, msg if msg
+       @model = Liblinear::train(prob.prob, param.param)
+     end
+     # set up some class-wide variables
+     @nr_class = Liblinear::get_nr_class(@model)
+     # create labels (classes)
+     intarr = new_int(@nr_class)
+     Liblinear::get_labels(@model, intarr)
+     @labels = _int_array_to_list(intarr, @nr_class)
+     delete_int(intarr)
+   end
+
+   def predict(x)
+     data = _convert_to_feature_node_array(x, @model.nr_feature, @model.bias)
+     ret = Liblinear::predict(@model, data)
+     feature_node_array_destroy(data)
+     return ret
+   end
+
+   def get_nr_class
+     return @nr_class
+   end
+
+   def get_labels
+     return @labels
+   end
+
+   def predict_values_raw(x)
+     # convert x into feature_node, allocate a double array for return
+     n = (@nr_class*(@nr_class-1)/2).floor
+     data = _convert_to_feature_node_array(x, @model.nr_feature, @model.bias)
+     dblarr = new_double(n)
+     Liblinear::predict_values(@model, data, dblarr)
+     ret = _double_array_to_list(dblarr, n)
+     delete_double(dblarr)
+     feature_node_array_destroy(data)
+     return ret
+   end
+
+   def predict_values(x)
+     v = predict_values_raw(x)
+     #puts v.inspect
+     if false
+     #if @svm_type == NU_SVR or @svm_type == EPSILON_SVR or @svm_type == ONE_CLASS
+       return v[0]
+     else #self.svm_type == C_SVC or self.svm_type == NU_SVC
+       count = 0
+
+       # create a width x height array
+       width = @labels.size
+       height = @labels.size
+       d = Array.new(width)
+       d.map! { Array.new(height) }
+
+       for i in (0..@labels.size-1)
+         for j in (i+1..@labels.size-1)
+           d[@labels[i]][@labels[j]] = v[count]
+           d[@labels[j]][@labels[i]] = -v[count]
+           count += 1
+         end
+       end
+       return d
+     end
+   end
+
+   def predict_probability(x)
+     # if not @probability
+     #   raise TypeError, "model does not support probability estimates"
+     # end
+
+     # convert x into feature_node, alloc a double array to receive probabilities
+     data = _convert_to_feature_node_array(x, @model.nr_feature, @model.bias)
+     dblarr = new_double(@nr_class)
+     pred = Liblinear::predict_probability(@model, data, dblarr)
+     pv = _double_array_to_list(dblarr, @nr_class)
+     delete_double(dblarr)
+     feature_node_array_destroy(data)
+     p = {}
+     for i in (0..@labels.size-1)
+       p[@labels[i]] = pv[i]
+     end
+     return pred, p
+   end
+
+   # def get_svr_probability
+   #   # leave the error checking to svm.cpp code
+   #   ret = Liblinear::get_svr_probability(@model)
+   #   if ret == 0
+   #     raise TypeError, "not a regression model or probability information not available"
+   #   end
+   #   return ret
+   # end
+
+   # def get_svr_pdf
+   #   # get_svr_probability will handle error checking
+   #   sigma = get_svr_probability()
+   #   return Proc.new{|z| exp(-z.abs/sigma)/(2*sigma)} # TODO: verify this works
+   # end
+
+   def save(filename)
+     save_model(filename, @model)
+   end
+
+   def destroy
+     destroy_model(@model)
+   end
+ end
+
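End to end, the classes above compose as follows; a minimal sketch, assuming the bundled heart_scale data file is reachable at the path shown:

    labels, samples = read_file('liblinear-1.93/heart_scale')

    param = LParameter.new('C' => 1, 'eps' => 0.01)
    prob  = LProblem.new(labels, samples, -1)   # -1: no bias term
    model = LModel.new(prob, param)

    # Predict the label of the first training sample.
    puts model.predict(samples[0])

    # Persist and reload.
    model.save('heart_scale.model')
    reloaded = LModel.new('heart_scale.model')
    puts reloaded.get_labels.inspect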
+ def cross_validation(prob, param, fold)
+   # linear.h declares cross_validation's target as double*, so use a double array
+   target = new_double(prob.size)
+   Liblinear::cross_validation(prob.prob, param.param, fold, target)
+   ret = _double_array_to_list(target, prob.size)
+   delete_double(target)
+   return ret
+ end
+
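Continuing the sketch above, cross-validation returns one predicted label per training sample, from which accuracy follows directly:

    # 5-fold cross-validation over the training problem.
    predicted = cross_validation(prob, param, 5)
    correct = predicted.zip(labels).count {|p, y| p == y }
    printf("CV accuracy: %.1f%%\n", 100.0 * correct / labels.size)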
+ def read_file(filename)
+   labels = []
+   samples = []
+   max_index = 0
+
+   f = File.open(filename)
+   f.each do |line|
+     elems = line.split
+     sample = {}
+     for e in elems[1..-1]
+       points = e.split(":")
+       sample[points[0].to_i] = points[1].to_f
+       if points[0].to_i > max_index # track the largest feature index seen
+         max_index = points[0].to_i
+       end
+     end
+     labels << elems[0].to_i
+     samples << sample
+     #print elems[0].to_i
+     #print " - "
+     #puts sample.inspect
+   end
+   puts "#{filename}: #{samples.size} samples loaded."
+   return labels, samples
+ end
+
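read_file parses the LIBSVM/LIBLINEAR sparse text format used by the bundled heart_scale: one sample per line, label first, then index:value pairs. A sketch with illustrative file content:

    # Input lines of the form "label index:value index:value ...":
    #   +1 1:0.708 2:1 3:1
    #   -1 1:0.583 2:-1 3:0.333
    labels, samples = read_file('heart_scale')
    # labels  => [1, -1]
    # samples => [{1=>0.708, 2=>1.0, 3=>1.0}, {1=>0.583, 2=>-1.0, 3=>0.333}]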
data/ext/tron.cpp ADDED
@@ -0,0 +1,235 @@
+ #include <math.h>
+ #include <stdio.h>
+ #include <string.h>
+ #include <stdarg.h>
+ #include "tron.h"
+
+ #ifndef min
+ template <class T> static inline T min(T x,T y) { return (x<y)?x:y; }
+ #endif
+
+ #ifndef max
+ template <class T> static inline T max(T x,T y) { return (x>y)?x:y; }
+ #endif
+
+ #ifdef __cplusplus
+ extern "C" {
+ #endif
+
+ extern double dnrm2_(int *, double *, int *);
+ extern double ddot_(int *, double *, int *, double *, int *);
+ extern int daxpy_(int *, double *, double *, int *, double *, int *);
+ extern int dscal_(int *, double *, double *, int *);
+
+ #ifdef __cplusplus
+ }
+ #endif
+
+ static void default_print(const char *buf)
+ {
+     fputs(buf,stdout);
+     fflush(stdout);
+ }
+
+ void TRON::info(const char *fmt,...)
+ {
+     char buf[BUFSIZ];
+     va_list ap;
+     va_start(ap,fmt);
+     vsprintf(buf,fmt,ap);
+     va_end(ap);
+     (*tron_print_string)(buf);
+ }
+
+ TRON::TRON(const function *fun_obj, double eps, int max_iter)
+ {
+     this->fun_obj=const_cast<function *>(fun_obj);
+     this->eps=eps;
+     this->max_iter=max_iter;
+     tron_print_string = default_print;
+ }
+
+ TRON::~TRON()
+ {
+ }
+
+ void TRON::tron(double *w)
+ {
+     // Parameters for updating the iterates.
+     double eta0 = 1e-4, eta1 = 0.25, eta2 = 0.75;
+
+     // Parameters for updating the trust region size delta.
+     double sigma1 = 0.25, sigma2 = 0.5, sigma3 = 4;
+
+     int n = fun_obj->get_nr_variable();
+     int i, cg_iter;
+     double delta, snorm, one=1.0;
+     double alpha, f, fnew, prered, actred, gs;
+     int search = 1, iter = 1, inc = 1;
+     double *s = new double[n];
+     double *r = new double[n];
+     double *w_new = new double[n];
+     double *g = new double[n];
+
+     for (i=0; i<n; i++)
+         w[i] = 0;
+
+     f = fun_obj->fun(w);
+     fun_obj->grad(w, g);
+     delta = dnrm2_(&n, g, &inc);
+     double gnorm1 = delta;
+     double gnorm = gnorm1;
+
+     if (gnorm <= eps*gnorm1)
+         search = 0;
+
+     iter = 1;
+
+     while (iter <= max_iter && search)
+     {
+         cg_iter = trcg(delta, g, s, r);
+
+         memcpy(w_new, w, sizeof(double)*n);
+         daxpy_(&n, &one, s, &inc, w_new, &inc);
+
+         gs = ddot_(&n, g, &inc, s, &inc);
+         prered = -0.5*(gs-ddot_(&n, s, &inc, r, &inc));
+         fnew = fun_obj->fun(w_new);
+
+         // Compute the actual reduction.
+         actred = f - fnew;
+
+         // On the first iteration, adjust the initial step bound.
+         snorm = dnrm2_(&n, s, &inc);
+         if (iter == 1)
+             delta = min(delta, snorm);
+
+         // Compute prediction alpha*snorm of the step.
+         if (fnew - f - gs <= 0)
+             alpha = sigma3;
+         else
+             alpha = max(sigma1, -0.5*(gs/(fnew - f - gs)));
+
+         // Update the trust region bound according to the ratio of actual to predicted reduction.
+         if (actred < eta0*prered)
+             delta = min(max(alpha, sigma1)*snorm, sigma2*delta);
+         else if (actred < eta1*prered)
+             delta = max(sigma1*delta, min(alpha*snorm, sigma2*delta));
+         else if (actred < eta2*prered)
+             delta = max(sigma1*delta, min(alpha*snorm, sigma3*delta));
+         else
+             delta = max(delta, min(alpha*snorm, sigma3*delta));
+
+         info("iter %2d act %5.3e pre %5.3e delta %5.3e f %5.3e |g| %5.3e CG %3d\n", iter, actred, prered, delta, f, gnorm, cg_iter);
+
+         if (actred > eta0*prered)
+         {
+             iter++;
+             memcpy(w, w_new, sizeof(double)*n);
+             f = fnew;
+             fun_obj->grad(w, g);
+
+             gnorm = dnrm2_(&n, g, &inc);
+             if (gnorm <= eps*gnorm1)
+                 break;
+         }
+         if (f < -1.0e+32)
+         {
+             info("WARNING: f < -1.0e+32\n");
+             break;
+         }
+         if (fabs(actred) <= 0 && prered <= 0)
+         {
+             info("WARNING: actred and prered <= 0\n");
+             break;
+         }
+         if (fabs(actred) <= 1.0e-12*fabs(f) &&
+             fabs(prered) <= 1.0e-12*fabs(f))
+         {
+             info("WARNING: actred and prered too small\n");
+             break;
+         }
+     }
+
+     delete[] g;
+     delete[] r;
+     delete[] w_new;
+     delete[] s;
+ }
+
+ int TRON::trcg(double delta, double *g, double *s, double *r)
+ {
+     int i, inc = 1;
+     int n = fun_obj->get_nr_variable();
+     double one = 1;
+     double *d = new double[n];
+     double *Hd = new double[n];
+     double rTr, rnewTrnew, alpha, beta, cgtol;
+
+     for (i=0; i<n; i++)
+     {
+         s[i] = 0;
+         r[i] = -g[i];
+         d[i] = r[i];
+     }
+     cgtol = 0.1*dnrm2_(&n, g, &inc);
+
+     int cg_iter = 0;
+     rTr = ddot_(&n, r, &inc, r, &inc);
+     while (1)
+     {
+         if (dnrm2_(&n, r, &inc) <= cgtol)
+             break;
+         cg_iter++;
+         fun_obj->Hv(d, Hd);
+
+         alpha = rTr/ddot_(&n, d, &inc, Hd, &inc);
+         daxpy_(&n, &alpha, d, &inc, s, &inc);
+         if (dnrm2_(&n, s, &inc) > delta)
+         {
+             info("cg reaches trust region boundary\n");
+             alpha = -alpha;
+             daxpy_(&n, &alpha, d, &inc, s, &inc);
+
+             double std = ddot_(&n, s, &inc, d, &inc);
+             double sts = ddot_(&n, s, &inc, s, &inc);
+             double dtd = ddot_(&n, d, &inc, d, &inc);
+             double dsq = delta*delta;
+             double rad = sqrt(std*std + dtd*(dsq-sts));
+             if (std >= 0)
+                 alpha = (dsq - sts)/(std + rad);
+             else
+                 alpha = (rad - std)/dtd;
+             daxpy_(&n, &alpha, d, &inc, s, &inc);
+             alpha = -alpha;
+             daxpy_(&n, &alpha, Hd, &inc, r, &inc);
+             break;
+         }
+         alpha = -alpha;
+         daxpy_(&n, &alpha, Hd, &inc, r, &inc);
+         rnewTrnew = ddot_(&n, r, &inc, r, &inc);
+         beta = rnewTrnew/rTr;
+         dscal_(&n, &beta, d, &inc);
+         daxpy_(&n, &one, r, &inc, d, &inc);
+         rTr = rnewTrnew;
+     }
+
+     delete[] d;
+     delete[] Hd;
+
+     return(cg_iter);
+ }
+
+ double TRON::norm_inf(int n, double *x)
+ {
+     double dmax = fabs(x[0]);
+     for (int i=1; i<n; i++)
+         if (fabs(x[i]) >= dmax)
+             dmax = fabs(x[i]);
+     return(dmax);
+ }
+
+ void TRON::set_print_string(void (*print_string) (const char *buf))
+ {
+     tron_print_string = print_string;
+ }
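A note on the boundary step in trcg: when the conjugate-gradient iterate leaves the trust region, the code backtracks and then extends the step exactly to the boundary. The alpha it computes is the positive root of the quadratic obtained from the condition that the step lands on the sphere of radius delta (s: current step, d: search direction):

    $$\|s + \alpha d\|^2 = \delta^2 \;\iff\; (d^\top d)\,\alpha^2 + 2(s^\top d)\,\alpha + (s^\top s - \delta^2) = 0.$$

With std = s^T d, sts = s^T s, dtd = d^T d, dsq = delta^2 and rad = sqrt(std^2 + dtd(dsq - sts)), the positive root can be written in two algebraically equivalent forms:

    $$\alpha \;=\; \frac{\mathrm{rad} - \mathrm{std}}{\mathrm{dtd}} \;=\; \frac{\mathrm{dsq} - \mathrm{sts}}{\mathrm{std} + \mathrm{rad}},$$

and the code picks whichever form avoids subtracting nearly equal quantities, based on the sign of std, for numerical stability.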