rb-gsl 1.16.0.2 → 1.16.0.3.rc1
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +4 -4
- data/ChangeLog +5 -0
- data/README +2 -2
- data/Rakefile +2 -3
- data/lib/gsl/version.rb +1 -1
- data/rdoc/alf.rdoc +5 -5
- data/rdoc/blas.rdoc +8 -8
- data/rdoc/bspline.rdoc +16 -16
- data/rdoc/changes.rdoc +4 -9
- data/rdoc/cheb.rdoc +24 -24
- data/rdoc/cholesky_complex.rdoc +21 -21
- data/rdoc/combi.rdoc +36 -36
- data/rdoc/complex.rdoc +21 -21
- data/rdoc/const.rdoc +46 -46
- data/rdoc/dht.rdoc +48 -48
- data/rdoc/diff.rdoc +41 -41
- data/rdoc/ehandling.rdoc +5 -5
- data/rdoc/eigen.rdoc +152 -152
- data/rdoc/fft.rdoc +145 -145
- data/rdoc/fit.rdoc +108 -108
- data/rdoc/function.rdoc +10 -10
- data/rdoc/graph.rdoc +16 -16
- data/rdoc/hist.rdoc +102 -102
- data/rdoc/hist2d.rdoc +41 -41
- data/rdoc/hist3d.rdoc +8 -8
- data/rdoc/index.rdoc +18 -21
- data/rdoc/integration.rdoc +109 -109
- data/rdoc/interp.rdoc +70 -70
- data/rdoc/intro.rdoc +6 -6
- data/rdoc/linalg.rdoc +187 -187
- data/rdoc/linalg_complex.rdoc +1 -1
- data/rdoc/math.rdoc +57 -57
- data/rdoc/matrix.rdoc +272 -272
- data/rdoc/min.rdoc +56 -56
- data/rdoc/monte.rdoc +21 -21
- data/rdoc/multimin.rdoc +94 -94
- data/rdoc/multiroot.rdoc +79 -79
- data/rdoc/narray.rdoc +31 -31
- data/rdoc/ndlinear.rdoc +53 -53
- data/rdoc/nonlinearfit.rdoc +99 -99
- data/rdoc/ntuple.rdoc +30 -30
- data/rdoc/odeiv.rdoc +87 -87
- data/rdoc/perm.rdoc +89 -89
- data/rdoc/poly.rdoc +65 -65
- data/rdoc/qrng.rdoc +20 -20
- data/rdoc/randist.rdoc +81 -81
- data/rdoc/ref.rdoc +56 -56
- data/rdoc/rng.rdoc +84 -84
- data/rdoc/roots.rdoc +56 -56
- data/rdoc/sf.rdoc +427 -427
- data/rdoc/siman.rdoc +18 -18
- data/rdoc/sort.rdoc +29 -29
- data/rdoc/start.rdoc +8 -8
- data/rdoc/stats.rdoc +51 -51
- data/rdoc/sum.rdoc +11 -11
- data/rdoc/tensor.rdoc +30 -30
- data/rdoc/tut.rdoc +1 -1
- data/rdoc/use.rdoc +37 -37
- data/rdoc/vector.rdoc +187 -187
- data/rdoc/vector_complex.rdoc +23 -23
- data/rdoc/wavelet.rdoc +46 -46
- metadata +17 -20
- data/rdoc/rngextra.rdoc +0 -11
- data/rdoc/screenshot.rdoc +0 -40
data/rdoc/min.rdoc
CHANGED
@@ -1,54 +1,54 @@
|
|
1
1
|
#
|
2
2
|
# = One dimensional Minimization
|
3
3
|
#
|
4
|
-
# This chapter describes routines for finding minima of arbitrary
|
4
|
+
# This chapter describes routines for finding minima of arbitrary
|
5
5
|
# one-dimensional functions.
|
6
6
|
#
|
7
7
|
#
|
8
8
|
# Contents:
|
9
|
-
# 1. {Introduction}[link:
|
10
|
-
# 1. {GSL::Min::FMinimizer class}[link:
|
11
|
-
# 1. {Iteration}[link:
|
12
|
-
# 1. {Stopping Parameters}[link:
|
13
|
-
# 1. {Examples}[link:
|
9
|
+
# 1. {Introduction}[link:min_rdoc.html#label-Introduction]
|
10
|
+
# 1. {GSL::Min::FMinimizer class}[link:min_rdoc.html#label-Minimizer+class]
|
11
|
+
# 1. {Iteration}[link:min_rdoc.html#label-Iteration]
|
12
|
+
# 1. {Stopping Parameters}[link:min_rdoc.html#label-Stopping+Parameters]
|
13
|
+
# 1. {Examples}[link:min_rdoc.html#label-Example]
|
14
14
|
#
|
15
|
-
# ==
|
15
|
+
# == Introduction
|
16
16
|
#
|
17
|
-
# The minimization algorithms begin with a bounded region known to contain
|
18
|
-
# a minimum. The region is described by <tt>a</tt> lower bound a and an upper bound
|
17
|
+
# The minimization algorithms begin with a bounded region known to contain
|
18
|
+
# a minimum. The region is described by a lower bound <tt>a</tt> and an upper bound
|
19
19
|
# <tt>b</tt>, with an estimate of the location of the minimum <tt>x</tt>.
|
20
20
|
#
|
21
|
-
# The value of the function at <tt>x</tt> must be less than the value of the
|
21
|
+
# The value of the function at <tt>x</tt> must be less than the value of the
|
22
22
|
# function at the ends of the interval,
|
23
23
|
# f(a) > f(x) < f(b)
|
24
|
-
# This condition guarantees that a minimum is contained somewhere within the
|
24
|
+
# This condition guarantees that a minimum is contained somewhere within the
|
25
25
|
# interval. On each iteration a new point <tt>x'</tt> is selected using one of the
|
26
|
-
# available algorithms. If the new point is a better estimate of the minimum,
|
27
|
-
# <tt>f(x') < f(x)</tt>, then the current estimate of the minimum <tt>x</tt> is
|
28
|
-
# updated. The new point also allows the size of the bounded interval to be
|
29
|
-
# reduced, by choosing the most compact set of points which satisfies the
|
30
|
-
# constraint <tt>f(a) > f(x) < f(b)</tt>. The interval is reduced until it
|
31
|
-
# encloses the true minimum to a desired tolerance. This provides a best
|
26
|
+
# available algorithms. If the new point is a better estimate of the minimum,
|
27
|
+
# <tt>f(x') < f(x)</tt>, then the current estimate of the minimum <tt>x</tt> is
|
28
|
+
# updated. The new point also allows the size of the bounded interval to be
|
29
|
+
# reduced, by choosing the most compact set of points which satisfies the
|
30
|
+
# constraint <tt>f(a) > f(x) < f(b)</tt>. The interval is reduced until it
|
31
|
+
# encloses the true minimum to a desired tolerance. This provides a best
|
32
32
|
# estimate of the location of the minimum and a rigorous error estimate.
|
33
33
|
#
|
34
|
-
# Several bracketing algorithms are available within a single framework.
|
35
|
-
# The user provides a high-level driver for the algorithm, and the library
|
36
|
-
# provides the individual functions necessary for each of the steps. There
|
34
|
+
# Several bracketing algorithms are available within a single framework.
|
35
|
+
# The user provides a high-level driver for the algorithm, and the library
|
36
|
+
# provides the individual functions necessary for each of the steps. There
|
37
37
|
# are three main phases of the iteration. The steps are,
|
38
38
|
# * initialize minimizer (or <tt>solver</tt>) state, <tt>s</tt>, for algorithm <tt>T</tt>
|
39
39
|
# * update <tt>s</tt> using the iteration <tt>T</tt>
|
40
40
|
# * test <tt>s</tt> for convergence, and repeat iteration if necessary
|
41
41
|
#
|
42
|
-
# The state of the minimizers is held in a <tt>GSL::Min::FMinimizer</tt> object.
|
42
|
+
# The state of the minimizers is held in a <tt>GSL::Min::FMinimizer</tt> object.
|
43
43
|
# The updating procedure use only function evaluations (not derivatives).
|
44
|
-
# The function to minimize is given as an instance of the {GSL::Function}[link:
|
44
|
+
# The function to minimize is given as an instance of the {GSL::Function}[link:function_rdoc.html] class to the minimizer.
|
45
45
|
#
|
46
46
|
#
|
47
|
-
# ==
|
47
|
+
# == FMinimizer class
|
48
48
|
# ---
|
49
49
|
# * GSL::Min::FMinimizer.alloc(t)
|
50
50
|
#
|
51
|
-
# These method create an instance of the <tt>GSL::Min::FMinimizer</tt> class of
|
51
|
+
# These methods create an instance of the <tt>GSL::Min::FMinimizer</tt> class of
|
52
52
|
# type <tt>t</tt>. The type <tt>t</tt> is given by a String,
|
53
53
|
# * "goldensection"
|
54
54
|
# * "brent"
|
@@ -65,52 +65,52 @@
|
|
65
65
|
# ---
|
66
66
|
# * GSL::Min::FMinimizer#set(f, xmin, xlow, xup)
|
67
67
|
#
|
68
|
-
# This method sets, or resets, an existing minimizer <tt>self</tt> to use
|
68
|
+
# This method sets, or resets, an existing minimizer <tt>self</tt> to use
|
69
69
|
# the function <tt>f</tt> (given by a <tt>GSL::Function</tt>
|
70
|
-
# object) and the initial search interval [<tt>xlow, xup</tt>],
|
70
|
+
# object) and the initial search interval [<tt>xlow, xup</tt>],
|
71
71
|
# with a guess for the location of the minimum <tt>xmin</tt>.
|
72
72
|
#
|
73
|
-
# If the interval given does not contain a minimum, then the
|
73
|
+
# If the interval given does not contain a minimum, then the
|
74
74
|
# method returns an error code of <tt>GSL::FAILURE</tt>.
|
75
75
|
#
|
76
76
|
# ---
|
77
77
|
# * GSL::Min::FMinimizer#set_with_values(f, xmin, fmin, xlow, flow, xup, fup)
|
78
78
|
#
|
79
|
-
# This method is equivalent to <tt>Fminimizer#set</tt> but uses the values
|
80
|
-
# <tt>fmin, flowe</tt> and <tt>fup</tt> instead of computing
|
79
|
+
# This method is equivalent to <tt>Fminimizer#set</tt> but uses the values
|
80
|
+
# <tt>fmin, flow</tt> and <tt>fup</tt> instead of computing
|
81
81
|
# <tt>f(xmin), f(xlow)</tt> and <tt>f(xup)</tt>.
|
82
82
|
#
|
83
83
|
# ---
|
84
84
|
# * GSL::Min::FMinimizer#name
|
85
85
|
#
|
86
|
-
# This returns the name of the minimizer.
|
86
|
+
# This returns the name of the minimizer.
|
87
87
|
#
|
88
|
-
# ==
|
88
|
+
# == Iteration
|
89
89
|
# ---
|
90
90
|
# * GSL::Min::FMinimizer#iterate
|
91
91
|
#
|
92
|
-
# This method performs a single iteration of the minimizer <tt>self</tt>.
|
93
|
-
# If the iteration encounters an unexpected problem then an error code
|
92
|
+
# This method performs a single iteration of the minimizer <tt>self</tt>.
|
93
|
+
# If the iteration encounters an unexpected problem then an error code
|
94
94
|
# will be returned,
|
95
|
-
# * <tt>GSL::EBADFUNC</tt>: the iteration encountered a singular point where the
|
95
|
+
# * <tt>GSL::EBADFUNC</tt>: the iteration encountered a singular point where the
|
96
96
|
# function evaluated to <tt>Inf</tt> or <tt>NaN</tt>.
|
97
|
-
# * <tt>GSL::FAILURE</tt>: the algorithm could not improve the current best
|
97
|
+
# * <tt>GSL::FAILURE</tt>: the algorithm could not improve the current best
|
98
98
|
# approximation or bounding interval.
|
99
|
-
# The minimizer maintains a current best estimate of the position of
|
100
|
-
# the minimum at all times, and the current interval bounding the minimum.
|
99
|
+
# The minimizer maintains a current best estimate of the position of
|
100
|
+
# the minimum at all times, and the current interval bounding the minimum.
|
101
101
|
# This information can be accessed with the following auxiliary methods
|
102
102
|
#
|
103
103
|
# ---
|
104
104
|
# * GSL::Min::FMinimizer#x_minimum
|
105
105
|
#
|
106
|
-
# Returns the current estimate of the position of the minimum
|
106
|
+
# Returns the current estimate of the position of the minimum
|
107
107
|
# for the minimizer <tt>self</tt>.
|
108
108
|
#
|
109
109
|
# ---
|
110
110
|
# * GSL::Min::FMinimizer#x_upper
|
111
111
|
# * GSL::Min::FMinimizer#x_lower
|
112
112
|
#
|
113
|
-
# Return the current upper and lower bound of the interval for the
|
113
|
+
# Return the current upper and lower bound of the interval for the
|
114
114
|
# minimizer <tt>self</tt>.
|
115
115
|
#
|
116
116
|
# ---
|
@@ -118,33 +118,33 @@
|
|
118
118
|
# * GSL::Min::FMinimizer#f_upper
|
119
119
|
# * GSL::Min::FMinimizer#f_lower
|
120
120
|
#
|
121
|
-
# Return the value of the function at the current estimate of the
|
122
|
-
# minimum and at the upper and lower bounds of interval
|
121
|
+
# Return the value of the function at the current estimate of the
|
122
|
+
# minimum and at the upper and lower bounds of interval
|
123
123
|
# for the minimizer <tt>self</tt>.
|
124
124
|
#
|
125
|
-
# ==
|
125
|
+
# == Stopping Parameters
|
126
126
|
# ---
|
127
127
|
# * GSL::Min::FMinimizer#test_interval(epsabs, epsrel)
|
128
128
|
# * GSL::Min.test_interval(xlow, xup, epsabs, epsrel)
|
129
129
|
#
|
130
|
-
# These methoeds test for the convergence of the interval
|
131
|
-
# [<tt>xlow, xup</tt>] with absolute error <tt>epsabs</tt> and relative
|
132
|
-
# error <tt>epsrel</tt>. The test returns <tt>GSL::SUCCESS</tt>
|
130
|
+
# These methods test for the convergence of the interval
|
131
|
+
# [<tt>xlow, xup</tt>] with absolute error <tt>epsabs</tt> and relative
|
132
|
+
# error <tt>epsrel</tt>. The test returns <tt>GSL::SUCCESS</tt>
|
133
133
|
# if the following condition is achieved,
|
134
|
-
# |a - b| < epsabs + epsrel min(|a|,|b|)
|
135
|
-
# when the interval <tt>x = [a,b]</tt> does not include the origin.
|
136
|
-
# If the interval includes the origin then <tt>min(|a|,|b|)</tt> is
|
137
|
-
# replaced by zero (which is the minimum value of |x| over the interval).
|
138
|
-
# This ensures that the relative error is accurately estimated for minima
|
134
|
+
# |a - b| < epsabs + epsrel min(|a|,|b|)
|
135
|
+
# when the interval <tt>x = [a,b]</tt> does not include the origin.
|
136
|
+
# If the interval includes the origin then <tt>min(|a|,|b|)</tt> is
|
137
|
+
# replaced by zero (which is the minimum value of |x| over the interval).
|
138
|
+
# This ensures that the relative error is accurately estimated for minima
|
139
139
|
# close to the origin.
|
140
140
|
#
|
141
|
-
# This condition on the interval also implies that any estimate of the
|
142
|
-
# minimum x_m in the interval satisfies the same condition with respect
|
141
|
+
# This condition on the interval also implies that any estimate of the
|
142
|
+
# minimum x_m in the interval satisfies the same condition with respect
|
143
143
|
# to the true minimum x_m^*,
|
144
144
|
# |x_m - x_m^*| < epsabs + epsrel x_m^*
|
145
145
|
# assuming that the true minimum x_m^* is contained within the interval.
|
146
146
|
#
|
147
|
-
# ==
|
147
|
+
# == Example
|
148
148
|
# To find the minimum of the function f(x) = cos(x) + 1.0:
|
149
149
|
#
|
150
150
|
# #!/usr/bin/env ruby
|
@@ -180,10 +180,10 @@
|
|
180
180
|
# iter, a, b, m, m - m_expected, b - a);
|
181
181
|
# end while status == GSL::CONTINUE and iter < max_iter
|
182
182
|
#
|
183
|
-
# {prev}[link:
|
184
|
-
# {next}[link:
|
183
|
+
# {prev}[link:roots_rdoc.html]
|
184
|
+
# {next}[link:multiroot_rdoc.html]
|
185
185
|
#
|
186
|
-
# {Reference index}[link:
|
186
|
+
# {Reference index}[link:ref_rdoc.html]
|
187
187
|
# {top}[link:index.html]
|
188
188
|
#
|
189
189
|
#
|
data/rdoc/monte.rdoc
CHANGED
@@ -1,7 +1,7 @@
|
|
1
1
|
#
|
2
2
|
# = Monte Carlo Integration
|
3
3
|
#
|
4
|
-
# ==
|
4
|
+
# == The Function class
|
5
5
|
# The function to be integrated has its own datatype, the <tt>GSL::Monte::Function</tt> class.
|
6
6
|
#
|
7
7
|
# ---
|
@@ -18,7 +18,7 @@
|
|
18
18
|
# }
|
19
19
|
# dim = 2
|
20
20
|
# mf = Monte::Function.alloc(proc_f, dim)
|
21
|
-
# mf.set_params([3, 2, 1])
|
21
|
+
# mf.set_params([3, 2, 1])
|
22
22
|
#
|
23
23
|
# ---
|
24
24
|
# * GSL::Monte::Function#set(proc, dim, params)
|
@@ -32,34 +32,34 @@
|
|
32
32
|
# * GSL::Monte::Function#call
|
33
33
|
#
|
34
34
|
#
|
35
|
-
# ==
|
36
|
-
# ===
|
35
|
+
# == Monte Carlo plans, algorithms
|
36
|
+
# === PLAIN Monte Carlo
|
37
37
|
# ---
|
38
38
|
# * GSL::Monte::Plain.alloc(dim)
|
39
39
|
# * GSL::Monte::Plain#init
|
40
40
|
#
|
41
|
-
# ===
|
41
|
+
# === Miser
|
42
42
|
# ---
|
43
43
|
# * GSL::Monte::Miser.alloc(dim)
|
44
44
|
# * GSL::Monte::Miser#init
|
45
45
|
#
|
46
|
-
# ===
|
46
|
+
# === Vegas
|
47
47
|
# ---
|
48
48
|
# * GSL::Monte::Vegas.alloc(dim)
|
49
49
|
# * GSL::Monte::Vegas#init
|
50
50
|
#
|
51
51
|
#
|
52
|
-
# ==
|
52
|
+
# == Integration
|
53
53
|
# ---
|
54
54
|
# * GSL::Monte::Function#integrate(xl, xu, dim, calls, rng, s)
|
55
55
|
# * GSL::Monte::Function#integrate(xl, xu, dim, calls, s)
|
56
56
|
# * GSL::Monte::Function#integrate(xl, xu, calls, rng, s)
|
57
57
|
# * GSL::Monte::Function#integrate(xl, xu, calls, s)
|
58
58
|
#
|
59
|
-
# This method performs Monte-Carlo integration of the function <tt>self</tt>
|
60
|
-
# using the algorithm <tt>s</tt>, over the <tt>dim</tt>-dimensional hypercubic
|
61
|
-
# region defined by the lower and upper
|
62
|
-
# limits in the arrays <tt>xl</tt> and <tt>xu</tt>, each of size <tt>dim</tt>.
|
59
|
+
# This method performs Monte-Carlo integration of the function <tt>self</tt>
|
60
|
+
# using the algorithm <tt>s</tt>, over the <tt>dim</tt>-dimensional hypercubic
|
61
|
+
# region defined by the lower and upper
|
62
|
+
# limits in the arrays <tt>xl</tt> and <tt>xu</tt>, each of size <tt>dim</tt>.
|
63
63
|
# The integration uses a fixed number of function calls <tt>calls</tt>.
|
64
64
|
# The argument <tt>rng</tt> is a random number generator (optional). If it is not
|
65
65
|
# given, a new generator is created internally and freed when the calculation
|
@@ -67,7 +67,7 @@
|
|
67
67
|
#
|
68
68
|
# See sample scripts <tt>sample/monte*.rb</tt> for more details.
|
69
69
|
#
|
70
|
-
# ==
|
70
|
+
# == Accessing internal state of the Monte Carlo classes
|
71
71
|
# ---
|
72
72
|
# * GSL::Monte::Miser#estimate_frac
|
73
73
|
# * GSL::Monte::Miser#estimate_frac=
|
@@ -102,7 +102,7 @@
|
|
102
102
|
# * GSL::Monte::Vegas#verbose=
|
103
103
|
#
|
104
104
|
#
|
105
|
-
# ==
|
105
|
+
# == Miser Parameters (GSL-1.13 or later)
|
106
106
|
# ---
|
107
107
|
# * GSL::Monte::Miser#params_get
|
108
108
|
#
|
@@ -111,7 +111,7 @@
|
|
111
111
|
# * GSL::Monte::Miser#params_set(params)
|
112
112
|
#
|
113
113
|
# Sets the integrator parameters based on values provided in an object of the <tt>GSL::Monte::Miser::Params</tt> class <tt>params</tt>.
|
114
|
-
# ===
|
114
|
+
# === Accessors of Miser <tt>Params</tt>
|
115
115
|
# ---
|
116
116
|
# * GSL::Monte::Miser::Params#estimate_frac
|
117
117
|
# * GSL::Monte::Miser::Params#estimate_frac=
|
@@ -138,7 +138,7 @@
|
|
138
138
|
#
|
139
139
|
# This parameter introduces a random fractional variation of size dither into each bisection, which can be used to break the symmetry of integrands which are concentrated near the exact center of the hypercubic integration region. The default value of dither is zero, so no variation is introduced. If needed, a typical value of dither is 0.1.
|
140
140
|
#
|
141
|
-
# ==
|
141
|
+
# == Vegas Parameters (GSL-1.13 or later)
|
142
142
|
# ---
|
143
143
|
# * GSL::Monte::Vegas#params_get
|
144
144
|
#
|
@@ -148,7 +148,7 @@
|
|
148
148
|
#
|
149
149
|
# Sets the integrator parameters based on values provided in an object of the <tt>GSL::Monte::Vegas::Params</tt> class <tt>params</tt>.
|
150
150
|
#
|
151
|
-
# ===
|
151
|
+
# === Accessors of Vegas <tt>Params</tt>
|
152
152
|
# ---
|
153
153
|
# * GSL::Monte::Vegas::Params#alpha
|
154
154
|
# * GSL::Monte::Vegas::Params#alpha=
|
@@ -175,7 +175,7 @@
|
|
175
175
|
#
|
176
176
|
# Set the level of information printed by VEGAS. All information is written to the stream ostream. The default setting of verbose is -1, which turns off all output. A verbose value of 0 prints summary information about the weighted average and final result, while a value of 1 also displays the grid coordinates. A value of 2 prints information from the rebinning procedure for each iteration.
|
177
177
|
#
|
178
|
-
# ==
|
178
|
+
# == Example
|
179
179
|
#
|
180
180
|
# #!/usr/bin/env ruby
|
181
181
|
# require("gsl")
|
@@ -220,15 +220,15 @@
|
|
220
220
|
# puts("converging...");
|
221
221
|
# begin
|
222
222
|
# result, error = G.integrate(xl, xu, dim, calls/5, r, vegas)
|
223
|
-
# printf("result = % .6f sigma = % .6f chisq/dof = %.1f\n",
|
223
|
+
# printf("result = % .6f sigma = % .6f chisq/dof = %.1f\n",
|
224
224
|
# result, error, vegas.chisq)
|
225
225
|
# end while (vegas.chisq-1.0).abs > 0.5
|
226
226
|
# display_results("vegas final", result, error)
|
227
227
|
#
|
228
|
-
# {prev}[link:
|
229
|
-
# {next}[link:
|
228
|
+
# {prev}[link:ntuple_rdoc.html]
|
229
|
+
# {next}[link:siman_rdoc.html]
|
230
230
|
#
|
231
|
-
# {Reference index}[link:
|
231
|
+
# {Reference index}[link:ref_rdoc.html]
|
232
232
|
# {top}[link:index.html]
|
233
233
|
#
|
234
234
|
#
|
data/rdoc/multimin.rdoc
CHANGED
@@ -1,78 +1,78 @@
|
|
1
1
|
#
|
2
2
|
# = Multidimensional Minimization
|
3
|
-
# This chapter describes routines for finding minima of arbitrary
|
4
|
-
# multidimensional functions. The library provides low level components for a
|
5
|
-
# variety of iterative minimizers and convergence tests. These can be combined
|
6
|
-
# by the user to achieve the desired solution, while providing full access to
|
7
|
-
# the intermediate steps of the algorithms. Each class of methods uses the
|
8
|
-
# same framework, so that you can switch between minimizers at runtime without
|
9
|
-
# needing to recompile your program. Each instance of a minimizer keeps track
|
10
|
-
# of its own state, allowing the minimizers to be used in multi-threaded
|
11
|
-
# programs.
|
3
|
+
# This chapter describes routines for finding minima of arbitrary
|
4
|
+
# multidimensional functions. The library provides low level components for a
|
5
|
+
# variety of iterative minimizers and convergence tests. These can be combined
|
6
|
+
# by the user to achieve the desired solution, while providing full access to
|
7
|
+
# the intermediate steps of the algorithms. Each class of methods uses the
|
8
|
+
# same framework, so that you can switch between minimizers at runtime without
|
9
|
+
# needing to recompile your program. Each instance of a minimizer keeps track
|
10
|
+
# of its own state, allowing the minimizers to be used in multi-threaded
|
11
|
+
# programs.
|
12
12
|
#
|
13
13
|
# Contents:
|
14
|
-
# 1. {Overview}[link:
|
15
|
-
# 1. {Caveats}[link:
|
16
|
-
# 1. {Initializing the Multidimensional Minimizer}[link:
|
17
|
-
# 1. {Providing a function to minimize}[link:
|
18
|
-
# 1. {Iteration}[link:
|
19
|
-
# 1. {Stopping Criteria}[link:
|
20
|
-
# 1. {Examples}[link:
|
21
|
-
# 1. {FdfMinimizer}[link:
|
22
|
-
# 1. {FMinimizer}[link:
|
23
|
-
#
|
24
|
-
# ==
|
25
|
-
# The problem of multidimensional minimization requires finding a point x such
|
26
|
-
# that the scalar function, takes a value which is lower than at any neighboring
|
27
|
-
# point. For smooth functions the gradient g = \nabla f vanishes at the minimum.
|
28
|
-
# In general there are no bracketing methods available for the minimization of
|
29
|
-
# n-dimensional functions. The algorithms proceed from an initial guess using a
|
30
|
-
# search algorithm which attempts to move in a downhill direction.
|
31
|
-
#
|
32
|
-
# Algorithms making use of the gradient of the function perform a
|
33
|
-
# one-dimensional line minimisation along this direction until the lowest point
|
34
|
-
# is found to a suitable tolerance. The search direction is then updated with
|
35
|
-
# local information from the function and its derivatives, and the whole process
|
36
|
-
# repeated until the true n-dimensional minimum is found.
|
37
|
-
#
|
38
|
-
# The Nelder-Mead Simplex algorithm applies a different strategy. It maintains
|
39
|
-
# n+1 trial parameter vectors as the vertices of a n-dimensional simplex.
|
40
|
-
# In each iteration step it tries to improve the worst vertex by a simple
|
41
|
-
# geometrical transformation until the size of the simplex falls below a given
|
42
|
-
# tolerance.
|
43
|
-
#
|
44
|
-
# Both types of algorithms use a standard framework. The user provides a
|
45
|
-
# high-level driver for the algorithms, and the library provides the individual
|
46
|
-
# functions necessary for each of the steps. There are three main phases of the
|
47
|
-
# iteration. The steps are,
|
48
|
-
#
|
49
|
-
# * initialize minimizer state, s, for algorithm T
|
50
|
-
# * update s using the iteration T
|
51
|
-
# * test s for convergence, and repeat iteration if necessary
|
52
|
-
#
|
53
|
-
# Each iteration step consists either of an improvement to the line-minimisation
|
54
|
-
# in the current direction or an update to the search direction itself. The
|
55
|
-
# state for the minimizers is held in a <tt>GSL::MultiMin::FdfMinimizer</tt> or
|
14
|
+
# 1. {Overview}[link:multimin_rdoc.html#label-Overview]
|
15
|
+
# 1. {Caveats}[link:multimin_rdoc.html#label-Caveats]
|
16
|
+
# 1. {Initializing the Multidimensional Minimizer}[link:multimin_rdoc.html#label-Initializing+the+Multidimensional+Minimizer]
|
17
|
+
# 1. {Providing a function to minimize}[link:multimin_rdoc.html#label-Providing+a+function+to+minimize]
|
18
|
+
# 1. {Iteration}[link:multimin_rdoc.html#label-Iteration]
|
19
|
+
# 1. {Stopping Criteria}[link:multimin_rdoc.html#label-Stopping+Criteria]
|
20
|
+
# 1. {Examples}[link:multimin_rdoc.html#label-Examples]
|
21
|
+
# 1. {FdfMinimizer}[link:multimin_rdoc.html#label-FdfMinimizer]
|
22
|
+
# 1. {FMinimizer}[link:multimin_rdoc.html#label-FMinimizer]
|
23
|
+
#
|
24
|
+
# == Overview
|
25
|
+
# The problem of multidimensional minimization requires finding a point x such
|
26
|
+
# that the scalar function, takes a value which is lower than at any neighboring
|
27
|
+
# point. For smooth functions the gradient g = \nabla f vanishes at the minimum.
|
28
|
+
# In general there are no bracketing methods available for the minimization of
|
29
|
+
# n-dimensional functions. The algorithms proceed from an initial guess using a
|
30
|
+
# search algorithm which attempts to move in a downhill direction.
|
31
|
+
#
|
32
|
+
# Algorithms making use of the gradient of the function perform a
|
33
|
+
# one-dimensional line minimisation along this direction until the lowest point
|
34
|
+
# is found to a suitable tolerance. The search direction is then updated with
|
35
|
+
# local information from the function and its derivatives, and the whole process
|
36
|
+
# repeated until the true n-dimensional minimum is found.
|
37
|
+
#
|
38
|
+
# The Nelder-Mead Simplex algorithm applies a different strategy. It maintains
|
39
|
+
# n+1 trial parameter vectors as the vertices of a n-dimensional simplex.
|
40
|
+
# In each iteration step it tries to improve the worst vertex by a simple
|
41
|
+
# geometrical transformation until the size of the simplex falls below a given
|
42
|
+
# tolerance.
|
43
|
+
#
|
44
|
+
# Both types of algorithms use a standard framework. The user provides a
|
45
|
+
# high-level driver for the algorithms, and the library provides the individual
|
46
|
+
# functions necessary for each of the steps. There are three main phases of the
|
47
|
+
# iteration. The steps are,
|
48
|
+
#
|
49
|
+
# * initialize minimizer state, s, for algorithm T
|
50
|
+
# * update s using the iteration T
|
51
|
+
# * test s for convergence, and repeat iteration if necessary
|
52
|
+
#
|
53
|
+
# Each iteration step consists either of an improvement to the line-minimisation
|
54
|
+
# in the current direction or an update to the search direction itself. The
|
55
|
+
# state for the minimizers is held in a <tt>GSL::MultiMin::FdfMinimizer</tt> or
|
56
56
|
# a <tt>GSL::MultiMin::FMinimizer</tt> object.
|
57
57
|
#
|
58
|
-
# ==
|
59
|
-
# Note that the minimization algorithms can only search for one local minimum
|
60
|
-
# at a time. When there are several local minima in the search area, the first
|
61
|
-
# minimum to be found will be returned; however it is difficult to predict which
|
62
|
-
# of the minima this will be. In most cases, no error will be reported if you
|
63
|
-
# try to find a local minimum in an area where there is more than one.
|
58
|
+
# == Caveats
|
59
|
+
# Note that the minimization algorithms can only search for one local minimum
|
60
|
+
# at a time. When there are several local minima in the search area, the first
|
61
|
+
# minimum to be found will be returned; however it is difficult to predict which
|
62
|
+
# of the minima this will be. In most cases, no error will be reported if you
|
63
|
+
# try to find a local minimum in an area where there is more than one.
|
64
64
|
#
|
65
|
-
# It is also important to note that the minimization algorithms find local
|
66
|
-
# minima; there is no way to determine whether a minimum is a global minimum of
|
67
|
-
# the function in question.
|
65
|
+
# It is also important to note that the minimization algorithms find local
|
66
|
+
# minima; there is no way to determine whether a minimum is a global minimum of
|
67
|
+
# the function in question.
|
68
68
|
#
|
69
69
|
#
|
70
|
-
# ==
|
70
|
+
# == Initializing the Multidimensional Minimizer
|
71
71
|
# ---
|
72
72
|
# * GSL::MultiMin::FdfMinimizer.alloc(type, n)
|
73
73
|
# * GSL::MultiMin::FMinimizer.alloc(type, n)
|
74
74
|
#
|
75
|
-
# These method create a minimizer of type <tt>type</tt> for an <tt>n</tt>-dimension function.
|
75
|
+
# These methods create a minimizer of type <tt>type</tt> for an <tt>n</tt>-dimensional function.
|
76
76
|
# The type is given by a string, or by a Ruby constant.
|
77
77
|
#
|
78
78
|
# * <tt>GSL::MultiMin::FdfMinimizer::CONJUGATE_FR</tt> or <tt>"conjugate_fr"</tt>
|
@@ -93,17 +93,17 @@
|
|
93
93
|
# ---
|
94
94
|
# * GSL::MultiMin::FdfMinimizer#set(func, x, step_size, tol)
|
95
95
|
#
|
96
|
-
# This method initializes the minimizer <tt>self</tt> to minimize the function
|
97
|
-
# <tt>fdf</tt> (the <tt>GSL::MultiMin::Function_fdf</tt> class, see below) starting from
|
98
|
-
# the initial point <tt>x</tt> (<tt>GSL::Vector</tt>). The size of the first trial step is
|
99
|
-
# given by <tt>step_size</tt> (<tt>Vector</tt>). The accuracy of the line minimization is
|
96
|
+
# This method initializes the minimizer <tt>self</tt> to minimize the function
|
97
|
+
# <tt>fdf</tt> (the <tt>GSL::MultiMin::Function_fdf</tt> class, see below) starting from
|
98
|
+
# the initial point <tt>x</tt> (<tt>GSL::Vector</tt>). The size of the first trial step is
|
99
|
+
# given by <tt>step_size</tt> (<tt>Vector</tt>). The accuracy of the line minimization is
|
100
100
|
# specified by <tt>tol</tt>.
|
101
101
|
#
|
102
102
|
# ---
|
103
103
|
# * GSL::MultiMin::FMinimizer#set(func, x, step_size)
|
104
104
|
#
|
105
|
-
# This method initializes the minimizer <tt>self</tt> to minimize the function <tt>func</tt>,
|
106
|
-
# starting from the initial point <tt>x</tt> (Vector). The size of the initial trial steps
|
105
|
+
# This method initializes the minimizer <tt>self</tt> to minimize the function <tt>func</tt>,
|
106
|
+
# starting from the initial point <tt>x</tt> (Vector). The size of the initial trial steps
|
107
107
|
# is given in vector <tt>step_size</tt>.
|
108
108
|
#
|
109
109
|
# ---
|
@@ -112,10 +112,10 @@
|
|
112
112
|
#
|
113
113
|
# These return the name of the minimizer <tt>self</tt>.
|
114
114
|
#
|
115
|
-
# ==
|
116
|
-
# You must provide a parametric function of <tt>n</tt> variables for the minimizers to
|
117
|
-
# operate on. You may also need to provide a routine which calculates the gradient of the
|
118
|
-
# function. In order to allow for general parameters the functions are defined by the
|
115
|
+
# == Providing a function to minimize
|
116
|
+
# You must provide a parametric function of <tt>n</tt> variables for the minimizers to
|
117
|
+
# operate on. You may also need to provide a routine which calculates the gradient of the
|
118
|
+
# function. In order to allow for general parameters the functions are defined by the
|
119
119
|
# classes, <tt>GSL::MultiMin::Function_fdf</tt> and <tt>GSL::MultiMin::Function</tt>.
|
120
120
|
#
|
121
121
|
# ---
|
@@ -166,14 +166,14 @@
|
|
166
166
|
# my_func = Function.alloc(my_f, np)
|
167
167
|
# my_func.set_params([1.0, 2.0]) # parameters
|
168
168
|
#
|
169
|
-
# ==
|
169
|
+
# == Iteration
|
170
170
|
# ---
|
171
171
|
# * GSL::MultiMin::FdfMinimizer#iterate
|
172
172
|
# * GSL::MultiMin::FMinimizer#iterate
|
173
173
|
#
|
174
|
-
# These methods perform a single iteration of the minimizer <tt>self</tt>.
|
174
|
+
# These methods perform a single iteration of the minimizer <tt>self</tt>.
|
175
175
|
# If the iteration encounters an unexpected problem then an error code will be returned.
|
176
|
-
# The minimizer maintains a current best estimate of the minimum at all times.
|
176
|
+
# The minimizer maintains a current best estimate of the minimum at all times.
|
177
177
|
# This information can be accessed with the following methods,
|
178
178
|
#
|
179
179
|
# ---
|
@@ -184,48 +184,48 @@
|
|
184
184
|
# * GSL::MultiMin::FMinimizer#minimum
|
185
185
|
# * GSL::MultiMin::FMinimizer#size
|
186
186
|
#
|
187
|
-
# These method return the current best estimate of the location of the minimum,
|
188
|
-
# the value of the function at that point, its gradient, and minimizer specific
|
187
|
+
# These methods return the current best estimate of the location of the minimum,
|
188
|
+
# the value of the function at that point, its gradient, and minimizer specific
|
189
189
|
# characteristic size for the minimizer <tt>self</tt>.
|
190
190
|
#
|
191
191
|
# ---
|
192
192
|
# * GSL::MultiMin::FdfMinimizer#restart
|
193
193
|
#
|
194
|
-
# This method resets the minimizer <tt>self</tt> to use the current point as a new
|
194
|
+
# This method resets the minimizer <tt>self</tt> to use the current point as a new
|
195
195
|
# starting point.
|
196
196
|
#
|
197
|
-
# ==
|
197
|
+
# == Stopping Criteria
|
198
198
|
# A minimization procedure should stop when one of the following conditions is true:
|
199
199
|
# * A minimum has been found to within the user-specified precision.
|
200
200
|
# * A user-specified maximum number of iterations has been reached.
|
201
201
|
# * An error has occurred.
|
202
|
-
# The handling of these conditions is under user control. The methods below allow the
|
202
|
+
# The handling of these conditions is under user control. The methods below allow the
|
203
203
|
# user to test the precision of the current result.
|
204
204
|
#
|
205
205
|
# ---
|
206
206
|
# * GSL::MultiMin::FdfMinimizer#test_gradient(epsabs)
|
207
207
|
# * GSL::MultiMin::FdfMinimizer.test_gradient(g, epsabs)
|
208
208
|
#
|
209
|
-
# These method test the norm of the gradient <tt>g</tt> against the absolute tolerance
|
210
|
-
# <tt>epsabs</tt>. The gradient of a multidimensional function goes to zero at a minimum.
|
209
|
+
# These methods test the norm of the gradient <tt>g</tt> against the absolute tolerance
|
210
|
+
# <tt>epsabs</tt>. The gradient of a multidimensional function goes to zero at a minimum.
|
211
211
|
# The tests return <tt>GSL::SUCCESS</tt> if the following condition is achieved,
|
212
212
|
# |g| < epsabs
|
213
|
-
# and returns <tt>GSL::CONTINUE</tt> otherwise. A suitable choice of <tt>epsabs</tt> can
|
214
|
-
# be made from the desired accuracy in the function for small variations in <tt>x</tt>.
|
213
|
+
# and returns <tt>GSL::CONTINUE</tt> otherwise. A suitable choice of <tt>epsabs</tt> can
|
214
|
+
# be made from the desired accuracy in the function for small variations in <tt>x</tt>.
|
215
215
|
# The relationship between these quantities is given by <tt>\delta f = g \delta x</tt>.
|
216
216
|
#
|
217
217
|
# ---
|
218
218
|
# * GSL::MultiMin::FdfMinimizer#test_size(epsabs)
|
219
219
|
# * GSL::MultiMin::FdfMinimizer.test_size(size, epsabs)
|
220
220
|
#
|
221
|
-
# These method test the minimizer specific characteristic <tt>size</tt>
|
222
|
-
# (if applicable to the used minimizer) against absolute tolerance <tt>epsabs</tt>.
|
223
|
-
# The tests return (<tt>GSL::SUCCESS</tt> if the size is smaller than tolerance,
|
221
|
+
# These methods test the minimizer specific characteristic <tt>size</tt>
|
222
|
+
# (if applicable to the used minimizer) against absolute tolerance <tt>epsabs</tt>.
|
223
|
+
# The tests return <tt>GSL::SUCCESS</tt> if the size is smaller than tolerance,
|
224
224
|
# otherwise <tt>GSL::CONTINUE</tt> is returned.
|
225
225
|
#
|
226
|
-
# ==
|
226
|
+
# == Examples
|
227
227
|
#
|
228
|
-
# ===
|
228
|
+
# === FdfMinimizer
|
229
229
|
# #!/usr/bin/env ruby
|
230
230
|
# require("gsl")
|
231
231
|
# include GSL::MultiMin
|
@@ -264,13 +264,13 @@
|
|
264
264
|
# printf("%5d %.5f %.5f %10.5f\n", iter, x[0], x[1], f)
|
265
265
|
# end while status == GSL::CONTINUE and iter < 100
|
266
266
|
#
|
267
|
-
# ===
|
267
|
+
# === FMinimizer
|
268
268
|
# #!/usr/bin/env ruby
|
269
269
|
# require("gsl")
|
270
270
|
# include GSL::MultiMin
|
271
271
|
#
|
272
272
|
# np = 2
|
273
|
-
#
|
273
|
+
#
|
274
274
|
# my_f = Proc.new { |v, params|
|
275
275
|
# x = v[0]; y = v[1]
|
276
276
|
# p0 = params[0]; p1 = params[1]
|
@@ -303,10 +303,10 @@
|
|
303
303
|
# printf("f() = %7.3f size = %.3f\n", minimizer.fval, minimizer.size);
|
304
304
|
# end while status == GSL::CONTINUE and iter < 100
|
305
305
|
#
|
306
|
-
# {prev}[link:
|
307
|
-
# {next}[link:
|
306
|
+
# {prev}[link:multiroot_rdoc.html]
|
307
|
+
# {next}[link:fit_rdoc.html]
|
308
308
|
#
|
309
|
-
# {Reference index}[link:
|
309
|
+
# {Reference index}[link:ref_rdoc.html]
|
310
310
|
# {top}[link:index.html]
|
311
311
|
#
|
312
312
|
#
|