rb-gsl 1.16.0.2 → 1.16.0.3.rc1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/ChangeLog +5 -0
- data/README +2 -2
- data/Rakefile +2 -3
- data/lib/gsl/version.rb +1 -1
- data/rdoc/alf.rdoc +5 -5
- data/rdoc/blas.rdoc +8 -8
- data/rdoc/bspline.rdoc +16 -16
- data/rdoc/changes.rdoc +4 -9
- data/rdoc/cheb.rdoc +24 -24
- data/rdoc/cholesky_complex.rdoc +21 -21
- data/rdoc/combi.rdoc +36 -36
- data/rdoc/complex.rdoc +21 -21
- data/rdoc/const.rdoc +46 -46
- data/rdoc/dht.rdoc +48 -48
- data/rdoc/diff.rdoc +41 -41
- data/rdoc/ehandling.rdoc +5 -5
- data/rdoc/eigen.rdoc +152 -152
- data/rdoc/fft.rdoc +145 -145
- data/rdoc/fit.rdoc +108 -108
- data/rdoc/function.rdoc +10 -10
- data/rdoc/graph.rdoc +16 -16
- data/rdoc/hist.rdoc +102 -102
- data/rdoc/hist2d.rdoc +41 -41
- data/rdoc/hist3d.rdoc +8 -8
- data/rdoc/index.rdoc +18 -21
- data/rdoc/integration.rdoc +109 -109
- data/rdoc/interp.rdoc +70 -70
- data/rdoc/intro.rdoc +6 -6
- data/rdoc/linalg.rdoc +187 -187
- data/rdoc/linalg_complex.rdoc +1 -1
- data/rdoc/math.rdoc +57 -57
- data/rdoc/matrix.rdoc +272 -272
- data/rdoc/min.rdoc +56 -56
- data/rdoc/monte.rdoc +21 -21
- data/rdoc/multimin.rdoc +94 -94
- data/rdoc/multiroot.rdoc +79 -79
- data/rdoc/narray.rdoc +31 -31
- data/rdoc/ndlinear.rdoc +53 -53
- data/rdoc/nonlinearfit.rdoc +99 -99
- data/rdoc/ntuple.rdoc +30 -30
- data/rdoc/odeiv.rdoc +87 -87
- data/rdoc/perm.rdoc +89 -89
- data/rdoc/poly.rdoc +65 -65
- data/rdoc/qrng.rdoc +20 -20
- data/rdoc/randist.rdoc +81 -81
- data/rdoc/ref.rdoc +56 -56
- data/rdoc/rng.rdoc +84 -84
- data/rdoc/roots.rdoc +56 -56
- data/rdoc/sf.rdoc +427 -427
- data/rdoc/siman.rdoc +18 -18
- data/rdoc/sort.rdoc +29 -29
- data/rdoc/start.rdoc +8 -8
- data/rdoc/stats.rdoc +51 -51
- data/rdoc/sum.rdoc +11 -11
- data/rdoc/tensor.rdoc +30 -30
- data/rdoc/tut.rdoc +1 -1
- data/rdoc/use.rdoc +37 -37
- data/rdoc/vector.rdoc +187 -187
- data/rdoc/vector_complex.rdoc +23 -23
- data/rdoc/wavelet.rdoc +46 -46
- metadata +17 -20
- data/rdoc/rngextra.rdoc +0 -11
- data/rdoc/screenshot.rdoc +0 -40
data/rdoc/fit.rdoc
CHANGED
@@ -1,92 +1,92 @@
 #
 # = Least-Squares Fitting
-# This chapter describes routines for performing least squares fits to
-# experimental data using linear combinations of functions. The data may be
-# weighted or unweighted, i.e. with known or unknown errors. For weighted data
-# the functions compute the best fit parameters and their associated covariance
-# matrix. For unweighted data the covariance matrix is estimated from the
-# scatter of the points, giving a variance-covariance matrix.
+# This chapter describes routines for performing least squares fits to
+# experimental data using linear combinations of functions. The data may be
+# weighted or unweighted, i.e. with known or unknown errors. For weighted data
+# the functions compute the best fit parameters and their associated covariance
+# matrix. For unweighted data the covariance matrix is estimated from the
+# scatter of the points, giving a variance-covariance matrix.
 #
-# The functions are divided into separate versions for simple one- or
-# two-parameter regression and multiple-parameter fits.
+# The functions are divided into separate versions for simple one- or
+# two-parameter regression and multiple-parameter fits.
 #
 # Contents:
-# 1. {Overview}[link:
-# 1. {Linear regression}[link:
-# 1. {Module functions for linear regression}[link:
-# 1. {Linear fitting without a constant term}[link:
-# 1. {Multi-parameter fitting}[link:
-# 1. {GSL::MultiFit::Workspace class}[link:
-# 1. {Module functions}[link:
-# 1. {Higer level interface}[link:
-# 1. {NDLINEAR: multi-linear, multi-parameter least squares fitting}[link:
-# 1. {Examples}[link:
-# 1. {Linear regression}[link:
-# 1. {Exponential fitting}[link:
-# 1. {Multi-parameter fitting}[link:
-#
-# ==
-# Least-squares fits are found by minimizing \chi^2 (chi-squared), the weighted
-# sum of squared residuals over n experimental datapoints (x_i, y_i) for the
+# 1. {Overview}[link:fit_rdoc.html#label-Overview]
+# 1. {Linear regression}[link:fit_rdoc.html#label-Linear+regression]
+# 1. {Module functions for linear regression}[link:fit_rdoc.html#label-Module+functions+for+linear+regression]
+# 1. {Linear fitting without a constant term}[link:fit_rdoc.html#label-Linear+fitting+without+a+constant+term]
+# 1. {Multi-parameter fitting}[link:fit_rdoc.html#label-Multi-parameter+fitting]
+# 1. {GSL::MultiFit::Workspace class}[link:fit_rdoc.html#label-Workspace+class]
+# 1. {Module functions}[link:fit_rdoc.html#label-Module+functions]
+# 1. {Higer level interface}[link:fit_rdoc.html#label-Higer+level+interface]
+# 1. {NDLINEAR: multi-linear, multi-parameter least squares fitting}[link:ndlinear_rdoc.html] (GSL extension)
+# 1. {Examples}[link:fit_rdoc.html#label-Examples]
+# 1. {Linear regression}[link:fit_rdoc.html#label-Linear+regression]
+# 1. {Exponential fitting}[link:fit_rdoc.html#label-Exponential+fitting]
+# 1. {Multi-parameter fitting}[link:fit_rdoc.html#label-Multi-parameter+fitting]
+#
+# == Overview
+# Least-squares fits are found by minimizing \chi^2 (chi-squared), the weighted
+# sum of squared residuals over n experimental datapoints (x_i, y_i) for the
 # model Y(c,x), The p parameters of the model are c = {c_0, c_1, ...}. The
 # weight factors w_i are given by w_i = 1/\sigma_i^2, where \sigma_i is the
 # experimental error on the data-point y_i. The errors are assumed to be
 # gaussian and uncorrelated. For unweighted data the chi-squared sum is
 # computed without any weight factors.
 #
-# The fitting routines return the best-fit parameters c and their p \times p
-# covariance matrix. The covariance matrix measures the statistical errors on
-# the best-fit parameters resulting from the errors on the data, \sigma_i, and
-# is defined as C_{ab} = <\delta c_a \delta c_b> where < > denotes an average
-# over the gaussian error distributions of the underlying datapoints.
+# The fitting routines return the best-fit parameters c and their p \times p
+# covariance matrix. The covariance matrix measures the statistical errors on
+# the best-fit parameters resulting from the errors on the data, \sigma_i, and
+# is defined as C_{ab} = <\delta c_a \delta c_b> where < > denotes an average
+# over the gaussian error distributions of the underlying datapoints.
 #
-# The covariance matrix is calculated by error propagation from the data errors
-# \sigma_i. The change in a fitted parameter \delta c_a caused by a small change
+# The covariance matrix is calculated by error propagation from the data errors
+# \sigma_i. The change in a fitted parameter \delta c_a caused by a small change
 # in the data \delta y_i is given by allowing the covariance matrix to be written
-# in terms of the errors on the data, For uncorrelated data the fluctuations of
-# the underlying datapoints satisfy
-# <\delta y_i \delta y_j> = \sigma_i^2 \delta_{ij}, giving a corresponding
-# parameter covariance matrix of When computing the covariance matrix for
-# unweighted data, i.e. data with unknown errors, the weight factors w_i in this
-# sum are replaced by the single estimate w = 1/\sigma^2, where \sigma^2 is the
-# computed variance of the residuals about the
-# best-fit model, \sigma^2 = \sum (y_i - Y(c,x_i))^2 / (n-p).
-# This is referred to as the variance-covariance matrix.
-#
-# The standard deviations of the best-fit parameters are given by the square
-# root of the corresponding diagonal elements of the covariance matrix,
-# \sigma_{c_a} = \sqrt{C_{aa}}. The correlation coefficient of the fit
-# parameters c_a and c_b is given by \rho_{ab} = C_{ab} / \sqrt{C_{aa} C_{bb}}.
-#
-#
-# ==
-# The functions described in this section can be used to perform least-squares
-# fits to a straight line model, Y = c_0 + c_1 X. For weighted data the best-fit
+# in terms of the errors on the data, For uncorrelated data the fluctuations of
+# the underlying datapoints satisfy
+# <\delta y_i \delta y_j> = \sigma_i^2 \delta_{ij}, giving a corresponding
+# parameter covariance matrix of When computing the covariance matrix for
+# unweighted data, i.e. data with unknown errors, the weight factors w_i in this
+# sum are replaced by the single estimate w = 1/\sigma^2, where \sigma^2 is the
+# computed variance of the residuals about the
+# best-fit model, \sigma^2 = \sum (y_i - Y(c,x_i))^2 / (n-p).
+# This is referred to as the variance-covariance matrix.
+#
+# The standard deviations of the best-fit parameters are given by the square
+# root of the corresponding diagonal elements of the covariance matrix,
+# \sigma_{c_a} = \sqrt{C_{aa}}. The correlation coefficient of the fit
+# parameters c_a and c_b is given by \rho_{ab} = C_{ab} / \sqrt{C_{aa} C_{bb}}.
+#
+#
+# == Linear regression
+# The functions described in this section can be used to perform least-squares
+# fits to a straight line model, Y = c_0 + c_1 X. For weighted data the best-fit
 # is found by minimizing the weighted sum of squared residuals, chi^2,
 #
 # chi^2 = sum_i w_i (y_i - (c0 + c1 x_i))^2
 #
-# for the parameters <tt>c0, c1</tt>. For unweighted data the sum is computed with
+# for the parameters <tt>c0, c1</tt>. For unweighted data the sum is computed with
 # <tt>w_i = 1</tt>.
 #
-# ===
+# === Module functions for linear regression
 # ---
 # * GSL::Fit::linear(x, y)
 #
-# This function computes the best-fit linear regression coefficients (c0,c1)
-# of the model Y = c0 + c1 X for the datasets <tt>(x, y)</tt>, two vectors of
-# equal length with stride 1. This returns an array of 7 elements,
+# This function computes the best-fit linear regression coefficients (c0,c1)
+# of the model Y = c0 + c1 X for the datasets <tt>(x, y)</tt>, two vectors of
+# equal length with stride 1. This returns an array of 7 elements,
 # <tt>[c0, c1, cov00, cov01, cov11, chisq, status]</tt>, where <tt>c0, c1</tt> are the
-# estimated parameters, <tt>cov00, cov01, cov11</tt> are the variance-covariance
+# estimated parameters, <tt>cov00, cov01, cov11</tt> are the variance-covariance
 # matrix elements, <tt>chisq</tt> is the sum of squares of the residuals, and
 # <tt>status</tt> is the return code from the GSL function <tt>gsl_fit_linear()</tt>.
 #
 # ---
 # * GSL::Fit::wlinear(x, w, y)
 #
-# This function computes the best-fit linear regression coefficients (c0,c1)
-# of the model Y = c_0 + c_1 X for the weighted datasets <tt>(x, y)</tt>.
-# The vector <tt>w</tt>, specifies the weight of each datapoint, which is the
+# This function computes the best-fit linear regression coefficients (c0,c1)
+# of the model Y = c_0 + c_1 X for the weighted datasets <tt>(x, y)</tt>.
+# The vector <tt>w</tt>, specifies the weight of each datapoint, which is the
 # reciprocal of the variance for each datapoint in <tt>y</tt>. This returns an
 # array of 7 elements, same as the method <tt>linear</tt>.
 #
@@ -94,82 +94,82 @@
 # * GSL::Fit::linear_est(x, c0, c1, c00, c01, c11)
 # * GSL::Fit::linear_est(x, [c0, c1, c00, c01, c11])
 #
-# This function uses the best-fit linear regression coefficients <tt>c0,c1</tt> and
-# their estimated covariance <tt>cov00,cov01,cov11</tt> to compute the fitted function
+# This function uses the best-fit linear regression coefficients <tt>c0,c1</tt> and
+# their estimated covariance <tt>cov00,cov01,cov11</tt> to compute the fitted function
 # and its standard deviation for the model Y = c_0 + c_1 X at the point <tt>x</tt>.
 # The returned value is an array of <tt>[y, yerr]</tt>.
 #
-# ==
+# == Linear fitting without a constant term
 # ---
 # * GSL::Fit::mul(x, y)
 #
-# This function computes the best-fit linear regression coefficient <tt>c1</tt>
-# of the model Y = c1 X for the datasets <tt>(x, y)</tt>, two vectors of
-# equal length with stride 1. This returns an array of 4 elements,
+# This function computes the best-fit linear regression coefficient <tt>c1</tt>
+# of the model Y = c1 X for the datasets <tt>(x, y)</tt>, two vectors of
+# equal length with stride 1. This returns an array of 4 elements,
 # <tt>[c1, cov11, chisq, status]</tt>.
 #
 # ---
 # * GSL::Fit::wmul(x, w, y)
 #
-# This function computes the best-fit linear regression coefficient <tt>c1</tt>
-# of the model Y = c_1 X for the weighted datasets <tt>(x, y)</tt>. The vector
-# <tt>w</tt> specifies the weight of each datapoint. The weight is the reciprocal
+# This function computes the best-fit linear regression coefficient <tt>c1</tt>
+# of the model Y = c_1 X for the weighted datasets <tt>(x, y)</tt>. The vector
+# <tt>w</tt> specifies the weight of each datapoint. The weight is the reciprocal
 # of the variance for each datapoint in <tt>y</tt>.
 #
 # ---
 # * GSL::Fit::mul_est(x, c1, c11)
 # * GSL::Fit::mul_est(x, [c1, c11])
 #
-# This function uses the best-fit linear regression coefficient <tt>c1</tt>
-# and its estimated covariance <tt>cov11</tt> to compute the fitted function
-# <tt>y</tt> and its standard deviation <tt>y_err</tt>
-# for the model Y = c_1 X at the point <tt>x</tt>.
+# This function uses the best-fit linear regression coefficient <tt>c1</tt>
+# and its estimated covariance <tt>cov11</tt> to compute the fitted function
+# <tt>y</tt> and its standard deviation <tt>y_err</tt>
+# for the model Y = c_1 X at the point <tt>x</tt>.
 # The returned value is an array of <tt>[y, yerr]</tt>.
 #
-# ==
-# ===
+# == Multi-parameter fitting
+# === Workspace class
 # ---
 # * GSL::MultiFit::Workspace.alloc(n, p)
 #
-# This creates a workspace for fitting a model to <tt>n</tt>
+# This creates a workspace for fitting a model to <tt>n</tt>
 # observations using <tt>p</tt> parameters.
 #
-# ===
+# === Module functions
 # ---
 # * GSL::MultiFit::linear(X, y, work)
 # * GSL::MultiFit::linear(X, y)
 #
-# This function computes the best-fit parameters <tt>c</tt> of the model <tt>y = X c</tt>
-# for the observations <tt>y</tt> and the matrix of predictor variables <tt>X</tt>.
-# The variance-covariance matrix of the model parameters <tt>cov</tt> is estimated
-# from the scatter of the observations about the best-fit. The sum of squares
+# This function computes the best-fit parameters <tt>c</tt> of the model <tt>y = X c</tt>
+# for the observations <tt>y</tt> and the matrix of predictor variables <tt>X</tt>.
+# The variance-covariance matrix of the model parameters <tt>cov</tt> is estimated
+# from the scatter of the observations about the best-fit. The sum of squares
 # of the residuals from the best-fit is also calculated. The returned value is
 # an array of 4 elements, <tt>[c, cov, chisq, status]</tt>, where <tt>c</tt> is a
-# {GSL::Vector}[link:
-# and <tt>cov</tt> is the variance-covariance matrix as a
-# {GSL::Matrix}[link:
+# {GSL::Vector}[link:vector_rdoc.html] object which contains the best-fit parameters,
+# and <tt>cov</tt> is the variance-covariance matrix as a
+# {GSL::Matrix}[link:matrix_rdoc.html] object.
 #
-# The best-fit is found by singular value decomposition of the matrix <tt>X</tt>
+# The best-fit is found by singular value decomposition of the matrix <tt>X</tt>
 # using the workspace provided in <tt>work</tt> (optional, if not given, it is allocated
-# internally).
-# The modified Golub-Reinsch SVD algorithm is used, with column scaling to improve
-# the accuracy of the singular values. Any components which have zero singular
+# internally).
+# The modified Golub-Reinsch SVD algorithm is used, with column scaling to improve
+# the accuracy of the singular values. Any components which have zero singular
 # value (to machine precision) are discarded from the fit.
 #
 # ---
 # * GSL::MultiFit::wlinear(X, w, y, work)
 # * GSL::MultiFit::wlinear(X, w, y)
 #
-# This function computes the best-fit parameters <tt>c</tt> of the model
-# <tt>y = X c</tt> for the observations <tt>y</tt> and the matrix of predictor
-# variables <tt>X</tt>. The covariance matrix of the model parameters
+# This function computes the best-fit parameters <tt>c</tt> of the model
+# <tt>y = X c</tt> for the observations <tt>y</tt> and the matrix of predictor
+# variables <tt>X</tt>. The covariance matrix of the model parameters
 # <tt>cov</tt> is estimated from the weighted data. The weighted sum of
-# squares of the residuals from the best-fit is also calculated.
-# The returned value is an array of 4 elements,
+# squares of the residuals from the best-fit is also calculated.
+# The returned value is an array of 4 elements,
 # <tt>[c: Vector, cov: Matrix, chisq: Float, status: Fixnum]</tt>.
-# The best-fit is found by singular value decomposition of the matrix <tt>X</tt>
-# using the workspace provided in <tt>work</tt> (optional). Any components
-# which have
+# The best-fit is found by singular value decomposition of the matrix <tt>X</tt>
+# using the workspace provided in <tt>work</tt> (optional). Any components
+# which have
 # zero singular value (to machine precision) are discarded from the fit.
 #
 # ---
@@ -181,12 +181,12 @@
 #
 # (GSL-1.11 or later) This method computes the vector of residuals <tt>r = y - X c</tt> for the observations <tt>y</tt>, coefficients <tt>c</tt> and matrix of predictor variables <tt>X</tt>, and returns <tt>r</tt>.
 #
-# ===
+# === Higer level interface
 #
 # ---
 # * GSL::MultiFit::polyfit(x, y, order)
 #
-# Finds the coefficient of a polynomial of order <tt>order</tt>
+# Finds the coefficient of a polynomial of order <tt>order</tt>
 # that fits the vector data (<tt>x, y</tt>) in a least-square sense.
 #
 # Example:
@@ -196,13 +196,13 @@
 # x = Vector[1, 2, 3, 4, 5]
 # y = Vector[5.5, 43.1, 128, 290.7, 498.4]
 # # The results are stored in a polynomial "coef"
-# coef, err, chisq, status = MultiFit.polyfit(x, y, 3)
+# coef, err, chisq, status = MultiFit.polyfit(x, y, 3)
 #
 # x2 = Vector.linspace(1, 5, 20)
 # graph([x, y], [x2, coef.eval(x2)], "-C -g 3 -S 4")
 #
-# ==
-# ===
+# == Examples
+# === Linear regression
 # #!/usr/bin/env ruby
 # require("gsl")
 # include GSL::Fit
@@ -220,11 +220,11 @@
 #
 # printf("# best fit: Y = %g + %g X\n", c0, c1);
 # printf("# covariance matrix:\n");
-# printf("# [ %g, %g\n# %g, %g]\n",
+# printf("# [ %g, %g\n# %g, %g]\n",
 # cov00, cov01, cov01, cov11);
 # printf("# chisq = %g\n", chisq);
 #
-# ===
+# === Exponential fitting
 # #!/usr/bin/env ruby
 # require("gsl")
 #
@@ -245,13 +245,13 @@
 # printf("Result: a = %f, b = %f\n", A, b2)
 # graph([x, y], [x2, A*Sf::exp(b2*x2)], "-C -g 3 -S 4")
 #
-# ===
+# === Multi-parameter fitting
 # #!/usr/bin/env ruby
 # require("gsl")
 # include GSL::MultiFit
 #
 # Rng.env_setup()
-#
+#
 # r = GSL::Rng.alloc(Rng::DEFAULT)
 # n = 19
 # dim = 3
@@ -275,10 +275,10 @@
 #
 # c, cov, chisq, status = MultiFit.wlinear(X, w, y)
 #
-# {prev}[link:
-# {next}[link:
+# {prev}[link:multimin_rdoc.html]
+# {next}[link:nonlinearfit_rdoc.html]
 #
-# {Reference index}[link:
+# {Reference index}[link:ref_rdoc.html]
 # {top}[link:index.html]
 #
 #
data/rdoc/function.rdoc
CHANGED
@@ -1,13 +1,13 @@
 #
 # = GSL::Function class
 #
-# ==
+# == Class Methods
 #
 # ---
 # * GSL::Function.alloc
 #
 # Constructor.
-#
+#
 # * ex:
 # require("gsl")
 # f = GSL::Function.alloc { |x| sin(x) }
@@ -18,22 +18,22 @@
 #
 # The function can have parameters of arbitrary numbers. Here is an
 # example in case of exponential function <tt>f(x; a, b) = a*exp(-b*x)</tt>.
-#
+#
 # f = GSL::Function.alloc { |x, params| # x: a scalar, params: an array
 # a = params[0]; b = params[1]
 # a*exp(-b*x)
 # }
-# To evaluate the function <tt>f(x) = 2*exp(-3*x)</tt>,
+# To evaluate the function <tt>f(x) = 2*exp(-3*x)</tt>,
 # f.set_params([2, 3])
 # f.eval(x)
 #
-# ==
+# == Methods
 #
 # ---
 # * GSL::Function#eval(x)
 # * GSL::Function#call(x)
 # * GSL::Function#at(x)
-# * GSL::Function#[x]
+# * \GSL::Function#[x]
 #
 # These methods return a value of the function at <tt>x</tt>.
 # p f.eval(2.5)
@@ -57,7 +57,7 @@
 #
 # This set the constant parameters of the function.
 #
-# ==
+# == Graph
 # ---
 # * GSL::Function#graph(x[, options])
 #
@@ -70,12 +70,12 @@
 # f.graph(x, "-T X -g 3 -C -L 'sin(x)'")
 #
 #
-# ==
+# == Example
 # A quadratic function, f(x) = x^2 + 2x + 3.
 #
 # >> require("gsl")
 # => true
-# >> f = Function.alloc { |x, param| x*x + param[0]*x + param[1] }
+# >> f = Function.alloc { |x, param| x*x + param[0]*x + param[1] }
 # => #<GSL::Function:0x6e8eb0>
 # >> f.set_params(2, 3)
 # => #<GSL::Function:0x6e8eb0>
@@ -86,7 +86,7 @@
 # >> f.eval([1, 2, 3]) <--- Array
 # => [6.0, 11.0, 18.0]
 # >> f.eval(Matrix.alloc([1, 2], [3, 4])) <--- GSL::Matrix
-# [ 6.000e+00 1.100e+01
+# [ 6.000e+00 1.100e+01
 # 1.800e+01 2.700e+01 ]
 # => #<GSL::Matrix:0x6dd1b4>
 #
data/rdoc/graph.rdoc
CHANGED
@@ -2,18 +2,18 @@
 # = Graphics
 #
 # The GSL library itself does not include any utilities to visualize computation results.
-# Some examples found in the GSL manual use
-# {GNU graph}[
+# Some examples found in the GSL manual use
+# {GNU graph}[https://gnu.org/software/plotutils/plotutils.html]
 # to show the results: the data are stored in data files, and then
 # displayed by using <tt>GNU graph</tt>.
 # Ruby/GSL provides simple interfaces to <tt>GNU graph</tt>
 # to plot vectors or histograms directly without storing them in data files.
-# Although the methods described below do not cover all the functionalities
-# of <tt>GNU graph</tt>, these are useful to check calculations and get some
+# Although the methods described below do not cover all the functionalities
+# of <tt>GNU graph</tt>, these are useful to check calculations and get some
 # speculations on the data.
 #
 #
-# ==
+# == Plotting vectors
 # ---
 # * Vector.graph(y[, options])
 # * Vector.graph(nil, y[, y2, y3, ..., options])
@@ -25,7 +25,7 @@
 # * GSL::graph([x1, y1], [x2, y2], ...., options)
 #
 # These methods use the <tt>GNU graph</tt> utility to plot vectors.
-# The options <tt>options</tt> given by a <tt>String</tt>. If <tt>nil</tt> is
+# The options <tt>options</tt> given by a <tt>String</tt>. If <tt>nil</tt> is
 # given for <tt>ARGV[0]</tt>, auto-generated abscissa are used.
 #
 # Ex:
@@ -44,10 +44,10 @@
 # * GSL::Vector#graph(options)
 # * GSL::Vector#graph(x[, options])
 #
-# These methods plot the vector using the GNU <tt>graph</tt>
+# These methods plot the vector using the GNU <tt>graph</tt>
 # command. The options for the <tt>graph</tt> command are given by a <tt>String</tt>.
 #
-# Ex1:
+# Ex1:
 # >> x = Vector[1..5]
 # [ 1.000e+00 2.000e+00 3.000e+00 4.000e+00 5.000e+00 ]
 # >> x.graph("-m 2") # dotted line
@@ -60,13 +60,13 @@
 # >> c = Sf::cos(x)
 # >> c.graph(x, "-T X -C -g 3 -L 'cos(x)'")
 #
-# ==
+# == Drawing histogram
 # ---
 # * GSL::Histogram#graph(options)
 #
 # This method uses the GNU plotutils <tt>graph</tt> to draw a histogram.
 #
-# ==
+# == Plotting Functions
 # ---
 # * GSL::Function#graph(x[, options])
 #
@@ -78,7 +78,7 @@
 # x = Vector.linspace(0, 2*M_PI, 50)
 # f.graph(x, "-T X -g 3 -C -L 'sin(x)'")
 #
-# ==
+# == Other way
 # The code below uses <tt>GNUPLOT</tt> directly to plot vectors.
 #
 # #!/usr/bin/env ruby
@@ -101,14 +101,14 @@
 #
 # Gnuplot.open do |gp|
 # Gnuplot::Plot.new( gp ) do |plot|
-#
+#
 # plot.xrange "[0:10]"
 # plot.yrange "[-1.5:1.5]"
 # plot.title "Sin Wave Example"
 # plot.xlabel "x"
 # plot.ylabel "sin(x)"
 # plot.pointsize 3
-# plot.grid
+# plot.grid
 #
 # x = GSL::Vector[0..10]
 # y = GSL::Sf::sin(x)
@@ -119,7 +119,7 @@
 # ds.title = "String function"
 # ds.linewidth = 4
 # },
-#
+#
 # Gnuplot::DataSet.new( [x, y] ) { |ds|
 # ds.with = "linespoints"
 # ds.title = "Array data"
@@ -129,9 +129,9 @@
 # end
 # end
 #
-# {prev}[link:
+# {prev}[link:const_rdoc.html]
 #
-# {Reference index}[link:
+# {Reference index}[link:ref_rdoc.html]
 # {top}[link:index.html]
 #
 #