gsl 1.12.109 → 1.14.5
- data/AUTHORS +6 -0
- data/COPYING +339 -0
- data/ChangeLog +556 -0
- data/{README.rdoc → README} +3 -0
- data/Rakefile +54 -10
- data/THANKS +17 -0
- data/VERSION +1 -2
- data/examples/alf/alf.gp +15 -0
- data/examples/alf/alf.rb +32 -0
- data/examples/blas/blas.rb +13 -0
- data/examples/blas/dnrm2.rb +16 -0
- data/examples/blas/level1.rb +81 -0
- data/examples/blas/level2.rb +11 -0
- data/examples/blas/level3.rb +12 -0
- data/examples/bspline.rb +57 -0
- data/examples/cdf.rb +16 -0
- data/examples/cheb.rb +21 -0
- data/examples/combination.rb +23 -0
- data/examples/complex/RC-lpf.rb +47 -0
- data/examples/complex/add.rb +36 -0
- data/examples/complex/coerce.rb +14 -0
- data/examples/complex/complex.rb +25 -0
- data/examples/complex/fpmi.rb +70 -0
- data/examples/complex/functions.rb +77 -0
- data/examples/complex/michelson.rb +36 -0
- data/examples/complex/mul.rb +28 -0
- data/examples/complex/oscillator.rb +17 -0
- data/examples/complex/set.rb +37 -0
- data/examples/const/physconst.rb +151 -0
- data/examples/const/travel.rb +45 -0
- data/examples/deriv/demo.rb +13 -0
- data/examples/deriv/deriv.rb +36 -0
- data/examples/deriv/diff.rb +35 -0
- data/examples/dht.rb +42 -0
- data/examples/dirac.rb +56 -0
- data/examples/eigen/eigen.rb +34 -0
- data/examples/eigen/herm.rb +22 -0
- data/examples/eigen/narray.rb +9 -0
- data/examples/eigen/nonsymm.rb +37 -0
- data/examples/eigen/nonsymmv.rb +43 -0
- data/examples/eigen/qhoscillator.gp +35 -0
- data/examples/eigen/qhoscillator.rb +90 -0
- data/examples/eigen/vander.rb +41 -0
- data/examples/fft/fft.rb +17 -0
- data/examples/fft/fft2.rb +17 -0
- data/examples/fft/forward.rb +25 -0
- data/examples/fft/forward2.rb +26 -0
- data/examples/fft/radix2.rb +18 -0
- data/examples/fft/real-halfcomplex.rb +33 -0
- data/examples/fft/real-halfcomplex2.rb +30 -0
- data/examples/fft/realradix2.rb +19 -0
- data/examples/fft/sunspot.dat +256 -0
- data/examples/fft/sunspot.rb +16 -0
- data/examples/fit/expdata.dat +20 -0
- data/examples/fit/expfit.rb +31 -0
- data/examples/fit/gaussfit.rb +29 -0
- data/examples/fit/gaussian_2peaks.rb +34 -0
- data/examples/fit/hillfit.rb +40 -0
- data/examples/fit/lognormal.rb +26 -0
- data/examples/fit/lorentzfit.rb +22 -0
- data/examples/fit/multifit.rb +72 -0
- data/examples/fit/ndlinear.rb +133 -0
- data/examples/fit/nonlinearfit.rb +89 -0
- data/examples/fit/plot.gp +36 -0
- data/examples/fit/polyfit.rb +9 -0
- data/examples/fit/powerfit.rb +21 -0
- data/examples/fit/sigmoidfit.rb +40 -0
- data/examples/fit/sinfit.rb +22 -0
- data/examples/fit/wlinear.rb +46 -0
- data/examples/fresnel.rb +11 -0
- data/examples/function/function.rb +36 -0
- data/examples/function/log.rb +7 -0
- data/examples/function/min.rb +33 -0
- data/examples/function/sin.rb +10 -0
- data/examples/function/synchrotron.rb +18 -0
- data/examples/gallery/butterfly.rb +7 -0
- data/examples/gallery/cayley.rb +12 -0
- data/examples/gallery/cornu.rb +23 -0
- data/examples/gallery/eight.rb +11 -0
- data/examples/gallery/koch.rb +40 -0
- data/examples/gallery/lemniscate.rb +11 -0
- data/examples/gallery/polar.rb +11 -0
- data/examples/gallery/rgplot/cossin.rb +35 -0
- data/examples/gallery/rgplot/rgplot.replaced +0 -0
- data/examples/gallery/rgplot/roesller.rb +55 -0
- data/examples/gallery/roesller.rb +39 -0
- data/examples/gallery/scarabaeus.rb +14 -0
- data/examples/histogram/cauchy.rb +27 -0
- data/examples/histogram/cauchy.sh +2 -0
- data/examples/histogram/exponential.rb +19 -0
- data/examples/histogram/gauss.rb +16 -0
- data/examples/histogram/gsl-histogram.rb +40 -0
- data/examples/histogram/histo2d.rb +31 -0
- data/examples/histogram/histo3d.rb +34 -0
- data/examples/histogram/histogram-pdf.rb +27 -0
- data/examples/histogram/histogram.rb +26 -0
- data/examples/histogram/integral.rb +28 -0
- data/examples/histogram/poisson.rb +27 -0
- data/examples/histogram/power.rb +25 -0
- data/examples/histogram/rebin.rb +17 -0
- data/examples/histogram/smp.dat +5 -0
- data/examples/histogram/xexp.rb +21 -0
- data/examples/integration/ahmed.rb +21 -0
- data/examples/integration/cosmology.rb +75 -0
- data/examples/integration/friedmann.gp +16 -0
- data/examples/integration/friedmann.rb +35 -0
- data/examples/integration/gamma-zeta.rb +35 -0
- data/examples/integration/integration.rb +22 -0
- data/examples/integration/qag.rb +8 -0
- data/examples/integration/qag2.rb +14 -0
- data/examples/integration/qag3.rb +8 -0
- data/examples/integration/qagi.rb +28 -0
- data/examples/integration/qagi2.rb +49 -0
- data/examples/integration/qagiu.rb +29 -0
- data/examples/integration/qagp.rb +20 -0
- data/examples/integration/qags.rb +14 -0
- data/examples/integration/qawc.rb +18 -0
- data/examples/integration/qawf.rb +41 -0
- data/examples/integration/qawo.rb +29 -0
- data/examples/integration/qaws.rb +30 -0
- data/examples/integration/qng.rb +17 -0
- data/examples/interp/demo.gp +20 -0
- data/examples/interp/demo.rb +45 -0
- data/examples/interp/interp.rb +37 -0
- data/examples/interp/points +10 -0
- data/examples/interp/spline.rb +20 -0
- data/examples/jacobi/deriv.rb +40 -0
- data/examples/jacobi/integrate.rb +34 -0
- data/examples/jacobi/interp.rb +43 -0
- data/examples/jacobi/jacobi.rb +11 -0
- data/examples/linalg/HH.rb +15 -0
- data/examples/linalg/HH_narray.rb +13 -0
- data/examples/linalg/LQ_solve.rb +73 -0
- data/examples/linalg/LU.rb +84 -0
- data/examples/linalg/LU2.rb +31 -0
- data/examples/linalg/LU_narray.rb +24 -0
- data/examples/linalg/PTLQ.rb +47 -0
- data/examples/linalg/QR.rb +18 -0
- data/examples/linalg/QRPT.rb +47 -0
- data/examples/linalg/QR_solve.rb +78 -0
- data/examples/linalg/QR_solve_narray.rb +13 -0
- data/examples/linalg/SV.rb +16 -0
- data/examples/linalg/SV_narray.rb +12 -0
- data/examples/linalg/SV_solve.rb +49 -0
- data/examples/linalg/chol.rb +29 -0
- data/examples/linalg/chol_narray.rb +15 -0
- data/examples/linalg/complex.rb +57 -0
- data/examples/linalg/invert_narray.rb +10 -0
- data/examples/math/const.rb +67 -0
- data/examples/math/elementary.rb +35 -0
- data/examples/math/functions.rb +41 -0
- data/examples/math/inf_nan.rb +34 -0
- data/examples/math/minmax.rb +22 -0
- data/examples/math/power.rb +18 -0
- data/examples/math/test.rb +31 -0
- data/examples/matrix/a.dat +0 -0
- data/examples/matrix/add.rb +45 -0
- data/examples/matrix/b.dat +4 -0
- data/examples/matrix/cat.rb +31 -0
- data/examples/matrix/colvectors.rb +24 -0
- data/examples/matrix/complex.rb +41 -0
- data/examples/matrix/det.rb +29 -0
- data/examples/matrix/diagonal.rb +23 -0
- data/examples/matrix/get_all.rb +159 -0
- data/examples/matrix/hilbert.rb +31 -0
- data/examples/matrix/iterator.rb +19 -0
- data/examples/matrix/matrix.rb +57 -0
- data/examples/matrix/minmax.rb +53 -0
- data/examples/matrix/mul.rb +39 -0
- data/examples/matrix/rand.rb +20 -0
- data/examples/matrix/read.rb +29 -0
- data/examples/matrix/rowcol.rb +47 -0
- data/examples/matrix/set.rb +41 -0
- data/examples/matrix/set_all.rb +100 -0
- data/examples/matrix/view.rb +32 -0
- data/examples/matrix/view_all.rb +148 -0
- data/examples/matrix/write.rb +23 -0
- data/examples/min.rb +29 -0
- data/examples/monte/miser.rb +47 -0
- data/examples/monte/monte.rb +47 -0
- data/examples/monte/plain.rb +47 -0
- data/examples/monte/vegas.rb +46 -0
- data/examples/multimin/bundle.rb +66 -0
- data/examples/multimin/cqp.rb +109 -0
- data/examples/multimin/fdfminimizer.rb +40 -0
- data/examples/multimin/fminimizer.rb +41 -0
- data/examples/multiroot/demo.rb +36 -0
- data/examples/multiroot/fdfsolver.rb +50 -0
- data/examples/multiroot/fsolver.rb +33 -0
- data/examples/multiroot/fsolver2.rb +32 -0
- data/examples/multiroot/fsolver3.rb +26 -0
- data/examples/narray/histogram.rb +14 -0
- data/examples/narray/mandel.rb +27 -0
- data/examples/narray/narray.rb +28 -0
- data/examples/narray/narray2.rb +44 -0
- data/examples/narray/sf.rb +26 -0
- data/examples/ntuple/create.rb +17 -0
- data/examples/ntuple/project.rb +31 -0
- data/examples/odeiv/binarysystem.gp +23 -0
- data/examples/odeiv/binarysystem.rb +104 -0
- data/examples/odeiv/demo.gp +24 -0
- data/examples/odeiv/demo.rb +69 -0
- data/examples/odeiv/demo2.gp +26 -0
- data/examples/odeiv/duffing.rb +45 -0
- data/examples/odeiv/frei1.rb +109 -0
- data/examples/odeiv/frei2.rb +76 -0
- data/examples/odeiv/legendre.rb +52 -0
- data/examples/odeiv/odeiv.rb +32 -0
- data/examples/odeiv/odeiv2.rb +45 -0
- data/examples/odeiv/oscillator.rb +42 -0
- data/examples/odeiv/sedov.rb +97 -0
- data/examples/odeiv/whitedwarf.gp +40 -0
- data/examples/odeiv/whitedwarf.rb +158 -0
- data/examples/ool/conmin.rb +100 -0
- data/examples/ool/gencan.rb +99 -0
- data/examples/ool/pgrad.rb +100 -0
- data/examples/ool/spg.rb +100 -0
- data/examples/pdf/bernoulli.rb +5 -0
- data/examples/pdf/beta.rb +7 -0
- data/examples/pdf/binomiral.rb +10 -0
- data/examples/pdf/cauchy.rb +6 -0
- data/examples/pdf/chisq.rb +8 -0
- data/examples/pdf/exponential.rb +7 -0
- data/examples/pdf/exppow.rb +6 -0
- data/examples/pdf/fdist.rb +7 -0
- data/examples/pdf/flat.rb +7 -0
- data/examples/pdf/gamma.rb +8 -0
- data/examples/pdf/gauss-tail.rb +5 -0
- data/examples/pdf/gauss.rb +6 -0
- data/examples/pdf/geometric.rb +5 -0
- data/examples/pdf/gumbel.rb +6 -0
- data/examples/pdf/hypergeometric.rb +11 -0
- data/examples/pdf/landau.rb +5 -0
- data/examples/pdf/laplace.rb +7 -0
- data/examples/pdf/logarithmic.rb +5 -0
- data/examples/pdf/logistic.rb +6 -0
- data/examples/pdf/lognormal.rb +6 -0
- data/examples/pdf/neg-binomiral.rb +10 -0
- data/examples/pdf/pareto.rb +7 -0
- data/examples/pdf/pascal.rb +10 -0
- data/examples/pdf/poisson.rb +5 -0
- data/examples/pdf/rayleigh-tail.rb +6 -0
- data/examples/pdf/rayleigh.rb +6 -0
- data/examples/pdf/tdist.rb +6 -0
- data/examples/pdf/weibull.rb +8 -0
- data/examples/permutation/ex1.rb +22 -0
- data/examples/permutation/permutation.rb +16 -0
- data/examples/poly/bell.rb +6 -0
- data/examples/poly/bessel.rb +6 -0
- data/examples/poly/cheb.rb +6 -0
- data/examples/poly/cheb_II.rb +6 -0
- data/examples/poly/cubic.rb +9 -0
- data/examples/poly/demo.rb +20 -0
- data/examples/poly/eval.rb +28 -0
- data/examples/poly/eval_derivs.rb +14 -0
- data/examples/poly/fit.rb +21 -0
- data/examples/poly/hermite.rb +6 -0
- data/examples/poly/poly.rb +13 -0
- data/examples/poly/quadratic.rb +25 -0
- data/examples/random/diffusion.rb +34 -0
- data/examples/random/gaussian.rb +9 -0
- data/examples/random/generator.rb +27 -0
- data/examples/random/hdsobol.rb +21 -0
- data/examples/random/poisson.rb +9 -0
- data/examples/random/qrng.rb +19 -0
- data/examples/random/randomwalk.rb +37 -0
- data/examples/random/randomwalk2d.rb +19 -0
- data/examples/random/rayleigh.rb +36 -0
- data/examples/random/rng.rb +33 -0
- data/examples/random/rngextra.rb +14 -0
- data/examples/roots/bisection.rb +25 -0
- data/examples/roots/brent.rb +43 -0
- data/examples/roots/demo.rb +30 -0
- data/examples/roots/newton.rb +46 -0
- data/examples/roots/recombination.gp +12 -0
- data/examples/roots/recombination.rb +61 -0
- data/examples/roots/steffenson.rb +48 -0
- data/examples/sf/ShiChi.rb +6 -0
- data/examples/sf/SiCi.rb +6 -0
- data/examples/sf/airy_Ai.rb +8 -0
- data/examples/sf/airy_Bi.rb +8 -0
- data/examples/sf/bessel_IK.rb +12 -0
- data/examples/sf/bessel_JY.rb +13 -0
- data/examples/sf/beta_inc.rb +9 -0
- data/examples/sf/clausen.rb +6 -0
- data/examples/sf/dawson.rb +5 -0
- data/examples/sf/debye.rb +9 -0
- data/examples/sf/dilog.rb +6 -0
- data/examples/sf/ellint.rb +6 -0
- data/examples/sf/expint.rb +8 -0
- data/examples/sf/fermi.rb +10 -0
- data/examples/sf/gamma_inc_P.rb +9 -0
- data/examples/sf/gegenbauer.rb +8 -0
- data/examples/sf/hyperg.rb +7 -0
- data/examples/sf/laguerre.rb +19 -0
- data/examples/sf/lambertW.rb +5 -0
- data/examples/sf/legendre_P.rb +10 -0
- data/examples/sf/lngamma.rb +5 -0
- data/examples/sf/psi.rb +54 -0
- data/examples/sf/sphbessel.gp +27 -0
- data/examples/sf/sphbessel.rb +30 -0
- data/examples/sf/synchrotron.rb +5 -0
- data/examples/sf/transport.rb +10 -0
- data/examples/sf/zetam1.rb +5 -0
- data/examples/siman.rb +44 -0
- data/examples/sort/heapsort.rb +23 -0
- data/examples/sort/heapsort_vector_complex.rb +21 -0
- data/examples/sort/sort.rb +23 -0
- data/examples/sort/sort2.rb +16 -0
- data/examples/stats/mean.rb +17 -0
- data/examples/stats/statistics.rb +18 -0
- data/examples/stats/test.rb +9 -0
- data/examples/sum.rb +34 -0
- data/examples/tamu_anova.rb +18 -0
- data/examples/vector/a.dat +0 -0
- data/examples/vector/add.rb +56 -0
- data/examples/vector/b.dat +4 -0
- data/examples/vector/c.dat +3 -0
- data/examples/vector/collect.rb +26 -0
- data/examples/vector/compare.rb +28 -0
- data/examples/vector/complex.rb +51 -0
- data/examples/vector/complex_get_all.rb +85 -0
- data/examples/vector/complex_set_all.rb +131 -0
- data/examples/vector/complex_view_all.rb +77 -0
- data/examples/vector/connect.rb +22 -0
- data/examples/vector/decimate.rb +38 -0
- data/examples/vector/diff.rb +31 -0
- data/examples/vector/filescan.rb +17 -0
- data/examples/vector/floor.rb +23 -0
- data/examples/vector/get_all.rb +82 -0
- data/examples/vector/gnuplot.rb +38 -0
- data/examples/vector/graph.rb +28 -0
- data/examples/vector/histogram.rb +22 -0
- data/examples/vector/linspace.rb +24 -0
- data/examples/vector/log.rb +17 -0
- data/examples/vector/logic.rb +33 -0
- data/examples/vector/logspace.rb +25 -0
- data/examples/vector/minmax.rb +47 -0
- data/examples/vector/mul.rb +49 -0
- data/examples/vector/narray.rb +46 -0
- data/examples/vector/read.rb +29 -0
- data/examples/vector/set.rb +35 -0
- data/examples/vector/set_all.rb +121 -0
- data/examples/vector/smpv.dat +15 -0
- data/examples/vector/test.rb +43 -0
- data/examples/vector/test_gslblock.rb +58 -0
- data/examples/vector/vector.rb +110 -0
- data/examples/vector/view.rb +35 -0
- data/examples/vector/view_all.rb +73 -0
- data/examples/vector/where.rb +29 -0
- data/examples/vector/write.rb +24 -0
- data/examples/vector/zip.rb +34 -0
- data/examples/wavelet/ecg.dat +256 -0
- data/examples/wavelet/wavelet1.rb +50 -0
- data/ext/extconf.rb +9 -0
- data/ext/gsl.c +10 -1
- data/ext/histogram.c +6 -2
- data/ext/integration.c +39 -0
- data/ext/matrix_complex.c +1 -1
- data/ext/multiset.c +214 -0
- data/ext/nmf.c +4 -0
- data/ext/nmf_wrap.c +3 -0
- data/ext/vector_complex.c +1 -1
- data/ext/vector_double.c +3 -3
- data/ext/vector_source.c +6 -6
- data/include/rb_gsl.h +7 -0
- data/include/rb_gsl_common.h +6 -0
- data/rdoc/alf.rdoc +77 -0
- data/rdoc/blas.rdoc +269 -0
- data/rdoc/bspline.rdoc +42 -0
- data/rdoc/changes.rdoc +164 -0
- data/rdoc/cheb.rdoc +99 -0
- data/rdoc/cholesky_complex.rdoc +46 -0
- data/rdoc/combi.rdoc +125 -0
- data/rdoc/complex.rdoc +210 -0
- data/rdoc/const.rdoc +546 -0
- data/rdoc/dht.rdoc +122 -0
- data/rdoc/diff.rdoc +133 -0
- data/rdoc/ehandling.rdoc +50 -0
- data/rdoc/eigen.rdoc +401 -0
- data/rdoc/fft.rdoc +535 -0
- data/rdoc/fit.rdoc +284 -0
- data/rdoc/function.rdoc +94 -0
- data/rdoc/graph.rdoc +137 -0
- data/rdoc/hist.rdoc +409 -0
- data/rdoc/hist2d.rdoc +279 -0
- data/rdoc/hist3d.rdoc +112 -0
- data/rdoc/index.rdoc +62 -0
- data/rdoc/integration.rdoc +398 -0
- data/rdoc/interp.rdoc +231 -0
- data/rdoc/intro.rdoc +27 -0
- data/rdoc/linalg.rdoc +681 -0
- data/rdoc/linalg_complex.rdoc +88 -0
- data/rdoc/math.rdoc +276 -0
- data/rdoc/matrix.rdoc +1093 -0
- data/rdoc/min.rdoc +189 -0
- data/rdoc/monte.rdoc +234 -0
- data/rdoc/multimin.rdoc +312 -0
- data/rdoc/multiroot.rdoc +293 -0
- data/rdoc/narray.rdoc +173 -0
- data/rdoc/ndlinear.rdoc +247 -0
- data/rdoc/nonlinearfit.rdoc +348 -0
- data/rdoc/ntuple.rdoc +88 -0
- data/rdoc/odeiv.rdoc +378 -0
- data/rdoc/perm.rdoc +221 -0
- data/rdoc/poly.rdoc +335 -0
- data/rdoc/qrng.rdoc +90 -0
- data/rdoc/randist.rdoc +233 -0
- data/rdoc/ref.rdoc +93 -0
- data/rdoc/rng.rdoc +203 -0
- data/rdoc/rngextra.rdoc +11 -0
- data/rdoc/roots.rdoc +305 -0
- data/rdoc/screenshot.rdoc +40 -0
- data/rdoc/sf.rdoc +1622 -0
- data/rdoc/siman.rdoc +89 -0
- data/rdoc/sort.rdoc +94 -0
- data/rdoc/start.rdoc +16 -0
- data/rdoc/stats.rdoc +219 -0
- data/rdoc/sum.rdoc +65 -0
- data/rdoc/tensor.rdoc +251 -0
- data/rdoc/tut.rdoc +5 -0
- data/rdoc/use.rdoc +177 -0
- data/rdoc/vector.rdoc +1243 -0
- data/rdoc/vector_complex.rdoc +347 -0
- data/rdoc/wavelet.rdoc +218 -0
- data/setup.rb +1585 -0
- data/tests/blas/amax.rb +14 -0
- data/tests/blas/asum.rb +16 -0
- data/tests/blas/axpy.rb +25 -0
- data/tests/blas/copy.rb +23 -0
- data/tests/blas/dot.rb +23 -0
- data/tests/bspline.rb +53 -0
- data/tests/cdf.rb +1388 -0
- data/tests/cheb.rb +112 -0
- data/tests/combination.rb +123 -0
- data/tests/complex.rb +17 -0
- data/tests/const.rb +24 -0
- data/tests/deriv.rb +85 -0
- data/tests/dht/dht1.rb +17 -0
- data/tests/dht/dht2.rb +23 -0
- data/tests/dht/dht3.rb +23 -0
- data/tests/dht/dht4.rb +23 -0
- data/tests/diff.rb +78 -0
- data/tests/eigen/eigen.rb +220 -0
- data/tests/eigen/gen.rb +105 -0
- data/tests/eigen/genherm.rb +66 -0
- data/tests/eigen/gensymm.rb +68 -0
- data/tests/eigen/nonsymm.rb +53 -0
- data/tests/eigen/nonsymmv.rb +53 -0
- data/tests/eigen/symm-herm.rb +74 -0
- data/tests/err.rb +58 -0
- data/tests/fit.rb +124 -0
- data/tests/gsl_test.rb +118 -0
- data/tests/gsl_test2.rb +107 -0
- data/tests/histo.rb +12 -0
- data/tests/integration/integration1.rb +72 -0
- data/tests/integration/integration2.rb +71 -0
- data/tests/integration/integration3.rb +71 -0
- data/tests/integration/integration4.rb +71 -0
- data/tests/interp.rb +45 -0
- data/tests/linalg/HH.rb +64 -0
- data/tests/linalg/LU.rb +47 -0
- data/tests/linalg/QR.rb +77 -0
- data/tests/linalg/SV.rb +24 -0
- data/tests/linalg/TDN.rb +116 -0
- data/tests/linalg/TDS.rb +122 -0
- data/tests/linalg/bidiag.rb +73 -0
- data/tests/linalg/cholesky.rb +20 -0
- data/tests/linalg/linalg.rb +158 -0
- data/tests/matrix/matrix_nmf_test.rb +39 -0
- data/tests/matrix/matrix_test.rb +48 -0
- data/tests/min.rb +99 -0
- data/tests/monte/miser.rb +31 -0
- data/tests/monte/vegas.rb +45 -0
- data/tests/multifit/test_2dgauss.rb +112 -0
- data/tests/multifit/test_brown.rb +90 -0
- data/tests/multifit/test_enso.rb +246 -0
- data/tests/multifit/test_filip.rb +155 -0
- data/tests/multifit/test_gauss.rb +97 -0
- data/tests/multifit/test_longley.rb +110 -0
- data/tests/multifit/test_multifit.rb +52 -0
- data/tests/multimin.rb +139 -0
- data/tests/multiroot.rb +131 -0
- data/tests/multiset.rb +52 -0
- data/tests/odeiv.rb +353 -0
- data/tests/poly/poly.rb +242 -0
- data/tests/poly/special.rb +65 -0
- data/tests/qrng.rb +131 -0
- data/tests/quartic.rb +29 -0
- data/tests/randist.rb +134 -0
- data/tests/rng.rb +305 -0
- data/tests/roots.rb +76 -0
- data/tests/run-test.sh +17 -0
- data/tests/sf/gsl_test_sf.rb +249 -0
- data/tests/sf/test_airy.rb +83 -0
- data/tests/sf/test_bessel.rb +306 -0
- data/tests/sf/test_coulomb.rb +17 -0
- data/tests/sf/test_dilog.rb +25 -0
- data/tests/sf/test_gamma.rb +209 -0
- data/tests/sf/test_hyperg.rb +356 -0
- data/tests/sf/test_legendre.rb +227 -0
- data/tests/sf/test_mathieu.rb +59 -0
- data/tests/sf/test_sf.rb +839 -0
- data/tests/stats.rb +174 -0
- data/tests/sum.rb +98 -0
- data/tests/sys.rb +323 -0
- data/tests/tensor.rb +419 -0
- data/tests/vector/vector_complex_test.rb +101 -0
- data/tests/vector/vector_test.rb +141 -0
- data/tests/wavelet.rb +142 -0
- metadata +596 -15
data/rdoc/min.rdoc
ADDED
@@ -0,0 +1,189 @@
#
# = One dimensional Minimization
#
# This chapter describes routines for finding minima of arbitrary
# one-dimensional functions.
#
#
# Contents:
# 1. {Introduction}[link:files/rdoc/min_rdoc.html#1]
# 1. {GSL::Min::FMinimizer class}[link:files/rdoc/min_rdoc.html#2]
# 1. {Iteration}[link:files/rdoc/min_rdoc.html#3]
# 1. {Stopping Parameters}[link:files/rdoc/min_rdoc.html#4]
# 1. {Examples}[link:files/rdoc/min_rdoc.html#5]
#
# == {}[link:index.html"name="1] Introduction
#
# The minimization algorithms begin with a bounded region known to contain
# a minimum. The region is described by a lower bound <tt>a</tt> and an upper bound
# <tt>b</tt>, with an estimate of the location of the minimum <tt>x</tt>.
#
# The value of the function at <tt>x</tt> must be less than the value of the
# function at the ends of the interval,
#   f(a) > f(x) < f(b)
# This condition guarantees that a minimum is contained somewhere within the
# interval. On each iteration a new point <tt>x'</tt> is selected using one of the
# available algorithms. If the new point is a better estimate of the minimum,
# <tt>f(x') < f(x)</tt>, then the current estimate of the minimum <tt>x</tt> is
# updated. The new point also allows the size of the bounded interval to be
# reduced, by choosing the most compact set of points which satisfies the
# constraint <tt>f(a) > f(x) < f(b)</tt>. The interval is reduced until it
# encloses the true minimum to a desired tolerance. This provides a best
# estimate of the location of the minimum and a rigorous error estimate.
#
# Several bracketing algorithms are available within a single framework.
# The user provides a high-level driver for the algorithm, and the library
# provides the individual functions necessary for each of the steps. There
# are three main phases of the iteration. The steps are,
# * initialize minimizer (or <tt>solver</tt>) state, <tt>s</tt>, for algorithm <tt>T</tt>
# * update <tt>s</tt> using the iteration <tt>T</tt>
# * test <tt>s</tt> for convergence, and repeat iteration if necessary
#
# The state of the minimizers is held in a <tt>GSL::Min::FMinimizer</tt> object.
# The updating procedure uses only function evaluations (not derivatives).
# The function to minimize is given to the minimizer as an instance of the
# {GSL::Function}[link:files/rdoc/function_rdoc.html] class.
#
#
# == {}[link:index.html"name="2] GSL::Min::FMinimizer class
# ---
# * GSL::Min::FMinimizer.alloc(t)
#
#   This method creates an instance of the <tt>GSL::Min::FMinimizer</tt> class of
#   type <tt>t</tt>. The type <tt>t</tt> is given by a String,
#   * "goldensection"
#   * "brent"
#   * "quad_golden"
#   or by a Ruby constant,
#   * GSL::Min::FMinimizer::GOLDENSECTION
#   * GSL::Min::FMinimizer::BRENT
#   * GSL::Min::FMinimizer::QUAD_GOLDEN (GSL-1.13)
#
#   ex)
#     include GSL::Min
#     s = FMinimizer.alloc(FMinimizer::BRENT)
#
# ---
# * GSL::Min::FMinimizer#set(f, xmin, xlow, xup)
#
#   This method sets, or resets, an existing minimizer <tt>self</tt> to use
#   the function <tt>f</tt> (given by a <tt>GSL::Function</tt>
#   object) and the initial search interval [<tt>xlow, xup</tt>],
#   with a guess for the location of the minimum <tt>xmin</tt>.
#
#   If the given interval does not contain a minimum, the
#   method returns an error code of <tt>GSL::FAILURE</tt>.
#
# ---
# * GSL::Min::FMinimizer#set_with_values(f, xmin, fmin, xlow, flow, xup, fup)
#
#   This method is equivalent to <tt>FMinimizer#set</tt> but uses the values
#   <tt>fmin, flow</tt> and <tt>fup</tt> instead of computing
#   <tt>f(xmin), f(xlow)</tt> and <tt>f(xup)</tt>.
#
# ---
# * GSL::Min::FMinimizer#name
#
#   This returns the name of the minimizer.
#
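# For illustration, a minimal sketch of how the two setters above relate, using
# a simple parabola with its minimum at x = 2 (the helper <tt>parabola</tt> is just a
# local lambda introduced for this sketch):
#
#   include GSL::Min
#   parabola = lambda { |x| (x - 2.0)**2 }
#   f = GSL::Function.alloc { |x| parabola.call(x) }
#   s = FMinimizer.alloc("brent")
#   # plain set: the minimizer evaluates f at xmin, xlow and xup itself
#   s.set(f, 1.5, 0.0, 4.0)
#   # set_with_values: reuse function values that were already computed
#   s.set_with_values(f, 1.5, parabola.call(1.5),
#                     0.0, parabola.call(0.0),
#                     4.0, parabola.call(4.0))
#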
# == {}[link:index.html"name="3] Iteration
# ---
# * GSL::Min::FMinimizer#iterate
#
#   This method performs a single iteration of the minimizer <tt>self</tt>.
#   If the iteration encounters an unexpected problem then an error code
#   will be returned,
#   * <tt>GSL::EBADFUNC</tt>: the iteration encountered a singular point where the
#     function evaluated to <tt>Inf</tt> or <tt>NaN</tt>.
#   * <tt>GSL::FAILURE</tt>: the algorithm could not improve the current best
#     approximation or bounding interval.
#
#   The minimizer maintains a current best estimate of the position of
#   the minimum at all times, and the current interval bounding the minimum.
#   This information can be accessed with the following auxiliary methods.
#
# ---
# * GSL::Min::FMinimizer#x_minimum
#
#   Returns the current estimate of the position of the minimum
#   for the minimizer <tt>self</tt>.
#
# ---
# * GSL::Min::FMinimizer#x_upper
# * GSL::Min::FMinimizer#x_lower
#
#   Return the current upper and lower bounds of the interval for the
#   minimizer <tt>self</tt>.
#
# ---
# * GSL::Min::FMinimizer#f_minimum
# * GSL::Min::FMinimizer#f_upper
# * GSL::Min::FMinimizer#f_lower
#
#   Return the value of the function at the current estimate of the
#   minimum and at the upper and lower bounds of the interval
#   for the minimizer <tt>self</tt>.
#
# == {}[link:index.html"name="4] Stopping Parameters
# ---
# * GSL::Min::FMinimizer#test_interval(epsabs, epsrel)
# * GSL::Min.test_interval(xlow, xup, epsabs, epsrel)
#
#   These methods test for the convergence of the interval
#   [<tt>xlow, xup</tt>] with absolute error <tt>epsabs</tt> and relative
#   error <tt>epsrel</tt>. The test returns <tt>GSL::SUCCESS</tt>
#   if the following condition is achieved,
#     |a - b| < epsabs + epsrel min(|a|,|b|)
#   when the interval <tt>x = [a,b]</tt> does not include the origin.
#   If the interval includes the origin then <tt>min(|a|,|b|)</tt> is
#   replaced by zero (which is the minimum value of |x| over the interval).
#   This ensures that the relative error is accurately estimated for minima
#   close to the origin.
#
#   This condition on the interval also implies that any estimate of the
#   minimum x_m in the interval satisfies the same condition with respect
#   to the true minimum x_m^*,
#     |x_m - x_m^*| < epsabs + epsrel x_m^*
#   assuming that the true minimum x_m^* is contained within the interval.
#
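# For illustration, the module-level form can be used to check a bracket
# directly, without a minimizer object (a minimal sketch; the numbers are
# arbitrary):
#
#   # |a - b| = 0.0005 < epsabs = 0.001, so the bracket counts as converged
#   status = GSL::Min.test_interval(1.0, 1.0005, 0.001, 0.0)
#   puts("converged") if status == GSL::SUCCESS
#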
# == {}[link:index.html"name="5] Example
# To find the minimum of the function f(x) = cos(x) + 1.0:
#
#   #!/usr/bin/env ruby
#   require("gsl")
#   include GSL::Min
#
#   fn1 = Function.alloc { |x| Math::cos(x) + 1.0 }
#
#   iter = 0; max_iter = 500
#   m = 2.0             # initial guess
#   m_expected = Math::PI
#   a = 0.0
#   b = 6.0
#
#   gmf = FMinimizer.alloc(FMinimizer::BRENT)
#   gmf.set(fn1, m, a, b)
#
#   printf("using %s method\n", gmf.name)
#   printf("%5s [%9s, %9s] %9s %10s %9s\n", "iter", "lower", "upper", "min",
#          "err", "err(est)")
#
#   printf("%5d [%.7f, %.7f] %.7f %+.7f %.7f\n", iter, a, b, m, m - m_expected, b - a)
#
#   begin
#     iter += 1
#     status = gmf.iterate
#     status = gmf.test_interval(0.001, 0.0)
#     puts("Converged:") if status == GSL::SUCCESS
#     a = gmf.x_lower
#     b = gmf.x_upper
#     m = gmf.x_minimum
#     printf("%5d [%.7f, %.7f] %.7f %+.7f %.7f\n",
#            iter, a, b, m, m - m_expected, b - a)
#   end while status == GSL::CONTINUE and iter < max_iter
#
# {prev}[link:files/rdoc/roots_rdoc.html]
# {next}[link:files/rdoc/multiroot_rdoc.html]
#
# {Reference index}[link:files/rdoc/ref_rdoc.html]
# {top}[link:files/rdoc/index_rdoc.html]
#
#
data/rdoc/monte.rdoc
ADDED
@@ -0,0 +1,234 @@
#
# = Monte Carlo Integration
#
# == {}[link:index.html"name="1] The GSL::Monte::Function class
# The function to be integrated has its own datatype, the <tt>GSL::Monte::Function</tt> class.
#
# ---
# * GSL::Monte::Function.alloc(proc, dim, params)
# * GSL::Monte::Function.alloc(proc, dim)
#
#   Constructor. The following example shows how to use this:
#
#   ex:
#     proc_f = Proc.new { |x, dim, params|
#       a = params[0]; b = params[1]; c = params[2]
#       if dim != 2; raise("dim != 2"); end
#       a*x[0]*x[0] + b*x[0]*x[1] + c*x[1]*x[1]
#     }
#     dim = 2
#     mf = Monte::Function.alloc(proc_f, dim)
#     mf.set_params([3, 2, 1])
#
# ---
# * GSL::Monte::Function#set(proc, dim, params)
# * GSL::Monte::Function#set(proc, dim)
# * GSL::Monte::Function#set(proc)
# * GSL::Monte::Function#set_proc(proc)
# * GSL::Monte::Function#set_proc(proc, dim)
# * GSL::Monte::Function#set_params(params)
# * GSL::Monte::Function#params
# * GSL::Monte::Function#eval
# * GSL::Monte::Function#call
#
#
# == {}[link:index.html"name="2] Monte Carlo plans, algorithms
# === {}[link:index.html"name="2.1] PLAIN Monte Carlo
# ---
# * GSL::Monte::Plain.alloc(dim)
# * GSL::Monte::Plain#init
#
# === {}[link:index.html"name="2.2] Miser
# ---
# * GSL::Monte::Miser.alloc(dim)
# * GSL::Monte::Miser#init
#
# === {}[link:index.html"name="2.3] Vegas
# ---
# * GSL::Monte::Vegas.alloc(dim)
# * GSL::Monte::Vegas#init
#
#
# == {}[link:index.html"name="3] Integration
# ---
# * GSL::Monte::Function#integrate(xl, xu, dim, calls, rng, s)
# * GSL::Monte::Function#integrate(xl, xu, dim, calls, s)
# * GSL::Monte::Function#integrate(xl, xu, calls, rng, s)
# * GSL::Monte::Function#integrate(xl, xu, calls, s)
#
#   This method performs Monte Carlo integration of the function <tt>self</tt>
#   using the algorithm <tt>s</tt>, over the <tt>dim</tt>-dimensional hypercubic
#   region defined by the lower and upper
#   limits in the arrays <tt>xl</tt> and <tt>xu</tt>, each of size <tt>dim</tt>.
#   The integration uses a fixed number of function calls <tt>calls</tt>.
#   The argument <tt>rng</tt> is a random number generator (optional). If it is not
#   given, a new generator is created internally and freed when the calculation
#   finishes.
#
#   See the sample scripts <tt>sample/monte*.rb</tt> for more details.
#
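# As an illustration of the calling convention above, a minimal sketch that
# integrates f(x,y) = x*y over the unit square (exact value 0.25) with the
# plain algorithm; the estimate returned will vary with the random stream:
#
#   require("gsl")
#   f2 = GSL::Monte::Function.alloc(Proc.new { |x, dim, params| x[0]*x[1] }, 2)
#   xl = GSL::Vector.alloc(0.0, 0.0)
#   xu = GSL::Vector.alloc(1.0, 1.0)
#   rng = GSL::Rng.alloc
#   plain = GSL::Monte::Plain.alloc(2)
#   result, sigma = f2.integrate(xl, xu, 2, 50000, rng, plain)
#   printf("x*y over [0,1]^2: %.4f +/- %.4f (exact 0.25)\n", result, sigma)
#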
# == {}[link:index.html"name="4] Accessing internal state of the Monte Carlo classes
# ---
# * GSL::Monte::Miser#estimate_frac
# * GSL::Monte::Miser#estimate_frac=
# * GSL::Monte::Miser#min_calls
# * GSL::Monte::Miser#min_calls=
# * GSL::Monte::Miser#min_calls_per_bisection
# * GSL::Monte::Miser#min_calls_per_bisection=
# * GSL::Monte::Miser#alpha
# * GSL::Monte::Miser#alpha=
# * GSL::Monte::Miser#dither
# * GSL::Monte::Miser#dither=
# * GSL::Monte::Vegas#alpha
# * GSL::Monte::Vegas#result
# * GSL::Monte::Vegas#sigma
# * GSL::Monte::Vegas#chisq
#
#   Returns the chi-squared per degree of freedom for the weighted estimate of
#   the integral. The returned value should be close to 1. A value which differs
#   significantly from 1 indicates that the values from different iterations are
#   inconsistent. In this case the weighted error will be under-estimated, and
#   further iterations of the algorithm are needed to obtain reliable results.
# ---
# * GSL::Monte::Vegas#runval
#
#   Returns the raw (unaveraged) values of the integral and its error
#   <tt>[result, sigma]</tt> from the most recent iteration of the algorithm.
# ---
# * GSL::Monte::Vegas#iterations
# * GSL::Monte::Vegas#iterations=
# * GSL::Monte::Vegas#alpha
# * GSL::Monte::Vegas#alpha=
# * GSL::Monte::Vegas#stage
# * GSL::Monte::Vegas#stage=
# * GSL::Monte::Vegas#mode
# * GSL::Monte::Vegas#mode=
# * GSL::Monte::Vegas#verbose
# * GSL::Monte::Vegas#verbose=
#
#
# == {}[link:index.html"name="5] Miser Parameters (GSL-1.13 or later)
# ---
# * GSL::Monte::Miser#params_get
#
#   Returns the parameters of the integrator state as an instance of the
#   <tt>GSL::Monte::Miser::Params</tt> class.
# ---
# * GSL::Monte::Miser#params_set(params)
#
#   Sets the integrator parameters based on values provided in an object
#   <tt>params</tt> of the <tt>GSL::Monte::Miser::Params</tt> class.
# === {}[link:index.html"name="5.1] Accessors of <tt>GSL::Monte::Miser::Params</tt>
# ---
# * GSL::Monte::Miser::Params#estimate_frac
# * GSL::Monte::Miser::Params#estimate_frac=
#
#   The fraction of the currently available number of function calls which are
#   allocated to estimating the variance at each recursive step. The default
#   value is 0.1.
# ---
# * GSL::Monte::Miser::Params#min_calls
# * GSL::Monte::Miser::Params#min_calls=
#
#   The minimum number of function calls required for each estimate of the
#   variance. If the number of function calls allocated to the estimate using
#   <tt>estimate_frac</tt> falls below <tt>min_calls</tt> then <tt>min_calls</tt> are used instead.
#   This ensures that each estimate maintains a reasonable level of accuracy.
#   The default value of min_calls is 16 * dim.
# ---
# * GSL::Monte::Miser::Params#min_calls_per_bisection
# * GSL::Monte::Miser::Params#min_calls_per_bisection=
#
#   The minimum number of function calls required to proceed with a bisection
#   step. When a recursive step has fewer calls available than
#   <tt>min_calls_per_bisection</tt> it performs a plain Monte Carlo estimate of the
#   current sub-region and terminates its branch of the recursion. The default
#   value of this parameter is 32 * min_calls.
# ---
# * GSL::Monte::Miser::Params#alpha
# * GSL::Monte::Miser::Params#alpha=
#
#   This parameter controls how the estimated variances for the two sub-regions
#   of a bisection are combined when allocating points. With recursive sampling
#   the overall variance should scale better than 1/N, since the values from the
#   sub-regions will be obtained using a procedure which explicitly minimizes
#   their variance. To accommodate this behavior the MISER algorithm allows the
#   total variance to depend on a scaling parameter <tt>alpha</tt>. The authors of the
#   original paper describing MISER recommend the value <tt>alpha</tt> = 2 as a good
#   choice, obtained from numerical experiments, and this is used as the default
#   value in this implementation.
# ---
# * GSL::Monte::Miser::Params#dither
# * GSL::Monte::Miser::Params#dither=
#
#   This parameter introduces a random fractional variation of size <tt>dither</tt>
#   into each bisection, which can be used to break the symmetry of integrands
#   which are concentrated near the exact center of the hypercubic integration
#   region. The default value of dither is zero, so no variation is introduced.
#   If needed, a typical value of dither is 0.1.
#
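# A short sketch of the round-trip these accessors imply (the values are
# purely illustrative):
#
#   miser = GSL::Monte::Miser.alloc(3)
#   p = miser.params_get              # GSL::Monte::Miser::Params object
#   p.estimate_frac = 0.2
#   p.min_calls = 100
#   miser.params_set(p)               # write the modified parameters back
#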
# == {}[link:index.html"name="6] Vegas Parameters (GSL-1.13 or later)
# ---
# * GSL::Monte::Vegas#params_get
#
#   Returns the parameters of the integrator state as an instance of the
#   <tt>GSL::Monte::Vegas::Params</tt> class.
# ---
# * GSL::Monte::Vegas#params_set(params)
#
#   Sets the integrator parameters based on values provided in an object
#   <tt>params</tt> of the <tt>GSL::Monte::Vegas::Params</tt> class.
#
# === {}[link:index.html"name="6.1] Accessors of <tt>GSL::Monte::Vegas::Params</tt>
# ---
# * GSL::Monte::Vegas::Params#alpha
# * GSL::Monte::Vegas::Params#alpha=
#
#   Controls the stiffness of the rebinning algorithm. It is typically set
#   between one and two. A value of zero prevents rebinning of the grid.
#   The default value is 1.5.
# ---
# * GSL::Monte::Vegas::Params#iterations
# * GSL::Monte::Vegas::Params#iterations=
#
#   The number of iterations to perform for each call to the routine.
#   The default value is 5 iterations.
# ---
# * GSL::Monte::Vegas::Params#stage
# * GSL::Monte::Vegas::Params#stage=
#
#   Setting this determines the stage of the calculation. Normally, stage = 0,
#   which begins with a new uniform grid and empty weighted average. Calling
#   VEGAS with stage = 1 retains the grid from the previous run but discards
#   the weighted average, so that one can "tune" the grid using a relatively
#   small number of points and then do a large run with stage = 1 on the
#   optimized grid. Setting stage = 2 keeps the grid and the weighted average
#   from the previous run, but may increase (or decrease) the number of
#   histogram bins in the grid depending on the number of calls available.
#   Choosing stage = 3 enters at the main loop, so that nothing is changed,
#   and is equivalent to performing additional iterations in a previous call.
# ---
# * GSL::Monte::Vegas::Params#mode
# * GSL::Monte::Vegas::Params#mode=
#
#   The possible choices are <tt>GSL::VEGAS::MODE_IMPORTANCE</tt>,
#   <tt>GSL::VEGAS::MODE_STRATIFIED</tt> and <tt>GSL::VEGAS::MODE_IMPORTANCE_ONLY</tt>.
#   This determines whether VEGAS will use importance sampling or stratified
#   sampling, or whether it can pick on its own. In low dimensions VEGAS uses
#   strict stratified sampling (more precisely, stratified sampling is chosen
#   if there are fewer than 2 bins per box).
# ---
# * GSL::Monte::Vegas::Params#verbose
# * GSL::Monte::Vegas::Params#verbose=
#
#   Sets the level of information printed by VEGAS. All information is written
#   to the stream <tt>ostream</tt>. The default setting of verbose is -1, which turns
#   off all output. A verbose value of 0 prints summary information about the
#   weighted average and final result, while a value of 1 also displays the
#   grid coordinates. A value of 2 prints information from the rebinning
#   procedure for each iteration.
#
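# The Vegas parameters follow the same get/modify/set pattern as the Miser
# ones; a brief sketch (values are illustrative only):
#
#   vegas = GSL::Monte::Vegas.alloc(3)
#   p = vegas.params_get              # GSL::Monte::Vegas::Params object
#   p.iterations = 10
#   p.verbose = 0                     # print only the summary
#   vegas.params_set(p)
#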
# == {}[link:index.html"name="7] Example
#
#   #!/usr/bin/env ruby
#   require("gsl")
#   include GSL::Monte
#   include Math
#
#   proc_f = Proc.new { |k, dim, params|
#     pi = Math::PI
#     a = 1.0/(pi*pi*pi)
#     a/(1.0 - cos(k[0])*cos(k[1])*cos(k[2]))
#   }
#
#   def display_results(title, result, error)
#     exact = 1.3932039296856768591842462603255
#
#     diff = result - exact
#     printf("%s ==================\n", title)
#     printf("result = % .6f\n", result)
#     printf("sigma  = % .6f\n", error)
#     printf("exact  = % .6f\n", exact)
#     printf("error  = % .6f = %.1g sigma\n", diff, diff.abs/error)
#   end
#
#   dim = 3
#   xl = Vector.alloc(0, 0, 0)
#   xu = Vector.alloc(PI, PI, PI)
#   G = Monte::Function.alloc(proc_f, dim)
#   calls = 500000
#   r = GSL::Rng.alloc(Rng::DEFAULT)
#
#   plain = Monte::Plain.alloc(dim)
#   result, error = G.integrate(xl, xu, dim, calls, r, plain)
#   display_results("plain", result, error)
#
#   miser = Monte::Miser.alloc(dim)
#   result, error = G.integrate(xl, xu, dim, calls, r, miser)
#   display_results("miser", result, error)
#
#   vegas = Monte::Vegas.alloc(dim)
#   result, error = G.integrate(xl, xu, dim, 10000, r, vegas)
#   display_results("vegas warm-up", result, error)
#   puts("converging...")
#   begin
#     result, error = G.integrate(xl, xu, dim, calls/5, r, vegas)
#     printf("result = % .6f sigma = % .6f chisq/dof = %.1f\n",
#            result, error, vegas.chisq)
#   end while (vegas.chisq-1.0).abs > 0.5
#   display_results("vegas final", result, error)
#
# {prev}[link:files/rdoc/ntuple_rdoc.html]
# {next}[link:files/rdoc/siman_rdoc.html]
#
# {Reference index}[link:files/rdoc/ref_rdoc.html]
# {top}[link:files/rdoc/index_rdoc.html]
#
#
data/rdoc/multimin.rdoc
ADDED
@@ -0,0 +1,312 @@
#
# = Multidimensional Minimization
# This chapter describes routines for finding minima of arbitrary
# multidimensional functions. The library provides low level components for a
# variety of iterative minimizers and convergence tests. These can be combined
# by the user to achieve the desired solution, while providing full access to
# the intermediate steps of the algorithms. Each class of methods uses the
# same framework, so that you can switch between minimizers at runtime without
# needing to recompile your program. Each instance of a minimizer keeps track
# of its own state, allowing the minimizers to be used in multi-threaded
# programs.
#
# Contents:
# 1. {Overview}[link:files/rdoc/multimin_rdoc.html#1]
# 1. {Caveats}[link:files/rdoc/multimin_rdoc.html#2]
# 1. {Initializing the Multidimensional Minimizer}[link:files/rdoc/multimin_rdoc.html#3]
# 1. {Providing a function to minimize}[link:files/rdoc/multimin_rdoc.html#4]
# 1. {Iteration}[link:files/rdoc/multimin_rdoc.html#5]
# 1. {Stopping Criteria}[link:files/rdoc/multimin_rdoc.html#6]
# 1. {Examples}[link:files/rdoc/multimin_rdoc.html#7]
#    1. {FdfMinimizer}[link:files/rdoc/multimin_rdoc.html#7.1]
#    1. {FMinimizer}[link:files/rdoc/multimin_rdoc.html#7.2]
#
# == {}[link:index.html"name="1] Overview
# The problem of multidimensional minimization requires finding a point x such
# that the scalar function takes a value which is lower than at any neighboring
# point. For smooth functions the gradient g = \nabla f vanishes at the minimum.
# In general there are no bracketing methods available for the minimization of
# n-dimensional functions. The algorithms proceed from an initial guess using a
# search algorithm which attempts to move in a downhill direction.
#
# Algorithms making use of the gradient of the function perform a
# one-dimensional line minimisation along this direction until the lowest point
# is found to a suitable tolerance. The search direction is then updated with
# local information from the function and its derivatives, and the whole process
# is repeated until the true n-dimensional minimum is found.
#
# The Nelder-Mead Simplex algorithm applies a different strategy. It maintains
# n+1 trial parameter vectors as the vertices of an n-dimensional simplex.
# In each iteration step it tries to improve the worst vertex by a simple
# geometrical transformation until the size of the simplex falls below a given
# tolerance.
#
# Both types of algorithms use a standard framework. The user provides a
# high-level driver for the algorithms, and the library provides the individual
# functions necessary for each of the steps. There are three main phases of the
# iteration. The steps are,
#
# * initialize minimizer state, s, for algorithm T
# * update s using the iteration T
# * test s for convergence, and repeat iteration if necessary
#
# Each iteration step consists either of an improvement to the line-minimisation
# in the current direction or an update to the search direction itself. The
# state for the minimizers is held in a <tt>GSL::MultiMin::FdfMinimizer</tt> or
# a <tt>GSL::MultiMin::FMinimizer</tt> object.
#
# == {}[link:index.html"name="2] Caveats
# Note that the minimization algorithms can only search for one local minimum
# at a time. When there are several local minima in the search area, the first
# minimum to be found will be returned; however it is difficult to predict which
# of the minima this will be. In most cases, no error will be reported if you
# try to find a local minimum in an area where there is more than one.
#
# It is also important to note that the minimization algorithms find local
# minima; there is no way to determine whether a minimum is a global minimum of
# the function in question.
#
#
# == {}[link:index.html"name="3] Initializing the Multidimensional Minimizer
# ---
# * GSL::MultiMin::FdfMinimizer.alloc(type, n)
# * GSL::MultiMin::FMinimizer.alloc(type, n)
#
#   These methods create a minimizer of type <tt>type</tt> for an <tt>n</tt>-dimensional
#   function. The type is given by a String, or by a Ruby constant:
#
#   * <tt>GSL::MultiMin::FdfMinimizer::CONJUGATE_FR</tt> or <tt>"conjugate_fr"</tt>
#   * <tt>GSL::MultiMin::FdfMinimizer::CONJUGATE_PR</tt> or <tt>"conjugate_pr"</tt>
#   * <tt>GSL::MultiMin::FdfMinimizer::VECTOR_BFGS</tt> or <tt>"vector_bfgs"</tt>
#   * <tt>GSL::MultiMin::FdfMinimizer::VECTOR_BFGS2</tt> or <tt>"vector_bfgs2"</tt> (GSL-1.9 or later)
#   * <tt>GSL::MultiMin::FdfMinimizer::STEEPEST_DESCENT</tt> or <tt>"steepest_descent"</tt>
#   * <tt>GSL::MultiMin::FMinimizer::NMSIMPLEX</tt> or <tt>"nmsimplex"</tt>
#   * <tt>GSL::MultiMin::FMinimizer::NMSIMPLEX2RAND</tt> or <tt>"nmsimplex2rand"</tt> (GSL-1.13)
#
#   ex:
#     include GSL::MultiMin
#     m1 = FdfMinimizer.alloc(FdfMinimizer::CONJUGATE_FR, 2)
#     m2 = FdfMinimizer.alloc("steepest_descent", 4)
#     m3 = FMinimizer.alloc(FMinimizer::NMSIMPLEX, 3)
#     m4 = FMinimizer.alloc("nmsimplex", 2)
#
# ---
# * GSL::MultiMin::FdfMinimizer#set(func, x, step_size, tol)
#
#   This method initializes the minimizer <tt>self</tt> to minimize the function
#   <tt>func</tt> (a <tt>GSL::MultiMin::Function_fdf</tt> object, see below) starting from
#   the initial point <tt>x</tt> (<tt>GSL::Vector</tt>). The size of the first trial step is
#   given by <tt>step_size</tt> (<tt>Vector</tt>). The accuracy of the line minimization is
#   specified by <tt>tol</tt>.
#
# ---
# * GSL::MultiMin::FMinimizer#set(func, x, step_size)
#
#   This method initializes the minimizer <tt>self</tt> to minimize the function <tt>func</tt>,
#   starting from the initial point <tt>x</tt> (Vector). The size of the initial trial steps
#   is given in the vector <tt>step_size</tt>.
#
# ---
# * GSL::MultiMin::FdfMinimizer#name
# * GSL::MultiMin::FMinimizer#name
#
#   These return the name of the minimizer <tt>self</tt>.
#
# == {}[link:index.html"name="4] Providing a function to minimize
# You must provide a parametric function of <tt>n</tt> variables for the minimizers to
# operate on. You may also need to provide a routine which calculates the gradient of the
# function. In order to allow for general parameters the functions are defined by the
# classes <tt>GSL::MultiMin::Function_fdf</tt> and <tt>GSL::MultiMin::Function</tt>.
#
# ---
# * GSL::MultiMin::Function_fdf.alloc(proc_f, proc_df, n)
# * GSL::MultiMin::Function_fdf.alloc(proc_f, proc_df, proc_fdf, n)
# * GSL::MultiMin::Function_fdf#set_procs(proc_f, proc_df)
# * GSL::MultiMin::Function_fdf#set_procs(proc_f, proc_df, n)
# * GSL::MultiMin::Function_fdf#set_procs(proc_f, proc_df, proc_fdf, n)
# * GSL::MultiMin::Function_fdf#set_params(params)
#
#   See the example below.
#
#     include GSL::MultiMin
#
#     my_f = Proc.new { |v, params|
#       x = v[0]; y = v[1]
#       p0 = params[0]; p1 = params[1]
#       10.0*(x - p0)*(x - p0) + 20.0*(y - p1)*(y - p1) + 30.0
#     }
#
#     my_df = Proc.new { |v, params, df|
#       x = v[0]; y = v[1]
#       p0 = params[0]; p1 = params[1]
#       df[0] = 20.0*(x-p0)
#       df[1] = 40.0*(y-p1)
#     }
#
#     my_func = Function_fdf.alloc(my_f, my_df, 2)
#     my_func.set_params([1.0, 2.0])      # parameters
#
# ---
# * GSL::MultiMin::Function.alloc(proc_f, n)
# * GSL::MultiMin::Function#set_proc(proc_f)
# * GSL::MultiMin::Function#set_proc(proc_f, n)
# * GSL::MultiMin::Function#set_params(params)
#
#   See the example below.
#
#     include GSL::MultiMin
#
#     np = 2
#     my_f = Proc.new { |v, params|
#       x = v[0]; y = v[1]
#       p0 = params[0]; p1 = params[1]
#       10.0*(x - p0)*(x - p0) + 20.0*(y - p1)*(y - p1) + 30.0
#     }
#
#     my_func = Function.alloc(my_f, np)
#     my_func.set_params([1.0, 2.0])      # parameters
#
# == {}[link:index.html"name="5] Iteration
# ---
# * GSL::MultiMin::FdfMinimizer#iterate
# * GSL::MultiMin::FMinimizer#iterate
#
#   These methods perform a single iteration of the minimizer <tt>self</tt>.
#   If the iteration encounters an unexpected problem then an error code will be returned.
#   The minimizer maintains a current best estimate of the minimum at all times.
#   This information can be accessed with the following methods,
#
# ---
# * GSL::MultiMin::FdfMinimizer#x
# * GSL::MultiMin::FdfMinimizer#minimum
# * GSL::MultiMin::FdfMinimizer#gradient
# * GSL::MultiMin::FMinimizer#x
# * GSL::MultiMin::FMinimizer#minimum
# * GSL::MultiMin::FMinimizer#size
#
#   These methods return the current best estimate of the location of the minimum,
#   the value of the function at that point, its gradient, and the minimizer-specific
#   characteristic size for the minimizer <tt>self</tt>.
#
# ---
# * GSL::MultiMin::FdfMinimizer#restart
#
#   This method resets the minimizer <tt>self</tt> to use the current point as a new
#   starting point.
#
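# For illustration, a single iteration step and the accessors above might be
# combined as in this minimal sketch (it assumes <tt>my_func</tt> and a starting
# vector <tt>x</tt> have been set up as in the examples below):
#
#   minimizer = FdfMinimizer.alloc("vector_bfgs2", 2)
#   minimizer.set(my_func, x, 0.01, 1e-4)
#   status   = minimizer.iterate
#   best_x   = minimizer.x          # current best position (GSL::Vector)
#   best_f   = minimizer.minimum    # function value at that point
#   gradient = minimizer.gradient   # gradient at the current point
#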
# == {}[link:index.html"name="6] Stopping Criteria
# A minimization procedure should stop when one of the following conditions is true:
# * A minimum has been found to within the user-specified precision.
# * A user-specified maximum number of iterations has been reached.
# * An error has occurred.
#
# The handling of these conditions is under user control. The methods below allow the
# user to test the precision of the current result.
#
# ---
# * GSL::MultiMin::FdfMinimizer#test_gradient(epsabs)
# * GSL::MultiMin::FdfMinimizer.test_gradient(g, epsabs)
#
#   These methods test the norm of the gradient <tt>g</tt> against the absolute tolerance
#   <tt>epsabs</tt>. The gradient of a multidimensional function goes to zero at a minimum.
#   The tests return <tt>GSL::SUCCESS</tt> if the following condition is achieved,
#     |g| < epsabs
#   and return <tt>GSL::CONTINUE</tt> otherwise. A suitable choice of <tt>epsabs</tt> can
#   be made from the desired accuracy in the function for small variations in <tt>x</tt>.
#   The relationship between these quantities is given by <tt>\delta f = g \delta x</tt>.
#
# ---
# * GSL::MultiMin::FdfMinimizer#test_size(epsabs)
# * GSL::MultiMin::FdfMinimizer.test_size(size, epsabs)
#
#   These methods test the minimizer-specific characteristic <tt>size</tt>
#   (if applicable to the minimizer being used) against the absolute tolerance <tt>epsabs</tt>.
#   The tests return <tt>GSL::SUCCESS</tt> if the size is smaller than the tolerance,
#   otherwise <tt>GSL::CONTINUE</tt> is returned.
#
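# Continuing the sketch above, the class-method form can be applied directly to
# a gradient vector obtained from the accessors (the tolerance is arbitrary):
#
#   g = minimizer.gradient
#   status = FdfMinimizer.test_gradient(g, 1e-3)
#   # GSL::SUCCESS once |g| < 1e-3, GSL::CONTINUE otherwise
#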
# == {}[link:index.html"name="7] Examples
#
# === {}[link:index.html"name="7.1] FdfMinimizer
#   #!/usr/bin/env ruby
#   require("gsl")
#   include GSL::MultiMin
#
#   my_f = Proc.new { |v, params|
#     x = v[0]; y = v[1]
#     p0 = params[0]; p1 = params[1]
#     10.0*(x - p0)*(x - p0) + 20.0*(y - p1)*(y - p1) + 30.0
#   }
#
#   my_df = Proc.new { |v, params, df|
#     x = v[0]; y = v[1]
#     p0 = params[0]; p1 = params[1]
#     df[0] = 20.0*(x-p0)
#     df[1] = 40.0*(y-p1)
#   }
#
#   my_func = Function_fdf.alloc(my_f, my_df, 2)
#   my_func.set_params([1.0, 2.0])      # parameters
#
#   x = Vector.alloc(5.0, 7.0)          # starting point
#
#   minimizer = FdfMinimizer.alloc("conjugate_fr", 2)
#   minimizer.set(my_func, x, 0.01, 1e-4)
#
#   iter = 0
#   begin
#     iter += 1
#     status = minimizer.iterate()
#     status = minimizer.test_gradient(1e-3)
#     if status == GSL::SUCCESS
#       puts("Minimum found at")
#     end
#     x = minimizer.x
#     f = minimizer.f
#     printf("%5d %.5f %.5f %10.5f\n", iter, x[0], x[1], f)
#   end while status == GSL::CONTINUE and iter < 100
#
# === {}[link:index.html"name="7.2] FMinimizer
#   #!/usr/bin/env ruby
#   require("gsl")
#   include GSL::MultiMin
#
#   np = 2
#
#   my_f = Proc.new { |v, params|
#     x = v[0]; y = v[1]
#     p0 = params[0]; p1 = params[1]
#     10.0*(x - p0)*(x - p0) + 20.0*(y - p1)*(y - p1) + 30.0
#   }
#
#   my_func = Function.alloc(my_f, np)
#   my_func.set_params([1.0, 2.0])      # parameters
#
#   x = Vector.alloc([5, 7])
#   ss = Vector.alloc(np)
#   ss.set_all(1.0)
#
#   minimizer = FMinimizer.alloc("nmsimplex", np)
#   minimizer.set(my_func, x, ss)
#
#   iter = 0
#   begin
#     iter += 1
#     status = minimizer.iterate()
#     status = minimizer.test_size(1e-2)
#     if status == GSL::SUCCESS
#       puts("converged to minimum at")
#     end
#     x = minimizer.x
#     printf("%5d ", iter)
#     for i in 0...np do
#       printf("%10.3e ", x[i])
#     end
#     printf("f() = %7.3f size = %.3f\n", minimizer.fval, minimizer.size)
#   end while status == GSL::CONTINUE and iter < 100
#
# {prev}[link:files/rdoc/multiroot_rdoc.html]
# {next}[link:files/rdoc/fit_rdoc.html]
#
# {Reference index}[link:files/rdoc/ref_rdoc.html]
# {top}[link:files/rdoc/index_rdoc.html]
#
#