pnmatrix 1.2.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/ext/nmatrix/binary_format.txt +53 -0
- data/ext/nmatrix/data/complex.h +388 -0
- data/ext/nmatrix/data/data.cpp +274 -0
- data/ext/nmatrix/data/data.h +651 -0
- data/ext/nmatrix/data/meta.h +64 -0
- data/ext/nmatrix/data/ruby_object.h +386 -0
- data/ext/nmatrix/extconf.rb +70 -0
- data/ext/nmatrix/math/asum.h +99 -0
- data/ext/nmatrix/math/cblas_enums.h +36 -0
- data/ext/nmatrix/math/cblas_templates_core.h +507 -0
- data/ext/nmatrix/math/gemm.h +241 -0
- data/ext/nmatrix/math/gemv.h +178 -0
- data/ext/nmatrix/math/getrf.h +255 -0
- data/ext/nmatrix/math/getrs.h +121 -0
- data/ext/nmatrix/math/imax.h +82 -0
- data/ext/nmatrix/math/laswp.h +165 -0
- data/ext/nmatrix/math/long_dtype.h +62 -0
- data/ext/nmatrix/math/magnitude.h +54 -0
- data/ext/nmatrix/math/math.h +751 -0
- data/ext/nmatrix/math/nrm2.h +165 -0
- data/ext/nmatrix/math/rot.h +117 -0
- data/ext/nmatrix/math/rotg.h +106 -0
- data/ext/nmatrix/math/scal.h +71 -0
- data/ext/nmatrix/math/trsm.h +336 -0
- data/ext/nmatrix/math/util.h +162 -0
- data/ext/nmatrix/math.cpp +1368 -0
- data/ext/nmatrix/nm_memory.h +60 -0
- data/ext/nmatrix/nmatrix.cpp +285 -0
- data/ext/nmatrix/nmatrix.h +476 -0
- data/ext/nmatrix/ruby_constants.cpp +151 -0
- data/ext/nmatrix/ruby_constants.h +106 -0
- data/ext/nmatrix/ruby_nmatrix.c +3130 -0
- data/ext/nmatrix/storage/common.cpp +77 -0
- data/ext/nmatrix/storage/common.h +183 -0
- data/ext/nmatrix/storage/dense/dense.cpp +1096 -0
- data/ext/nmatrix/storage/dense/dense.h +129 -0
- data/ext/nmatrix/storage/list/list.cpp +1628 -0
- data/ext/nmatrix/storage/list/list.h +138 -0
- data/ext/nmatrix/storage/storage.cpp +730 -0
- data/ext/nmatrix/storage/storage.h +99 -0
- data/ext/nmatrix/storage/yale/class.h +1139 -0
- data/ext/nmatrix/storage/yale/iterators/base.h +143 -0
- data/ext/nmatrix/storage/yale/iterators/iterator.h +131 -0
- data/ext/nmatrix/storage/yale/iterators/row.h +450 -0
- data/ext/nmatrix/storage/yale/iterators/row_stored.h +140 -0
- data/ext/nmatrix/storage/yale/iterators/row_stored_nd.h +169 -0
- data/ext/nmatrix/storage/yale/iterators/stored_diagonal.h +124 -0
- data/ext/nmatrix/storage/yale/math/transpose.h +110 -0
- data/ext/nmatrix/storage/yale/yale.cpp +2074 -0
- data/ext/nmatrix/storage/yale/yale.h +203 -0
- data/ext/nmatrix/types.h +55 -0
- data/ext/nmatrix/util/io.cpp +279 -0
- data/ext/nmatrix/util/io.h +115 -0
- data/ext/nmatrix/util/sl_list.cpp +627 -0
- data/ext/nmatrix/util/sl_list.h +144 -0
- data/ext/nmatrix/util/util.h +78 -0
- data/lib/nmatrix/blas.rb +378 -0
- data/lib/nmatrix/cruby/math.rb +744 -0
- data/lib/nmatrix/enumerate.rb +253 -0
- data/lib/nmatrix/homogeneous.rb +241 -0
- data/lib/nmatrix/io/fortran_format.rb +138 -0
- data/lib/nmatrix/io/harwell_boeing.rb +221 -0
- data/lib/nmatrix/io/market.rb +263 -0
- data/lib/nmatrix/io/point_cloud.rb +189 -0
- data/lib/nmatrix/jruby/decomposition.rb +24 -0
- data/lib/nmatrix/jruby/enumerable.rb +13 -0
- data/lib/nmatrix/jruby/error.rb +4 -0
- data/lib/nmatrix/jruby/math.rb +501 -0
- data/lib/nmatrix/jruby/nmatrix_java.rb +840 -0
- data/lib/nmatrix/jruby/operators.rb +283 -0
- data/lib/nmatrix/jruby/slice.rb +264 -0
- data/lib/nmatrix/lapack_core.rb +181 -0
- data/lib/nmatrix/lapack_plugin.rb +44 -0
- data/lib/nmatrix/math.rb +953 -0
- data/lib/nmatrix/mkmf.rb +100 -0
- data/lib/nmatrix/monkeys.rb +137 -0
- data/lib/nmatrix/nmatrix.rb +1172 -0
- data/lib/nmatrix/rspec.rb +75 -0
- data/lib/nmatrix/shortcuts.rb +1163 -0
- data/lib/nmatrix/version.rb +39 -0
- data/lib/nmatrix/yale_functions.rb +118 -0
- data/lib/nmatrix.rb +28 -0
- data/spec/00_nmatrix_spec.rb +892 -0
- data/spec/01_enum_spec.rb +196 -0
- data/spec/02_slice_spec.rb +407 -0
- data/spec/03_nmatrix_monkeys_spec.rb +80 -0
- data/spec/2x2_dense_double.mat +0 -0
- data/spec/4x4_sparse.mat +0 -0
- data/spec/4x5_dense.mat +0 -0
- data/spec/blas_spec.rb +215 -0
- data/spec/elementwise_spec.rb +311 -0
- data/spec/homogeneous_spec.rb +100 -0
- data/spec/io/fortran_format_spec.rb +88 -0
- data/spec/io/harwell_boeing_spec.rb +98 -0
- data/spec/io/test.rua +9 -0
- data/spec/io_spec.rb +159 -0
- data/spec/lapack_core_spec.rb +482 -0
- data/spec/leakcheck.rb +16 -0
- data/spec/math_spec.rb +1363 -0
- data/spec/nmatrix_yale_resize_test_associations.yaml +2802 -0
- data/spec/nmatrix_yale_spec.rb +286 -0
- data/spec/rspec_monkeys.rb +56 -0
- data/spec/rspec_spec.rb +35 -0
- data/spec/shortcuts_spec.rb +474 -0
- data/spec/slice_set_spec.rb +162 -0
- data/spec/spec_helper.rb +172 -0
- data/spec/stat_spec.rb +214 -0
- data/spec/test.pcd +20 -0
- data/spec/utm5940.mtx +83844 -0
- metadata +295 -0
|
@@ -0,0 +1,744 @@
|
|
|
1
|
+
#--
|
|
2
|
+
# = NMatrix
|
|
3
|
+
#
|
|
4
|
+
# A linear algebra library for scientific computation in Ruby.
|
|
5
|
+
# NMatrix is part of SciRuby.
|
|
6
|
+
#
|
|
7
|
+
# NMatrix was originally inspired by and derived from NArray, by
|
|
8
|
+
# Masahiro Tanaka: http://narray.rubyforge.org
|
|
9
|
+
#
|
|
10
|
+
# == Copyright Information
|
|
11
|
+
#
|
|
12
|
+
# SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation
|
|
13
|
+
# NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation
|
|
14
|
+
#
|
|
15
|
+
# Please see LICENSE.txt for additional copyright notices.
|
|
16
|
+
#
|
|
17
|
+
# == Contributing
|
|
18
|
+
#
|
|
19
|
+
# By contributing source code to SciRuby, you agree to be bound by
|
|
20
|
+
# our Contributor Agreement:
|
|
21
|
+
#
|
|
22
|
+
# * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement
|
|
23
|
+
#
|
|
24
|
+
# == math.rb
|
|
25
|
+
#
|
|
26
|
+
# Math functionality for NMatrix, along with any NMatrix instance
|
|
27
|
+
# methods that correspond to ATLAS/BLAS/LAPACK functions (e.g.,
|
|
28
|
+
# laswp).
|
|
29
|
+
#++
|
|
30
|
+
|
|
31
|
+
class NMatrix
|
|
32
|
+
|
|
33
|
+
#
|
|
34
|
+
# call-seq:
|
|
35
|
+
# getrf! -> Array
|
|
36
|
+
#
|
|
37
|
+
# LU factorization of a general M-by-N matrix +A+ using partial pivoting with
|
|
38
|
+
# row interchanges. The LU factorization is A = PLU, where P is a row permutation
|
|
39
|
+
# matrix, L is a lower triangular matrix with unit diagonals, and U is an upper
|
|
40
|
+
# triangular matrix (note that this convention is different from the
|
|
41
|
+
# clapack_getrf behavior, but matches the standard LAPACK getrf).
|
|
42
|
+
# +A+ is overwritten with the elements of L and U (the unit
|
|
43
|
+
# diagonal elements of L are not saved). P is not returned directly and must be
|
|
44
|
+
# constructed from the pivot array ipiv. The row indices in ipiv are indexed
|
|
45
|
+
# starting from 1.
|
|
46
|
+
# Only works for dense matrices.
|
|
47
|
+
#
|
|
48
|
+
# * *Returns* :
|
|
49
|
+
# - The IPIV vector. The L and U matrices are stored in A.
|
|
50
|
+
# * *Raises* :
|
|
51
|
+
# - +StorageTypeError+ -> ATLAS functions only work on dense matrices.
|
|
52
|
+
#
|
|
53
|
+
# In-place LU factorization with partial pivoting (A = PLU), where P is a
# row permutation, L is unit lower triangular, and U is upper triangular —
# the standard LAPACK getrf convention.
#
# On return, self holds L and U (L's unit diagonal is not stored). P is
# described by the returned pivot vector, whose row indices are 1-based.
# Dense matrices only.
#
# * *Returns* :
#   - The IPIV pivot vector; L and U are stored in self.
# * *Raises* :
#   - +StorageTypeError+ -> ATLAS functions only work on dense matrices.
def getrf!
  raise(StorageTypeError, "ATLAS functions only work on dense matrices") unless self.dense?

  # clapack_getrf follows the standard convention only for column-major
  # input (for row-major it unit-scales U and swaps columns instead), so we
  # hand it a transposed copy and transpose the result back. This costs
  # extra memory, but keeps the documented A = PLU convention.
  transposed = self.transpose
  ipiv = NMatrix::LAPACK::clapack_getrf(:col, self.shape[0], self.shape[1], transposed, self.shape[0])
  self[0...self.shape[0], 0...self.shape[1]] = transposed.transpose

  # clapack_getrf reports 0-based pivot rows; LAPACK convention is 1-based.
  ipiv.map! { |row| row + 1 }

  ipiv
end
|
|
73
|
+
|
|
74
|
+
#
|
|
75
|
+
# call-seq:
|
|
76
|
+
# geqrf! -> shape.min x 1 NMatrix
|
|
77
|
+
#
|
|
78
|
+
# QR factorization of a general M-by-N matrix +A+.
|
|
79
|
+
#
|
|
80
|
+
# The QR factorization is A = QR, where Q is orthogonal and R is Upper Triangular
|
|
81
|
+
# +A+ is overwritten with the elements of R and Q with Q being represented by the
|
|
82
|
+
# elements below A's diagonal and an array of scalar factors in the output NMatrix.
|
|
83
|
+
#
|
|
84
|
+
# The matrix Q is represented as a product of elementary reflectors
|
|
85
|
+
# Q = H(1) H(2) . . . H(k), where k = min(m,n).
|
|
86
|
+
#
|
|
87
|
+
# Each H(i) has the form
|
|
88
|
+
#
|
|
89
|
+
# H(i) = I - tau * v * v'
|
|
90
|
+
#
|
|
91
|
+
# http://www.netlib.org/lapack/explore-html/d3/d69/dgeqrf_8f.html
|
|
92
|
+
#
|
|
93
|
+
# Only works for dense matrices.
|
|
94
|
+
#
|
|
95
|
+
# * *Returns* :
|
|
96
|
+
# - Vector TAU. Q and R are stored in A. Q is represented by TAU and A
|
|
97
|
+
# * *Raises* :
|
|
98
|
+
# - +StorageTypeError+ -> LAPACK functions only work on dense matrices.
|
|
99
|
+
#
|
|
100
|
+
# call-seq:
#   geqrf! -> shape.min x 1 NMatrix
#
# In-place QR factorization (A = QR) of a general M-by-N matrix.
#
# Overwrites self with R (upper triangle) plus the Householder vectors
# below the diagonal that, together with the returned TAU scalars,
# implicitly represent Q = H(1) H(2) ... H(k), k = min(m,n), where
# H(i) = I - tau * v * v'.
#
# http://www.netlib.org/lapack/explore-html/d3/d69/dgeqrf_8f.html
#
# * *Returns* :
#   - Vector TAU; Q and R are stored in self.
# * *Raises* :
#   - +NotImplementedError+ -> unless the nmatrix-lapacke plugin is loaded.
def geqrf!
  # The working implementation lives in lib/nmatrix/lapacke.rb; this stub
  # only reports the missing plugin.
  raise(NotImplementedError, "geqrf! requires the nmatrix-lapacke gem")
end
|
|
104
|
+
|
|
105
|
+
#
|
|
106
|
+
# call-seq:
|
|
107
|
+
# ormqr(tau) -> NMatrix
|
|
108
|
+
# ormqr(tau, side, transpose, c) -> NMatrix
|
|
109
|
+
#
|
|
110
|
+
# Returns the product Q * c or c * Q after a call to geqrf! used in QR factorization.
|
|
111
|
+
# +c+ is overwritten with the elements of the result NMatrix if supplied. Q is the orthogonal matrix
|
|
112
|
+
# represented by tau and the calling NMatrix
|
|
113
|
+
#
|
|
114
|
+
# Only works on float types, use unmqr for complex types.
|
|
115
|
+
#
|
|
116
|
+
# == Arguments
|
|
117
|
+
#
|
|
118
|
+
# * +tau+ - vector containing scalar factors of elementary reflectors
|
|
119
|
+
# * +side+ - direction of multiplication [:left, :right]
|
|
120
|
+
# * +transpose+ - apply Q with or without transpose [false, :transpose]
|
|
121
|
+
# * +c+ - NMatrix multplication argument that is overwritten, no argument assumes c = identity
|
|
122
|
+
#
|
|
123
|
+
# * *Returns* :
|
|
124
|
+
#
|
|
125
|
+
# - Q * c or c * Q Where Q may be transposed before multiplication.
|
|
126
|
+
#
|
|
127
|
+
#
|
|
128
|
+
# * *Raises* :
|
|
129
|
+
# - +StorageTypeError+ -> LAPACK functions only work on dense matrices.
|
|
130
|
+
# - +TypeError+ -> Works only on floating point matrices, use unmqr for complex types
|
|
131
|
+
# - +TypeError+ -> c must have the same dtype as the calling NMatrix
|
|
132
|
+
#
|
|
133
|
+
# call-seq:
#   ormqr(tau) -> NMatrix
#   ormqr(tau, side, transpose, c) -> NMatrix
#
# After a geqrf! call, computes Q * c or c * Q, where Q is the orthogonal
# matrix encoded by +tau+ and the receiver. +c+ is overwritten with the
# result when supplied; with no +c+, the identity is used (yielding Q).
# Float dtypes only — use #unmqr for complex dtypes.
#
# == Arguments
#
# * +tau+ - scalar factors of the elementary reflectors (from geqrf!)
# * +side+ - multiply from the :left or :right
# * +transpose+ - false, or :transpose to apply Q transposed
# * +c+ - optional NMatrix operand, overwritten with the product
#
# * *Raises* :
#   - +NotImplementedError+ -> unless the nmatrix-lapacke plugin is loaded.
def ormqr(tau, side=:left, transpose=false, c=nil)
  # The working implementation lives in lib/nmatrix/lapacke.rb; this stub
  # only reports the missing plugin.
  raise(NotImplementedError, "ormqr requires the nmatrix-lapacke gem")
end
|
|
138
|
+
|
|
139
|
+
#
|
|
140
|
+
# call-seq:
|
|
141
|
+
# unmqr(tau) -> NMatrix
|
|
142
|
+
# unmqr(tau, side, transpose, c) -> NMatrix
|
|
143
|
+
#
|
|
144
|
+
# Returns the product Q * c or c * Q after a call to geqrf! used in QR factorization.
|
|
145
|
+
# +c+ is overwritten with the elements of the result NMatrix if it is supplied. Q is the orthogonal matrix
|
|
146
|
+
# represented by tau and the calling NMatrix
|
|
147
|
+
#
|
|
148
|
+
# Only works on complex types, use ormqr for float types.
|
|
149
|
+
#
|
|
150
|
+
# == Arguments
|
|
151
|
+
#
|
|
152
|
+
# * +tau+ - vector containing scalar factors of elementary reflectors
|
|
153
|
+
# * +side+ - direction of multiplication [:left, :right]
|
|
154
|
+
# * +transpose+ - apply Q as Q or its complex conjugate [false, :complex_conjugate]
|
|
155
|
+
# * +c+ - NMatrix multplication argument that is overwritten, no argument assumes c = identity
|
|
156
|
+
#
|
|
157
|
+
# * *Returns* :
|
|
158
|
+
#
|
|
159
|
+
# - Q * c or c * Q Where Q may be transformed to its complex conjugate before multiplication.
|
|
160
|
+
#
|
|
161
|
+
#
|
|
162
|
+
# * *Raises* :
|
|
163
|
+
# - +StorageTypeError+ -> LAPACK functions only work on dense matrices.
|
|
164
|
+
# - +TypeError+ -> Works only on complex matrices, use ormqr for floating point types
|
|
165
|
+
# - +TypeError+ -> c must have the same dtype as the calling NMatrix
|
|
166
|
+
#
|
|
167
|
+
# call-seq:
#   unmqr(tau) -> NMatrix
#   unmqr(tau, side, transpose, c) -> NMatrix
#
# After a geqrf! call, computes Q * c or c * Q, where Q is the unitary
# matrix encoded by +tau+ and the receiver. +c+ is overwritten with the
# result when supplied; with no +c+, the identity is used (yielding Q).
# Complex dtypes only — use #ormqr for float dtypes.
#
# == Arguments
#
# * +tau+ - scalar factors of the elementary reflectors (from geqrf!)
# * +side+ - multiply from the :left or :right
# * +transpose+ - false, or :complex_conjugate to apply Q's conjugate
# * +c+ - optional NMatrix operand, overwritten with the product
#
# * *Raises* :
#   - +NotImplementedError+ -> unless the nmatrix-lapacke plugin is loaded.
def unmqr(tau, side=:left, transpose=false, c=nil)
  # The working implementation lives in lib/nmatrix/lapacke.rb; this stub
  # only reports the missing plugin.
  raise(NotImplementedError, "unmqr requires the nmatrix-lapacke gem")
end
|
|
171
|
+
|
|
172
|
+
#
|
|
173
|
+
# call-seq:
|
|
174
|
+
# potrf!(upper_or_lower) -> NMatrix
|
|
175
|
+
#
|
|
176
|
+
# Cholesky factorization of a symmetric positive-definite matrix -- or, if complex,
|
|
177
|
+
# a Hermitian positive-definite matrix +A+.
|
|
178
|
+
# The result will be written in either the upper or lower triangular portion of the
|
|
179
|
+
# matrix, depending on whether the argument is +:upper+ or +:lower+.
|
|
180
|
+
# Also the function only reads in the upper or lower part of the matrix,
|
|
181
|
+
# so it doesn't actually have to be symmetric/Hermitian.
|
|
182
|
+
# However, if the matrix (i.e. the symmetric matrix implied by the lower/upper
|
|
183
|
+
# half) is not positive-definite, the function will return nonsense.
|
|
184
|
+
#
|
|
185
|
+
# This functions requires either the nmatrix-atlas or nmatrix-lapacke gem
|
|
186
|
+
# installed.
|
|
187
|
+
#
|
|
188
|
+
# * *Returns* :
|
|
189
|
+
# the triangular portion specified by the parameter
|
|
190
|
+
# * *Raises* :
|
|
191
|
+
# - +StorageTypeError+ -> ATLAS functions only work on dense matrices.
|
|
192
|
+
# - +ShapeError+ -> Must be square.
|
|
193
|
+
# - +NotImplementedError+ -> If called without nmatrix-atlas or nmatrix-lapacke gem
|
|
194
|
+
#
|
|
195
|
+
# call-seq:
#   potrf!(upper_or_lower) -> NMatrix
#
# Cholesky factorization of a symmetric (or Hermitian) positive-definite
# matrix. Writes the factor into the upper or lower triangle, selected by
# +which+ (+:upper+ or +:lower+). Only that half of the matrix is read, so
# the input need not actually be symmetric — but the result is meaningless
# if the implied symmetric matrix is not positive-definite.
#
# * *Raises* :
#   - +NotImplementedError+ -> unless nmatrix-atlas or nmatrix-lapacke is loaded.
def potrf!(which)
  # The working implementation is provided by the plugin gems; this stub
  # only reports the missing plugin.
  raise(NotImplementedError, "potrf! requires either the nmatrix-atlas or nmatrix-lapacke gem")
end
|
|
199
|
+
|
|
200
|
+
# Convenience wrapper: in-place Cholesky factorization into the upper
# triangle. Equivalent to potrf!(:upper).
def potrf_upper!
  potrf!(:upper)
end
|
|
203
|
+
|
|
204
|
+
# Convenience wrapper: in-place Cholesky factorization into the lower
# triangle. Equivalent to potrf!(:lower).
def potrf_lower!
  potrf!(:lower)
end
|
|
207
|
+
|
|
208
|
+
|
|
209
|
+
#
|
|
210
|
+
# call-seq:
|
|
211
|
+
# factorize_cholesky -> [upper NMatrix, lower NMatrix]
|
|
212
|
+
#
|
|
213
|
+
# Calculates the Cholesky factorization of a matrix and returns the
|
|
214
|
+
# upper and lower matrices such that A=LU and L=U*, where * is
|
|
215
|
+
# either the transpose or conjugate transpose.
|
|
216
|
+
#
|
|
217
|
+
# Unlike potrf!, this method requires that the original matrix is
|
|
218
|
+
# symmetric or Hermitian. However, it is still your responsibility to make
|
|
219
|
+
# sure it is positive-definite.
|
|
220
|
+
# call-seq:
#   factorize_cholesky -> [upper NMatrix, lower NMatrix]
#
# Cholesky factorization: returns [U, L] with A = LU and U = L*, where *
# is the transpose (conjugate transpose for complex dtypes).
#
# Unlike potrf!, this requires the receiver to be symmetric/Hermitian.
# Positive-definiteness is still the caller's responsibility.
def factorize_cholesky
  raise "Matrix must be symmetric/Hermitian for Cholesky factorization" unless self.hermitian?
  # Factor into the lower triangle, then zero everything above it; U is
  # just L's (conjugate) transpose.
  lower = self.clone.potrf_lower!.tril!
  upper = lower.conjugate_transpose
  [upper, lower]
end
|
|
226
|
+
|
|
227
|
+
#
|
|
228
|
+
# call-seq:
|
|
229
|
+
# factorize_lu -> ...
|
|
230
|
+
#
|
|
231
|
+
# LU factorization of a matrix. Optionally return the permutation matrix.
|
|
232
|
+
# Note that computing the permutation matrix will introduce a slight memory
|
|
233
|
+
# and time overhead.
|
|
234
|
+
#
|
|
235
|
+
# == Arguments
|
|
236
|
+
#
|
|
237
|
+
# +with_permutation_matrix+ - If set to *true* will return the permutation
|
|
238
|
+
# matrix along with the LU factorization as a second return value.
|
|
239
|
+
#
|
|
240
|
+
# call-seq:
#   factorize_lu -> NMatrix
#   factorize_lu(true) -> [NMatrix, NMatrix]
#
# LU factorization of a matrix, optionally also returning the permutation
# matrix (which adds a small time and memory overhead).
#
# == Arguments
#
# * +with_permutation_matrix+ - when truthy, return the permutation matrix
#   along with the LU factorization as a second value.
#
# * *Raises* :
#   - +NotImplementedError+ -> unless dense and 2-dimensional.
def factorize_lu(with_permutation_matrix = nil)
  raise(NotImplementedError, "only implemented for dense storage") unless self.stype == :dense
  raise(NotImplementedError, "matrix is not 2-dimensional") unless self.dimensions == 2

  # getrf! factorizes in place, so work on a copy.
  factored = self.clone
  pivot = factored.getrf!

  if with_permutation_matrix
    [factored, FactorizeLUMethods.permutation_matrix_from(pivot)]
  else
    factored
  end
end
|
|
250
|
+
|
|
251
|
+
#
|
|
252
|
+
# call-seq:
|
|
253
|
+
# factorize_qr -> [Q,R]
|
|
254
|
+
#
|
|
255
|
+
# QR factorization of a matrix without column pivoting.
|
|
256
|
+
# Q is orthogonal and R is upper triangular if input is square or upper trapezoidal if
|
|
257
|
+
# input is rectangular.
|
|
258
|
+
#
|
|
259
|
+
# Only works for dense matrices.
|
|
260
|
+
#
|
|
261
|
+
# * *Returns* :
|
|
262
|
+
# - Array containing Q and R matrices
|
|
263
|
+
#
|
|
264
|
+
# * *Raises* :
|
|
265
|
+
# - +StorageTypeError+ -> only implemented for dense storage.
|
|
266
|
+
# - +ShapeError+ -> Input must be a 2-dimensional matrix to have a QR decomposition.
|
|
267
|
+
#
|
|
268
|
+
# call-seq:
#   factorize_qr -> [Q, R]
#
# QR factorization without column pivoting. Q is orthogonal; R is upper
# triangular for square input, upper trapezoidal otherwise. Dense only.
#
# * *Returns* :
#   - Array containing the Q and R matrices.
# * *Raises* :
#   - +NotImplementedError+ -> only implemented for dense storage.
#   - +ShapeError+ -> input must be a 2-dimensional matrix.
def factorize_qr
  raise(NotImplementedError, "only implemented for dense storage") unless self.stype == :dense
  raise(ShapeError, "Input must be a 2-dimensional matrix to have a QR decomposition") unless self.dim == 2

  rows, columns = self.shape

  # geqrf! overwrites its receiver with R plus the Householder vectors.
  rmat = self.clone
  tau = rmat.geqrf!

  # Recover the explicit Q from the Householder representation; complex
  # dtypes use the unitary variant.
  qmat = self.complex_dtype? ? rmat.unmqr(tau) : rmat.ormqr(tau)

  # Zero out everything below the diagonal to obtain R. A tall matrix
  # (rows > columns) is upper trapezoidal: its square top is triangular
  # and the remaining rows are all zero.
  if rows <= columns
    rmat.upper_triangle!
  else
    rmat[0...columns, 0...columns].upper_triangle!
    rmat[columns...rows, 0...columns] = 0
  end

  [qmat, rmat]
end
|
|
290
|
+
|
|
291
|
+
# Solve the matrix equation AX = B, where A is +self+, B is the first
|
|
292
|
+
# argument, and X is returned. A must be a nxn square matrix, while B must be
|
|
293
|
+
# nxm. Only works with dense matrices and non-integer, non-object data types.
|
|
294
|
+
#
|
|
295
|
+
# == Arguments
|
|
296
|
+
#
|
|
297
|
+
# * +b+ - the right hand side
|
|
298
|
+
#
|
|
299
|
+
# == Options
|
|
300
|
+
#
|
|
301
|
+
# * +form+ - Signifies the form of the matrix A in the linear system AX=B.
|
|
302
|
+
# If not set then it defaults to +:general+, which uses an LU solver.
|
|
303
|
+
# Other possible values are +:lower_tri+, +:upper_tri+ and +:pos_def+ (alternatively,
|
|
304
|
+
# non-abbreviated symbols +:lower_triangular+, +:upper_triangular+,
|
|
305
|
+
# and +:positive_definite+ can be used.
|
|
306
|
+
# If +:lower_tri+ or +:upper_tri+ is set, then a specialized linear solver for linear
|
|
307
|
+
# systems AX=B with a lower or upper triangular matrix A is used. If +:pos_def+ is chosen,
|
|
308
|
+
# then the linear system is solved via the Cholesky factorization.
|
|
309
|
+
# Note that when +:lower_tri+ or +:upper_tri+ is used, then the algorithm just assumes that
|
|
310
|
+
# all entries in the lower/upper triangle of the matrix are zeros without checking (which
|
|
311
|
+
# can be useful in certain applications).
|
|
312
|
+
#
|
|
313
|
+
#
|
|
314
|
+
# == Usage
|
|
315
|
+
#
|
|
316
|
+
# a = NMatrix.new [2,2], [3,1,1,2], dtype: dtype
|
|
317
|
+
# b = NMatrix.new [2,1], [9,8], dtype: dtype
|
|
318
|
+
# a.solve(b)
|
|
319
|
+
#
|
|
320
|
+
# # solve an upper triangular linear system more efficiently:
|
|
321
|
+
# require 'benchmark'
|
|
322
|
+
# require 'nmatrix/lapacke'
|
|
323
|
+
# rand_mat = NMatrix.random([10000, 10000], dtype: :float64)
|
|
324
|
+
# a = rand_mat.triu
|
|
325
|
+
# b = NMatrix.random([10000, 10], dtype: :float64)
|
|
326
|
+
# Benchmark.bm(10) do |bm|
|
|
327
|
+
# bm.report('general') { a.solve(b) }
|
|
328
|
+
# bm.report('upper_tri') { a.solve(b, form: :upper_tri) }
|
|
329
|
+
# end
|
|
330
|
+
# # user system total real
|
|
331
|
+
# # general 73.170000 0.670000 73.840000 ( 73.810086)
|
|
332
|
+
# # upper_tri 0.180000 0.000000 0.180000 ( 0.182491)
|
|
333
|
+
#
|
|
334
|
+
# Solve AX = B for X, where A is self (n x n, dense, non-integer,
# non-object dtype) and B is +b+ (n x m). Returns X without modifying b.
#
# opts[:form] selects the solver:
#   :general (default)              — LU via clapack_getrf/getrs
#   :upper_tri / :upper_triangular  — triangular solve (assumes zeros below)
#   :lower_tri / :lower_triangular  — triangular solve (assumes zeros above)
#   :pos_def / :positive_definite   — Cholesky, then two triangular solves
def solve(b, opts = {})
  raise(ShapeError, "Must be called on square matrix") unless self.dim == 2 && self.shape[0] == self.shape[1]
  raise(ShapeError, "number of rows of b must equal number of cols of self") if
    self.shape[1] != b.shape[0]
  raise(ArgumentError, "only works with dense matrices") if self.stype != :dense
  raise(ArgumentError, "only works for non-integer, non-object dtypes") if
    integer_dtype? or object_dtype? or b.integer_dtype? or b.object_dtype?

  opts = { form: :general }.merge(opts)
  x     = b.clone
  n     = self.shape[0]
  nrhs  = b.shape[1]

  case opts[:form]
  when :general
    # getrf factorizes in place, so protect self with a copy.
    clone = self.clone
    ipiv = NMatrix::LAPACK.clapack_getrf(:row, n, n, clone, n)
    # When we call clapack_getrs with :row, actually only the first matrix
    # (i.e. clone) is interpreted as row-major, while the other matrix (x)
    # is interpreted as column-major. See here: http://math-atlas.sourceforge.net/faq.html#RowSolve
    # So we must transpose x before and after
    # calling it.
    x = x.transpose
    NMatrix::LAPACK.clapack_getrs(:row, :no_transpose, n, nrhs, clone, n, ipiv, x, n)
    x.transpose
  when :upper_tri, :upper_triangular
    raise(ArgumentError, "upper triangular solver does not work with complex dtypes") if
      complex_dtype? or b.complex_dtype?
    # this is the correct function call; see https://github.com/SciRuby/nmatrix/issues/374
    # trsm solves in place, overwriting x with the solution.
    NMatrix::BLAS::cblas_trsm(:row, :left, :upper, false, :nounit, n, nrhs, 1.0, self, n, x, nrhs)
    x
  when :lower_tri, :lower_triangular
    raise(ArgumentError, "lower triangular solver does not work with complex dtypes") if
      complex_dtype? or b.complex_dtype?
    # trsm solves in place, overwriting x with the solution.
    NMatrix::BLAS::cblas_trsm(:row, :left, :lower, false, :nounit, n, nrhs, 1.0, self, n, x, nrhs)
    x
  when :pos_def, :positive_definite
    # A = LU with U = L*: solve L z = b, then U x = z, each with the
    # specialized triangular solver above.
    u, l = self.factorize_cholesky
    z = l.solve(b, form: :lower_tri)
    u.solve(z, form: :upper_tri)
  else
    raise(ArgumentError, "#{opts[:form]} is not a valid form option")
  end
end
|
|
378
|
+
|
|
379
|
+
#
|
|
380
|
+
# call-seq:
|
|
381
|
+
# least_squares(b) -> NMatrix
|
|
382
|
+
# least_squares(b, tolerance: 10e-10) -> NMatrix
|
|
383
|
+
#
|
|
384
|
+
# Provides the linear least squares approximation of an under-determined system
|
|
385
|
+
# using QR factorization provided that the matrix is not rank-deficient.
|
|
386
|
+
#
|
|
387
|
+
# Only works for dense matrices.
|
|
388
|
+
#
|
|
389
|
+
# * *Arguments* :
|
|
390
|
+
# - +b+ -> The solution column vector NMatrix of A * X = b.
|
|
391
|
+
# - +tolerance:+ -> Absolute tolerance to check if a diagonal element in A = QR is near 0
|
|
392
|
+
#
|
|
393
|
+
# * *Returns* :
|
|
394
|
+
# - NMatrix that is a column vector with the LLS solution
|
|
395
|
+
#
|
|
396
|
+
# * *Raises* :
|
|
397
|
+
# - +ArgumentError+ -> least squares approximation only works for non-complex types
|
|
398
|
+
# - +ShapeError+ -> system must be under-determined ( rows > columns )
|
|
399
|
+
#
|
|
400
|
+
# Examples :-
|
|
401
|
+
#
|
|
402
|
+
# a = NMatrix.new([3,2], [2.0, 0, -1, 1, 0, 2])
|
|
403
|
+
#
|
|
404
|
+
# b = NMatrix.new([3,1], [1.0, 0, -1])
|
|
405
|
+
#
|
|
406
|
+
# a.least_squares(b)
|
|
407
|
+
# =>[
|
|
408
|
+
# [ 0.33333333333333326 ]
|
|
409
|
+
# [ -0.3333333333333334 ]
|
|
410
|
+
# ]
|
|
411
|
+
#
|
|
412
|
+
# Linear least squares solution of A * X = b via QR factorization, for an
# over-determined system (rows > columns) that is not rank-deficient.
# Non-complex dtypes only; dense only.
#
# * +b+          - right-hand-side column vector NMatrix.
# * +tolerance:+ - absolute threshold below which a diagonal element of R
#                  triggers a loss-of-precision warning.
#
# Returns a column-vector NMatrix with the least squares solution.
# Raises ArgumentError for complex dtypes or a rank-deficient matrix, and
# ShapeError when the system is not over-determined.
def least_squares(b, tolerance: 10e-6)
  raise(ArgumentError, "least squares approximation only works for non-complex types") if
    self.complex_dtype?

  rows, columns = self.shape

  raise(ShapeError, "system must be under-determined ( rows > columns )") unless
    rows > columns

  # Perform economical QR factorization; geqrf! overwrites r with R plus
  # the Householder vectors representing Q.
  r = self.clone
  tau = r.geqrf!
  # Apply Q' to b without forming Q explicitly.
  q_transpose_b = r.ormqr(tau, :left, :transpose, b)

  # Obtain R from the geqrf! intermediate: triangularize the square top
  # block and zero the rows below it.
  r[0...columns, 0...columns].upper_triangle!
  r[columns...rows, 0...columns] = 0

  diagonal = r.diagonal

  # An exactly-zero diagonal element of R means A has deficient rank.
  raise(ArgumentError, "rank deficient matrix") if diagonal.any? { |x| x == 0 }

  if diagonal.any? { |x| x.abs < tolerance }
    warn "warning: A diagonal element of R in A = QR is close to zero ;" <<
      " indicates a possible loss of precision"
  end

  # Transform the system A * X = B to R1 * X = B2 where B2 = Q1_t * B
  r1 = r[0...columns, 0...columns]
  b2 = q_transpose_b[0...columns]

  nrhs = b2.shape[1]

  # Solve the upper triangular system in place; b2 becomes the solution.
  NMatrix::BLAS::cblas_trsm(:row, :left, :upper, false, :nounit, r1.shape[0], nrhs, 1.0, r1, r1.shape[0], b2, nrhs)
  b2
end
|
|
449
|
+
|
|
450
|
+
#
|
|
451
|
+
# call-seq:
|
|
452
|
+
# gesvd! -> [u, sigma, v_transpose]
|
|
453
|
+
# gesvd! -> [u, sigma, v_conjugate_transpose] # complex
|
|
454
|
+
#
|
|
455
|
+
# Compute the singular value decomposition of a matrix using LAPACK's GESVD function.
|
|
456
|
+
# This is destructive, modifying the source NMatrix. See also #gesdd.
|
|
457
|
+
#
|
|
458
|
+
# Optionally accepts a +workspace_size+ parameter, which will be honored only if it is larger than what LAPACK
|
|
459
|
+
# requires.
|
|
460
|
+
#
|
|
461
|
+
# call-seq:
#   gesvd! -> [u, sigma, v_transpose]
#   gesvd! -> [u, sigma, v_conjugate_transpose] # complex
#
# Singular value decomposition via LAPACK's GESVD. Destructive — modifies
# the receiver. See also #gesdd for the divide-and-conquer variant.
#
# +workspace_size+ is honored only when larger than what LAPACK requires.
def gesvd!(workspace_size = 1)
  NMatrix::LAPACK.gesvd(self, workspace_size)
end
|
|
464
|
+
|
|
465
|
+
#
|
|
466
|
+
# call-seq:
|
|
467
|
+
# gesvd -> [u, sigma, v_transpose]
|
|
468
|
+
# gesvd -> [u, sigma, v_conjugate_transpose] # complex
|
|
469
|
+
#
|
|
470
|
+
# Compute the singular value decomposition of a matrix using LAPACK's GESVD function.
|
|
471
|
+
#
|
|
472
|
+
# Optionally accepts a +workspace_size+ parameter, which will be honored only if it is larger than what LAPACK
|
|
473
|
+
# requires.
|
|
474
|
+
#
|
|
475
|
+
# call-seq:
#   gesvd -> [u, sigma, v_transpose]
#   gesvd -> [u, sigma, v_conjugate_transpose] # complex
#
# Non-destructive singular value decomposition via LAPACK's GESVD: runs
# gesvd! on a copy, leaving the receiver untouched.
#
# +workspace_size+ is honored only when larger than what LAPACK requires.
def gesvd(workspace_size = 1)
  copy = self.clone
  copy.gesvd!(workspace_size)
end
|
|
478
|
+
|
|
479
|
+
|
|
480
|
+
|
|
481
|
+
#
|
|
482
|
+
# call-seq:
|
|
483
|
+
# gesdd! -> [u, sigma, v_transpose]
|
|
484
|
+
# gesdd! -> [u, sigma, v_conjugate_transpose] # complex
|
|
485
|
+
#
|
|
486
|
+
# Compute the singular value decomposition of a matrix using LAPACK's GESDD function. This uses a divide-and-conquer
|
|
487
|
+
# strategy. This is destructive, modifying the source NMatrix. See also #gesvd.
|
|
488
|
+
#
|
|
489
|
+
# Optionally accepts a +workspace_size+ parameter, which will be honored only if it is larger than what LAPACK
|
|
490
|
+
# requires.
|
|
491
|
+
#
|
|
492
|
+
# call-seq:
#   gesdd! -> [u, sigma, v_transpose]
#   gesdd! -> [u, sigma, v_conjugate_transpose] # complex
#
# Singular value decomposition via LAPACK's divide-and-conquer GESDD.
# Destructive — modifies the receiver. See also #gesvd.
#
# +workspace_size+ is honored only when larger than what LAPACK requires.
def gesdd!(workspace_size = nil)
  NMatrix::LAPACK.gesdd(self, workspace_size)
end
|
|
495
|
+
|
|
496
|
+
#
|
|
497
|
+
# call-seq:
|
|
498
|
+
# gesdd -> [u, sigma, v_transpose]
|
|
499
|
+
# gesdd -> [u, sigma, v_conjugate_transpose] # complex
|
|
500
|
+
#
|
|
501
|
+
# Compute the singular value decomposition of a matrix using LAPACK's GESDD function. This uses a divide-and-conquer
|
|
502
|
+
# strategy. See also #gesvd.
|
|
503
|
+
#
|
|
504
|
+
# Optionally accepts a +workspace_size+ parameter, which will be honored only if it is larger than what LAPACK
|
|
505
|
+
# requires.
|
|
506
|
+
#
|
|
507
|
+
# call-seq:
#   gesdd -> [u, sigma, v_transpose]
#   gesdd -> [u, sigma, v_conjugate_transpose] # complex
#
# Non-destructive singular value decomposition via LAPACK's
# divide-and-conquer GESDD: runs gesdd! on a copy. See also #gesvd.
#
# +workspace_size+ is honored only when larger than what LAPACK requires.
def gesdd(workspace_size = nil)
  copy = self.clone
  copy.gesdd!(workspace_size)
end
|
|
510
|
+
|
|
511
|
+
#
|
|
512
|
+
# call-seq:
|
|
513
|
+
# laswp!(ary) -> NMatrix
|
|
514
|
+
#
|
|
515
|
+
# In-place permute the columns of a dense matrix using LASWP according to the order given as an array +ary+.
|
|
516
|
+
#
|
|
517
|
+
# If +:convention+ is +:lapack+, then +ary+ represents a sequence of pair-wise permutations which are
|
|
518
|
+
# performed successively. That is, the i'th entry of +ary+ is the index of the column to swap
|
|
519
|
+
# the i'th column with, having already applied all earlier swaps.
|
|
520
|
+
#
|
|
521
|
+
# If +:convention+ is +:intuitive+, then +ary+ represents the order of columns after the permutation.
|
|
522
|
+
# That is, the i'th entry of +ary+ is the index of the column that will be in position i after the
|
|
523
|
+
# reordering (Matlab-like behaviour). This is the default.
|
|
524
|
+
#
|
|
525
|
+
# Not yet implemented for yale or list.
|
|
526
|
+
#
|
|
527
|
+
# == Arguments
|
|
528
|
+
#
|
|
529
|
+
# * +ary+ - An Array specifying the order of the columns. See above for details.
|
|
530
|
+
#
|
|
531
|
+
# == Options
|
|
532
|
+
#
|
|
533
|
+
# * +:convention+ - Possible values are +:lapack+ and +:intuitive+. Default is +:intuitive+. See above for details.
|
|
534
|
+
#
|
|
535
|
+
# call-seq:
#   laswp!(ary) -> NMatrix
#
# In-place column permutation of a dense matrix via LASWP.
#
# With convention :lapack, +ary+ is a sequence of pairwise swaps: entry i
# is the column to swap column i with, after all earlier swaps.
#
# With convention :intuitive (the default), +ary+ is the desired final
# column order (Matlab-like), and is converted to the swap sequence here.
#
# == Arguments
#
# * +ary+ - Array giving the column order/swaps, per the convention.
#
# == Options
#
# * +:convention+ - +:lapack+ or +:intuitive+ (default).
def laswp!(ary, opts = {})
  raise(StorageTypeError, "ATLAS functions only work on dense matrices") unless self.dense?
  opts = { convention: :intuitive }.merge(opts)

  if opts[:convention] == :intuitive
    unless ary.uniq.length == ary.length
      raise(ArgumentError, "No duplicated entries in the order array are allowed under convention :intuitive")
    end
    cols = self.shape[1]
    # Convert the target ordering into LAPACK's successive pairwise swaps
    # by simulating the swaps on a scratch index array.
    current = (0...cols).to_a
    swaps = []
    (0..cols - 2).each do |i|
      swaps[i] = current.index(ary[i])
      current[i], current[swaps[i]] = current[swaps[i]], current[i]
    end
    swaps[cols - 1] = cols - 1
  else
    swaps = ary
  end

  NMatrix::LAPACK::laswp(self, swaps)
end
|
|
557
|
+
|
|
558
|
+
#
|
|
559
|
+
# call-seq:
|
|
560
|
+
# laswp(ary) -> NMatrix
|
|
561
|
+
#
|
|
562
|
+
# Permute the columns of a dense matrix using LASWP according to the order given in an array +ary+.
|
|
563
|
+
#
|
|
564
|
+
# If +:convention+ is +:lapack+, then +ary+ represents a sequence of pair-wise permutations which are
|
|
565
|
+
# performed successively. That is, the i'th entry of +ary+ is the index of the column to swap
|
|
566
|
+
# the i'th column with, having already applied all earlier swaps.
|
|
567
|
+
#
|
|
568
|
+
# If +:convention+ is +:intuitive+, then +ary+ represents the order of columns after the permutation.
|
|
569
|
+
# That is, the i'th entry of +ary+ is the index of the column that will be in position i after the
|
|
570
|
+
# reordering (Matlab-like behaviour).
|
|
571
|
+
#
|
|
572
|
+
# Not yet implemented for yale or list.
|
|
573
|
+
#
|
|
574
|
+
# == Arguments
|
|
575
|
+
#
|
|
576
|
+
# * +ary+ - An Array specifying the order of the columns. See above for details.
|
|
577
|
+
#
|
|
578
|
+
# == Options
|
|
579
|
+
#
|
|
580
|
+
# * +:convention+ - Possible values are +:lapack+ and +:intuitive+. Default is +:intuitive+. See above for details.
|
|
581
|
+
#
|
|
582
|
+
# call-seq:
#   laswp(ary) -> NMatrix
#
# Non-destructive column permutation via LASWP: applies laswp! to a copy
# of the receiver. +ary+ and +opts+ are interpreted exactly as in #laswp!.
def laswp(ary, opts = {})
  copy = self.clone
  copy.laswp!(ary, opts)
end
|
|
585
|
+
|
|
586
|
+
#
# call-seq:
#     det -> determinant
#
# Compute the determinant via LU factorization (clapack_getrf): multiply the
# diagonal of the factorized matrix and flip the sign once per row swap.
# There is a risk of underflow/overflow for ill-scaled matrices.
#
# A copy of the matrix is made, since clapack_getrf modifies its input.
# For smaller matrices, you may be able to use +#det_exact+.
#
# This method is guaranteed to return the same dtype as the receiver.
# Integer matrices are converted to floating point for the computation,
# as xGETRF can't work on integer matrices; the result is rounded back.
#
# * *Returns* :
#   - The determinant of the matrix, in the matrix's dtype.
# * *Raises* :
#   - +ShapeError+ -> Must be used on square matrices.
#
def det
  raise(ShapeError, "determinant can be calculated only for square matrices") unless self.dim == 2 && self.shape[0] == self.shape[1]

  # getrf is not implemented for integer dtypes, so upcast when necessary.
  working_dtype = self.integer_dtype? ? :float64 : self.dtype
  factorized = self.cast(:dense, working_dtype)

  # Factorize in place; the returned pivot array records the row swaps.
  ipiv = factorized.getrf!

  # Each pivot entry differing from its own position is one row swap.
  # Pivot indices are 1-based, hence the -1 before comparing.
  swaps = 0
  ipiv.each_with_index do |pivot_row, idx|
    swaps += 1 if pivot_row - 1 != idx
  end

  # An odd number of swaps negates the determinant.
  result = swaps % 2 == 1 ? -1 : 1
  [shape[0], shape[1]].min.times do |idx|
    result *= factorized[idx, idx]
  end

  # Round back to an integer when the receiver had an integer dtype,
  # to prevent floating-point rounding errors from leaking out.
  working_dtype != self.dtype ? result.round : result
end
|
|
635
|
+
|
|
636
|
+
#
# call-seq:
#     complex_conjugate -> NMatrix
#     complex_conjugate(new_stype) -> NMatrix
#
# Return the complex conjugate of this matrix as a new copy. See also
# complex_conjugate! for an in-place operation (provided the dtype is
# already +:complex64+ or +:complex128+).
#
# Doesn't work on list matrices, but you can optionally pass in the stype
# you want the result cast to if you're dealing with a list matrix.
#
# * *Arguments* :
#   - +new_stype+ -> stype for the new matrix (defaults to the receiver's stype).
# * *Returns* :
#   - If the original NMatrix isn't complex, the result is a +:complex128+ NMatrix. Otherwise, it's the original dtype.
#
def complex_conjugate(new_stype = self.stype)
  conjugate_dtype = NMatrix::upcast(dtype, :complex64)
  self.cast(new_stype, conjugate_dtype).complex_conjugate!
end
|
|
656
|
+
|
|
657
|
+
#
# call-seq:
#     conjugate_transpose -> NMatrix
#
# Compute the conjugate (Hermitian) transpose of the matrix. If the dtype
# is already complex, only the transpose requires a copy.
#
# * *Returns* :
#   - The conjugate transpose of the matrix as a copy.
#
def conjugate_transpose
  transposed_copy = self.transpose
  transposed_copy.complex_conjugate!
end
|
|
670
|
+
|
|
671
|
+
#
# call-seq:
#     absolute_sum -> Numeric
#
# == Arguments
# - +incx+ -> the skip size (defaults to 1, no skip)
# - +n+ -> the number of elements to include (defaults to size / incx)
#
# Return the sum of the absolute values of the vector's entries. This is
# the BLAS asum routine; for complex entries each element contributes
# |real| + |imag|, matching BLAS semantics.
def asum incx=1, n=nil
  if self.shape == [1]
    # Degenerate single-element vector: no BLAS call needed.
    return self[0].abs unless self.complex_dtype?
    return self[0].real.abs + self[0].imag.abs
  end
  return method_missing(:asum, incx, n) unless vector?
  # Honor an explicit element count; previously +n+ was accepted but ignored.
  NMatrix::BLAS::asum(self, incx, n || self.size / incx)
end
alias :absolute_sum :asum
|
|
689
|
+
|
|
690
|
+
#
# call-seq:
#     norm2 -> Numeric
#
# == Arguments
# - +incx+ -> the skip size (defaults to 1, no skip)
# - +n+ -> the number of elements to include (defaults to size / incx)
#
# Return the 2-norm (Euclidean norm) of the vector. This is the BLAS nrm2 routine.
def nrm2 incx=1, n=nil
  return method_missing(:nrm2, incx, n) unless vector?
  # Honor an explicit element count; previously +n+ was accepted but ignored.
  NMatrix::BLAS::nrm2(self, incx, n || self.size / incx)
end
alias :norm2 :nrm2
|
|
704
|
+
|
|
705
|
+
#
# call-seq:
#     scale! -> NMatrix
#
# == Arguments
# - +alpha+ -> Scalar value used in the operation.
# - +incx+ -> Increment used in the scaling function. Should generally be 1.
# - +n+ -> Number of elements to scale (defaults to size / incx).
#
# This is a destructive method, modifying the source NMatrix. See also #scale.
# BLAS scal will be invoked if provided; otherwise every stored element is
# multiplied in place.
#
# * *Returns* :
#   - self, scaled by +alpha+.
# * *Raises* :
#   - +DataTypeError+ -> +alpha+ cannot be represented in this matrix's dtype.
#
def scale!(alpha, incx=1, n=nil)
  raise(DataTypeError, "Incompatible data type for the scaling factor") unless
      NMatrix::upcast(self.dtype, NMatrix::min_dtype(alpha)) == self.dtype
  # Honor an explicit element count; previously +n+ was accepted but ignored.
  return NMatrix::BLAS::scal(alpha, self, incx, n || self.size / incx) if NMatrix::BLAS.method_defined? :scal
  self.each_stored_with_indices do |e, *i|
    self[*i] = e*alpha
  end
  # The fallback path previously returned whatever the iterator yielded;
  # return the receiver so both branches produce the documented result.
  self
end
|
|
725
|
+
|
|
726
|
+
#
# call-seq:
#     scale -> NMatrix
#
# == Arguments
# - +alpha+ -> Scalar value used in the operation.
# - +inc+ -> Increment used in the scaling function. Should generally be 1.
# - +n+ -> Number of elements of +vector+.
#
# Non-destructive scaling: clone the receiver and scale the copy by +alpha+.
# BLAS scal will be invoked if provided. See also #scale!.
def scale(alpha, incx=1, n=nil)
  scaled_copy = self.clone
  scaled_copy.scale!(alpha, incx, n)
end
|
|
740
|
+
|
|
741
|
+
# Column permutation is implemented by #laswp / #laswp!.
alias_method :permute_columns,  :laswp
alias_method :permute_columns!, :laswp!
|
|
743
|
+
|
|
744
|
+
end
|