pnmatrix 1.2.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/ext/nmatrix/binary_format.txt +53 -0
- data/ext/nmatrix/data/complex.h +388 -0
- data/ext/nmatrix/data/data.cpp +274 -0
- data/ext/nmatrix/data/data.h +651 -0
- data/ext/nmatrix/data/meta.h +64 -0
- data/ext/nmatrix/data/ruby_object.h +386 -0
- data/ext/nmatrix/extconf.rb +70 -0
- data/ext/nmatrix/math/asum.h +99 -0
- data/ext/nmatrix/math/cblas_enums.h +36 -0
- data/ext/nmatrix/math/cblas_templates_core.h +507 -0
- data/ext/nmatrix/math/gemm.h +241 -0
- data/ext/nmatrix/math/gemv.h +178 -0
- data/ext/nmatrix/math/getrf.h +255 -0
- data/ext/nmatrix/math/getrs.h +121 -0
- data/ext/nmatrix/math/imax.h +82 -0
- data/ext/nmatrix/math/laswp.h +165 -0
- data/ext/nmatrix/math/long_dtype.h +62 -0
- data/ext/nmatrix/math/magnitude.h +54 -0
- data/ext/nmatrix/math/math.h +751 -0
- data/ext/nmatrix/math/nrm2.h +165 -0
- data/ext/nmatrix/math/rot.h +117 -0
- data/ext/nmatrix/math/rotg.h +106 -0
- data/ext/nmatrix/math/scal.h +71 -0
- data/ext/nmatrix/math/trsm.h +336 -0
- data/ext/nmatrix/math/util.h +162 -0
- data/ext/nmatrix/math.cpp +1368 -0
- data/ext/nmatrix/nm_memory.h +60 -0
- data/ext/nmatrix/nmatrix.cpp +285 -0
- data/ext/nmatrix/nmatrix.h +476 -0
- data/ext/nmatrix/ruby_constants.cpp +151 -0
- data/ext/nmatrix/ruby_constants.h +106 -0
- data/ext/nmatrix/ruby_nmatrix.c +3130 -0
- data/ext/nmatrix/storage/common.cpp +77 -0
- data/ext/nmatrix/storage/common.h +183 -0
- data/ext/nmatrix/storage/dense/dense.cpp +1096 -0
- data/ext/nmatrix/storage/dense/dense.h +129 -0
- data/ext/nmatrix/storage/list/list.cpp +1628 -0
- data/ext/nmatrix/storage/list/list.h +138 -0
- data/ext/nmatrix/storage/storage.cpp +730 -0
- data/ext/nmatrix/storage/storage.h +99 -0
- data/ext/nmatrix/storage/yale/class.h +1139 -0
- data/ext/nmatrix/storage/yale/iterators/base.h +143 -0
- data/ext/nmatrix/storage/yale/iterators/iterator.h +131 -0
- data/ext/nmatrix/storage/yale/iterators/row.h +450 -0
- data/ext/nmatrix/storage/yale/iterators/row_stored.h +140 -0
- data/ext/nmatrix/storage/yale/iterators/row_stored_nd.h +169 -0
- data/ext/nmatrix/storage/yale/iterators/stored_diagonal.h +124 -0
- data/ext/nmatrix/storage/yale/math/transpose.h +110 -0
- data/ext/nmatrix/storage/yale/yale.cpp +2074 -0
- data/ext/nmatrix/storage/yale/yale.h +203 -0
- data/ext/nmatrix/types.h +55 -0
- data/ext/nmatrix/util/io.cpp +279 -0
- data/ext/nmatrix/util/io.h +115 -0
- data/ext/nmatrix/util/sl_list.cpp +627 -0
- data/ext/nmatrix/util/sl_list.h +144 -0
- data/ext/nmatrix/util/util.h +78 -0
- data/lib/nmatrix/blas.rb +378 -0
- data/lib/nmatrix/cruby/math.rb +744 -0
- data/lib/nmatrix/enumerate.rb +253 -0
- data/lib/nmatrix/homogeneous.rb +241 -0
- data/lib/nmatrix/io/fortran_format.rb +138 -0
- data/lib/nmatrix/io/harwell_boeing.rb +221 -0
- data/lib/nmatrix/io/market.rb +263 -0
- data/lib/nmatrix/io/point_cloud.rb +189 -0
- data/lib/nmatrix/jruby/decomposition.rb +24 -0
- data/lib/nmatrix/jruby/enumerable.rb +13 -0
- data/lib/nmatrix/jruby/error.rb +4 -0
- data/lib/nmatrix/jruby/math.rb +501 -0
- data/lib/nmatrix/jruby/nmatrix_java.rb +840 -0
- data/lib/nmatrix/jruby/operators.rb +283 -0
- data/lib/nmatrix/jruby/slice.rb +264 -0
- data/lib/nmatrix/lapack_core.rb +181 -0
- data/lib/nmatrix/lapack_plugin.rb +44 -0
- data/lib/nmatrix/math.rb +953 -0
- data/lib/nmatrix/mkmf.rb +100 -0
- data/lib/nmatrix/monkeys.rb +137 -0
- data/lib/nmatrix/nmatrix.rb +1172 -0
- data/lib/nmatrix/rspec.rb +75 -0
- data/lib/nmatrix/shortcuts.rb +1163 -0
- data/lib/nmatrix/version.rb +39 -0
- data/lib/nmatrix/yale_functions.rb +118 -0
- data/lib/nmatrix.rb +28 -0
- data/spec/00_nmatrix_spec.rb +892 -0
- data/spec/01_enum_spec.rb +196 -0
- data/spec/02_slice_spec.rb +407 -0
- data/spec/03_nmatrix_monkeys_spec.rb +80 -0
- data/spec/2x2_dense_double.mat +0 -0
- data/spec/4x4_sparse.mat +0 -0
- data/spec/4x5_dense.mat +0 -0
- data/spec/blas_spec.rb +215 -0
- data/spec/elementwise_spec.rb +311 -0
- data/spec/homogeneous_spec.rb +100 -0
- data/spec/io/fortran_format_spec.rb +88 -0
- data/spec/io/harwell_boeing_spec.rb +98 -0
- data/spec/io/test.rua +9 -0
- data/spec/io_spec.rb +159 -0
- data/spec/lapack_core_spec.rb +482 -0
- data/spec/leakcheck.rb +16 -0
- data/spec/math_spec.rb +1363 -0
- data/spec/nmatrix_yale_resize_test_associations.yaml +2802 -0
- data/spec/nmatrix_yale_spec.rb +286 -0
- data/spec/rspec_monkeys.rb +56 -0
- data/spec/rspec_spec.rb +35 -0
- data/spec/shortcuts_spec.rb +474 -0
- data/spec/slice_set_spec.rb +162 -0
- data/spec/spec_helper.rb +172 -0
- data/spec/stat_spec.rb +214 -0
- data/spec/test.pcd +20 -0
- data/spec/utm5940.mtx +83844 -0
- metadata +295 -0
data/lib/nmatrix/math.rb
ADDED
|
@@ -0,0 +1,953 @@
|
|
|
1
|
+
#--
|
|
2
|
+
# = NMatrix
|
|
3
|
+
#
|
|
4
|
+
# A linear algebra library for scientific computation in Ruby.
|
|
5
|
+
# NMatrix is part of SciRuby.
|
|
6
|
+
#
|
|
7
|
+
# NMatrix was originally inspired by and derived from NArray, by
|
|
8
|
+
# Masahiro Tanaka: http://narray.rubyforge.org
|
|
9
|
+
#
|
|
10
|
+
# == Copyright Information
|
|
11
|
+
#
|
|
12
|
+
# SciRuby is Copyright (c) 2010 - 2014, Ruby Science Foundation
|
|
13
|
+
# NMatrix is Copyright (c) 2012 - 2014, John Woods and the Ruby Science Foundation
|
|
14
|
+
#
|
|
15
|
+
# Please see LICENSE.txt for additional copyright notices.
|
|
16
|
+
#
|
|
17
|
+
# == Contributing
|
|
18
|
+
#
|
|
19
|
+
# By contributing source code to SciRuby, you agree to be bound by
|
|
20
|
+
# our Contributor Agreement:
|
|
21
|
+
#
|
|
22
|
+
# * https://github.com/SciRuby/sciruby/wiki/Contributor-Agreement
|
|
23
|
+
#
|
|
24
|
+
# == math.rb
|
|
25
|
+
#
|
|
26
|
+
# Math functionality for NMatrix, along with any NMatrix instance
|
|
27
|
+
# methods that correspond to ATLAS/BLAS/LAPACK functions (e.g.,
|
|
28
|
+
# laswp).
|
|
29
|
+
#++
|
|
30
|
+
|
|
31
|
+
class NMatrix
|
|
32
|
+
|
|
33
|
+
module NMMath #:nodoc:
  # Math-module methods of arity 2 (two numeric arguments) that are
  # mirrored onto NMatrix elementwise elsewhere in the library.
  METHODS_ARITY_2 = [:atan2, :ldexp, :hypot]
  # Math-module methods of arity 1, plus unary minus (:-@), mirrored
  # onto NMatrix elementwise.
  METHODS_ARITY_1 = [:cos, :sin, :tan, :acos, :asin, :atan, :cosh, :sinh, :tanh, :acosh,
    :asinh, :atanh, :exp, :log2, :log10, :sqrt, :cbrt, :erf, :erfc, :gamma, :-@]
end
|
|
38
|
+
|
|
39
|
+
# Helpers for turning the pivot vector produced by an LU factorization
# (LAPACK getrf) into a permutation array / permutation matrix.
module FactorizeLUMethods
  class << self
    # Build the permutation matrix P encoded by +pivot_array+: column i
    # carries a single 1 in the row the permutation maps it to.
    def permutation_matrix_from(pivot_array)
      order = permutation_array_for(pivot_array)
      matrix = NMatrix.zeros(order.size, dtype: :byte)
      order.each_with_index { |row, col| matrix[row, col] = 1 }
      matrix
    end

    # Replay the row swaps encoded in a LAPACK-style pivot vector to get
    # a zero-based permutation array.
    def permutation_array_for(pivot_array)
      order = (0...pivot_array.size).to_a
      order.each_index do |i|
        # getrf pivot indices are 1-based, hence the -1.
        target = pivot_array[i] - 1
        order[i], order[target] = order[target], order[i]
      end
      order
    end
  end
end
|
|
63
|
+
|
|
64
|
+
#
|
|
65
|
+
# call-seq:
|
|
66
|
+
# invert! -> NMatrix
|
|
67
|
+
#
|
|
68
|
+
# Use LAPACK to calculate the inverse of the matrix (in-place) if available.
|
|
69
|
+
# Only works on dense matrices. Alternatively uses in-place Gauss-Jordan
|
|
70
|
+
# elimination.
|
|
71
|
+
#
|
|
72
|
+
# * *Raises* :
|
|
73
|
+
# - +StorageTypeError+ -> only implemented on dense matrices.
|
|
74
|
+
# - +ShapeError+ -> matrix must be square.
|
|
75
|
+
# - +DataTypeError+ -> cannot invert an integer matrix in-place.
|
|
76
|
+
#
|
|
77
|
+
# In-place inverse. Dense, square, non-integer matrices only.
def invert!
  raise StorageTypeError, "invert only works on dense matrices currently" unless self.dense?
  unless self.dim == 2 && self.shape[0] == self.shape[1]
    raise ShapeError, "Cannot invert non-square matrix"
  end
  raise DataTypeError, "Cannot invert an integer matrix in-place" if self.integer_dtype?

  # No internal getri implementation, so delegate to __inverse__ in
  # in-place mode.
  __inverse__(self, true)
end
|
|
85
|
+
|
|
86
|
+
#
|
|
87
|
+
# call-seq:
|
|
88
|
+
# invert -> NMatrix
|
|
89
|
+
#
|
|
90
|
+
# Make a copy of the matrix, then invert using Gauss-Jordan elimination.
|
|
91
|
+
# Works without LAPACK.
|
|
92
|
+
#
|
|
93
|
+
# * *Returns* :
|
|
94
|
+
# - A dense NMatrix. Will be the same type as the input NMatrix,
|
|
95
|
+
# except if the input is an integral dtype, in which case it will be a
|
|
96
|
+
# :float64 NMatrix.
|
|
97
|
+
#
|
|
98
|
+
# * *Raises* :
|
|
99
|
+
# - +StorageTypeError+ -> only implemented on dense matrices.
|
|
100
|
+
# - +ShapeError+ -> matrix must be square.
|
|
101
|
+
#
|
|
102
|
+
# Non-destructive inverse: clone first (casting integer dtypes to
# :float64, since integer matrices cannot be inverted in place), then
# invert the copy.
def invert
  # Implemented via invert! so plugins only need to override invert!.
  working_copy = self.integer_dtype? ? self.cast(dtype: :float64) : self.clone
  working_copy.invert!
end
|
|
113
|
+
alias :inverse :invert
|
|
114
|
+
|
|
115
|
+
# call-seq:
|
|
116
|
+
# exact_inverse! -> NMatrix
|
|
117
|
+
#
|
|
118
|
+
# Calculates the exact inverse of a matrix of size 2 or 3.
|
|
119
|
+
# Only works on dense matrices.
|
|
120
|
+
#
|
|
121
|
+
# * *Raises* :
|
|
122
|
+
# - +DataTypeError+ -> cannot invert an integer matrix in-place.
|
|
123
|
+
# - +NotImplementedError+ -> cannot find exact inverse of matrix with size greater than 3 #
|
|
124
|
+
# Closed-form (exact) inverse for matrices up to 3x3.
def exact_inverse!
  unless self.dim == 2 && self.shape[0] == self.shape[1]
    raise ShapeError, "Cannot invert non-square matrix"
  end
  raise DataTypeError, "Cannot invert an integer matrix in-place" if self.integer_dtype?

  # No internal getri; use the dedicated exact-inverse routine, which is
  # only implemented for sizes 1..3.
  order = self.shape[0]
  if order > 3
    raise NotImplementedError, "Cannot find exact inverse of matrix of size greater than 3"
  end
  __inverse_exact__(self.clone, order, order)
end
|
|
136
|
+
|
|
137
|
+
#
|
|
138
|
+
# call-seq:
|
|
139
|
+
# exact_inverse -> NMatrix
|
|
140
|
+
#
|
|
141
|
+
# Make a copy of the matrix, then invert using exact_inverse
|
|
142
|
+
#
|
|
143
|
+
# * *Returns* :
|
|
144
|
+
# - A dense NMatrix. Will be the same type as the input NMatrix,
|
|
145
|
+
# except if the input is an integral dtype, in which case it will be a
|
|
146
|
+
# :float64 NMatrix.
|
|
147
|
+
#
|
|
148
|
+
# * *Raises* :
|
|
149
|
+
# - +StorageTypeError+ -> only implemented on dense matrices.
|
|
150
|
+
# - +ShapeError+ -> matrix must be square.
|
|
151
|
+
# - +NotImplementedError+ -> cannot find exact inverse of matrix with size greater than 3
|
|
152
|
+
#
|
|
153
|
+
# Copying variant of exact_inverse!; integer matrices are cast to
# :float64 before inverting.
def exact_inverse
  # Implemented via exact_inverse! so plugins only override the bang form.
  working_copy = self.integer_dtype? ? self.cast(dtype: :float64) : self.clone
  working_copy.exact_inverse!
end
|
|
164
|
+
alias :invert_exactly :exact_inverse
|
|
165
|
+
|
|
166
|
+
|
|
167
|
+
|
|
168
|
+
#
|
|
169
|
+
# call-seq:
|
|
170
|
+
# pinv -> NMatrix
|
|
171
|
+
#
|
|
172
|
+
# Compute the Moore-Penrose pseudo-inverse of a matrix using its
|
|
173
|
+
# singular value decomposition (SVD).
|
|
174
|
+
#
|
|
175
|
+
# This function requires the nmatrix-atlas gem installed.
|
|
176
|
+
#
|
|
177
|
+
# * *Arguments* :
|
|
178
|
+
# - +tolerance(optional)+ -> Cutoff for small singular values.
|
|
179
|
+
#
|
|
180
|
+
# * *Returns* :
|
|
181
|
+
# - Pseudo-inverse matrix.
|
|
182
|
+
#
|
|
183
|
+
# * *Raises* :
|
|
184
|
+
# - +NotImplementedError+ -> If called without nmatrix-atlas or nmatrix-lapacke gem.
|
|
185
|
+
# - +TypeError+ -> If called without float or complex data type.
|
|
186
|
+
#
|
|
187
|
+
# * *Examples* :
|
|
188
|
+
#
|
|
189
|
+
# a = NMatrix.new([2,2],[1,2,
|
|
190
|
+
# 3,4], dtype: :float64)
|
|
191
|
+
# a.pinv # => [ [-2.0000000000000018, 1.0000000000000007]
|
|
192
|
+
# [1.5000000000000016, -0.5000000000000008] ]
|
|
193
|
+
#
|
|
194
|
+
# b = NMatrix.new([4,1],[1,2,3,4], dtype: :float64)
|
|
195
|
+
# b.pinv # => [ [ 0.03333333, 0.06666667, 0.99999999, 0.13333333] ]
|
|
196
|
+
#
|
|
197
|
+
# == References
|
|
198
|
+
#
|
|
199
|
+
# * https://en.wikipedia.org/wiki/Moore%E2%80%93Penrose_pseudoinverse
|
|
200
|
+
# * G. Strang, Linear Algebra and Its Applications, 2nd Ed., Orlando, FL, Academic Press
|
|
201
|
+
#
|
|
202
|
+
def pinv(tolerance = 1e-15)
  raise DataTypeError, "pinv works only with matrices of float or complex data type" unless
    [:float32, :float64, :complex64, :complex128].include?(dtype)
  # SVD of the input; complex matrices are conjugated first so the
  # reassembled pseudo-inverse is correct for complex dtypes.
  if self.complex_dtype?
    u, s, vt = self.complex_conjugate.gesvd # singular value decomposition
  else
    u, s, vt = self.gesvd
  end
  rows = self.shape[0]
  cols = self.shape[1]
  # Trim U / V^T to economy-size factors depending on whether the matrix
  # is wide (rows < cols) or tall/square.
  if rows < cols
    u_reduced = u
    vt_reduced = vt[0..rows - 1, 0..cols - 1].transpose
  else
    u_reduced = u[0..rows - 1, 0..cols - 1]
    vt_reduced = vt.transpose
  end
  # Invert singular values above the relative cutoff; zero out the rest
  # (they are treated as numerical noise).
  largest_singular_value = s.max.to_f
  cutoff = tolerance * largest_singular_value
  (0...[rows, cols].min).each do |i|
    s[i] = 1 / s[i] if s[i] > cutoff
    s[i] = 0 if s[i] <= cutoff
  end
  # Assemble A+ = V * S+ * U^T, right-to-left.
  multiplier = u_reduced.dot(NMatrix.diagonal(s.to_a)).transpose
  vt_reduced.dot(multiplier)
end
|
|
228
|
+
alias :pseudo_inverse :pinv
|
|
229
|
+
alias :pseudoinverse :pinv
|
|
230
|
+
|
|
231
|
+
|
|
232
|
+
#
|
|
233
|
+
# call-seq:
|
|
234
|
+
# adjugate! -> NMatrix
|
|
235
|
+
#
|
|
236
|
+
# Calculate the adjugate of the matrix (in-place).
|
|
237
|
+
# Only works on dense matrices.
|
|
238
|
+
#
|
|
239
|
+
# * *Raises* :
|
|
240
|
+
# - +StorageTypeError+ -> only implemented on dense matrices.
|
|
241
|
+
# - +ShapeError+ -> matrix must be square.
|
|
242
|
+
# - +DataTypeError+ -> cannot calculate adjugate of an integer matrix in-place.
|
|
243
|
+
#
|
|
244
|
+
# In-place adjugate via adj(A) = det(A) * A^-1: take the determinant,
# invert in place, then rescale every entry.
def adjugate!
  raise StorageTypeError, "adjugate only works on dense matrices currently" unless self.dense?
  unless self.dim == 2 && self.shape[0] == self.shape[1]
    raise ShapeError, "Cannot calculate adjugate of a non-square matrix"
  end
  raise DataTypeError, "Cannot calculate adjugate of an integer matrix in-place" if self.integer_dtype?

  determinant = self.det
  self.invert!
  self.map! { |entry| entry * determinant }
  self
end
|
|
253
|
+
alias :adjoint! :adjugate!
|
|
254
|
+
|
|
255
|
+
#
|
|
256
|
+
# call-seq:
|
|
257
|
+
# adjugate -> NMatrix
|
|
258
|
+
#
|
|
259
|
+
# Make a copy of the matrix and calculate the adjugate of the matrix.
|
|
260
|
+
# Only works on dense matrices.
|
|
261
|
+
#
|
|
262
|
+
# * *Returns* :
|
|
263
|
+
# - A dense NMatrix. Will be the same type as the input NMatrix,
|
|
264
|
+
# except if the input is an integral dtype, in which case it will be a
|
|
265
|
+
# :float64 NMatrix.
|
|
266
|
+
#
|
|
267
|
+
# * *Raises* :
|
|
268
|
+
# - +StorageTypeError+ -> only implemented on dense matrices.
|
|
269
|
+
# - +ShapeError+ -> matrix must be square.
|
|
270
|
+
#
|
|
271
|
+
# Copying adjugate: adj(A) = det(A) * A^-1 computed on a fresh matrix.
def adjugate
  raise StorageTypeError, "adjugate only works on dense matrices currently" unless self.dense?
  unless self.dim == 2 && self.shape[0] == self.shape[1]
    raise ShapeError, "Cannot calculate adjugate of a non-square matrix"
  end

  determinant = self.det
  result = self.invert
  result.map! { |entry| entry * determinant }
  result
end
|
|
279
|
+
alias :adjoint :adjugate
|
|
280
|
+
|
|
281
|
+
# Reduce self to upper hessenberg form using householder transforms.
|
|
282
|
+
#
|
|
283
|
+
# == References
|
|
284
|
+
#
|
|
285
|
+
# * http://en.wikipedia.org/wiki/Hessenberg_matrix
|
|
286
|
+
# * http://www.mymathlib.com/c_source/matrices/eigen/hessenberg_orthog.c
|
|
287
|
+
# Non-destructive upper-Hessenberg reduction: operate on a clone.
def hessenberg
  clone.hessenberg!
end
|
|
290
|
+
|
|
291
|
+
# Destructive version of #hessenberg
|
|
292
|
+
# Destructive version of #hessenberg: reduce self to upper Hessenberg
# form via householder transforms (__hessenberg__) and return self.
def hessenberg!
  raise ShapeError, "Trying to reduce non 2D matrix to hessenberg form" unless shape.size == 2
  raise ShapeError, "Trying to reduce non-square matrix to hessenberg form" unless shape[0] == shape[1]
  raise StorageTypeError, "Matrix must be dense" unless stype == :dense
  unless [:float64, :float32].include?(dtype)
    raise TypeError, "Works with float matrices only"
  end

  __hessenberg__(self)
  self
end
|
|
304
|
+
|
|
305
|
+
|
|
306
|
+
# call-seq:
|
|
307
|
+
# matrix_norm -> Numeric
|
|
308
|
+
#
|
|
309
|
+
# Calculates the selected norm (defaults to 2-norm) of a 2D matrix.
|
|
310
|
+
#
|
|
311
|
+
# This should be used for small or medium sized matrices.
|
|
312
|
+
# For greater matrices, there should be a separate implementation where
|
|
313
|
+
# the norm is estimated rather than computed, for the sake of computation speed.
|
|
314
|
+
#
|
|
315
|
+
# Currently implemented norms are 1-norm, 2-norm, Frobenius, Infinity.
|
|
316
|
+
# A minus on the 1, 2 and inf norms returns the minimum instead of the maximum value.
|
|
317
|
+
#
|
|
318
|
+
# Tested mainly with dense matrices. Further checks and modifications might
|
|
319
|
+
# be necessary for sparse matrices.
|
|
320
|
+
#
|
|
321
|
+
# * *Returns* :
|
|
322
|
+
# - The selected norm of the matrix.
|
|
323
|
+
# * *Raises* :
|
|
324
|
+
# - +NotImplementedError+ -> norm can be calculated only for 2D matrices
|
|
325
|
+
# - +ArgumentError+ -> unrecognized norm
|
|
326
|
+
#
|
|
327
|
+
# Dispatch to the requested matrix norm (defaults to the 2-norm).
# Negative 1/2 and :-inf variants return the minimum instead of the
# maximum value.
def matrix_norm type = 2
  raise NotImplementedError, "norm can be calculated only for 2D matrices" unless self.dim == 2
  raise NotImplementedError, "norm only implemented for dense storage" unless self.stype == :dense
  raise ArgumentError, "norm not defined for byte dtype" if self.dtype == :byte

  case type
  when nil, 2, -2
    # Largest (or, for -2, smallest) singular value.
    self.two_matrix_norm(type == -2)
  when 1, -1
    # Maximum (or minimum) absolute column sum.
    self.one_matrix_norm(type == -1)
  when :frobenius, :fro
    self.fro_matrix_norm
  when :infinity, :inf, :'-inf', :'-infinity'
    # Maximum (or minimum) absolute row sum.
    self.inf_matrix_norm(type == :'-inf' || type == :'-infinity')
  else
    raise ArgumentError.new("argument must be a valid integer or symbol")
  end
end
|
|
344
|
+
|
|
345
|
+
# Calculate the variance co-variance matrix
|
|
346
|
+
#
|
|
347
|
+
# == Options
|
|
348
|
+
#
|
|
349
|
+
# * +:for_sample_data+ - Default true. If set to false will consider the denominator for
|
|
350
|
+
# population data (i.e. N, as opposed to N-1 for sample data).
|
|
351
|
+
#
|
|
352
|
+
# == References
|
|
353
|
+
#
|
|
354
|
+
# * http://stattrek.com/matrix-algebra/covariance-matrix.aspx
|
|
355
|
+
# Variance-covariance matrix. With for_sample_data: true (the default)
# the denominator is N-1; otherwise N (population covariance).
def cov(opts = {})
  raise TypeError, "Only works for non-integer dtypes" if integer_dtype?
  opts = { for_sample_data: true }.merge(opts)

  denominator = opts[:for_sample_data] ? rows - 1 : rows
  ones = NMatrix.ones [rows, 1]
  # Subtract each column's mean from every entry to get deviation scores.
  deviation_scores = self - ones.dot(ones.transpose).dot(self) / rows
  deviation_scores.transpose.dot(deviation_scores) / denominator
end
|
|
366
|
+
|
|
367
|
+
# Calculate the correlation matrix.
|
|
368
|
+
# Correlation matrix: covariance normalized by the outer product of the
# per-column standard deviations.
def corr
  raise NotImplementedError, "Does not work for complex dtypes" if complex_dtype?
  sd = std
  cov / sd.transpose.dot(sd)
end
|
|
373
|
+
|
|
374
|
+
# Raise a square matrix to a power. Be careful of numeric overflows!
|
|
375
|
+
# In case *n* is 0, an identity matrix of the same dimension is returned. In case
|
|
376
|
+
# of negative *n*, the matrix is inverted and the absolute value of *n* taken
|
|
377
|
+
# for computing the power.
|
|
378
|
+
#
|
|
379
|
+
# == Arguments
|
|
380
|
+
#
|
|
381
|
+
# * +n+ - Integer to which self is to be raised.
|
|
382
|
+
#
|
|
383
|
+
# == References
|
|
384
|
+
#
|
|
385
|
+
# * R.G Dromey - How to Solve it by Computer. Link -
|
|
386
|
+
# http://www.amazon.com/Solve-Computer-Prentice-Hall-International-Science/dp/0134340019/ref=sr_1_1?ie=UTF8&qid=1422605572&sr=8-1&keywords=how+to+solve+it+by+computer
|
|
387
|
+
# Raise a square matrix to an integer power by binary exponentiation.
# n == 0 yields the identity; n < 0 inverts first and uses |n|.
def pow n
  raise ShapeError, "Only works with 2D square matrices." if
    shape[0] != shape[1] or shape.size != 2
  raise TypeError, "Only works with integer powers" unless n.is_a?(Integer)

  base = (integer_dtype? ? self.cast(dtype: :int64) : self).clone
  result = NMatrix.eye shape[0], dtype: base.dtype, stype: base.stype

  return NMatrix.eye(shape, dtype: dtype, stype: stype) if n == 0
  return base if n == 1
  if n < 0
    # Negative power: invert once, then exponentiate by |n|.
    n = n.abs
    base.invert!
  end

  # Square-and-multiply: O(log n) matrix products instead of n.
  while n > 0
    result = result.dot(base) if n % 2 == 1
    n = n / 2
    base = base.dot(base)
  end

  result
end
|
|
414
|
+
|
|
415
|
+
# Compute the Kronecker product of +self+ and other NMatrix
|
|
416
|
+
#
|
|
417
|
+
# === Arguments
|
|
418
|
+
#
|
|
419
|
+
# * +mat+ - A 2D NMatrix object
|
|
420
|
+
#
|
|
421
|
+
# === Usage
|
|
422
|
+
#
|
|
423
|
+
# a = NMatrix.new([2,2],[1,2,
|
|
424
|
+
# 3,4])
|
|
425
|
+
# b = NMatrix.new([2,3],[1,1,1,
|
|
426
|
+
# 1,1,1], dtype: :float64)
|
|
427
|
+
# a.kron_prod(b) # => [ [1.0, 1.0, 1.0, 2.0, 2.0, 2.0]
|
|
428
|
+
# [1.0, 1.0, 1.0, 2.0, 2.0, 2.0]
|
|
429
|
+
# [3.0, 3.0, 3.0, 4.0, 4.0, 4.0]
|
|
430
|
+
# [3.0, 3.0, 3.0, 4.0, 4.0, 4.0] ]
|
|
431
|
+
#
|
|
432
|
+
def kron_prod(mat)
  unless self.dimensions==2 and mat.dimensions==2
    raise ShapeError, "Implemented for 2D NMatrix objects only."
  end

  # compute the shape [n,m] of the product matrix
  n, m = self.shape[0]*mat.shape[0], self.shape[1]*mat.shape[1]
  # compute the entries of the product matrix
  kron_prod_array = []
  if self.yale?
    # +:yale+ requires to get the row by copy in order to apply +#transpose+ to it
    self.each_row(getby=:copy) do |selfr|
      mat.each_row do |matr|
        # The outer product of one row of self with one row of mat is one
        # block row of the Kronecker product, flattened in row order.
        kron_prod_array += (selfr.transpose.dot matr).to_flat_a
      end
    end
  else
    self.each_row do |selfr|
      mat.each_row do |matr|
        kron_prod_array += (selfr.transpose.dot matr).to_flat_a
      end
    end
  end

  NMatrix.new([n,m], kron_prod_array)
end
|
|
458
|
+
|
|
459
|
+
#
|
|
460
|
+
# call-seq:
|
|
461
|
+
# trace -> Numeric
|
|
462
|
+
#
|
|
463
|
+
# Calculates the trace of an nxn matrix.
|
|
464
|
+
#
|
|
465
|
+
# * *Raises* :
|
|
466
|
+
# - +ShapeError+ -> Expected square matrix
|
|
467
|
+
#
|
|
468
|
+
# * *Returns* :
|
|
469
|
+
# - The trace of the matrix (a numeric value)
|
|
470
|
+
#
|
|
471
|
+
# Sum of the diagonal entries of a square 2D matrix.
def trace
  raise ShapeError, "Expected square matrix" unless self.shape[0] == self.shape[1] && self.dim == 2

  (0...self.shape[0]).inject(0) { |total, i| total + self[i, i] }
end
|
|
478
|
+
|
|
479
|
+
##
|
|
480
|
+
# call-seq:
|
|
481
|
+
# mean() -> NMatrix
|
|
482
|
+
# mean(dimen) -> NMatrix
|
|
483
|
+
#
|
|
484
|
+
# Calculates the mean along the specified dimension.
|
|
485
|
+
#
|
|
486
|
+
# This will force integer types to float64 dtype.
|
|
487
|
+
#
|
|
488
|
+
# @see #inject_rank
|
|
489
|
+
#
|
|
490
|
+
# Mean along dimension +dimen+. Integer dtypes are reduced in :float64
# so the division is not truncated.
def mean(dimen = 0)
  reduce_dtype = integer_dtype? ? :float64 : nil
  total = inject_rank(dimen, 0.0, reduce_dtype) { |acc, sub_mat| acc + sub_mat }
  total / shape[dimen]
end
|
|
499
|
+
|
|
500
|
+
##
|
|
501
|
+
# call-seq:
|
|
502
|
+
# sum() -> NMatrix
|
|
503
|
+
# cumsum() -> NMatrix
|
|
504
|
+
# sum(dimen) -> NMatrix
|
|
505
|
+
# cumsum(dimen) -> NMatrix
|
|
506
|
+
#
|
|
507
|
+
# Calculates the sum along the specified dimension.
|
|
508
|
+
#
|
|
509
|
+
# @see #inject_rank
|
|
510
|
+
# Sum along dimension +dimen+ (also aliased as cumsum).
def sum(dimen = 0)
  inject_rank(dimen, 0.0) { |acc, sub_mat| acc + sub_mat }
end
|
|
515
|
+
alias :cumsum :sum
|
|
516
|
+
|
|
517
|
+
##
|
|
518
|
+
# call-seq:
|
|
519
|
+
# min() -> NMatrix
|
|
520
|
+
# min(dimen) -> NMatrix
|
|
521
|
+
#
|
|
522
|
+
# Calculates the minimum along the specified dimension.
|
|
523
|
+
#
|
|
524
|
+
# @see #inject_rank
|
|
525
|
+
#
|
|
526
|
+
def min(dimen=0)
  inject_rank(dimen) do |min, sub_mat|
    if min.is_a? NMatrix then
      # Elementwise minimum via 0/1 masks: (min <= sub_mat) keeps entries
      # where the accumulator is smaller, (min > sub_mat) selects the
      # sub-matrix entries otherwise. The (min)*0.0 term is presumably
      # there to coerce the mask term to the accumulator's dtype/shape —
      # TODO confirm.
      min * (min <= sub_mat).cast(self.stype, self.dtype) + ((min)*0.0 + (min > sub_mat).cast(self.stype, self.dtype)) * sub_mat
    else
      # Scalar accumulator (fully reduced case): plain comparison.
      min <= sub_mat ? min : sub_mat
    end
  end
end
|
|
535
|
+
|
|
536
|
+
##
|
|
537
|
+
# call-seq:
|
|
538
|
+
# max() -> NMatrix
|
|
539
|
+
# max(dimen) -> NMatrix
|
|
540
|
+
#
|
|
541
|
+
# Calculates the maximum along the specified dimension.
|
|
542
|
+
#
|
|
543
|
+
# @see #inject_rank
|
|
544
|
+
#
|
|
545
|
+
def max(dimen=0)
  inject_rank(dimen) do |max, sub_mat|
    if max.is_a? NMatrix then
      # Elementwise maximum via 0/1 masks, mirroring #min: keep the
      # accumulator where it is >= the sub-matrix, take the sub-matrix
      # entry otherwise. The (max)*0.0 term presumably coerces the mask
      # term to the accumulator's dtype/shape — TODO confirm.
      max * (max >= sub_mat).cast(self.stype, self.dtype) + ((max)*0.0 + (max < sub_mat).cast(self.stype, self.dtype)) * sub_mat
    else
      # Scalar accumulator (fully reduced case): plain comparison.
      max >= sub_mat ? max : sub_mat
    end
  end
end
|
|
554
|
+
|
|
555
|
+
|
|
556
|
+
##
|
|
557
|
+
# call-seq:
|
|
558
|
+
# variance() -> NMatrix
|
|
559
|
+
# variance(dimen) -> NMatrix
|
|
560
|
+
#
|
|
561
|
+
# Calculates the sample variance along the specified dimension.
|
|
562
|
+
#
|
|
563
|
+
# This will force integer types to float64 dtype.
|
|
564
|
+
#
|
|
565
|
+
# @see #inject_rank
|
|
566
|
+
#
|
|
567
|
+
# Sample variance along dimension +dimen+ (N-1 denominator). Integer
# dtypes are reduced in :float64.
def variance(dimen = 0)
  reduce_dtype = integer_dtype? ? :float64 : nil
  m = mean(dimen)
  divisor = shape[dimen] - 1
  inject_rank(dimen, 0.0, reduce_dtype) do |acc, sub_mat|
    diff = m - sub_mat
    acc + diff * diff / divisor
  end
end
|
|
577
|
+
|
|
578
|
+
##
|
|
579
|
+
# call-seq:
|
|
580
|
+
# std() -> NMatrix
|
|
581
|
+
# std(dimen) -> NMatrix
|
|
582
|
+
#
|
|
583
|
+
#
|
|
584
|
+
# Calculates the sample standard deviation along the specified dimension.
|
|
585
|
+
#
|
|
586
|
+
# This will force integer types to float64 dtype.
|
|
587
|
+
#
|
|
588
|
+
# @see #inject_rank
|
|
589
|
+
#
|
|
590
|
+
# Sample standard deviation along +dimen+: elementwise sqrt of variance.
def std(dimen = 0)
  variance(dimen).sqrt
end
|
|
593
|
+
|
|
594
|
+
|
|
595
|
+
#
|
|
596
|
+
# call-seq:
|
|
597
|
+
# abs_dtype -> Symbol
|
|
598
|
+
#
|
|
599
|
+
# Returns the dtype of the result of a call to #abs. In most cases, this is the same as dtype; it should only differ
|
|
600
|
+
# for :complex64 (where it's :float32) and :complex128 (:float64).
|
|
601
|
+
# Dtype produced by #abs: complex dtypes collapse to the float dtype of
# matching width; every other dtype is returned unchanged.
def abs_dtype
  case self.dtype
  when :complex64  then :float32
  when :complex128 then :float64
  else self.dtype
  end
end
|
|
610
|
+
|
|
611
|
+
|
|
612
|
+
#
|
|
613
|
+
# call-seq:
|
|
614
|
+
# abs -> NMatrix
|
|
615
|
+
#
|
|
616
|
+
# Maps all values in a matrix to their absolute values.
|
|
617
|
+
# Elementwise absolute value, mapped with the storage-appropriate
# iterator and cast back to (stype, abs_dtype).
def abs
  mapped =
    case stype
    when :dense
      self.__dense_map__ { |v| v.abs }
    when :list
      # FIXME: Need __list_map_stored__, but this will do for now.
      self.__list_map_merged_stored__(nil, nil) { |v, dummy| v.abs }
    else
      self.__yale_map_stored__ { |v| v.abs }
    end
  mapped.cast(self.stype, abs_dtype)
end
|
|
627
|
+
|
|
628
|
+
# Norm calculation methods
|
|
629
|
+
# Frobenius norm: the Euclidean norm of the matrix, treated as if it were a vector
|
|
630
|
+
# Frobenius norm: Euclidean norm of the matrix flattened to a column
# vector. Cast to :float64 because nrm2 is not correct for :float32.
def fro_matrix_norm
  as_double = self.cast(:dtype => :float64)
  as_double.reshape([self.size, 1]).nrm2
end
|
|
638
|
+
|
|
639
|
+
# 2-norm: the largest/smallest singular value of the matrix
|
|
640
|
+
# 2-norm: largest singular value (smallest when +minus+ is true).
def two_matrix_norm minus = false
  as_double = self.cast(:dtype => :float64)
  # TODO: confirm if this is the desired svd calculation
  sigma = as_double.gesvd[1]
  return sigma[0, 0] unless minus
  sigma[sigma.rows - 1, sigma.cols - 1]
end
|
|
649
|
+
|
|
650
|
+
# 1-norm: the maximum/minimum absolute column sum of the matrix
|
|
651
|
+
# 1-norm: maximum absolute column sum (minimum when +minus+ is true).
def one_matrix_norm minus = false
  # TODO: change traversing method for sparse matrices
  col_sums = (0...self.cols).map do |j|
    self.col(j).inject(0) { |sum, entry| sum + entry.abs }
  end

  minus ? col_sums.min : col_sums.max
end
|
|
663
|
+
|
|
664
|
+
# Infinity norm: the maximum/minimum absolute row sum of the matrix
|
|
665
|
+
# Infinity norm: maximum absolute row sum (minimum when +minus+ is true).
def inf_matrix_norm minus = false
  row_sums = (0...self.rows).map do |i|
    self.row(i).inject(0) { |sum, entry| sum + entry.abs }
  end

  minus ? row_sums.min : row_sums.max
end
|
|
676
|
+
|
|
677
|
+
#
|
|
678
|
+
# call-seq:
|
|
679
|
+
# positive_definite? -> boolean
|
|
680
|
+
#
|
|
681
|
+
# A matrix is positive definite if it’s symmetric and all its eigenvalues are positive
|
|
682
|
+
#
|
|
683
|
+
# * *Returns* :
|
|
684
|
+
# - A boolean value telling if the NMatrix is positive definite or not.
|
|
685
|
+
# * *Raises* :
|
|
686
|
+
# - +ShapeError+ -> Must be used on square matrices.
|
|
687
|
+
#
|
|
688
|
+
# Sylvester's criterion: a symmetric matrix is positive definite iff
# every leading principal minor has a strictly positive determinant.
def positive_definite?
  raise(ShapeError, "positive definite calculated only for square matrices") unless
    self.dim == 2 && self.shape[0] == self.shape[1]

  (0...self.cols).all? { |k| self[0..k, 0..k].det > 0 }
end
|
|
700
|
+
|
|
701
|
+
#
|
|
702
|
+
# call-seq:
|
|
703
|
+
# svd_rank() -> int
|
|
704
|
+
# svd_rank(tolerence) -> int
|
|
705
|
+
# Gives rank of the matrix based on the singular value decomposition.
|
|
706
|
+
# The rank of a matrix is computed as the number of diagonal elements in Sigma that are larger than a tolerance
|
|
707
|
+
#
|
|
708
|
+
#* *Returns* :
|
|
709
|
+
# - An integer equal to the rank of the matrix
|
|
710
|
+
#* *Raises* :
|
|
711
|
+
# - +ShapeError+ -> Is only computable on 2-D matrices
|
|
712
|
+
#
|
|
713
|
+
def svd_rank(tolerence="default")
|
|
714
|
+
raise(ShapeError, "rank calculated only for 2-D matrices") unless
|
|
715
|
+
self.dim == 2
|
|
716
|
+
|
|
717
|
+
sigmas = self.gesvd[1].to_a.flatten
|
|
718
|
+
eps = NMatrix::FLOAT64_EPSILON
|
|
719
|
+
|
|
720
|
+
# epsilon depends on the width of the number
|
|
721
|
+
if (self.dtype == :float32 || self.dtype == :complex64)
|
|
722
|
+
eps = NMatrix::FLOAT32_EPSILON
|
|
723
|
+
end
|
|
724
|
+
case tolerence
|
|
725
|
+
when "default"
|
|
726
|
+
tolerence = self.shape.max * sigmas.max * eps # tolerence of a Matrix A is max(size(A))*eps(norm(A)). norm(A) is nearly equal to max(sigma of A)
|
|
727
|
+
end
|
|
728
|
+
return sigmas.map { |x| x > tolerence ? 1 : 0 }.reduce(:+)
|
|
729
|
+
end
|
|
730
|
+
|
|
731
|
+
|
|
732
|
+
|
|
733
|
+
protected

# Define the element-wise operations for lists. Note that the __list_map_merged_stored__ iterator returns a Ruby Object
# matrix, which we then cast back to the appropriate type. If you don't want that, you can redefine these functions in
# your own code.
#
# For each arithmetic operator this generates six methods: a matrix-matrix
# ("elementwise") and a matrix-scalar ("scalar") variant per storage type
# (list, dense, yale). Matrix-matrix variants upcast against the rhs
# matrix's dtype; scalar variants upcast against the minimal dtype able to
# hold the scalar (NMatrix.min_dtype).
{add: :+, sub: :-, mul: :*, div: :/, pow: :**, mod: :%}.each_pair do |ewop, op|
  define_method("__list_elementwise_#{ewop}__") do |rhs|
    self.__list_map_merged_stored__(rhs, nil) { |l,r| l.send(op,r) }.cast(stype, NMatrix.upcast(dtype, rhs.dtype))
  end
  define_method("__dense_elementwise_#{ewop}__") do |rhs|
    self.__dense_map_pair__(rhs) { |l,r| l.send(op,r) }.cast(stype, NMatrix.upcast(dtype, rhs.dtype))
  end
  define_method("__yale_elementwise_#{ewop}__") do |rhs|
    self.__yale_map_merged_stored__(rhs, nil) { |l,r| l.send(op,r) }.cast(stype, NMatrix.upcast(dtype, rhs.dtype))
  end
  define_method("__list_scalar_#{ewop}__") do |rhs|
    self.__list_map_merged_stored__(rhs, nil) { |l,r| l.send(op,r) }.cast(stype, NMatrix.upcast(dtype, NMatrix.min_dtype(rhs)))
  end
  define_method("__yale_scalar_#{ewop}__") do |rhs|
    self.__yale_map_stored__ { |l| l.send(op,rhs) }.cast(stype, NMatrix.upcast(dtype, NMatrix.min_dtype(rhs)))
  end
  define_method("__dense_scalar_#{ewop}__") do |rhs|
    self.__dense_map__ { |l| l.send(op,rhs) }.cast(stype, NMatrix.upcast(dtype, NMatrix.min_dtype(rhs)))
  end
end
|
|
757
|
+
|
|
758
|
+
# These don't actually take an argument -- they're called reverse-polish style on the matrix.
# This group always gets casted to float64.
#
# Each entry generates one method per storage type that maps Ruby's
# Math.<op> over the stored elements, then casts the Object-dtype result
# back to the upcast of the original dtype and :float64.
#
# NOTE(review): :log and :round appear in this list, but argument-taking
# variants (__*_unary_log__(base), __*_unary_round__(precision)) are
# defined later in this file and override the zero-argument versions
# generated here.
[:log, :log2, :log10, :sqrt, :sin, :cos, :tan, :acos, :asin, :atan, :cosh, :sinh, :tanh, :acosh,
 :asinh, :atanh, :exp, :erf, :erfc, :gamma, :cbrt, :round].each do |ewop|
  define_method("__list_unary_#{ewop}__") do
    self.__list_map_stored__(nil) { |l| Math.send(ewop, l) }.cast(stype, NMatrix.upcast(dtype, :float64))
  end
  define_method("__yale_unary_#{ewop}__") do
    self.__yale_map_stored__ { |l| Math.send(ewop, l) }.cast(stype, NMatrix.upcast(dtype, :float64))
  end
  define_method("__dense_unary_#{ewop}__") do
    self.__dense_map__ { |l| Math.send(ewop, l) }.cast(stype, NMatrix.upcast(dtype, :float64))
  end
end
|
|
772
|
+
|
|
773
|
+
#:stopdoc:
# log takes an optional single argument, the base. Default to natural log.
# One variant per storage type; the Object-dtype intermediate is cast back
# to the upcast of the original dtype and :float64.
def __list_unary_log__(base)
  mapped = self.__list_map_stored__(nil) { |value| Math.log(value, base) }
  mapped.cast(stype, NMatrix.upcast(dtype, :float64))
end

def __yale_unary_log__(base)
  mapped = self.__yale_map_stored__ { |value| Math.log(value, base) }
  mapped.cast(stype, NMatrix.upcast(dtype, :float64))
end

def __dense_unary_log__(base)
  mapped = self.__dense_map__ { |value| Math.log(value, base) }
  mapped.cast(stype, NMatrix.upcast(dtype, :float64))
end
|
|
786
|
+
|
|
787
|
+
# These are for negating matrix contents using -@
# Unlike the float64-casting unary math group above, negation keeps the
# original dtype.
def __list_unary_negate__
  negated = self.__list_map_stored__(nil) { |value| -value }
  negated.cast(stype, dtype)
end

def __yale_unary_negate__
  negated = self.__yale_map_stored__ { |value| -value }
  negated.cast(stype, dtype)
end

def __dense_unary_negate__
  negated = self.__dense_map__ { |value| -value }
  negated.cast(stype, dtype)
end
#:startdoc:
|
|
800
|
+
|
|
801
|
+
# These are for rounding each value of a matrix. Takes an optional argument
# (the decimal precision). Complex matrices round the real and imaginary
# parts independently; the result keeps the original dtype.
def __list_unary_round__(precision)
  rounded =
    if self.complex_dtype?
      self.__list_map_stored__(nil) do |value|
        Complex(value.real.round(precision), value.imag.round(precision))
      end
    else
      self.__list_map_stored__(nil) { |value| value.round(precision) }
    end
  rounded.cast(stype, dtype)
end

def __yale_unary_round__(precision)
  rounded =
    if self.complex_dtype?
      self.__yale_map_stored__ do |value|
        Complex(value.real.round(precision), value.imag.round(precision))
      end
    else
      self.__yale_map_stored__ { |value| value.round(precision) }
    end
  rounded.cast(stype, dtype)
end

def __dense_unary_round__(precision)
  rounded =
    if self.complex_dtype?
      self.__dense_map__ do |value|
        Complex(value.real.round(precision), value.imag.round(precision))
      end
    else
      self.__dense_map__ { |value| value.round(precision) }
    end
  rounded.cast(stype, dtype)
end
|
|
828
|
+
|
|
829
|
+
# These are for calculating the floor or ceil of matrix
#
# Returns the result dtype for a floor/ceil operation: integer, complex
# and object matrices keep their own dtype (floor/ceil of an integer is a
# no-op, and complex parts are handled element-wise), while float matrices
# produce :int64. Implicitly returns nil for any dtype outside these sets.
def dtype_for_floor_or_ceil
  # `||` instead of the low-precedence keyword `or`, per Ruby style;
  # the if/elsif is used as an expression, avoiding the mutated local.
  if self.integer_dtype? || [:complex64, :complex128, :object].include?(self.dtype)
    dtype
  elsif [:float32, :float64].include?(self.dtype)
    :int64
  end
end
|
|
839
|
+
|
|
840
|
+
# Generate floor/ceil methods for each storage type. Complex matrices
# apply the operation to the real and imaginary parts separately; the
# result dtype comes from dtype_for_floor_or_ceil (floats become :int64,
# other dtypes are preserved).
[:floor, :ceil].each do |meth|
  define_method("__list_unary_#{meth}__") do
    return_dtype = dtype_for_floor_or_ceil

    if [:complex64, :complex128].include?(self.dtype)
      self.__list_map_stored__(nil) { |l| Complex(l.real.send(meth), l.imag.send(meth)) }.cast(stype, return_dtype)
    else
      self.__list_map_stored__(nil) { |l| l.send(meth) }.cast(stype, return_dtype)
    end
  end

  define_method("__yale_unary_#{meth}__") do
    return_dtype = dtype_for_floor_or_ceil

    if [:complex64, :complex128].include?(self.dtype)
      self.__yale_map_stored__ { |l| Complex(l.real.send(meth), l.imag.send(meth)) }.cast(stype, return_dtype)
    else
      self.__yale_map_stored__ { |l| l.send(meth) }.cast(stype, return_dtype)
    end
  end

  define_method("__dense_unary_#{meth}__") do
    return_dtype = dtype_for_floor_or_ceil

    if [:complex64, :complex128].include?(self.dtype)
      self.__dense_map__ { |l| Complex(l.real.send(meth), l.imag.send(meth)) }.cast(stype, return_dtype)
    else
      self.__dense_map__ { |l| l.send(meth) }.cast(stype, return_dtype)
    end
  end
end
|
|
871
|
+
|
|
872
|
+
# These take two arguments. One might be a matrix, and one might be a scalar.
# See also monkeys.rb, which contains Math module patches to let the first
# arg be a scalar
#
# `order` signals that the receiver should be the SECOND argument of the
# math function. In the elementwise variants this is done by swapping the
# block parameter names (|r,l| instead of |l,r|) while still calling
# Math.send(ewop, l, r); in the scalar variants by swapping the argument
# positions of the Math call itself. Results always cast to the upcast of
# the receiver's dtype and :float64.
[:atan2, :ldexp, :hypot].each do |ewop|
  define_method("__list_elementwise_#{ewop}__") do |rhs,order|
    if order then
      self.__list_map_merged_stored__(rhs, nil) { |r,l| Math.send(ewop,l,r) }
    else
      self.__list_map_merged_stored__(rhs, nil) { |l,r| Math.send(ewop,l,r) }
    end.cast(stype, NMatrix.upcast(dtype, :float64))
  end

  define_method("__dense_elementwise_#{ewop}__") do |rhs, order|
    if order then
      self.__dense_map_pair__(rhs) { |r,l| Math.send(ewop,l,r) }
    else
      self.__dense_map_pair__(rhs) { |l,r| Math.send(ewop,l,r) }
    end.cast(stype, NMatrix.upcast(dtype, :float64))
  end

  define_method("__yale_elementwise_#{ewop}__") do |rhs, order|
    if order then
      self.__yale_map_merged_stored__(rhs, nil) { |r,l| Math.send(ewop,l,r) }
    else
      self.__yale_map_merged_stored__(rhs, nil) { |l,r| Math.send(ewop,l,r) }
    end.cast(stype, NMatrix.upcast(dtype, :float64))
  end

  define_method("__list_scalar_#{ewop}__") do |rhs,order|
    if order then
      self.__list_map_stored__(nil) { |l| Math.send(ewop, rhs, l) }
    else
      self.__list_map_stored__(nil) { |l| Math.send(ewop, l, rhs) }
    end.cast(stype, NMatrix.upcast(dtype, :float64))
  end

  define_method("__yale_scalar_#{ewop}__") do |rhs,order|
    if order then
      self.__yale_map_stored__ { |l| Math.send(ewop, rhs, l) }
    else
      self.__yale_map_stored__ { |l| Math.send(ewop, l, rhs) }
    end.cast(stype, NMatrix.upcast(dtype, :float64))
  end

  define_method("__dense_scalar_#{ewop}__") do |rhs,order|
    if order
      self.__dense_map__ { |l| Math.send(ewop, rhs, l) }
    else
      self.__dense_map__ { |l| Math.send(ewop, l, rhs) }
    end.cast(stype, NMatrix.upcast(dtype, :float64))
  end
end
|
|
924
|
+
|
|
925
|
+
# Equality operators do not involve a cast. We want to get back matrices of TrueClass and FalseClass.
#
# One matrix-matrix and one matrix-scalar variant per storage type; the
# Object-dtype result of the map is returned directly (no .cast).
#
# NOTE(review): __list_scalar_*__ passes rhs to __list_map_merged_stored__
# exactly like the matrix-matrix variant, whereas the yale/dense scalar
# variants send the operator to each stored element with rhs as argument —
# confirm this asymmetry is intended by the list-storage iterator.
{eqeq: :==, neq: :!=, lt: :<, gt: :>, leq: :<=, geq: :>=}.each_pair do |ewop, op|
  define_method("__list_elementwise_#{ewop}__") do |rhs|
    self.__list_map_merged_stored__(rhs, nil) { |l,r| l.send(op,r) }
  end
  define_method("__dense_elementwise_#{ewop}__") do |rhs|
    self.__dense_map_pair__(rhs) { |l,r| l.send(op,r) }
  end
  define_method("__yale_elementwise_#{ewop}__") do |rhs|
    self.__yale_map_merged_stored__(rhs, nil) { |l,r| l.send(op,r) }
  end

  define_method("__list_scalar_#{ewop}__") do |rhs|
    self.__list_map_merged_stored__(rhs, nil) { |l,r| l.send(op,r) }
  end
  define_method("__yale_scalar_#{ewop}__") do |rhs|
    self.__yale_map_stored__ { |l| l.send(op,rhs) }
  end
  define_method("__dense_scalar_#{ewop}__") do |rhs|
    self.__dense_map__ { |l| l.send(op,rhs) }
  end
end
|
|
947
|
+
end
|
|
948
|
+
|
|
949
|
+
# Load the interpreter-specific math backend: JRuby gets the Java-backed
# implementation, CRuby (MRI) gets the C-extension bindings.
if jruby?
  require_relative "./jruby/math.rb"
else
  require_relative "./cruby/math.rb"
end
|