statsample-timeseries 0.0.3 → 0.3.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/.gitignore +19 -0
- data/.travis.yml +13 -10
- data/Gemfile +2 -21
- data/History.md +4 -0
- data/LICENSE.txt +1 -1
- data/README.md +62 -0
- data/Rakefile +12 -17
- data/lib/statsample-timeseries.rb +3 -13
- data/lib/statsample-timeseries/arima.rb +72 -74
- data/lib/statsample-timeseries/arima/kalman.rb +20 -40
- data/lib/statsample-timeseries/arima/likelihood.rb +3 -4
- data/lib/statsample-timeseries/daru_monkeys.rb +78 -0
- data/lib/statsample-timeseries/timeseries/pacf.rb +47 -38
- data/lib/statsample-timeseries/utility.rb +105 -133
- data/lib/statsample-timeseries/version.rb +5 -0
- data/statsample-timeseries.gemspec +31 -0
- data/test/helper.rb +6 -29
- data/test/test_acf.rb +41 -0
- data/test/test_arima_ks.rb +28 -12
- data/test/test_arima_simulators.rb +59 -42
- data/test/test_matrix.rb +1 -1
- data/test/test_pacf.rb +7 -2
- data/test/test_wald.rb +7 -3
- metadata +81 -132
- data/README.rdoc +0 -72
- data/VERSION +0 -1
- data/bin/bio-statsample-timeseries +0 -74
- data/features/acf.feature +0 -31
- data/features/pacf.feature +0 -42
- data/features/step_definitions/bio-statsample-timeseries_steps.rb +0 -0
- data/features/step_definitions/step_definitions.rb +0 -37
- data/features/step_definitions/step_definitions_acf.rb +0 -8
- data/features/support/env.rb +0 -15
- data/lib/statsample-timeseries/timeseries.rb +0 -291
- data/test/test_tseries.rb +0 -103
data/lib/statsample-timeseries/arima/kalman.rb

@@ -8,7 +8,7 @@ module Statsample
 include GSL::MultiMin if Statsample.has_gsl?

 #timeseries object
-
+attr_writer :ts
 #Autoregressive order
 attr_accessor :p
 #Integerated part order
@@ -22,14 +22,18 @@ module Statsample
 attr_reader :ma

 #Creates a new KalmanFilter object and computes the likelihood
-def initialize(ts=[]
-@ts = ts
+def initialize(ts=[], p=0, i=0, q=0)
+@ts = ts.to_a
 @p = p
 @i = i
 @q = q
 ks #call the filter
 end

+def ts
+Daru::Vector.new(@ts)
+end
+
 def to_s
 sprintf("ARIMA model(p = %d, i = %d, q = %d) on series(%d elements) - [%s]",
 @p, @i, @q, @ts.size, @ts.to_a.join(','))
@@ -54,7 +58,7 @@ module Statsample
 p,q = params[1], params[2]
 params = x
 #puts x
--Arima::KF::LogLikelihood.new(x.to_a, timeseries, p, q).
+-Arima::KF::LogLikelihood.new(x.to_a, timeseries, p, q).log_likelihood
 #KalmanFilter.ll(x.to_a, timeseries, p, q)
 }
 np = @p + @q
@@ -71,20 +75,13 @@ module Statsample
 while status == GSL::CONTINUE && iter < 100
 iter += 1
 begin
-status = minimizer.iterate
+status = minimizer.iterate
 status = minimizer.test_size(1e-2)
 x = minimizer.x
 rescue
 break
 end
-# printf("%5d ", iter)
-# for i in 0...np do
-# puts "#{x[i]}.to_f"
-# #printf("%10.3e ", x[i].to_f)
-# end
-# printf("f() = %7.3f size = %.3f\n", minimizer.fval, minimizer.size)
 end
-#
 @ar = (p > 0) ? x.to_a[0...p] : []
 @ma = (q > 0) ? x.to_a[p...(p+q)] : []
 x.to_a
@@ -112,36 +109,19 @@ module Statsample
 Arima::KF::LogLikelihood.new(params, timeseries, p, q)
 end

-#=T
-#The coefficient matrix for the state vector in state equation
-# It's dimensions is r+k x r+k
-#==Parameters
-#* *r*: integer, r is max(p, q+1), where p and q are orders of AR and MA respectively
-#* *k*: integer, number of exogeneous variables in ARMA model
-#* *q*: integer, The AR coefficient of ARMA model

-
-
-
-
-
-
-
-
-# intermediate_matrix[0,0] = [params_padded]
-#
-# #now generating column matrix for that:
-# arr = Matrix.columns(intermediate_matrix)
-# arr_00 = arr[0,0]
-#
-# #identify matrix substituition in matrix except row[0] and column[0]
-# r.times do |i|
-# arr[r,r] = 1
-# end
-# arr[0,0] = arr_00
-# arr
-#end
+def self.T(r, k, p)
+#=T
+#The coefficient matrix for the state vector in state equation
+# It's dimensions is r+k x r+k
+#==Parameters
+#* *r*: integer, r is max(p, q+1), where p and q are orders of AR and MA respectively
+#* *k*: integer, number of exogeneous variables in ARMA model
+#* *q*: integer, The AR coefficient of ARMA model

+#==References Statsmodels tsa, Durbin and Koopman Section 4.7
+raise NotImplementedError
+end
 end
 end
 end
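The kalman.rb hunks above add `attr_writer :ts`, move the `p`, `i`, `q` orders into the constructor defaults, store the series internally as a plain Array (`@ts = ts.to_a`), and rebuild a `Daru::Vector` in the new `ts` reader. A minimal sketch of that storage pattern, using a hypothetical `TsHolder` class rather than the gem's `KalmanFilter` (requires the `daru` gem):

```ruby
# Sketch only: TsHolder is a stand-in for the gem's KalmanFilter, showing the
# storage pattern from the hunk above. Requires the daru gem.
require 'daru'

class TsHolder
  attr_writer :ts

  # Mirrors the new signature: series plus p, i, q orders.
  def initialize(ts = [], p = 0, i = 0, q = 0)
    @ts = ts.to_a # accepts an Array or a Daru::Vector
    @p, @i, @q = p, i, q
  end

  # Reader rebuilds a Daru::Vector on demand, like the added `def ts`.
  def ts
    Daru::Vector.new(@ts)
  end
end

holder = TsHolder.new([1.2, 0.7, -0.3], 1, 0, 1)
holder.ts # => a Daru::Vector wrapping [1.2, 0.7, -0.3]
```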
data/lib/statsample-timeseries/arima/likelihood.rb

@@ -16,7 +16,7 @@ module Statsample

 def initialize(params, timeseries, p, q)
 @params = params
-@timeseries = timeseries
+@timeseries = timeseries.to_a
 @p = p
 @q = q
 ll
@@ -50,7 +50,7 @@ module Statsample
 t = Matrix.zero(m)
 #set_column is available in utility.rb
 t = t.set_column(0, phi)
-if
+if m > 1
 t[0...(m-1), 1...m] = Matrix.I(m-1)
 #chances of extra constant 0 values as unbalanced column, so:
 t = Matrix.columns(t.column_vectors)
@@ -66,10 +66,9 @@ module Statsample

 n.times do |i|
 v_t[i] = (z * a_t).map { |x| timeseries[i] - x }[0,0]
-
 f_t[i] = (z * p_t * (z.transpose)).map { |x| x + 1 }[0,0]

-k_t = ((t * p_t * z.transpose) + h).map { |x| x
+k_t = ((t * p_t * z.transpose) + h).map { |x| x.quo f_t[i] }

 a_t = (t * a_t) + (k_t * v_t[i])
 l_t = t - k_t * z
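The `k_t` fix above divides every element of the gain matrix by the prediction variance `f_t[i]`, using `Matrix#map` together with `quo`. A stdlib-only illustration of that pattern, independent of the gem:

```ruby
# Matrix#map visits every element; Numeric#quo divides exactly (returning
# Rationals for integers) instead of truncating integer division.
require 'matrix'

gain = Matrix[[2, 4], [6, 8]]
f_t  = 4
gain.map { |x| x.quo(f_t) } # each element divided by f_t, kept exact
```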
data/lib/statsample-timeseries/daru_monkeys.rb (new file)

@@ -0,0 +1,78 @@
+require 'statsample-timeseries/timeseries/pacf'
+module Statsample::TimeSeriesShorthands
+# Creates a new Statsample::TimeSeries object
+# Argument should be equal to TimeSeries.new
+def to_time_series(*args)
+Daru::Vector.new(self, *args)
+end
+
+alias :to_ts :to_time_series
+end
+
+class Array
+include Statsample::TimeSeriesShorthands
+end
+
+module Daru
+class Vector
+include Statsample::TimeSeries::Pacf
+
+# = Partial Autocorrelation
+# Generates partial autocorrelation series for a timeseries
+#
+# == Arguments
+#
+#* *max_lags*: integer, optional - provide number of lags
+#* *method*: string. Default: 'yw'.
+# * *yw*: For yule-walker algorithm unbiased approach
+# * *mle*: For Maximum likelihood algorithm approach
+# * *ld*: Forr Levinson-Durbin recursive approach
+#
+# == Returns
+#
+# array of pacf
+def pacf(max_lags = nil, method = :yw)
+helper = Statsample::TimeSeries::Pacf
+method = method.downcase.to_sym
+max_lags ||= (10 * Math.log10(size)).to_i
+if method == :yw or method == :mle
+helper.pacf_yw(self, max_lags, method.to_s)
+elsif method == :ld
+series = self.acvf
+helper.levinson_durbin(series, max_lags, true)[2]
+else
+raise "Method presents for pacf are 'yw', 'mle' or 'ld'"
+end
+end
+
+# == Autoregressive estimation
+# Generates AR(k) series for the calling timeseries by yule walker.
+#
+# == Parameters
+#
+#* *n*: integer, (default = 1500) number of observations for AR.
+#* *k*: integer, (default = 1) order of AR process.
+#
+# == Returns
+#
+# Array constituting estimated AR series.
+def ar(n = 1500, k = 1)
+series = Statsample::TimeSeries.arima
+#series = Statsample::TimeSeries::ARIMA.new
+series.yule_walker(self, n, k)
+end
+end
+end
+
+module Statsample
+module TimeSeries
+
+# Deprecated. Use Daru::Vector.
+class Series < Daru::Vector
+def initialize *args, &block
+$stderr.puts "This class has been deprecated. Use Daru::Vector directly."
+super(*args, &block)
+end
+end
+end
+end
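daru_monkeys.rb introduces the `Array#to_ts` shorthand and the `Daru::Vector#pacf`/`#ar` helpers shown above. A rough usage sketch, assuming `require 'statsample-timeseries'` loads this file; the series values are arbitrary sample data:

```ruby
# Usage sketch for the new shorthands; assumes the gem's entry point loads
# daru_monkeys.rb. The numbers below are made-up sample data.
require 'statsample-timeseries'

series = [2.1, 1.8, 2.4, 2.0, 1.9, 2.2].to_ts # Array#to_ts -> Daru::Vector
series.pacf(3, :yw)                           # partial autocorrelations via Yule-Walker
```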
data/lib/statsample-timeseries/timeseries/pacf.rb

@@ -1,14 +1,14 @@
 module Statsample
 module TimeSeries
 module Pacf
-class
-
-def self.pacf_yw(timeseries, max_lags, method = 'yw')
+class << self
+def pacf_yw(timeseries, max_lags, method = 'yw')
 #partial autocorrelation by yule walker equations.
 #Inspiration: StatsModels
 pacf = [1.0]
+arr = timeseries.to_a
 (1..max_lags).map do |i|
-pacf << yule_walker(
+pacf << yule_walker(arr, i, method)[0][-1]
 end
 pacf
 end
@@ -25,8 +25,7 @@ module Statsample
 #* *arcoefs*: AR coefficients
 #* *pacf*: pacf function
 #* *sigma*: some function
-def
-
+def levinson_durbin(series, nlags = 10, is_acovf = false)
 if is_acovf
 series = series.map(&:to_f)
 else
@@ -60,29 +59,34 @@ module Statsample
 return [sigma_v, arcoefs, pacf, sig, phi]
 end

-#Returns diagonal elements of matrices
-
-def self.diag(mat)
+# Returns diagonal elements of matrices
+def diag(mat)
 return mat.each_with_index(:diagonal).map { |x, r, c| x }
 end


 #=Yule Walker Algorithm
-#From the series, estimates AR(p)(autoregressive) parameter using Yule-Waler equation. See -
-#http://en.wikipedia.org/wiki/Autoregressive_moving_average_model
 #
-
+# From the series, estimates AR(p)(autoregressive) parameter using
+# Yule-Waler equation. See -
+# http://en.wikipedia.org/wiki/Autoregressive_moving_average_model
+#
+# == Parameters
+#
 #* *ts*: timeseries
 #* *k*: order, default = 1
 #* *method*: can be 'yw' or 'mle'. If 'yw' then it is unbiased, denominator is (n - k)
 #
-
+# == Returns
+#
 #* *rho*: autoregressive coefficients
 #* *sigma*: sigma parameter
-def
-ts = ts - ts.mean
+def yule_walker(ts, k = 1, method='yw')
 n = ts.size
-
+mean = (ts.inject(:+) / n)
+ts = ts.map { |t| t - mean }
+
+if method == 'yw'
 #unbiased => denominator = (n - k)
 denom =->(k) { n - k }
 else
@@ -94,36 +98,41 @@ module Statsample
 r[0] = ts.map { |x| x**2 }.inject(:+).to_f / denom.call(0).to_f

 1.upto(k) do |l|
-r[l] = (ts[0...-l].zip(ts[l...
+r[l] = (ts[0...-l].zip(ts[l...n])).map do |x|
 x.inject(:*)
 end.inject(:+).to_f / denom.call(l).to_f
 end

 r_R = toeplitz(r[0...-1])

-mat = Matrix.columns(r_R).inverse
+mat = Matrix.columns(r_R).inverse
 phi = solve_matrix(mat, r[1..r.size])
-phi_vector =
-r_vector =
-sigma = r[0] - (r_vector *
+phi_vector = phi
+r_vector = r[1..-1]
+sigma = r[0] - (r_vector.map.with_index {|e,i| e*phi_vector[i] }).inject(:+)
 return [phi, sigma]
 end

 #=ToEplitz
-#
-#
-#
-
+#
+# Generates teoeplitz matrix from an array
+# http://en.wikipedia.org/wiki/Toeplitz_matrix.
+# Toeplitz matrix are equal when they are stored in row & column major
+#
+# == Parameters
+#
 #* *arr*: array of integers;
-
-#
-#
-
-#
-#
-# [
-#
-
+#
+# == Usage
+#
+# arr = [0,1,2,3]
+# Pacf.toeplitz(arr)
+#
+# #=> [[0, 1, 2, 3],
+# #=> [1, 0, 1, 2],
+# #=> [2, 1, 0, 1],
+# #=> [3, 2, 1, 0]]
+def toeplitz(arr)
 eplitz_matrix = Array.new(arr.size) { Array.new(arr.size) }

 0.upto(arr.size - 1) do |i|
@@ -143,9 +152,10 @@ module Statsample
 eplitz_matrix
 end

-
-#
-
+#=Solves matrix equations
+#
+# Solves for X in AX = B
+def solve_matrix(matrix, out_vector)
 solution_vector = Array.new(out_vector.size, 0)
 matrix = matrix.to_a
 k = 0
@@ -157,7 +167,6 @@ module Statsample
 end
 solution_vector
 end
-
 end
 end
 end
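The pacf.rb changes above wrap the module's helpers in a single `class << self` block instead of repeating `def self....` on each method. Both forms define module-level methods, so call sites such as `Pacf.pacf_yw(...)` and `Pacf.levinson_durbin(...)` are unchanged. A stdlib-only illustration with a toy module:

```ruby
# `class << self` and `def self.` both yield module-level methods, so the
# refactor above does not change how Pacf's helpers are called.
module ToyA
  class << self
    def double(x)
      x * 2
    end
  end
end

module ToyB
  def self.double(x)
    x * 2
  end
end

ToyA.double(3) # => 6
ToyB.double(3) # => 6, same call shape
```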
data/lib/statsample-timeseries/utility.rb

@@ -1,154 +1,126 @@
-
-
-
-
-
-
-
-
-#* *demean*: boolean - optional. __default__: false
-#==Returns
-#Sums the timeseries and then returns the square
-def squares_of_sum(demean = false)
-if demean
-m = self.mean
-self.map { |x| (x-m) }.sum**2
-else
-return self.sum.to_f**2
-end
+class ::Matrix
+# == Squares of sum
+#
+# Does squares of sum in column order.
+# Necessary for computations in various processes
+def squares_of_sum
+(0...column_size).map do |j|
+self.column(j).sum**2
 end
 end

+# == Symmetric?
+# `symmetric?` is present in Ruby Matrix 1.9.3+, but not in 1.8.*
+#
+# == Returns
+#
+# bool
+def symmetric?
+return false unless square?

-
-
-
-#Necessary for computations in various processes
-def squares_of_sum
-(0...column_size).map do |j|
-self.column(j).sum**2
+(0...row_size).each do |i|
+0.upto(i).each do |j|
+return false if self[i, j] != self[j, i]
 end
 end
+true
+end

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-0.upto(row_size - 1).each do |i|
-if i == k
-sum = (0..(k-1)).inject(0.0){ |sum, j| sum + c[k, j] ** 2 }
-value = Math.sqrt(self[k,k] - sum)
-c[k, k] = value
-elsif i > k
-sum = (0..(k-1)).inject(0.0){ |sum, j| sum + c[i, j] * c[k, j] }
-value = (self[k,i] - sum) / c[k, k]
-c[i, k] = value
-end
+# == Cholesky decomposition
+#
+# Reference: http://en.wikipedia.org/wiki/Cholesky_decomposition
+# == Description
+#
+# Cholesky decomposition is reprsented by `M = L X L*`, where
+# M is the symmetric matrix and `L` is the lower half of cholesky matrix,
+# and `L*` is the conjugate form of `L`.
+#
+# == Returns
+#
+# Cholesky decomposition for a given matrix(if symmetric)
+#
+# == Utility
+#
+# Essential matrix function, requisite in kalman filter, least squares
+def cholesky
+raise ArgumentError, "Given matrix should be symmetric" unless symmetric?
+c = Matrix.zero(row_size)
+0.upto(row_size - 1).each do |k|
+0.upto(row_size - 1).each do |i|
+if i == k
+sum = (0..(k-1)).inject(0.0){ |sum, j| sum + c[k, j] ** 2 }
+value = Math.sqrt(self[k,k] - sum)
+c[k, k] = value
+elsif i > k
+sum = (0..(k-1)).inject(0.0){ |sum, j| sum + c[i, j] * c[k, j] }
+value = (self[k,i] - sum) / c[k, k]
+c[i, k] = value
 end
 end
-c
 end
+c
+end

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-end
+#==Chain Product
+#Class method
+#Returns the chain product of two matrices
+#===Usage:
+#Let `a` be 4 * 3 matrix,
+#Let `b` be 3 * 3 matrix,
+#Let `c` be 3 * 1 matrix,
+#then `Matrix.chain_dot(a, b, c)`
+#===NOTE:
+# Send the matrices in multiplicative order with proper dimensions
+def self.chain_dot(*args)
+#inspired by Statsmodels
+begin
+args.reduce { |x, y| x * y } #perform matrix multiplication in order
+rescue ExceptionForMatrix::ErrDimensionMismatch
+puts "ExceptionForMatrix: Please provide matrices with proper multiplicative dimensions"
 end
+end


-
-
-
-
-
-
-
-
-
-end
-end
-#append/prepend a column of one's
-vectors = (0...row_size).map do |r|
-if prepend
-[1.0].concat(self.row(r).to_a)
-else
-self.row(r).to_a.push(1.0)
-end
+#==Adds a column of constants.
+#Appends a column of ones to the matrix/array if first argument is false
+#If an n-array, first checks if one column of ones is already present
+#if present, then original(self) is returned, else, prepends with a vector of ones
+def add_constant(prepend = true)
+#for Matrix
+(0...column_size).each do |i|
+if self.column(i).map(&:to_f) == Object::Vector.elements(Array.new(row_size, 1.0))
+return self
 end
-return Matrix.rows(vectors)
-end
-
-#populates column i of given matrix with arr
-def set_column(i, arr)
-columns = self.column_vectors
-column = columns[i].to_a
-column[0...arr.size] = arr
-columns[i] = column
-return Matrix.columns(columns)
 end
-
-
-
-
-
-
-
-row = rows[i].to_a
-row[0...arr.size] = arr
-rows[i] = row
-return Matrix.rows(rows)
+#append/prepend a column of one's
+vectors = (0...row_size).map do |r|
+if prepend
+[1.0].concat(self.row(r).to_a)
+else
+self.row(r).to_a.push(1.0)
+end
 end
+return Matrix.rows(vectors)
+end

-
-
-
-
-
-
-
-# dims[i] = dim
-# return Matrix.send("#{dimension}s", dims)
-# end
-# end
-# end
+#populates column i of given matrix with arr
+def set_column(i, arr)
+columns = self.column_vectors
+column = columns[i].to_a
+column[0...arr.size] = arr
+columns[i] = column
+return Matrix.columns(columns)
 end

+#populates row i of given matrix with arr
+def set_row(i, arr)
+#similar implementation as set_column
+#writing and commenting metaprogrammed version
+#Please to give opinion :)
+rows = self.row_vectors
+row = rows[i].to_a
+row[0...arr.size] = arr
+rows[i] = row
+return Matrix.rows(rows)
+end
 end
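utility.rb now defines its helpers directly on `::Matrix`: `squares_of_sum`, `symmetric?`, `cholesky`, `Matrix.chain_dot`, `add_constant`, `set_column` and `set_row`. A hedged usage sketch, assuming `require 'statsample-timeseries'` loads these extensions; the matrices below are arbitrary:

```ruby
# Hedged sketch: exercises the ::Matrix extensions defined in utility.rb,
# assuming they are loaded via the gem. The example matrices are arbitrary.
require 'matrix'
require 'statsample-timeseries'

a = Matrix[[1, 2], [3, 4], [5, 6]] # 3 x 2
b = Matrix[[1, 0], [0, 1]]         # 2 x 2
Matrix.chain_dot(a, b)             # multiplies left to right: a * b

a.add_constant                     # prepends a column of 1.0s by default
a.set_column(0, [9, 9, 9])         # new Matrix with column 0 replaced
Matrix[[2, 1], [1, 2]].symmetric?  # => true
```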