statsample 0.5.1 → 0.6.0

Files changed (51)
  1. data/History.txt +12 -0
  2. data/Manifest.txt +13 -0
  3. data/README.txt +2 -1
  4. data/demo/pca.rb +29 -0
  5. data/demo/umann.rb +8 -0
  6. data/lib/distribution.rb +0 -1
  7. data/lib/matrix_extension.rb +35 -21
  8. data/lib/statsample.rb +31 -28
  9. data/lib/statsample/anova.rb +7 -2
  10. data/lib/statsample/bivariate.rb +17 -11
  11. data/lib/statsample/codification.rb +136 -87
  12. data/lib/statsample/combination.rb +0 -2
  13. data/lib/statsample/converter/csv18.rb +1 -1
  14. data/lib/statsample/converter/csv19.rb +1 -1
  15. data/lib/statsample/converters.rb +176 -171
  16. data/lib/statsample/crosstab.rb +227 -154
  17. data/lib/statsample/dataset.rb +94 -12
  18. data/lib/statsample/dominanceanalysis.rb +69 -62
  19. data/lib/statsample/dominanceanalysis/bootstrap.rb +25 -21
  20. data/lib/statsample/factor.rb +18 -0
  21. data/lib/statsample/factor/pca.rb +128 -0
  22. data/lib/statsample/factor/principalaxis.rb +133 -0
  23. data/lib/statsample/factor/rotation.rb +125 -0
  24. data/lib/statsample/histogram.rb +99 -0
  25. data/lib/statsample/mle.rb +125 -126
  26. data/lib/statsample/mle/logit.rb +91 -91
  27. data/lib/statsample/mle/probit.rb +84 -85
  28. data/lib/statsample/multiset.rb +1 -1
  29. data/lib/statsample/permutation.rb +96 -0
  30. data/lib/statsample/regression.rb +1 -1
  31. data/lib/statsample/regression/binomial.rb +89 -89
  32. data/lib/statsample/regression/binomial/logit.rb +9 -9
  33. data/lib/statsample/regression/binomial/probit.rb +9 -9
  34. data/lib/statsample/regression/multiple.rb +8 -14
  35. data/lib/statsample/regression/multiple/gslengine.rb +1 -1
  36. data/lib/statsample/regression/multiple/rubyengine.rb +55 -55
  37. data/lib/statsample/resample.rb +12 -17
  38. data/lib/statsample/srs.rb +4 -1
  39. data/lib/statsample/test.rb +23 -22
  40. data/lib/statsample/test/umannwhitney.rb +182 -0
  41. data/lib/statsample/vector.rb +854 -815
  42. data/test/test_bivariate.rb +132 -132
  43. data/test/test_codification.rb +71 -50
  44. data/test/test_dataset.rb +19 -1
  45. data/test/test_factor.rb +44 -0
  46. data/test/test_histogram.rb +26 -0
  47. data/test/test_permutation.rb +37 -0
  48. data/test/test_statistics.rb +74 -63
  49. data/test/test_umannwhitney.rb +17 -0
  50. data/test/test_vector.rb +46 -30
  51. metadata +31 -4
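
Most of the new surface area is in the factor-analysis classes (factor/pca.rb, factor/principalaxis.rb, factor/rotation.rb), permutations, histograms, and a Mann-Whitney U test (test/umannwhitney.rb plus its test file). As a quick orientation to the statistic that last file covers, here is a standalone Ruby sketch of U computed from pooled ranks; it is illustrative only and does not use statsample's own API, which may differ:

  # Standalone sketch: Mann-Whitney U from pooled ranks.
  # Tied values receive the average of the ranks they span.
  def mann_whitney_u(a, b)
    pooled = (a + b).sort
    ranks  = Hash.new                       # value => list of 1-based positions
    pooled.each_with_index { |v, i| (ranks[v] ||= []) << i + 1 }
    avg_rank = ->(v) { ranks[v].sum.to_f / ranks[v].size }
    r_a = a.map { |v| avg_rank.call(v) }.sum   # rank sum of sample a
    u_a = r_a - (a.size * (a.size + 1)) / 2.0
    u_b = a.size * b.size - u_a
    [u_a, u_b].min                             # conventional U statistic
  end

  puts mann_whitney_u([1, 4, 5, 9], [2, 3, 6, 7, 8])  # => 9.0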
data/lib/statsample/mle.rb (whitespace-only hunk: the module body is re-indented and one stray blank line dropped; no statement changes. The file after the change:)
@@ -1,135 +1,134 @@
module Statsample
  # Module for MLE calculations.
  # Use subclass of BaseMLE for specific MLE model estimation.
  # Usage:
  #
  #  mle=Statsample::MLE::Probit.new
  #  mle.newton_raphson(x,y)
  #  beta=mle.parameters
  #  likehood=mle.likehood(x,y,beta)
  #  iterations=mle.iterations
  #
  module MLE
    class BaseMLE
      attr_accessor :verbose
      attr_accessor :output
      # Could be :parameters or :mle
      attr_accessor :stop_criteria
      # Variance - Covariance matrix
      attr_reader :var_cov_matrix
      # Iterations
      attr_reader :iterations
      # Parameters (beta coefficients)
      attr_reader :parameters
      ITERATIONS=100
      MIN_DIFF=1e-5
      MIN_DIFF_PARAMETERS=1e-2
      # Model should be a MLE subclass
      def initialize()
        @verbose = false
        @output = STDOUT
        @stop_criteria = :parameters
        @var_cov_matrix = nil
        @iterations = nil
        @parameters = nil
      end
      # Calculate likehood for matrices x and y, given b parameters
      def likehood(x,y,b)
        prod=1
        x.row_size.times{|i|
          xi=Matrix.rows([x.row(i).to_a.collect{|v| v.to_f}])
          y_val=y[i,0].to_f
          fbx=f(b,x)
          prod=prod*likehood_i(xi,y_val,b)
        }
        prod
      end
      # Calculate log likehood for matrices x and y, given b parameters
      def log_likehood(x,y,b)
        sum=0
        x.row_size.times{|i|
          xi=Matrix.rows([x.row(i).to_a.collect{|v| v.to_f}])
          y_val=y[i,0].to_f
          sum+=log_likehood_i(xi,y_val,b)
        }
        sum
      end
      # Creates a zero matrix Mx1, with M=x.M
      def set_default_parameters(x)
        fd=[0.0]*x.column_size
        fd.push(0.1) if self.is_a? Statsample::MLE::Normal
        parameters = Matrix.columns([fd])
      end

      # Newton Raphson with automatic stopping criteria.
      # Based on: Von Tessin, P. (2005). Maximum Likelihood Estimation With Java and Ruby
      #
      # <tt>x</tt>:: matrix of dependent variables. Should have nxk dimensions
      # <tt>y</tt>:: matrix of independent values. Should have nx1 dimensions
      # <tt>@m</tt>:: class for @ming. Could be Normal or Logit
      # <tt>start_values</tt>:: matrix of coefficients. Should have 1xk dimensions
      def newton_raphson(x,y, start_values=nil)
        # deep copy?
        if start_values.nil?
          parameters=set_default_parameters(x)
        else
          parameters = start_values.dup
        end
        k=parameters.row_size
        cv=Matrix.rows([([1.0]*k)])
        last_diff=nil
        raise "n on y != n on x" if x.row_size!=y.row_size
        h=nil
        fd=nil
        if @stop_criteria==:mle
          old_likehood=log_likehood(x, y, parameters)
        else
          old_parameters=parameters
        end
        ITERATIONS.times do |i|
          @iterations=i+1
          puts "Set #{i}" if @verbose
          h = second_derivative(x,y,parameters)
          if h.singular?
            raise "Hessian is singular!"
          end
          fd = first_derivative(x,y,parameters)
          parameters = parameters-(h.inverse*(fd))

          if @stop_criteria==:parameters
            flag=true
            k.times do |j|
              diff= ( parameters[j,0] - old_parameters[j,0] ) / parameters[j,0]
              flag=false if diff.abs >= MIN_DIFF_PARAMETERS
              @output.puts "Parameters #{j}: #{diff}" if @verbose
            end
            if flag
              @var_cov_matrix = h.inverse*-1.0
              return parameters
            end
            old_parameters=parameters
          else
            begin
              new_likehood = log_likehood(x,y,parameters)
              @output.puts "[#{i}]Log-MLE:#{new_likehood} (Diff:#{(new_likehood-old_likehood) / new_likehood})" if @verbose
              if(new_likehood < old_likehood) or ((new_likehood - old_likehood) / new_likehood).abs < MIN_DIFF
                @var_cov_matrix = h.inverse*-1.0
                #@output.puts "Ok"
                break;
              end
              old_likehood=new_likehood
            rescue =>e
              puts "#{e}"
              #puts "dup"
            end
          end
        end
        @parameters=parameters
        parameters
      end
    end
  end
end

require 'statsample/mle/normal'
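
Tying the pieces together, the usage pattern in the header comment plays out as follows. This is a minimal sketch, assuming statsample 0.6.0 is installed and using made-up data; the subclasses (Logit, Probit, Normal) supply the f, first_derivative, and second_derivative that BaseMLE#newton_raphson calls:

  require 'statsample'

  # x: n x k matrix of predictors (intercept column first),
  # y: n x 1 matrix of 0/1 outcomes.
  x = Matrix.rows([[1.0, 0.5], [1.0, 1.2], [1.0, 2.3],
                   [1.0, 3.1], [1.0, 4.0], [1.0, 5.2]])
  y = Matrix.columns([[0, 0, 1, 0, 1, 1]])

  mle = Statsample::MLE::Logit.new
  mle.stop_criteria = :mle            # stop on log-likelihood change, not parameters
  beta = mle.newton_raphson(x, y)     # k x 1 matrix of coefficients
  puts beta.to_a.inspect
  puts mle.iterations                 # Newton steps taken
  puts mle.log_likehood(x, y, beta)   # note the gem's own spelling, "likehood"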
data/lib/statsample/mle/logit.rb (whitespace-only hunk: the module body is re-indented; no statement changes. The file after the change:)
@@ -1,95 +1,95 @@
module Statsample
  module MLE
    # Logit MLE estimation.
    # Usage:
    #
    #  mle=Statsample::MLE::Logit.new
    #  mle.newton_raphson(x,y)
    #  beta=mle.parameters
    #  likehood=mle.likehood(x,y,beta)
    #  iterations=mle.iterations
    #
    class Logit < BaseMLE
      # F(B'Xi)
      def f(b,xi)
        p_bx=(xi*b)[0,0]
        res=(1.0/(1.0+Math::exp(-p_bx)))
        if res==0.0
          res=1e-15
        elsif res==1.0
          res=0.999999999999999
        end
        res
      end
      # Likehood for x_i vector, y_i scalar and b parameters
      def likehood_i(xi,yi,b)
        (f(b,xi)**yi)*((1-f(b,xi))**(1-yi))
      end
      # Log Likehood for x_i vector, y_i scalar and b parameters
      def log_likehood_i(xi,yi,b)
        fbx=f(b,xi)
        (yi.to_f*Math::log(fbx))+((1.0-yi.to_f)*Math::log(1.0-fbx))
      end

      # First derivative of log-likehood function
      # x: Matrix (NxM)
      # y: Matrix (Nx1)
      # p: Matrix (Mx1)
      def first_derivative(x,y,p)
        raise "x.rows!=y.rows" if x.row_size!=y.row_size
        raise "x.columns!=p.rows" if x.column_size!=p.row_size
        n = x.row_size
        k = x.column_size
        fd = Array.new(k)
        k.times {|i| fd[i] = [0.0]}
        n.times do |i|
          row = x.row(i).to_a
          value1 = (1-y[i,0]) -p_plus(row,p)
          k.times do |j|
            fd[j][0] -= value1*row[j]
          end
        end
        Matrix.rows(fd, true)

      end
      # Second derivative of log-likehood function
      # x: Matrix (NxM)
      # y: Matrix (Nx1)
      # p: Matrix (Mx1)
      def second_derivative(x,y,p)
        raise "x.rows!=y.rows" if x.row_size!=y.row_size
        raise "x.columns!=p.rows" if x.column_size!=p.row_size
        n = x.row_size
        k = x.column_size
        sd = Array.new(k)
        k.times do |i|
          arr = Array.new(k)
          k.times{ |j| arr[j]=0.0}
          sd[i] = arr
        end
        n.times do |i|
          row = x.row(i).to_a
          p_m = p_minus(row,p)
          k.times do |j|
            k.times do |l|
              sd[j][l] -= p_m *(1-p_m)*row[j]*row[l]
            end
          end
        end
        Matrix.rows(sd, true)
      end
      private
      def p_minus(x_row,p)
        value = 0.0;
        x_row.each_index { |i| value += x_row[i]*p[i,0]}
        1/(1+Math.exp(-value))
      end
      def p_plus(x_row,p)
        value = 0.0;
        x_row.each_index { |i| value += x_row[i]*p[i,0]}
        1/(1+Math.exp(value))
      end

    end # Logit
  end # MLE
end # Statsample
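
For reference, the sums that first_derivative and second_derivative accumulate are the standard score vector and Hessian of the logit log-likelihood; with F(z) = 1/(1+e^{-z}) (the helper p_minus), this is a restatement of what the loops above compute:

  \ell(\beta) = \sum_{i=1}^{n} \left[ y_i \log F(\beta' x_i) + (1-y_i)\log\bigl(1-F(\beta' x_i)\bigr) \right]

  \frac{\partial \ell}{\partial \beta} = \sum_{i=1}^{n} \bigl(y_i - F(\beta' x_i)\bigr)\, x_i
  \qquad
  \frac{\partial^2 \ell}{\partial \beta\, \partial \beta'} = -\sum_{i=1}^{n} F(\beta' x_i)\bigl(1-F(\beta' x_i)\bigr)\, x_i x_i'

BaseMLE#newton_raphson then iterates beta := beta - H^{-1} (d ell / d beta), and at convergence stores -H^{-1} as var_cov_matrix, matching the h.inverse*-1.0 line in mle.rb.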
data/lib/statsample/mle/probit.rb (whitespace-only hunk: the module body is re-indented and one stray blank line dropped; no statement changes. The file after the change:)
@@ -1,93 +1,92 @@
require 'matrix_extension'
module Statsample
  module MLE
    # Probit MLE estimation.
    # Usage:
    #
    #  mle=Statsample::MLE::Probit.new
    #  mle.newton_raphson(x,y)
    #  beta=mle.parameters
    #  likehood=mle.likehood(x,y,beta)
    #  iterations=mle.iterations
    class Probit < BaseMLE
      # F(B'Xi)
      if HAS_GSL
        # F(B'Xi)
        def f(b,x)
          p_bx=(x*b)[0,0]
          GSL::Cdf::ugaussian_P(p_bx)
        end
        # f(B'Xi)
        def ff(b,x)
          p_bx=(x*b)[0,0]
          GSL::Ran::ugaussian_pdf(p_bx)
        end
      else
        def f(b,x) #:nodoc:
          p_bx=(x*b)[0,0]
          Distribution::Normal.cdf(p_bx)
        end
        def ff(b,x) #:nodoc:
          p_bx=(x*b)[0,0]
          Distribution::Normal.pdf(p_bx)
        end
      end
      # Log Likehood for x_i vector, y_i scalar and b parameters
      def log_likehood_i(xi,yi,b)
        fbx=f(b,xi)
        (yi.to_f*Math::log(fbx))+((1.0-yi.to_f)*Math::log(1.0-fbx))
      end
      # First derivative of log-likehood probit function
      # x: Matrix (NxM)
      # y: Matrix (Nx1)
      # p: Matrix (Mx1)
      def first_derivative(x,y,b)
        raise "x.rows!=y.rows" if x.row_size!=y.row_size
        raise "x.columns!=p.rows" if x.column_size!=b.row_size
        n = x.row_size
        k = x.column_size
        fd = Array.new(k)
        k.times {|i| fd[i] = [0.0]}
        n.times do |i|
          xi = Matrix.rows([x.row(i).to_a])
          fbx=f(b,xi)
          value1 = (y[i,0]-fbx)/ ( fbx*(1-fbx))*ff(b,xi)
          k.times do |j|
            fd[j][0] += value1*xi[0,j]
          end
        end
        Matrix.rows(fd, true)
      end
      # Second derivative of log-likehood probit function
      # x: Matrix (NxM)
      # y: Matrix (Nx1)
      # p: Matrix (Mx1)

      def second_derivative(x,y,b)
        raise "x.rows!=y.rows" if x.row_size!=y.row_size
        raise "x.columns!=p.rows" if x.column_size!=b.row_size
        n = x.row_size
        k = x.column_size
        if HAS_GSL
          sum=GSL::Matrix.zeros(k)
        else
          sum=Matrix.zero(k)
        end
        n.times do |i|
          xi=Matrix.rows([x.row(i).to_a])
          fbx=f(b,xi)
          val=((ff(b,xi)**2) / (fbx*(1.0-fbx)))*xi.t*xi
          if HAS_GSL
            val=val.to_gsl
          end
          sum-=val
        end
        if HAS_GSL
          sum=sum.to_matrix
        end
        sum
      end
    end # Probit
  end # MLE
end # Statsample
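
In the non-GSL branch, f and ff delegate to Distribution::Normal.cdf and Distribution::Normal.pdf, i.e. the standard normal CDF and density. For readers without the gems handy, both reduce to elementary functions; a minimal standalone sketch using Ruby's built-in Math.erf, not the gem's implementation:

  # Standard normal density and CDF in plain Ruby.
  def ugaussian_pdf(z)
    Math.exp(-0.5 * z * z) / Math.sqrt(2 * Math::PI)
  end

  def ugaussian_cdf(z)
    0.5 * (1.0 + Math.erf(z / Math.sqrt(2.0)))
  end

  puts ugaussian_pdf(0.0)   # => 0.3989...
  puts ugaussian_cdf(1.96)  # => 0.9750...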