ruby_linear_regression 0.1.4 → 0.1.5
- checksums.yaml +4 -4
- data/lib/ruby_linear_regression.rb +26 -6
- metadata +3 -3
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: d1a7e40c59b29d9c61e173b85021538411aa2d24
+  data.tar.gz: 1e8a0bb5a74404a173c11b431b67df705a16c203
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 1f7e3b259260062226855102db3afcb7470337c361374bcdefcc2b1f21c753f4aecd2be668f748abe7d0b820959c14019930722135c96663e286c51c5e942cd4
+  data.tar.gz: 94ab7e7fae12b68083ff0bf3f7dfeb88997a00a9952a71f5fa8ad15d0e384d511dc5a0fd72d529dc935efcb0a0ad87f235b70ece50f04be691c8e92a2367b92d
data/lib/ruby_linear_regression.rb
CHANGED
@@ -32,12 +32,26 @@ class RubyLinearRegression
   end
 
   # Compute the mean squared cost / error function
-  def compute_cost
+  def compute_cost test_x = nil, test_y = nil
+
+    if not test_x.nil?
+      test_x.each_index do |row|
+        test_x[row].each_index do |i|
+          test_x[row][i] = (test_x[row][i] - @mu[i]) / @sigma[i].to_f
+        end
+      end if @normalize
+      test_x = test_x.map { |r| [1].concat(r) }
+    end
+
+    # per default use training data to compute cost if no data is given
+    cost_x = test_x.nil? ? @x : Matrix.rows( test_x )
+    cost_y = test_y.nil? ? @y : Matrix.rows( test_y.collect { |e| [e] } )
+
     # First use matrix multiplication and vector subtraction to find errors
-    errors = (@x * @theta) - @y
+    errors = (cost_x * @theta) - cost_y
 
     # Then square all errors
-    errors = errors.map { |e| e**2 }
+    errors = errors.map { |e| (e.to_f**2) }
 
     # Find the mean of the square errors
     mean_square_error = 0.5 * (errors.inject{ |sum, e| sum + e }.to_f / errors.row_size)
@@ -46,10 +60,16 @@ class RubyLinearRegression
   end
 
   # Calculate the optimal theta using the normal equation
-  def train_normal_equation
+  def train_normal_equation l = 0
+
+    @lambda = l
+    lambda_matrix = Matrix.build(@theta.row_size,@theta.row_size) do |c,r|
+      (( c == 0 && r == 0) || c != r) ? 0 : 1;
+    end
+
     # Calculate the optimal theta using the normal equation
     # theta = ( X' * X )^-1 * X' * y
-    @theta = (@x.transpose * @x).inverse * @x.transpose * @y
+    @theta = (@x.transpose * @x + @lambda * lambda_matrix ).inverse * @x.transpose * @y
 
     return @theta
   end
@@ -97,7 +117,7 @@ class RubyLinearRegression
   end
 
   private
-    def normalize_data(x_data)
+    def normalize_data(x_data, mu = nil, sigma = nil)
 
       row_size = x_data.size
       column_count = x_data[0].is_a?( Array) ? x_data[0].size : 1
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: ruby_linear_regression
 version: !ruby/object:Gem::Version
-  version: 0.1.4
+  version: 0.1.5
 platform: ruby
 authors:
 - Soren Blond Daugaard
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2017-
+date: 2017-07-04 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: minitest
@@ -61,7 +61,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
     version: '0'
 requirements: []
 rubyforge_project:
-rubygems_version: 2.
+rubygems_version: 2.5.1
 signing_key:
 specification_version: 4
 summary: Linear regression implemented in Ruby.