deep_miner 0.1.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,7 @@
+ ---
+ SHA1:
+   metadata.gz: 7c89c1b6c7bb8dc9f353433f7118270561f7e6c2
+   data.tar.gz: fa1136b9eb30bfe93bdf07f644caa6cf45e188f8
+ SHA512:
+   metadata.gz: 43f6750f420ef9983b15f415219390bec7f75a74e483350702124ffe3b3d98205e2ef278ac00bf8b9950268469354cdf619fcffda8f4bd390b8cea0b7706da95
+   data.tar.gz: 5468d263c5877b1b6f1273e29964a7e1144978e678b26f5d6da051bea679e47bdcdd5359b269db840ce41615cdc85afc5536ef1e07911fa9d63822857d7bdb4b
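These digests cover the two archives packed inside the .gem file, which is a plain tar archive. As a hedged sketch (the local file names below are assumptions, not part of the package), they could be recomputed with Ruby's standard Digest library after unpacking the gem:

    require "digest"

    # Assumes metadata.gz and data.tar.gz have been extracted from
    # deep_miner-0.1.1.gem into the current directory.
    %w[metadata.gz data.tar.gz].each do |name|
      bytes = File.binread(name)
      puts "#{name} SHA1:   #{Digest::SHA1.hexdigest(bytes)}"
      puts "#{name} SHA512: #{Digest::SHA512.hexdigest(bytes)}"
    end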
lib/deep_miner.rb ADDED
@@ -0,0 +1,6 @@
+ require "deep_miner/version"
+ require_relative "deep_miner/vector_matrix"
+ require_relative "deep_miner/perceptron"
+
+ module DeepMiner
+ end
lib/deep_miner/perceptron.rb ADDED
@@ -0,0 +1,175 @@
+ module DeepMiner
+   # ajcost
+   #
+   # Perceptron is a class that implements a single-layer neural net.
+   # The implementation is built on the VectorMatrix class.
+   class Perceptron
+     attr_reader :input, :hidden, :output
+     attr_reader :weight_matrix_ih, :weight_matrix_ho
+     attr_reader :output_delta_v, :hidden_delta_v
+     attr_reader :change_matrix_i, :change_matrix_o
+     attr_reader :out_delta, :hidden_delta
+
+     def initialize(input, hidden, output)
+       fail ArgumentError, 'Input must be Array' unless input.is_a? Array
+       fail ArgumentError, 'Output must be Array' unless output.is_a? Array
+       @input = input
+       @hidden = hidden
+       @output = output
+       # Create activation vectors, weight matrices, and change matrices
+       init_activations
+       init_weight_matrices
+       init_change_matrices
+     end
+
+     def predict(input_vector)
+       fail ArgumentError, 'Input vector must be Array' unless input_vector.is_a? Array
+       fail ArgumentError, 'Predict input must be same size as input' unless input_vector.size == @input.size
+       # Set activation vector for the input layer
+       @act_input.apply_new_matrix([] << input_vector)
+
+       # Calculate activation vector for the hidden layer
+       calculate_activation_hidden
+
+       # Calculate activation vector for the output layer
+       calculate_activation_output
+     end
+
+     def back_propogate(target, eta, momentum)
+       fail ArgumentError, 'Target must be Array' unless target.is_a? Array
+       fail ArgumentError, 'Target and Output must have same size' unless target.size == @output.size
+       # Calculate errors for output layer
+       errors_output(target)
+
+       # Calculate errors for hidden layer
+       errors_hidden
+
+       # Update weight vector from hidden to outputs
+       update_output_weight_vector(eta, momentum)
+
+       # Update weight vector from inputs to hidden
+       update_hidden_weight_vector(eta, momentum)
+
+       calculate_error(target)
+     end
+
+     # Train the network with training data, expectations, epoch count, learning rate (eta), and momentum
+     def train(train_data, expect, epoch_iter = 50, eta, m)
+       fail ArgumentError, 'Training must be Array' unless train_data.is_a? Array
+       fail ArgumentError, 'Expectation must be Array' unless expect.is_a? Array
+       fail ArgumentError, 'Data and Expectation must be same size' unless train_data.size == expect.size
+
+       1.upto(epoch_iter) do
+         error = 0.0
+         train_data.zip(expect) do |sample, target|
+           predict(sample)
+           error += back_propogate(target, eta, m)
+         end
+       end
+     end
+
+     private
+
+     def calculate_activation_hidden
+       0.upto(@hidden - 1) do |j|
+         sum = 0.0
+         0.upto(@input.size - 1) do |i|
+           sum += @act_input.get_value(0, i) * @weight_matrix_ih.get_value(i, j)
+         end
+         @act_hidden.set_value(0, j, tanh_sigmoid(sum))
+       end
+     end
+
+     def calculate_activation_output
+       0.upto(@output.size - 1) do |k|
+         sum = 0.0
+         0.upto(@hidden - 1) do |j|
+           sum += @act_hidden.get_value(0, j) * @weight_matrix_ho.get_value(j, k)
+         end
+         @act_output.set_value(0, k, tanh_sigmoid(sum))
+       end
+       @act_output.to_array
+     end
+
+     def calculate_error(target)
+       error = 0.0
+       0.upto(target.size - 1) do |k|
+         error += 0.5 * (target[k] - @act_output.get_value(0, k))**2
+       end
+       error
+     end
+
+     def errors_output(target)
+       @out_delta = Array.new(@output.size, 0.0)
+       0.upto(@output.size - 1) do |k|
+         act_val = @act_output.get_value(0, k)
+         error = target[k] - act_val
+         @out_delta[k] = dx_tanh_sigmoid(act_val) * error
+       end
+     end
+
+     def errors_hidden
+       @hidden_delta = Array.new(@hidden, 0.0)
+       0.upto(@hidden - 1) do |i|
+         error = 0.0
+         0.upto(@output.size - 1) do |j|
+           error += @out_delta[j] * @weight_matrix_ho.get_value(i, j)
+         end
+         @hidden_delta[i] = dx_tanh_sigmoid(@act_hidden.get_value(0, i)) * error
+       end
+     end
+
+     def update_output_weight_vector(eta, momentum)
+       0.upto(@hidden - 1) do |j|
+         0.upto(@output.size - 1) do |k|
+           delta = @out_delta[k] * @act_hidden.get_value(0, j)
+           value = delta * eta + momentum * @change_matrix_o.get_value(j, k)
+           @weight_matrix_ho.add_value(j, k, value)
+           @change_matrix_o.set_value(j, k, delta)
+         end
+       end
+     end
+
+     def update_hidden_weight_vector(eta, momentum)
+       0.upto(@input.size - 1) do |i|
+         0.upto(@hidden - 1) do |j|
+           delta = hidden_delta[j] * @act_input.get_value(0, i)
+           value = delta * eta + momentum * @change_matrix_i.get_value(i, j)
+           @weight_matrix_ih.add_value(i, j, value)
+           @change_matrix_i.set_value(i, j, delta)
+         end
+       end
+     end
+
+     def init_activations
+       ins = [1.0] * @input.size
+       hids = [1.0] * @hidden
+       outs = [1.0] * @output.size
+       @act_input = VectorMatrix.new(nil, nil, nil, [] << ins)
+       @act_hidden = VectorMatrix.new(nil, nil, nil, [] << hids)
+       @act_output = VectorMatrix.new(nil, nil, nil, [] << outs)
+     end
+
+     def init_weight_matrices
+       @weight_matrix_ih = VectorMatrix.new(input.size, hidden, 0.1)
+       @weight_matrix_ho = VectorMatrix.new(hidden, output.size, 1.0)
+     end
+
+     def init_change_matrices
+       changei = Array.new(@input.size) { Array.new(@hidden, rand(-0.2..0.2)) }
+       changeo = Array.new(@hidden) { Array.new(@output.size, rand(-0.2..0.2)) }
+       @change_matrix_i = VectorMatrix.objectify(changei)
+       @change_matrix_o = VectorMatrix.objectify(changeo)
+     end
+
+     # Tanh activation used as the sigmoid
+     def tanh_sigmoid(x)
+       Math.tanh(x)
+     end
+
+     # Derivative of tanh, expressed in terms of the tanh output x
+     def dx_tanh_sigmoid(x)
+       1.0 - x**2
+     end
+   end
+ end
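As a rough usage sketch (not shipped with the gem; the layer sizes, training data, and hyperparameters below are invented for illustration, and the gem is assumed to be installed):

    require "deep_miner"

    # Layer widths come from the array sizes for input/output and the
    # integer hidden count: here 2 inputs, 3 hidden nodes, 1 output.
    net = DeepMiner::Perceptron.new([0, 0], 3, [0])

    samples = [[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0]]
    targets = [[0.0], [1.0], [1.0], [0.0]]

    # train(train_data, expect, epoch_iter, eta, momentum)
    net.train(samples, targets, 200, 0.3, 0.1)

    # predict returns the output activations as a nested array (VectorMatrix#to_array)
    samples.each { |s| puts "#{s.inspect} -> #{net.predict(s).inspect}" }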
lib/deep_miner/vector_matrix.rb ADDED
@@ -0,0 +1,240 @@
+ module DeepMiner
+   class VectorMatrix
+     attr_reader :matrix, :n, :m, :random
+
+     def initialize(n, m, random, matrix = nil)
+       if matrix.nil?
+         @n = n
+         @m = m
+         @random = random
+         initialize_h
+       else
+         simple_init(matrix)
+       end
+     end
+
+     # General Operations
+
+     def self.objectify(new_matrix)
+       fail ArgumentError, 'Input must be Array' unless new_matrix.is_a? Array
+       VectorMatrix.new(nil, nil, nil, new_matrix)
+     end
+
+     def apply_value(x)
+       @matrix = Array.new(@n) { Array.new(@m, x) }
+     end
+
+     def apply_new_matrix(x)
+       fail ArgumentError, 'Input must be Array' unless x.is_a? Array
+       @matrix = x
+       @n = @matrix.size
+       @m = @matrix[0].size
+       @random = nil
+     end
+
+     def to_array
+       @matrix
+     end
+
+     def set_value(i, j, x)
+       fail ArgumentError, 'Values cannot be nil' if i.nil? || j.nil? || x.nil?
+       fail ArgumentError, 'Indices must be in matrix bounds' if i < 0 || j < 0 || i >= @n || j >= @m
+       @matrix[i][j] = x
+     end
+
+     def add_value(i, j, x)
+       fail ArgumentError, 'Values cannot be nil' if i.nil? || j.nil? || x.nil?
+       fail ArgumentError, 'Indices must be in matrix bounds' if i < 0 || j < 0 || i >= @n || j >= @m
+       @matrix[i][j] += x
+     end
+
+     def get_value(i, j)
+       fail ArgumentError, 'Indices must be in matrix bounds' if i < 0 || j < 0 || i >= @n || j >= @m
+       @matrix[i][j]
+     end
+
+     def get_row(index, objectify = false)
+       fail ArgumentError, 'Index must be in matrix bounds' if index < 0 || index > @n - 1
+       return VectorMatrix.new(nil, nil, nil, [] << @matrix[index]) if objectify
+       @matrix[index]
+     end
+
+     def get_column(index, objectify = false)
+       fail ArgumentError, 'Index must be in matrix bounds' if index < 0 || index > @m - 1
+       column = []
+       0.upto(@n - 1) do |i|
+         column << @matrix[i][index]
+       end
+       return VectorMatrix.new(nil, nil, nil, [] << column) if objectify
+       column
+     end
+
+     def get_diagonal(objectify = false)
+       diagonal = []
+       0.upto(@n - 1) do |i|
+         0.upto(@m - 1) do |j|
+           diagonal << @matrix[i][j] if i == j
+         end
+       end
+       return VectorMatrix.new(nil, nil, nil, [] << diagonal) if objectify
+       diagonal
+     end
+
+     def see_i_vector_size
+       @matrix.size
+     end
+
+     def see_j_vector_size
+       return 0 if self.see_i_vector_size == 0
+       @matrix[0].size
+     end
+
+     # Matrix Specific Operations
+
+     def self.identity(n, objectify = false)
+       fail ArgumentError, 'N must have a size greater than 0' if n <= 0
+       new_matrix = Array.new(n) { Array.new(n) }
+       0.upto(n - 1) do |i|
+         0.upto(n - 1) do |j|
+           if i == j
+             new_matrix[i][j] = 1
+           else
+             new_matrix[i][j] = 0
+           end
+         end
+       end
+       return VectorMatrix.new(n, n, nil, new_matrix) if objectify
+       new_matrix
+     end
+
+     def trace
+       diagonal = get_diagonal(false)
+       sum = 0.0
+       0.upto(diagonal.size - 1) do |i|
+         sum += diagonal[i]
+       end
+       sum
+     end
+
+     def transpose(objectify = false)
+       transposed = []
+       0.upto(@m - 1) do |i|
+         transposed << get_column(i)
+       end
+       return VectorMatrix.new(nil, nil, nil, transposed) if objectify
+       transposed
+     end
+
+     # Matrix Mathematical Operations
+
+     # Scalar Product
+     def scale(scale_val, objectify = false)
+       scaled = Array.new(@n) { Array.new(@m) }
+       0.upto(@n - 1) do |i|
+         0.upto(@m - 1) do |j|
+           scaled[i][j] = scale_val * @matrix[i][j]
+         end
+       end
+       return VectorMatrix.new(nil, nil, nil, scaled) if objectify
+       scaled
+     end
+
+
+     # Add
+     def add(o_matrix, objectify, not_objectified = true)
+       fail ArgumentError, 'Input matrix must be of type Array' if not_objectified && !o_matrix.is_a?(Array)
+       o_matrix = VectorMatrix.objectify(o_matrix) if not_objectified
+       added = Array.new(@n) { Array.new(@m) }
+       0.upto(@n - 1) do |i|
+         0.upto(@m - 1) do |j|
+           added[i][j] = @matrix[i][j] + o_matrix.get_value(i, j)
+         end
+       end
+       return VectorMatrix.new(nil, nil, nil, added) if objectify
+       added
+     end
+
+     # Subtract
+     def subtract(o_matrix, objectify, not_objectified = true)
+       fail ArgumentError, 'Input matrix must be of type Array' if not_objectified && !o_matrix.is_a?(Array)
+       o_matrix = VectorMatrix.objectify(o_matrix) if not_objectified
+       subtracted = Array.new(@n) { Array.new(@m) }
+       0.upto(@n - 1) do |i|
+         0.upto(@m - 1) do |j|
+           subtracted[i][j] = @matrix[i][j] - o_matrix.get_value(i, j)
+         end
+       end
+       return VectorMatrix.new(nil, nil, nil, subtracted) if objectify
+       subtracted
+     end
+
+     # Multiply
+     def multiply(o_matrix, objectify, not_objectified = true)
+       fail ArgumentError, 'Input matrix must be of type Array' if not_objectified && !o_matrix.is_a?(Array)
+       o_matrix = VectorMatrix.objectify(o_matrix) if not_objectified
+       fail ArgumentError, 'Illegal matrix multiplication dimensions' if @m != o_matrix.see_i_vector_size
+       multiplied = multiply_h(o_matrix)
+       return VectorMatrix.new(nil, nil, nil, multiplied) if objectify
+       multiplied
+     end
+
+     # Matrix Qualities
+
+     def diagonal?
+       0.upto(@n - 1) do |i|
+         0.upto(@m - 1) do |j|
+           return false if (@matrix[i][j].nil? || @matrix[i][j] != 0.0) && i != j
+         end
+       end
+       true
+     end
+
+     def empty?
+       return true if @matrix.size == 0
+       0.upto(@n - 1) do |i|
+         0.upto(@m - 1) do |j|
+           return false unless @matrix[i][j].nil?
+         end
+       end
+       true
+     end
+
+     def square?
+       @n == @m
+     end
+
+     private
+
+     def initialize_h
+       @matrix = Array.new(@n) { Array.new(@m) }
+       0.upto(@n - 1) do |i|
+         0.upto(@m - 1) do |j|
+           if !@random.nil?
+             @matrix[i][j] = 1.0 * rand(-@random...@random)
+           else
+             @matrix[i][j] = 1.0
+           end
+         end
+       end
+     end
+
+     def simple_init(matrix)
+       @n = matrix.size
+       @m = matrix[0].size
+       @matrix = matrix
+       @random = nil
+     end
+
+     def multiply_h(o_matrix)
+       multiplied = Array.new(@n) { Array.new(o_matrix.see_j_vector_size, 0.0) }
+       0.upto(@n - 1) do |i|
+         0.upto(o_matrix.see_j_vector_size - 1) do |j|
+           0.upto(@m - 1) do |k|
+             multiplied[i][j] += @matrix[i][k] * o_matrix.get_value(k, j)
+           end
+         end
+       end
+       multiplied
+     end
+   end
+ end
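A brief usage sketch of the matrix class (again not part of the package; the sizes and values are invented):

    require "deep_miner"

    # 3x3 matrix with entries drawn uniformly from (-0.5...0.5)
    a = DeepMiner::VectorMatrix.new(3, 3, 0.5)

    # identity returns a plain nested Array unless objectify is true
    i3 = DeepMiner::VectorMatrix.identity(3)

    # multiply wraps its Array argument via objectify internally; passing true
    # as the second argument returns a VectorMatrix instead of a nested Array
    product = a.multiply(i3, true)

    puts product.square?            # => true, A * I is still 3x3
    puts product.trace              # sum of the diagonal of A * I
    puts product.to_array.inspect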
lib/deep_miner/version.rb ADDED
@@ -0,0 +1,3 @@
+ module DeepMiner
+   VERSION = "0.1.1"
+ end
metadata ADDED
@@ -0,0 +1,77 @@
+ --- !ruby/object:Gem::Specification
+ name: deep_miner
+ version: !ruby/object:Gem::Version
+   version: 0.1.1
+ platform: ruby
+ authors:
+ - ajcost
+ autorequire:
+ bindir: exe
+ cert_chain: []
+ date: 2016-04-29 00:00:00.000000000 Z
+ dependencies:
+ - !ruby/object:Gem::Dependency
+   name: bundler
+   requirement: !ruby/object:Gem::Requirement
+     requirements:
+     - - "~>"
+       - !ruby/object:Gem::Version
+         version: '1.11'
+   type: :development
+   prerelease: false
+   version_requirements: !ruby/object:Gem::Requirement
+     requirements:
+     - - "~>"
+       - !ruby/object:Gem::Version
+         version: '1.11'
+ - !ruby/object:Gem::Dependency
+   name: rake
+   requirement: !ruby/object:Gem::Requirement
+     requirements:
+     - - "~>"
+       - !ruby/object:Gem::Version
+         version: '10.0'
+   type: :development
+   prerelease: false
+   version_requirements: !ruby/object:Gem::Requirement
+     requirements:
+     - - "~>"
+       - !ruby/object:Gem::Version
+         version: '10.0'
+ description: A library allowing for implementation of neural networks in Ruby
+ email:
+ - acost@sas.upenn.edu
+ executables: []
+ extensions: []
+ extra_rdoc_files: []
+ files:
+ - lib/deep_miner.rb
+ - lib/deep_miner/perceptron.rb
+ - lib/deep_miner/vector_matrix.rb
+ - lib/deep_miner/version.rb
+ homepage: https://github.com/ajcost/deep_miner
+ licenses:
+ - MIT
+ metadata:
+   allowed_push_host: https://rubygems.org
+ post_install_message:
+ rdoc_options: []
+ require_paths:
+ - lib
+ required_ruby_version: !ruby/object:Gem::Requirement
+   requirements:
+   - - ">="
+     - !ruby/object:Gem::Version
+       version: '0'
+ required_rubygems_version: !ruby/object:Gem::Requirement
+   requirements:
+   - - ">="
+     - !ruby/object:Gem::Version
+       version: '0'
+ requirements: []
+ rubyforge_project:
+ rubygems_version: 2.4.8
+ signing_key:
+ specification_version: 4
+ summary: Neural Network Library
+ test_files: []
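Going by the gemspec above, the release is pulled into a project in the usual way (a sketch; a standard Bundler setup is assumed):

    # Gemfile
    source "https://rubygems.org"

    gem "deep_miner", "0.1.1"

or directly with: gem install deep_miner -v 0.1.1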