NeuralNet 0.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/lib/NeuralNet.rb +153 -0
- metadata +44 -0
checksums.yaml
ADDED
@@ -0,0 +1,7 @@
+---
+SHA256:
+  metadata.gz: e26b008d3e48e32c5bd9e2b258f014736291f725a5ee73f8701dd63ddba83037
+  data.tar.gz: 12845989f991bfd9e9100679f4f167b098557986681ce37734800524ec14a8e7
+SHA512:
+  metadata.gz: fee7708d275efa089dde0677e27fc79af3938f7fd60a60bc654a7b7bad70ea33ab4f9f98c5cd78872735ed4df07a0ac4cc0131aa9be3d50c8fa68382864a2856
+  data.tar.gz: 4c2d72e51bbadc97293739ebfb416104cc11eb19b007297a64a62b15ba6811555640cb126ac608713b42d0a5e3ab04088c60d0e4c5c79d19c9a9dea4015eeefc
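
These are the standard RubyGems digests of the two archives packed inside the .gem file. As a minimal sketch of how the SHA256 entries could be checked, assuming NeuralNet-0.0.0.gem has been unpacked (a .gem is a plain tar containing metadata.gz, data.tar.gz, and a gzipped copy of this checksums.yaml) so these files sit in the current directory:

    require "digest"
    require "yaml"

    # Illustrative verification script, not part of the package;
    # assumes metadata.gz, data.tar.gz and checksums.yaml are in ".".
    expected = YAML.safe_load(File.read("checksums.yaml"))

    %w[metadata.gz data.tar.gz].each do |name|
      actual = Digest::SHA256.file(name).hexdigest
      status = actual == expected["SHA256"][name] ? "OK" : "MISMATCH"
      puts "#{name}: #{status}"
    end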
data/lib/NeuralNet.rb
ADDED
@@ -0,0 +1,153 @@
+require "numo/narray"
+require "numo/linalg/use/openblas"
+require "numo/linalg/use/lapack"
+
+include Numo
+class NeuralNetwork
+
+  def initialize(inputNodes:0,hiddenNodes:[],outputNodes:0,
+                 learningRate: 0.01,activation: "sigmoid",oneHot: false,
+                 reluFactor: 1,momentum: 0.0)
+    @nInputs = inputNodes
+    @nHidden = hiddenNodes
+    @nHiddenLayers = hiddenNodes.length
+    @nOutputs = outputNodes
+    @learningRate = learningRate
+    @momentum = momentum
+    @hiddenWeights = []
+    @hiddenBias = []
+    @prevHWeightDeltas = []
+    @prevHBiasDeltas = []
+
+    tmp1,tmp2 = @nInputs,@nHidden[0]
+    @hiddenWeights[0] = DFloat.new([tmp2,tmp1]).rand * 2 - 1
+    @hiddenBias[0] = DFloat.new([tmp2,1]).rand * 2 - 1
+
+    # Hidden Layers Update Matrix (momentum)
+    @prevHWeightDeltas[0] = DFloat.zeros([tmp2,tmp1])
+    @prevHBiasDeltas[0] = DFloat.zeros([tmp2,1])
+
+    for i in (1...@nHiddenLayers)
+      tmp1,tmp2 = @nHidden[i-1],@nHidden[i]
+      @hiddenWeights[i] = DFloat.new([tmp2,tmp1]).rand * 2 - 1
+      @hiddenBias[i] = DFloat.new([tmp2,1]).rand * 2 - 1
+      @prevHWeightDeltas[i] = DFloat.zeros([tmp2,tmp1])
+      @prevHBiasDeltas[i] = DFloat.zeros([tmp2,1])
+    end
+
+    @outputWeights = DFloat.new([@nOutputs,@nHidden[@nHiddenLayers-1]]).rand * 2 - 1
+    @outputBias = DFloat.new([@nOutputs,1]).rand * 2 - 1
+
+    # Output Layer Update Matrix (momentum)
+    @prevOWeightDeltas = DFloat.zeros([@nOutputs,@nHidden[@nHiddenLayers-1]])
+    @prevOBiasDeltas = DFloat.zeros([@nOutputs,1])
+
+    @hiddenActivation = activation
+    @hiddenActivationDerv = activation + "_prime"
+    if activation == "relu"
+      @reluFactor = reluFactor.to_f
+    end
+
+    if oneHot
+      @outputActivation = "softmax"
+      @outDel = "softmax_out_delta"
+    else
+      @outputActivation = @hiddenActivation
+      @outputActivationDerv = @hiddenActivationDerv
+      @outDel = "_out_delta"
+    end
+  end
+
+  def train!(data,label)
+    x = DFloat[data].transpose
+    y = DFloat[label].transpose
+    activations = [x]
+    #feed forward
+    for i in(0...@nHiddenLayers)
+      x1 = method(@hiddenActivation).call(@hiddenWeights[i].dot(activations[-1])+@hiddenBias[i])
+      activations.push(x1)
+    end
+    output = method(@outputActivation).call(@outputWeights.dot(activations[-1])+@outputBias)
+    #backpropagation
+    diff = output - y
+    outdelta = method(@outDel).call(output,diff)
+
+    @prevOBiasDeltas = @momentum * @prevOBiasDeltas + outdelta
+    @prevOWeightDeltas = @momentum * @prevOWeightDeltas + outdelta.dot(activations[-1].transpose)
+
+    @outputBias -= @prevOBiasDeltas
+    @outputWeights -= @prevOWeightDeltas
+
+    delta = @outputWeights.transpose.dot(outdelta)
+    (@nHiddenLayers-1).downto(0) do |i|
+      delta = delta*(method(@hiddenActivationDerv).call(activations[i+1]))
+      @prevHWeightDeltas[i] = @momentum * @prevHWeightDeltas[i] + delta.dot(activations[i].transpose)
+      @prevHBiasDeltas[i] = @momentum * @prevHBiasDeltas[i] + delta
+
+      @hiddenWeights[i] -= @prevHWeightDeltas[i]
+      @hiddenBias[i] -= @prevHBiasDeltas[i]
+      delta = @hiddenWeights[i].transpose.dot(delta)
+    end
+  end
+
+  def predict(data)
+    x = DFloat[data].transpose
+    activations = [x]
+    #feed forward
+    for i in(0...@nHiddenLayers)
+      x1 = method(@hiddenActivation).call(@hiddenWeights[i].dot(activations[-1])+@hiddenBias[i])
+      activations.push(x1)
+    end
+    out = method(@outputActivation).call(@outputWeights.dot(activations[-1])+@outputBias).flatten.to_a
+    out
+  end
+
+  def self.load(path)
+    if File.exist?(path)
+      t = File.binread(path)
+      return Marshal.load(t)
+    else
+      raise Errno::ENOENT , path
+    end
+  end
+
+  def save(path)
+    File.binwrite(path,Marshal.dump(self))
+  end
+
+  private
+  def relu(x)
+    (x * (x>0))*@reluFactor
+  end
+
+  def relu_prime(y)
+    @reluFactor * (y > 0)
+  end
+
+  def softmax(x)
+    v = NMath.exp(x)
+    return (v*(v.sum**-1))
+  end
+
+  def softmax_prime(y)
+    phi = y.dot DFloat.ones(1,@nOutputs)
+    return phi*( DFloat.eye(@nOutputs) - phi.transpose)
+  end
+
+  def sigmoid(x)
+    return (NMath.exp(-x) + 1)**-1
+  end
+
+  def sigmoid_prime(y)
+    return y*(1-y)
+  end
+
+  # cross-entropy loss for softmax
+  def softmax_out_delta(output,diff)
+    diff * @learningRate
+  end
+
+  def _out_delta(output,diff)
+    method(@outputActivationDerv).call(output)*diff * @learningRate
+  end
+end
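
lib/NeuralNet.rb above is the whole library: weights and biases are Numo::DFloat matrices initialized uniformly in [-1, 1), train! runs one stochastic-gradient step per call (feed forward, then backpropagation with the learning rate already folded into each delta and an optional momentum term), and save/load persist the object with Marshal. A minimal usage sketch against that API; the XOR data, layer sizes, and hyperparameters here are illustrative, not from the package:

    require "NeuralNet"

    # Hypothetical example: a 2-4-1 sigmoid network learning XOR.
    nn = NeuralNetwork.new(inputNodes: 2, hiddenNodes: [4], outputNodes: 1,
                           learningRate: 0.5, activation: "sigmoid", momentum: 0.9)

    xor = [[[0, 0], [0]], [[0, 1], [1]], [[1, 0], [1]], [[1, 1], [0]]]

    # train! performs one gradient step per (sample, label) pair.
    20_000.times do
      sample, label = xor.sample
      nn.train!(sample, label)
    end

    xor.each do |sample, label|
      puts "#{sample.inspect} -> #{nn.predict(sample).first.round(3)} (want #{label.first})"
    end

    # Marshal-based persistence via the save/load methods above.
    nn.save("xor.net")
    restored = NeuralNetwork.load("xor.net")

Note that predict hands back a plain Ruby Array (flatten.to_a), so no Numo types leak to the caller.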
metadata
ADDED
@@ -0,0 +1,44 @@
+--- !ruby/object:Gem::Specification
+name: NeuralNet
+version: !ruby/object:Gem::Version
+  version: 0.0.0
+platform: ruby
+authors:
+- zaki98
+autorequire:
+bindir: bin
+cert_chain: []
+date: 2018-03-05 00:00:00.000000000 Z
+dependencies: []
+description: Simple Feed Forward Neural Network
+email: ounissizakaria@gmail.com
+executables: []
+extensions: []
+extra_rdoc_files: []
+files:
+- lib/NeuralNet.rb
+homepage:
+licenses:
+- MIT
+metadata: {}
+post_install_message:
+rdoc_options: []
+require_paths:
+- lib
+required_ruby_version: !ruby/object:Gem::Requirement
+  requirements:
+  - - ">="
+    - !ruby/object:Gem::Version
+      version: '0'
+required_rubygems_version: !ruby/object:Gem::Requirement
+  requirements:
+  - - ">="
+    - !ruby/object:Gem::Version
+      version: '0'
+requirements: []
+rubyforge_project:
+rubygems_version: 2.7.4
+signing_key:
+specification_version: 4
+summary: NeuralNetwork
+test_files: []