CooCoo 0.1.0

Files changed (105)
  1. checksums.yaml +7 -0
  2. data/.gitignore +16 -0
  3. data/CooCoo.gemspec +47 -0
  4. data/Gemfile +4 -0
  5. data/Gemfile.lock +88 -0
  6. data/README.md +123 -0
  7. data/Rakefile +81 -0
  8. data/bin/cuda-dev-info +25 -0
  9. data/bin/cuda-free +28 -0
  10. data/bin/cuda-free-trend +7 -0
  11. data/bin/ffi-gen +267 -0
  12. data/bin/spec_runner_html.sh +42 -0
  13. data/bin/trainer +198 -0
  14. data/bin/trend-cost +13 -0
  15. data/examples/char-rnn.rb +405 -0
  16. data/examples/cifar/cifar.rb +94 -0
  17. data/examples/img-similarity.rb +201 -0
  18. data/examples/math_ops.rb +57 -0
  19. data/examples/mnist.rb +365 -0
  20. data/examples/mnist_classifier.rb +293 -0
  21. data/examples/mnist_dream.rb +214 -0
  22. data/examples/seeds.rb +268 -0
  23. data/examples/seeds_dataset.txt +210 -0
  24. data/examples/t10k-images-idx3-ubyte +0 -0
  25. data/examples/t10k-labels-idx1-ubyte +0 -0
  26. data/examples/train-images-idx3-ubyte +0 -0
  27. data/examples/train-labels-idx1-ubyte +0 -0
  28. data/ext/buffer/Rakefile +50 -0
  29. data/ext/buffer/buffer.pre.cu +727 -0
  30. data/ext/buffer/matrix.pre.cu +49 -0
  31. data/lib/CooCoo.rb +1 -0
  32. data/lib/coo-coo.rb +18 -0
  33. data/lib/coo-coo/activation_functions.rb +344 -0
  34. data/lib/coo-coo/consts.rb +5 -0
  35. data/lib/coo-coo/convolution.rb +298 -0
  36. data/lib/coo-coo/core_ext.rb +75 -0
  37. data/lib/coo-coo/cost_functions.rb +91 -0
  38. data/lib/coo-coo/cuda.rb +116 -0
  39. data/lib/coo-coo/cuda/device_buffer.rb +240 -0
  40. data/lib/coo-coo/cuda/device_buffer/ffi.rb +109 -0
  41. data/lib/coo-coo/cuda/error.rb +51 -0
  42. data/lib/coo-coo/cuda/host_buffer.rb +117 -0
  43. data/lib/coo-coo/cuda/runtime.rb +157 -0
  44. data/lib/coo-coo/cuda/vector.rb +315 -0
  45. data/lib/coo-coo/data_sources.rb +2 -0
  46. data/lib/coo-coo/data_sources/xournal.rb +25 -0
  47. data/lib/coo-coo/data_sources/xournal/bitmap_stream.rb +197 -0
  48. data/lib/coo-coo/data_sources/xournal/document.rb +377 -0
  49. data/lib/coo-coo/data_sources/xournal/loader.rb +144 -0
  50. data/lib/coo-coo/data_sources/xournal/renderer.rb +101 -0
  51. data/lib/coo-coo/data_sources/xournal/saver.rb +99 -0
  52. data/lib/coo-coo/data_sources/xournal/training_document.rb +78 -0
  53. data/lib/coo-coo/data_sources/xournal/training_document/constants.rb +15 -0
  54. data/lib/coo-coo/data_sources/xournal/training_document/document_maker.rb +89 -0
  55. data/lib/coo-coo/data_sources/xournal/training_document/document_reader.rb +105 -0
  56. data/lib/coo-coo/data_sources/xournal/training_document/example.rb +37 -0
  57. data/lib/coo-coo/data_sources/xournal/training_document/sets.rb +76 -0
  58. data/lib/coo-coo/debug.rb +8 -0
  59. data/lib/coo-coo/dot.rb +129 -0
  60. data/lib/coo-coo/drawing.rb +4 -0
  61. data/lib/coo-coo/drawing/cairo_canvas.rb +100 -0
  62. data/lib/coo-coo/drawing/canvas.rb +68 -0
  63. data/lib/coo-coo/drawing/chunky_canvas.rb +101 -0
  64. data/lib/coo-coo/drawing/sixel.rb +214 -0
  65. data/lib/coo-coo/enum.rb +17 -0
  66. data/lib/coo-coo/from_name.rb +58 -0
  67. data/lib/coo-coo/fully_connected_layer.rb +205 -0
  68. data/lib/coo-coo/generation_script.rb +38 -0
  69. data/lib/coo-coo/grapher.rb +140 -0
  70. data/lib/coo-coo/image.rb +286 -0
  71. data/lib/coo-coo/layer.rb +67 -0
  72. data/lib/coo-coo/layer_factory.rb +26 -0
  73. data/lib/coo-coo/linear_layer.rb +59 -0
  74. data/lib/coo-coo/math.rb +607 -0
  75. data/lib/coo-coo/math/abstract_vector.rb +121 -0
  76. data/lib/coo-coo/math/functions.rb +39 -0
  77. data/lib/coo-coo/math/interpolation.rb +7 -0
  78. data/lib/coo-coo/network.rb +264 -0
  79. data/lib/coo-coo/neuron.rb +112 -0
  80. data/lib/coo-coo/neuron_layer.rb +168 -0
  81. data/lib/coo-coo/option_parser.rb +18 -0
  82. data/lib/coo-coo/platform.rb +17 -0
  83. data/lib/coo-coo/progress_bar.rb +11 -0
  84. data/lib/coo-coo/recurrence/backend.rb +99 -0
  85. data/lib/coo-coo/recurrence/frontend.rb +101 -0
  86. data/lib/coo-coo/sequence.rb +187 -0
  87. data/lib/coo-coo/shell.rb +2 -0
  88. data/lib/coo-coo/temporal_network.rb +291 -0
  89. data/lib/coo-coo/trainer.rb +21 -0
  90. data/lib/coo-coo/trainer/base.rb +67 -0
  91. data/lib/coo-coo/trainer/batch.rb +82 -0
  92. data/lib/coo-coo/trainer/batch_stats.rb +27 -0
  93. data/lib/coo-coo/trainer/momentum_stochastic.rb +59 -0
  94. data/lib/coo-coo/trainer/stochastic.rb +47 -0
  95. data/lib/coo-coo/transformer.rb +272 -0
  96. data/lib/coo-coo/vector_layer.rb +194 -0
  97. data/lib/coo-coo/version.rb +3 -0
  98. data/lib/coo-coo/weight_deltas.rb +23 -0
  99. data/prototypes/convolution.rb +116 -0
  100. data/prototypes/linear_drop.rb +51 -0
  101. data/prototypes/recurrent_layers.rb +79 -0
  102. data/www/images/screamer.png +0 -0
  103. data/www/images/screamer.xcf +0 -0
  104. data/www/index.html +82 -0
  105. metadata +373 -0
data/lib/coo-coo/trainer/batch_stats.rb
@@ -0,0 +1,27 @@
+ module CooCoo
+   module Trainer
+     class BatchStats
+       attr_reader :trainer
+       attr_reader :batch
+       attr_reader :batch_size
+       attr_reader :total_time
+       attr_reader :total_loss
+
+       def initialize(trainer, batch, batch_size, total_time, total_loss)
+         @trainer = trainer
+         @batch = batch
+         @batch_size = batch_size
+         @total_time = total_time
+         @total_loss = total_loss
+       end
+
+       def average_time
+         total_time / batch_size.to_f
+       end
+
+       def average_loss
+         total_loss / batch_size.to_f
+       end
+     end
+   end
+ end
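
BatchStats is the progress report a trainer hands to the block passed to #train, once per batch. A minimal sketch of consuming it — the trainer construction, network, and training_pairs here are assumptions for illustration, not part of this diff:

    trainer = CooCoo::Trainer::Stochastic.new
    trainer.train(network: network, data: training_pairs,
                  learning_rate: 0.3, batch_size: 128) do |stats|
      # stats is a CooCoo::Trainer::BatchStats; total_loss is the summed
      # cost over the batch, so the average_* readers divide by batch size.
      puts("batch #{stats.batch}: avg loss #{stats.average_loss.inspect}, #{stats.average_time}s/example")
    end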
data/lib/coo-coo/trainer/momentum_stochastic.rb
@@ -0,0 +1,59 @@
+ require 'coo-coo/cost_functions'
+ require 'coo-coo/sequence'
+ require 'coo-coo/trainer/base'
+ require 'coo-coo/trainer/batch_stats'
+
+ module CooCoo
+   module Trainer
+     class MomentumStochastic < Base
+       DEFAULT_OPTIONS = Base::DEFAULT_OPTIONS.merge(momentum: 1/30.0)
+
+       def options
+         super(DEFAULT_OPTIONS) do |o, options|
+           o.on('--momentum FLOAT', Float, 'Multiplier for the accumulated changes.') do |n|
+             options.momentum = n
+           end
+         end
+       end
+
+       # @option options [Float] :momentum The dampening factor on the reuse of the previous network change.
+       def train(options, &block)
+         options = options.to_h
+         network = options.fetch(:network)
+         training_data = options.fetch(:data)
+         learning_rate = options.fetch(:learning_rate, 1/3.0)
+         batch_size = options.fetch(:batch_size, 1024)
+         cost_function = options.fetch(:cost_function, CostFunctions::MeanSquare)
+         momentum = options.fetch(:momentum, 1/30.0)
+
+         t = Time.now
+
+         training_data.each_slice(batch_size).with_index do |batch, i|
+           last_delta = 0.0
+           total_errs = batch.inject(nil) do |acc, (expecting, input)|
+             errs, hidden_state, last_delta = learn(network, input, expecting, learning_rate, last_delta, momentum, cost_function, Hash.new)
+             errs + (acc || 0)
+           end
+
+           if block
+             block.call(BatchStats.new(self, i, batch_size, Time.now - t, total_errs))
+           end
+
+           t = Time.now
+         end
+       end
+
+       def learn(network, input, expecting, rate, last_deltas, momentum, cost_function, hidden_state)
+         output, hidden_state = network.forward(input, hidden_state)
+         target = network.prep_output_target(expecting)
+         final_output = network.final_output(output)
+         errors = cost_function.derivative(target, final_output)
+         deltas, hidden_state = network.backprop(input, output, errors, hidden_state)
+         deltas = CooCoo::Sequence[deltas] * rate
+         network.update_weights!(input, output, deltas - last_deltas * momentum)
+         return cost_function.call(target, final_output), hidden_state, deltas
+       end
+     end
+   end
+ end
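
The update in #learn subtracts a momentum-scaled copy of the previous step from the current one, dampening successive changes (as the doc comment above describes) rather than accelerating them as classical momentum would. A tiny numeric sketch, with plain Floats standing in for CooCoo::Sequence values (names illustrative):

    rate     = 1 / 3.0
    momentum = 1 / 30.0

    grad_t = 0.9                       # current backprop delta
    prev   = 0.3                       # deltas returned by the previous learn call (already rate-scaled)

    deltas = grad_t * rate             # => 0.3
    step   = deltas - prev * momentum  # => 0.29; update_weights! then applies w -= step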
data/lib/coo-coo/trainer/stochastic.rb
@@ -0,0 +1,47 @@
+ require 'coo-coo/cost_functions'
+ require 'coo-coo/sequence'
+ require 'coo-coo/trainer/base'
+ require 'coo-coo/trainer/batch_stats'
+
+ module CooCoo
+   module Trainer
+     # Implements plain stochastic gradient descent: weights are updated
+     # after every example, and no hyperparameters are altered while
+     # learning.
+     class Stochastic < Base
+       def train(options, &block)
+         options = options.to_h
+         network = options.fetch(:network)
+         training_data = options.fetch(:data)
+         learning_rate = options.fetch(:learning_rate, 0.3)
+         batch_size = options.fetch(:batch_size, 1024)
+         cost_function = options.fetch(:cost_function, CostFunctions::MeanSquare)
+
+         t = Time.now
+
+         training_data.each_slice(batch_size).with_index do |batch, i|
+           total_errs = batch.inject(nil) do |acc, (expecting, input)|
+             errs, hidden_state = learn(network, input, expecting, learning_rate, cost_function, Hash.new)
+             errs + (acc || 0)
+           end
+
+           if block
+             block.call(BatchStats.new(self, i, batch.size, Time.now - t, total_errs))
+           end
+
+           t = Time.now
+         end
+       end
+
+       def learn(network, input, expecting, rate, cost_function = CostFunctions::MeanSquare, hidden_state)
+         output, hidden_state = network.forward(input, hidden_state)
+         target = network.prep_output_target(expecting)
+         final_output = network.final_output(output)
+         errors = cost_function.derivative(target, final_output)
+         deltas, hidden_state = network.backprop(input, output, errors, hidden_state)
+         network.update_weights!(input, output, deltas * rate)
+         return cost_function.call(target, final_output), hidden_state
+       end
+     end
+   end
+ end
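
Note that both trainers destructure each training example as (expecting, input) — target first, input second. A sketch of data in that shape, with toy values (CooCoo::Vector[...] mirrors the constructor style used elsewhere in this gem):

    # Each element is [target, input]; #train's inject destructures it
    # as (expecting, input).
    training_pairs = [
      [ CooCoo::Vector[[1.0, 0.0]], CooCoo::Vector[[1.0, 0.0, 0.0, 0.0]] ],
      [ CooCoo::Vector[[0.0, 1.0]], CooCoo::Vector[[0.0, 0.0, 1.0, 0.0]] ]
    ]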
data/lib/coo-coo/transformer.rb
@@ -0,0 +1,272 @@
+ require 'nmatrix'
+
+ module CooCoo
+   module Transformers
+     class Base < Enumerator
+       def drop(n)
+         n.times do
+           self.next
+         end
+
+         self
+       rescue StopIteration
+         self
+       end
+
+       def first(n)
+         Stopper.new(self, n)
+       end
+
+       def self.bin_op(*ops)
+         ops.each do |op|
+           bin_op_inner(op)
+         end
+       end
+
+       def self.bin_op_inner(op)
+         define_method(op) do |other|
+           Combo.new(self, other) do |a, b|
+             a.send(op, b)
+           end
+         end
+       end
+
+       bin_op :+, :-, :*, :/
+     end
+
+     class Proxy < Base
+       def initialize(enum)
+         @enum = enum
+
+         super() do |yielder|
+           loop do
+             yielder << self.next
+           end
+         end
+       end
+
+       def next
+         @enum.next
+       end
+     end
+
+     class Stopper < Proxy
+       def initialize(enum, n)
+         @stop_after = n
+         @index = 0
+
+         super(enum)
+       end
+
+       def next
+         if @index < @stop_after
+           @index += 1
+           super
+         else
+           raise StopIteration
+         end
+       end
+     end
+
+     class Combo < Base
+       def initialize(src, other, &op)
+         @src = src
+         @other = other
+         @op = op
+
+         super() do |yielder|
+           loop do
+             yielder << self.next
+           end
+         end
+       end
+
+       def next
+         @op.call(@src.next, @other.next)
+       end
+     end
+
+     module Image
+       class Base < ::CooCoo::Transformers::Proxy
+         def initialize(enum, width, height)
+           @width = width
+           @height = height
+           super(enum)
+         end
+
+         attr_reader :width, :height
+
+         def translate(x, y)
+           Translation.new(self, width, height, x, y)
+         end
+
+         def rotate(radians, ox = 0, oy = 0)
+           Rotation.new(self, width, height, ox, oy, radians)
+         end
+
+         def scale(x, y = nil)
+           Scaler.new(self, width, height, x, y || x)
+         end
+       end
+
+       class Translation < Base
+         def initialize(enum, width, height, tx, ty)
+           super(enum, width, height)
+           @tx = tx
+           @ty = ty
+         end
+
+         def next
+           i = super()
+           r = NMatrix.zeroes([1, width * height])
+           height.times do |y|
+             width.times do |x|
+               r[0, map_pixel(x, y)] = i[0, map_pixel(*translate_pixel(x, y))]
+             end
+           end
+           r
+         end
+
+         private
+
+         def map_pixel(x, y)
+           x + y * width
+         end
+
+         def translate_pixel(x, y)
+           [ (x + @tx) % width, (y + @ty) % height ]
+         end
+       end
+
+       class Rotation < Base
+         def initialize(enum, width, height, ox, oy, radians)
+           super(enum, width, height)
+           @ox = ox
+           @oy = oy
+           @radians = radians
+         end
+
+         def next
+           i = super()
+           r = NMatrix.zeroes([1, width * height])
+           height.times do |y|
+             width.times do |x|
+               r[0, map_pixel(x, y)] = sample(i, x, y)
+             end
+           end
+           r
+         end
+
+         private
+
+         def sample(image, x, y)
+           rx, ry = *rotate_pixel(x, y)
+           rx_min = rx.floor % width
+           rx_max = rx.ceil % width
+           ry_min = ry.floor % height
+           ry_max = ry.ceil % height
+
+           (image[0, map_pixel(rx_min, ry_min)] +
+            image[0, map_pixel(rx_max, ry_min)] +
+            image[0, map_pixel(rx_min, ry_max)] +
+            image[0, map_pixel(rx_max, ry_max)]) / 4.0
+         end
+
+         def map_pixel(x, y)
+           x + y * width
+         end
+
+         def rotate_pixel(x, y)
+           c = Math.cos(@radians)
+           s = Math.sin(@radians)
+           # Rotate about (@ox, @oy) using the untransformed coordinates on
+           # both axes, then translate back.
+           dx = x - @ox
+           dy = y - @oy
+           [ @ox + dx * c - dy * s,
+             @oy + dx * s + dy * c ]
+         end
+       end
+
+       class Scaler < Base
+         def initialize(enum, width, height, scale_x, scale_y)
+           super(enum, width, height)
+           @scale_x = scale_x
+           @scale_y = scale_y
+         end
+
+         def next
+           i = super()
+           r = NMatrix.zeroes([1, width * height])
+           height.times do |y|
+             width.times do |x|
+               r[0, map_pixel(x, y)] = sample(i, x, y)
+             end
+           end
+           r
+         end
+
+         private
+
+         def sample(image, x, y)
+           if @scale_x == 1.0 && @scale_y == 1.0
+             image[0, map_pixel(x, y)]
+           elsif @scale_x > 1.0 && @scale_y > 1.0
+             rx, ry = *scale_pixel(x, y)
+             image[0, map_pixel(rx.to_i, ry.to_i)]
+           else
+             rx, ry = *scale_pixel(x, y)
+             rx_min = rx.floor % width
+             rx_max = rx.ceil % width
+             ry_min = ry.floor % height
+             ry_max = ry.ceil % height
+
+             (image[0, map_pixel(rx_min, ry_min)] +
+              image[0, map_pixel(rx_max, ry_min)] +
+              image[0, map_pixel(rx_min, ry_max)] +
+              image[0, map_pixel(rx_max, ry_max)]) / 4.0
+           end
+         end
+
+         def map_pixel(x, y)
+           x + y * width
+         end
+
+         def scale_pixel(x, y)
+           [ (x * @scale_x) % width, (y * @scale_y) % height ]
+         end
+       end
+     end
+   end
+ end
+
+ if __FILE__ == $0
+   ra = (0...Float::INFINITY)
+   data = CooCoo::Transformers::Proxy.new(ra.each)
+   data2 = CooCoo::Transformers::Proxy.new(ra.each)
+   data3 = CooCoo::Transformers::Proxy.new(ra.each)
+   puts("Raw data: #{ra.first(10).inspect}")
+   t = data * data2
+   r = t.first(10)
+   puts("Squared: #{r.to_a.inspect} #{r.class}")
+   r = (t + 1000) / data3.drop(1) - 2
+   r = r.first(10)
+   puts("Add 1000: #{r.to_a.inspect} #{r.class}")
+   #puts(t.each.to_a.inspect)
+
+   i = Array.new(8, [1.0] + [0.0] * 7)
+   i[0] = [1.0] * 8
+   i[7] = i[0]
+   puts(*i.each_slice(8).to_a[0].collect(&:inspect))
+
+   e = Enumerator.new do |y|
+     y << i.flatten.to_nm([1, 64])
+   end
+
+   t = CooCoo::Transformers::Image::Base.new(e, 8, 8).
+       #rotate(Math::PI / 4.0).
+       scale(0.5, 0.5).
+       translate(2, 1)
+   i = t.next
+   puts()
+   puts(*i.each_slice(8).to_a.collect(&:inspect))
+ end
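
Base.bin_op is what makes these enumerators composable streams: each arithmetic operator wraps two of them in a Combo that pulls one element from each side per #next. The same pattern with plain Enumerators, independent of CooCoo (illustrative only):

    a = Enumerator.new { |y| n = 0; loop { y << n; n += 1 } }      # 0, 1, 2, ...
    b = Enumerator.new { |y| n = 0; loop { y << n * n; n += 1 } }  # 0, 1, 4, ...

    sums = Enumerator.new do |y|
      loop { y << a.next + b.next }  # loop rescues StopIteration when a side runs dry
    end

    p sums.first(5)  # => [0, 2, 6, 12, 20]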
data/lib/coo-coo/vector_layer.rb
@@ -0,0 +1,194 @@
+ require 'coo-coo/consts'
+ require 'coo-coo/math'
+ require 'coo-coo/debug'
+ require 'coo-coo/layer_factory'
+ require 'coo-coo/activation_functions'
+ require 'coo-coo/weight_deltas'
+
+ module CooCoo
+   class VectorLayer
+     LayerFactory.register_type(self)
+
+     attr_accessor :activation_function
+     attr_reader :bias
+     attr_reader :weights
+
+     def initialize(num_inputs, size, activation_function = CooCoo.default_activation)
+       @activation_function = activation_function
+       @num_inputs = num_inputs.to_i
+       @size = size.to_i
+       @weights = @activation_function.initial_weights(@num_inputs, @size)
+       @bias = @activation_function.initial_bias(@size)
+     end
+
+     def num_inputs
+       @num_inputs
+     end
+
+     def size
+       @size
+     end
+
+     def forward(input, hidden_state)
+       return transfer(activate(input)), hidden_state
+     end
+
+     def transfer(activations)
+       @activation_function.call(activations)
+     end
+
+     def activate(input)
+       @weights.dot(num_inputs, size, input, 1, num_inputs) + @bias
+     end
+
+     def backprop(input, output, errors, hidden_state)
+       # Properly: return errors * @activation_function.derivative(activate(input), output), hidden_state
+       return errors * @activation_function.derivative(nil, output), hidden_state
+     end
+
+     def transfer_error(deltas)
+       deltas.dot(size, 1, @weights, num_inputs, size)
+     end
+
+     def transfer_input_error(expecting)
+       # NOTE: relies on an +output+ method that VectorLayer does not define.
+       (output - expecting).to_a
+     end
+
+     def update_weights!(inputs, deltas)
+       adjust_weights!(weight_deltas(inputs, deltas))
+     end
+
+     def adjust_weights!(deltas)
+       @bias -= deltas.bias_deltas
+       @weights -= deltas.weight_deltas
+
+       self
+     end
+
+     def weight_deltas(inputs, deltas)
+       WeightDeltas.new(deltas, deltas.dot(1, size, inputs, num_inputs, 1))
+     end
+
+     def to_hash(network = nil)
+       { type: self.class.to_s,
+         outputs: size,
+         neurons: neuron_hash
+       }
+     end
+
+     def neuron_hash
+       @weights.each_slice(num_inputs).with_index.collect do |neuron_weights, i|
+         { num_inputs: num_inputs,
+           weights: neuron_weights.to_a,
+           bias: @bias[i],
+           f: @activation_function.name
+         }
+       end
+     end
+
+     def add_neurons!(new_size)
+       if new_size != @size
+         w = CooCoo::Vector.zeros(num_inputs * new_size)
+         w[0, @weights.size] = @weights
+         w[@weights.size, num_inputs] = @activation_function.initial_weights(num_inputs, 1)
+         @weights = w
+
+         @bias = CooCoo::Vector.ones(new_size).set(@bias)
+         @bias[-1] = @activation_function.initial_bias(1)[0]
+
+         @size = new_size
+       end
+
+       self
+     end
+
+     def add_inputs!(new_size)
+       if new_size != num_inputs
+         w = CooCoo::Vector.zeros(new_size * size)
+         w.set2d!(new_size, @weights, num_inputs, 0, 0)
+         w.set2d!(new_size, @activation_function.initial_weights(size, 1), 1, new_size - 1, 0)
+         @weights = w
+         @num_inputs = new_size
+       end
+
+       self
+     end
+
+     def update_neuron_from_hash!(neuron_index, h)
+       if neuron_index >= size
+         add_neurons!(neuron_index + 1)
+       end
+
+       @weights[neuron_index * num_inputs, num_inputs] = h[:weights]
+       @bias[neuron_index] = h[:bias]
+     end
+
+     def update_from_hash!(h)
+       @activation_function = ActivationFunctions.from_name(h[:neurons][0][:f])
+       add_neurons!(h[:outputs])
+       add_inputs!(h[:neurons][0][:num_inputs])
+
+       h[:outputs].times do |i|
+         update_neuron_from_hash!(i, h[:neurons][i])
+       end
+
+       self
+     end
+
+     def ==(other)
+       other.kind_of?(self.class) &&
+         size == other.size &&
+         bias == other.bias &&
+         weights == other.weights &&
+         activation_function == other.activation_function
+     end
+
+     class << self
+       def from_hash(h, network = nil)
+         self.new(h[:neurons][0][:num_inputs],
+                  h[:outputs],
+                  ActivationFunctions.from_name(h[:neurons][0][:f])).
+           update_from_hash!(h)
+       end
+     end
+   end
+ end
+
+ if __FILE__ == $0
+   layer = CooCoo::VectorLayer.new(4, 2, CooCoo::ActivationFunctions.from_name(ENV.fetch("ACTIVATION", "Logistic")))
+   inputs = [ [ 1.0, 0.0, 0.0, 0.0 ],
+              [ 0.0, 0.0, 1.0, 0.0 ],
+              [ 0.0, 1.0, 0.0, 0.0 ],
+              [ 0.0, 0.0, 0.0, 1.0 ]
+            ].collect do |v|
+     CooCoo::CUDA::Vector[v]
+   end
+   targets = [ [ 1.0, 0.0 ],
+               [ 0.0, 1.0 ],
+               [ 0.0, 0.0 ],
+               [ 0.0, 0.0 ]
+             ].collect do |v|
+     CooCoo::CUDA::Vector[v]
+   end
+
+   inputs.zip(targets).cycle(ENV.fetch('LOOPS', 100).to_i).each do |(input, target)|
+     output, hidden_state = layer.forward(input, Hash.new)
+     puts("#{input} -> #{target} #{target.inspect}")
+     puts("\toutput: #{output}")
+
+     err = (output - target)
+     puts("\terr: #{err}")
+     #err = err * err * 0.5
+     delta, hidden_state = layer.backprop(input, output, err, hidden_state)
+     puts("\tdelta: #{delta}")
+     puts("\terror: #{err}")
+     puts("\txfer: #{layer.transfer_error(delta)}")
+
+     layer.update_weights!(input, delta * 0.5)
+   end
+
+   inputs.zip(targets).each do |(input, target)|
+     output, hidden_state = layer.forward(input, Hash.new)
+     puts("#{input} -> #{output}\t#{target}")
+   end
+ end
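
VectorLayer#activate treats @weights as a row-major size × num_inputs matrix flattened into a single vector and multiplies it against the input column. A plain-Ruby sketch of that product, conceptually mirroring activate (a hypothetical standalone helper, not gem API):

    # w: flattened size x num_inputs weight matrix, b: bias (length size),
    # x: input (length num_inputs); returns the pre-activation vector.
    def activate(w, b, x, num_inputs, size)
      Array.new(size) do |j|
        row = w[j * num_inputs, num_inputs]
        b[j] + row.each_with_index.sum { |wij, i| wij * x[i] }
      end
    end

    w = [ 0.1, 0.2,
          0.3, 0.4 ]  # 2x2
    p activate(w, [0.0, 0.0], [1.0, 2.0], 2, 2)  # => [0.5, 1.1]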