tensor_stream 0.3.0 → 0.4.0

Files changed (59)
  1. checksums.yaml +4 -4
  2. data/.circleci/config.yml +7 -7
  3. data/CHANGELOG.md +13 -0
  4. data/Dockerfile +25 -0
  5. data/Rakefile +6 -0
  6. data/benchmark/benchmark.rb +16 -57
  7. data/benchmark_intel.txt +21 -0
  8. data/benchmark_nvidia.txt +33 -0
  9. data/lib/tensor_stream.rb +4 -173
  10. data/lib/tensor_stream/debugging/debugging.rb +20 -0
  11. data/lib/tensor_stream/evaluator/kernels/abs.cl +9 -5
  12. data/lib/tensor_stream/evaluator/kernels/add.cl +2 -4
  13. data/lib/tensor_stream/evaluator/kernels/argmax.cl +2 -9
  14. data/lib/tensor_stream/evaluator/kernels/argmin.cl +2 -9
  15. data/lib/tensor_stream/evaluator/kernels/cast.cl +3 -8
  16. data/lib/tensor_stream/evaluator/kernels/cond.cl.erb +1 -1
  17. data/lib/tensor_stream/evaluator/kernels/cos.cl +2 -1
  18. data/lib/tensor_stream/evaluator/kernels/div.cl.erb +2 -4
  19. data/lib/tensor_stream/evaluator/kernels/exp.cl +2 -1
  20. data/lib/tensor_stream/evaluator/kernels/gemm.cl +8 -39
  21. data/lib/tensor_stream/evaluator/kernels/log.cl +2 -1
  22. data/lib/tensor_stream/evaluator/kernels/log1p.cl +2 -1
  23. data/lib/tensor_stream/evaluator/kernels/max.cl +4 -49
  24. data/lib/tensor_stream/evaluator/kernels/mul.cl +2 -4
  25. data/lib/tensor_stream/evaluator/kernels/negate.cl +2 -9
  26. data/lib/tensor_stream/evaluator/kernels/pow.cl +4 -88
  27. data/lib/tensor_stream/evaluator/kernels/reciprocal.cl +2 -9
  28. data/lib/tensor_stream/evaluator/kernels/round.cl +2 -1
  29. data/lib/tensor_stream/evaluator/kernels/sigmoid.cl +2 -1
  30. data/lib/tensor_stream/evaluator/kernels/sigmoid_grad.cl +6 -5
  31. data/lib/tensor_stream/evaluator/kernels/sign.cl +12 -14
  32. data/lib/tensor_stream/evaluator/kernels/sin.cl +2 -1
  33. data/lib/tensor_stream/evaluator/kernels/softmax.cl +26 -0
  34. data/lib/tensor_stream/evaluator/kernels/softmax_grad.cl +46 -0
  35. data/lib/tensor_stream/evaluator/kernels/sqrt.cl +2 -1
  36. data/lib/tensor_stream/evaluator/kernels/square.cl +2 -8
  37. data/lib/tensor_stream/evaluator/kernels/sub.cl +2 -4
  38. data/lib/tensor_stream/evaluator/kernels/tan.cl +2 -1
  39. data/lib/tensor_stream/evaluator/kernels/tanh.cl +2 -1
  40. data/lib/tensor_stream/evaluator/kernels/tanh_grad.cl +2 -1
  41. data/lib/tensor_stream/evaluator/kernels/where.cl +2 -9
  42. data/lib/tensor_stream/evaluator/opencl_evaluator.rb +108 -58
  43. data/lib/tensor_stream/evaluator/opencl_template_helper.rb +40 -5
  44. data/lib/tensor_stream/evaluator/operation_helpers/array_ops_helper.rb +35 -0
  45. data/lib/tensor_stream/evaluator/ruby_evaluator.rb +30 -9
  46. data/lib/tensor_stream/graph_serializers/graphml.rb +1 -1
  47. data/lib/tensor_stream/graph_serializers/pbtext.rb +4 -0
  48. data/lib/tensor_stream/math_gradients.rb +6 -5
  49. data/lib/tensor_stream/nn/nn_ops.rb +18 -2
  50. data/lib/tensor_stream/ops.rb +237 -44
  51. data/lib/tensor_stream/tensor.rb +16 -2
  52. data/lib/tensor_stream/utils.rb +205 -0
  53. data/lib/tensor_stream/variable.rb +2 -1
  54. data/lib/tensor_stream/version.rb +1 -1
  55. data/samples/error.graphml +2755 -0
  56. data/{test_samples → samples}/iris.rb +18 -24
  57. data/samples/logistic_regression.rb +0 -1
  58. data/test_samples/raw_neural_net_sample.rb +80 -23
  59. metadata +11 -3
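The bulk of 0.4.0 is visible in the list above: new OpenCL softmax and softmax_grad kernels, a debugging module, a standalone utils.rb, and roughly two hundred added lines in ops.rb. For orientation before the per-file diffs, here is a minimal sketch of how the APIs exercised by the updated samples fit together; the toy shapes, feature values, and variable names are illustrative and not taken from the gem's documentation:

  require 'tensor_stream'

  tf = TensorStream

  # Toy 4-feature / 3-class classifier wired with the 0.4.0 calls used in the samples below
  x = tf.placeholder(:float64, shape: [nil, 4])
  y = tf.placeholder(:float64, shape: [nil, 3])
  w = tf.variable(tf.random_normal([4, 3]), dtype: :float64)
  b = tf.variable(tf.random_normal([3]), dtype: :float64)

  logits = tf.matmul(x, w) + b
  loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits: logits, labels: y))
  train = TensorStream::Train::GradientDescentOptimizer.new(0.1).minimize(loss)
  init = tf.global_variables_initializer

  tf.session do |sess|
    sess.run(init)
    sess.run(train, feed_dict: { x => [[5.1, 3.5, 1.4, 0.2]], y => [[1, 0, 0]] })
    puts sess.run(loss, feed_dict: { x => [[5.1, 3.5, 1.4, 0.2]], y => [[1, 0, 0]] })
  end

Passing `:opencl_evaluator` to `session`, as the MNIST sample below does, routes the same graph through the new OpenCL kernels.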
data/{test_samples → samples}/iris.rb CHANGED
@@ -1,18 +1,17 @@
  require "bundler/setup"
  require 'tensor_stream'
- require 'pry-byebug'
 
  # This neural network will predict the species of an iris based on sepal and petal size
  # Dataset: http://en.wikipedia.org/wiki/Iris_flower_data_set
-
+ tf = TensorStream
  rows = File.readlines(File.join("samples","iris.data")).map {|l| l.chomp.split(',') }
 
  rows.shuffle!
 
  label_encodings = {
- "Iris-setosa" => [1, 0, 0],
- "Iris-versicolor" => [0, 1, 0],
- "Iris-virginica" => [0, 0 ,1]
+ 'Iris-setosa' => [1, 0, 0],
+ 'Iris-versicolor' => [0, 1, 0],
+ 'Iris-virginica' => [0, 0, 1]
  }
 
  x_data = rows.map {|row| row[0,4].map(&:to_f) }
@@ -40,7 +39,7 @@ y_test = y_data.slice(100, 50)
 
  test_cases = []
  x_train.each_with_index do |x, index|
- test_cases << [x, y_train[index] ]
+ test_cases << [x, y_train[index]]
  end
 
  validation_cases = []
@@ -54,11 +53,10 @@ batch_size = 128
  display_step = 100
 
  # Network Parameters
- n_hidden_1 = 32 # 1st layer number of neurons
- n_hidden_2 = 32 # 2nd layer number of neurons
+ n_hidden_1 = 4 # 1st layer number of neurons
  num_classes = 3 # MNIST total classes (0-9 digits)
  num_input = 4
- training_epochs = 10
+ training_epochs = 100
 
  tf = TensorStream
 
@@ -68,26 +66,20 @@ y = tf.placeholder("float", shape: [nil, num_classes], name: 'y')
 
  # Store layers weight & bias
  weights = {
- h1: tf.variable(tf.random_normal([num_input, n_hidden_1]), name: 'h1'),
- h2: tf.variable(tf.random_normal([n_hidden_1, n_hidden_2]), name: 'h2'),
- out: tf.variable(tf.random_normal([n_hidden_2, num_classes]), name: 'out')
+ h1: tf.variable(tf.random_normal([num_input, n_hidden_1]), name: 'h1'),
+ out: tf.variable(tf.random_normal([num_classes, num_classes]), name: 'out')
  }
 
  biases = {
- b1: tf.variable(tf.random_normal([n_hidden_1]), name: 'b1'),
- b2: tf.variable(tf.random_normal([n_hidden_2]), name: 'b2'),
- out: tf.variable(tf.random_normal([num_classes]), name: 'b_out')
+ b1: tf.variable(tf.random_normal([n_hidden_1]), name: 'b1'),
+ out: tf.variable(tf.random_normal([num_classes]), name: 'b_out')
  }
 
-
  # Create model
  def neural_net(x, weights, biases)
- # Hidden fully connected layer with 256 neurons
- layer_1 = TensorStream.add(TensorStream.matmul(x, weights[:h1]), biases[:b1], name: 'layer1_add')
- # Hidden fully connected layer with 256 neurons
- layer_2 = TensorStream.add(TensorStream.matmul(layer_1, weights[:h2]), biases[:b2], name: 'layer2_add')
+ layer_1 = TensorStream.tanh(TensorStream.add(TensorStream.matmul(x, weights[:h1]), biases[:b1], name: 'layer1_add'))
  # Output fully connected layer with a neuron for each class
- TensorStream.matmul(layer_2, weights[:out]) + biases[:out]
+ TensorStream.sigmoid(TensorStream.matmul(layer_1, weights[:out]) + biases[:out])
  end
 
  # Construct model
@@ -105,11 +97,13 @@ TensorStream.session do |sess|
  sess.run(init)
  puts "Testing the untrained network..."
  loss = sess.run(cost, feed_dict: { x => x_train, y => y_train })
- puts sess.run(loss)
+ puts loss
  puts "loss before training"
  (0..training_epochs).each do |epoch|
- sess.run(optimizer, feed_dict: { x => x_train, y => y_train })
- loss = sess.run(cost, feed_dict: { x => x_train, y => y_train })
+ x_train.zip(y_train).each do |t_x, t_y|
+ sess.run(optimizer, feed_dict: { x => [t_x], y => [t_y] })
+ loss = sess.run(cost, feed_dict: { x => [t_x], y => [t_y] })
+ end
  puts "loss #{loss}"
  end
  loss = sess.run(cost, feed_dict: { x => x_train, y => y_train })
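Read together, the iris hunks above shrink the network to a single tanh hidden layer feeding a sigmoid output and switch training from one full-batch step per epoch to one update per example. A self-contained sketch of the reworked model, assembled from the hunks rather than copied from the released file (note: the released sample declares `out` as `[num_classes, num_classes]`; the sketch uses `[n_hidden_1, num_classes]` so the matmul shapes line up):

  require 'tensor_stream'

  tf = TensorStream

  num_input   = 4  # sepal/petal measurements
  n_hidden_1  = 4  # 1st layer number of neurons
  num_classes = 3  # iris species

  weights = {
    h1: tf.variable(tf.random_normal([num_input, n_hidden_1]), name: 'h1'),
    out: tf.variable(tf.random_normal([n_hidden_1, num_classes]), name: 'out')
  }
  biases = {
    b1: tf.variable(tf.random_normal([n_hidden_1]), name: 'b1'),
    out: tf.variable(tf.random_normal([num_classes]), name: 'b_out')
  }

  # Single tanh hidden layer, sigmoid output with one neuron per class
  def neural_net(x, weights, biases)
    layer_1 = TensorStream.tanh(TensorStream.add(TensorStream.matmul(x, weights[:h1]), biases[:b1], name: 'layer1_add'))
    TensorStream.sigmoid(TensorStream.matmul(layer_1, weights[:out]) + biases[:out])
  end

  x = tf.placeholder("float", shape: [nil, num_input], name: 'x')
  pred = neural_net(x, weights, biases)

Inside each epoch the sample then calls `sess.run(optimizer, feed_dict: { x => [t_x], y => [t_y] })` once per training example, as the last hunk above shows.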
data/samples/logistic_regression.rb CHANGED
@@ -2,7 +2,6 @@
 
  require "bundler/setup"
  require 'tensor_stream'
- require 'pry-byebug'
 
  tf = TensorStream
 
data/test_samples/raw_neural_net_sample.rb CHANGED
@@ -1,7 +1,26 @@
+ """ Neural Network.
+
+ A 2-Hidden Layers Fully Connected Neural Network (a.k.a Multilayer Perceptron)
+ implementation with TensorFlow. This example is using the MNIST database
+ of handwritten digits (http://yann.lecun.com/exdb/mnist/).
+
+ Links:
+ [MNIST Dataset](http://yann.lecun.com/exdb/mnist/).
+
+ Author: Aymeric Damien
+ Project: https://github.com/aymericdamien/TensorFlow-Examples/
+ """
  require "bundler/setup"
  require 'tensor_stream'
+ require 'mnist-learn'
+ require 'tensor_stream/evaluator/opencl_evaluator'
  require 'pry-byebug'
 
+ tf = TensorStream
+ # Import MNIST data
+ mnist = Mnist.read_data_sets('/tmp/data', one_hot: true)
+
+ # Parameters
  learning_rate = 0.1
  num_steps = 500
  batch_size = 128
@@ -13,42 +32,80 @@ n_hidden_2 = 256 # 2nd layer number of neurons
  num_input = 784 # MNIST data input (img shape: 28*28)
  num_classes = 10 # MNIST total classes (0-9 digits)
 
- tf = TensorStream
-
  # tf Graph input
- X = tf.placeholder("float", shape: [nil, num_input])
- Y = tf.placeholder("float", shape: [nil, num_classes])
+ X = tf.placeholder(:float64, shape: [nil, num_input])
+ Y = tf.placeholder(:float64, shape: [nil, num_classes])
 
  # Store layers weight & bias
- @weights = {
- h1: tf.variable(tf.random_normal([num_input, n_hidden_1])),
- h2: tf.variable(tf.random_normal([n_hidden_1, n_hidden_2])),
- out: tf.variable(tf.random_normal([n_hidden_2, num_classes]))
+ weights = {
+ 'h1' => tf.variable(tf.random_normal([num_input, n_hidden_1]), dtype: :float64),
+ 'h2' => tf.variable(tf.random_normal([n_hidden_1, n_hidden_2]), dtype: :float64),
+ 'out' => tf.variable(tf.random_normal([n_hidden_2, num_classes]), dtype: :float64)
  }
 
- @biases = {
- b1: tf.variable(tf.random_normal([n_hidden_1])),
- b2: tf.variable(tf.random_normal([n_hidden_2])),
- out: tf.variable(tf.random_normal([num_classes]))
+ biases = {
+ 'b1' => tf.variable(tf.random_normal([n_hidden_1]), dtype: :float64),
+ 'b2' => tf.variable(tf.random_normal([n_hidden_2]), dtype: :float64),
+ 'out' => tf.variable(tf.random_normal([num_classes]), dtype: :float64)
  }
 
 
  # Create model
- def neural_net(x)
+ def neural_net(x, weights, biases)
+ tf = TensorStream
  # Hidden fully connected layer with 256 neurons
- layer_1 = TensorStream.add(TensorStream.matmul(x, @weights[:h1]), @biases[:b1])
+ layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])
  # Hidden fully connected layer with 256 neurons
- layer_2 = TensorStream.add(TensorStream.matmul(layer_1, @weights[:h2]), @biases[:b2])
+ layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])
  # Output fully connected layer with a neuron for each class
- TensorStream.matmul(layer_2, @weights[:out]) + @biases[:out]
- end
-
- def softmax(logits)
- TensorStream.exp(logits) / TensorStream.reduce_sum(TensorStream.exp(logits))
+ tf.matmul(layer_2, weights['out']) + biases['out']
  end
 
  # Construct model
- logits = neural_net(X)
- prediction = softmax(logits)
+ logits = neural_net(X, weights, biases)
+ prediction = tf.nn.softmax(logits)
+
+ # Define loss and optimizer
+ loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
+ logits: logits, labels: Y))
+
+ optimizer = TensorStream::Train::GradientDescentOptimizer.new(learning_rate)
+ train_op = optimizer.minimize(loss_op)
+
+ # Evaluate model
+ correct_pred = tf.equal(tf.argmax(prediction, 1), tf.argmax(Y, 1))
+ accuracy = tf.reduce_mean(tf.cast(correct_pred, :float32))
+
+ # tf.add_check_numerics_ops
+
+ # Initialize the variables (i.e. assign their default value)
+ init = tf.global_variables_initializer
+
+ # Start training
+ tf.session(:opencl_evaluator) do |sess|
+ # Run the initializer
+ sess.run(init)
+
+ (1..num_steps+1).each do |step|
+
+ batch_x, batch_y = mnist.train.next_batch(batch_size)
+ # Run optimization op (backprop)
+ puts "...."
+ sess.run(train_op, feed_dict: { X => batch_x, Y => batch_y })
+ puts "----"
+ if step % display_step == 0 || step == 1
+ # Calculate batch loss and accuracy
+ loss, acc = sess.run([loss_op, accuracy], feed_dict: { X => batch_x, Y => batch_y})
+ print("Step " + str(step) + ", Minibatch Loss= " + \
+ "{:.4f}".format(loss) + ", Training Accuracy= " + \
+ "{:.3f}".format(acc))
+ end
+ end
+
+ print("Optimization Finished!")
 
- puts prediction.to_math
+ # Calculate accuracy for MNIST test images
+ print("Testing Accuracy:", \
+ sess.run(accuracy, feed_dict: { X => mnist.test.images,
+ Y => mnist.test.labels}))
+ end
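One caveat before running the new MNIST sample: the progress-report lines carry over Python idioms from the upstream TensorFlow-Examples script (`str(step)` and `"{:.4f}".format(loss)`), and neither exists in Ruby. A plain-Ruby equivalent of that reporting block could be:

  if step % display_step == 0 || step == 1
    # Calculate batch loss and accuracy on the current minibatch
    loss, acc = sess.run([loss_op, accuracy], feed_dict: { X => batch_x, Y => batch_y })
    puts format("Step %d, Minibatch Loss= %.4f, Training Accuracy= %.3f", step, loss, acc)
  end

(`format` here is the standard Kernel#format; any Ruby string formatting would do.)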
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: tensor_stream
  version: !ruby/object:Gem::Version
- version: 0.3.0
+ version: 0.4.0
  platform: ruby
  authors:
  - Joseph Emmanuel Dayo
  autorequire:
  bindir: exe
  cert_chain: []
- date: 2018-06-04 00:00:00.000000000 Z
+ date: 2018-06-17 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: bundler
@@ -238,15 +238,19 @@ files:
  - ".travis.yml"
  - CHANGELOG.md
  - CODE_OF_CONDUCT.md
+ - Dockerfile
  - Gemfile
  - LICENSE.txt
  - README.md
  - Rakefile
  - benchmark/benchmark.rb
+ - benchmark_intel.txt
+ - benchmark_nvidia.txt
  - bin/console
  - bin/setup
  - lib/tensor_stream.rb
  - lib/tensor_stream/control_flow.rb
+ - lib/tensor_stream/debugging/debugging.rb
  - lib/tensor_stream/device.rb
  - lib/tensor_stream/evaluator/buffer.rb
  - lib/tensor_stream/evaluator/evaluator.rb
@@ -274,6 +278,8 @@ files:
  - lib/tensor_stream/evaluator/kernels/sigmoid_grad.cl
  - lib/tensor_stream/evaluator/kernels/sign.cl
  - lib/tensor_stream/evaluator/kernels/sin.cl
+ - lib/tensor_stream/evaluator/kernels/softmax.cl
+ - lib/tensor_stream/evaluator/kernels/softmax_grad.cl
  - lib/tensor_stream/evaluator/kernels/sqrt.cl
  - lib/tensor_stream/evaluator/kernels/square.cl
  - lib/tensor_stream/evaluator/kernels/sub.cl
@@ -309,17 +315,19 @@ files:
  - lib/tensor_stream/train/saver.rb
  - lib/tensor_stream/trainer.rb
  - lib/tensor_stream/types.rb
+ - lib/tensor_stream/utils.rb
  - lib/tensor_stream/variable.rb
  - lib/tensor_stream/version.rb
+ - samples/error.graphml
  - samples/gradient_sample.graphml
  - samples/iris.data
+ - samples/iris.rb
  - samples/linear_regression.rb
  - samples/logistic_regression.rb
  - samples/nearest_neighbor.rb
  - tensor_stream.gemspec
  - test_samples/error.graphml
  - test_samples/gradient_sample.graphml
- - test_samples/iris.rb
  - test_samples/raw_neural_net_sample.rb
  - test_samples/test.py
  - test_samples/test2.py