tensor_stream 1.0.0 → 1.0.1

Files changed (142)
  1. checksums.yaml +4 -4
  2. data/.gitignore +1 -0
  3. data/.rubocop.yml +1 -0
  4. data/Gemfile +1 -1
  5. data/LICENSE.txt +1 -1
  6. data/README.md +34 -34
  7. data/Rakefile +3 -3
  8. data/USAGE_GUIDE.md +235 -0
  9. data/bin/stubgen +20 -0
  10. data/exe/model_utils +2 -2
  11. data/lib/tensor_stream.rb +45 -44
  12. data/lib/tensor_stream/constant.rb +2 -2
  13. data/lib/tensor_stream/control_flow.rb +1 -1
  14. data/lib/tensor_stream/debugging/debugging.rb +2 -2
  15. data/lib/tensor_stream/dynamic_stitch.rb +2 -2
  16. data/lib/tensor_stream/evaluator/base_evaluator.rb +18 -18
  17. data/lib/tensor_stream/evaluator/buffer.rb +1 -1
  18. data/lib/tensor_stream/evaluator/evaluator.rb +2 -2
  19. data/lib/tensor_stream/evaluator/operation_helpers/array_ops_helper.rb +41 -41
  20. data/lib/tensor_stream/evaluator/operation_helpers/math_helper.rb +1 -1
  21. data/lib/tensor_stream/evaluator/ruby/array_ops.rb +39 -39
  22. data/lib/tensor_stream/evaluator/ruby/check_ops.rb +2 -2
  23. data/lib/tensor_stream/evaluator/ruby/images_ops.rb +18 -18
  24. data/lib/tensor_stream/evaluator/ruby/math_ops.rb +13 -14
  25. data/lib/tensor_stream/evaluator/ruby/nn_ops.rb +33 -36
  26. data/lib/tensor_stream/evaluator/ruby/random_ops.rb +20 -21
  27. data/lib/tensor_stream/evaluator/ruby_evaluator.rb +36 -49
  28. data/lib/tensor_stream/exceptions.rb +1 -1
  29. data/lib/tensor_stream/generated_stub/ops.rb +691 -0
  30. data/lib/tensor_stream/generated_stub/stub_file.erb +24 -0
  31. data/lib/tensor_stream/graph.rb +18 -18
  32. data/lib/tensor_stream/graph_builder.rb +17 -17
  33. data/lib/tensor_stream/graph_deserializers/protobuf.rb +97 -97
  34. data/lib/tensor_stream/graph_deserializers/yaml_loader.rb +1 -1
  35. data/lib/tensor_stream/graph_keys.rb +3 -3
  36. data/lib/tensor_stream/graph_serializers/graphml.rb +33 -33
  37. data/lib/tensor_stream/graph_serializers/packer.rb +23 -23
  38. data/lib/tensor_stream/graph_serializers/pbtext.rb +38 -42
  39. data/lib/tensor_stream/graph_serializers/serializer.rb +3 -2
  40. data/lib/tensor_stream/graph_serializers/yaml.rb +5 -5
  41. data/lib/tensor_stream/helpers/infer_shape.rb +56 -56
  42. data/lib/tensor_stream/helpers/op_helper.rb +8 -9
  43. data/lib/tensor_stream/helpers/string_helper.rb +15 -15
  44. data/lib/tensor_stream/helpers/tensor_mixins.rb +17 -17
  45. data/lib/tensor_stream/images.rb +1 -1
  46. data/lib/tensor_stream/initializer.rb +1 -1
  47. data/lib/tensor_stream/math_gradients.rb +28 -187
  48. data/lib/tensor_stream/monkey_patches/array.rb +1 -1
  49. data/lib/tensor_stream/monkey_patches/float.rb +1 -1
  50. data/lib/tensor_stream/monkey_patches/integer.rb +1 -1
  51. data/lib/tensor_stream/monkey_patches/op_patch.rb +5 -5
  52. data/lib/tensor_stream/monkey_patches/patch.rb +1 -1
  53. data/lib/tensor_stream/nn/nn_ops.rb +17 -15
  54. data/lib/tensor_stream/op_maker.rb +180 -0
  55. data/lib/tensor_stream/operation.rb +17 -17
  56. data/lib/tensor_stream/ops.rb +95 -384
  57. data/lib/tensor_stream/ops/add.rb +23 -0
  58. data/lib/tensor_stream/ops/argmax.rb +14 -0
  59. data/lib/tensor_stream/ops/argmin.rb +14 -0
  60. data/lib/tensor_stream/ops/case.rb +17 -0
  61. data/lib/tensor_stream/ops/cast.rb +15 -0
  62. data/lib/tensor_stream/ops/ceil.rb +15 -0
  63. data/lib/tensor_stream/ops/const.rb +0 -0
  64. data/lib/tensor_stream/ops/cos.rb +10 -0
  65. data/lib/tensor_stream/ops/div.rb +21 -0
  66. data/lib/tensor_stream/ops/equal.rb +15 -0
  67. data/lib/tensor_stream/ops/expand_dims.rb +17 -0
  68. data/lib/tensor_stream/ops/fill.rb +19 -0
  69. data/lib/tensor_stream/ops/floor.rb +15 -0
  70. data/lib/tensor_stream/ops/floor_div.rb +15 -0
  71. data/lib/tensor_stream/ops/greater.rb +11 -0
  72. data/lib/tensor_stream/ops/greater_equal.rb +11 -0
  73. data/lib/tensor_stream/ops/less_equal.rb +15 -0
  74. data/lib/tensor_stream/ops/log.rb +14 -0
  75. data/lib/tensor_stream/ops/mat_mul.rb +60 -0
  76. data/lib/tensor_stream/ops/max.rb +15 -0
  77. data/lib/tensor_stream/ops/min.rb +15 -0
  78. data/lib/tensor_stream/ops/mod.rb +23 -0
  79. data/lib/tensor_stream/ops/mul.rb +21 -0
  80. data/lib/tensor_stream/ops/negate.rb +14 -0
  81. data/lib/tensor_stream/ops/ones_like.rb +19 -0
  82. data/lib/tensor_stream/ops/pow.rb +25 -0
  83. data/lib/tensor_stream/ops/prod.rb +60 -0
  84. data/lib/tensor_stream/ops/random_uniform.rb +18 -0
  85. data/lib/tensor_stream/ops/range.rb +20 -0
  86. data/lib/tensor_stream/ops/rank.rb +13 -0
  87. data/lib/tensor_stream/ops/reshape.rb +24 -0
  88. data/lib/tensor_stream/ops/round.rb +15 -0
  89. data/lib/tensor_stream/ops/shape.rb +14 -0
  90. data/lib/tensor_stream/ops/sigmoid.rb +10 -0
  91. data/lib/tensor_stream/ops/sign.rb +12 -0
  92. data/lib/tensor_stream/ops/sin.rb +10 -0
  93. data/lib/tensor_stream/ops/size.rb +16 -0
  94. data/lib/tensor_stream/ops/sub.rb +24 -0
  95. data/lib/tensor_stream/ops/sum.rb +27 -0
  96. data/lib/tensor_stream/ops/tan.rb +12 -0
  97. data/lib/tensor_stream/ops/tanh.rb +10 -0
  98. data/lib/tensor_stream/ops/tile.rb +19 -0
  99. data/lib/tensor_stream/ops/zeros.rb +15 -0
  100. data/lib/tensor_stream/placeholder.rb +2 -2
  101. data/lib/tensor_stream/profile/report_tool.rb +3 -3
  102. data/lib/tensor_stream/session.rb +36 -38
  103. data/lib/tensor_stream/tensor.rb +2 -2
  104. data/lib/tensor_stream/tensor_shape.rb +4 -4
  105. data/lib/tensor_stream/train/adadelta_optimizer.rb +8 -8
  106. data/lib/tensor_stream/train/adagrad_optimizer.rb +3 -3
  107. data/lib/tensor_stream/train/adam_optimizer.rb +11 -11
  108. data/lib/tensor_stream/train/learning_rate_decay.rb +2 -2
  109. data/lib/tensor_stream/train/momentum_optimizer.rb +7 -7
  110. data/lib/tensor_stream/train/optimizer.rb +9 -9
  111. data/lib/tensor_stream/train/rmsprop_optimizer.rb +16 -16
  112. data/lib/tensor_stream/train/saver.rb +14 -14
  113. data/lib/tensor_stream/train/slot_creator.rb +6 -6
  114. data/lib/tensor_stream/train/utils.rb +12 -12
  115. data/lib/tensor_stream/trainer.rb +10 -10
  116. data/lib/tensor_stream/types.rb +1 -1
  117. data/lib/tensor_stream/utils.rb +33 -32
  118. data/lib/tensor_stream/utils/freezer.rb +5 -5
  119. data/lib/tensor_stream/variable.rb +5 -5
  120. data/lib/tensor_stream/variable_scope.rb +1 -1
  121. data/lib/tensor_stream/version.rb +1 -1
  122. data/samples/{iris.data → datasets/iris.data} +0 -0
  123. data/samples/jupyter_notebooks/linear_regression.ipynb +463 -0
  124. data/samples/{iris.rb → neural_networks/iris.rb} +21 -23
  125. data/samples/{mnist_data.rb → neural_networks/mnist_data.rb} +8 -8
  126. data/samples/neural_networks/raw_neural_net_sample.rb +112 -0
  127. data/samples/{rnn.rb → neural_networks/rnn.rb} +28 -31
  128. data/samples/{nearest_neighbor.rb → others/nearest_neighbor.rb} +12 -12
  129. data/samples/regression/linear_regression.rb +63 -0
  130. data/samples/{logistic_regression.rb → regression/logistic_regression.rb} +14 -16
  131. data/tensor_stream.gemspec +9 -8
  132. metadata +89 -19
  133. data/data_1.json +0 -4764
  134. data/data_2.json +0 -4764
  135. data/data_actual.json +0 -28
  136. data/data_expected.json +0 -28
  137. data/data_input.json +0 -28
  138. data/samples/error.graphml +0 -2755
  139. data/samples/gradient_sample.graphml +0 -1255
  140. data/samples/linear_regression.rb +0 -69
  141. data/samples/multigpu.rb +0 -73
  142. data/samples/raw_neural_net_sample.rb +0 -112
data/samples/linear_regression.rb DELETED
@@ -1,69 +0,0 @@
- # Linear Regression sample, using SGD and auto-differentiation
- require "bundler/setup"
- require 'tensor_stream'
-
- tf = TensorStream # use tf to make it look like TensorFlow
-
- learning_rate = 0.01
- momentum = 0.5
- training_epochs = 10000
- display_step = 50
-
- train_X = [3.3,4.4,5.5,6.71,6.93,4.168,9.779,6.182,7.59,2.167,
-            7.042,10.791,5.313,7.997,5.654,9.27,3.1]
- train_Y = [1.7,2.76,2.09,3.19,1.694,1.573,3.366,2.596,2.53,1.221,
-            2.827,3.465,1.65,2.904,2.42,2.94,1.3]
-
- n_samples = train_X.size
-
- X = Float.placeholder
- Y = Float.placeholder
-
- # Set model weights
-
- W = rand.t.var name: "weight"
- b = rand.t.var name: "bias"
-
- # Construct a linear model
- pred = X * W + b
-
- # Mean squared error
- cost = ((pred - Y) ** 2).reduce / ( 2 * n_samples)
-
- # Other possible Optimizers
-
- # optimizer = TensorStream::Train::MomentumOptimizer.new(learning_rate, momentum, use_nesterov: true).minimize(cost)
- # optimizer = TensorStream::Train::AdamOptimizer.new(learning_rate).minimize(cost)
- # optimizer = TensorStream::Train::AdadeltaOptimizer.new(1.0).minimize(cost)
- # optimizer = TensorStream::Train::AdagradOptimizer.new(0.01).minimize(cost)
- # optimizer = TensorStream::Train::RMSPropOptimizer.new(0.01, centered: true).minimize(cost)
- optimizer = TensorStream::Train::GradientDescentOptimizer.new(learning_rate).minimize(cost)
-
-
- # Initialize the variables (i.e. assign their default value)
- init = tf.global_variables_initializer()
- # Add ops to save and restore all the variables.
- saver = tf::Train::Saver.new
-
- tf.session do |sess|
-   start_time = Time.now
-   sess.run(init)
-   (0..training_epochs).each do |epoch|
-     train_X.zip(train_Y).each do |x,y|
-       sess.run(optimizer, feed_dict: {X => x, Y => y})
-     end
-
-     if (epoch+1) % display_step == 0
-       # Save the variables to disk.
-       save_path = saver.save(sess, "/tmp/lg_model")
-       c = sess.run(cost, feed_dict: {X => train_X, Y => train_Y})
-       puts("Epoch:", '%04d' % (epoch+1), "cost=", c, \
-            "W=", sess.run(W), "b=", sess.run(b))
-     end
-   end
-
-   puts("Optimization Finished!")
-   training_cost = sess.run(cost, feed_dict: { X => train_X, Y => train_Y})
-   puts("Training cost=", training_cost, "W=", sess.run(W), "b=", sess.run(b), '\n')
-   puts("time elapsed ", Time.now.to_i - start_time.to_i)
- end
data/samples/multigpu.rb DELETED
@@ -1,73 +0,0 @@
- require "bundler/setup"
- require 'tensor_stream'
- require 'tensor_stream/evaluator/opencl/opencl_evaluator'
- # require 'pry-byebug'
-
- ts = TensorStream
-
- n = 10
- DIMEN = 1024
-
- A = ts.random_uniform([DIMEN, DIMEN]).eval
- B = ts.random_uniform([DIMEN, DIMEN]).eval
-
-
- # Create a graph to store results
- c1 = []
- c2 = []
- a = nil
- b = nil
-
- def matpow(m, n)
-   return m if n < 1
-   TensorStream.matmul(m, matpow(m, n-1))
- end
-
- ts.device('/device:GPU:0') do
-   a = ts.placeholder(:float32, shape: [DIMEN, DIMEN])
-   b = ts.placeholder(:float32, shape: [DIMEN, DIMEN])
-   # Compute A^n and B^n and store results in c1
-   c1 << matpow(a, n)
-   c1 << matpow(b, n)
- end
-
- sum = ts.device('/device:GPU:0') do
-   ts.add_n(c1)
- end
-
- t1_1 = Time.now.to_i
- t2_1 = nil
-
- ts.session(log_device_placement: true) do |sess|
-   sess.run(sum, feed_dict: { a => A, b => B})
-   t2_1 = Time.now.to_i
- end
-
- # Multi GPU computing
- # GPU:0 computes A^n
- ts.device('/device:GPU:1') do
-   a = ts.placeholder(:float32, shape: [DIMEN, DIMEN])
-   c2 << matpow(a, n)
- end
-
- # GPU:1 computes B^n
- ts.device('/device:GPU:1') do
-   b = ts.placeholder(:float32, shape: [DIMEN, DIMEN])
-   c2 << matpow(b, n)
- end
-
- ts.device('/device:GPU:1') do
-   sum = ts.add_n(c2) # Addition of all elements in c2, i.e. A^n + B^n
- end
-
- t1_2 = Time.now.to_i
- t2_2 = nil
- ts.session(log_device_placement: true) do |sess|
-   # Run the op.
-   sess.run(sum, feed_dict: {a => A, b => B})
-   t2_2 = Time.now.to_i
- end
-
-
- print("Single GPU computation time: " + (t2_1-t1_1).to_s)
- print("Multi GPU computation time: " + (t2_2-t1_2).to_s)
data/samples/raw_neural_net_sample.rb DELETED
@@ -1,112 +0,0 @@
- """ Neural Network.
-
- A 2-Hidden Layers Fully Connected Neural Network (a.k.a Multilayer Perceptron)
- implementation with TensorFlow. This example is using the MNIST database
- of handwritten digits (http://yann.lecun.com/exdb/mnist/).
-
- Links:
-     [MNIST Dataset](http://yann.lecun.com/exdb/mnist/).
-
- Author: Aymeric Damien
- Project: https://github.com/aymericdamien/TensorFlow-Examples/
-
- The mnist-learn gem is required as well as an OpenCL compatible device with drivers correctly installed
- """
- require "bundler/setup"
- require 'tensor_stream'
- require 'mnist-learn'
-
- tf = TensorStream
- # Import MNIST data
- puts "downloading minst data"
- mnist = Mnist.read_data_sets('/tmp/data', one_hot: true)
- puts "downloading finished"
-
- # Parameters
- learning_rate = 0.001
- momentum = 0.01
- num_steps = 100
- batch_size = 128
- display_step = 5
-
- # Network Parameters
- n_hidden_1 = 256 # 1st layer number of neurons
- n_hidden_2 = 256 # 2nd layer number of neurons
- num_input = 784 # MNIST data input (img shape: 28*28)
- num_classes = 10 # MNIST total classes (0-9 digits)
-
- # tf Graph input
- X = tf.placeholder(:float64, shape: [nil, num_input])
- Y = tf.placeholder(:float64, shape: [nil, num_classes])
-
- # Store layers weight & bias
- weights = {
-   'h1' => tf.variable(tf.random_normal([num_input, n_hidden_1]), dtype: :float64, name: 'h1'),
-   'h2' => tf.variable(tf.random_normal([n_hidden_1, n_hidden_2]), dtype: :float64, name: 'h2'),
-   'out' => tf.variable(tf.random_normal([n_hidden_2, num_classes]), dtype: :float64, name: 'out')
- }
-
- biases = {
-   'b1' => tf.variable(tf.random_normal([n_hidden_1]), dtype: :float64, name: 'b1'),
-   'b2' => tf.variable(tf.random_normal([n_hidden_2]), dtype: :float64, name: 'b2'),
-   'out' => tf.variable(tf.random_normal([num_classes]), dtype: :float64, name: 'out2')
- }
-
-
- # Create model
- def neural_net(x, weights, biases)
-   tf = TensorStream
-   # Hidden fully connected layer with 256 neurons
-   layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])
-   # Hidden fully connected layer with 256 neurons
-   layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])
-   # Output fully connected layer with a neuron for each class
-   tf.matmul(layer_2, weights['out']) + biases['out']
- end
-
- # Construct model
- logits = neural_net(X, weights, biases)
- prediction = tf.nn.softmax(logits)
-
- # Define loss and optimizer
- loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(
-   logits: logits, labels: Y))
-
- optimizer = TensorStream::Train::MomentumOptimizer.new(learning_rate, momentum, use_nesterov: true)
- train_op = optimizer.minimize(loss_op)
-
- # Evaluate model
- correct_pred = tf.equal(tf.argmax(prediction, 1), tf.argmax(Y, 1))
- accuracy = tf.reduce_mean(tf.cast(correct_pred, :float32))
-
- # tf.add_check_numerics_ops
-
- # Initialize the variables (i.e. assign their default value)
- init = tf.global_variables_initializer
-
- # Start training
- tf.session do |sess|
-   # Run the initializer
-   sess.run(init)
-
-   print("Testing Accuracy:", \
-     sess.run(accuracy, feed_dict: { X => mnist.test.images,
-                                     Y => mnist.test.labels}))
-
-   (1..num_steps+1).each do |step|
-     batch_x, batch_y = mnist.train.next_batch(batch_size)
-     # Run optimization op (backprop)
-     sess.run(train_op, feed_dict: { X => batch_x, Y => batch_y })
-     if step % display_step == 0 || step == 1
-       # Calculate batch loss and accuracy
-       loss, acc = sess.run([loss_op, accuracy], feed_dict: { X => batch_x, Y => batch_y})
-       print("\nStep " + step.to_s + ", Minibatch Loss= " + \
-         loss.to_s + ", Training Accuracy= " + \
-         acc.to_s)
-     end
-   end
-   print("\nOptimization Finished!")
-   print("\nTesting Accuracy after optimization:", \
-     sess.run(accuracy, feed_dict: { X => mnist.test.images,
-                                     Y => mnist.test.labels}))
- end