tensor_stream 0.7.0 → 0.8.0
This diff covers publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.
- checksums.yaml +5 -5
- data/.rubocop.yml +6 -1
- data/CHANGELOG.md +10 -0
- data/README.md +35 -0
- data/lib/tensor_stream.rb +2 -2
- data/lib/tensor_stream/debugging/debugging.rb +2 -1
- data/lib/tensor_stream/dynamic_stitch.rb +23 -24
- data/lib/tensor_stream/evaluator/base_evaluator.rb +27 -18
- data/lib/tensor_stream/evaluator/opencl/kernels/apply_momentum.cl +16 -0
- data/lib/tensor_stream/evaluator/opencl/kernels/pack.cl +24 -0
- data/lib/tensor_stream/evaluator/opencl/kernels/softmax_cross.cl +6 -1
- data/lib/tensor_stream/evaluator/opencl/opencl_buffer.rb +6 -6
- data/lib/tensor_stream/evaluator/opencl/opencl_evaluator.rb +237 -107
- data/lib/tensor_stream/evaluator/operation_helpers/array_ops_helper.rb +97 -7
- data/lib/tensor_stream/evaluator/ruby_evaluator.rb +230 -123
- data/lib/tensor_stream/exceptions.rb +1 -0
- data/lib/tensor_stream/graph_builder.rb +2 -3
- data/lib/tensor_stream/graph_deserializers/protobuf.rb +22 -23
- data/lib/tensor_stream/graph_serializers/graphml.rb +26 -29
- data/lib/tensor_stream/graph_serializers/pbtext.rb +22 -19
- data/lib/tensor_stream/helpers/string_helper.rb +4 -5
- data/lib/tensor_stream/math_gradients.rb +141 -77
- data/lib/tensor_stream/nn/nn_ops.rb +4 -6
- data/lib/tensor_stream/operation.rb +139 -120
- data/lib/tensor_stream/ops.rb +36 -3
- data/lib/tensor_stream/session.rb +7 -11
- data/lib/tensor_stream/tensor.rb +3 -3
- data/lib/tensor_stream/tensor_shape.rb +5 -0
- data/lib/tensor_stream/train/gradient_descent_optimizer.rb +4 -37
- data/lib/tensor_stream/train/momentum_optimizer.rb +48 -0
- data/lib/tensor_stream/train/optimizer.rb +129 -0
- data/lib/tensor_stream/train/saver.rb +0 -1
- data/lib/tensor_stream/train/slot_creator.rb +62 -0
- data/lib/tensor_stream/train/utils.rb +11 -12
- data/lib/tensor_stream/trainer.rb +3 -0
- data/lib/tensor_stream/utils.rb +18 -11
- data/lib/tensor_stream/variable.rb +19 -12
- data/lib/tensor_stream/variable_scope.rb +1 -1
- data/lib/tensor_stream/version.rb +1 -1
- data/samples/iris.rb +2 -1
- data/samples/linear_regression.rb +3 -1
- data/samples/nearest_neighbor.rb +2 -0
- data/test_samples/neural_network_raw.py +101 -0
- data/test_samples/raw_neural_net_sample.rb +6 -4
- data/test_samples/test2.py +73 -27
- metadata +9 -3
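The headline of 0.8.0 is a new optimizer hierarchy under train/ (optimizer.rb, slot_creator.rb) with a first concrete subclass, MomentumOptimizer, plus a matching OpenCL apply_momentum.cl kernel. A minimal usage sketch follows, based on the constructor call visible in the samples/iris.rb hunk below; the tf.variable, tf.session, and tf.global_variables_initializer helpers are assumed from the existing tensor_stream API:

    require 'tensor_stream'

    tf = TensorStream

    # tf.variable is assumed here, as used elsewhere in the gem's samples
    w = tf.variable(0.0, name: 'w')
    cost = (w - 3.0) ** 2

    # learning rate 0.01, momentum 0.1; use_nesterov: true switches to Nesterov momentum
    train_op = TensorStream::Train::MomentumOptimizer.new(0.01, 0.1, use_nesterov: true).minimize(cost)

    tf.session do |sess|
      sess.run(tf.global_variables_initializer)
      100.times { sess.run(train_op) }
      puts sess.run(w) # converges toward 3.0
    end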
data/lib/tensor_stream/variable_scope.rb
CHANGED
@@ -8,7 +8,7 @@ module TensorStream
     @initializer = initializer
   end

-  def get_variable(name, dtype: nil, shape: nil, initializer: nil, trainable: true, collections: nil)
+  def get_variable(name, dtype: nil, shape: nil, initializer: nil, trainable: true, collections: nil, validate_shape: false)
     TensorStream::Variable.new(dtype || :float32, nil, shape, self, collections: collections, name: name, initializer: initializer, trainable: trainable)
   end
 end
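The only change here is the new validate_shape keyword, defaulting to false; note that within this hunk the flag is not yet forwarded to Variable.new, so it only widens the signature. A call-site sketch (the module-level tf.get_variable delegator is assumed; the hunk itself shows only the VariableScope method):

    tf = TensorStream

    # validate_shape: is accepted from 0.8.0 onward; in this hunk it is not
    # passed through to Variable.new, so it has no effect yet
    w = tf.get_variable('weights', dtype: :float32, shape: [10, 4], validate_shape: true)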
data/samples/iris.rb
CHANGED
@@ -79,7 +79,7 @@ predict = tf.argmax(yhat, 1)

 # Backward propagation
 cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels: y, logits: yhat))
-updates = TensorStream::Train::
+updates = TensorStream::Train::MomentumOptimizer.new(0.01, 0.1, use_nesterov: true).minimize(cost)

 # Run SGD
 sess = tf.session
@@ -94,6 +94,7 @@ start_time = Time.now
   x_train.size.times do |i|
     sess.run(updates, feed_dict: {X => [x_train[i]], y => [y_train[i]]})
   end
+
   loss = sess.run(cost, feed_dict: { X => x_train, y => y_train })
   puts "epoch: #{epoch}, loss #{loss}"
 end
data/samples/linear_regression.rb
CHANGED
@@ -5,7 +5,8 @@ require 'benchmark'
 tf = TensorStream

 learning_rate = 0.01
-
+momentum = 0.5
+training_epochs = 10000
 display_step = 50

 train_X = [3.3,4.4,5.5,6.71,6.93,4.168,9.779,6.182,7.59,2.167,
@@ -28,6 +29,7 @@ pred = X * W + b
 # Mean squared error
 cost = ((pred - Y) ** 2).reduce(:+) / ( 2 * n_samples)

+# optimizer = TensorStream::Train::MomentumOptimizer.new(learning_rate, momentum).minimize(cost)
 optimizer = TensorStream::Train::GradientDescentOptimizer.new(learning_rate).minimize(cost)

 # Initialize the variables (i.e. assign their default value)
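The commented-out line above stages the new MomentumOptimizer as a drop-in replacement for GradientDescentOptimizer in this sample. For intuition, here is a scalar sketch of the classic momentum update such an optimizer is expected to apply; TensorFlow-style semantics are assumed, not taken from the gem's source:

    # assumed TF-style momentum step: accum <- momentum * accum + grad
    def momentum_step(var, accum, grad, lr, momentum, use_nesterov: false)
      accum = momentum * accum + grad
      delta = use_nesterov ? grad + momentum * accum : accum
      [var - lr * delta, accum]
    end

    # minimize var**2 from var = 1.0 (gradient is 2.0 * var)
    var, accum = 1.0, 0.0
    10.times { var, accum = momentum_step(var, accum, 2.0 * var, 0.01, 0.5) }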
data/test_samples/neural_network_raw.py
ADDED
@@ -0,0 +1,101 @@
+""" Neural Network.
+
+A 2-Hidden Layers Fully Connected Neural Network (a.k.a Multilayer Perceptron)
+implementation with TensorFlow. This example is using the MNIST database
+of handwritten digits (http://yann.lecun.com/exdb/mnist/).
+
+Links:
+    [MNIST Dataset](http://yann.lecun.com/exdb/mnist/).
+
+Author: Aymeric Damien
+Project: https://github.com/aymericdamien/TensorFlow-Examples/
+"""
+
+from __future__ import print_function
+
+# Import MNIST data
+from tensorflow.examples.tutorials.mnist import input_data
+mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
+
+import tensorflow as tf
+
+# Parameters
+learning_rate = 0.1
+num_steps = 500
+batch_size = 128
+display_step = 100
+
+# Network Parameters
+n_hidden_1 = 256 # 1st layer number of neurons
+n_hidden_2 = 256 # 2nd layer number of neurons
+num_input = 784 # MNIST data input (img shape: 28*28)
+num_classes = 10 # MNIST total classes (0-9 digits)
+
+# tf Graph input
+X = tf.placeholder("float", [None, num_input])
+Y = tf.placeholder("float", [None, num_classes])
+
+# Store layers weight & bias
+weights = {
+    'h1': tf.Variable(tf.random_normal([num_input, n_hidden_1])),
+    'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
+    'out': tf.Variable(tf.random_normal([n_hidden_2, num_classes]))
+}
+biases = {
+    'b1': tf.Variable(tf.random_normal([n_hidden_1])),
+    'b2': tf.Variable(tf.random_normal([n_hidden_2])),
+    'out': tf.Variable(tf.random_normal([num_classes]))
+}
+
+
+# Create model
+def neural_net(x):
+    # Hidden fully connected layer with 256 neurons
+    layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])
+    # Hidden fully connected layer with 256 neurons
+    layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])
+    # Output fully connected layer with a neuron for each class
+    out_layer = tf.matmul(layer_2, weights['out']) + biases['out']
+    return out_layer
+
+# Construct model
+logits = neural_net(X)
+prediction = tf.nn.softmax(logits)
+
+# Define loss and optimizer
+loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
+    logits=logits, labels=Y))
+optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
+train_op = optimizer.minimize(loss_op)
+
+# Evaluate model
+correct_pred = tf.equal(tf.argmax(prediction, 1), tf.argmax(Y, 1))
+accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
+
+# Initialize the variables (i.e. assign their default value)
+init = tf.global_variables_initializer()
+
+# Start training
+with tf.Session() as sess:
+
+    # Run the initializer
+    sess.run(init)
+
+    for step in range(1, num_steps+1):
+        batch_x, batch_y = mnist.train.next_batch(batch_size)
+        # Run optimization op (backprop)
+        sess.run(train_op, feed_dict={X: batch_x, Y: batch_y})
+        if step % display_step == 0 or step == 1:
+            # Calculate batch loss and accuracy
+            loss, acc = sess.run([loss_op, accuracy], feed_dict={X: batch_x,
+                                                                 Y: batch_y})
+            print("Step " + str(step) + ", Minibatch Loss= " + \
+                  "{:.4f}".format(loss) + ", Training Accuracy= " + \
+                  "{:.3f}".format(acc))
+
+    print("Optimization Finished!")
+
+    # Calculate accuracy for MNIST test images
+    print("Testing Accuracy:", \
+        sess.run(accuracy, feed_dict={X: mnist.test.images,
+                                      Y: mnist.test.labels}))
data/test_samples/raw_neural_net_sample.rb
CHANGED
@@ -18,13 +18,15 @@ require 'pry-byebug'

 tf = TensorStream
 # Import MNIST data
+puts "downloading minst data"
 mnist = Mnist.read_data_sets('/tmp/data', one_hot: true)
+puts "downloading finished"

 # Parameters
 learning_rate = 0.1
 num_steps = 500
 batch_size = 128
-display_step =
+display_step = 5

 # Network Parameters
 n_hidden_1 = 256 # 1st layer number of neurons
@@ -86,9 +88,9 @@ tf.session do |sess|
   # Run the initializer
   sess.run(init)

-  print("Testing Accuracy:", \
-
-
+  # print("Testing Accuracy:", \
+  #       sess.run(accuracy, feed_dict: { X => mnist.test.images,
+  #                                       Y => mnist.test.labels}))

   (1..num_steps+1).each do |step|
     batch_x, batch_y = mnist.train.next_batch(batch_size)
data/test_samples/test2.py
CHANGED
@@ -1,41 +1,87 @@
 import tensorflow as tf

-
+batch_x = [
+    [0.686274, 0.10196, 0.6509, 1.0, 0.9686, 0.49803, 0.0, 0.0, 0.0, 0.0],
+    [0.543244, 0.10123, 0.4509, 0.0, 0.6986, 0.39803, 1.0, 0.0, 0.0, 0.0]]

-
+batch_y = [
+    [0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0],
+    [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0]
+]

-
-
+num_input = 10
+num_classes = 10
+n_hidden_1 = 4 # 1st layer number of neurons
+n_hidden_2 = 4 # 2nd layer number of neurons

-
-
-b3 = tf.constant([2.0, 3.1, 1.0, 0.2, 0.2])
+X = batch_x #tf.placeholder(tf.float32, shape=[None, num_input])
+Y = batch_y # tf.placeholder(tf.float32, shape=[None, num_classes])

-
-
-
-
-
+h1_init = tf.constant([[0.5937, 0.2343, 1.4332, 0.4395],
+                       [-1.0227, -0.6915, 1.2367, 0.3452],
+                       [-0.5675, 1.0374, 1.0429, 0.8839],
+                       [-0.1066, -0.0469, -1.6317, -1.4836],
+                       [0.7835, -3.0105, 1.713, -0.4536],
+                       [-0.3076, 1.3662, -0.6537, 0.0905],
+                       [-0.2459, 0.2243, -2.7048, 0.848],
+                       [0.3589, 0.3542, -0.0959, -1.327],
+                       [-0.4685, 0.0844, 0.2794, 2.1275],
+                       [-1.0733, 0.6189, 0.845, 0.033]])

+h2_init = tf.constant([[0.5012, 1.302, -1.6217, 0.669], [0.1494, -0.7837, -0.2978, 1.7745], [1.9727, -0.5312, -0.7391, 0.9187], [-0.6412, -1.4434, -0.8801, 0.9343]])
+h3_init = tf.constant([[0.5012, 1.302, -1.6217, 0.669, 0.1494, -0.7837, -0.2978, 1.7745, 1.9727, -0.5312],
+                       [-0.7391, 0.9187, -0.6412, -1.4434, -0.8801, 0.9343, -0.1665, -0.0032, 0.2959, -2.0488],
+                       [-0.9135, 1.0376, 0.8537, 0.4376, 1.3255, -0.5921, -1.4081, 1.0614, -0.5283, 1.1832],
+                       [0.7285, -0.7844, 0.1793, -0.5275, -0.4426, -1.4976, 0.4433, 2.2317, -2.0479, 0.7791]])


-
-
-
-
+b1_init = tf.constant([0.1494, -0.7837, -0.2978, 1.7745])
+
+b2_init = tf.constant([1.9727, -0.5312, -0.7391, 0.9187])
+out_init = tf.constant([-0.6412, -1.4434, -0.8801, 0.9343, -0.1665, -0.0032, 0.2959, -2.0488, -0.9135, 1.0376])
+
+h1 = tf.Variable(h1_init, dtype=tf.float32, name='h1')
+h2 = tf.Variable(h2_init, dtype=tf.float32, name='h2')
+h3 = tf.Variable(h3_init, dtype=tf.float32, name='out')
+
+b1 = tf.Variable(b1_init, dtype=tf.float32, name='b1')
+b2 = tf.Variable(b2_init, dtype=tf.float32, name='b2')
+out = tf.Variable(out_init, dtype=tf.float32, name='out2')
+
+layer_1 = tf.add(tf.matmul(X, h1), b1)
+# Hidden fully connected layer with 256 neurons
+layer_2 = tf.add(tf.matmul(layer_1, h2), b2)
+# Output fully connected layer with a neuron for each class

 sess = tf.Session()
-s2 = sess.run(g_matmul_layer_2_add)
-g_a2 = tf.gradients(a2, [b], name="final")

-
-
-
-
-
-
+logits = tf.matmul(layer_2, h3) + out
+prediction = tf.nn.softmax(logits)
+
+loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=Y))
+
+optimizer = tf.train.GradientDescentOptimizer(0.01)
+train_op = optimizer.minimize(loss_op)
+init = tf.global_variables_initializer()
+
+sess.run(init)
+# print(sess.run(layer_1))
+tf.gradients(loss_op, [logits])
+print("------------")
+
+print("H1: ", sess.run(h1))
+print("------------ Running train 1")
+# sess.run(train_op, feed_dict={ X: batch_x, Y: batch_y })
+sess.run(train_op)
+print("H1:", sess.run(h1))
+print("H2:", sess.run(h2))
+print("H3:", sess.run(h3))

-
-sess.run(
-
+print(sess.run(b1))
+print(sess.run(b2))
+print(sess.run(out))

+# sess.run(train_op, feed_dict={ X: batch_x, Y: batch_y })
+print("------------- Running train 2")
+sess.run(train_op)
+print("H1:", sess.run(h1))
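test2.py above is a scratch TensorFlow script mirroring the Ruby raw_neural_net_sample.rb, evidently used to cross-check tensor_stream's gradient math step by step (note it builds tf.gradients(loss_op, [logits]) but discards the result). A rough tensor_stream counterpart for such a gradient check, assuming the gem's gradients, variable, and session helpers mirror the TF API as its other ops do:

    require 'tensor_stream'

    tf = TensorStream

    w = tf.variable([[1.0, 2.0], [3.0, 4.0]], name: 'w') # assumed helper
    x = tf.constant([[0.5, 0.5]])
    loss = tf.reduce_sum(tf.matmul(x, w))

    # d(loss)/dw; tf.gradients is assumed to return one tensor per input
    grad_w = tf.gradients(loss, [w]).first

    tf.session do |sess|
      sess.run(tf.global_variables_initializer)
      puts sess.run(grad_w).inspect # compare against the TF script's values
    end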
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: tensor_stream
 version: !ruby/object:Gem::Version
-  version: 0.
+  version: 0.8.0
 platform: ruby
 authors:
 - Joseph Emmanuel Dayo
 autorequire:
 bindir: exe
 cert_chain: []
-date: 2018-08-
+date: 2018-08-29 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: bundler
@@ -249,6 +249,7 @@ files:
 - lib/tensor_stream/evaluator/opencl/kernels/acos.cl
 - lib/tensor_stream/evaluator/opencl/kernels/add.cl
 - lib/tensor_stream/evaluator/opencl/kernels/apply_gradient.cl
+- lib/tensor_stream/evaluator/opencl/kernels/apply_momentum.cl
 - lib/tensor_stream/evaluator/opencl/kernels/argmax.cl
 - lib/tensor_stream/evaluator/opencl/kernels/argmin.cl
 - lib/tensor_stream/evaluator/opencl/kernels/asin.cl
@@ -270,6 +271,7 @@ files:
 - lib/tensor_stream/evaluator/opencl/kernels/mod.cl
 - lib/tensor_stream/evaluator/opencl/kernels/mul.cl
 - lib/tensor_stream/evaluator/opencl/kernels/negate.cl
+- lib/tensor_stream/evaluator/opencl/kernels/pack.cl
 - lib/tensor_stream/evaluator/opencl/kernels/pow.cl
 - lib/tensor_stream/evaluator/opencl/kernels/real_div.cl
 - lib/tensor_stream/evaluator/opencl/kernels/reciprocal.cl
@@ -319,7 +321,10 @@ files:
 - lib/tensor_stream/tensor.rb
 - lib/tensor_stream/tensor_shape.rb
 - lib/tensor_stream/train/gradient_descent_optimizer.rb
+- lib/tensor_stream/train/momentum_optimizer.rb
+- lib/tensor_stream/train/optimizer.rb
 - lib/tensor_stream/train/saver.rb
+- lib/tensor_stream/train/slot_creator.rb
 - lib/tensor_stream/train/utils.rb
 - lib/tensor_stream/trainer.rb
 - lib/tensor_stream/types.rb
@@ -338,6 +343,7 @@ files:
 - tensor_stream.gemspec
 - test_samples/error.graphml
 - test_samples/gradient_sample.graphml
+- test_samples/neural_network_raw.py
 - test_samples/raw_neural_net_sample.rb
 - test_samples/test.py
 - test_samples/test2.py
@@ -362,7 +368,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
     version: '0'
 requirements: []
 rubyforge_project:
-rubygems_version: 2.
+rubygems_version: 2.6.8
 signing_key:
 specification_version: 4
 summary: A Pure ruby tensorflow implementation