tensor_stream 0.8.0 → 0.8.1
- checksums.yaml +5 -5
- data/CHANGELOG.md +4 -0
- data/README.md +18 -5
- data/lib/tensor_stream/evaluator/opencl/kernels/apply_adam.cl +23 -0
- data/lib/tensor_stream/evaluator/opencl/opencl_evaluator.rb +51 -4
- data/lib/tensor_stream/evaluator/ruby/math_ops.rb +144 -0
- data/lib/tensor_stream/evaluator/ruby/nn_ops.rb +99 -0
- data/lib/tensor_stream/evaluator/ruby_evaluator.rb +6 -253
- data/lib/tensor_stream/ops.rb +2 -1
- data/lib/tensor_stream/session.rb +17 -8
- data/lib/tensor_stream/train/adam_optimizer.rb +87 -0
- data/lib/tensor_stream/train/gradient_descent_optimizer.rb +2 -1
- data/lib/tensor_stream/train/optimizer.rb +25 -2
- data/lib/tensor_stream/train/slot_creator.rb +1 -1
- data/lib/tensor_stream/trainer.rb +1 -0
- data/lib/tensor_stream/utils.rb +25 -4
- data/lib/tensor_stream/variable.rb +1 -1
- data/lib/tensor_stream/variable_scope.rb +7 -1
- data/lib/tensor_stream/version.rb +1 -1
- data/samples/iris.rb +9 -6
- data/samples/linear_regression.rb +6 -4
- data/samples/nearest_neighbor.rb +2 -2
- data/{test_samples → samples}/raw_neural_net_sample.rb +17 -20
- metadata +7 -8
- data/test_samples/error.graphml +0 -120
- data/test_samples/gradient_sample.graphml +0 -1255
- data/test_samples/neural_network_raw.py +0 -101
- data/test_samples/test.py +0 -46
- data/test_samples/test2.py +0 -87
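Judging from the file list, the headline change in 0.8.1 is Adam support: a new `TensorStream::Train::AdamOptimizer`, Ruby kernels split out into `math_ops.rb` and `nn_ops.rb`, and a matching OpenCL kernel (`apply_adam.cl`). A minimal usage sketch, not taken from the diff, assuming the optimizer mirrors `tf.train.AdamOptimizer` the way the rest of the gem mirrors the TensorFlow v1 API:

```ruby
require "tensor_stream"

tf = TensorStream

# Toy least-squares problem; every name below is illustrative, not from the diff.
x = tf.placeholder(:float32)
y = tf.placeholder(:float32)

w = tf.variable(tf.random_normal([2, 1]), name: "w")
b = tf.variable(tf.zeros([1]), name: "b")

pred = tf.matmul(x, w) + b
loss = tf.reduce_mean(tf.square(pred - y))

# Assumed constructor: learning rate first, TF-style defaults for beta1/beta2.
train_op = TensorStream::Train::AdamOptimizer.new(0.01).minimize(loss)

xs = [[0.0, 1.0], [1.0, 0.0]]
ys = [[1.0], [0.0]]

tf.session do |sess|
  sess.run(tf.global_variables_initializer)
  100.times { sess.run(train_op, feed_dict: { x => xs, y => ys }) }
  puts sess.run(loss, feed_dict: { x => xs, y => ys })
end
```

Since the manifest also adds `apply_adam.cl`, the same graph should presumably run unchanged under the OpenCL evaluator.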
data/test_samples/neural_network_raw.py
DELETED
@@ -1,101 +0,0 @@
-""" Neural Network.
-
-A 2-Hidden Layers Fully Connected Neural Network (a.k.a Multilayer Perceptron)
-implementation with TensorFlow. This example is using the MNIST database
-of handwritten digits (http://yann.lecun.com/exdb/mnist/).
-
-Links:
-    [MNIST Dataset](http://yann.lecun.com/exdb/mnist/).
-
-Author: Aymeric Damien
-Project: https://github.com/aymericdamien/TensorFlow-Examples/
-"""
-
-from __future__ import print_function
-
-# Import MNIST data
-from tensorflow.examples.tutorials.mnist import input_data
-mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
-
-import tensorflow as tf
-
-# Parameters
-learning_rate = 0.1
-num_steps = 500
-batch_size = 128
-display_step = 100
-
-# Network Parameters
-n_hidden_1 = 256  # 1st layer number of neurons
-n_hidden_2 = 256  # 2nd layer number of neurons
-num_input = 784  # MNIST data input (img shape: 28*28)
-num_classes = 10  # MNIST total classes (0-9 digits)
-
-# tf Graph input
-X = tf.placeholder("float", [None, num_input])
-Y = tf.placeholder("float", [None, num_classes])
-
-# Store layers weight & bias
-weights = {
-    'h1': tf.Variable(tf.random_normal([num_input, n_hidden_1])),
-    'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
-    'out': tf.Variable(tf.random_normal([n_hidden_2, num_classes]))
-}
-biases = {
-    'b1': tf.Variable(tf.random_normal([n_hidden_1])),
-    'b2': tf.Variable(tf.random_normal([n_hidden_2])),
-    'out': tf.Variable(tf.random_normal([num_classes]))
-}
-
-
-# Create model
-def neural_net(x):
-    # Hidden fully connected layer with 256 neurons
-    layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])
-    # Hidden fully connected layer with 256 neurons
-    layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])
-    # Output fully connected layer with a neuron for each class
-    out_layer = tf.matmul(layer_2, weights['out']) + biases['out']
-    return out_layer
-
-# Construct model
-logits = neural_net(X)
-prediction = tf.nn.softmax(logits)
-
-# Define loss and optimizer
-loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
-    logits=logits, labels=Y))
-optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
-train_op = optimizer.minimize(loss_op)
-
-# Evaluate model
-correct_pred = tf.equal(tf.argmax(prediction, 1), tf.argmax(Y, 1))
-accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
-
-# Initialize the variables (i.e. assign their default value)
-init = tf.global_variables_initializer()
-
-# Start training
-with tf.Session() as sess:
-
-    # Run the initializer
-    sess.run(init)
-
-    for step in range(1, num_steps + 1):
-        batch_x, batch_y = mnist.train.next_batch(batch_size)
-        # Run optimization op (backprop)
-        sess.run(train_op, feed_dict={X: batch_x, Y: batch_y})
-        if step % display_step == 0 or step == 1:
-            # Calculate batch loss and accuracy
-            loss, acc = sess.run([loss_op, accuracy], feed_dict={X: batch_x,
-                                                                 Y: batch_y})
-            print("Step " + str(step) + ", Minibatch Loss= " + \
-                  "{:.4f}".format(loss) + ", Training Accuracy= " + \
-                  "{:.3f}".format(acc))
-
-    print("Optimization Finished!")
-
-    # Calculate accuracy for MNIST test images
-    print("Testing Accuracy:", \
-        sess.run(accuracy, feed_dict={X: mnist.test.images,
-                                      Y: mnist.test.labels}))
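The Python original above was not simply dropped: per the file list, its Ruby port moved from `test_samples` into `samples` (`raw_neural_net_sample.rb`). As a rough, hypothetical sketch (not the shipped sample), the graph-construction half maps onto tensor_stream almost one to one:

```ruby
require "tensor_stream"

tf = TensorStream

# Network parameters, as in the Python original.
num_input   = 784
n_hidden_1  = 256
n_hidden_2  = 256
num_classes = 10

x = tf.placeholder(:float32, shape: [nil, num_input])
y = tf.placeholder(:float32, shape: [nil, num_classes])

weights = {
  h1:  tf.variable(tf.random_normal([num_input, n_hidden_1])),
  h2:  tf.variable(tf.random_normal([n_hidden_1, n_hidden_2])),
  out: tf.variable(tf.random_normal([n_hidden_2, num_classes]))
}
biases = {
  b1:  tf.variable(tf.random_normal([n_hidden_1])),
  b2:  tf.variable(tf.random_normal([n_hidden_2])),
  out: tf.variable(tf.random_normal([num_classes]))
}

# Same layer algebra as neural_net(x) above.
layer_1 = tf.add(tf.matmul(x, weights[:h1]), biases[:b1])
layer_2 = tf.add(tf.matmul(layer_1, weights[:h2]), biases[:b2])
logits  = tf.matmul(layer_2, weights[:out]) + biases[:out]
```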
data/test_samples/test.py
DELETED
@@ -1,46 +0,0 @@
-import tensorflow as tf
-
-test_inputs = [
-    [0.5937, 0.2343, 1.4332, 0.4395],
-    [-1.0227, -0.6915, 1.2367, 0.3452],
-    [-0.5675, 1.0374, 1.0429, 0.8839],
-    [-0.1066, -0.0469, -1.6317, -1.4836],
-    [0.7835, -3.0105, 1.713, -0.4536],
-    [-0.3076, 1.3662, -0.6537, 0.0905],
-    [-0.2459, 0.2243, -2.7048, 0.848],
-]
-
-num_inputs = 4
-num_neurons = 5
-inputs = tf.placeholder("float", shape=(None, num_inputs))
-biases = tf.constant([0.5012, 1.302, -1.6217, 0.669, 0.1494], name='b1')
-biases2 = tf.constant([0.2012, 1.102, -1.5217, 0.469, 0.0494], name='b2')
-
-weights = tf.constant([
-    [-0.9135, 1.0376, 0.8537, 0.4376, 1.3255],
-    [-0.5921, -1.4081, 1.0614, -0.5283, 1.1832],
-    [0.7285, -0.7844, 0.1793, -0.5275, -0.4426],
-    [-1.4976, 0.4433, 2.2317, -2.0479, 0.7791]], name='w')
-
-weights_layer2 = tf.constant([
-    [-1.0465, -0.8766, 1.6849, -0.6625, 0.7928],
-    [2.0412, 1.3564, 0.7905, 0.6434, -2.5495],
-    [2.4276, -0.6893, -1.5917, 0.0911, 0.9112],
-    [-0.012, 0.0794, 1.3829, -1.018, -0.9328],
-    [0.061, 0.9791, -2.1727, -0.9553, -1.434]], name='w2')
-
-
-sess = tf.Session()
-
-layer_1 = tf.matmul(inputs, weights) + biases
-neural_net = tf.matmul(layer_1, weights_layer2) + biases2
-
-output = sess.run(neural_net, feed_dict={inputs: test_inputs})
-
-g0 = tf.gradients(layer_1, [weights, biases])
-g = tf.gradients(neural_net, [weights, biases])
-g2 = tf.gradients(neural_net, [weights_layer2, biases2])
-
-weight_gradient0, biases_gradient0 = sess.run(g0, feed_dict={inputs: test_inputs})
-weight_gradient, biases_gradient = sess.run(g, feed_dict={inputs: test_inputs})
-weight_gradient2, biases_gradient2 = sess.run(g2, feed_dict={inputs: test_inputs})
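`test.py` was evidently used to cross-check `tf.gradients` output against the gem. For reference, a small sketch of the same check on the tensor_stream side, assuming its `tf.gradients` follows TensorFlow's contract (one gradient tensor per listed input, here the constant weights and biases); the names are illustrative:

```ruby
require "tensor_stream"

tf = TensorStream

w = tf.constant([[1.0, 2.0], [3.0, 4.0]], name: "w")
b = tf.constant([0.5, -0.5], name: "b")
inputs = tf.placeholder(:float32)

layer = tf.matmul(inputs, w) + b
grads = tf.gradients(layer, [w, b])

tf.session do |sess|
  # tensor_stream's Session#run takes tensors as varargs, feed_dict last.
  w_grad, b_grad = sess.run(grads[0], grads[1],
                            feed_dict: { inputs => [[1.0, 0.0], [0.0, 1.0]] })
  p w_grad
  p b_grad
end
```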
data/test_samples/test2.py
DELETED
@@ -1,87 +0,0 @@
-import tensorflow as tf
-
-batch_x = [
-    [0.686274, 0.10196, 0.6509, 1.0, 0.9686, 0.49803, 0.0, 0.0, 0.0, 0.0],
-    [0.543244, 0.10123, 0.4509, 0.0, 0.6986, 0.39803, 1.0, 0.0, 0.0, 0.0]]
-
-batch_y = [
-    [0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0],
-    [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0]
-]
-
-num_input = 10
-num_classes = 10
-n_hidden_1 = 4  # 1st layer number of neurons
-n_hidden_2 = 4  # 2nd layer number of neurons
-
-X = batch_x  # tf.placeholder(tf.float32, shape=[None, num_input])
-Y = batch_y  # tf.placeholder(tf.float32, shape=[None, num_classes])
-
-h1_init = tf.constant([[0.5937, 0.2343, 1.4332, 0.4395],
-                       [-1.0227, -0.6915, 1.2367, 0.3452],
-                       [-0.5675, 1.0374, 1.0429, 0.8839],
-                       [-0.1066, -0.0469, -1.6317, -1.4836],
-                       [0.7835, -3.0105, 1.713, -0.4536],
-                       [-0.3076, 1.3662, -0.6537, 0.0905],
-                       [-0.2459, 0.2243, -2.7048, 0.848],
-                       [0.3589, 0.3542, -0.0959, -1.327],
-                       [-0.4685, 0.0844, 0.2794, 2.1275],
-                       [-1.0733, 0.6189, 0.845, 0.033]])
-
-h2_init = tf.constant([[0.5012, 1.302, -1.6217, 0.669], [0.1494, -0.7837, -0.2978, 1.7745], [1.9727, -0.5312, -0.7391, 0.9187], [-0.6412, -1.4434, -0.8801, 0.9343]])
-h3_init = tf.constant([[0.5012, 1.302, -1.6217, 0.669, 0.1494, -0.7837, -0.2978, 1.7745, 1.9727, -0.5312],
-                       [-0.7391, 0.9187, -0.6412, -1.4434, -0.8801, 0.9343, -0.1665, -0.0032, 0.2959, -2.0488],
-                       [-0.9135, 1.0376, 0.8537, 0.4376, 1.3255, -0.5921, -1.4081, 1.0614, -0.5283, 1.1832],
-                       [0.7285, -0.7844, 0.1793, -0.5275, -0.4426, -1.4976, 0.4433, 2.2317, -2.0479, 0.7791]])
-
-
-b1_init = tf.constant([0.1494, -0.7837, -0.2978, 1.7745])
-
-b2_init = tf.constant([1.9727, -0.5312, -0.7391, 0.9187])
-out_init = tf.constant([-0.6412, -1.4434, -0.8801, 0.9343, -0.1665, -0.0032, 0.2959, -2.0488, -0.9135, 1.0376])
-
-h1 = tf.Variable(h1_init, dtype=tf.float32, name='h1')
-h2 = tf.Variable(h2_init, dtype=tf.float32, name='h2')
-h3 = tf.Variable(h3_init, dtype=tf.float32, name='out')
-
-b1 = tf.Variable(b1_init, dtype=tf.float32, name='b1')
-b2 = tf.Variable(b2_init, dtype=tf.float32, name='b2')
-out = tf.Variable(out_init, dtype=tf.float32, name='out2')
-
-layer_1 = tf.add(tf.matmul(X, h1), b1)
-# Hidden fully connected layer with 4 neurons
-layer_2 = tf.add(tf.matmul(layer_1, h2), b2)
-# Output fully connected layer with a neuron for each class
-
-sess = tf.Session()
-
-logits = tf.matmul(layer_2, h3) + out
-prediction = tf.nn.softmax(logits)
-
-loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=Y))
-
-optimizer = tf.train.GradientDescentOptimizer(0.01)
-train_op = optimizer.minimize(loss_op)
-init = tf.global_variables_initializer()
-
-sess.run(init)
-# print(sess.run(layer_1))
-tf.gradients(loss_op, [logits])
-print("------------")
-
-print("H1: ", sess.run(h1))
-print("------------ Running train 1")
-# sess.run(train_op, feed_dict={X: batch_x, Y: batch_y})
-sess.run(train_op)
-print("H1:", sess.run(h1))
-print("H2:", sess.run(h2))
-print("H3:", sess.run(h3))
-
-print(sess.run(b1))
-print(sess.run(b2))
-print(sess.run(out))
-
-# sess.run(train_op, feed_dict={X: batch_x, Y: batch_y})
-print("------------- Running train 2")
-sess.run(train_op)
-print("H1:", sess.run(h1))