tensor_stream 1.0.0 → 1.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.gitignore +1 -0
- data/.rubocop.yml +1 -0
- data/Gemfile +1 -1
- data/LICENSE.txt +1 -1
- data/README.md +34 -34
- data/Rakefile +3 -3
- data/USAGE_GUIDE.md +235 -0
- data/bin/stubgen +20 -0
- data/exe/model_utils +2 -2
- data/lib/tensor_stream.rb +45 -44
- data/lib/tensor_stream/constant.rb +2 -2
- data/lib/tensor_stream/control_flow.rb +1 -1
- data/lib/tensor_stream/debugging/debugging.rb +2 -2
- data/lib/tensor_stream/dynamic_stitch.rb +2 -2
- data/lib/tensor_stream/evaluator/base_evaluator.rb +18 -18
- data/lib/tensor_stream/evaluator/buffer.rb +1 -1
- data/lib/tensor_stream/evaluator/evaluator.rb +2 -2
- data/lib/tensor_stream/evaluator/operation_helpers/array_ops_helper.rb +41 -41
- data/lib/tensor_stream/evaluator/operation_helpers/math_helper.rb +1 -1
- data/lib/tensor_stream/evaluator/ruby/array_ops.rb +39 -39
- data/lib/tensor_stream/evaluator/ruby/check_ops.rb +2 -2
- data/lib/tensor_stream/evaluator/ruby/images_ops.rb +18 -18
- data/lib/tensor_stream/evaluator/ruby/math_ops.rb +13 -14
- data/lib/tensor_stream/evaluator/ruby/nn_ops.rb +33 -36
- data/lib/tensor_stream/evaluator/ruby/random_ops.rb +20 -21
- data/lib/tensor_stream/evaluator/ruby_evaluator.rb +36 -49
- data/lib/tensor_stream/exceptions.rb +1 -1
- data/lib/tensor_stream/generated_stub/ops.rb +691 -0
- data/lib/tensor_stream/generated_stub/stub_file.erb +24 -0
- data/lib/tensor_stream/graph.rb +18 -18
- data/lib/tensor_stream/graph_builder.rb +17 -17
- data/lib/tensor_stream/graph_deserializers/protobuf.rb +97 -97
- data/lib/tensor_stream/graph_deserializers/yaml_loader.rb +1 -1
- data/lib/tensor_stream/graph_keys.rb +3 -3
- data/lib/tensor_stream/graph_serializers/graphml.rb +33 -33
- data/lib/tensor_stream/graph_serializers/packer.rb +23 -23
- data/lib/tensor_stream/graph_serializers/pbtext.rb +38 -42
- data/lib/tensor_stream/graph_serializers/serializer.rb +3 -2
- data/lib/tensor_stream/graph_serializers/yaml.rb +5 -5
- data/lib/tensor_stream/helpers/infer_shape.rb +56 -56
- data/lib/tensor_stream/helpers/op_helper.rb +8 -9
- data/lib/tensor_stream/helpers/string_helper.rb +15 -15
- data/lib/tensor_stream/helpers/tensor_mixins.rb +17 -17
- data/lib/tensor_stream/images.rb +1 -1
- data/lib/tensor_stream/initializer.rb +1 -1
- data/lib/tensor_stream/math_gradients.rb +28 -187
- data/lib/tensor_stream/monkey_patches/array.rb +1 -1
- data/lib/tensor_stream/monkey_patches/float.rb +1 -1
- data/lib/tensor_stream/monkey_patches/integer.rb +1 -1
- data/lib/tensor_stream/monkey_patches/op_patch.rb +5 -5
- data/lib/tensor_stream/monkey_patches/patch.rb +1 -1
- data/lib/tensor_stream/nn/nn_ops.rb +17 -15
- data/lib/tensor_stream/op_maker.rb +180 -0
- data/lib/tensor_stream/operation.rb +17 -17
- data/lib/tensor_stream/ops.rb +95 -384
- data/lib/tensor_stream/ops/add.rb +23 -0
- data/lib/tensor_stream/ops/argmax.rb +14 -0
- data/lib/tensor_stream/ops/argmin.rb +14 -0
- data/lib/tensor_stream/ops/case.rb +17 -0
- data/lib/tensor_stream/ops/cast.rb +15 -0
- data/lib/tensor_stream/ops/ceil.rb +15 -0
- data/lib/tensor_stream/ops/const.rb +0 -0
- data/lib/tensor_stream/ops/cos.rb +10 -0
- data/lib/tensor_stream/ops/div.rb +21 -0
- data/lib/tensor_stream/ops/equal.rb +15 -0
- data/lib/tensor_stream/ops/expand_dims.rb +17 -0
- data/lib/tensor_stream/ops/fill.rb +19 -0
- data/lib/tensor_stream/ops/floor.rb +15 -0
- data/lib/tensor_stream/ops/floor_div.rb +15 -0
- data/lib/tensor_stream/ops/greater.rb +11 -0
- data/lib/tensor_stream/ops/greater_equal.rb +11 -0
- data/lib/tensor_stream/ops/less_equal.rb +15 -0
- data/lib/tensor_stream/ops/log.rb +14 -0
- data/lib/tensor_stream/ops/mat_mul.rb +60 -0
- data/lib/tensor_stream/ops/max.rb +15 -0
- data/lib/tensor_stream/ops/min.rb +15 -0
- data/lib/tensor_stream/ops/mod.rb +23 -0
- data/lib/tensor_stream/ops/mul.rb +21 -0
- data/lib/tensor_stream/ops/negate.rb +14 -0
- data/lib/tensor_stream/ops/ones_like.rb +19 -0
- data/lib/tensor_stream/ops/pow.rb +25 -0
- data/lib/tensor_stream/ops/prod.rb +60 -0
- data/lib/tensor_stream/ops/random_uniform.rb +18 -0
- data/lib/tensor_stream/ops/range.rb +20 -0
- data/lib/tensor_stream/ops/rank.rb +13 -0
- data/lib/tensor_stream/ops/reshape.rb +24 -0
- data/lib/tensor_stream/ops/round.rb +15 -0
- data/lib/tensor_stream/ops/shape.rb +14 -0
- data/lib/tensor_stream/ops/sigmoid.rb +10 -0
- data/lib/tensor_stream/ops/sign.rb +12 -0
- data/lib/tensor_stream/ops/sin.rb +10 -0
- data/lib/tensor_stream/ops/size.rb +16 -0
- data/lib/tensor_stream/ops/sub.rb +24 -0
- data/lib/tensor_stream/ops/sum.rb +27 -0
- data/lib/tensor_stream/ops/tan.rb +12 -0
- data/lib/tensor_stream/ops/tanh.rb +10 -0
- data/lib/tensor_stream/ops/tile.rb +19 -0
- data/lib/tensor_stream/ops/zeros.rb +15 -0
- data/lib/tensor_stream/placeholder.rb +2 -2
- data/lib/tensor_stream/profile/report_tool.rb +3 -3
- data/lib/tensor_stream/session.rb +36 -38
- data/lib/tensor_stream/tensor.rb +2 -2
- data/lib/tensor_stream/tensor_shape.rb +4 -4
- data/lib/tensor_stream/train/adadelta_optimizer.rb +8 -8
- data/lib/tensor_stream/train/adagrad_optimizer.rb +3 -3
- data/lib/tensor_stream/train/adam_optimizer.rb +11 -11
- data/lib/tensor_stream/train/learning_rate_decay.rb +2 -2
- data/lib/tensor_stream/train/momentum_optimizer.rb +7 -7
- data/lib/tensor_stream/train/optimizer.rb +9 -9
- data/lib/tensor_stream/train/rmsprop_optimizer.rb +16 -16
- data/lib/tensor_stream/train/saver.rb +14 -14
- data/lib/tensor_stream/train/slot_creator.rb +6 -6
- data/lib/tensor_stream/train/utils.rb +12 -12
- data/lib/tensor_stream/trainer.rb +10 -10
- data/lib/tensor_stream/types.rb +1 -1
- data/lib/tensor_stream/utils.rb +33 -32
- data/lib/tensor_stream/utils/freezer.rb +5 -5
- data/lib/tensor_stream/variable.rb +5 -5
- data/lib/tensor_stream/variable_scope.rb +1 -1
- data/lib/tensor_stream/version.rb +1 -1
- data/samples/{iris.data → datasets/iris.data} +0 -0
- data/samples/jupyter_notebooks/linear_regression.ipynb +463 -0
- data/samples/{iris.rb → neural_networks/iris.rb} +21 -23
- data/samples/{mnist_data.rb → neural_networks/mnist_data.rb} +8 -8
- data/samples/neural_networks/raw_neural_net_sample.rb +112 -0
- data/samples/{rnn.rb → neural_networks/rnn.rb} +28 -31
- data/samples/{nearest_neighbor.rb → others/nearest_neighbor.rb} +12 -12
- data/samples/regression/linear_regression.rb +63 -0
- data/samples/{logistic_regression.rb → regression/logistic_regression.rb} +14 -16
- data/tensor_stream.gemspec +9 -8
- metadata +89 -19
- data/data_1.json +0 -4764
- data/data_2.json +0 -4764
- data/data_actual.json +0 -28
- data/data_expected.json +0 -28
- data/data_input.json +0 -28
- data/samples/error.graphml +0 -2755
- data/samples/gradient_sample.graphml +0 -1255
- data/samples/linear_regression.rb +0 -69
- data/samples/multigpu.rb +0 -73
- data/samples/raw_neural_net_sample.rb +0 -112
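Most of this release is a mechanical reorganization: lib/tensor_stream/ops.rb shrinks by roughly 290 lines while each operation moves into its own file under lib/tensor_stream/ops/, driven by the new op_maker.rb and the bin/stubgen generator that emits generated_stub/ops.rb. The caller-facing API is unchanged; a minimal sketch of how these ops are still invoked, based on usage in the bundled samples rather than on the generated stubs themselves:

    require "tensor_stream"

    tf = TensorStream
    a = tf.placeholder(:float32)
    b = tf.placeholder(:float32)
    sum = tf.add(a, b)   # tf.add now lives in lib/tensor_stream/ops/add.rb

    sess = tf.session
    puts sess.run(sum, feed_dict: {a => 2.0, b => 3.0})  # => 5.0

The sample diffs below show the same release-wide style pass (single to double quotes, spacing, explicit block braces) plus the relocation of each sample into a topic subdirectory.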
data/samples/{iris.rb → neural_networks/iris.rb}

@@ -1,34 +1,34 @@
 require "bundler/setup"
-require
-# require 'tensor_stream/
+require "tensor_stream"
+# require 'tensor_stream/opencl'
 
 # This neural network will predict the species of an iris based on sepal and petal size
 # Dataset: http://en.wikipedia.org/wiki/Iris_flower_data_set
 tf = TensorStream
-rows = File.readlines(
+rows = File.readlines("samples/datasets/iris.data").map {|l| l.chomp.split(",") }
 
 rows.shuffle!
 
 label_encodings = {
-
-
-
+  "Iris-setosa" => [1, 0, 0],
+  "Iris-versicolor" => [0, 1, 0],
+  "Iris-virginica" => [0, 0, 1],
 }
 
-x_data = rows.map {|row| row[0,4].map(&:to_f) }
+x_data = rows.map {|row| row[0, 4].map(&:to_f) }
 y_data = rows.map {|row| label_encodings[row[4]] }
 
 # Normalize data values before feeding into network
-normalize = ->
+normalize = ->(val, high, low) { (val - low) / (high - low) } # maps input to float between 0 and 1
 
-columns = (0..3).map
+columns = (0..3).map { |i|
   x_data.map {|row| row[i] }
-
+}
 
 x_data.map! do |row|
   row.map.with_index do |val, j|
     max, min = columns[j].max, columns[j].min
-    normalize.(val, max, min)
+    normalize.call(val, max, min)
   end
 end
 

@@ -45,11 +45,9 @@ end
 
 validation_cases = []
 x_test.each_with_index do |x, index|
-  validation_cases << [x, y_test[index]
+  validation_cases << [x, y_test[index]]
 end
 
-
-
 def init_weights(shape)
   # Weight initialization
   TensorStream.random_normal(shape, stddev: 0.1).var

@@ -58,8 +56,8 @@ end
 def forwardprop(x, w_1, w_2)
   # Forward-propagation.
   # IMPORTANT: yhat is not softmax since TensorFlow's softmax_cross_entropy_with_logits() does that internally.
-  h
-  h.matmul w_2
+  h = TensorStream.nn.sigmoid(x.matmul(w_1)) # The \sigma function
+  h.matmul w_2 # The \varphi function
 end
 
 x_size = x_train[0].size

@@ -81,15 +79,15 @@ cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels: y, logi
 
 # updates = TensorStream::Train::GradientDescentOptimizer.new(0.01).minimize(cost)
 # updates = TensorStream::Train::MomentumOptimizer.new(0.01, 0.5, use_nesterov: true).minimize(cost)
-updates =
+updates = TensorStream::Train::RMSPropOptimizer.new(0.01).minimize(cost)
 
 # Run SGD
 sess = tf.session
 init = tf.global_variables_initializer
 sess.run(init)
-loss = sess.run(cost, feed_dict: {
+loss = sess.run(cost, feed_dict: {X => x_test, y => y_test})
 puts "loss test data set #{loss}"
-loss = sess.run(cost, feed_dict: {
+loss = sess.run(cost, feed_dict: {X => x_train, y => y_train})
 puts "Testing the untrained network..."
 puts loss
 start_time = Time.now

@@ -98,12 +96,12 @@ start_time = Time.now
     sess.run(updates, feed_dict: {X => [x_train[i]], y => [y_train[i]]})
   end
 
-  loss = sess.run(cost, feed_dict: {
+  loss = sess.run(cost, feed_dict: {X => x_train, y => y_train})
   puts "epoch: #{epoch}, loss #{loss}"
 end
 
-loss = sess.run(cost, feed_dict: {
+loss = sess.run(cost, feed_dict: {X => x_train, y => y_train})
 puts "loss after training #{loss}"
-loss = sess.run(cost, feed_dict: {
+loss = sess.run(cost, feed_dict: {X => x_test, y => y_test})
 puts "loss test data set #{loss}"
-puts("time elapsed ", Time.now.to_i - start_time.to_i)
+puts("time elapsed ", Time.now.to_i - start_time.to_i)
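One cleanup worth noting in the iris sample: the lambda call shorthand normalize.(...) becomes the explicit normalize.call(...). The two forms are equivalent Ruby; a standalone illustration:

    normalize = ->(val, high, low) { (val - low) / (high - low) }

    normalize.call(5.0, 10.0, 0.0)  # => 0.5
    normalize.(5.0, 10.0, 0.0)      # => 0.5, `.()` is sugar for `.call`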
data/samples/{mnist_data.rb → neural_networks/mnist_data.rb}

@@ -7,17 +7,18 @@
 # mnist-learn gem
 # opencl_ruby_ffi gem
 require "bundler/setup"
-require
-require
+require "tensor_stream"
+require "mnist-learn"
 
 # Enable OpenCL hardware accelerated computation, not using OpenCL can be very slow
-#
+# gem install tensor_stream-opencl
+require 'tensor_stream/opencl'
 
 tf = TensorStream
 
 # Import MNIST data
 puts "downloading minst data"
-mnist = Mnist.read_data_sets(
+mnist = Mnist.read_data_sets("/tmp/data", one_hot: true)
 puts "downloading finished"
 
 x = Float.placeholder shape: [nil, 784]

@@ -45,19 +46,18 @@ sess.run(init)
 (0...1000).each do |i|
   # load batch of images and correct answers
   batch_x, batch_y = mnist.train.next_batch(100)
-  train_data = {
+  train_data = {x => batch_x, y_ => batch_y}
 
   # train
   sess.run(train_step, feed_dict: train_data)
-  if
+  if i % 10 == 0
     # success? add code to print it
     a, c = sess.run([accuracy, cross_entropy], feed_dict: train_data)
     puts "#{i} train accuracy #{a}, error #{c}"
 
     # success on test data?
-    test_data = {
+    test_data = {x => mnist.test.images, y_ => mnist.test.labels}
     a, c = sess.run([accuracy, cross_entropy], feed_dict: test_data)
     puts " test accuracy #{a}, error #{c}"
   end
 end
-
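The MNIST sample now requires tensor_stream/opencl unconditionally, alongside the gem install tensor_stream-opencl hint. If the acceleration gem may be absent, a guarded require keeps such a script runnable on the pure-Ruby evaluator; a sketch, assuming only that a missing gem raises LoadError:

    require "tensor_stream"
    begin
      require "tensor_stream/opencl"  # optional: gem install tensor_stream-opencl
    rescue LoadError
      warn "tensor_stream-opencl not found, using the pure-Ruby evaluator"
    end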
data/samples/neural_networks/raw_neural_net_sample.rb (new file)

@@ -0,0 +1,112 @@
+""" Neural Network.
+
+A 2-Hidden Layers Fully Connected Neural Network (a.k.a Multilayer Perceptron)
+implementation with TensorFlow. This example is using the MNIST database
+of handwritten digits (http://yann.lecun.com/exdb/mnist/).
+
+Links:
+    [MNIST Dataset](http://yann.lecun.com/exdb/mnist/).
+
+Author: Aymeric Damien
+Project: https://github.com/aymericdamien/TensorFlow-Examples/
+
+The mnist-learn gem is required as well as an OpenCL compatible device with drivers correctly installed
+"""
+require "bundler/setup"
+require "tensor_stream"
+require "mnist-learn"
+
+tf = TensorStream
+# Import MNIST data
+puts "downloading minst data"
+mnist = Mnist.read_data_sets("/tmp/data", one_hot: true)
+puts "downloading finished"
+
+# Parameters
+learning_rate = 0.001
+momentum = 0.01
+num_steps = 100
+batch_size = 128
+display_step = 5
+
+# Network Parameters
+n_hidden_1 = 256 # 1st layer number of neurons
+n_hidden_2 = 256 # 2nd layer number of neurons
+num_input = 784 # MNIST data input (img shape: 28*28)
+num_classes = 10 # MNIST total classes (0-9 digits)
+
+# tf Graph input
+X = tf.placeholder(:float64, shape: [nil, num_input])
+Y = tf.placeholder(:float64, shape: [nil, num_classes])
+
+# Store layers weight & bias
+weights = {
+  "h1" => tf.variable(tf.random_normal([num_input, n_hidden_1]), dtype: :float64, name: "h1"),
+  "h2" => tf.variable(tf.random_normal([n_hidden_1, n_hidden_2]), dtype: :float64, name: "h2"),
+  "out" => tf.variable(tf.random_normal([n_hidden_2, num_classes]), dtype: :float64, name: "out"),
+}
+
+biases = {
+  "b1" => tf.variable(tf.random_normal([n_hidden_1]), dtype: :float64, name: "b1"),
+  "b2" => tf.variable(tf.random_normal([n_hidden_2]), dtype: :float64, name: "b2"),
+  "out" => tf.variable(tf.random_normal([num_classes]), dtype: :float64, name: "out2"),
+}
+
+# Create model
+def neural_net(x, weights, biases)
+  tf = TensorStream
+  # Hidden fully connected layer with 256 neurons
+  layer_1 = tf.add(tf.matmul(x, weights["h1"]), biases["b1"])
+  # Hidden fully connected layer with 256 neurons
+  layer_2 = tf.add(tf.matmul(layer_1, weights["h2"]), biases["b2"])
+  # Output fully connected layer with a neuron for each class
+  tf.matmul(layer_2, weights["out"]) + biases["out"]
+end
+
+# Construct model
+logits = neural_net(X, weights, biases)
+prediction = tf.nn.softmax(logits)
+
+# Define loss and optimizer
+loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(
+  logits: logits, labels: Y
+))
+
+optimizer = TensorStream::Train::MomentumOptimizer.new(learning_rate, momentum, use_nesterov: true)
+train_op = optimizer.minimize(loss_op)
+
+# Evaluate model
+correct_pred = tf.equal(tf.argmax(prediction, 1), tf.argmax(Y, 1))
+accuracy = tf.reduce_mean(tf.cast(correct_pred, :float32))
+
+# tf.add_check_numerics_ops
+
+# Initialize the variables (i.e. assign their default value)
+init = tf.global_variables_initializer
+
+# Start training
+tf.session do |sess|
+  # Run the initializer
+  sess.run(init)
+
+  print("Testing Accuracy:", \
+    sess.run(accuracy, feed_dict: {X => mnist.test.images,
+                                   Y => mnist.test.labels,}))
+
+  (1..num_steps + 1).each do |step|
+    batch_x, batch_y = mnist.train.next_batch(batch_size)
+    # Run optimization op (backprop)
+    sess.run(train_op, feed_dict: {X => batch_x, Y => batch_y})
+    if step % display_step == 0 || step == 1
+      # Calculate batch loss and accuracy
+      loss, acc = sess.run([loss_op, accuracy], feed_dict: {X => batch_x, Y => batch_y})
+      print("\nStep " + step.to_s + ", Minibatch Loss= " + \
+        loss.to_s + ", Training Accuracy= " + \
+        acc.to_s)
+    end
+  end
+  print("\nOptimization Finished!")
+  print("\nTesting Accuracy after optimization:", \
+    sess.run(accuracy, feed_dict: {X => mnist.test.images,
+                                   Y => mnist.test.labels,}))
+end
data/samples/{rnn.rb → neural_networks/rnn.rb}

@@ -6,11 +6,10 @@
 #
 
 require "bundler/setup"
-require
+require "tensor_stream"
 # require 'tensor_stream/opencl'
 # require 'pry-byebug'
 
-
 tf = TensorStream
 
 num_epochs = 100

@@ -23,30 +22,27 @@ batch_size = 5
 num_batches = total_series_length / batch_size / truncated_backprop_length
 randomizer = TensorStream.random_uniform([total_series_length], minval: 0, maxval: 2)
 
-
 def generate_data(randomizer, total_series_length, batch_size, echo_step)
   x = randomizer.eval
   y = x.rotate(-echo_step)
 
   y[echo_step] = 0
 
-  x = TensorStream::TensorShape.reshape(x, [batch_size, -1])
+  x = TensorStream::TensorShape.reshape(x, [batch_size, -1]) # The first index changing slowest, subseries as rows
   y = TensorStream::TensorShape.reshape(y, [batch_size, -1])
   [x, y]
 end
 
-batchX_placeholder = tf.placeholder(:float32, shape: [batch_size, truncated_backprop_length], name:
-batchY_placeholder = tf.placeholder(:int32, shape: [batch_size, truncated_backprop_length], name:
-
-init_state = tf.placeholder(:float32, shape: [batch_size, state_size], name: 'init_state')
-
+batchX_placeholder = tf.placeholder(:float32, shape: [batch_size, truncated_backprop_length], name: "batch_x")
+batchY_placeholder = tf.placeholder(:int32, shape: [batch_size, truncated_backprop_length], name: "batch_y")
 
-
-b = tf.variable(tf.zeros([state_size]), dtype: :float32, name: 'b')
+init_state = tf.placeholder(:float32, shape: [batch_size, state_size], name: "init_state")
 
-
-
+W = tf.variable(tf.random_uniform([state_size + 1, state_size]), dtype: :float32, name: "W")
+b = tf.variable(tf.zeros([state_size]), dtype: :float32, name: "b")
 
+W2 = tf.variable(tf.random_uniform([state_size, num_classes]), dtype: :float32, name: "W2")
+b2 = tf.variable(tf.zeros([num_classes]), dtype: :float32, name: "b2")
 
 inputs_series = tf.unpack(batchX_placeholder, axis: 1)
 labels_series = tf.unpack(batchY_placeholder, axis: 1)

@@ -56,23 +52,23 @@ states_series = []
 
 inputs_series.each do |current_input|
   current_input = tf.reshape(current_input, [batch_size, 1])
-  input_and_state_concatenated = tf.concat([current_input, current_state], 1)
-  next_state = tf.tanh(tf.matmul(input_and_state_concatenated, W) + b)
+  input_and_state_concatenated = tf.concat([current_input, current_state], 1) # Increasing number of columns
+  next_state = tf.tanh(tf.matmul(input_and_state_concatenated, W) + b) # Broadcasted addition
   states_series << next_state
   current_state = next_state
 end
 
-logits_series = states_series.collect
+logits_series = states_series.collect { |state|
   tf.matmul(state, W2) + b2
-
+}
 
-predictions_series = logits_series.collect
+predictions_series = logits_series.collect { |logits|
   tf.nn.softmax(logits)
-
+}
 
-losses = logits_series.zip(labels_series).collect
+losses = logits_series.zip(labels_series).collect { |logits, labels|
   tf.nn.sparse_softmax_cross_entropy_with_logits(logits: logits, labels: labels)
-
+}
 total_loss = tf.reduce_mean(losses)
 
 train_step = TensorStream::Train::AdagradOptimizer.new(0.1).minimize(total_loss)

@@ -93,16 +89,17 @@ tf.session do |sess|
     batchY = y.map { |y| y[start_idx...end_idx] }
 
     _total_loss, _train_step, _current_state, _predictions_series = sess.run(
-
-
-
-
-
-
-
-
-
+      [total_loss, train_step, current_state, predictions_series],
+      feed_dict: {
+        batchX_placeholder => batchX,
+        batchY_placeholder => batchY,
+        init_state => _current_state,
+      }
+    )
+
+    if batch_idx % 10 == 0
+      print("Step", batch_idx, " Loss ", _total_loss, "\n")
     end
   end
 end
-end
+end
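The restored sess.run call in the rnn sample relies on multi-fetch semantics, also visible in the other samples: passing an array of fetches returns the evaluated values in the same order, so one graph execution yields the loss, the training op's side effect, and the next RNN state together. In miniature (zero_state here is a hypothetical stand-in for the initial state batch):

    loss_val, _, state_val = sess.run(
      [total_loss, train_step, current_state],
      feed_dict: {init_state => zero_state}
    )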
data/samples/{nearest_neighbor.rb → others/nearest_neighbor.rb}

@@ -1,4 +1,4 @@
-'
+""'
 A nearest neighbor learning algorithm example using TensorFlow library.
 This example is using the MNIST database of handwritten digits
 (http://yann.lecun.com/exdb/mnist/)

@@ -7,19 +7,19 @@ Author: Aymeric Damien
 Project: https://github.com/aymericdamien/TensorFlow-Examples/
 
 Make sure to install the mnist-learn gem !!
-'
+'""
 require "bundler/setup"
-require
-require
+require "tensor_stream"
+require "mnist-learn"
 
 tf = TensorStream
 
 # Import MNIST data
-mnist = Mnist.read_data_sets(
+mnist = Mnist.read_data_sets("/tmp/data", one_hot: true)
 
 # In this example, we limit mnist data
-Xtr, Ytr = mnist.train.next_batch(5000) #5000 for training (nn candidates)
-Xte, Yte = mnist.test.next_batch(200) #200 for testing
+Xtr, Ytr = mnist.train.next_batch(5000) # 5000 for training (nn candidates)
+Xte, Yte = mnist.test.next_batch(200) # 200 for testing
 
 # tf Graph Input
 xtr = tf.placeholder(:float, shape: [nil, 784])

@@ -34,7 +34,7 @@ pred = tf.argmin(distance, 0)
 accuracy = 0.0
 
 # Initialize the variables (i.e. assign their default value)
-init = tf.global_variables_initializer
+init = tf.global_variables_initializer
 
 # Start training
 tf.session do |sess|

@@ -43,13 +43,13 @@ tf.session do |sess|
   Xte.size.times do |i|
     # Get nearest neighbor
     nn_index = sess.run(pred, feed_dict: {xtr => Xtr, xte => Xte[i]})
-    print("Test ", i, "Prediction: ",Ytr[nn_index].max, \
-
+    print("Test ", i, "Prediction: ", Ytr[nn_index].max, \
+      "True Class: ", Yte[i].max, "\n")
     if Ytr[nn_index].max == Yte[i].max
-      accuracy += 1.0/ Xte.size
+      accuracy += 1.0 / Xte.size
     end
   end
 
   print("Done!")
   print("Accuracy:", accuracy)
-end
+end
data/samples/regression/linear_regression.rb (new file)

@@ -0,0 +1,63 @@
+require "tensor_stream"
+
+tf = TensorStream
+
+learning_rate = 0.01
+training_epochs = 1000
+display_step = 50
+
+train_x = [3.3, 4.4, 5.5, 6.71, 6.93, 4.168, 9.779, 6.182, 7.59, 2.167,
+  7.042, 10.791, 5.313, 7.997, 5.654, 9.27, 3.1,]
+
+train_y = [1.7, 2.76, 2.09, 3.19, 1.694, 1.573, 3.366, 2.596, 2.53, 1.221,
+  2.827, 3.465, 1.65, 2.904, 2.42, 2.94, 1.3,]
+
+n_samples = train_x.size
+
+x_value = Float.placeholder
+y_value = Float.placeholder
+
+# Set model weights
+weight = rand.t.var name: "weight"
+
+bias = rand.t.var name: "bias"
+
+# Construct a linear model
+pred = x_value * weight + bias
+
+# Mean squared error
+cost = ((pred - y_value)**2).reduce / (2 * n_samples)
+
+# Other optimizers --
+#
+# optimizer = TensorStream::Train::MomentumOptimizer.new(learning_rate, momentum, use_nesterov: true).minimize(cost)
+# optimizer = TensorStream::Train::AdamOptimizer.new(learning_rate).minimize(cost)
+# optimizer = TensorStream::Train::AdadeltaOptimizer.new(1.0).minimize(cost)
+# optimizer = TensorStream::Train::AdagradOptimizer.new(0.01).minimize(cost)
+# optimizer = TensorStream::Train::RMSPropOptimizer.new(0.01, centered: true).minimize(cost)
+optimizer = TensorStream::Train::GradientDescentOptimizer.new(learning_rate).minimize(cost)
+
+# Initialize the variables (i.e. assign their default value)
+init = tf.global_variables_initializer
+
+tf.session do |sess|
+  start_time = Time.now
+  sess.run(init)
+
+  (0..training_epochs).each do |epoch|
+    train_x.zip(train_y).each do |x, y|
+      sess.run(optimizer, feed_dict: {x_value => x, y_value => y})
+    end
+
+    if (epoch + 1) % display_step == 0
+      c = sess.run(cost, feed_dict: {x_value => train_x, y_value => train_y})
+      puts("Epoch:", "%04d" % (epoch + 1), "cost=", c, \
+        "W=", sess.run(weight), "b=", sess.run(bias))
+    end
+  end
+
+  puts "Optimization Finished!"
+  training_cost = sess.run(cost, feed_dict: {x_value => train_x, y_value => train_y})
+  puts "Training cost=", training_cost, "W=", sess.run(weight), "b=", sess.run(bias), '\n'
+  puts "time elapsed ", Time.now.to_i - start_time.to_i
+end
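The new regression sample leans on the terse helpers provided by this release's monkey patches: Float.placeholder for a typed scalar placeholder, #t to lift a plain Ruby number into a tensor, and #var to wrap it as a trainable variable. A condensed sketch of that model-definition style, using only constructs that appear in the sample above:

    x = Float.placeholder          # scalar float placeholder
    w = rand.t.var name: "weight"  # Float#t wraps the number as a tensor, #var makes it a variable
    b = rand.t.var name: "bias"

    pred = x * w + b               # operator overloads build graph nodes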