tensor_stream 1.0.0 → 1.0.1
- checksums.yaml +4 -4
- data/.gitignore +1 -0
- data/.rubocop.yml +1 -0
- data/Gemfile +1 -1
- data/LICENSE.txt +1 -1
- data/README.md +34 -34
- data/Rakefile +3 -3
- data/USAGE_GUIDE.md +235 -0
- data/bin/stubgen +20 -0
- data/exe/model_utils +2 -2
- data/lib/tensor_stream.rb +45 -44
- data/lib/tensor_stream/constant.rb +2 -2
- data/lib/tensor_stream/control_flow.rb +1 -1
- data/lib/tensor_stream/debugging/debugging.rb +2 -2
- data/lib/tensor_stream/dynamic_stitch.rb +2 -2
- data/lib/tensor_stream/evaluator/base_evaluator.rb +18 -18
- data/lib/tensor_stream/evaluator/buffer.rb +1 -1
- data/lib/tensor_stream/evaluator/evaluator.rb +2 -2
- data/lib/tensor_stream/evaluator/operation_helpers/array_ops_helper.rb +41 -41
- data/lib/tensor_stream/evaluator/operation_helpers/math_helper.rb +1 -1
- data/lib/tensor_stream/evaluator/ruby/array_ops.rb +39 -39
- data/lib/tensor_stream/evaluator/ruby/check_ops.rb +2 -2
- data/lib/tensor_stream/evaluator/ruby/images_ops.rb +18 -18
- data/lib/tensor_stream/evaluator/ruby/math_ops.rb +13 -14
- data/lib/tensor_stream/evaluator/ruby/nn_ops.rb +33 -36
- data/lib/tensor_stream/evaluator/ruby/random_ops.rb +20 -21
- data/lib/tensor_stream/evaluator/ruby_evaluator.rb +36 -49
- data/lib/tensor_stream/exceptions.rb +1 -1
- data/lib/tensor_stream/generated_stub/ops.rb +691 -0
- data/lib/tensor_stream/generated_stub/stub_file.erb +24 -0
- data/lib/tensor_stream/graph.rb +18 -18
- data/lib/tensor_stream/graph_builder.rb +17 -17
- data/lib/tensor_stream/graph_deserializers/protobuf.rb +97 -97
- data/lib/tensor_stream/graph_deserializers/yaml_loader.rb +1 -1
- data/lib/tensor_stream/graph_keys.rb +3 -3
- data/lib/tensor_stream/graph_serializers/graphml.rb +33 -33
- data/lib/tensor_stream/graph_serializers/packer.rb +23 -23
- data/lib/tensor_stream/graph_serializers/pbtext.rb +38 -42
- data/lib/tensor_stream/graph_serializers/serializer.rb +3 -2
- data/lib/tensor_stream/graph_serializers/yaml.rb +5 -5
- data/lib/tensor_stream/helpers/infer_shape.rb +56 -56
- data/lib/tensor_stream/helpers/op_helper.rb +8 -9
- data/lib/tensor_stream/helpers/string_helper.rb +15 -15
- data/lib/tensor_stream/helpers/tensor_mixins.rb +17 -17
- data/lib/tensor_stream/images.rb +1 -1
- data/lib/tensor_stream/initializer.rb +1 -1
- data/lib/tensor_stream/math_gradients.rb +28 -187
- data/lib/tensor_stream/monkey_patches/array.rb +1 -1
- data/lib/tensor_stream/monkey_patches/float.rb +1 -1
- data/lib/tensor_stream/monkey_patches/integer.rb +1 -1
- data/lib/tensor_stream/monkey_patches/op_patch.rb +5 -5
- data/lib/tensor_stream/monkey_patches/patch.rb +1 -1
- data/lib/tensor_stream/nn/nn_ops.rb +17 -15
- data/lib/tensor_stream/op_maker.rb +180 -0
- data/lib/tensor_stream/operation.rb +17 -17
- data/lib/tensor_stream/ops.rb +95 -384
- data/lib/tensor_stream/ops/add.rb +23 -0
- data/lib/tensor_stream/ops/argmax.rb +14 -0
- data/lib/tensor_stream/ops/argmin.rb +14 -0
- data/lib/tensor_stream/ops/case.rb +17 -0
- data/lib/tensor_stream/ops/cast.rb +15 -0
- data/lib/tensor_stream/ops/ceil.rb +15 -0
- data/lib/tensor_stream/ops/const.rb +0 -0
- data/lib/tensor_stream/ops/cos.rb +10 -0
- data/lib/tensor_stream/ops/div.rb +21 -0
- data/lib/tensor_stream/ops/equal.rb +15 -0
- data/lib/tensor_stream/ops/expand_dims.rb +17 -0
- data/lib/tensor_stream/ops/fill.rb +19 -0
- data/lib/tensor_stream/ops/floor.rb +15 -0
- data/lib/tensor_stream/ops/floor_div.rb +15 -0
- data/lib/tensor_stream/ops/greater.rb +11 -0
- data/lib/tensor_stream/ops/greater_equal.rb +11 -0
- data/lib/tensor_stream/ops/less_equal.rb +15 -0
- data/lib/tensor_stream/ops/log.rb +14 -0
- data/lib/tensor_stream/ops/mat_mul.rb +60 -0
- data/lib/tensor_stream/ops/max.rb +15 -0
- data/lib/tensor_stream/ops/min.rb +15 -0
- data/lib/tensor_stream/ops/mod.rb +23 -0
- data/lib/tensor_stream/ops/mul.rb +21 -0
- data/lib/tensor_stream/ops/negate.rb +14 -0
- data/lib/tensor_stream/ops/ones_like.rb +19 -0
- data/lib/tensor_stream/ops/pow.rb +25 -0
- data/lib/tensor_stream/ops/prod.rb +60 -0
- data/lib/tensor_stream/ops/random_uniform.rb +18 -0
- data/lib/tensor_stream/ops/range.rb +20 -0
- data/lib/tensor_stream/ops/rank.rb +13 -0
- data/lib/tensor_stream/ops/reshape.rb +24 -0
- data/lib/tensor_stream/ops/round.rb +15 -0
- data/lib/tensor_stream/ops/shape.rb +14 -0
- data/lib/tensor_stream/ops/sigmoid.rb +10 -0
- data/lib/tensor_stream/ops/sign.rb +12 -0
- data/lib/tensor_stream/ops/sin.rb +10 -0
- data/lib/tensor_stream/ops/size.rb +16 -0
- data/lib/tensor_stream/ops/sub.rb +24 -0
- data/lib/tensor_stream/ops/sum.rb +27 -0
- data/lib/tensor_stream/ops/tan.rb +12 -0
- data/lib/tensor_stream/ops/tanh.rb +10 -0
- data/lib/tensor_stream/ops/tile.rb +19 -0
- data/lib/tensor_stream/ops/zeros.rb +15 -0
- data/lib/tensor_stream/placeholder.rb +2 -2
- data/lib/tensor_stream/profile/report_tool.rb +3 -3
- data/lib/tensor_stream/session.rb +36 -38
- data/lib/tensor_stream/tensor.rb +2 -2
- data/lib/tensor_stream/tensor_shape.rb +4 -4
- data/lib/tensor_stream/train/adadelta_optimizer.rb +8 -8
- data/lib/tensor_stream/train/adagrad_optimizer.rb +3 -3
- data/lib/tensor_stream/train/adam_optimizer.rb +11 -11
- data/lib/tensor_stream/train/learning_rate_decay.rb +2 -2
- data/lib/tensor_stream/train/momentum_optimizer.rb +7 -7
- data/lib/tensor_stream/train/optimizer.rb +9 -9
- data/lib/tensor_stream/train/rmsprop_optimizer.rb +16 -16
- data/lib/tensor_stream/train/saver.rb +14 -14
- data/lib/tensor_stream/train/slot_creator.rb +6 -6
- data/lib/tensor_stream/train/utils.rb +12 -12
- data/lib/tensor_stream/trainer.rb +10 -10
- data/lib/tensor_stream/types.rb +1 -1
- data/lib/tensor_stream/utils.rb +33 -32
- data/lib/tensor_stream/utils/freezer.rb +5 -5
- data/lib/tensor_stream/variable.rb +5 -5
- data/lib/tensor_stream/variable_scope.rb +1 -1
- data/lib/tensor_stream/version.rb +1 -1
- data/samples/{iris.data → datasets/iris.data} +0 -0
- data/samples/jupyter_notebooks/linear_regression.ipynb +463 -0
- data/samples/{iris.rb → neural_networks/iris.rb} +21 -23
- data/samples/{mnist_data.rb → neural_networks/mnist_data.rb} +8 -8
- data/samples/neural_networks/raw_neural_net_sample.rb +112 -0
- data/samples/{rnn.rb → neural_networks/rnn.rb} +28 -31
- data/samples/{nearest_neighbor.rb → others/nearest_neighbor.rb} +12 -12
- data/samples/regression/linear_regression.rb +63 -0
- data/samples/{logistic_regression.rb → regression/logistic_regression.rb} +14 -16
- data/tensor_stream.gemspec +9 -8
- metadata +89 -19
- data/data_1.json +0 -4764
- data/data_2.json +0 -4764
- data/data_actual.json +0 -28
- data/data_expected.json +0 -28
- data/data_input.json +0 -28
- data/samples/error.graphml +0 -2755
- data/samples/gradient_sample.graphml +0 -1255
- data/samples/linear_regression.rb +0 -69
- data/samples/multigpu.rb +0 -73
- data/samples/raw_neural_net_sample.rb +0 -112
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 29a84327103d7c26e81d6d8c0dfa2b92f70941a00e2971ed6bfd0901fa3f516c
+  data.tar.gz: dbd793450ec664358e942eb6c92cff380be5ce5be90539bdf696859ae79446a5
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 62b9963921a2e6b5ff3d7ef4692c84839c6067be6cf6df0f43b189f560d0ccdac24f1df79216356bd06614ed48389085e2c6d64f31a10d15bcb4864218cb15f0
+  data.tar.gz: e0b41ffc15b7c53b26930a5df7088df22bb4e25ff6729609610acd1296be86c71b32234382791ca582afe8e35133d666b33145dac5774d5059fd73b86ba28e9d
data/.gitignore
CHANGED
data/.rubocop.yml
CHANGED
data/Gemfile
CHANGED
data/LICENSE.txt
CHANGED
@@ -1,6 +1,6 @@
 The MIT License (MIT)
 
-Copyright (c) 2018 Joseph Emmanuel Dayo
+Copyright (c) 2018-2019 Joseph Emmanuel Dayo
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
data/README.md
CHANGED
@@ -2,9 +2,9 @@
 
 # TensorStream
 
-
+TensorStream is an open-source machine learning framework for Ruby. Its goal is to allow machine learning models to be easily built and run on various hardware such as GPUs and CPUs. It is heavily based on TensorFlow, with the goal of being able to easily port its higher-level libraries and model examples. As such, it is also based on data flow graphs, wherein you define computations and the data flows between those computations in order to achieve the desired output.
 
-
+TensorStream is designed to support multiple backends, with Pure Ruby and OpenCL implementations available. These implementations are designed to work together: you can perform training on the OpenCL implementation (where you have a GPU) and then run the resulting trained model on the Pure Ruby implementation, which can be deployed anywhere Ruby runs. TensorStream has been tested on most Ruby implementations, including MRI, JRuby and TruffleRuby.
 
 ## Goals & Features
 
@@ -12,6 +12,7 @@ This is a framework is heavily influenced by tensorflow and aims to be familiar
 - Replicates most of the commonly used low-level tensorflow ops (tf.add, tf.constant, tf.placeholder, tf.matmul, tf.sin etc...)
 - Supports auto-differentiation using formal derivation
 - Extensible - use your own opcode evaluator (OpenCL and Pure ruby currently supported)
+- Wide support - runs on most Ruby implementations, with hardware acceleration on OpenCL-capable hardware
 
 ## Compatibility
 
@@ -68,32 +69,30 @@ learning_rate = 0.01
 training_epochs = 1000
 display_step = 50
 
-
-7.042,10.791,5.313,7.997,5.654,9.27,3.1]
-train_Y = [1.7,2.76,2.09,3.19,1.694,1.573,3.366,2.596,2.53,1.221,
-2.827,3.465,1.65,2.904,2.42,2.94,1.3]
+train_x = [3.3, 4.4, 5.5, 6.71, 6.93, 4.168, 9.779, 6.182, 7.59, 2.167,
+           7.042, 10.791, 5.313, 7.997, 5.654, 9.27, 3.1]
 
-
+train_y = [1.7, 2.76, 2.09, 3.19, 1.694, 1.573, 3.366, 2.596, 2.53, 1.221,
+           2.827, 3.465, 1.65, 2.904, 2.42, 2.94, 1.3]
 
-
-X = Float.placeholder
+n_samples = train_x.size
 
-
-
+x_value = Float.placeholder
+y_value = Float.placeholder
 
 # Set model weights
-
-W = rand.t.var name: "weight"
+weight = rand.t.var name: "weight"
 
-
-b = rand.t.var name: "bias"
+bias = rand.t.var name: "bias"
 
 # Construct a linear model
-pred =
+pred = x_value * weight + bias
 
 # Mean squared error
-cost = ((pred -
+cost = ((pred - y_value)**2).reduce / (2 * n_samples)
 
+# Other optimizers --
+#
 # optimizer = TensorStream::Train::MomentumOptimizer.new(learning_rate, momentum, use_nesterov: true).minimize(cost)
 # optimizer = TensorStream::Train::AdamOptimizer.new(learning_rate).minimize(cost)
 # optimizer = TensorStream::Train::AdadeltaOptimizer.new(1.0).minimize(cost)
@@ -102,27 +101,28 @@ cost = ((pred - Y) ** 2).reduce / ( 2 * n_samples)
 optimizer = TensorStream::Train::GradientDescentOptimizer.new(learning_rate).minimize(cost)
 
 # Initialize the variables (i.e. assign their default value)
-init = tf.global_variables_initializer
+init = tf.global_variables_initializer
 
 tf.session do |sess|
-
-
-
-
-
-
-
-  if (epoch+1) % display_step == 0
-    c = sess.run(cost, feed_dict: { X => train_X, Y => train_Y })
-    puts("Epoch:", '%04d' % (epoch+1), "cost=", c, \
-      "W=", sess.run(W), "b=", sess.run(b))
-  end
+  start_time = Time.now
+  sess.run(init)
+
+  (0..training_epochs).each do |epoch|
+    train_x.zip(train_y).each do |x, y|
+      sess.run(optimizer, feed_dict: { x_value => x, y_value => y })
     end
 
-
-
-
-
+    if (epoch + 1) % display_step == 0
+      c = sess.run(cost, feed_dict: { x_value => train_x, y_value => train_y })
+      puts("Epoch:", '%04d' % (epoch + 1), "cost=", c, \
+        "W=", sess.run(weight), "b=", sess.run(bias))
+    end
+  end
+
+  puts "Optimization Finished!"
+  training_cost = sess.run(cost, feed_dict: { x_value => train_x, y_value => train_y })
+  puts "Training cost=", training_cost, "W=", sess.run(weight), "b=", sess.run(bias), '\n'
+  puts "time elapsed ", Time.now.to_i - start_time.to_i
 end
 ```
 
data/Rakefile
CHANGED
@@ -1,12 +1,12 @@
 require "bundler/gem_tasks"
 require "rspec/core/rake_task"
-require
+require "rdoc/task"
 
 RSpec::Core::RakeTask.new(:spec)
 
-task :
+task default: :spec
 
 RDoc::Task.new do |rdoc|
   rdoc.main = "README.rdoc"
   rdoc.rdoc_files.include("README.rdoc", "lib /*.rb")
-end
+end
data/USAGE_GUIDE.md
ADDED
@@ -0,0 +1,235 @@
Introduction
============

This document describes the basic usage of TensorStream and serves as a walkthrough of its features and limitations, as well as the various tools available for debugging and development.

Since TensorStream is heavily based on TensorFlow, if you have used TensorFlow before then, aside from the syntax, a lot of things will be quite familiar.

What is TensorStream?
--------------------

TensorStream is an open-source machine learning framework for Ruby. Its goal is to allow machine learning models to be easily built and run on various hardware such as GPUs and CPUs. It is heavily based on TensorFlow, with the goal of being able to easily port its higher-level libraries and model examples. As such, it is also based on data flow graphs, wherein you define computations and the data flows between those computations in order to achieve the desired output.

TensorStream is designed to support multiple backends, with Pure Ruby and OpenCL implementations available. These implementations are designed to work together: you can perform training on the OpenCL implementation (where you have a GPU) and then run the resulting trained model on the Pure Ruby implementation, which can be deployed anywhere Ruby runs. TensorStream has been tested on most Ruby implementations, including MRI, JRuby and TruffleRuby.

Introduction to Tensors
-----------------------

"Tensor" is just a mathematical term for describing scalars and single- and multi-dimensional arrays in a consistent manner. Though there is a formal mathematical definition, for all intents and purposes tensors are how data like numbers and strings are represented and structured so they can be fed into operations, which in turn transform them into other values or change their structure.

Tensors have properties that describe their shape and rank as well as their data type.

Below are examples of constant tensors. Their values are immutable and cannot be changed.

```ruby
t1 = 1.0.t                      # a scalar tensor of rank 0
t2 = [1.0, 2.1].t               # a float tensor of rank 1
t3 = [[2.0, 2.1], [2.1, 2.2]].t # a float tensor of rank 2
t4 = [[2, 2], [3, 3]].t         # an integer tensor of rank 2

# alternatively you can use tensorflow style constant definition
ts = TensorStream
t1 = ts.constant(1.0)
```

Notice that you can create a constant tensor by calling the .t method on an Integer, Float or Array. You can also use the TensorStream.constant method to achieve the same effect.

Tensors can be referenced later by giving them a name (they are automatically assigned one if you don't):

```ruby
t1 = 1.0.t(name: 'c1')
t2 = [5.0].t
t2.name
=> "Const"

# tensorflow style
ts = TensorStream
t1 = ts.constant(1.0, name: 'c1')

# Reference later
graph = ts.get_default_graph
tx = graph['c1']
tx.run
=> 1.0
```

Tensor Shapes
-------------

The shape to use depends on what the data represents and what computation you want to achieve.

The shape of a tensor describes its structure, that is, the dimensions of the array. For example, to represent a 28x28 2D grayscale image you would need a tensor of shape [28, 28], with each cell representing a single channel. If instead you have a 28x28 RGB image you would need a tensor of shape [28, 28, 3], since you now need 3 values to represent each pixel. And what if you need to represent 100 RGB images? It follows that you need a tensor of shape [100, 28, 28, 3]!
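
For instance, a minimal sketch of building tensors with these shapes (assuming `zeros` and `Tensor#shape` behave like their TensorFlow counterparts):

```ruby
ts = TensorStream

image = ts.zeros([28, 28, 3])       # one 28x28 RGB image
batch = ts.zeros([100, 28, 28, 3])  # a batch of 100 such images

batch.shape
# => TensorShape([100, 28, 28, 3]), in the inspect form shown in op outputs above
```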

Computations
------------

Naturally, the whole point of all of this is to be able to perform computations. TensorStream supports all of the basic math operations you would expect, only beefed up to work with tensors:

```ruby
t1 = 1.0.t
t2 = 2.0.t
sum = t1 + t2
=> Op(add name: add shape: TensorShape([]) data_type: float32)
```

Note that sum did not actually compute the "sum" ... yet. What happened is that you only defined the data flow graph; in order to get the actual result you need to run it in a session:

```ruby
t1 = 1.0.t
t2 = 2.0.t
sum = t1 + t2
=> Op(add name: add shape: TensorShape([]) data_type: float32)

sess = TensorStream.session
sess.run(sum)
=> 3.0
sess.run(t1, t2) # pass multiple tensors/ops
=> [1.0, 2.0]

# this also works as a shortcut and is equivalent to above
sum.run
=> 3.0
```

TensorStream and TensorFlow (in non-eager execution mode) work like this because the data flow graph is what enables gradient computation for machine learning operations. The graph structure also allows the computation to be run optimally on various hardware like the GPU.
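
As a small sketch of what the graph enables (assuming a TensorFlow-style `ts.gradients` is available, the mechanism the `Train::*` optimizers are built on):

```ruby
ts = TensorStream

x = ts.variable(3.0, name: 'x_grad_demo')
f = x * x

# derive df/dx from the graph; the derivative of x**2 is 2x
grad = ts.gradients(f, [x]).first

sess = ts.session
sess.run(ts.global_variables_initializer)
sess.run(grad)
# => 6.0
```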

Of course, operations on multidimensional arrays work as you would expect:

```ruby
t1 = [1.0, 1.5].t
t2 = [1.2, 1.5].t
sum = t1 + t2
sum.run
=> [2.2, 3.0]
```

There is a wealth of other operations available, like reduction for example:

```ruby
t1 = [1.0, 1.5, 2.0].t
t1.reduce(:+).run
=> 4.5

# or tensorflow style
# ts.reduce_sum(t1)
```

Broadcast Operations
--------------------

Tensor sizes don't have to be the same: in some instances you can use a different but compatible size to perform an operation, as below.

```ruby
t1 = [[1.0, 1.5], [1.0, 1.5]].t
sum = t1 + 1.0
sum.run
=> [[2.0, 2.5], [2.0, 2.5]]
```

Here we "broadcasted" a scalar float constant to all cells in a tensor. If this were run on a GPU, you can imagine that the operation could be performed in parallel.

Below is another way, using a tensor of rank 1 on a tensor of rank 2:

```ruby
t1 = [[1.0, 1.5], [1.0, 1.5]].t
sum = t1 + [1.0, 0.5].t
sum.run
=> [[2.0, 2.0], [2.0, 2.0]]
```

In this case a row-by-row operation was done instead. A number of operations support broadcasting, like multiplication, subtraction, division etc.
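
For instance, elementwise multiplication broadcasts the same way; a quick sketch following the addition examples above:

```ruby
t1 = [[1.0, 1.5], [1.0, 1.5]].t
prod = t1 * [2.0, 0.5].t  # the rank-1 tensor is applied to each row
prod.run
=> [[2.0, 0.75], [2.0, 0.75]]
```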

Placeholders and Variables
--------------------------

There are special types of tensors that are frequently used when building a model: placeholders, which stand in for values, and variables, which store data that can be reused in succeeding sessions.

Placeholders are like parameters which take on a value at the time the model is run.

For example:

```ruby
param1 = Float.placeholder

sum = 2.0.t + param1
sess = TensorStream.session
sess.run(sum, feed_dict: { param1 => 1.0 })
=> 3.0
sess.run(sum, feed_dict: { param1 => 2.0 })
=> 4.0
```

Note that NOT passing a value for a placeholder will result in an error.

Variables, on the other hand, provide persistent data that survives between sessions; however, they need to be initialized first, otherwise an error will occur.

See below for an example:

```ruby
ts = TensorStream
var1 = 1.0.t.var(name: 'var')

acc = var1 + 1
assign = var1.assign(acc)

# Or tensorflow style
# var1 = ts.variable(1.0, dtype: :float32, name: 'var1')

# initialize the variables to their initial value
sess = TensorStream.session
init = ts.global_variables_initializer
sess.run(init)

# first run
sess.run(acc, assign)
=> [2.0, 2.0]
sess.run(acc, assign)
=> [3.0, 3.0]
```

Variables can be trainable or non-trainable. This property is used by training algorithms to determine whether they will be updated during training.

```ruby
v = TensorStream.variable(1.0, name: 'v', trainable: false)
```

Graphs
------

Graphs hold the entire model data structure. Each operation defined is stored in a graph, which is later used at runtime to perform the operations, as well as during serialization and deserialization.

When no graph is present when a tensor is defined, one will automatically be created and will serve as the "default" graph.

The graph can be accessed using the get_default_graph method.

```ruby
ts = TensorStream
graph = ts.get_default_graph

# access nodes
graph.nodes
=> {"Const"=>Op(const name: Const shape: TensorShape([]) data_type: float32), "Const_1"=>Op(const name: Const_1 shape: TensorShape([]) data_type: float32)}

```

The graph object can also be used to access collections, like the list of variables:

```ruby
vars = graph.get_collection(TensorStream::GraphKeys::GLOBAL_VARIABLES)
=> [Variable(Variable:0 shape: TensorShape([]) data_type: float32)]
```

Limitations
-----------

- The current version only supports dense tensors, meaning that for multidimensional arrays each row in a dimension must have the same size. The example below is not supported as of now:

```ruby
[[1, 2], [1], [2, 3]] # second array has a different size than the others
```

- The Ruby evaluator uses the Ruby Float and Integer objects during computation, so the width of the data types (float32 vs float64, int32 vs int64) isn't really used. This does matter with the OpenCL evaluator, however, which uses the width to determine the correct C data type for its OpenCL kernels (see the sketch below).
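
A sketch of what this means in practice (assuming `constant` accepts a `dtype:` option, as `variable` does above):

```ruby
ts = TensorStream

a = ts.constant(1.0, dtype: :float32)
b = ts.constant(1.0, dtype: :float64)

# Under the Ruby evaluator both are plain Ruby Floats, so the extra width of
# :float64 changes nothing; the OpenCL evaluator would map these to 32-bit
# and 64-bit C float types in its kernels.
a.run
=> 1.0
b.run
=> 1.0
```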
data/bin/stubgen
ADDED
@@ -0,0 +1,20 @@
#!/usr/bin/env ruby

# Script to auto generate op stub file from the opdef folder

require "bundler/setup"
require "tensor_stream"
require "erb"

target = File.join(__dir__, '..', 'lib', 'tensor_stream', 'generated_stub')

FileUtils.mkdir_p(target)

stub_file = File.join(target, 'ops.rb')
File.delete(stub_file) if File.exist?(stub_file)

f = File.open(stub_file, 'wb')

template = File.read(File.join(target, 'stub_file.erb'))
f << ERB.new(template, nil, '%').result(binding)
f.close
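
The `'%'` argument to `ERB.new` above enables percent trim mode, where template lines beginning with `%` are executed as Ruby; presumably this is how `stub_file.erb` loops over the op definitions. A minimal illustration of that mode (hypothetical template text; modern Rubies spell the option with the `trim_mode:` keyword rather than the legacy positional form used above):

```ruby
require "erb"

# hypothetical template: '%'-prefixed lines run as Ruby, the rest is output
template = <<~TPL
  % 3.times do |i|
  def op_<%= i %>; end
  % end
TPL

puts ERB.new(template, trim_mode: "%").result(binding)
# def op_0; end
# def op_1; end
# def op_2; end
```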
data/exe/model_utils
CHANGED
@@ -2,7 +2,7 @@
 
 require "bundler/setup"
 require "tensor_stream"
-require
+require "tensor_stream/utils/freezer"
 
 if ARGV[0].nil?
   puts "source checkpoint folder not specified"
@@ -21,4 +21,4 @@ end
 sess = TensorStream.session
 freezer = TensorStream::Freezer.new
 freezer.convert(sess, ARGV[0], ARGV[1])
-exit(0)
+exit(0)
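
For reference, the conversion this utility performs can also be invoked programmatically using the same calls the script makes; the paths below are placeholders:

```ruby
require "bundler/setup"
require "tensor_stream"
require "tensor_stream/utils/freezer"

# equivalent to running: model_utils <checkpoint_folder> <output_file>
sess = TensorStream.session
freezer = TensorStream::Freezer.new
freezer.convert(sess, "path/to/checkpoint_folder", "path/to/frozen_model.yml")
```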
data/lib/tensor_stream.rb
CHANGED
@@ -1,51 +1,52 @@
-require
-require
-require
-require
-require
-require
-require
-require
-require
-require
-require
-require
-require
-require
-require
-require
-require
-require
-require
-require
-require
-require
-require
-require
-require
-require
-require
-require
-require
-require
-require
-require
-require
-require
+require "tensor_stream/version"
+require "deep_merge"
+require "matrix"
+require "concurrent"
+require "tensor_stream/exceptions"
+require "tensor_stream/helpers/op_helper"
+require "tensor_stream/helpers/string_helper"
+require "tensor_stream/initializer"
+require "tensor_stream/graph_keys"
+require "tensor_stream/types"
+require "tensor_stream/graph_builder"
+require "tensor_stream/graph"
+require "tensor_stream/device"
+require "tensor_stream/session"
+require "tensor_stream/tensor_shape"
+require "tensor_stream/helpers/tensor_mixins"
+require "tensor_stream/tensor"
+require "tensor_stream/constant"
+require "tensor_stream/variable"
+require "tensor_stream/variable_scope"
+require "tensor_stream/operation"
+require "tensor_stream/placeholder"
+require "tensor_stream/control_flow"
+require "tensor_stream/dynamic_stitch"
+require "tensor_stream/nn/nn_ops"
+require "tensor_stream/evaluator/evaluator"
+require "tensor_stream/graph_serializers/packer"
+require "tensor_stream/graph_serializers/serializer"
+require "tensor_stream/graph_deserializers/protobuf"
+require "tensor_stream/graph_deserializers/yaml_loader"
+require "tensor_stream/graph_serializers/pbtext"
+require "tensor_stream/graph_serializers/graphml"
+require "tensor_stream/graph_serializers/yaml"
+require "tensor_stream/math_gradients"
 require "tensor_stream/debugging/debugging"
-require
-require
-require
+require "tensor_stream/utils"
+require "tensor_stream/train/utils"
+require "tensor_stream/images"
 
-require
+require "tensor_stream/profile/report_tool"
 
 # require 'tensor_stream/libraries/layers'
-require
-require
-require
-require
-require
-require
+require "tensor_stream/monkey_patches/patch"
+require "tensor_stream/monkey_patches/integer"
+require "tensor_stream/monkey_patches/float"
+require "tensor_stream/monkey_patches/array"
+require "tensor_stream/ops"
+require "tensor_stream/trainer"
+require "tensor_stream/op_maker"
 
 # module that exposes TensorStream top level functions
 module TensorStream