tensor_stream 1.0.4 → 1.0.9
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.gitignore +1 -0
- data/CHANGELOG.md +12 -2
- data/Dockerfile +1 -1
- data/USAGE_GUIDE.md +68 -0
- data/lib/tensor_stream.rb +1 -0
- data/lib/tensor_stream/evaluator/base_evaluator.rb +21 -1
- data/lib/tensor_stream/evaluator/evaluator.rb +1 -0
- data/lib/tensor_stream/evaluator/evaluator_utils.rb +20 -0
- data/lib/tensor_stream/evaluator/operation_helpers/array_ops_helper.rb +60 -0
- data/lib/tensor_stream/evaluator/ruby/array_ops.rb +53 -1
- data/lib/tensor_stream/evaluator/ruby/images_ops.rb +26 -0
- data/lib/tensor_stream/evaluator/ruby/math_ops.rb +60 -5
- data/lib/tensor_stream/evaluator/ruby/nn_ops.rb +25 -29
- data/lib/tensor_stream/evaluator/ruby/random_ops.rb +7 -11
- data/lib/tensor_stream/evaluator/ruby/storage_manager.rb +40 -0
- data/lib/tensor_stream/evaluator/ruby/variable_ops.rb +74 -0
- data/lib/tensor_stream/evaluator/ruby_evaluator.rb +31 -77
- data/lib/tensor_stream/generated_stub/ops.rb +256 -166
- data/lib/tensor_stream/generated_stub/stub_file.erb +4 -4
- data/lib/tensor_stream/graph.rb +3 -3
- data/lib/tensor_stream/graph_deserializers/yaml_loader.rb +4 -6
- data/lib/tensor_stream/helpers/infer_shape.rb +1 -7
- data/lib/tensor_stream/helpers/tensor_mixins.rb +10 -1
- data/lib/tensor_stream/images.rb +4 -0
- data/lib/tensor_stream/math/math_ops.rb +22 -0
- data/lib/tensor_stream/math_gradients.rb +15 -1
- data/lib/tensor_stream/nn/embedding_lookup.rb +114 -0
- data/lib/tensor_stream/nn/nn_ops.rb +16 -0
- data/lib/tensor_stream/op_maker.rb +36 -3
- data/lib/tensor_stream/operation.rb +8 -20
- data/lib/tensor_stream/ops.rb +14 -11
- data/lib/tensor_stream/ops/bias_add.rb +16 -0
- data/lib/tensor_stream/ops/equal.rb +4 -0
- data/lib/tensor_stream/ops/greater.rb +4 -0
- data/lib/tensor_stream/ops/greater_equal.rb +4 -0
- data/lib/tensor_stream/ops/less.rb +19 -0
- data/lib/tensor_stream/ops/less_equal.rb +4 -0
- data/lib/tensor_stream/ops/not_equal.rb +19 -0
- data/lib/tensor_stream/ops/rsqrt.rb +11 -0
- data/lib/tensor_stream/ops/strided_slice.rb +24 -0
- data/lib/tensor_stream/ops/sum.rb +4 -2
- data/lib/tensor_stream/ops/top_k.rb +23 -0
- data/lib/tensor_stream/session.rb +6 -12
- data/lib/tensor_stream/tensor.rb +1 -0
- data/lib/tensor_stream/tensor_shape.rb +32 -1
- data/lib/tensor_stream/train/saver.rb +2 -3
- data/lib/tensor_stream/utils.rb +18 -13
- data/lib/tensor_stream/utils/freezer.rb +5 -1
- data/lib/tensor_stream/utils/py_ports.rb +11 -0
- data/lib/tensor_stream/variable.rb +9 -6
- data/lib/tensor_stream/version.rb +1 -1
- data/samples/word_embeddings/word_embedding_1.rb +192 -0
- data/samples/word_embeddings/word_embedding_2.rb +203 -0
- data/tensor_stream.gemspec +7 -2
- metadata +67 -10
data/lib/tensor_stream/ops.rb
CHANGED
@@ -70,9 +70,8 @@ module TensorStream

   ##
   # Outputs random values from a truncated normal distribution.
-  def truncated_normal(shape, dtype: :float32, mean: 0.0, stddev: 1.0, seed: nil, name: nil)
-
-    _op(:truncated_normal, shape, options)
+  def truncated_normal(shape, dtype: :float32, mean: 0.0, stddev: 1.0, seed: nil, name: nil, pre_gen_table_size: nil)
+    _op(:truncated_normal, shape, dtype: dtype, mean: mean, stddev: stddev, seed: seed, name: name, pre_gen_table_size: pre_gen_table_size)
   end

   ##
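For reference, a minimal usage sketch of the updated signature (not part of the diff; it assumes the gem's usual `TensorStream.session` API and picks illustrative values). The `pre_gen_table_size:` keyword is the addition in this release:

```ruby
require "tensor_stream"

ts = TensorStream

# Sample a 2x3 tensor from a truncated normal distribution.
# pre_gen_table_size: is the new optional keyword shown in the hunk above.
norm = ts.truncated_normal([2, 3], mean: 0.0, stddev: 1.0, seed: 42)

ts.session do |sess|
  p sess.run(norm) # => 2x3 array of floats drawn near the mean
end
```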
@@ -163,14 +162,6 @@ module TensorStream
     _op(:ones, shape, data_type: dtype, name: name)
   end

-  ##
-  # Returns the truth value of (x < y) element-wise.
-  # This operation supports broadcasting
-  def less(input_a, input_b, name: nil)
-    check_data_types(input_a, input_b)
-    _op(:less, input_a, input_b, name: name)
-  end
-
   ##
   # Returns the truth value of x AND y element-wise.
   def logical_and(input_a, input_b, name: nil)
@@ -203,6 +194,15 @@ module TensorStream
     end
   end

+  ##
+  # Partitions data into num_partitions tensors using indices from partitions
+  def dynamic_partition(data, partitions, num_partitions, name: nil)
+    result = _op(:dynamic_partition, data, partitions, num_partitions: num_partitions, name: nil)
+    num_partitions.times.map do |index|
+      result[index]
+    end
+  end
+
   def split(value, num_or_size_splits, axis: 0, num: nil, name: "split")
     value = convert_to_tensor(value)
     num_or_size_splits = convert_to_tensor(num_or_size_splits)
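A short sketch of how the new `dynamic_partition` wrapper might be called (values are illustrative and the result split follows the usual dynamic-partition semantics, where element `i` of `data` is routed to partition `partitions[i]`):

```ruby
require "tensor_stream"

ts = TensorStream

data       = ts.constant([10, 20, 30, 40, 50])
partitions = ts.constant([0, 1, 0, 1, 0])

# The wrapper returns an array of num_partitions tensors, one per partition index.
p0, p1 = ts.dynamic_partition(data, partitions, 2)

ts.session do |sess|
  p sess.run(p0) # elements routed to partition 0, e.g. [10, 30, 50]
  p sess.run(p1) # elements routed to partition 1, e.g. [20, 40]
end
```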
@@ -532,6 +532,9 @@ module TensorStream
     _op(:squeeze, value, axis: axis, name: nil)
   end

+  def clip_by_norm(tensor, clip_norm, axes: nil, name: nil)
+  end
+
   ##
   # Computes the difference between two lists of numbers or strings.
   # Given a list x and a list y, this operation returns a list out that represents all values
data/lib/tensor_stream/ops/bias_add.rb
ADDED
@@ -0,0 +1,16 @@
+TensorStream::OpMaker.define_operation :bias_add do |op|
+  op.what_it_does "Adds bias to value."
+
+  op.parameter :value, "A Tensor", :nil, validate: 'NUMERIC_TYPES'
+  op.parameter :bias, "A 1 D tensor", :nil, validate: 'NUMERIC_TYPES'
+
+  op.supports_broadcasting!
+  op.exclude!
+
+  op.option :name, "Optional name", :nil
+  op.option :data_format, "A string. 'NHWC' and 'NCHW' are supported.", :nil
+
+  op.define_gradient do |grad, node, _params|
+    [grad, _op(:bias_add_grad, grad, data_format: node.options[:data_format])]
+  end
+end
data/lib/tensor_stream/ops/less.rb
ADDED
@@ -0,0 +1,19 @@
+TensorStream::OpMaker.define_operation :less do |op|
+  op.what_it_does "Returns the truth value of (x < y) element-wise."
+
+  op.parameter :input_a, "tensor X"
+  op.parameter :input_b, "tensor Y"
+
+  op.apply_data_type_coercion!
+  op.supports_broadcasting!
+
+  op.option :name, "Optional name", :nil
+
+  op.define_gradient do |grad, node, _params|
+    _min_or_max_grad(node.inputs, grad, ->(a, b) { ts.less(a, b) })
+  end
+
+  op.define_data_type do
+    :boolean
+  end
+end
data/lib/tensor_stream/ops/not_equal.rb
ADDED
@@ -0,0 +1,19 @@
+TensorStream::OpMaker.define_operation :not_equal do |op|
+  op.what_it_does "Returns the truth value of (x != y) element-wise."
+
+  op.parameter :input_a, "tensor X"
+  op.parameter :input_b, "tensor Y"
+
+  op.apply_data_type_coercion!
+  op.supports_broadcasting!
+
+  op.option :name, "Optional name", :nil
+
+  op.define_gradient do |grad, node, params|
+    _min_or_max_grad(node.inputs, grad, ->(a, b) { ts.not_equal(a, b) })
+  end
+
+  op.define_data_type do
+    :boolean
+  end
+end
data/lib/tensor_stream/ops/rsqrt.rb
ADDED
@@ -0,0 +1,11 @@
+TensorStream::OpMaker.define_operation :rsqrt do |op|
+  op.what_it_does "Computes reciprocal of square root of x element-wise."
+
+  op.parameter :input_a, "tensor X", validate: 'FLOATING_POINT_TYPES'
+  op.option :name, "Optional name", :nil
+
+  op.define_gradient do |grad, node, params|
+    # Returns -0.5 * grad * conj(y)^3.
+    i_op(:rsqrt_grad, node, grad)
+  end
+end
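A small sketch of the new `rsqrt` stub (reciprocal square root), assuming float inputs as required by the `FLOATING_POINT_TYPES` validation above; values are illustrative:

```ruby
require "tensor_stream"

ts = TensorStream

x = ts.constant([1.0, 4.0, 16.0])

# rsqrt(x) == 1 / sqrt(x); its gradient is provided by the rsqrt_grad kernel.
y = ts.rsqrt(x)

ts.session { |sess| p sess.run(y) } # => [1.0, 0.5, 0.25]
```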
data/lib/tensor_stream/ops/strided_slice.rb
ADDED
@@ -0,0 +1,24 @@
+TensorStream::OpMaker.define_operation :strided_slice do |op|
+  op.what_it_does "Extracts a strided slice of a tensor "
+  op.what_it_does "this op extracts a slice of size `(end-begin)/stride`
+    from the given `input_` tensor. Starting at the location specified by `begin`
+    the slice continues by adding `stride` to the index until all dimensions are
+    not less than `end`.
+    Note that a stride can be negative, which causes a reverse slice."
+
+  op.parameter :input, "A tensor"
+  op.parameter :_begin, "start index"
+  op.parameter :_end, "end index"
+  op.parameter :strides, "end index", :nil
+  op.option :name, "Optional name", :nil
+
+  op.define_gradient do |grad, node, params|
+    input, b_index, e_index, strides = params
+    x = ts.shape(input, out_type: node.inputs[0].data_type)
+
+    _op(:strided_slice_grad, x, b_index, e_index, strides, grad)
+  end
+
+  op.define_shape do |tensor|
+  end
+end
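A sketch of calling the new stub, following the parameter order in the definition above (input, begin, end, strides). The values and the expected slice are illustrative and assume the usual strided-slice semantics:

```ruby
require "tensor_stream"

ts = TensorStream

t = ts.constant([[1, 2, 3],
                 [4, 5, 6],
                 [7, 8, 9]])

# Rows 0..1 with stride 1, columns 0..2 with stride 2,
# i.e. the slice [[1, 3], [4, 6]].
slice = ts.strided_slice(t, [0, 0], [2, 3], [1, 2])

ts.session { |sess| p sess.run(slice) }
```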
data/lib/tensor_stream/ops/sum.rb
CHANGED
@@ -7,14 +7,16 @@ TensorStream::OpMaker.define_operation :sum do |op|
   op.what_it_does "If axis has no entries, all dimensions are reduced, and a tensor with a single element is returned."

   op.parameter :input_a, "tensor X"
-  op.parameter :
+  op.parameter :axis_p, "tensor X", :nil, validate: 'INTEGER_TYPES'

+  op.option :axis, "axis", :nil, exclude: true
   op.option :name, "Optional name", :nil
   op.option :keepdims, "If true, retains reduced dimensions with length 1.", :false

   op.add_custom "input_a = TensorStream.convert_to_tensor(input_a)"
   op.add_custom "return input_a if input_a.shape.scalar?"
-  op.add_custom "
+  op.add_custom "axis_p = axis_p || axis"
+  op.add_custom "axis_p = cast_axis(input_a, axis_p)"

   op.define_gradient do |grad, node, params|
     x, y = params
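With this change the reduction axis can be given either positionally (`axis_p`) or via the `axis:` keyword added above; a minimal sketch with illustrative values:

```ruby
require "tensor_stream"

ts = TensorStream

m = ts.constant([[1, 1, 1],
                 [1, 1, 1]])

col_sums = ts.sum(m, 0)       # axis passed positionally
row_sums = ts.sum(m, axis: 1) # axis passed as the new keyword option

ts.session do |sess|
  p sess.run(col_sums) # => [2, 2, 2]
  p sess.run(row_sums) # => [3, 3]
end
```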
data/lib/tensor_stream/ops/top_k.rb
ADDED
@@ -0,0 +1,23 @@
+TensorStream::OpMaker.define_operation :top_k do |op|
+  op.what_it_does "Finds values and indices of the `k` largest entries for the last dimension."
+
+  op.parameter :input, "1-D or higher `Tensor` with last dimension at least `k`."
+  op.parameter :k, "0-D `int32` `Tensor`. Number of top elements to look for along the last dimension (along each row for matrices)", 1
+  op.option :sorted, "If true the resulting `k` elements will be sorted by the values in descending order.", "true"
+  op.option :name, "Optional name", :nil
+
+  op.add_custom_post "[result[0], result[1]]"
+
+  op.define_shape do |tensor|
+    next nil unless tensor.inputs[0].shape.known?
+
+    input_shape = tensor.inputs[0].shape.shape.dup
+    k = tensor.options[:k]
+    input_shape[-1] = k
+    input_shape
+  end
+
+  op.define_gradient do |grad, node, params|
+    #TODO
+  end
+end
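The `add_custom_post` line above makes the generated stub return a `[values, indices]` pair; a usage sketch with illustrative values:

```ruby
require "tensor_stream"

ts = TensorStream

scores = ts.constant([1.0, 5.0, 3.0, 4.0])

# Values and indices of the 2 largest entries along the last dimension.
values, indices = ts.top_k(scores, 2)

ts.session do |sess|
  p sess.run(values)  # => [5.0, 4.0]
  p sess.run(indices) # => [1, 3]
end
```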
data/lib/tensor_stream/session.rb
CHANGED
@@ -18,17 +18,7 @@ module TensorStream
     end

     def get_evaluator_classes(evaluators)
-      @evaluator_classes =
-        if evaluators.empty?
-          TensorStream::Evaluator.default_evaluators
-        else
-          evaluators.collect { |name| Object.const_get("TensorStream::Evaluator::#{camelize(name.to_s)}") }
-        end
-      elsif evaluators.nil?
-        TensorStream::Evaluator.default_evaluators
-      else
-        [Object.const_get("TensorStream::Evaluator::#{camelize(evaluators.to_s)}")]
-      end
+      @evaluator_classes = TensorStream::EvaluatorUtils.get_evaluator_classes(evaluators)
     end

     def clear_session_cache
@@ -58,7 +48,8 @@ module TensorStream
       # scan for placeholders and assign value
       options[:feed_dict]&.each_key do |k|
         if k.is_a?(Placeholder)
-
+          ph = options[:feed_dict][k]
+          context[k.name.to_sym] = ph.is_a?(Tensor) ? ph.op : ph
         elsif k.is_a?(String)
           target_graph = args[0].graph
           node = target_graph.get_node(k)
@@ -98,6 +89,9 @@ module TensorStream
       end

       def close
+        # unlink resources to save memory
+        @last_session_context = nil
+        @session_cache = {}
         @closed = true
       end

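The feed_dict change above resolves `Placeholder` keys to their fed values (unwrapping a fed `Tensor` to its op), and `close` now also drops the cached context. A minimal sketch of the affected session API (assumes `TensorStream.session` without a block returns the session object):

```ruby
require "tensor_stream"

ts = TensorStream

x = ts.placeholder(:float32)
y = x * 2.0

sess = ts.session
p sess.run(y, feed_dict: { x => 21.0 }) # => 42.0

# close now also releases the last session context and the session cache.
sess.close
```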
data/lib/tensor_stream/tensor.rb
CHANGED
data/lib/tensor_stream/tensor_shape.rb
CHANGED
@@ -18,7 +18,8 @@ module TensorStream
     end

     def [](index)
-      @shape[index]
+      new_shape = @shape[index]
+      TensorShape.new(@shape[index])
     end

     def ndims
@@ -42,6 +43,36 @@ module TensorStream
       known?
     end

+    def merge_with(other)
+      assert_compatible_with(other)
+
+      if @shape.nil?
+        TensorShape.new(other)
+      else
+        TensorShape.new(@shape)
+      end
+    end
+
+    def compatible_with?(other)
+      other = as_dimension(other)
+
+      shape.nil? || other.nil? || shape == other
+    end
+
+    def as_dimension(value)
+      value.is_a?(TensorShape) ? value.shape : value
+    end
+
+    def value
+      shape
+    end
+
+    ##
+    # Raises an exception if `other` is not compatible with this shape.
+    def assert_compatible_with(other)
+      raise TensorStream::ValueError, "Dimensions #{self} and #{other} are not compatible" unless compatible_with?(other)
+    end
+
     def self.infer_shape(shape_a, shape_b)
       return nil if shape_a.nil? || shape_b.nil?
       return shape_a if shape_b.empty?
data/lib/tensor_stream/train/saver.rb
CHANGED
@@ -7,9 +7,9 @@ module TensorStream
   class Saver
     include TensorStream::OpHelper

-    def initialize
+    def initialize(var_list = nil)
       graph = TensorStream::Graph.get_default_graph
-      vars = graph.get_collection(GraphKeys::GLOBAL_VARIABLES)
+      vars = var_list || graph.get_collection(GraphKeys::GLOBAL_VARIABLES)

       @filename = graph["ts_filename"] || TensorStream.placeholder(:string, name: "ts_filename", shape: [])

@@ -50,7 +50,6 @@ module TensorStream
     meta_data = JSON.parse(File.read(meta_file))
     gs = meta_data["gs"]
     filename = File.join(modelpath, ["model", gs, ".ckpt"].compact.join("-"))
-
     session.run(@restore_op, feed_dict: {@filename => filename})
   end

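The new optional `var_list` lets a saver track a subset of variables instead of the whole `GLOBAL_VARIABLES` collection. A hedged sketch (assumes the `TensorStream::Train::Saver` namespace; the save/restore calls and checkpoint paths are omitted):

```ruby
require "tensor_stream"

ts = TensorStream

w = ts.variable(1.0, name: "w")
b = ts.variable(0.0, name: "b")

# Track only `w`; with no argument the saver falls back to GLOBAL_VARIABLES as before.
saver = TensorStream::Train::Saver.new([w])
```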
data/lib/tensor_stream/utils.rb
CHANGED
@@ -45,25 +45,22 @@ module TensorStream
   # Creates a variable
   # A variable maintains state across sessions
   def variable(value, name: nil, initializer: nil, graph: nil, dtype: nil, trainable: true)
-    op = Graph.get_default_graph.add_op(:assign, nil, value)
     common_options = {
-      initializer: initializer ||
+      initializer: TensorStream.convert_to_tensor(initializer || value),
       name: name,
       graph: graph,
       dtype: dtype,
       trainable: trainable,
     }
     tensor = if value.is_a?(String)
-
-
-
-
-
-
-
-
-      op.set_input(0, tensor.op)
-      Graph.get_default_graph.add_node(op)
+      i_var(dtype || :string, 0, [], get_variable_scope, common_options)
+    elsif value.is_a?(Integer)
+      i_var(dtype || :int32, 0, [], get_variable_scope, common_options)
+    elsif value.is_a?(Float)
+      i_var(dtype || :float32, 0, [], get_variable_scope, common_options)
+    else
+      i_var(dtype || :float32, 0, nil, get_variable_scope, common_options)
+    end
     tensor
   end

@@ -219,6 +216,10 @@ module TensorStream
     TensorStream::Trainer
   end

+  def math
+    TensorStream::Maths
+  end
+
   def image
     TensorStream::Images
   end
@@ -242,12 +243,16 @@ module TensorStream
     return convert_to_tensor(value.call) if value.is_a?(Proc)
     # raise "Invalid tensor value" if value.nil?

-    if value.is_a?(Array) && value
+    if value.is_a?(Array) && value.detect { |v| v.is_a?(Tensor) }
       return TensorStream.stack(value) if value.size > 1

       return TensorStream.expand_dims(value[0], 0)
     end

+    if value.is_a?(TensorShape)
+      value = value.shape
+    end
+
     check_if_dense(value)
     i_cons(value, dtype: dtype || Tensor.detect_type(value), name: name)
   end
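After this change `variable` routes every value type through `i_var` with a `convert_to_tensor`-wrapped initializer, and `convert_to_tensor` accepts a `TensorShape` by unwrapping it to its array form. A sketch of both (illustrative values; assumes the gem's standard `global_variables_initializer` helper):

```ruby
require "tensor_stream"

ts = TensorStream

count = ts.variable(0,   name: "count") # Integer value -> int32 variable
rate  = ts.variable(0.5, name: "rate")  # Float value   -> float32 variable

# A TensorShape can now be handed to convert_to_tensor directly.
shape_t = ts.convert_to_tensor(TensorStream::TensorShape.new([2, 2]))

ts.session do |sess|
  sess.run(ts.global_variables_initializer)
  p sess.run(count, rate, shape_t) # => [0, 0.5, [2, 2]]
end
```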
data/lib/tensor_stream/utils/freezer.rb
CHANGED
@@ -19,7 +19,11 @@ module TensorStream
       node = graph.get_tensor_by_name(node_key)
       case node.operation
       when :variable_v2
-        value = node.
+        value = Evaluator.read_variable(node.graph, node.options[:var_name])
+        if value.nil?
+          raise "#{node.options[:var_name]} has no value"
+        end
+
         options = {
           value: value,
           data_type: node.data_type,
data/lib/tensor_stream/variable.rb
CHANGED
@@ -46,17 +46,16 @@ module TensorStream

   def assign(value, name: nil, use_locking: false)
     TensorStream.check_data_types(self, value)
-    _op(:assign,
+    _op(:assign, value, name: name, var_name: @name)
   end

   def read_value
-    @
-    @value
+    Evaluator.read_variable(@graph, @name)
   end

   def assign_add(value, name: nil)
     TensorStream.check_data_types(self, value)
-    _op(:assign_add,
+    _op(:assign_add, value, data_type: data_type, name: name, var_name: @name)
   end

   def to_math(_tensor, _name_only = false, _max_depth = 99, _unused = 0)
@@ -65,11 +64,15 @@ module TensorStream

   def assign_sub(value)
     TensorStream.check_data_types(self, value)
-    _op(:assign_sub,
+    _op(:assign_sub, value, data_type: data_type, name: name, var_name: @name)
   end

   def self.variables_initializer(collection)
-    TensorStream.
+    global_variables_ops = TensorStream.get_default_graph.get_collection(collection).map do |variable|
+      _op(:assign, variable.initializer, var_name: variable.name)
+    end
+
+    TensorStream.group(global_variables_ops)
   end

   def self.global_variables_initializer
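Variable assignment ops now carry the variable's `var_name:` and values are read back through the evaluator's storage, but the usual initialize/update pattern is unchanged. A closing sketch (illustrative values; assumes the gem's standard `global_variables_initializer` helper):

```ruby
require "tensor_stream"

ts = TensorStream

counter = ts.variable(0, name: "counter")
inc = counter.assign_add(1) # assign ops now reference the variable via var_name:

ts.session do |sess|
  sess.run(ts.global_variables_initializer)
  3.times { sess.run(inc) }
  p sess.run(counter) # => 3
end
```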
|