tensor_stream 0.2.0 → 0.3.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +5 -5
- data/.circleci/config.yml +2 -1
- data/CHANGELOG.md +5 -0
- data/README.md +28 -1
- data/benchmark/benchmark.rb +129 -0
- data/lib/tensor_stream.rb +7 -4
- data/lib/tensor_stream/evaluator/buffer.rb +10 -0
- data/lib/tensor_stream/evaluator/evaluator.rb +1 -0
- data/lib/tensor_stream/evaluator/kernels/_bool_operand.cl +45 -0
- data/lib/tensor_stream/evaluator/kernels/_operand.cl +45 -0
- data/lib/tensor_stream/evaluator/kernels/abs.cl +16 -0
- data/lib/tensor_stream/evaluator/kernels/add.cl +5 -0
- data/lib/tensor_stream/evaluator/kernels/argmax.cl +15 -0
- data/lib/tensor_stream/evaluator/kernels/argmin.cl +15 -0
- data/lib/tensor_stream/evaluator/kernels/cast.cl +15 -0
- data/lib/tensor_stream/evaluator/kernels/cond.cl.erb +5 -0
- data/lib/tensor_stream/evaluator/kernels/cos.cl +7 -0
- data/lib/tensor_stream/evaluator/kernels/div.cl.erb +5 -0
- data/lib/tensor_stream/evaluator/kernels/exp.cl +7 -0
- data/lib/tensor_stream/evaluator/kernels/gemm.cl +63 -0
- data/lib/tensor_stream/evaluator/kernels/log.cl +7 -0
- data/lib/tensor_stream/evaluator/kernels/log1p.cl +7 -0
- data/lib/tensor_stream/evaluator/kernels/max.cl +91 -0
- data/lib/tensor_stream/evaluator/kernels/mul.cl +5 -0
- data/lib/tensor_stream/evaluator/kernels/negate.cl +15 -0
- data/lib/tensor_stream/evaluator/kernels/pow.cl +130 -0
- data/lib/tensor_stream/evaluator/kernels/reciprocal.cl +15 -0
- data/lib/tensor_stream/evaluator/kernels/round.cl +7 -0
- data/lib/tensor_stream/evaluator/kernels/sigmoid.cl +8 -0
- data/lib/tensor_stream/evaluator/kernels/sigmoid_grad.cl +54 -0
- data/lib/tensor_stream/evaluator/kernels/sign.cl +23 -0
- data/lib/tensor_stream/evaluator/kernels/sin.cl +8 -0
- data/lib/tensor_stream/evaluator/kernels/sqrt.cl +8 -0
- data/lib/tensor_stream/evaluator/kernels/square.cl +15 -0
- data/lib/tensor_stream/evaluator/kernels/sub.cl +5 -0
- data/lib/tensor_stream/evaluator/kernels/tan.cl +7 -0
- data/lib/tensor_stream/evaluator/kernels/tanh.cl +7 -0
- data/lib/tensor_stream/evaluator/kernels/tanh_grad.cl +6 -0
- data/lib/tensor_stream/evaluator/kernels/where.cl +15 -0
- data/lib/tensor_stream/evaluator/opencl_buffer.rb +30 -0
- data/lib/tensor_stream/evaluator/opencl_evaluator.rb +1095 -0
- data/lib/tensor_stream/evaluator/opencl_template_helper.rb +58 -0
- data/lib/tensor_stream/evaluator/operation_helpers/array_ops_helper.rb +27 -0
- data/lib/tensor_stream/evaluator/ruby_evaluator.rb +20 -31
- data/lib/tensor_stream/graph.rb +4 -2
- data/lib/tensor_stream/math_gradients.rb +3 -0
- data/lib/tensor_stream/operation.rb +29 -2
- data/lib/tensor_stream/ops.rb +14 -2
- data/lib/tensor_stream/placeholder.rb +1 -1
- data/lib/tensor_stream/session.rb +10 -3
- data/lib/tensor_stream/tensor_shape.rb +1 -1
- data/lib/tensor_stream/train/saver.rb +1 -1
- data/lib/tensor_stream/variable.rb +7 -1
- data/lib/tensor_stream/version.rb +1 -1
- data/samples/logistic_regression.rb +2 -1
- data/samples/nearest_neighbor.rb +54 -0
- data/tensor_stream.gemspec +3 -1
- metadata +107 -28
data/lib/tensor_stream/evaluator/opencl_template_helper.rb
ADDED
@@ -0,0 +1,58 @@
+require 'erb'
+class OpenclTemplateHelper
+  def initialize(source)
+    @source = source
+  end
+
+  def generate
+    ERB.new(@source, nil, '%').result(binding)
+  end
+
+  def render(template, locals = {})
+    filename = File.join(File.dirname(__FILE__), 'kernels', "_#{template}")
+    source = File.read(filename)
+    current_scope = binding
+    locals.each do |k,v|
+      current_scope.local_variable_set(k.to_sym, v)
+    end
+    ERB.new(source, nil, '%').result(current_scope)
+  end
+
+  def dtype_to_c_type(dtype)
+    case(dtype)
+    when 'fp'
+      'float'
+    when 'int'
+      'int'
+    end
+  end
+
+  def operator_to_c(op)
+    case(op)
+    when 'less'
+      '<'
+    when 'less_equal'
+      '<='
+    when 'equal'
+      '=='
+    when 'greater'
+      '>'
+    when 'greater_equal'
+      '>='
+    when 'not_equal'
+      '!='
+    when 'logical_and'
+      '&&'
+    when 'div'
+      '/'
+    when 'add'
+      '+'
+    when 'sub'
+      '-'
+    when 'mul'
+      '*'
+    else
+      raise "unsupported op #{op}"
+    end
+  end
+end
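The helper above drives the new OpenCL kernels: `generate` runs a kernel's source through ERB, and a template can call `render`, `dtype_to_c_type`, and `operator_to_c` from inside ERB tags because the evaluation binding is the helper instance. A minimal usage sketch; the relative paths are assumptions based on the file list above, not part of the diff:

```ruby
# Sketch only: render one of the new .cl kernels through ERB.
require_relative 'data/lib/tensor_stream/evaluator/opencl_template_helper'

source = File.read('data/lib/tensor_stream/evaluator/kernels/add.cl')
puts OpenclTemplateHelper.new(source).generate # plain OpenCL passes through unchanged
```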
data/lib/tensor_stream/evaluator/operation_helpers/array_ops_helper.rb
CHANGED
@@ -22,6 +22,17 @@ module TensorStream
       slice_tensor(input, start, target_shape)
     end

+    def reduced_shape(input_shape, axes)
+      return [] if axes.nil? # reduce to scalar
+      axes = [ axes ] unless axes.is_a?(Array)
+      return input_shape if axes.empty?
+
+      axes.each do |dimen|
+        input_shape[dimen] = 1
+      end
+      input_shape
+    end
+
     def broadcast(input_a, input_b)
       sa = shape_eval(input_a)
       sb = shape_eval(input_b)
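`reduced_shape` mirrors the shape produced by a keep-dims reduction: each reduced axis collapses to size 1, a `nil` axis reduces to a scalar, and an empty axis list leaves the shape alone. A quick check, assuming the helper module is `TensorStream::ArrayOpsHelper` (inferred from the file name, not shown in this hunk):

```ruby
require 'tensor_stream'

helper = Object.new.extend(TensorStream::ArrayOpsHelper) # module name assumed
p helper.reduced_shape([2, 3], 1)    # => [2, 1]  the reduced axis is kept with size 1
p helper.reduced_shape([2, 3], nil)  # => []      nil axis reduces to a scalar shape
p helper.reduced_shape([2, 3], [])   # => [2, 3]  no axes given, shape unchanged
```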
@@ -137,5 +148,21 @@ module TensorStream
         new_arr * t
       end
     end
+
+    def process_function_op(a, op)
+      # ruby scalar
+      if (a.is_a?(Tensor) && a.shape.rank > 0) || a.is_a?(Array)
+        vector_op(a, 0, op)
+      else
+        op.call(a, 0)
+      end
+    end
+
+    def get_rank(value, rank = 0)
+      return rank unless value.is_a?(Array)
+      return rank + 1 if value.empty?
+
+      get_rank(value[0], rank + 1)
+    end
   end
 end
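`process_function_op` and `get_rank` move here from the Ruby evaluator (they are removed from ruby_evaluator.rb further down) so other evaluators can share them. `get_rank` infers array rank by walking first elements; a short check under the same module-name assumption as above:

```ruby
require 'tensor_stream'

helper = Object.new.extend(TensorStream::ArrayOpsHelper) # module name assumed
p helper.get_rank(5)                # => 0  scalar
p helper.get_rank([1, 2, 3])        # => 1
p helper.get_rank([[1, 2], [3, 4]]) # => 2  rank follows the nesting depth
```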
data/lib/tensor_stream/evaluator/ruby_evaluator.rb
CHANGED
@@ -76,10 +76,11 @@ module TensorStream
       protected

       def eval_variable(tensor, child_context)
-
+        value = tensor.read_value
+        if value.nil?
           raise "variable #{tensor.name} not initalized"
         end
-        eval_tensor(
+        eval_tensor(value, child_context).tap do |val|
           child_context[:returns] ||= {}
           child_context[:returns][:vars] ||= []
           child_context[:returns][:vars] << { name: tensor.name, val: val }
@@ -99,7 +100,12 @@ module TensorStream
           a = complete_eval(a, child_context)
           axis = tensor.options[:axis] || 0

-
+          get_op_with_axis(a, axis, 0, tensor.data_type)
+        when :argmin
+          a = complete_eval(a, child_context)
+          axis = tensor.options[:axis] || 0
+
+          get_op_with_axis(a, axis, 0, tensor.data_type, ->(a, b) { a < b })
         when :cast
           a = complete_eval(a, child_context)

@@ -499,7 +505,10 @@ module TensorStream

       def eval_tensor(tensor, child_context)
         return tensor unless tensor.is_a?(Tensor)
-
+
+        cache_key = "#{tensor.graph.object_id}_ruby_#{tensor.name}"
+        return @context[cache_key] if @context.key?(cache_key)
+        return @context[:_cache][cache_key] if @context[:_cache] && @context[:_cache].key?(tensor.name)

         if tensor.value.is_a?(Array)
           tensor.value.collect do |item|
@@ -508,20 +517,21 @@ module TensorStream
         else
           tensor.value.is_a?(Tensor) ? run(tensor.value, child_context) : tensor.value
         end.tap do |result|
-          @context[
+          @context[cache_key] = result
+          @context[:_cache][cache_key] = result if @context[:_cache] && tensor.is_const
         end
       end

       private

-      def
+      def get_op_with_axis(a, target_axis, current_axis, output_type, op = ->(t, u) { t > u })
         if target_axis == current_axis
           if a[0].is_a?(Array)
             (0...a[0].size).each.collect do |column_index|
               max = nil
               max_index = 0
               a.each_with_index do |row, row_index|
-                if max.nil? || row[column_index]
+                if max.nil? || op.call(row[column_index], max)
                   max = row[column_index]
                   max_index = row_index
                 end
@@ -533,7 +543,7 @@ module TensorStream
           max = nil
           max_index = 0
           a.each_with_index do |x, index|
-            if max.nil? || x
+            if max.nil? || op.call(x, max)
               max = x
               max_index = index
             end
@@ -542,7 +552,7 @@ module TensorStream
           end
         else
           a.collect do |row|
-
+            get_op_with_axis(row, target_axis, current_axis + 1, output_type, op)
           end
         end
       end
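`get_op_with_axis` now takes a comparator lambda, so argmax and argmin share one traversal: the default `->(t, u) { t > u }` keeps the old argmax behaviour, and argmin passes `->(a, b) { a < b }`. The idea in miniature (a standalone sketch, not the evaluator's private method):

```ruby
# Minimal illustration of comparator-driven index selection.
argbest = lambda do |values, better|
  best_index = 0
  values.each_with_index { |v, i| best_index = i if better.call(v, values[best_index]) }
  best_index
end

p argbest.call([3, 1, 2], ->(t, u) { t > u }) # => 0, argmax-style
p argbest.call([3, 1, 2], ->(t, u) { t < u }) # => 1, argmin-style
```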
@@ -605,7 +615,7 @@ module TensorStream

       def call_op(op, a, child_context, func)
         a = complete_eval(a, child_context)
-        process_function_op(a,
+        process_function_op(a, func)
       rescue FullEvalNotPossible
         TensorStream.send(op.to_sym, a)
       end
@@ -667,13 +677,6 @@ module TensorStream
         end
       end

-      def get_rank(value, rank = 0)
-        return rank unless value.is_a?(Array)
-        return rank + 1 if value.empty?
-
-        get_rank(value[0], rank + 1)
-      end
-
       def concat_array(values, axis)
         combined_array = values.shift
         axis = get_rank(combined_array) - 1 if axis == -1
@@ -694,20 +697,6 @@ module TensorStream
         end
       end

-      def process_function_op(a, child_context, op)
-        # ruby scalar
-        if (a.is_a?(Tensor) && a.shape.rank > 0) || a.is_a?(Array)
-          vector_op(a, 0, op)
-        elsif !a.is_a?(Tensor) || a.shape.rank.zero?
-          v = run(a, child_context)
-          raise FullEvalNotPossible.new, "full eval not possible for #{v.name}" if v.is_a?(Tensor) && !v.is_const
-
-          op.call(v, 0)
-        else
-          raise 'cannot be here'
-        end
-      end
-
       def resolve_placeholder(placeholder, _execution_context = {})
         return nil if placeholder.nil?
         return placeholder if retain.include?(placeholder)
data/lib/tensor_stream/graph.rb
CHANGED
@@ -1,7 +1,7 @@
 module TensorStream
   # A class that defines a TensorStream graph
   class Graph
-    attr_accessor :nodes, :collections, :eager_execution, :random_seed
+    attr_accessor :nodes, :collections, :eager_execution, :random_seed, :constants

     def initialize
       @eager_execution = false
@@ -9,6 +9,7 @@ module TensorStream
       @collections = {
         :"#{GraphKeys::GLOBAL_VARIABLES}" => []
       }
+      @constants = {}
     end

     def reset
@@ -21,6 +22,7 @@ module TensorStream
       @collections = {
         :"#{GraphKeys::GLOBAL_VARIABLES}" => []
       }
+      @constants = {}
     end

     def as_default
@@ -68,7 +70,7 @@ module TensorStream
       end

       @nodes[node.name] = node
-
+      @constants[node.name] = node if node.is_const
       node.send(:propagate_outputs)
       node.send(:propagate_consumer, node)
       node.value = node.eval if @eager_execution
data/lib/tensor_stream/operation.rb
CHANGED
@@ -17,7 +17,7 @@ module TensorStream

       @items = [input_a, input_b].map { |i| options[:preserve_params_type] ? i : TensorStream.convert_to_tensor(i) }
       @data_type = set_data_type(options[:data_type])
-
+      @is_const = infer_const
       @shape = TensorShape.new(infer_shape)
       @graph.add_node(self)
     end
@@ -48,14 +48,39 @@ module TensorStream
       true
     end

+    def infer_const
+      return false if breakpoint
+      case operation
+      when :random_normal, :random_uniform, :glorot_uniform, :print
+        false
+      else
+        non_const = @items.compact.find { |item| !item.is_const }
+        non_const ? false : true
+      end
+    end
+
     def set_data_type(passed_data_type)
       case operation
-      when :greater, :less, :equal, :not_equal, :greater_equal, :less_equal
+      when :greater, :less, :equal, :not_equal, :greater_equal, :less_equal, :logical_and
        :boolean
       when :shape, :rank
        :int32
+      when :random_normal, :random_uniform, :glorot_uniform
+        passed_data_type || :float32
+      when :index
+        if @items[0].is_a?(ControlFlow)
+
+          if @items[1].is_const
+            @items[0].items[@items[1].value].data_type
+          else
+            :unknown
+          end
+        else
+          @items[0].data_type
+        end
       else
         return passed_data_type if passed_data_type
+
         if @items[0]
           @items[0].data_type
         elsif @items[1]
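`infer_const` marks an operation constant when every input is constant and the op itself is deterministic, which is what lets the graph and evaluator cache constant subtrees. A rough illustration, assuming the `is_const` reader used elsewhere in this diff, the Tensor operator overloads, and a TF-style `random_uniform(shape)` signature:

```ruby
require 'tensor_stream'
ts = TensorStream

const_expr  = ts.constant(2.0) * ts.constant(3.0)
random_expr = ts.constant(2.0) * ts.random_uniform([2])

p const_expr.is_const  # expected true:  both inputs are constants
p random_expr.is_const # expected false: random_uniform is never treated as constant
```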
@@ -228,6 +253,8 @@ module TensorStream
         return []
       when :zeros, :ones
         return items[0] ? items[0].value : options[:shape]
+      when :zeros_like, :ones_like
+        items[0].shape.shape
       when :shape
         return items[0].shape.shape ? [items[0].shape.shape.size] : nil
       when :matmul
data/lib/tensor_stream/ops.rb
CHANGED
@@ -9,6 +9,10 @@ module TensorStream
       _op(:argmax, input, nil, axis: axis, name: name, dimension: dimension, data_type: output_type)
     end

+    def argmin(input, axis = nil, name: nil, dimension: nil, output_type: :int32)
+      _op(:argmin, input, nil, axis: axis, name: name, dimension: dimension, data_type: output_type)
+    end
+
     def gradients(input, wrt_xs, grad_ys: nil,
                   name: 'gradients',
                   colocate_gradients_with_ops: false,
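`argmin` gets the same signature as `argmax` and builds an `:argmin` op handled by the evaluator changes above. A usage sketch, with expected values worked out by hand:

```ruby
require 'tensor_stream'
ts = TensorStream

a = ts.constant([[1.0, 9.0], [3.0, 2.0]])
sess = ts.session
p sess.run(ts.argmin(a, 0)) # expected [0, 1]: row index of the smallest value per column
p sess.run(ts.argmax(a, 0)) # expected [1, 0]
```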
@@ -157,6 +161,10 @@ module TensorStream
       _op(:sub, input_a, input_b, name: name)
     end

+    def subtract(input_a, input_b, name: nil)
+      sub(input_a, input_b, name: name)
+    end
+
     def max(input_a, input_b, name: nil)
       check_allowed_types(input_a, NUMERIC_TYPES)
       check_allowed_types(input_b, NUMERIC_TYPES)
@@ -176,8 +184,12 @@ module TensorStream
       _op(:print, input, data, message: message, name: name)
     end

-    def negate(input,
-      _op(:negate, input, nil,
+    def negate(input, name: nil)
+      _op(:negate, input, nil, name: name)
+    end
+
+    def negative(input, name: nil)
+      negate(input, name: name)
     end

     def equal(input_a, input_b, name: nil)
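`subtract` and `negative` are thin TensorFlow-style aliases over the existing `sub` and `negate` ops:

```ruby
require 'tensor_stream'
ts = TensorStream

x = ts.constant([1.0, 2.0])
y = ts.constant([3.0, 4.0])

ts.subtract(x, y) # builds the same :sub op as ts.sub(x, y)
ts.negative(x)    # builds the same :negate op as ts.negate(x)
```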
data/lib/tensor_stream/session.rb
CHANGED
@@ -3,15 +3,20 @@ module TensorStream
   class Session
     include StringHelper

-    attr_reader :last_session_context, :closed, :target
+    attr_reader :last_session_context, :closed, :target, :session_cache
     attr_accessor :randomizer

-    def initialize(evaluator = :ruby_evaluator, thread_pool_class: Concurrent::ImmediateExecutor)
+    def initialize(evaluator = :ruby_evaluator, thread_pool_class: Concurrent::ImmediateExecutor, evaluator_options: {})
       @evaluator_class = Object.const_get("TensorStream::Evaluator::#{camelize(evaluator.to_s)}")
       @thread_pool = thread_pool_class.new
       @closed = false
       @session_cache = {}
       @randomizer = {}
+      @evaluator_options = evaluator_options
+    end
+
+    def clear_session_cache
+      @session_cache = {}
     end

     def self.default_session
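Sessions can now carry evaluator-specific options and expose their result cache, and `clear_session_cache` resets it. A minimal sketch using the default Ruby evaluator (no particular option keys are assumed):

```ruby
require 'tensor_stream'

sess = TensorStream::Session.new(:ruby_evaluator, evaluator_options: {})
sess.clear_session_cache # new helper: empties the per-session result cache
```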
@@ -37,7 +42,9 @@ module TensorStream
         end
       end

-
+      @evaluator_options[:thread_pool] = @thread_pool
+      @evaluator_options[:log_intermediates] = options[:log_intermediates]
+      evaluator = @evaluator_class.new(self, context.merge!(retain: options[:retain]), @evaluator_options)

       execution_context = {}
       @last_session_context = context
data/lib/tensor_stream/variable.rb
CHANGED
@@ -1,7 +1,7 @@
 module TensorStream
   # Class that defines a TensorStream variable
   class Variable < Tensor
-    attr_accessor :trainable, :options
+    attr_accessor :trainable, :options, :buffer
     def initialize(data_type, rank, shape, options = {})
       setup_initial_state(options)

@@ -10,6 +10,7 @@ module TensorStream
       @data_type = data_type
       @rank = rank
       @value = nil
+      @is_const = false
       @name = [TensorStream.get_variable_scope, options[:name] || build_name].compact.reject(&:empty?).join('/')
       @initalizer_tensor = options[:initializer] ? options[:initializer] : _variable_scope.initializer || TensorStream.glorot_uniform_initializer
       if shape.nil? && @initalizer_tensor && @initalizer_tensor.shape
@@ -36,6 +37,11 @@ module TensorStream
     end

     def read_value
+      if buffer && buffer.dirty
+        @value = buffer.to_ruby
+        buffer.dirty = false
+      end
+
       @value
     end

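`read_value` now copies a dirty device buffer back to Ruby before returning, which is how OpenCL-resident variables stay readable from Ruby code. A stand-in illustration; the real OpenclBuffer interface beyond `dirty` and `to_ruby` is not shown in this hunk, and the `variable` signature is assumed from the samples:

```ruby
require 'tensor_stream'

# Hypothetical stand-in for an OpenCL buffer; only the two members used by
# read_value are modelled here.
FakeBuffer = Struct.new(:dirty) do
  def to_ruby
    [1.0, 2.0] # pretend this is data copied back from the device
  end
end

v = TensorStream.variable([0.0, 0.0], name: 'w')
v.buffer = FakeBuffer.new(true)
p v.read_value # => [1.0, 2.0]; the buffer is marked clean afterwards
```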