tensor_stream 0.7.0 → 0.8.0

This diff shows the changes between two publicly released versions of the package, as published to their respective public registries. It is provided for informational purposes only.
Files changed (46)
  1. checksums.yaml +5 -5
  2. data/.rubocop.yml +6 -1
  3. data/CHANGELOG.md +10 -0
  4. data/README.md +35 -0
  5. data/lib/tensor_stream.rb +2 -2
  6. data/lib/tensor_stream/debugging/debugging.rb +2 -1
  7. data/lib/tensor_stream/dynamic_stitch.rb +23 -24
  8. data/lib/tensor_stream/evaluator/base_evaluator.rb +27 -18
  9. data/lib/tensor_stream/evaluator/opencl/kernels/apply_momentum.cl +16 -0
  10. data/lib/tensor_stream/evaluator/opencl/kernels/pack.cl +24 -0
  11. data/lib/tensor_stream/evaluator/opencl/kernels/softmax_cross.cl +6 -1
  12. data/lib/tensor_stream/evaluator/opencl/opencl_buffer.rb +6 -6
  13. data/lib/tensor_stream/evaluator/opencl/opencl_evaluator.rb +237 -107
  14. data/lib/tensor_stream/evaluator/operation_helpers/array_ops_helper.rb +97 -7
  15. data/lib/tensor_stream/evaluator/ruby_evaluator.rb +230 -123
  16. data/lib/tensor_stream/exceptions.rb +1 -0
  17. data/lib/tensor_stream/graph_builder.rb +2 -3
  18. data/lib/tensor_stream/graph_deserializers/protobuf.rb +22 -23
  19. data/lib/tensor_stream/graph_serializers/graphml.rb +26 -29
  20. data/lib/tensor_stream/graph_serializers/pbtext.rb +22 -19
  21. data/lib/tensor_stream/helpers/string_helper.rb +4 -5
  22. data/lib/tensor_stream/math_gradients.rb +141 -77
  23. data/lib/tensor_stream/nn/nn_ops.rb +4 -6
  24. data/lib/tensor_stream/operation.rb +139 -120
  25. data/lib/tensor_stream/ops.rb +36 -3
  26. data/lib/tensor_stream/session.rb +7 -11
  27. data/lib/tensor_stream/tensor.rb +3 -3
  28. data/lib/tensor_stream/tensor_shape.rb +5 -0
  29. data/lib/tensor_stream/train/gradient_descent_optimizer.rb +4 -37
  30. data/lib/tensor_stream/train/momentum_optimizer.rb +48 -0
  31. data/lib/tensor_stream/train/optimizer.rb +129 -0
  32. data/lib/tensor_stream/train/saver.rb +0 -1
  33. data/lib/tensor_stream/train/slot_creator.rb +62 -0
  34. data/lib/tensor_stream/train/utils.rb +11 -12
  35. data/lib/tensor_stream/trainer.rb +3 -0
  36. data/lib/tensor_stream/utils.rb +18 -11
  37. data/lib/tensor_stream/variable.rb +19 -12
  38. data/lib/tensor_stream/variable_scope.rb +1 -1
  39. data/lib/tensor_stream/version.rb +1 -1
  40. data/samples/iris.rb +2 -1
  41. data/samples/linear_regression.rb +3 -1
  42. data/samples/nearest_neighbor.rb +2 -0
  43. data/test_samples/neural_network_raw.py +101 -0
  44. data/test_samples/raw_neural_net_sample.rb +6 -4
  45. data/test_samples/test2.py +73 -27
  46. metadata +9 -3
@@ -25,8 +25,8 @@ module TensorStream
  logits = tf.convert_to_tensor(logits, name: 'logits')
  labels = tf.convert_to_tensor(labels, name: 'labels')
  labels = tf.cast(labels, logits.dtype)
- softmax_logits = _op(:softmax_cross_entropy_with_logits_v2, logits, labels)
- tf.reduce_sum(softmax_logits, tf.rank(logits) - 1)
+ output = _op(:softmax_cross_entropy_with_logits_v2, logits, labels)
+ output[0]
  end
  end
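Note: this hunk (apparently lib/tensor_stream/nn/nn_ops.rb) makes softmax_cross_entropy_with_logits_v2 return the first output of the underlying op, i.e. one loss value per example, instead of a reduce_sum over the last axis. A minimal usage sketch follows; the ts.nn and ts.session helpers and the labels:/logits: keywords are assumptions based on the gem's samples, since the hunk only shows the method body.

# Hedged sketch (assumed API surface): in 0.8.0 the wrapper yields one loss per row.
require 'tensor_stream'

ts = TensorStream
logits = ts.constant([[2.0, 1.0, 0.1]])
labels = ts.constant([[1.0, 0.0, 0.0]])

loss = ts.nn.softmax_cross_entropy_with_logits_v2(labels: labels, logits: logits)
ts.session.run(loss) # expected: one value per row, roughly [0.417]
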
@@ -45,10 +45,8 @@ module TensorStream
  relu_logits = tf.where(cond, logits, zeros)
  neg_abs_logits = tf.where(cond, -logits, logits)

- return tf.add(
- relu_logits - logits * labels,
- tf.log1p(tf.exp(neg_abs_logits)),
- name: name)
+ tf.add(relu_logits - logits * labels,
+ tf.log1p(tf.exp(neg_abs_logits)), name: name)
  end
  end
  end
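For context, this is the standard numerically stable sigmoid cross-entropy: with cond = (logits >= 0), the returned expression is max(x, 0) - x*z + log(1 + exp(-|x|)), which equals the naive -z*log(sigmoid(x)) - (1-z)*log(1-sigmoid(x)) without ever exponentiating a large positive value. A plain-Ruby check of that identity (no TensorStream API involved):

# Sanity check of the stable sigmoid cross-entropy form used in the hunk above.
def naive_xent(x, z)
  sig = 1.0 / (1.0 + Math.exp(-x))
  -z * Math.log(sig) - (1 - z) * Math.log(1 - sig)
end

def stable_xent(x, z)
  [x, 0.0].max - x * z + Math.log(1.0 + Math.exp(-x.abs))
end

x, z = 3.2, 1.0
(naive_xent(x, z) - stable_xent(x, z)).abs < 1e-9 # => true
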
@@ -87,124 +87,124 @@ module TensorStream
  end
  end

- def to_math(name_only = false, max_depth = 99, _cur_depth = 0)
+ def to_math(name_only = false, max_depth = 99, cur_depth = 0)
  return @name if max_depth.zero?

- sub_input = auto_math(inputs[0], name_only, max_depth - 1, _cur_depth + 1)
- sub_input2 = auto_math(inputs[1], name_only, max_depth - 1, _cur_depth + 1) if inputs[1]
+ sub_input = auto_math(inputs[0], name_only, max_depth - 1, cur_depth + 1)
+ sub_input2 = auto_math(inputs[1], name_only, max_depth - 1, cur_depth + 1) if inputs[1]

  out = case operation
- when :argmax
- "argmax(#{sub_input},#{options[:axis]})"
- when :negate
- "-#{sub_input}"
- when :index
- "#{sub_input}[#{sub_input2}]"
- when :slice
- "#{sub_input}[#{sub_input2}]"
- when :assign_sub
- "(#{inputs[0] ? inputs[0].name : 'self'} -= #{auto_math(inputs[1], name_only, 1)})"
- when :assign_add
- "(#{inputs[0] ? inputs[0].name : 'self'} += #{auto_math(inputs[1], name_only, 1)})"
- when :assign
- "(#{inputs[0] ? inputs[0].name : 'self'} = #{auto_math(inputs[1], name_only, 1)})"
- when :sin, :cos, :tanh
- "#{operation}(#{sub_input})"
- when :add
- "(#{sub_input} + #{sub_input2})"
- when :sub
- "(#{sub_input} - #{sub_input2})"
- when :pow
- "(#{sub_input}^#{sub_input2})"
- when :div
- "(#{sub_input} / #{sub_input2})"
- when :mul
- if auto_math(inputs[0]) == 1
- sub_input2
- elsif auto_math(inputs[1]) == 1
- sub_input
- else
- "(#{sub_input} * #{sub_input2})"
- end
- when :sum
- "sum(|#{sub_input}|, axis=#{sub_input2})"
- when :mean
- "mean(|#{sub_input}|, axis=#{sub_input2})"
- when :prod
- "prod(|#{sub_input}|, axis=#{sub_input2})"
- when :gradients
- "gradient(#{sub_input})"
- when :stop_gradient
- sub_input
- when :mat_mul
- "#{sub_input}.matmul(#{sub_input2})"
- when :eye
- "eye(#{sub_input})"
- when :transpose
- "transpose(#{sub_input})"
- when :shape
- "#{sub_input}.shape"
- when :exp
- "e^#{sub_input})"
- when :ones
- "ones(#{sub_input})"
- when :ones_like
- "ones_like(#{sub_input})"
- when :flow_group
- "flow_group(#{inputs.collect { |i| auto_math(i, name_only, max_depth - 1, _cur_depth) }.join(',')})"
- when :zeros
- "zeros(#{sub_input})"
- when :reshape
- "reshape(#{sub_input},#{sub_input2})"
- when :rank
- "#{sub_input}.rank"
- when :cond
- "(#{auto_math(options[:pred], name_only, max_depth - 1, _cur_depth)} ? #{sub_input} : #{sub_input2})"
- when :less
- "#{sub_input} < #{sub_input2}"
- when :less_equal
- "#{sub_input} <= #{sub_input2}"
- when :greater
- "#{sub_input} > #{sub_input2}"
- when :greater_equal
- "#{sub_input} >= #{sub_input2}"
- when :square
- "#{sub_input}\u00B2"
- when :log
- "log(#{sub_input})"
- when :identity
- "identity(#{sub_input})"
- when :print
- "print(#{sub_input})"
- when :pad
- "pad(#{sub_input},#{auto_math(options[:paddings])})"
- when :equal
- "#{sub_input} == #{sub_input2}"
- when :not_equal
- "#{sub_input} != #{sub_input2}"
- when :logical_and
- "#{sub_input} && #{sub_input2}"
- when :sqrt
- "sqrt(#{sub_input})"
- when :log1p
- "log1p(#{sub_input})"
- when :zeros_like
- "zeros_like(#{sub_input})"
- when :where
- "where(#{auto_math(options[:pred], name_only, max_depth - 1, _cur_depth)}, #{sub_input}, #{sub_input2})"
- when :max
- "max(#{sub_input},#{sub_input2})"
- when :cast
- "cast(#{sub_input}, #{data_type})"
- when :broadcast_transform
- "broadcast_transform(#{sub_input},#{sub_input2})"
- when :broadcast_gradient_args
- "broadcast_transform(#{sub_input},#{sub_input2})"
- else
- "#{operation}(#{sub_input})" if sub_input
- "#{operation}(#{sub_input}, #{sub_input2})" if sub_input && sub_input2
- end
- ["\n",(_cur_depth + 1).times.collect { ' ' }, out].flatten.join
+ when :argmax
+ "argmax(#{sub_input},#{options[:axis]})"
+ when :negate
+ "-#{sub_input}"
+ when :index
+ "#{sub_input}[#{sub_input2}]"
+ when :slice
+ "#{sub_input}[#{sub_input2}]"
+ when :assign_sub
+ "(#{inputs[0] ? inputs[0].name : 'self'} -= #{auto_math(inputs[1], name_only, 1)})"
+ when :assign_add
+ "(#{inputs[0] ? inputs[0].name : 'self'} += #{auto_math(inputs[1], name_only, 1)})"
+ when :assign
+ "(#{inputs[0] ? inputs[0].name : 'self'} = #{auto_math(inputs[1], name_only, 1)})"
+ when :sin, :cos, :tanh
+ "#{operation}(#{sub_input})"
+ when :add
+ "(#{sub_input} + #{sub_input2})"
+ when :sub
+ "(#{sub_input} - #{sub_input2})"
+ when :pow
+ "(#{sub_input}^#{sub_input2})"
+ when :div
+ "(#{sub_input} / #{sub_input2})"
+ when :mul
+ if auto_math(inputs[0]) == 1
+ sub_input2
+ elsif auto_math(inputs[1]) == 1
+ sub_input
+ else
+ "(#{sub_input} * #{sub_input2})"
+ end
+ when :sum
+ "sum(|#{sub_input}|, axis=#{sub_input2})"
+ when :mean
+ "mean(|#{sub_input}|, axis=#{sub_input2})"
+ when :prod
+ "prod(|#{sub_input}|, axis=#{sub_input2})"
+ when :gradients
+ "gradient(#{sub_input})"
+ when :stop_gradient
+ sub_input
+ when :mat_mul
+ "#{sub_input}.matmul(#{sub_input2})"
+ when :eye
+ "eye(#{sub_input})"
+ when :transpose
+ "transpose(#{sub_input})"
+ when :shape
+ "#{sub_input}.shape"
+ when :exp
+ "e^#{sub_input})"
+ when :ones
+ "ones(#{sub_input})"
+ when :ones_like
+ "ones_like(#{sub_input})"
+ when :flow_group
+ "flow_group(#{inputs.collect { |i| auto_math(i, name_only, max_depth - 1, cur_depth) }.join(',')})"
+ when :zeros
+ "zeros(#{sub_input})"
+ when :reshape
+ "reshape(#{sub_input},#{sub_input2})"
+ when :rank
+ "#{sub_input}.rank"
+ when :cond
+ "(#{auto_math(options[:pred], name_only, max_depth - 1, cur_depth)} ? #{sub_input} : #{sub_input2})"
+ when :less
+ "#{sub_input} < #{sub_input2}"
+ when :less_equal
+ "#{sub_input} <= #{sub_input2}"
+ when :greater
+ "#{sub_input} > #{sub_input2}"
+ when :greater_equal
+ "#{sub_input} >= #{sub_input2}"
+ when :square
+ "#{sub_input}\u00B2"
+ when :log
+ "log(#{sub_input})"
+ when :identity
+ "identity(#{sub_input})"
+ when :print
+ "print(#{sub_input})"
+ when :pad
+ "pad(#{sub_input},#{auto_math(options[:paddings])})"
+ when :equal
+ "#{sub_input} == #{sub_input2}"
+ when :not_equal
+ "#{sub_input} != #{sub_input2}"
+ when :logical_and
+ "#{sub_input} && #{sub_input2}"
+ when :sqrt
+ "sqrt(#{sub_input})"
+ when :log1p
+ "log1p(#{sub_input})"
+ when :zeros_like
+ "zeros_like(#{sub_input})"
+ when :where
+ "where(#{auto_math(options[:pred], name_only, max_depth - 1, cur_depth)}, #{sub_input}, #{sub_input2})"
+ when :max
+ "max(#{sub_input},#{sub_input2})"
+ when :cast
+ "cast(#{sub_input}, #{data_type})"
+ when :broadcast_transform
+ "broadcast_transform(#{sub_input},#{sub_input2})"
+ when :broadcast_gradient_args
+ "broadcast_transform(#{sub_input},#{sub_input2})"
+ else
+ "#{operation}(#{sub_input})" if sub_input
+ "#{operation}(#{sub_input}, #{sub_input2})" if sub_input && sub_input2
+ end
+ ["\n", Array.new(cur_depth + 1) { ' ' }, out].flatten.join
  end

  def run
@@ -232,7 +232,7 @@ module TensorStream

  axis = inputs[1].is_a?(Tensor) ? inputs[1].value : inputs[1]

- axis = [ axis ] unless axis.is_a?(Array)
+ axis = [axis] unless axis.is_a?(Array)
  return input_shape.each_with_index.map do |s, index|
  next nil if axis.include?(index)
  s
@@ -240,6 +240,7 @@ module TensorStream
  when :reshape
  new_shape = inputs[1] && inputs[1].value ? inputs[1].value : nil
  return nil if new_shape.nil?
+ return nil if inputs[0].shape.nil?

  input_shape = inputs[0].shape.shape
  return new_shape if input_shape.nil?
@@ -257,11 +258,29 @@ module TensorStream
  shape1 = inputs[0].shape.shape.nil? ? nil : inputs[0].shape.shape[0]
  shape2 = inputs[1].shape.shape.nil? ? nil : inputs[1].shape.shape[1]
  return [shape1, shape2]
+ when :transpose
+ return nil unless shape_full_specified(inputs[0])
+ return nil if inputs[1].is_a?(Tensor)
+
+ rank = inputs[0].shape.shape.size
+ perm = inputs[1] || (0...rank).to_a.reverse
+ return perm.map { |p| inputs[0].shape.shape[p] }
+ when :stack
+ return nil unless shape_full_specified(inputs[0])
+
+ axis = options[:axis] || 0
+ new_shape = [inputs.size]
+ inputs[0].shape.shape.inject(new_shape) { |ns, s| ns << s }
+ rank = inputs[0].shape.shape.size + 1
+ axis = rank + axis if axis < 0
+ rotated_shape = Array.new(axis + 1) { new_shape.shift }
+ rotated_shape.rotate! + new_shape
+ when :tile
+ nil
  else
+ return nil if inputs[0].nil?
  return inputs[0].shape.shape if inputs.size == 1
- if inputs.size == 2 && inputs[0] && inputs[1]
- return TensorShape.infer_shape(inputs[0].shape.shape, inputs[1].shape.shape)
- end
+ return TensorShape.infer_shape(inputs[0].shape.shape, inputs[1].shape.shape) if inputs.size == 2 && inputs[0] && inputs[1]
  end

  nil
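The new :stack branch infers the output shape by prepending the number of stacked inputs and rotating the first axis + 1 entries so the count lands at position axis. A plain-Ruby replay of that arithmetic (no TensorStream types), e.g. stacking three [2, 3] tensors:

# Standalone replay of the :stack shape-inference logic added above.
def stack_shape(input_shape, num_inputs, axis)
  new_shape = [num_inputs] + input_shape        # prepend the stack count
  rank = input_shape.size + 1
  axis = rank + axis if axis < 0                # normalize a negative axis
  rotated = Array.new(axis + 1) { new_shape.shift }
  rotated.rotate! + new_shape                   # move the count to position `axis`
end

stack_shape([2, 3], 3, 0)  # => [3, 2, 3]
stack_shape([2, 3], 3, 1)  # => [2, 3, 3]
stack_shape([2, 3], 3, -1) # => [2, 3, 3]
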
@@ -287,8 +306,8 @@ module TensorStream
  input.flatten.compact.each do |t|
  t.send(:setup_output, self) if t.is_a?(Tensor)
  end
- else
- input.send(:setup_output, self) if input.is_a?(Tensor) && (input.name != self.name)
+ elsif input.is_a?(Tensor) && (input.name != name)
+ input.send(:setup_output, self)
  end
  end
  end
@@ -245,7 +245,7 @@ module TensorStream
  ##
  # Concatenates tensors along one dimension.
  def concat(values, axis, name: 'concat')
- _op(:concat, values, nil, axis: axis, name: name)
+ _op(:concat, *values, axis: axis, name: name)
  end

  ##
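concat now splats the values into the op, so each tensor becomes its own input of the :concat node instead of an array plus a nil placeholder. A hedged usage sketch (ts = TensorStream and the constant/session helpers are assumed from the gem's samples):

# Hedged sketch for the reworked concat wrapper.
ts = TensorStream
a = ts.constant([[1, 2], [3, 4]])
b = ts.constant([[5, 6], [7, 8]])

joined = ts.concat([a, b], 0)  # rows of b appended below a
ts.session.run(joined)         # expected: [[1, 2], [3, 4], [5, 6], [7, 8]]
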
@@ -318,6 +318,13 @@ module TensorStream
  _op(:acos, input, name: name)
  end

+ ##
+ # Computes atan of input element-wise
+ def atan(input, name: nil)
+ check_allowed_types(input, FLOATING_POINT_TYPES)
+ _op(:atan, input, name: name)
+ end
+
  ##
  # Returns x - y element-wise.
  #
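The new atan op is element-wise and, via check_allowed_types, restricted to floating-point inputs. A one-line hedged sketch using the same assumed helpers as above:

ts = TensorStream
ts.session.run(ts.atan(ts.constant([0.0, 1.0]))) # expected: [0.0, ~0.7853982]
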
@@ -582,8 +589,8 @@ module TensorStream

  ##
  # Transposes a. Permutes the dimensions according to perm.
- def transpose(tensor, perm: nil, name: 'transpose')
- _op(:transpose, tensor, nil, perm: perm, name: name)
+ def transpose(tensor, perm = nil, name: 'transpose')
+ _op(:transpose, tensor, perm, name: name)
  end

  ##
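transpose's permutation moves from a perm: keyword to a positional argument that is now actually forwarded to the op (0.7.0 passed nil). A hedged sketch of both call forms, with the same assumed helpers:

# Hedged sketch: perm is now positional and optional (nil reverses the dimensions).
ts = TensorStream
m = ts.constant([[1, 2, 3], [4, 5, 6]])

ts.session.run(ts.transpose(m))         # expected: [[1, 4], [2, 5], [3, 6]]
ts.session.run(ts.transpose(m, [1, 0])) # explicit permutation, same result for rank 2
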
@@ -612,5 +619,31 @@ module TensorStream
  op_result = _op(:broadcast_gradient_args, shape_a, shape_b, name: name)
  [op_result[0], op_result[1]]
  end
+
+ ##
+ # Gather slices from params and axis according to indices.
+ #
+ def gather(params, indices, validate_indices: nil,
+ name: nil,
+ axis: 0)
+ _op(:gather, params, indices, validate_indices: validate_indices, name: name, axis: axis)
+ end
+
+ def stack(values, axis: 0, name: 'stack')
+ _op(:stack, *values, axis: axis, name: name)
+ end
+
+ def setdiff1d(x, y, index_dtype: :int32, name: nil)
+ result = _op(:setdiff1d, x, y, index_dtype: index_dtype, name: name)
+ [result[0], result[1]]
+ end
+
+ def cumprod(x, axis: 0, exclusive: false, reverse: false, name: nil)
+ _op(:cumprod, x, axis: axis, exclusive: exclusive, reverse: reverse, name: name)
+ end
+
+ def invert_permutation(x, name: nil)
+ _op(:invert_permutation, x, name: name)
+ end
  end
  end
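Besides gather, this hunk adds stack, setdiff1d, cumprod and invert_permutation wrappers mirroring their TensorFlow counterparts. A hedged usage sketch; the expected values follow TensorFlow semantics and assume these ops are exposed on the TensorStream module and implemented by the Ruby evaluator (its diff in this release suggests they are):

# Hedged sketch of the new 0.8.0 ops; expected results follow TensorFlow semantics.
ts = TensorStream
sess = ts.session

sess.run(ts.gather(ts.constant([10, 20, 30]), [2, 0]))         # expected: [30, 10]
sess.run(ts.stack([ts.constant([1, 2]), ts.constant([3, 4])])) # expected: [[1, 2], [3, 4]]

out, idx = ts.setdiff1d(ts.constant([1, 2, 3, 4, 5]), ts.constant([3, 5]))
sess.run(out) # expected: [1, 2, 4]
sess.run(idx) # expected: [0, 1, 3]

sess.run(ts.cumprod(ts.constant([1, 2, 3, 4])))                  # expected: [1, 2, 6, 24]
sess.run(ts.cumprod(ts.constant([1, 2, 3, 4]), exclusive: true)) # expected: [1, 1, 2, 6]
sess.run(ts.invert_permutation(ts.constant([2, 0, 1])))          # expected: [1, 2, 0]
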
@@ -22,7 +22,7 @@ module TensorStream
  if evaluators.empty?
  TensorStream::Evaluator.default_evaluators
  else
- evaluators.collect { |name| Object.const_get("TensorStream::Evaluator::#{camelize(name.to_s)}") }
+ evaluators.collect { |name| Object.const_get("TensorStream::Evaluator::#{camelize(name.to_s)}") }
  end
  elsif evaluators.nil?
  TensorStream::Evaluator.default_evaluators
@@ -52,9 +52,7 @@ module TensorStream
  # scan for placeholders and assign value
  if options[:feed_dict]
  options[:feed_dict].keys.each do |k|
- if k.is_a?(Placeholder)
- context[k.name.to_sym] = options[:feed_dict][k]
- end
+ context[k.name.to_sym] = options[:feed_dict][k] if k.is_a?(Placeholder)
  end
  end
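Behavior is unchanged here, only condensed to a guard-clause one-liner: feed_dict keys that are Placeholder instances are copied into the run context under their node name. A hedged end-to-end sketch (the placeholder helper and the tensor operator overloads are assumed from the gem's samples):

# Hedged sketch: placeholder values supplied via feed_dict are bound by name at run time.
ts = TensorStream
x = ts.placeholder(:float32)
y = x * 2.0

ts.session.run(y, feed_dict: { x => 3.0 }) # expected: 6.0
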
@@ -77,7 +75,7 @@ module TensorStream
  end

  def list_devices
- TensorStream::Evaluator.evaluators.collect do |k, v|
+ TensorStream::Evaluator.evaluators.collect do |_k, v|
  v[:class].query_supported_devices.collect do |device|
  device
  end
@@ -102,10 +100,10 @@ module TensorStream

  def dump_ops(tensor, selector)
  graph = tensor.graph
- graph.nodes.select { |k, v| selector.call(k, v) }.collect { |k, node|
+ graph.nodes.select { |k, v| selector.call(k, v) }.collect do |k, node|
  next unless @last_session_context[node.name]
  "#{k} #{node.to_math(true, 1)} = #{@last_session_context[node.name]}"
- }.compact
+ end.compact
  end

  def graph_ml(tensor, filename)
@@ -115,9 +113,7 @@ module TensorStream
  def delegate_to_evaluator(tensor_arr, session_context, context)
  arr = tensor_arr.is_a?(Array) ? tensor_arr : [tensor_arr]
  result = arr.collect do |tensor|
- if session_context[:_cache][:placement][tensor.name].nil?
- session_context[:_cache][:placement][tensor.name] = assign_evaluator(tensor)
- end
+ session_context[:_cache][:placement][tensor.name] = assign_evaluator(tensor) if session_context[:_cache][:placement][tensor.name].nil?

  session_context[:_cache][:placement][tensor.name][1].run_with_buffer(tensor, session_context, context)
  end
@@ -144,7 +140,7 @@ module TensorStream

  raise "no evaluator available to execute #{tensor.operation}" if device.nil?

- key = "#{device.evaluator.to_s}/#{device.name}"
+ key = "#{device.evaluator}/#{device.name}"
  if @evaluators.key?(key)
  @evaluators[key]
  else
@@ -4,9 +4,9 @@ module TensorStream
  # Base class that defines a tensor like interface
  class Tensor
  include OpHelper
-
+ attr_reader :graph
  attr_accessor :name, :data_type, :shape, :rank, :native_buffer, :is_const,
- :value, :breakpoint, :internal, :source, :given_name, :graph,
+ :value, :breakpoint, :internal, :source, :given_name,
  :consumers, :outputs, :device

  def initialize(data_type, rank, shape, options = {})
@@ -174,7 +174,7 @@ module TensorStream
  end

  def op
- is_const ? _op(:const, self, nil, name: name) : _op(:variable, self, nil, name: name)
+ @op ||= is_const ? _op(:const, self, nil, name: name) : _op(:variable, self, nil, name: name)
  end

  def eval(options = {})