tensor_stream 1.0.4 → 1.0.9

Files changed (56)
  1. checksums.yaml +4 -4
  2. data/.gitignore +1 -0
  3. data/CHANGELOG.md +12 -2
  4. data/Dockerfile +1 -1
  5. data/USAGE_GUIDE.md +68 -0
  6. data/lib/tensor_stream.rb +1 -0
  7. data/lib/tensor_stream/evaluator/base_evaluator.rb +21 -1
  8. data/lib/tensor_stream/evaluator/evaluator.rb +1 -0
  9. data/lib/tensor_stream/evaluator/evaluator_utils.rb +20 -0
  10. data/lib/tensor_stream/evaluator/operation_helpers/array_ops_helper.rb +60 -0
  11. data/lib/tensor_stream/evaluator/ruby/array_ops.rb +53 -1
  12. data/lib/tensor_stream/evaluator/ruby/images_ops.rb +26 -0
  13. data/lib/tensor_stream/evaluator/ruby/math_ops.rb +60 -5
  14. data/lib/tensor_stream/evaluator/ruby/nn_ops.rb +25 -29
  15. data/lib/tensor_stream/evaluator/ruby/random_ops.rb +7 -11
  16. data/lib/tensor_stream/evaluator/ruby/storage_manager.rb +40 -0
  17. data/lib/tensor_stream/evaluator/ruby/variable_ops.rb +74 -0
  18. data/lib/tensor_stream/evaluator/ruby_evaluator.rb +31 -77
  19. data/lib/tensor_stream/generated_stub/ops.rb +256 -166
  20. data/lib/tensor_stream/generated_stub/stub_file.erb +4 -4
  21. data/lib/tensor_stream/graph.rb +3 -3
  22. data/lib/tensor_stream/graph_deserializers/yaml_loader.rb +4 -6
  23. data/lib/tensor_stream/helpers/infer_shape.rb +1 -7
  24. data/lib/tensor_stream/helpers/tensor_mixins.rb +10 -1
  25. data/lib/tensor_stream/images.rb +4 -0
  26. data/lib/tensor_stream/math/math_ops.rb +22 -0
  27. data/lib/tensor_stream/math_gradients.rb +15 -1
  28. data/lib/tensor_stream/nn/embedding_lookup.rb +114 -0
  29. data/lib/tensor_stream/nn/nn_ops.rb +16 -0
  30. data/lib/tensor_stream/op_maker.rb +36 -3
  31. data/lib/tensor_stream/operation.rb +8 -20
  32. data/lib/tensor_stream/ops.rb +14 -11
  33. data/lib/tensor_stream/ops/bias_add.rb +16 -0
  34. data/lib/tensor_stream/ops/equal.rb +4 -0
  35. data/lib/tensor_stream/ops/greater.rb +4 -0
  36. data/lib/tensor_stream/ops/greater_equal.rb +4 -0
  37. data/lib/tensor_stream/ops/less.rb +19 -0
  38. data/lib/tensor_stream/ops/less_equal.rb +4 -0
  39. data/lib/tensor_stream/ops/not_equal.rb +19 -0
  40. data/lib/tensor_stream/ops/rsqrt.rb +11 -0
  41. data/lib/tensor_stream/ops/strided_slice.rb +24 -0
  42. data/lib/tensor_stream/ops/sum.rb +4 -2
  43. data/lib/tensor_stream/ops/top_k.rb +23 -0
  44. data/lib/tensor_stream/session.rb +6 -12
  45. data/lib/tensor_stream/tensor.rb +1 -0
  46. data/lib/tensor_stream/tensor_shape.rb +32 -1
  47. data/lib/tensor_stream/train/saver.rb +2 -3
  48. data/lib/tensor_stream/utils.rb +18 -13
  49. data/lib/tensor_stream/utils/freezer.rb +5 -1
  50. data/lib/tensor_stream/utils/py_ports.rb +11 -0
  51. data/lib/tensor_stream/variable.rb +9 -6
  52. data/lib/tensor_stream/version.rb +1 -1
  53. data/samples/word_embeddings/word_embedding_1.rb +192 -0
  54. data/samples/word_embeddings/word_embedding_2.rb +203 -0
  55. data/tensor_stream.gemspec +7 -2
  56. metadata +67 -10
data/lib/tensor_stream/evaluator/ruby/nn_ops.rb
@@ -7,22 +7,20 @@ module TensorStream
       target_var, learning_rate, delta = inputs
       assign = tensor.inputs[0] || tensor

-      assign.container = process_vector_math_op(tensor, target_var, delta, context) { |t, u| t - u * learning_rate }
-      assign.container
+      var_assign_value(assign, process_vector_math_op(tensor, target_var, delta, context) { |t, u| t - u * learning_rate })
     end

     register_op :apply_momentum do |_context, tensor, inputs|
       target_var, momentum_var, learning_rate, grad, momentum = inputs
       assign = tensor.inputs[0] || tensor
       assign_acc = tensor.inputs[1]
-      assign_acc.container = multi_array_op(->(t, u) { t * momentum + u }, momentum_var, grad)
-      assign.container = if tensor.options[:use_nesterov]
-        multi_array_op(->(v, g, acc) { v - (g * learning_rate + acc * momentum * learning_rate) }, target_var, grad, momentum_var)
-      else
-        multi_array_op(->(v, acc) { v - acc * learning_rate }, target_var, momentum_var)
-      end
-
-      assign.container
+      var_assign_value(assign_acc, multi_array_op(->(t, u) { t * momentum + u }, momentum_var, grad))
+      var = if tensor.options[:use_nesterov]
+        multi_array_op(->(v, g, acc) { v - (g * learning_rate + acc * momentum * learning_rate) }, target_var, grad, momentum_var)
+      else
+        multi_array_op(->(v, acc) { v - acc * learning_rate }, target_var, momentum_var)
+      end
+      var_assign_value(assign, var)
     end

     register_op :apply_adadelta do |_context, tensor, inputs|
@@ -30,19 +28,18 @@ module TensorStream
       assign = tensor.inputs[0] || tensor
       assign_acc = tensor.inputs[1]
       assign_acc_update = tensor.inputs[2]
-      assign_acc.container = multi_array_op(->(acc_t, grad_t) { acc_t * rho + (grad_t * grad_t) * (1.0 - rho) }, accum, grad)
-      update = multi_array_op(->(acc_update_t, acc_t, grad_t) { Math.sqrt(acc_update_t + epsilon) * (1.0 / Math.sqrt(acc_t + epsilon)) * grad_t }, accum_update, assign_acc.container, grad)
-      assign.container = multi_array_op(->(v, u) { v - (u * lr) }, target_var, update)
-      assign_acc_update.container = multi_array_op(->(acc_update_t, u) { acc_update_t * rho + (u * u) * (1.0 - rho) }, accum_update, update)
+      acc_val = var_assign_value(assign_acc, multi_array_op(->(acc_t, grad_t) { acc_t * rho + (grad_t * grad_t) * (1.0 - rho) }, accum, grad))
+      update = multi_array_op(->(acc_update_t, acc_t, grad_t) { Math.sqrt(acc_update_t + epsilon) * (1.0 / Math.sqrt(acc_t + epsilon)) * grad_t }, accum_update, acc_val, grad)
+      result = var_assign_value(assign, multi_array_op(->(v, u) { v - (u * lr) }, target_var, update))
+      var_assign_value(assign_acc_update,multi_array_op(->(acc_update_t, u) { acc_update_t * rho + (u * u) * (1.0 - rho) }, accum_update, update))

-      assign.container
+      result
     end

     register_op :apply_adagrad do |_context, tensor, inputs|
       target_var, accum, lr, grad = inputs
       assign = tensor.inputs[0] || tensor
-      assign.container = multi_array_op(->(v, a, g) { v - (g * lr * (1.0 / Math.sqrt(a))) }, target_var, accum, grad)
-      assign.container
+      var_assign_value(assign, multi_array_op(->(v, a, g) { v - (g * lr * (1.0 / Math.sqrt(a))) }, target_var, accum, grad))
     end

     register_op :apply_adam do |_context, tensor, inputs|
@@ -52,10 +49,9 @@ module TensorStream
       assign_m = tensor.inputs[1]
       assign_v = tensor.inputs[2]

-      assign_m.container = multi_array_op(->(u_d, g) { u_d + (g - u_d) * (1.0 - beta1_t) }, m, grad)
-      assign_v.container = multi_array_op(->(u_d, v_d) { u_d + (v_d**2 - u_d) * (1.0 - beta2_t)}, v, grad)
-      assign.container = multi_array_op(->(t, m_d, v_d) { t - ((m_d * alpha) / (Math.sqrt(v_d) + epsilon_t)) }, target_var, assign_m.container, assign_v.container)
-      assign.container
+      m_val = var_assign_value(assign_m, multi_array_op(->(u_d, g) { u_d + (g - u_d) * (1.0 - beta1_t) }, m, grad))
+      v_val = var_assign_value(assign_v, multi_array_op(->(u_d, v_d) { u_d + (v_d**2 - u_d) * (1.0 - beta2_t)}, v, grad))
+      var_assign_value(assign, multi_array_op(->(t, m_d, v_d) { t - ((m_d * alpha) / (Math.sqrt(v_d) + epsilon_t)) }, target_var, m_val, v_val))
     end

     register_op :apply_rms_prop do |_context, tensor, inputs|
@@ -63,9 +59,9 @@ module TensorStream
       assign = tensor.inputs[0]
       assign_ms = tensor.inputs[1]
       assign_mom = tensor.inputs[2]
-      assign_ms.container = multi_array_op(->(g, m) { m + (g * g - m) * (1.0 - rho)}, grad, ms)
-      assign_mom.container = multi_array_op(->(mom_t, g, m) { mom_t * momentum + (g * lr) / Math.sqrt(m + epsilon)}, mom, grad, assign_ms.container)
-      assign.container = multi_array_op(->(v, m) { v - m }, var, assign_mom.container)
+      ms_val = var_assign_value(assign_ms, multi_array_op(->(g, m) { m + (g * g - m) * (1.0 - rho)}, grad, ms))
+      mom_val = var_assign_value(assign_mom, multi_array_op(->(mom_t, g, m) { mom_t * momentum + (g * lr) / Math.sqrt(m + epsilon)}, mom, grad, ms_val))
+      var_assign_value(assign, multi_array_op(->(v, m) { v - m }, var, mom_val))
     end

     register_op :apply_centered_rms_prop do |_context, tensor, inputs|
@@ -75,11 +71,11 @@ module TensorStream
       assign_ms = tensor.inputs[2]
       assign_mom = tensor.inputs[3]

-      assign_ms.container = multi_array_op(->(g, m) { m + (g * g - m) * (1.0 - rho) }, grad, ms)
-      assign_mg.container = multi_array_op(->(g, mg_t) { (g - mg_t) * (1.0 - rho) }, grad, mg)
-      denom = multi_array_op(->(s, mg_t) { (s - mg_t * mg_t) + epsilon }, assign_ms.container, mg)
-      assign_mom.container = multi_array_op(->(mom_t, g, d) { mom_t * momentum + (g * lr) / Math.sqrt(d)}, mom, grad, denom)
-      assign.container = multi_array_op(->(v, m) { v - m }, var, assign_mom.container)
+      val_ms = var_assign_value(assign_ms, multi_array_op(->(g, m) { m + (g * g - m) * (1.0 - rho) }, grad, ms))
+      var_assign_value(assign_mg, multi_array_op(->(g, mg_t) { (g - mg_t) * (1.0 - rho) }, grad, mg))
+      denom = multi_array_op(->(s, mg_t) { (s - mg_t * mg_t) + epsilon }, val_ms, mg)
+      val_mom = var_assign_value(assign_mom, multi_array_op(->(mom_t, g, d) { mom_t * momentum + (g * lr) / Math.sqrt(d)}, mom, grad, denom))
+      var_assign_value(assign, multi_array_op(->(v, m) { v - m }, var, val_mom))
     end

     register_op %i[softmax_cross_entropy_with_logits_v2 softmax_cross_entropy_with_logits] do |_context, tensor, inputs|
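
Note: all of the optimizer ops above stop mutating `assign.container` directly and instead route writes through the new `var_assign_value` helper (added to ruby_evaluator.rb below), which stores the value in the evaluator's storage manager and returns it, so intermediate results such as `acc_val` and `m_val` can feed the next update step. A minimal standalone sketch of that contract, using hypothetical `DemoStorage`/`assign` names rather than the gem's actual classes:

    # Sketch of the var_assign_value contract: write the value into
    # per-graph storage, then return it so callers can chain updates.
    class DemoStorage
      def initialize
        @vars = {}
      end

      def assign_value(graph_id, name, value)
        (@vars[graph_id] ||= {})[name] = value
      end

      def read_value(graph_id, name)
        @vars.dig(graph_id, name)
      end
    end

    storage = DemoStorage.new
    assign = ->(name, value) { storage.assign_value(:g1, name, value) }

    # Chained updates, as in apply_adadelta where acc_val feeds the next step.
    acc_val = assign.call(:acc, [1.0, 2.0])
    update  = acc_val.map { |v| v * 0.5 }
    assign.call(:var, update)

    p storage.read_value(:g1, :var) # => [0.5, 1.0]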
data/lib/tensor_stream/evaluator/ruby/random_ops.rb
@@ -9,12 +9,12 @@ module TensorStream

       shape = tensor.options[:shape] || tensor.shape.shape
       fan_in, fan_out = if shape.size.zero?
-                          [1, 1]
-                        elsif shape.size == 1
-                          [1, shape[0]]
-                        else
-                          [shape[0], shape.last]
-                        end
+        [1, 1]
+      elsif shape.size == 1
+        [1, shape[0]]
+      else
+        [shape[0], shape.last]
+      end

       limit = Math.sqrt(6.0 / (fan_in + fan_out))

@@ -50,7 +50,7 @@ module TensorStream
       seed = tensor.options[:seed]
       random = _get_randomizer(tensor, seed)
       r = RandomGaussian.new(tensor.options.fetch(:mean), tensor.options.fetch(:stddev), -> { random.rand })
-      random = _get_randomizer(tensor, seed)
+
       generator = -> { r.rand }
       shape = inputs[0] || tensor.shape.shape
       random_values = Array.new(shape.reduce(:*) || 1) {
@@ -75,9 +75,6 @@ module TensorStream

       norm_min = (minval - mean) / stddev
       norm_max = (maxval - mean) / stddev
-      sqrt_factor = Math.sqrt((norm_min * norm_min) + 4.0)
-      cutoff = 2.0 * Math.exp(0.5 + (norm_min * (norm_min - sqrt_factor)) / 4.0) / (norm_min + sqrt_factor)
-      diff = norm_max - norm_min

       val = random_values.map { |v|
         iterations = 0
@@ -93,7 +90,6 @@ module TensorStream

         pick
       }
-
       TensorShape.reshape(val, shape)
     end
   end
data/lib/tensor_stream/evaluator/ruby/storage_manager.rb (new file)
@@ -0,0 +1,40 @@
+module TensorStream
+  class RubyStorageManager
+    def self.current_storage_manager
+      @storage_manager ||= RubyStorageManager.new
+    end
+
+    def initialize
+      @variables = {}
+    end
+
+    def exists?(graph, name)
+      return false if !@variables.key?(graph.object_id)
+
+      @variables[graph.object_id].key?(name.to_sym)
+    end
+
+    def create_variable(graph, name, value)
+      raise "no name specified" if name.nil?
+
+      @variables[graph.object_id][name.to_sym] = value
+    end
+
+    def assign_value(graph, name, value)
+      raise "no name specified" if name.nil?
+
+      @variables[graph.object_id] ||= {}
+      @variables[graph.object_id][name.to_sym] = value
+    end
+
+    def read_value(graph, name)
+      raise "no name specified" if name.nil?
+
+      @variables[graph.object_id][name.to_sym]
+    end
+
+    def clear_variables(graph)
+      @variables[graph.object_id] = {}
+    end
+  end
+end
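
The storage manager keys variable state by `graph.object_id`, so any object can stand in for a graph when exercising it in isolation. A quick usage sketch, assuming the gem (and with it this class) is loaded:

    require "tensor_stream"

    graph = Object.new # only object_id is used as the key
    sm = TensorStream::RubyStorageManager.current_storage_manager

    sm.assign_value(graph, :weights, [[0.1, 0.2], [0.3, 0.4]])
    sm.exists?(graph, :weights)    # => true
    sm.read_value(graph, :weights) # => [[0.1, 0.2], [0.3, 0.4]]
    sm.clear_variables(graph)
    sm.exists?(graph, :weights)    # => false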
data/lib/tensor_stream/evaluator/ruby/variable_ops.rb (new file)
@@ -0,0 +1,74 @@
+module TensorStream
+  ## Collection of machine learning related ops
+  module VariableOps
+    def self.included(klass)
+      klass.class_eval do
+        register_op :variable_v2 do |_context, tensor, _inputs|
+          value = var_read_value(tensor)
+          raise "variable #{tensor.options[:var_name]} not initalized" if value.nil?
+
+          value
+        end
+
+        register_op :assign do |context, tensor, inputs|
+          var_assign_value(tensor, inputs[0])
+        end
+
+        register_op :assign_add, no_eval: true do |context, tensor, inputs|
+          current_val = var_read_value(tensor)
+
+          raise "variable #{tensor.options[:var_name]} not initialized" if current_val.nil?
+          eval_a, eval_b = broadcast(current_val, inputs[0])
+          result = multi_array_op(->(var, val) { var + val }, eval_a, eval_b)
+          var_assign_value(tensor, result)
+        end
+
+        register_op :assign_sub do |context, tensor, inputs|
+          current_val = var_read_value(tensor)
+          raise "variable #{tensor.options[:var_name]} not initialized" if current_val.nil?
+          eval_a, eval_b = broadcast(current_val, inputs[0])
+          result = multi_array_op(->(var, val) { var - val }, eval_a, eval_b)
+          var_assign_value(tensor, result)
+        end
+
+        register_op :save_ts do |_context, tensor, inputs|
+          outputfile = inputs[0]
+          inputs = tensor.inputs.dup
+
+          inputs.shift
+          variables = {}
+          inputs.each do |savable|
+            val = var_read_value(savable)
+
+            packed_data = Zlib::Deflate.deflate(TensorStream::Packer.pack(val, savable.data_type))
+            variables[savable.options[:var_name]] = {
+              "shape" => shape_eval(val),
+              "data" => Base64.strict_encode64(packed_data),
+            }
+          end
+
+          File.write(outputfile, {"variables" => variables}.to_yaml)
+          nil
+        end
+
+        register_op :restore_ts do |_context, tensor, inputs|
+          inputs = inputs.dup
+          filename = inputs.shift
+          tensor_names = inputs
+
+          input_dump = YAML.safe_load(File.read(filename), [Symbol])
+          vars = tensor.graph.get_collection(GraphKeys::GLOBAL_VARIABLES)
+          vars.select! { |v| input_dump["variables"].key?(v.name) && tensor_names.include?(v.name) }
+          vars.each do |variable|
+            data = TensorStream::Packer.unpack(Zlib::Inflate.inflate(Base64.decode64(input_dump["variables"][variable.name]["data"])), variable.data_type)
+            shape = input_dump["variables"][variable.name]["shape"]
+            variable.buffer = nil
+            var_assign_value(variable, TensorShape.reshape(data, shape))
+          end
+
+          nil
+        end
+      end
+    end
+  end
+end
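
With variable state now held by the storage manager instead of on the variable's `container`, the user-facing assign semantics are meant to be unchanged. A small end-to-end sketch using the usual public session API (standard tensor_stream usage, not part of this diff):

    require "tensor_stream"

    ts = TensorStream
    v = ts.variable(10.0, name: "counter")
    inc = v.assign_add(5.0)

    sess = ts.session
    sess.run(ts.global_variables_initializer)
    sess.run(inc)
    puts sess.run(v) # expected => 15.0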
data/lib/tensor_stream/evaluator/ruby_evaluator.rb
@@ -2,12 +2,14 @@ require "tensor_stream/evaluator/operation_helpers/random_gaussian"
 require "tensor_stream/evaluator/operation_helpers/array_ops_helper"
 require "tensor_stream/evaluator/operation_helpers/math_helper"
 require "tensor_stream/evaluator/base_evaluator"
+require "tensor_stream/evaluator/ruby/storage_manager"
 require "tensor_stream/evaluator/ruby/math_ops"
 require "tensor_stream/evaluator/ruby/nn_ops"
 require "tensor_stream/evaluator/ruby/array_ops"
 require "tensor_stream/evaluator/ruby/random_ops"
 require "tensor_stream/evaluator/ruby/images_ops"
 require "tensor_stream/evaluator/ruby/check_ops"
+require "tensor_stream/evaluator/ruby/variable_ops"

 module TensorStream
   module Evaluator
@@ -41,6 +43,11 @@ module TensorStream
       include TensorStream::RandomOps
       include TensorStream::ImagesOps
       include TensorStream::CheckOps
+      include TensorStream::VariableOps
+
+      def self.get_storage_manager
+        RubyStorageManager.current_storage_manager
+      end

       def run(tensor, execution_context)
         return tensor.map { |t| run(t, execution_context) } if tensor.is_a?(Array) && !tensor.empty? && tensor[0].is_a?(Tensor)
@@ -49,12 +56,12 @@ module TensorStream

         child_context = execution_context.dup
         res = if tensor.is_a?(Operation)
-                eval_operation(tensor, child_context)
-              elsif !tensor.is_a?(Tensor)
-                tensor
-              else
-                tensor.op
-              end
+          eval_operation(tensor, child_context)
+        elsif !tensor.is_a?(Tensor)
+          tensor
+        else
+          tensor.op
+        end
         execution_context.deep_merge!(returns: child_context[:returns])
         res
       end
@@ -77,11 +84,23 @@ module TensorStream
           break unless tensor.is_a?(Tensor)
         end

-        tensor.is_a?(OutputGroup) ? tensor.outputs[0] : tensor
+        tensor.is_a?(OutputGroup) ? tensor.outputs : tensor
       end

       protected

+      def var_read_value(tensor)
+        @storage_manager ||= TensorStream::RubyStorageManager.current_storage_manager
+        @storage_manager.read_value(tensor.graph, tensor.options[:var_name])
+      end
+
+      def var_assign_value(tensor, value)
+        @storage_manager ||= TensorStream::RubyStorageManager.current_storage_manager
+        @storage_manager.assign_value(tensor.graph, tensor.options[:var_name] || tensor.name, value)
+
+        value
+      end
+
       def prepare_input(tensor, context, options = {})
         return nil unless tensor

@@ -154,37 +173,10 @@ module TensorStream
         end
       end

-      register_op :variable_v2, no_eval: true do |_context, tensor, _inputs|
-        value = tensor.options[:container].read_value
-        raise "variable #{tensor.options[:container].name} not initalized" if value.nil?
-
-        value
-      end
-
       register_op :stop_gradient, no_eval: true do |_context, _tensor, inputs|
         inputs[0]
       end

-      register_op :assign, noop: true do |context, tensor, _inputs|
-        assign = tensor.inputs[0] || tensor
-        assign.container = global_eval(tensor, tensor.inputs[1], context)
-        assign.container
-      end
-
-      register_op :assign_add, noop: true do |context, tensor, _inputs|
-        assign = tensor.inputs[0] || tensor
-
-        assign.container = process_vector_math_op(tensor, tensor.inputs[0], tensor.inputs[1], context) { |t, u| t + u }
-        assign.container
-      end
-
-      register_op :assign_sub, noop: true do |context, tensor, _inputs|
-        assign = tensor.inputs[0] || tensor
-
-        assign.container = process_vector_math_op(tensor, tensor.inputs[0], tensor.inputs[1], context) { |t, u| t - u }
-        assign.container
-      end
-
       register_op :less do |context, tensor, inputs|
         a, b = inputs
         call_vector_op(tensor, :less, a, b, context) { |t, u| t < u }
@@ -236,44 +228,6 @@ module TensorStream
         softmax(inputs[0])
       end

-      register_op :save_ts do |_context, tensor, inputs|
-        outputfile = inputs[0]
-        inputs = tensor.inputs.dup
-
-        inputs.shift
-        variables = {}
-        inputs.each do |savable|
-          val = savable.container
-          packed_data = Zlib::Deflate.deflate(TensorStream::Packer.pack(val, savable.data_type))
-          variables[savable.name] = {
-            "shape" => shape_eval(val),
-            "data" => Base64.strict_encode64(packed_data),
-          }
-        end
-
-        File.write(outputfile, {"variables" => variables}.to_yaml)
-        nil
-      end
-
-      register_op :restore_ts do |_context, tensor, inputs|
-        inputs = inputs.dup
-        filename = inputs.shift
-        tensor_names = inputs
-
-        input_dump = YAML.safe_load(File.read(filename), [Symbol])
-        vars = tensor.graph.get_collection(GraphKeys::GLOBAL_VARIABLES)
-
-        vars.select! { |v| input_dump["variables"].key?(v.name) && tensor_names.include?(v.name) }
-        vars.each do |variable|
-          data = TensorStream::Packer.unpack(Zlib::Inflate.inflate(Base64.decode64(input_dump["variables"][variable.name]["data"])), variable.data_type)
-          shape = input_dump["variables"][variable.name]["shape"]
-          variable.buffer = nil
-          variable.value = TensorShape.reshape(data, shape)
-        end
-
-        nil
-      end
-
       register_op :check_numerics do |context, tensor, inputs|
         message = tensor.options[:message]
         call_op(inputs[0], context) do |t, _b|
@@ -286,7 +240,7 @@ module TensorStream
       def eval_operation(tensor, child_context)
         return @context[tensor.name] if @context.key?(tensor.name)

-        # puts "ruby eval #{object_id}: #{tensor.name}"
+        # puts "ruby eval #{tensor.operation} -> #{object_id}: #{tensor.name}"
         invoke(tensor, child_context).tap do |result|
           # puts "result done ruby #{object_id}: #{tensor.name}"
           # assertions to make sure inferred shapes == actual evaluated shapes
@@ -314,10 +268,10 @@ module TensorStream
           @context[tensor.name] = result
         end
       rescue EvaluatorExcecutionException => e
-          raise e, "error #{e.message} while evaluating #{tensor.name} defined at #{tensor.source}"
+        raise e, "error #{e.message} while evaluating #{tensor.name} defined at #{tensor.source}"
       rescue TensorStreamError => e
-          raise e, "error #{e.message} while evaluating #{tensor.name} defined at #{tensor.source}"
-      rescue => e
+        raise e, "error #{e.message} while evaluating #{tensor.name} defined at #{tensor.source}"
+      rescue StandardError => e
         puts e.message
         puts e.backtrace.join("\n")
         raise EvaluatorExcecutionException.new(e, tensor), "error #{e.message} while evaluating #{tensor.name} : #{tensor.to_math(true, 1)} defined at #{tensor.source}"
@@ -379,7 +333,7 @@ module TensorStream
         elem = args[0]
         if elem.is_a?(Array)
           elem.each_with_index.collect do |_item, index|
-            indexed_args = args.collect { |a| a[index] }
+            indexed_args = args.collect { |a| a = a.is_a?(Array) ? a : [a]; a[index] }
             multi_array_op(func, *indexed_args)
           end
         else
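
The `multi_array_op` tweak above guards against scalar arguments mixed in with arrays: a scalar is wrapped in a one-element array before indexing, so `[]` is never called on a Float. Illustrated on plain Ruby values:

    # Plain-Ruby illustration of the new guard, at index 0:
    args = [[1.0, 2.0], 0.5]
    index = 0
    indexed = args.collect { |a| a = a.is_a?(Array) ? a : [a]; a[index] }
    p indexed # => [1.0, 0.5] (previously 0.5[0] raised NoMethodError)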
data/lib/tensor_stream/generated_stub/ops.rb
@@ -9,12 +9,12 @@ module TensorStream
    #
    # This operation supports broadcasting
    #
-    # Params:
-    # +input_a+:: tensor X
-    # +input_b+:: tensor Y
+    # @param input_a tensor X
+    # @param input_b tensor Y
    #
    # Options:
-    # +:name+:: Optional name
+    # @option name Optional name
+    # @return Tensor
    def add(input_a, input_b, name: nil)
      input_a, input_b = apply_data_type_coercion(input_a, input_b)
      _op(:add, input_a, input_b, name: name)
@@ -25,14 +25,14 @@ module TensorStream
    # Returns the index with the largest value across axes of a tensor.
    #
    #
-    # Params:
-    # +input_a+:: tensor X (of type NUMERIC_TYPES)
-    # +axis+:: Describes which axis of the input tensor to reduce across. For vectors, use axis = 0 (of type INTEGER_TYPES)
+    # @param input_a tensor X (of type NUMERIC_TYPES)
+    # @param axis Describes which axis of the input tensor to reduce across. For vectors, use axis = 0 (of type INTEGER_TYPES)
    #
    # Options:
-    # +:name+:: Optional name
-    # +:dimension+:: Same as axis
-    # +:output_type+:: Output data type defaults to int32 default (:int32)
+    # @option name Optional name
+    # @option dimension Same as axis
+    # @option output_type Output data type defaults to int32 default (:int32)
+    # @return Tensor
    def argmax(input_a, axis = nil, name: nil, dimension: nil, output_type: :int32)
      check_allowed_types(input_a, TensorStream::Ops::NUMERIC_TYPES)
      check_allowed_types(axis, TensorStream::Ops::INTEGER_TYPES)
@@ -44,14 +44,14 @@ module TensorStream
    # Returns the index with the smallest value across axes of a tensor.
    #
    #
-    # Params:
-    # +input_a+:: tensor X (of type NUMERIC_TYPES)
-    # +axis+:: Describes which axis of the input tensor to reduce across. For vectors, use axis = 0 (of type INTEGER_TYPES)
+    # @param input_a tensor X (of type NUMERIC_TYPES)
+    # @param axis Describes which axis of the input tensor to reduce across. For vectors, use axis = 0 (of type INTEGER_TYPES)
    #
    # Options:
-    # +:name+:: Optional name
-    # +:dimension+:: Same as axis
-    # +:output_type+:: Output data type defaults to int32 default (:int32)
+    # @option name Optional name
+    # @option dimension Same as axis
+    # @option output_type Output data type defaults to int32 default (:int32)
+    # @return Tensor
    def argmin(input_a, axis = nil, name: nil, dimension: nil, output_type: :int32)
      check_allowed_types(input_a, TensorStream::Ops::NUMERIC_TYPES)
      check_allowed_types(axis, TensorStream::Ops::INTEGER_TYPES)
@@ -63,11 +63,11 @@ module TensorStream
    # Returns element-wise smallest integer in not less than x
    #
    #
-    # Params:
-    # +input_a+:: tensor X (of type FLOATING_POINT_TYPES)
+    # @param input_a tensor X (of type FLOATING_POINT_TYPES)
    #
    # Options:
-    # +:name+:: Optional name
+    # @option name Optional name
+    # @return Tensor
    def ceil(input_a, name: nil)
      check_allowed_types(input_a, TensorStream::Ops::FLOATING_POINT_TYPES)
      _op(:ceil, input_a, name: name)
@@ -78,11 +78,11 @@ module TensorStream
    # Computes cos of input element-wise.
    #
    #
-    # Params:
-    # +input_a+:: tensor X (of type FLOATING_POINT_TYPES)
+    # @param input_a tensor X (of type FLOATING_POINT_TYPES)
    #
    # Options:
-    # +:name+:: Optional name
+    # @option name Optional name
+    # @return Tensor
    def cos(input_a, name: nil)
      check_allowed_types(input_a, TensorStream::Ops::FLOATING_POINT_TYPES)
      _op(:cos, input_a, name: name)
@@ -94,12 +94,12 @@ module TensorStream
    #
    # This operation supports broadcasting
    #
-    # Params:
-    # +input_a+:: tensor X
-    # +input_b+:: tensor Y
+    # @param input_a tensor X
+    # @param input_b tensor Y
    #
    # Options:
-    # +:name+:: Optional name
+    # @option name Optional name
+    # @return Tensor
    def div(input_a, input_b, name: nil)
      input_a, input_b = apply_data_type_coercion(input_a, input_b)
      _op(:div, input_a, input_b, name: name)
@@ -111,12 +111,12 @@ module TensorStream
    #
    # This operation supports broadcasting
    #
-    # Params:
-    # +input_a+:: tensor X
-    # +input_b+:: tensor Y
+    # @param input_a tensor X
+    # @param input_b tensor Y
    #
    # Options:
-    # +:name+:: Optional name
+    # @option name Optional name
+    # @return Tensor
    def equal(input_a, input_b, name: nil)
      input_a, input_b = apply_data_type_coercion(input_a, input_b)
      _op(:equal, input_a, input_b, name: name)
@@ -129,12 +129,12 @@ module TensorStream
    # dimension index axis starts at zero; if you specify a negative number for axis it is counted backward from the end.
    #
    #
-    # Params:
-    # +input+:: A tensor
-    # +axis+:: Specifies the dimension index at which to expand the shape of input. Must be in the range [-rank(input) - 1, rank(input)].
+    # @param input A tensor
+    # @param axis Specifies the dimension index at which to expand the shape of input. Must be in the range [-rank(input) - 1, rank(input)].
    #
    # Options:
-    # +:name+:: Optional name
+    # @option name Optional name
+    # @return Tensor
    def expand_dims(input, axis, name: nil)
      _op(:expand_dims, input, axis, name: name)
    end
@@ -144,12 +144,12 @@ module TensorStream
    # This operation creates a tensor of shape dims and fills it with value.
    #
    #
-    # Params:
-    # +dims+:: tensor shape
-    # +value+:: scalar value to fill with
+    # @param dims tensor shape
+    # @param value scalar value to fill with
    #
    # Options:
-    # +:name+:: Optional name
+    # @option name Optional name
+    # @return Tensor
    def fill(dims, value, name: nil)
      _op(:fill, dims, value, name: name)
    end
@@ -159,11 +159,11 @@ module TensorStream
    # Returns element-wise largest integer not greater than x.
    #
    #
-    # Params:
-    # +input_a+:: tensor X (of type FLOATING_POINT_TYPES)
+    # @param input_a tensor X (of type FLOATING_POINT_TYPES)
    #
    # Options:
-    # +:name+:: Optional name
+    # @option name Optional name
+    # @return Tensor
    def floor(input_a, name: nil)
      check_allowed_types(input_a, TensorStream::Ops::FLOATING_POINT_TYPES)
      _op(:floor, input_a, name: name)
@@ -175,12 +175,12 @@ module TensorStream
    #
    # This operation supports broadcasting
    #
-    # Params:
-    # +input_a+:: tensor X
-    # +input_b+:: tensor Y
+    # @param input_a tensor X
+    # @param input_b tensor Y
    #
    # Options:
-    # +:name+:: Optional name
+    # @option name Optional name
+    # @return Tensor
    def floor_div(input_a, input_b, name: nil)
      input_a, input_b = apply_data_type_coercion(input_a, input_b)
      _op(:floor_div, input_a, input_b, name: name)
@@ -192,12 +192,12 @@ module TensorStream
    #
    # This operation supports broadcasting
    #
-    # Params:
-    # +input_a+:: tensor X
-    # +input_b+:: tensor Y
+    # @param input_a tensor X
+    # @param input_b tensor Y
    #
    # Options:
-    # +:name+:: Optional name
+    # @option name Optional name
+    # @return Tensor
    def greater(input_a, input_b, name: nil)
      input_a, input_b = apply_data_type_coercion(input_a, input_b)
      _op(:greater, input_a, input_b, name: name)
@@ -209,29 +209,46 @@ module TensorStream
    #
    # This operation supports broadcasting
    #
-    # Params:
-    # +input_a+:: tensor X
-    # +input_b+:: tensor Y
+    # @param input_a tensor X
+    # @param input_b tensor Y
    #
    # Options:
-    # +:name+:: Optional name
+    # @option name Optional name
+    # @return Tensor
    def greater_equal(input_a, input_b, name: nil)
      input_a, input_b = apply_data_type_coercion(input_a, input_b)
      _op(:greater_equal, input_a, input_b, name: name)
    end


+    ##
+    # Returns the truth value of (x < y) element-wise.
+    #
+    # This operation supports broadcasting
+    #
+    # @param input_a tensor X
+    # @param input_b tensor Y
+    #
+    # Options:
+    # @option name Optional name
+    # @return Tensor
+    def less(input_a, input_b, name: nil)
+      input_a, input_b = apply_data_type_coercion(input_a, input_b)
+      _op(:less, input_a, input_b, name: name)
+    end
+
+
    ##
    # Returns the truth value of (x <= y) element-wise.
    #
    # This operation supports broadcasting
    #
-    # Params:
-    # +input_a+:: tensor X
-    # +input_b+:: tensor Y
+    # @param input_a tensor X
+    # @param input_b tensor Y
    #
    # Options:
-    # +:name+:: Optional name
+    # @option name Optional name
+    # @return Tensor
    def less_equal(input_a, input_b, name: nil)
      input_a, input_b = apply_data_type_coercion(input_a, input_b)
      _op(:less_equal, input_a, input_b, name: name)
@@ -242,11 +259,11 @@ module TensorStream
    # Computes natural logarithm of x element-wise.
    #
    #
-    # Params:
-    # +input+:: tensor X
+    # @param input tensor X
    #
    # Options:
-    # +:name+:: Optional name
+    # @option name Optional name
+    # @return Tensor
    def log(input, name: nil)
      _op(:log, input, name: name)
    end
@@ -257,14 +274,14 @@ module TensorStream
    #
    # This operation supports broadcasting
    #
-    # Params:
-    # +input_a+:: tensor X
-    # +input_b+:: tensor Y
+    # @param input_a tensor X
+    # @param input_b tensor Y
    #
    # Options:
-    # +:transpose_a+:: Transpose matrix A first default (false)
-    # +:transpose_b+:: Transpose matrix B first default (false)
-    # +:name+:: Optional name
+    # @option transpose_a Transpose matrix A first default (false)
+    # @option transpose_b Transpose matrix B first default (false)
+    # @option name Optional name
+    # @return Tensor
    def mat_mul(input_a, input_b, transpose_a: false, transpose_b: false, name: nil)
      input_a, input_b = apply_data_type_coercion(input_a, input_b)
      _op(:mat_mul, input_a, input_b, transpose_a: transpose_a, transpose_b: transpose_b, name: name)
@@ -277,12 +294,12 @@ module TensorStream
    #
    # This operation supports broadcasting
    #
-    # Params:
-    # +input_a+:: tensor X (of type NUMERIC_TYPES)
-    # +input_b+:: tensor Y (of type NUMERIC_TYPES)
+    # @param input_a tensor X (of type NUMERIC_TYPES)
+    # @param input_b tensor Y (of type NUMERIC_TYPES)
    #
    # Options:
-    # +:name+:: Optional name
+    # @option name Optional name
+    # @return Tensor
    def max(input_a, input_b, name: nil)
      check_allowed_types(input_a, TensorStream::Ops::NUMERIC_TYPES)
      check_allowed_types(input_b, TensorStream::Ops::NUMERIC_TYPES)
@@ -296,12 +313,12 @@ module TensorStream
    #
    # This operation supports broadcasting
    #
-    # Params:
-    # +input_a+:: tensor X (of type NUMERIC_TYPES)
-    # +input_b+:: tensor Y (of type NUMERIC_TYPES)
+    # @param input_a tensor X (of type NUMERIC_TYPES)
+    # @param input_b tensor Y (of type NUMERIC_TYPES)
    #
    # Options:
-    # +:name+:: Optional name
+    # @option name Optional name
+    # @return Tensor
    def min(input_a, input_b, name: nil)
      check_allowed_types(input_a, TensorStream::Ops::NUMERIC_TYPES)
      check_allowed_types(input_b, TensorStream::Ops::NUMERIC_TYPES)
@@ -315,12 +332,12 @@ module TensorStream
    #
    # This operation supports broadcasting
    #
-    # Params:
-    # +input_a+:: tensor X
-    # +input_b+:: tensor Y
+    # @param input_a tensor X
+    # @param input_b tensor Y
    #
    # Options:
-    # +:name+:: Optional name
+    # @option name Optional name
+    # @return Tensor
    def mod(input_a, input_b, name: nil)
      input_a, input_b = apply_data_type_coercion(input_a, input_b)
      _op(:mod, input_a, input_b, name: name)
@@ -332,12 +349,12 @@ module TensorStream
    #
    # This operation supports broadcasting
    #
-    # Params:
-    # +input_a+:: tensor X
-    # +input_b+:: tensor Y
+    # @param input_a tensor X
+    # @param input_b tensor Y
    #
    # Options:
-    # +:name+:: Optional name
+    # @option name Optional name
+    # @return Tensor
    def mul(input_a, input_b, name: nil)
      input_a, input_b = apply_data_type_coercion(input_a, input_b)
      _op(:mul, input_a, input_b, name: name)
@@ -348,16 +365,33 @@ module TensorStream
    # Computes numerical negative value element-wise.
    #
    #
-    # Params:
-    # +input+:: tensor X
+    # @param input tensor X
    #
    # Options:
-    # +:name+:: Optional name
+    # @option name Optional name
+    # @return Tensor
    def negate(input, name: nil)
      _op(:negate, input, name: name)
    end


+    ##
+    # Returns the truth value of (x != y) element-wise.
+    #
+    # This operation supports broadcasting
+    #
+    # @param input_a tensor X
+    # @param input_b tensor Y
+    #
+    # Options:
+    # @option name Optional name
+    # @return Tensor
+    def not_equal(input_a, input_b, name: nil)
+      input_a, input_b = apply_data_type_coercion(input_a, input_b)
+      _op(:not_equal, input_a, input_b, name: name)
+    end
+
+
    ##
    # Creates a tensor with all elements set to 1.
    # Given a single tensor (tensor), this operation returns a
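
With `less` (above) and `not_equal` now generated as stubs, the comparison ops can be called like any other TensorStream op. A short usage sketch, assuming the stubs are reachable as module methods on `TensorStream` like the existing ops:

    require "tensor_stream"

    ts = TensorStream
    a = ts.constant([1, 2, 3])
    b = ts.constant([3, 2, 1])

    sess = ts.session
    p sess.run(ts.less(a, b))      # expected => [true, false, false]
    p sess.run(ts.not_equal(a, b)) # expected => [true, false, true]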
@@ -365,12 +399,12 @@ module TensorStream
    # Optionally, you can specify a new type (dtype) for the returned tensor.
    #
    #
-    # Params:
-    # +input+:: A tensor
+    # @param input A tensor
    #
    # Options:
-    # +:dtype+:: Optional new data type to cast into
-    # +:name+:: Optional name
+    # @option dtype Optional new data type to cast into
+    # @option name Optional name
+    # @return Tensor
    def ones_like(input, dtype: nil, name: nil)
      _op(:ones_like, input, data_type: dtype, name: name)
    end
@@ -381,12 +415,12 @@ module TensorStream
    #
    # This operation supports broadcasting
    #
-    # Params:
-    # +input_a+:: tensor X
-    # +input_b+:: tensor Y
+    # @param input_a tensor X
+    # @param input_b tensor Y
    #
    # Options:
-    # +:name+:: Optional name
+    # @option name Optional name
+    # @return Tensor
    def pow(input_a, input_b, name: nil)
      input_a, input_b = apply_data_type_coercion(input_a, input_b)
      _op(:pow, input_a, input_b, name: name)
@@ -401,13 +435,13 @@ module TensorStream
    # If axis has no entries, all dimensions are reduced, and a tensor with a single element is returned.
    #
    #
-    # Params:
-    # +input_a+:: tensor X
-    # +axis+:: tensor X (of type INTEGER_TYPES)
+    # @param input_a tensor X
+    # @param axis tensor X (of type INTEGER_TYPES)
    #
    # Options:
-    # +:name+:: Optional name
-    # +:keepdims+:: If true, retains reduced dimensions with length 1. default (false)
+    # @option name Optional name
+    # @option keepdims If true, retains reduced dimensions with length 1. default (false)
+    # @return Tensor
    def prod(input_a, axis = nil, name: nil, keepdims: false)
      check_allowed_types(axis, TensorStream::Ops::INTEGER_TYPES)
      input_a = TensorStream.convert_to_tensor(input_a)
@@ -422,15 +456,15 @@ module TensorStream
    # Outputs random values from a uniform distribution.
    #
    #
-    # Params:
-    # +shape+:: A 1-D integer Tensor or array. The shape of the output tensor.
+    # @param shape A 1-D integer Tensor or array. The shape of the output tensor.
    #
    # Options:
-    # +:name+:: Optional name
-    # +:dtype+:: The type of the output: float16, float32, float64, int32, or int64 default (:float32)
-    # +:minval+:: A 0-D Tensor or ruby value of type dtype. The lower bound on the range of random values to generate. Defaults to 0. default (0)
-    # +:maxval+:: A 0-D Tensor or ruby value of type dtype. The upper bound on the range of random values to generate. Defaults to 1 if dtype is floating point. default (1)
-    # +:seed+:: A ruby integer. Used to create a random seed for the distribution. See set_random_seed for behavior.
+    # @option name Optional name
+    # @option dtype The type of the output: float16, float32, float64, int32, or int64 default (:float32)
+    # @option minval A 0-D Tensor or ruby value of type dtype. The lower bound on the range of random values to generate. Defaults to 0. default (0)
+    # @option maxval A 0-D Tensor or ruby value of type dtype. The upper bound on the range of random values to generate. Defaults to 1 if dtype is floating point. default (1)
+    # @option seed A ruby integer. Used to create a random seed for the distribution. See set_random_seed for behavior.
+    # @return Tensor
    def random_uniform(shape, name: nil, dtype: :float32, minval: 0, maxval: 1, seed: nil)
      _op(:random_uniform, shape, name: name, dtype: dtype, minval: minval, maxval: maxval, seed: seed)
    end
@@ -441,15 +475,15 @@ module TensorStream
    # Creates a sequence of numbers that begins at start and extends by increments of delta up to but not including limit.
    #
    #
-    # Params:
-    # +start+:: Acts as first entry in the range if limit is not nil; otherwise, acts as range limit and first entry defaults to 0.
-    # +limit+:: Upper limit of sequence, exclusive. If nil, defaults to the value of start while the first entry of the range defaults to 0.
-    # +delta+:: Number that increments start. Defaults to 1.
+    # @param start Acts as first entry in the range if limit is not nil; otherwise, acts as range limit and first entry defaults to 0.
+    # @param limit Upper limit of sequence, exclusive. If nil, defaults to the value of start while the first entry of the range defaults to 0.
+    # @param delta Number that increments start. Defaults to 1.
    #
    # Options:
-    # +:name+:: A name for the operation. Defaults to "range". default ("range")
-    # +:dtype+:: The type of the elements of the resulting tensor.
-    # +:output_type+:: Output data type defaults to int32 default (:int32)
+    # @option name A name for the operation. Defaults to "range". default ("range")
+    # @option dtype The type of the elements of the resulting tensor.
+    # @option output_type Output data type defaults to int32 default (:int32)
+    # @return Tensor
    def range(start = 0, limit = 0, delta = 1, name: "range", dtype: nil, output_type: :int32)
      _op(:range, start, limit, delta, name: name, dtype: dtype, output_type: output_type)
    end
@@ -459,11 +493,11 @@ module TensorStream
    # Returns the rank of a tensor
    #
    #
-    # Params:
-    # +input+:: A tensor
+    # @param input A tensor
    #
    # Options:
-    # +:name+:: Optional name
+    # @option name Optional name
+    # @return Tensor
    def rank(input, name: nil)
      input = convert_to_tensor(input)
      return cons(input.shape.ndims) if input.shape.known?
@@ -476,12 +510,12 @@ module TensorStream
    # Given tensor, this operation returns a tensor that has the same values as tensor with shape shape.
    #
    #
-    # Params:
-    # +input+:: A tensor
-    # +shape+:: A new tensor shape
+    # @param input A tensor
+    # @param shape A new tensor shape
    #
    # Options:
-    # +:name+:: Optional name
+    # @option name Optional name
+    # @return Tensor
    def reshape(input, shape, name: nil)
      _op(:reshape, input, shape, name: name)
    end
@@ -491,27 +525,42 @@ module TensorStream
    # Rounds the values of a tensor to the nearest integer, element-wise
    #
    #
-    # Params:
-    # +input_a+:: tensor X (of type FLOATING_POINT_TYPES)
+    # @param input_a tensor X (of type FLOATING_POINT_TYPES)
    #
    # Options:
-    # +:name+:: Optional name
+    # @option name Optional name
+    # @return Tensor
    def round(input_a, name: nil)
      check_allowed_types(input_a, TensorStream::Ops::FLOATING_POINT_TYPES)
      _op(:round, input_a, name: name)
    end

+    ##
+    # Computes reciprocal of square root of x element-wise.
+    #
+    #
+    # @param input_a tensor X (of type FLOATING_POINT_TYPES)
+    #
+    # Options:
+    # @option name Optional name
+    # @return Tensor
+    def rsqrt(input_a, name: nil)
+      check_allowed_types(input_a, TensorStream::Ops::FLOATING_POINT_TYPES)
+      _op(:rsqrt, input_a, name: name)
+    end
+
+
    ##
    # This operation returns a 1-D integer tensor representing the shape of input
    #
    #
-    # Params:
-    # +input+:: A tensor
+    # @param input A tensor
    #
    # Options:
-    # +:name+:: Optional name
-    # +:out_type+:: Optional output type default (:int32)
+    # @option name Optional name
+    # @option out_type Optional output type default (:int32)
+    # @return Tensor
    def shape(input, name: nil, out_type: :int32)
      return constant(shape_eval(input, out_type), dtype: out_type, name: "Shape/#{name}") if input.is_a?(Array) && !input[0].is_a?(Tensor)
      return constant(input.shape.shape, dtype: out_type, name: "Shape/#{input.name}_c") if shape_full_specified(input)
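
A quick sketch of the new `rsqrt` stub (element-wise 1 / sqrt(x)), under the same assumption that stubs are callable on `TensorStream`:

    require "tensor_stream"

    ts = TensorStream
    x = ts.constant([1.0, 4.0, 16.0])
    p ts.session.run(ts.rsqrt(x)) # expected => [1.0, 0.5, 0.25]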
@@ -523,11 +572,11 @@ module TensorStream
    # Computes sigmoid of x element-wise.
    #
    #
-    # Params:
-    # +input_a+:: tensor X (of type FLOATING_POINT_TYPES)
+    # @param input_a tensor X (of type FLOATING_POINT_TYPES)
    #
    # Options:
-    # +:name+:: Optional name
+    # @option name Optional name
+    # @return Tensor
    def sigmoid(input_a, name: nil)
      check_allowed_types(input_a, TensorStream::Ops::FLOATING_POINT_TYPES)
      _op(:sigmoid, input_a, name: name)
@@ -540,11 +589,11 @@ module TensorStream
    # Zero is returned for NaN inputs.
    #
    #
-    # Params:
-    # +input_a+:: tensor X
+    # @param input_a tensor X
    #
    # Options:
-    # +:name+:: Optional name
+    # @option name Optional name
+    # @return Tensor
    def sign(input_a, name: nil)
      _op(:sign, input_a, name: name)
    end
@@ -554,11 +603,11 @@ module TensorStream
    # Computes sin of input element-wise.
    #
    #
-    # Params:
-    # +input_a+:: tensor X (of type FLOATING_POINT_TYPES)
+    # @param input_a tensor X (of type FLOATING_POINT_TYPES)
    #
    # Options:
-    # +:name+:: Optional name
+    # @option name Optional name
+    # @return Tensor
    def sin(input_a, name: nil)
      check_allowed_types(input_a, TensorStream::Ops::FLOATING_POINT_TYPES)
      _op(:sin, input_a, name: name)
@@ -570,28 +619,50 @@ module TensorStream
    # Returns a 0-D Tensor representing the number of elements in input of type out_type. Defaults to :int32.
    #
    #
-    # Params:
-    # +input+:: A tensor
+    # @param input A tensor
    #
    # Options:
-    # +:name+:: Optional name
-    # +:out_type+:: Optional output type default (:int32)
+    # @option name Optional name
+    # @option out_type Optional output type default (:int32)
+    # @return Tensor
    def size(input, name: nil, out_type: :int32)
      _op(:size, input, name: name, out_type: out_type)
    end

+    ##
+    # Extracts a strided slice of a tensor
+    # this op extracts a slice of size `(end-begin)/stride`
+    # from the given `input_` tensor. Starting at the location specified by `begin`
+    # the slice continues by adding `stride` to the index until all dimensions are
+    # not less than `end`.
+    # Note that a stride can be negative, which causes a reverse slice.
+    #
+    #
+    # @param input A tensor
+    # @param _begin start index
+    # @param _end end index
+    # @param strides end index
+    #
+    # Options:
+    # @option name Optional name
+    # @return Tensor
+    def strided_slice(input, _begin, _end, strides = nil, name: nil)
+      _op(:strided_slice, input, _begin, _end, strides, name: name)
+    end
+
+
    ##
    # Returns x - y element-wise.
    #
    # This operation supports broadcasting
    #
-    # Params:
-    # +input_a+:: tensor X
-    # +input_b+:: tensor Y
+    # @param input_a tensor X
+    # @param input_b tensor Y
    #
    # Options:
-    # +:name+:: Optional name
+    # @option name Optional name
+    # @return Tensor
    def sub(input_a, input_b, name: nil)
      input_a, input_b = apply_data_type_coercion(input_a, input_b)
      _op(:sub, input_a, input_b, name: name)
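
And a sketch of the new `strided_slice` stub; `_begin`/`_end`/`strides` are given per dimension, with the end index exclusive:

    require "tensor_stream"

    ts = TensorStream
    m = ts.constant([[1, 2, 3], [4, 5, 6], [7, 8, 9]])

    # rows 0..1, all three columns, stride 1 in each dimension
    slice = ts.strided_slice(m, [0, 0], [2, 3], [1, 1])
    p ts.session.run(slice) # expected => [[1, 2, 3], [4, 5, 6]]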
@@ -607,19 +678,21 @@ module TensorStream
    # If axis has no entries, all dimensions are reduced, and a tensor with a single element is returned.
    #
    #
-    # Params:
-    # +input_a+:: tensor X
-    # +axis+:: tensor X (of type INTEGER_TYPES)
+    # @param input_a tensor X
+    # @param axis_p tensor X (of type INTEGER_TYPES)
    #
    # Options:
-    # +:name+:: Optional name
-    # +:keepdims+:: If true, retains reduced dimensions with length 1. default (false)
-    def sum(input_a, axis = nil, name: nil, keepdims: false)
-      check_allowed_types(axis, TensorStream::Ops::INTEGER_TYPES)
+    # @option axis axis
+    # @option name Optional name
+    # @option keepdims If true, retains reduced dimensions with length 1. default (false)
+    # @return Tensor
+    def sum(input_a, axis_p = nil, axis: nil, name: nil, keepdims: false)
+      check_allowed_types(axis_p, TensorStream::Ops::INTEGER_TYPES)
      input_a = TensorStream.convert_to_tensor(input_a)
      return input_a if input_a.shape.scalar?
-      axis = cast_axis(input_a, axis)
-      _op(:sum, input_a, axis, name: name, keepdims: keepdims)
+      axis_p = axis_p || axis
+      axis_p = cast_axis(input_a, axis_p)
+      _op(:sum, input_a, axis_p, name: name, keepdims: keepdims)
    end

    alias_method :reduce_sum, :sum
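
`sum` keeps its positional axis argument (renamed `axis_p`) for backwards compatibility and adds an `axis:` keyword; the two spellings below should be equivalent:

    require "tensor_stream"

    ts = TensorStream
    m = ts.constant([[1, 2], [3, 4]])
    sess = ts.session

    p sess.run(ts.reduce_sum(m, 0))       # positional axis => [4, 6]
    p sess.run(ts.reduce_sum(m, axis: 1)) # keyword axis    => [3, 7]
    p sess.run(ts.reduce_sum(m))          # all elements    => 10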
@@ -628,11 +701,11 @@ module TensorStream
    # Computes tan of input element-wise.
    #
    #
-    # Params:
-    # +input_a+:: tensor X (of type FLOATING_POINT_TYPES)
+    # @param input_a tensor X (of type FLOATING_POINT_TYPES)
    #
    # Options:
-    # +:name+:: Optional name
+    # @option name Optional name
+    # @return Tensor
    def tan(input_a, name: nil)
      check_allowed_types(input_a, TensorStream::Ops::FLOATING_POINT_TYPES)
      _op(:tan, input_a, name: name)
@@ -643,11 +716,11 @@ module TensorStream
    # Computes tanh of input element-wise.
    #
    #
-    # Params:
-    # +input_a+:: tensor X (of type FLOATING_POINT_TYPES)
+    # @param input_a tensor X (of type FLOATING_POINT_TYPES)
    #
    # Options:
-    # +:name+:: Optional name
+    # @option name Optional name
+    # @return Tensor
    def tanh(input_a, name: nil)
      check_allowed_types(input_a, TensorStream::Ops::FLOATING_POINT_TYPES)
      _op(:tanh, input_a, name: name)
@@ -661,27 +734,44 @@ module TensorStream
    # and the values of input are replicated multiples[i] times along the 'i'th dimension. For example, tiling [a b c d] by [2] produces [a b c d a b c d].
    #
    #
-    # Params:
-    # +input+:: A tensor
-    # +multiples+:: Must be one of the following types: int32, int64. 1-D. Length must be the same as the number of dimensions in input
+    # @param input A tensor
+    # @param multiples Must be one of the following types: int32, int64. 1-D. Length must be the same as the number of dimensions in input
    #
    # Options:
-    # +:name+:: Optional name
+    # @option name Optional name
+    # @return Tensor
    def tile(input, multiples, name: nil)
      _op(:tile, input, multiples, name: name)
    end

+    ##
+    # Finds values and indices of the `k` largest entries for the last dimension.
+    #
+    #
+    # @param input 1-D or higher `Tensor` with last dimension at least `k`.
+    # @param k 0-D `int32` `Tensor`. Number of top elements to look for along the last dimension (along each row for matrices)
+    #
+    # Options:
+    # @option sorted If true the resulting `k` elements will be sorted by the values in descending order. default (true)
+    # @option name Optional name
+    # @return Tensor
+    def top_k(input, k = 1, sorted: true, name: nil)
+      result = _op(:top_k, input, k, sorted: sorted, name: name)
+      [result[0], result[1]]
+    end
+
+
    ##
    # Creates a tensor with all elements set to zero
    #
    #
-    # Params:
-    # +shape+:: A 1-D integer Tensor or ruby array. The shape of the output tensor.
+    # @param shape A 1-D integer Tensor or ruby array. The shape of the output tensor.
    #
    # Options:
-    # +:dtype+:: Optional name default (:float32)
-    # +:name+:: Optional name
+    # @option dtype Optional name default (:float32)
+    # @option name Optional name
+    # @return Tensor
    def zeros(shape, dtype: :float32, name: nil)
      _op(:zeros, shape, dtype: dtype, name: name)
    end
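
Since `top_k` returns a values/indices pair, it destructures naturally; a short sketch, under the same assumption that the stub is callable on `TensorStream`:

    require "tensor_stream"

    ts = TensorStream
    values, indices = ts.top_k(ts.constant([1.0, 5.0, 3.0]), 2)

    sess = ts.session
    p sess.run(values)  # expected => [5.0, 3.0]
    p sess.run(indices) # expected => [1, 2]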