tensor_stream 0.3.0 → 0.4.0

Files changed (59)
  1. checksums.yaml +4 -4
  2. data/.circleci/config.yml +7 -7
  3. data/CHANGELOG.md +13 -0
  4. data/Dockerfile +25 -0
  5. data/Rakefile +6 -0
  6. data/benchmark/benchmark.rb +16 -57
  7. data/benchmark_intel.txt +21 -0
  8. data/benchmark_nvidia.txt +33 -0
  9. data/lib/tensor_stream.rb +4 -173
  10. data/lib/tensor_stream/debugging/debugging.rb +20 -0
  11. data/lib/tensor_stream/evaluator/kernels/abs.cl +9 -5
  12. data/lib/tensor_stream/evaluator/kernels/add.cl +2 -4
  13. data/lib/tensor_stream/evaluator/kernels/argmax.cl +2 -9
  14. data/lib/tensor_stream/evaluator/kernels/argmin.cl +2 -9
  15. data/lib/tensor_stream/evaluator/kernels/cast.cl +3 -8
  16. data/lib/tensor_stream/evaluator/kernels/cond.cl.erb +1 -1
  17. data/lib/tensor_stream/evaluator/kernels/cos.cl +2 -1
  18. data/lib/tensor_stream/evaluator/kernels/div.cl.erb +2 -4
  19. data/lib/tensor_stream/evaluator/kernels/exp.cl +2 -1
  20. data/lib/tensor_stream/evaluator/kernels/gemm.cl +8 -39
  21. data/lib/tensor_stream/evaluator/kernels/log.cl +2 -1
  22. data/lib/tensor_stream/evaluator/kernels/log1p.cl +2 -1
  23. data/lib/tensor_stream/evaluator/kernels/max.cl +4 -49
  24. data/lib/tensor_stream/evaluator/kernels/mul.cl +2 -4
  25. data/lib/tensor_stream/evaluator/kernels/negate.cl +2 -9
  26. data/lib/tensor_stream/evaluator/kernels/pow.cl +4 -88
  27. data/lib/tensor_stream/evaluator/kernels/reciprocal.cl +2 -9
  28. data/lib/tensor_stream/evaluator/kernels/round.cl +2 -1
  29. data/lib/tensor_stream/evaluator/kernels/sigmoid.cl +2 -1
  30. data/lib/tensor_stream/evaluator/kernels/sigmoid_grad.cl +6 -5
  31. data/lib/tensor_stream/evaluator/kernels/sign.cl +12 -14
  32. data/lib/tensor_stream/evaluator/kernels/sin.cl +2 -1
  33. data/lib/tensor_stream/evaluator/kernels/softmax.cl +26 -0
  34. data/lib/tensor_stream/evaluator/kernels/softmax_grad.cl +46 -0
  35. data/lib/tensor_stream/evaluator/kernels/sqrt.cl +2 -1
  36. data/lib/tensor_stream/evaluator/kernels/square.cl +2 -8
  37. data/lib/tensor_stream/evaluator/kernels/sub.cl +2 -4
  38. data/lib/tensor_stream/evaluator/kernels/tan.cl +2 -1
  39. data/lib/tensor_stream/evaluator/kernels/tanh.cl +2 -1
  40. data/lib/tensor_stream/evaluator/kernels/tanh_grad.cl +2 -1
  41. data/lib/tensor_stream/evaluator/kernels/where.cl +2 -9
  42. data/lib/tensor_stream/evaluator/opencl_evaluator.rb +108 -58
  43. data/lib/tensor_stream/evaluator/opencl_template_helper.rb +40 -5
  44. data/lib/tensor_stream/evaluator/operation_helpers/array_ops_helper.rb +35 -0
  45. data/lib/tensor_stream/evaluator/ruby_evaluator.rb +30 -9
  46. data/lib/tensor_stream/graph_serializers/graphml.rb +1 -1
  47. data/lib/tensor_stream/graph_serializers/pbtext.rb +4 -0
  48. data/lib/tensor_stream/math_gradients.rb +6 -5
  49. data/lib/tensor_stream/nn/nn_ops.rb +18 -2
  50. data/lib/tensor_stream/ops.rb +237 -44
  51. data/lib/tensor_stream/tensor.rb +16 -2
  52. data/lib/tensor_stream/utils.rb +205 -0
  53. data/lib/tensor_stream/variable.rb +2 -1
  54. data/lib/tensor_stream/version.rb +1 -1
  55. data/samples/error.graphml +2755 -0
  56. data/{test_samples → samples}/iris.rb +18 -24
  57. data/samples/logistic_regression.rb +0 -1
  58. data/test_samples/raw_neural_net_sample.rb +80 -23
  59. metadata +11 -3
data/lib/tensor_stream/evaluator/opencl_template_helper.rb
@@ -4,8 +4,18 @@ class OpenclTemplateHelper
     @source = source
   end
 
-  def generate
-    ERB.new(@source, nil, '%').result(binding)
+  def generate(args = {})
+    current_scope = binding
+
+    args.each do |k, v|
+      current_scope.local_variable_set(k.to_sym, v)
+    end
+
+    ERB.new(@source, nil, '%').result(current_scope)
+  end
+
+  def is_floating_point?(dtype)
+    TensorStream::Ops::FLOATING_POINT_TYPES.include?(dtype)
   end
 
   def render(template, locals = {})
@@ -19,11 +29,36 @@ class OpenclTemplateHelper
   end
 
   def dtype_to_c_type(dtype)
-    case(dtype)
-    when 'fp'
+    case(dtype.to_s)
+    when 'float64'
+      'double'
+    when 'float32', 'float'
       'float'
-    when 'int'
+    when 'int32', 'int'
       'int'
+    when 'int16'
+      'short'
+    when 'boolean'
+      'int'
+    else
+      raise "unknown dtype #{dtype}"
+    end
+  end
+
+  def min_value_for(dtype)
+    case(dtype.to_s)
+    when 'float64'
+      'DBL_MIN'
+    when 'float32', 'float'
+      'FLT_MIN'
+    when 'int32', 'int'
+      'INT_MIN'
+    when 'int16'
+      'SHRT_MIN'
+    when 'boolean'
+      '0'
+    else
+      raise "unknown dtype #{dtype}"
     end
   end
 
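Note: the new `generate(args)` contract is worth spelling out, since the OpenCL kernel templates now depend on it: each entry in `args` is injected into the ERB binding as a local variable before rendering. A minimal standalone sketch (not the gem's class; `TemplateSketch` is a made-up name, and `trim_mode: '%'` is the modern spelling of the `ERB.new(@source, nil, '%')` call shown above):

    require 'erb'

    class TemplateSketch
      def initialize(source)
        @source = source
      end

      def generate(args = {})
        scope = binding
        # expose each arg as a local variable inside the template's binding
        args.each { |k, v| scope.local_variable_set(k.to_sym, v) }
        ERB.new(@source, trim_mode: '%').result(scope)
      end
    end

    puts TemplateSketch.new('c type: <%= dtype %>').generate(dtype: 'float')
    # => c type: float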
data/lib/tensor_stream/evaluator/operation_helpers/array_ops_helper.rb
@@ -2,6 +2,7 @@ module TensorStream
   # various utility functions for array processing
   module ArrayOpsHelper
     def slice_tensor(input, start, size)
+      return input if size.empty?
       start_index = start.shift
       dimen_size = start_index + size.shift
 
@@ -164,5 +165,39 @@ module TensorStream
 
       get_rank(value[0], rank + 1)
     end
+
+    def softmax(arr)
+      return arr if arr.empty?
+
+      sum = if !arr[0].is_a?(Array)
+              arr.map { |a| Math.exp(a - arr.max) }.reduce(:+)
+            end
+
+      arr.collect do |item|
+        if item.is_a?(Array)
+          softmax(item)
+        else
+          Math.exp(item - arr.max) / sum
+        end
+      end
+    end
+
+    def softmax_grad(arr)
+      return arr if arr.empty?
+
+      arr.each_with_index.collect do |item, index|
+        if item.is_a?(Array)
+          softmax_grad(item)
+        else
+          arr.each_with_index.collect do |item2, index2|
+            if index != index2
+              -item * item2
+            else
+              item * (1.0 - item)
+            end
+          end
+        end
+      end
+    end
   end
 end
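Note: `softmax` above is the standard numerically-stabilized form (subtract the max before exponentiating), and `softmax_grad` builds the Jacobian J[i][j] = s[i] * (1 - s[i]) when i == j, else -s[i] * s[j]. A self-contained sketch of the same math for a flat vector (not the gem's code):

    def stable_softmax(arr)
      m = arr.max
      exps = arr.map { |a| Math.exp(a - m) }  # shift by max: same result, no overflow
      total = exps.reduce(:+)
      exps.map { |e| e / total }
    end

    s = stable_softmax([1.0, 2.0, 3.0])
    # s ~ [0.0900, 0.2447, 0.6652]; s.reduce(:+) ~ 1.0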
data/lib/tensor_stream/evaluator/ruby_evaluator.rb
@@ -41,7 +41,9 @@ module TensorStream
     end
 
     def run(tensor, execution_context)
-      return tensor.map { |t| run(t, execution_context) } if tensor.is_a?(Array)
+      if tensor.is_a?(Array) && tensor.size > 0 && tensor[0].is_a?(Tensor)
+        return tensor.map { |t| run(t, execution_context) }
+      end
 
       return tensor if retain.include?(tensor) # if var is in retain don't eval to value
 
@@ -89,10 +91,9 @@ module TensorStream
 
     def eval_operation(tensor, child_context)
       return @context[tensor.name] if @context.key?(tensor.name)
-
       a = resolve_placeholder(tensor.items[0], child_context) if tensor.items && tensor.items[0]
       b = resolve_placeholder(tensor.items[1], child_context) if tensor.items && tensor.items[1]
-
+      # puts tensor.name
       case tensor.operation
       when :const
         complete_eval(a, child_context)
@@ -459,6 +460,26 @@ module TensorStream
 
         tile = tile_arr(input, 0, multiples)
         tile.nil? ? [] : tile
+      when :softmax
+        input = complete_eval(a, child_context)
+        softmax(input)
+      when :softmax_grad
+        input = complete_eval(a, child_context)
+        grad = complete_eval(b, child_context)
+        softmax_input = softmax(input)
+        f_grad = softmax_grad(softmax_input)
+        f_grad.transpose.each_with_index.collect do |row, index|
+          sum = 0.0
+          row.each_with_index do |r, g_index|
+            sum += r * grad[g_index]
+          end
+          sum
+        end
+      when :check_numerics
+        a = complete_eval(a, child_context)
+        message = tensor.options[:message]
+        f = ->(t, _b) { raise "#{message} Invalid argument" if t.nan? || t.infinite?; t }
+        call_op(:check_numerics, a, child_context, f)
       else
         raise "unknown op #{tensor.operation}"
       end.tap do |result|
@@ -486,19 +507,19 @@ module TensorStream
       puts e.message
       puts e.backtrace.join("\n")
 
-      shape_a = a.shape.shape if a
-      shape_b = b.shape.shape if b
-      dtype_a = a.data_type if a
-      dtype_b = b.data_type if b
+      # shape_a = a.shape.shape if a
+      # shape_b = b.shape.shape if b
+      # dtype_a = a.data_type if a
+      # dtype_b = b.data_type if b
       a = complete_eval(a, child_context)
       b = complete_eval(b, child_context)
+
       # puts "name: #{tensor.given_name}"
       # # puts "op: #{tensor.to_math(true, 1)}"
       # puts "A #{shape_a} #{dtype_a}: #{a}" if a
      # puts "B #{shape_b} #{dtype_b}: #{b}" if b
      # dump_intermediates if @log_intermediates
      # File.write('/home/jedld/workspace/tensor_stream/samples/error.graphml', TensorStream::Graphml.new.get_string(tensor, @session))
-
      # File.write('/Users/josephemmanueldayo/workspace/gradients.graphml', TensorStream::Graphml.new.get_string(tensor, @session))
      raise EvaluatorExcecutionException.new(e, tensor), "error #{e.message} while evaluating #{tensor.name} : #{tensor.to_math(true,1)} defined at #{tensor.source}"
     end
@@ -741,7 +762,7 @@ module TensorStream
 
       v_a.each_with_index.collect do |v1, index|
         v2 = v_b[index]
-        v3 = v_c[index]
+        v3 = v_c.is_a?(Array) ? v_c[index] : v_c
         if v1.is_a?(Array)
           call_3way_vector_op(v1, v2, v3, child_context, op)
         else
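Note: the `:softmax_grad` branch above is a vector-Jacobian product: each output component is the dot product of one Jacobian column with the incoming gradient. A small numeric sketch of that reduction (hypothetical values, not the gem's code):

    f_grad = [[0.2, -0.1], [-0.1, 0.2]]  # softmax Jacobian rows
    grad   = [1.0, 0.5]                  # upstream gradient

    result = f_grad.transpose.map do |col|
      col.each_with_index.map { |r, i| r * grad[i] }.reduce(:+)
    end
    # => [0.15, 0.0]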
data/lib/tensor_stream/graph_serializers/graphml.rb
@@ -101,7 +101,7 @@ module TensorStream
 
   def _val(tensor)
     # JSON.pretty_generate(@last_session_context[tensor.name])
-    @last_session_context[tensor.name]
+    @last_session_context[tensor.name] || @last_session_context[:_cache][tensor.name]
   end
 
   def to_graph_ml(tensor, arr_buf = [], added = {}, groups = {}, _id = 0)
data/lib/tensor_stream/graph_serializers/pbtext.rb
@@ -110,8 +110,12 @@ module TensorStream
     case type
     when :int32, :int
       "DT_INT32"
+    when :int16
+      "DT_INT16"
     when :float, :float32
       "DT_FLOAT"
+    when :float64
+      "DT_FLOAT64"
     when :string
       "DT_STRING"
     else
data/lib/tensor_stream/math_gradients.rb
@@ -127,11 +127,10 @@ module TensorStream
       sx = tf.shape(x)
       sy = tf.shape(y)
       rx, ry = _broadcast_gradient_args(sx, sy)
-      gx = tf.reshape(
-        tf.reduce_sum(grad * y * tf.pow(x, y - 1), rx), sx)
+      gx = tf.reduce_sum(grad * y * tf.pow(x, y - 1), rx)
 
       log_x = tf.where(x > 0, tf.log(x), tf.zeros_like(x))
-      gy = tf.reshape(tf.reduce_sum(grad * z * log_x, ry), sy)
+      gy = tf.reduce_sum(grad * z * log_x, ry)
 
       [gx, gy]
     when :abs
@@ -182,9 +181,11 @@ module TensorStream
       factor = _safe_shape_div(tf.reduce_prod(input_shape), tf.reduce_prod(output_shape))
       tf.div(sum_grad, tf.cast(factor, sum_grad.data_type))
     when :log1p
-      grad * tf.reciprocal(i_cons(1, data_type: grad.data_type) + x)
+      grad * tf.reciprocal(i_cons(1, dtype: grad.data_type) + x)
     when :sigmoid
       i_op(:sigmoid_grad, x, grad)
+    when :softmax
+      i_op(:softmax_grad, x, grad)
     when :zeros_like
       # non differentiable
       nil
@@ -210,7 +211,7 @@ module TensorStream
   end
 
   def self._sum_grad(x, y, grad)
-    tf.ones_like(x) * grad
+    tf.ones_like(grad) * grad
   end
 
   def self._op_supports_broadcast?(node)
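Note: the pow gradients above drop the final reshape but keep the usual calculus: for z = x**y, dz/dx = y * x**(y - 1) and dz/dy = z * log(x). A finite-difference sanity check in plain Ruby:

    x, y = 2.0, 3.0
    z = x**y
    eps = 1e-6

    ((x + eps)**y - z) / eps   # ~ y * x**(y - 1) = 12.0
    (x**(y + eps) - z) / eps   # ~ z * Math.log(x), about 5.545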
data/lib/tensor_stream/nn/nn_ops.rb
@@ -1,14 +1,30 @@
 module TensorStream
   # High level machine learning functions
   class NN
-    def self.softmax(logits, _options = {})
-      TensorStream.exp(logits) / TensorStream.reduce_sum(TensorStream.exp(logits))
+    extend TensorStream::OpHelper
+    def self.softmax(logits, axis: nil, name: nil)
+      _op(:softmax, logits, nil, axis: axis, name: name)
     end
 
     def self.relu(features, name: nil)
       TensorStream.max(features, 0, name: "relu_#{name}")
     end
 
+    def self.sigmoid(input, name: nil)
+      TensorStream.sigmoid(input, name)
+    end
+
+    def self.softmax_cross_entropy_with_logits(labels: nil, logits: nil, name: nil)
+      TensorStream.name_scope(name, default: 'softmax_cross_entropy_with_logits', values: [logits, labels]) do |name|
+        tf = TensorStream
+        logits = tf.convert_to_tensor(logits, name: 'logits')
+        labels = tf.convert_to_tensor(labels, name: 'labels')
+        labels = tf.cast(labels, logits.dtype)
+        softmax_logits = -tf.log(softmax(logits)) * labels
+        tf.reduce_sum(softmax_logits, tf.rank(logits) - 1)
+      end
+    end
+
     def self.sigmoid_cross_entropy_with_logits(labels: nil, logits: nil, name: nil)
       TensorStream.name_scope(name, default: 'logistic_loss', values: [logits, labels]) do |name|
         tf = TensorStream
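Note: a hedged usage sketch of the new NN surface (the `constant` and `session` helpers follow the gem's TensorFlow-style API and are assumed here, since they are not shown in this diff):

    require 'tensor_stream'

    tf = TensorStream
    logits = tf.constant([[2.0, 1.0, 0.1]])
    labels = tf.constant([[1.0, 0.0, 0.0]])

    # -sum(labels * log(softmax(logits))) reduced along the last axis
    loss = TensorStream::NN.softmax_cross_entropy_with_logits(labels: labels, logits: logits)
    tf.session.run(loss)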
data/lib/tensor_stream/ops.rb
@@ -5,32 +5,52 @@ module TensorStream
   INTEGER_TYPES = %i[int32 int int64].freeze
   NUMERIC_TYPES = FLOATING_POINT_TYPES + INTEGER_TYPES
 
+  ##
+  # Returns the index with the largest value across axes of a tensor.
+  #
+  # Arguments
+  #
+  # +input+ A Tensor. Must be one of the following types: float32, float64, int32, int16
+  # +axis+ Describes which axis of the input Tensor to reduce across. For vectors, use axis = 0
+  # +output_type+ Output data type defaults to int32
   def argmax(input, axis = nil, name: nil, dimension: nil, output_type: :int32)
     _op(:argmax, input, nil, axis: axis, name: name, dimension: dimension, data_type: output_type)
   end
 
+  ##
+  # Returns the index with the smallest value across axes of a tensor.
+  #
+  # Arguments
+  #
+  # +input+ A Tensor. Must be one of the following types: float32, float64, int32, int16
+  # +axis+ Describes which axis of the input Tensor to reduce across. For vectors, use axis = 0
+  # +output_type+ Output data type defaults to int32
   def argmin(input, axis = nil, name: nil, dimension: nil, output_type: :int32)
     _op(:argmin, input, nil, axis: axis, name: name, dimension: dimension, data_type: output_type)
   end
 
-  def gradients(input, wrt_xs, grad_ys: nil,
-                name: 'gradients',
-                colocate_gradients_with_ops: false,
-                gate_gradients: false,
-                aggregation_method: nil,
-                stop_gradients: nil)
+  ##
+  # Constructs symbolic derivatives of ys with respect to each x in wrt_xs.
+  #
+  # ys and wrt_xs are each a Tensor or a list of tensors.
+  #
+  # Arguments:
+  # +ys+ : A Tensor or list of tensors to be differentiated.
+  # +wrt_xs+ : A Tensor or list of tensors to be used for differentiation.
+  # +stop_gradients+ : Optional. A Tensor or list of tensors not to differentiate through
+  def gradients(ys, wrt_xs, name: 'gradients', stop_gradients: nil)
 
     gs = wrt_xs.collect do |x|
       stops = stop_gradients ? stop_gradients.map(&:name).join('_') : ''
-      gradient_program_name = "grad_#{input.name}_#{x.name}_#{stops}".to_sym
+      gradient_program_name = "grad_#{ys.name}_#{x.name}_#{stops}".to_sym
 
-      tensor_program = if input.graph.node_added?(gradient_program_name)
-                         input.graph.get_node(gradient_program_name)
+      tensor_program = if ys.graph.node_added?(gradient_program_name)
+                         ys.graph.get_node(gradient_program_name)
                        else
-                         input.graph.name_scope("gradient_wrt_#{x.name}") do
-                           derivative_ops = TensorStream::MathGradients.derivative(input, x, graph: input.graph,
+                         ys.graph.name_scope("gradient_wrt_#{x.name}") do
+                           derivative_ops = TensorStream::MathGradients.derivative(ys, x, graph: ys.graph,
                                                                                    stop_gradients: stop_gradients)
-                           input.graph.add_node!(gradient_program_name, derivative_ops)
+                           ys.graph.add_node!(gradient_program_name, derivative_ops)
                          end
                        end
       tensor_program
@@ -38,272 +38,445 @@ module TensorStream
     TensorStream.group(gs)
   end
 
+  ##
+  # Outputs random values from a uniform distribution.
   def random_uniform(shape, dtype: :float32, minval: 0, maxval: 1, seed: nil, name: nil)
     options = { shape: shape, dtype: dtype, minval: minval, maxval: maxval, seed: seed, name: name }
     _op(:random_uniform, nil, nil, options)
   end
 
+  ##
+  # Outputs random values from a normal distribution.
   def random_normal(shape, dtype: :float32, mean: 0.0, stddev: 1.0, seed: nil, name: nil)
     options = { shape: shape, dtype: dtype, mean: mean, stddev: stddev, seed: seed, name: name }
     _op(:random_normal, nil, nil, options)
   end
 
+  ##
+  # Stops gradient computation.
+  #
+  # When executed in a graph, this op outputs its input tensor as-is.
   def stop_gradient(tensor, options = {})
     _op(:stop_gradient, tensor, nil, options)
   end
 
+  ##
+  # Constructs an identity matrix.
   def eye(num_rows, num_columns: nil, dtype: :float32, name: nil)
     _op(:eye, num_rows, num_columns || num_rows, data_type: dtype, name: name)
   end
 
+  ##
+  # This operation returns a 1-D integer tensor representing the shape of input
   def shape(input, name: nil, out_type: :int32)
     _op(:shape, input, nil, name: name, out_type: out_type)
   end
 
+  ##
+  # Constructs a tensor by tiling a given tensor.
+  #
+  # This operation creates a new tensor by replicating input multiples times. The output tensor's i'th dimension has input.dims(i) * multiples[i] elements, and the values of input are replicated multiples[i] times along the 'i'th dimension. For example, tiling [a b c d] by [2] produces [a b c d a b c d].
   def tile(input, multiples, name: nil)
     _op(:tile, input, multiples, name: name)
   end
 
+  ##
+  # Returns the rank of a tensor.
   def rank(input, name: nil)
     _op(:rank, input, name: name)
   end
 
-  def zeros_initializer(options = {})
-    _op(:zeros, nil, nil, options)
+  ##
+  # Initializer that generates tensors initialized to 0.
+  def zeros_initializer(dtype: nil)
+    TensorStream::Initializer.new(-> { _op(:zeros, nil, nil, data_type: dtype) })
   end
 
-  def glorot_uniform_initializer(seed: nil, dtype: :float32)
+  ##
+  # The Glorot uniform initializer, also called Xavier uniform initializer.
+  #
+  # It draws samples from a uniform distribution within [-limit, limit] where limit is sqrt(6 / (fan_in + fan_out)) where fan_in is the number of input units in the weight tensor and fan_out is the number of output units in the weight tensor.
+  def glorot_uniform_initializer(seed: nil, dtype: nil)
     TensorStream::Initializer.new(-> { _op(:glorot_uniform, nil, nil, seed: seed, data_type: dtype) })
   end
 
+  ##
+  # Initializer that generates tensors with a uniform distribution.
   def random_uniform_initializer(minval: 0, maxval: 1, seed: nil, dtype: nil)
     TensorStream::Initializer.new(-> { _op(:random_uniform, nil, nil, minval: 0, maxval: 1, seed: seed, data_type: dtype) })
   end
 
+  ##
+  # Extracts a slice from a tensor.
+  #
+  # This operation extracts a slice of size size from a tensor input starting at the location specified by begin. The slice size is represented as a tensor shape, where size[i] is the number of elements of the 'i'th dimension of input that you want to slice. The starting location (begin) for the slice is represented as an offset in each dimension of input. In other words, begin[i] is the offset into the 'i'th dimension of input that you want to slice from.
   def slice(input, start, size, name: nil)
     _op(:slice, input, start, size: size, name: name)
   end
 
+  ##
+  # Creates a tensor with all elements set to zero
   def zeros(shape, dtype: :float32, name: nil)
     _op(:zeros, shape, nil, data_type: dtype, name: name)
   end
 
+  ##
+  # Creates a tensor with all elements set to 1.
   def ones(shape, dtype: :float32, name: nil)
     _op(:ones, shape, nil, data_type: dtype, name: name)
   end
 
+  ##
+  # Returns the truth value of (x < y) element-wise.
+  # This operation supports broadcasting
   def less(input_a, input_b, name: nil)
+    input_a, input_b = check_data_types(input_a, input_b)
     _op(:less, input_a, input_b, name: name)
   end
 
+  ##
+  # Returns the truth value of x AND y element-wise.
   def logical_and(input_a, input_b, name: nil)
+    input_a, input_b = check_data_types(input_a, input_b)
     _op(:logical_and, input_a, input_b, name: name)
   end
 
+  ##
+  # Returns the truth value of (x > y) element-wise.
+  # This operation supports broadcasting
   def greater(input_a, input_b, name: nil)
+    input_a, input_b = check_data_types(input_a, input_b)
     _op(:greater, input_a, input_b, name: name)
   end
 
+  ##
+  # Returns the truth value of (x >= y) element-wise.
+  #
+  # This operation supports broadcasting
   def greater_equal(input_a, input_b, name: nil)
+    input_a, input_b = check_data_types(input_a, input_b)
     _op(:greater_equal, input_a, input_b, name: name)
   end
 
+  ##
+  # Returns the truth value of (x <= y) element-wise.
   def less_equal(input_a, input_b, name: nil)
+    input_a, input_b = check_data_types(input_a, input_b)
     _op(:less_equal, input_a, input_b, name: name)
   end
 
+  ##
+  # Computes the mean of elements across dimensions of a tensor.
   def reduce_mean(input_tensor, axis = nil, keepdims: false, name: nil)
     _op(:mean, input_tensor, axis, keepdims: keepdims, name: name)
   end
 
+  ##
+  # Computes the sum of elements across dimensions of a tensor.
+  #
+  # Reduces input_tensor along the dimensions given in axis. Unless keepdims is true, the rank of the tensor is reduced by 1 for each entry in axis. If keepdims is true, the reduced dimensions are retained with length 1.
+  # If axis has no entries, all dimensions are reduced, and a tensor with a single element is returned.
   def reduce_sum(input_tensor, axis = nil, keepdims: false, name: nil)
     _op(:sum, input_tensor, axis, keepdims: keepdims, name: name)
   end
 
+  ##
+  # Computes the product of elements across dimensions of a tensor.
+  #
+  # Reduces input_tensor along the dimensions given in axis. Unless keepdims is true, the rank of the tensor is reduced by 1 for each entry in axis. If keepdims is true, the reduced dimensions are retained with length 1.
+  #
+  # If axis has no entries, all dimensions are reduced, and a tensor with a single element is returned.
   def reduce_prod(input, axis = nil, keepdims: false, name: nil)
     _op(:prod, input, axis, keepdims: keepdims, name: name)
   end
 
+  ##
+  # Concatenates tensors along one dimension.
   def concat(values, axis, name: 'concat')
     _op(:concat, values, nil, axis: axis, name: name)
   end
 
+  ##
+  # Reshapes a tensor.
+  #
+  # Given tensor, this operation returns a tensor that has the same values as tensor with shape shape.
   def reshape(tensor, shape, name: nil)
     _op(:reshape, tensor, shape, name: name)
   end
 
+  ##
+  # Computes square of x element-wise.
   def square(tensor, name: nil)
     _op(:square, tensor, nil, name: name)
   end
 
+  ##
+  # Rounds the values of a tensor to the nearest integer, element-wise
   def round(tensor, name: nil)
     check_allowed_types(tensor, FLOATING_POINT_TYPES)
     _op(:round, tensor, nil, name: name)
   end
 
+  ##
+  # Computes the reciprocal of x element-wise.
   def reciprocal(tensor, name: nil)
     _op(:reciprocal, tensor, nil, name: name)
   end
 
+  ##
+  # Return true_fn() if the predicate pred is true else false_fn().
   def cond(pred, true_fn, false_fn, name: nil)
     _op(:cond, true_fn, false_fn, pred: pred, name: name)
   end
 
+  ##
+  # Return the elements, either from x or y, depending on the condition.
   def where(condition, true_t = nil, false_t = nil, name: nil)
     _op(:where, true_t, false_t, pred: condition, name: name)
   end
 
+  ##
+  # Returns x + y element-wise.
+  #
+  # This operation supports broadcasting
   def add(input_a, input_b, name: nil)
+    input_a, input_b = check_data_types(input_a, input_b)
     _op(:add, input_a, input_b, name: name)
   end
 
+  ##
+  # Returns x - y element-wise.
+  #
+  # This operation supports broadcasting
   def sub(input_a, input_b, name: nil)
+    input_a, input_b = check_data_types(input_a, input_b)
     _op(:sub, input_a, input_b, name: name)
   end
 
+  ##
+  # Returns x - y element-wise.
+  #
+  # This operation supports broadcasting
   def subtract(input_a, input_b, name: nil)
+    input_a, input_b = check_data_types(input_a, input_b)
     sub(input_a, input_b, name: name)
   end
 
+  ##
+  # Returns the max of x and y (i.e. x > y ? x : y) element-wise.
   def max(input_a, input_b, name: nil)
     check_allowed_types(input_a, NUMERIC_TYPES)
     check_allowed_types(input_b, NUMERIC_TYPES)
-
+    input_a, input_b = check_data_types(input_a, input_b)
     _op(:max, input_a, input_b, name: name)
   end
 
+  ##
+  # Returns the max of x and y (i.e. x > y ? x : y) element-wise.
   def maximum(input_a, input_b, name: nil)
+    check_allowed_types(input_a, NUMERIC_TYPES)
+    check_allowed_types(input_b, NUMERIC_TYPES)
+    input_a, input_b = check_data_types(input_a, input_b)
     max(input_a, input_b, name: name)
   end
-
+
+  ##
+  # Casts a tensor to a new type.
   def cast(input, dtype, name: nil)
     _op(:cast, input, nil, data_type: dtype, name: name)
   end
 
+  ##
+  # Prints a list of tensors.
+  #
+  # This is an identity op (behaves like tf.identity) with the side effect of printing data when evaluating.
   def print(input, data, message: nil, name: nil)
     _op(:print, input, data, message: message, name: name)
   end
 
+  ##
+  # Computes numerical negative value element-wise.
   def negate(input, name: nil)
     _op(:negate, input, nil, name: name)
   end
 
+  ##
+  # Computes numerical negative value element-wise.
   def negative(input, name: nil)
     negate(input, name: name)
   end
 
+  ##
+  # Returns the truth value of (x == y) element-wise.
   def equal(input_a, input_b, name: nil)
+    input_a, input_b = check_data_types(input_a, input_b)
     _op(:equal, input_a, input_b, name: name)
   end
 
+  ##
+  # Returns the truth value of (x != y) element-wise.
+  # This op supports broadcasting
   def not_equal(input_a, input_b, name: nil)
+    input_a, input_b = check_data_types(input_a, input_b)
     _op(:not_equal, input_a, input_b, name: name)
   end
 
+  ##
+  # Creates a tensor with all elements set to zero.
+  # Given a single tensor (tensor), this operation returns a tensor of the same type and shape as tensor with all elements set to zero. Optionally, you can use dtype to specify a new type for the returned tensor.
   def zeros_like(tensor, dtype: nil, name: nil)
     _op(:zeros_like, tensor, nil, data_type: dtype, name: name)
   end
 
+  ##
+  # Creates a tensor with all elements set to 1.
+  # Given a single tensor (tensor), this operation returns a tensor of the same type and shape as tensor with all elements set to 1. Optionally, you can specify a new type (dtype) for the returned tensor.
   def ones_like(tensor, dtype: nil, name: nil)
     _op(:ones_like, tensor, nil, data_type: dtype, name: name)
   end
 
+  ##
+  # Return a tensor with the same shape and contents as input.
   def identity(input, name: nil)
     _op(:identity, input, nil, name: name)
   end
 
+  ##
+  # Returns x * y element-wise.
+  # This operation supports broadcasting
   def multiply(input_a, input_b, name: nil)
+    input_a, input_b = check_data_types(input_a, input_b)
     _op(:mul, input_a, input_b, name: name)
   end
 
+  ##
+  # Returns x * y element-wise.
+  # This operation supports broadcasting
   def mul(input_a, input_b, name: nil)
+    input_a, input_b = check_data_types(input_a, input_b)
     _op(:mul, input_a, input_b, name: name)
   end
 
+  ##
+  # Divides x / y element-wise
+  # This operation supports broadcasting
   def div(input_a, input_b, name: nil)
+    input_a, input_b = check_data_types(input_a, input_b)
     _op(:div, input_a, input_b, name: name)
   end
 
+  ##
+  # Computes the power of one value to another.
   def pow(input_a, input_e, name: nil)
+    input_a, input_e = check_data_types(input_a, input_e)
     _op(:pow, input_a, input_e, name: name)
   end
 
+  ##
+  # Computes the absolute value of a tensor.
   def abs(input, name: nil)
     _op(:abs, input, nil, name: name)
   end
 
+  ##
+  # Returns an element-wise indication of the sign of a number.
+  # y = sign(x) = -1 if x < 0; 0 if x == 0 or tf.is_nan(x); 1 if x > 0.
+  # Zero is returned for NaN inputs.
   def sign(input, name: nil)
     _op(:sign, input, nil, name: name)
   end
 
-  def sin(input, options = {})
-    options[:data_type] ||= :float32
+  ##
+  # Computes sin of input element-wise.
+  def sin(input, name: nil)
     check_allowed_types(input, FLOATING_POINT_TYPES)
-    _op(:sin, input, nil, options)
+    _op(:sin, input, nil, name: name)
   end
 
-  def cos(input, options = {})
-    options[:data_type] ||= :float32
+  ##
+  # Computes cos of input element-wise.
+  def cos(input, name: nil)
     check_allowed_types(input, FLOATING_POINT_TYPES)
-    _op(:cos, input, nil, options)
+    _op(:cos, input, nil, name: name)
   end
 
-  def tan(input, options = {})
-    options[:data_type] ||= :float32
+  ##
+  # Computes tan of input element-wise.
+  def tan(input, name: nil)
     check_allowed_types(input, FLOATING_POINT_TYPES)
-    _op(:tan, input, nil, options)
+    _op(:tan, input, nil, name: name)
  end
 
-  def tanh(input, options = {})
-    options[:data_type] ||= :float32
+  ##
+  # Computes tanh of input element-wise.
+  def tanh(input, name: nil)
     check_allowed_types(input, FLOATING_POINT_TYPES)
-    _op(:tanh, input, nil, options)
+    _op(:tanh, input, nil, name: name)
   end
 
+  ##
+  # Computes sqrt of input element-wise.
   def sqrt(input, name: nil)
-    options = {
-      data_type: input.data_type,
-      name: name
-    }
     check_allowed_types(input, FLOATING_POINT_TYPES)
-    _op(:sqrt, input, nil, options)
+    _op(:sqrt, input, nil, name: name)
   end
 
-  def log(input, options = {})
-    options[:data_type] ||= :float32
+  ##
+  # Computes natural logarithm of x element-wise.
+  def log(input, name: nil)
     check_allowed_types(input, FLOATING_POINT_TYPES)
-    _op(:log, input, nil, options)
+    _op(:log, input, nil, name: name)
   end
-
-  def log1p(input, options = {})
-    options[:data_type] ||= :float32
+
+  ##
+  # Computes natural logarithm of (1 + x) element-wise.
+  def log1p(input, name: nil)
     check_allowed_types(input, FLOATING_POINT_TYPES)
-    _op(:log1p, input, nil, options)
+    _op(:log1p, input, nil, name: name)
  end
 
-  def exp(input, options = {})
-    options[:data_type] ||= :float32
+  ##
+  # Computes exponential of x element-wise.
+  def exp(input, name: nil)
     check_allowed_types(input, FLOATING_POINT_TYPES)
-    _op(:exp, input, nil, options)
+    _op(:exp, input, nil, name: name)
   end
 
+  ##
+  # Computes sigmoid of x element-wise.
   def sigmoid(input, name: nil)
     check_allowed_types(input, FLOATING_POINT_TYPES)
     _op(:sigmoid, input, nil, name: name)
   end
 
+  ##
+  # Multiplies matrix a by matrix b, producing a * b.
+  # The inputs must, following any transpositions, be tensors of rank 2.
   def matmul(input_a, input_b, transpose_a: false,
              transpose_b: false,
             name: nil)
+    input_a, input_b = check_data_types(input_a, input_b)
     _op(:matmul, input_a, input_b, transpose_a: transpose_a, transpose_b: transpose_b, name: name)
   end
 
+  ##
+  # Transposes a. Permutes the dimensions according to perm.
   def transpose(tensor, perm: nil, name: 'transpose')
     _op(:transpose, tensor, nil, perm: perm, name: name)
   end
 
+  ##
+  # Pads a tensor.
+  # This operation pads a tensor according to the paddings you specify.
   def pad(tensor, paddings, mode: 'CONSTANT', name: nil)
     _op(:pad, tensor, nil, paddings: paddings, mode: mode, name: name)
   end
+
+  ##
+  # Checks a tensor for NaN and Inf values.
+  # When run, reports an InvalidArgument error if tensor has any values that are not a number (NaN) or infinity (Inf). Otherwise, passes tensor as-is.
+  def check_numerics(tensor, message, name: nil)
+    _op(:check_numerics, tensor, nil, message: message, name: name)
+  end
   end
 end
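Note: a sketch tying the two new ops.rb surfaces together: `check_numerics` as a pass-through guard that raises on NaN/Inf, and the simplified `gradients(ys, wrt_xs)` signature (again assuming the gem's usual `constant`/`session` helpers and tensor operator overloads, which are not part of this diff):

    require 'tensor_stream'

    tf = TensorStream
    x = tf.constant([3.0, 4.0])

    # identity op; raises "loss check Invalid argument" if the value is NaN/Inf
    y = tf.check_numerics(tf.reduce_sum(x * x), 'loss check')

    grads = tf.gradients(y, [x])  # d(sum(x * x))/dx = 2x
    tf.session.run(grads)         # expected ~ [6.0, 8.0]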