tensor_stream 1.0.0 → 1.0.1

Files changed (142)
  1. checksums.yaml +4 -4
  2. data/.gitignore +1 -0
  3. data/.rubocop.yml +1 -0
  4. data/Gemfile +1 -1
  5. data/LICENSE.txt +1 -1
  6. data/README.md +34 -34
  7. data/Rakefile +3 -3
  8. data/USAGE_GUIDE.md +235 -0
  9. data/bin/stubgen +20 -0
  10. data/exe/model_utils +2 -2
  11. data/lib/tensor_stream.rb +45 -44
  12. data/lib/tensor_stream/constant.rb +2 -2
  13. data/lib/tensor_stream/control_flow.rb +1 -1
  14. data/lib/tensor_stream/debugging/debugging.rb +2 -2
  15. data/lib/tensor_stream/dynamic_stitch.rb +2 -2
  16. data/lib/tensor_stream/evaluator/base_evaluator.rb +18 -18
  17. data/lib/tensor_stream/evaluator/buffer.rb +1 -1
  18. data/lib/tensor_stream/evaluator/evaluator.rb +2 -2
  19. data/lib/tensor_stream/evaluator/operation_helpers/array_ops_helper.rb +41 -41
  20. data/lib/tensor_stream/evaluator/operation_helpers/math_helper.rb +1 -1
  21. data/lib/tensor_stream/evaluator/ruby/array_ops.rb +39 -39
  22. data/lib/tensor_stream/evaluator/ruby/check_ops.rb +2 -2
  23. data/lib/tensor_stream/evaluator/ruby/images_ops.rb +18 -18
  24. data/lib/tensor_stream/evaluator/ruby/math_ops.rb +13 -14
  25. data/lib/tensor_stream/evaluator/ruby/nn_ops.rb +33 -36
  26. data/lib/tensor_stream/evaluator/ruby/random_ops.rb +20 -21
  27. data/lib/tensor_stream/evaluator/ruby_evaluator.rb +36 -49
  28. data/lib/tensor_stream/exceptions.rb +1 -1
  29. data/lib/tensor_stream/generated_stub/ops.rb +691 -0
  30. data/lib/tensor_stream/generated_stub/stub_file.erb +24 -0
  31. data/lib/tensor_stream/graph.rb +18 -18
  32. data/lib/tensor_stream/graph_builder.rb +17 -17
  33. data/lib/tensor_stream/graph_deserializers/protobuf.rb +97 -97
  34. data/lib/tensor_stream/graph_deserializers/yaml_loader.rb +1 -1
  35. data/lib/tensor_stream/graph_keys.rb +3 -3
  36. data/lib/tensor_stream/graph_serializers/graphml.rb +33 -33
  37. data/lib/tensor_stream/graph_serializers/packer.rb +23 -23
  38. data/lib/tensor_stream/graph_serializers/pbtext.rb +38 -42
  39. data/lib/tensor_stream/graph_serializers/serializer.rb +3 -2
  40. data/lib/tensor_stream/graph_serializers/yaml.rb +5 -5
  41. data/lib/tensor_stream/helpers/infer_shape.rb +56 -56
  42. data/lib/tensor_stream/helpers/op_helper.rb +8 -9
  43. data/lib/tensor_stream/helpers/string_helper.rb +15 -15
  44. data/lib/tensor_stream/helpers/tensor_mixins.rb +17 -17
  45. data/lib/tensor_stream/images.rb +1 -1
  46. data/lib/tensor_stream/initializer.rb +1 -1
  47. data/lib/tensor_stream/math_gradients.rb +28 -187
  48. data/lib/tensor_stream/monkey_patches/array.rb +1 -1
  49. data/lib/tensor_stream/monkey_patches/float.rb +1 -1
  50. data/lib/tensor_stream/monkey_patches/integer.rb +1 -1
  51. data/lib/tensor_stream/monkey_patches/op_patch.rb +5 -5
  52. data/lib/tensor_stream/monkey_patches/patch.rb +1 -1
  53. data/lib/tensor_stream/nn/nn_ops.rb +17 -15
  54. data/lib/tensor_stream/op_maker.rb +180 -0
  55. data/lib/tensor_stream/operation.rb +17 -17
  56. data/lib/tensor_stream/ops.rb +95 -384
  57. data/lib/tensor_stream/ops/add.rb +23 -0
  58. data/lib/tensor_stream/ops/argmax.rb +14 -0
  59. data/lib/tensor_stream/ops/argmin.rb +14 -0
  60. data/lib/tensor_stream/ops/case.rb +17 -0
  61. data/lib/tensor_stream/ops/cast.rb +15 -0
  62. data/lib/tensor_stream/ops/ceil.rb +15 -0
  63. data/lib/tensor_stream/ops/const.rb +0 -0
  64. data/lib/tensor_stream/ops/cos.rb +10 -0
  65. data/lib/tensor_stream/ops/div.rb +21 -0
  66. data/lib/tensor_stream/ops/equal.rb +15 -0
  67. data/lib/tensor_stream/ops/expand_dims.rb +17 -0
  68. data/lib/tensor_stream/ops/fill.rb +19 -0
  69. data/lib/tensor_stream/ops/floor.rb +15 -0
  70. data/lib/tensor_stream/ops/floor_div.rb +15 -0
  71. data/lib/tensor_stream/ops/greater.rb +11 -0
  72. data/lib/tensor_stream/ops/greater_equal.rb +11 -0
  73. data/lib/tensor_stream/ops/less_equal.rb +15 -0
  74. data/lib/tensor_stream/ops/log.rb +14 -0
  75. data/lib/tensor_stream/ops/mat_mul.rb +60 -0
  76. data/lib/tensor_stream/ops/max.rb +15 -0
  77. data/lib/tensor_stream/ops/min.rb +15 -0
  78. data/lib/tensor_stream/ops/mod.rb +23 -0
  79. data/lib/tensor_stream/ops/mul.rb +21 -0
  80. data/lib/tensor_stream/ops/negate.rb +14 -0
  81. data/lib/tensor_stream/ops/ones_like.rb +19 -0
  82. data/lib/tensor_stream/ops/pow.rb +25 -0
  83. data/lib/tensor_stream/ops/prod.rb +60 -0
  84. data/lib/tensor_stream/ops/random_uniform.rb +18 -0
  85. data/lib/tensor_stream/ops/range.rb +20 -0
  86. data/lib/tensor_stream/ops/rank.rb +13 -0
  87. data/lib/tensor_stream/ops/reshape.rb +24 -0
  88. data/lib/tensor_stream/ops/round.rb +15 -0
  89. data/lib/tensor_stream/ops/shape.rb +14 -0
  90. data/lib/tensor_stream/ops/sigmoid.rb +10 -0
  91. data/lib/tensor_stream/ops/sign.rb +12 -0
  92. data/lib/tensor_stream/ops/sin.rb +10 -0
  93. data/lib/tensor_stream/ops/size.rb +16 -0
  94. data/lib/tensor_stream/ops/sub.rb +24 -0
  95. data/lib/tensor_stream/ops/sum.rb +27 -0
  96. data/lib/tensor_stream/ops/tan.rb +12 -0
  97. data/lib/tensor_stream/ops/tanh.rb +10 -0
  98. data/lib/tensor_stream/ops/tile.rb +19 -0
  99. data/lib/tensor_stream/ops/zeros.rb +15 -0
  100. data/lib/tensor_stream/placeholder.rb +2 -2
  101. data/lib/tensor_stream/profile/report_tool.rb +3 -3
  102. data/lib/tensor_stream/session.rb +36 -38
  103. data/lib/tensor_stream/tensor.rb +2 -2
  104. data/lib/tensor_stream/tensor_shape.rb +4 -4
  105. data/lib/tensor_stream/train/adadelta_optimizer.rb +8 -8
  106. data/lib/tensor_stream/train/adagrad_optimizer.rb +3 -3
  107. data/lib/tensor_stream/train/adam_optimizer.rb +11 -11
  108. data/lib/tensor_stream/train/learning_rate_decay.rb +2 -2
  109. data/lib/tensor_stream/train/momentum_optimizer.rb +7 -7
  110. data/lib/tensor_stream/train/optimizer.rb +9 -9
  111. data/lib/tensor_stream/train/rmsprop_optimizer.rb +16 -16
  112. data/lib/tensor_stream/train/saver.rb +14 -14
  113. data/lib/tensor_stream/train/slot_creator.rb +6 -6
  114. data/lib/tensor_stream/train/utils.rb +12 -12
  115. data/lib/tensor_stream/trainer.rb +10 -10
  116. data/lib/tensor_stream/types.rb +1 -1
  117. data/lib/tensor_stream/utils.rb +33 -32
  118. data/lib/tensor_stream/utils/freezer.rb +5 -5
  119. data/lib/tensor_stream/variable.rb +5 -5
  120. data/lib/tensor_stream/variable_scope.rb +1 -1
  121. data/lib/tensor_stream/version.rb +1 -1
  122. data/samples/{iris.data → datasets/iris.data} +0 -0
  123. data/samples/jupyter_notebooks/linear_regression.ipynb +463 -0
  124. data/samples/{iris.rb → neural_networks/iris.rb} +21 -23
  125. data/samples/{mnist_data.rb → neural_networks/mnist_data.rb} +8 -8
  126. data/samples/neural_networks/raw_neural_net_sample.rb +112 -0
  127. data/samples/{rnn.rb → neural_networks/rnn.rb} +28 -31
  128. data/samples/{nearest_neighbor.rb → others/nearest_neighbor.rb} +12 -12
  129. data/samples/regression/linear_regression.rb +63 -0
  130. data/samples/{logistic_regression.rb → regression/logistic_regression.rb} +14 -16
  131. data/tensor_stream.gemspec +9 -8
  132. metadata +89 -19
  133. data/data_1.json +0 -4764
  134. data/data_2.json +0 -4764
  135. data/data_actual.json +0 -28
  136. data/data_expected.json +0 -28
  137. data/data_input.json +0 -28
  138. data/samples/error.graphml +0 -2755
  139. data/samples/gradient_sample.graphml +0 -1255
  140. data/samples/linear_regression.rb +0 -69
  141. data/samples/multigpu.rb +0 -73
  142. data/samples/raw_neural_net_sample.rb +0 -112
data/lib/tensor_stream/ops/add.rb
@@ -0,0 +1,23 @@
+ TensorStream::OpMaker.define_operation :add do |op|
+   op.what_it_does "Returns x + y element-wise."
+
+   op.parameter :input_a, "tensor X"
+   op.parameter :input_b, "tensor Y"
+
+   op.apply_data_type_coercion!
+   op.supports_broadcasting!
+
+   op.option :name, "Optional name", :nil
+
+   op.define_gradient do |grad, node, params|
+     x, y = params
+     next [grad, grad] if shapes_fully_specified_and_equal(x, y)
+
+     sx = ts.shape(x, name: "add/shape_x")
+     sy = ts.shape(y, name: "add/shape_y")
+     rx, ry = _broadcast_gradient_args(sx, sy)
+
+     [ts.reshape(ts.reduce_sum(grad, rx, name: "add/reduce_sum_x"), sx),
+      ts.reshape(ts.reduce_sum(grad, ry, name: "add/reduce_sum_y"), sy)]
+   end
+ end
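For orientation only (not part of the diff): a minimal sketch of exercising the generated add stub through the top-level API, assuming the usual constant/session helpers and the broadcasting behavior that supports_broadcasting! advertises.

  require "tensor_stream"

  ts = TensorStream
  a = ts.constant([[1.0, 2.0], [3.0, 4.0]])
  b = ts.constant([10.0, 20.0])          # broadcast against each row of a
  sum = ts.add(a, b, name: "example_add")

  sess = ts.session
  p sess.run(sum)                        # expect [[11.0, 22.0], [13.0, 24.0]]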
data/lib/tensor_stream/ops/argmax.rb
@@ -0,0 +1,14 @@
+ TensorStream::OpMaker.define_operation :argmax do |op|
+   op.what_it_does "Returns the index with the largest value across axes of a tensor."
+
+   op.parameter :input_a, "tensor X", validate: 'NUMERIC_TYPES'
+   op.parameter :axis, "Describes which axis of the input tensor to reduce across. For vectors, use axis = 0", :nil, validate: 'INTEGER_TYPES'
+
+   op.option :name, "Optional name", :nil
+   op.option :dimension, "Same as axis", :nil
+   op.option :output_type, "Output data type defaults to int32", ":int32"
+
+   op.define_gradient do |grad, node, params|
+     [nil, nil]
+   end
+ end
data/lib/tensor_stream/ops/argmin.rb
@@ -0,0 +1,14 @@
+ TensorStream::OpMaker.define_operation :argmin do |op|
+   op.what_it_does "Returns the index with the smallest value across axes of a tensor."
+
+   op.parameter :input_a, "tensor X", validate: 'NUMERIC_TYPES'
+   op.parameter :axis, "Describes which axis of the input tensor to reduce across. For vectors, use axis = 0", :nil, validate: 'INTEGER_TYPES'
+
+   op.option :name, "Optional name", :nil
+   op.option :dimension, "Same as axis", :nil
+   op.option :output_type, "Output data type defaults to int32", ":int32"
+
+   op.define_gradient do |grad, node, params|
+     [nil, nil]
+   end
+ end
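For orientation only (not part of the diff): argmax/argmin as defined above might be used like this, assuming the standard constant/session helpers; the expected outputs are illustrative.

  ts = TensorStream
  m = ts.constant([[1, 9, 3], [7, 2, 8]])
  p ts.session.run(ts.argmax(m, 1))   # expect [1, 2]  (per-row index of the max)
  p ts.session.run(ts.argmin(m, 0))   # expect [0, 1, 0]  (per-column index of the min)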
data/lib/tensor_stream/ops/case.rb
@@ -0,0 +1,17 @@
+ TensorStream::OpMaker.define_operation :case do |op|
+   op.exclude!
+
+   op.define_gradient do |grad, node, params|
+     n_preds = node.inputs.size - 2
+
+     case_grads = Array.new(n_preds) { |index|
+       i_op(:case_grad, index, node.inputs[0], node.inputs[2 + index], grad)
+     }
+
+     [nil, i_op(:case_grad, -1, node.inputs[0], node.inputs[1], grad)] + case_grads
+   end
+
+   op.define_shape do |tensor|
+     tensor.inputs[2]&.shape&.shape
+   end
+ end
data/lib/tensor_stream/ops/cast.rb
@@ -0,0 +1,15 @@
+ TensorStream::OpMaker.define_operation :cast do |op|
+   op.exclude!
+
+   op.define_gradient do |grad, node, params|
+     t = %i[float16 float32 float64]
+     src_type = node.inputs[0].data_type
+     dst_type = grad.data_type
+
+     if t.include?(src_type) && t.include?(dst_type)
+       next ts.cast(grad, src_type)
+     end
+
+     nil
+   end
+ end
data/lib/tensor_stream/ops/ceil.rb
@@ -0,0 +1,15 @@
+ TensorStream::OpMaker.define_operation :ceil do |op|
+   op.what_it_does "Returns element-wise smallest integer not less than x"
+
+   op.parameter :input_a, "tensor X", validate: 'FLOATING_POINT_TYPES'
+
+   op.option :name, "Optional name", :nil
+
+   op.define_gradient do |grad, node, params|
+     nil
+   end
+
+   op.define_shape do |tensor|
+     tensor.inputs[0].shape.shape
+   end
+ end
data/lib/tensor_stream/ops/const.rb
File without changes
data/lib/tensor_stream/ops/cos.rb
@@ -0,0 +1,10 @@
+ TensorStream::OpMaker.define_operation :cos do |op|
+   op.what_it_does "Computes cos of input element-wise."
+
+   op.parameter :input_a, "tensor X", validate: 'FLOATING_POINT_TYPES'
+   op.option :name, "Optional name", :nil
+
+   op.define_gradient do |grad, node, params|
+     -grad * ts.sin(params[0])
+   end
+ end
data/lib/tensor_stream/ops/div.rb
@@ -0,0 +1,21 @@
+ TensorStream::OpMaker.define_operation :div do |op|
+   op.what_it_does "Returns x / y element-wise."
+
+   op.parameter :input_a, "tensor X"
+   op.parameter :input_b, "tensor Y"
+
+   op.apply_data_type_coercion!
+   op.supports_broadcasting!
+
+   op.option :name, "Optional name", :nil
+
+   op.define_gradient do |grad, node, params|
+     x, y = params
+     sx = i_op(:shape, x)
+     sy = i_op(:shape, y)
+     rx, ry = _broadcast_gradient_args(sx, sy)
+
+     [ts.reshape(ts.reduce_sum(ts.div(grad, y), rx), sx),
+      ts.reshape(ts.reduce_sum(grad * ts.div(ts.div(-x, y), y), ry), sy)]
+   end
+ end
data/lib/tensor_stream/ops/equal.rb
@@ -0,0 +1,15 @@
+ TensorStream::OpMaker.define_operation :equal do |op|
+   op.what_it_does "Returns the truth value of (x == y) element-wise."
+
+   op.parameter :input_a, "tensor X"
+   op.parameter :input_b, "tensor Y"
+
+   op.apply_data_type_coercion!
+   op.supports_broadcasting!
+
+   op.option :name, "Optional name", :nil
+
+   op.define_gradient do |grad, node, params|
+     _min_or_max_grad(node.inputs, grad, ->(a, b) { ts.equal(a, b) })
+   end
+ end
data/lib/tensor_stream/ops/expand_dims.rb
@@ -0,0 +1,17 @@
+ TensorStream::OpMaker.define_operation :expand_dims do |op|
+   op.what_it_does "Inserts a dimension of 1 into a tensor's shape. "
+   op.what_it_does "Given a tensor input, this operation inserts a dimension of 1 at the dimension index axis of input's shape. The "
+   op.what_it_does "dimension index axis starts at zero; if you specify a negative number for axis it is counted backward from the end."
+
+   op.parameter :input, "A tensor"
+   op.parameter :axis, "Specifies the dimension index at which to expand the shape of input. Must be in the range [-rank(input) - 1, rank(input)]."
+   op.option :name, "Optional name", :nil
+
+   op.define_gradient do |grad, node, params|
+     [_reshape_to_input(node, grad), nil]
+   end
+
+   op.define_shape do |tensor|
+     nil
+   end
+ end
data/lib/tensor_stream/ops/fill.rb
@@ -0,0 +1,19 @@
+ TensorStream::OpMaker.define_operation :fill do |op|
+   op.what_it_does "This operation creates a tensor of shape dims and fills it with value."
+
+   op.parameter :dims, "tensor shape"
+   op.parameter :value, "scalar value to fill with"
+
+   op.option :name, "Optional name", :nil
+
+   op.define_gradient do |grad, node, params|
+     [nil, TensorStream.reduce_sum(grad)]
+   end
+
+   op.define_shape do |tensor|
+     a_shape = tensor.inputs[0] ? tensor.inputs[0].const_value : tensor.options[:shape]
+     next nil if a_shape.nil?
+
+     a_shape.is_a?(Array) ? a_shape : [a_shape]
+   end
+ end
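For orientation only (not part of the diff): a minimal sketch of what the fill op and its define_shape block above imply at the API level, assuming the standard constant/session helpers.

  ts = TensorStream
  filled = ts.fill([2, 3], 9.0)     # dims first, then the scalar value
  sess = ts.session
  p sess.run(filled)                # expect [[9.0, 9.0, 9.0], [9.0, 9.0, 9.0]]
  p filled.shape.shape              # expect [2, 3], inferred from the dims constant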
data/lib/tensor_stream/ops/floor.rb
@@ -0,0 +1,15 @@
+ TensorStream::OpMaker.define_operation :floor do |op|
+   op.what_it_does "Returns element-wise largest integer not greater than x."
+
+   op.parameter :input_a, "tensor X", validate: 'FLOATING_POINT_TYPES'
+
+   op.option :name, "Optional name", :nil
+
+   op.define_gradient do |grad, node, params|
+     nil
+   end
+
+   op.define_shape do |tensor|
+     tensor.inputs[0].shape.shape
+   end
+ end
data/lib/tensor_stream/ops/floor_div.rb
@@ -0,0 +1,15 @@
+ TensorStream::OpMaker.define_operation :floor_div do |op|
+   op.what_it_does "Returns element-wise integer division."
+
+   op.parameter :input_a, "tensor X"
+   op.parameter :input_b, "tensor Y"
+
+   op.apply_data_type_coercion!
+   op.supports_broadcasting!
+
+   op.option :name, "Optional name", :nil
+
+   op.define_gradient do |grad, node, params|
+     [nil, nil]
+   end
+ end
data/lib/tensor_stream/ops/greater.rb
@@ -0,0 +1,11 @@
+ TensorStream::OpMaker.define_operation :greater do |op|
+   op.what_it_does "Returns the truth value of (x > y) element-wise."
+
+   op.parameter :input_a, "tensor X"
+   op.parameter :input_b, "tensor Y"
+
+   op.apply_data_type_coercion!
+   op.supports_broadcasting!
+
+   op.option :name, "Optional name", :nil
+ end
data/lib/tensor_stream/ops/greater_equal.rb
@@ -0,0 +1,11 @@
+ TensorStream::OpMaker.define_operation :greater_equal do |op|
+   op.what_it_does "Returns the truth value of (x >= y) element-wise."
+
+   op.parameter :input_a, "tensor X"
+   op.parameter :input_b, "tensor Y"
+
+   op.apply_data_type_coercion!
+   op.supports_broadcasting!
+
+   op.option :name, "Optional name", :nil
+ end
data/lib/tensor_stream/ops/less_equal.rb
@@ -0,0 +1,15 @@
+ TensorStream::OpMaker.define_operation :less_equal do |op|
+   op.what_it_does "Returns the truth value of (x <= y) element-wise."
+
+   op.parameter :input_a, "tensor X"
+   op.parameter :input_b, "tensor Y"
+
+   op.apply_data_type_coercion!
+   op.supports_broadcasting!
+
+   op.option :name, "Optional name", :nil
+
+   op.define_gradient do |grad, node, params|
+     _min_or_max_grad(node.inputs, grad, ->(a, b) { ts.greater_equal(a, b) })
+   end
+ end
data/lib/tensor_stream/ops/log.rb
@@ -0,0 +1,14 @@
+ TensorStream::OpMaker.define_operation :log do |op|
+   op.what_it_does "Computes natural logarithm of x element-wise."
+
+   op.parameter :input, "tensor X"
+   op.option :name, "Optional name", :nil
+
+   op.define_gradient do |grad, node, params|
+     grad * TensorStream.reciprocal(params[0])
+   end
+
+   op.define_shape do |tensor|
+     tensor.inputs[0].shape.shape
+   end
+ end
data/lib/tensor_stream/ops/mat_mul.rb
@@ -0,0 +1,60 @@
+ TensorStream::OpMaker.define_operation :mat_mul do |op|
+   op.other_names %w(matmul)
+   op.what_it_does "Multiplies matrix a by matrix b, producing a * b. The inputs must, following any transpositions, be tensors of rank 2."
+
+   op.parameter :input_a, "tensor X"
+   op.parameter :input_b, "tensor Y"
+
+   op.apply_data_type_coercion!
+   op.supports_broadcasting!
+
+   op.option :transpose_a, "Transpose matrix A first", :false
+   op.option :transpose_b, "Transpose matrix B first", :false
+   op.option :name, "Optional name", :nil
+
+   op.define_gradient do |grad, node, params|
+     x, y = params
+     t_a = node.options[:transpose_a]
+     t_b = node.options[:transpose_b]
+
+     if !t_a && !t_b
+       grad_a = ts.matmul(grad, y, transpose_b: true)
+       grad_b = ts.matmul(x, grad, transpose_a: true)
+     elsif !t_a && t_b
+       grad_a = ts.matmul(grad, y)
+       grad_b = ts.matmul(grad, x, transpose_a: true)
+     elsif t_a && !t_b
+       grad_a = ts.matmul(y, grad, transpose_b: true)
+       grad_b = ts.matmul(x, grad)
+     elsif t_a && t_b
+       grad_a = ts.matmul(y, grad, transpose_a: true, transpose_b: true)
+       grad_b = ts.matmul(grad, x, transpose_a: true, transpose_b: true)
+     end
+
+     [grad_a, grad_b]
+   end
+
+   op.define_shape do |tensor|
+     next nil if tensor.inputs[0].shape.shape.nil? || tensor.inputs[1].shape.shape.nil?
+     next [] if tensor.inputs[0].shape.shape.empty? || tensor.inputs[1].shape.shape.empty?
+     next nil if tensor.inputs[0].shape.shape.size != 2 || tensor.inputs[1].shape.shape.size != 2
+
+     shape1, m = if tensor.options[:transpose_a]
+       [tensor.inputs[0].shape.shape[0], tensor.inputs[0].shape.shape[1]]
+     else
+       [tensor.inputs[0].shape.shape[1], tensor.inputs[0].shape.shape[0]]
+     end
+
+     shape2, n = if tensor.options[:transpose_b]
+       [tensor.inputs[1].shape.shape[1], tensor.inputs[1].shape.shape[0]]
+     else
+       [tensor.inputs[1].shape.shape[0], tensor.inputs[1].shape.shape[1]]
+     end
+
+     next nil if shape1.nil? || shape2.nil? || shape1 < 0 || shape2 < 0
+
+     raise TensorStream::ValueError, "incompatible shape sizes for matrix multiplication (#{shape1} != #{shape2}) #{tensor.inputs[0].shape.shape} vs #{tensor.inputs[1].shape.shape}" if shape1 != shape2
+
+     [m, n]
+   end
+ end
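For orientation only (not part of the diff): a hedged sketch of driving the mat_mul definition above, including its gradient block, through the public API; it assumes the usual constant/gradients/session helpers.

  ts = TensorStream
  a = ts.constant([[1.0, 2.0], [3.0, 4.0]])
  b = ts.constant([[5.0, 6.0], [7.0, 8.0]])

  c = ts.matmul(a, b)              # :matmul alias registered via other_names
  grads = ts.gradients(c, [a, b])  # routed through the define_gradient block above

  sess = ts.session
  p sess.run(c)                    # expect [[19.0, 22.0], [43.0, 50.0]]
  p sess.run(grads)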
data/lib/tensor_stream/ops/max.rb
@@ -0,0 +1,15 @@
+ TensorStream::OpMaker.define_operation :max do |op|
+   op.what_it_does "Returns the max of x and y (i.e. x > y ? x : y) element-wise."
+
+   op.parameter :input_a, "tensor X", nil, validate: 'NUMERIC_TYPES'
+   op.parameter :input_b, "tensor Y", nil, validate: 'NUMERIC_TYPES'
+
+   op.apply_data_type_coercion!
+   op.supports_broadcasting!
+
+   op.option :name, "Optional name", :nil
+
+   op.define_gradient do |grad, node, params|
+     _min_or_max_grad(node.inputs, grad, ->(a, b) { ts.greater_equal(a, b) })
+   end
+ end
data/lib/tensor_stream/ops/min.rb
@@ -0,0 +1,15 @@
+ TensorStream::OpMaker.define_operation :min do |op|
+   op.what_it_does "Returns the min of x and y (i.e. x < y ? x : y) element-wise."
+
+   op.parameter :input_a, "tensor X", nil, validate: 'NUMERIC_TYPES'
+   op.parameter :input_b, "tensor Y", nil, validate: 'NUMERIC_TYPES'
+
+   op.apply_data_type_coercion!
+   op.supports_broadcasting!
+
+   op.option :name, "Optional name", :nil
+
+   op.define_gradient do |grad, node, params|
+     _min_or_max_grad(node.inputs, grad, ->(a, b) { ts.less_equal(a, b) })
+   end
+ end
data/lib/tensor_stream/ops/mod.rb
@@ -0,0 +1,23 @@
+ TensorStream::OpMaker.define_operation :mod do |op|
+   op.what_it_does "Returns element-wise remainder of division."
+
+   op.parameter :input_a, "tensor X"
+   op.parameter :input_b, "tensor Y"
+
+   op.apply_data_type_coercion!
+   op.supports_broadcasting!
+
+   op.option :name, "Optional name", :nil
+
+   op.define_gradient do |grad, node, params|
+     x, y = params
+     sx = ts.shape(x)
+     sy = ts.shape(y)
+     rx, ry = _broadcast_gradient_args(sx, sy)
+     floor_xy = ts.floor_div(x, y)
+     gx = ts.reshape(ts.reduce_sum(grad, rx), sx)
+     gy = ts.reshape(ts.reduce_sum(grad * ts.negative(floor_xy), ry), sy)
+
+     [gx, gy]
+   end
+ end
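A note on the gradient above: it follows from x mod y = x - y * floor(x / y), so with floor(x / y) treated as locally constant, the x-gradient is 1 and the y-gradient is -floor(x / y). A quick finite-difference check in plain Ruby (illustrative only, not part of the diff):

  x, y, eps = 7.3, 2.1, 1.0e-6
  numeric  = ((x % (y + eps)) - (x % (y - eps))) / (2 * eps)
  analytic = -(x / y).floor
  p [numeric.round(4), analytic]   # both come out to about -3 here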
data/lib/tensor_stream/ops/mul.rb
@@ -0,0 +1,21 @@
+ TensorStream::OpMaker.define_operation :mul do |op|
+   op.what_it_does "Returns x * y element-wise."
+
+   op.parameter :input_a, "tensor X"
+   op.parameter :input_b, "tensor Y"
+
+   op.apply_data_type_coercion!
+   op.supports_broadcasting!
+
+   op.option :name, "Optional name", :nil
+
+   op.define_gradient do |grad, node, params|
+     x, y = params
+     sx = ts.shape(x)
+     sy = ts.shape(y)
+     rx, ry = _broadcast_gradient_args(sx, sy)
+
+     [ts.reshape(ts.reduce_sum(ts.mul(grad, y), rx), sx),
+      ts.reshape(ts.reduce_sum(ts.mul(x, grad), ry), sy)]
+   end
+ end
data/lib/tensor_stream/ops/negate.rb
@@ -0,0 +1,14 @@
+ TensorStream::OpMaker.define_operation :negate do |op|
+   op.what_it_does "Computes numerical negative value element-wise."
+
+   op.parameter :input, "tensor X"
+   op.option :name, "Optional name", :nil
+
+   op.define_gradient do |grad, node, params|
+     -grad
+   end
+
+   op.define_shape do |tensor|
+     tensor.inputs[0].shape.shape
+   end
+ end