tensor_stream 1.0.0 → 1.0.1

Files changed (142)
  1. checksums.yaml +4 -4
  2. data/.gitignore +1 -0
  3. data/.rubocop.yml +1 -0
  4. data/Gemfile +1 -1
  5. data/LICENSE.txt +1 -1
  6. data/README.md +34 -34
  7. data/Rakefile +3 -3
  8. data/USAGE_GUIDE.md +235 -0
  9. data/bin/stubgen +20 -0
  10. data/exe/model_utils +2 -2
  11. data/lib/tensor_stream.rb +45 -44
  12. data/lib/tensor_stream/constant.rb +2 -2
  13. data/lib/tensor_stream/control_flow.rb +1 -1
  14. data/lib/tensor_stream/debugging/debugging.rb +2 -2
  15. data/lib/tensor_stream/dynamic_stitch.rb +2 -2
  16. data/lib/tensor_stream/evaluator/base_evaluator.rb +18 -18
  17. data/lib/tensor_stream/evaluator/buffer.rb +1 -1
  18. data/lib/tensor_stream/evaluator/evaluator.rb +2 -2
  19. data/lib/tensor_stream/evaluator/operation_helpers/array_ops_helper.rb +41 -41
  20. data/lib/tensor_stream/evaluator/operation_helpers/math_helper.rb +1 -1
  21. data/lib/tensor_stream/evaluator/ruby/array_ops.rb +39 -39
  22. data/lib/tensor_stream/evaluator/ruby/check_ops.rb +2 -2
  23. data/lib/tensor_stream/evaluator/ruby/images_ops.rb +18 -18
  24. data/lib/tensor_stream/evaluator/ruby/math_ops.rb +13 -14
  25. data/lib/tensor_stream/evaluator/ruby/nn_ops.rb +33 -36
  26. data/lib/tensor_stream/evaluator/ruby/random_ops.rb +20 -21
  27. data/lib/tensor_stream/evaluator/ruby_evaluator.rb +36 -49
  28. data/lib/tensor_stream/exceptions.rb +1 -1
  29. data/lib/tensor_stream/generated_stub/ops.rb +691 -0
  30. data/lib/tensor_stream/generated_stub/stub_file.erb +24 -0
  31. data/lib/tensor_stream/graph.rb +18 -18
  32. data/lib/tensor_stream/graph_builder.rb +17 -17
  33. data/lib/tensor_stream/graph_deserializers/protobuf.rb +97 -97
  34. data/lib/tensor_stream/graph_deserializers/yaml_loader.rb +1 -1
  35. data/lib/tensor_stream/graph_keys.rb +3 -3
  36. data/lib/tensor_stream/graph_serializers/graphml.rb +33 -33
  37. data/lib/tensor_stream/graph_serializers/packer.rb +23 -23
  38. data/lib/tensor_stream/graph_serializers/pbtext.rb +38 -42
  39. data/lib/tensor_stream/graph_serializers/serializer.rb +3 -2
  40. data/lib/tensor_stream/graph_serializers/yaml.rb +5 -5
  41. data/lib/tensor_stream/helpers/infer_shape.rb +56 -56
  42. data/lib/tensor_stream/helpers/op_helper.rb +8 -9
  43. data/lib/tensor_stream/helpers/string_helper.rb +15 -15
  44. data/lib/tensor_stream/helpers/tensor_mixins.rb +17 -17
  45. data/lib/tensor_stream/images.rb +1 -1
  46. data/lib/tensor_stream/initializer.rb +1 -1
  47. data/lib/tensor_stream/math_gradients.rb +28 -187
  48. data/lib/tensor_stream/monkey_patches/array.rb +1 -1
  49. data/lib/tensor_stream/monkey_patches/float.rb +1 -1
  50. data/lib/tensor_stream/monkey_patches/integer.rb +1 -1
  51. data/lib/tensor_stream/monkey_patches/op_patch.rb +5 -5
  52. data/lib/tensor_stream/monkey_patches/patch.rb +1 -1
  53. data/lib/tensor_stream/nn/nn_ops.rb +17 -15
  54. data/lib/tensor_stream/op_maker.rb +180 -0
  55. data/lib/tensor_stream/operation.rb +17 -17
  56. data/lib/tensor_stream/ops.rb +95 -384
  57. data/lib/tensor_stream/ops/add.rb +23 -0
  58. data/lib/tensor_stream/ops/argmax.rb +14 -0
  59. data/lib/tensor_stream/ops/argmin.rb +14 -0
  60. data/lib/tensor_stream/ops/case.rb +17 -0
  61. data/lib/tensor_stream/ops/cast.rb +15 -0
  62. data/lib/tensor_stream/ops/ceil.rb +15 -0
  63. data/lib/tensor_stream/ops/const.rb +0 -0
  64. data/lib/tensor_stream/ops/cos.rb +10 -0
  65. data/lib/tensor_stream/ops/div.rb +21 -0
  66. data/lib/tensor_stream/ops/equal.rb +15 -0
  67. data/lib/tensor_stream/ops/expand_dims.rb +17 -0
  68. data/lib/tensor_stream/ops/fill.rb +19 -0
  69. data/lib/tensor_stream/ops/floor.rb +15 -0
  70. data/lib/tensor_stream/ops/floor_div.rb +15 -0
  71. data/lib/tensor_stream/ops/greater.rb +11 -0
  72. data/lib/tensor_stream/ops/greater_equal.rb +11 -0
  73. data/lib/tensor_stream/ops/less_equal.rb +15 -0
  74. data/lib/tensor_stream/ops/log.rb +14 -0
  75. data/lib/tensor_stream/ops/mat_mul.rb +60 -0
  76. data/lib/tensor_stream/ops/max.rb +15 -0
  77. data/lib/tensor_stream/ops/min.rb +15 -0
  78. data/lib/tensor_stream/ops/mod.rb +23 -0
  79. data/lib/tensor_stream/ops/mul.rb +21 -0
  80. data/lib/tensor_stream/ops/negate.rb +14 -0
  81. data/lib/tensor_stream/ops/ones_like.rb +19 -0
  82. data/lib/tensor_stream/ops/pow.rb +25 -0
  83. data/lib/tensor_stream/ops/prod.rb +60 -0
  84. data/lib/tensor_stream/ops/random_uniform.rb +18 -0
  85. data/lib/tensor_stream/ops/range.rb +20 -0
  86. data/lib/tensor_stream/ops/rank.rb +13 -0
  87. data/lib/tensor_stream/ops/reshape.rb +24 -0
  88. data/lib/tensor_stream/ops/round.rb +15 -0
  89. data/lib/tensor_stream/ops/shape.rb +14 -0
  90. data/lib/tensor_stream/ops/sigmoid.rb +10 -0
  91. data/lib/tensor_stream/ops/sign.rb +12 -0
  92. data/lib/tensor_stream/ops/sin.rb +10 -0
  93. data/lib/tensor_stream/ops/size.rb +16 -0
  94. data/lib/tensor_stream/ops/sub.rb +24 -0
  95. data/lib/tensor_stream/ops/sum.rb +27 -0
  96. data/lib/tensor_stream/ops/tan.rb +12 -0
  97. data/lib/tensor_stream/ops/tanh.rb +10 -0
  98. data/lib/tensor_stream/ops/tile.rb +19 -0
  99. data/lib/tensor_stream/ops/zeros.rb +15 -0
  100. data/lib/tensor_stream/placeholder.rb +2 -2
  101. data/lib/tensor_stream/profile/report_tool.rb +3 -3
  102. data/lib/tensor_stream/session.rb +36 -38
  103. data/lib/tensor_stream/tensor.rb +2 -2
  104. data/lib/tensor_stream/tensor_shape.rb +4 -4
  105. data/lib/tensor_stream/train/adadelta_optimizer.rb +8 -8
  106. data/lib/tensor_stream/train/adagrad_optimizer.rb +3 -3
  107. data/lib/tensor_stream/train/adam_optimizer.rb +11 -11
  108. data/lib/tensor_stream/train/learning_rate_decay.rb +2 -2
  109. data/lib/tensor_stream/train/momentum_optimizer.rb +7 -7
  110. data/lib/tensor_stream/train/optimizer.rb +9 -9
  111. data/lib/tensor_stream/train/rmsprop_optimizer.rb +16 -16
  112. data/lib/tensor_stream/train/saver.rb +14 -14
  113. data/lib/tensor_stream/train/slot_creator.rb +6 -6
  114. data/lib/tensor_stream/train/utils.rb +12 -12
  115. data/lib/tensor_stream/trainer.rb +10 -10
  116. data/lib/tensor_stream/types.rb +1 -1
  117. data/lib/tensor_stream/utils.rb +33 -32
  118. data/lib/tensor_stream/utils/freezer.rb +5 -5
  119. data/lib/tensor_stream/variable.rb +5 -5
  120. data/lib/tensor_stream/variable_scope.rb +1 -1
  121. data/lib/tensor_stream/version.rb +1 -1
  122. data/samples/{iris.data → datasets/iris.data} +0 -0
  123. data/samples/jupyter_notebooks/linear_regression.ipynb +463 -0
  124. data/samples/{iris.rb → neural_networks/iris.rb} +21 -23
  125. data/samples/{mnist_data.rb → neural_networks/mnist_data.rb} +8 -8
  126. data/samples/neural_networks/raw_neural_net_sample.rb +112 -0
  127. data/samples/{rnn.rb → neural_networks/rnn.rb} +28 -31
  128. data/samples/{nearest_neighbor.rb → others/nearest_neighbor.rb} +12 -12
  129. data/samples/regression/linear_regression.rb +63 -0
  130. data/samples/{logistic_regression.rb → regression/logistic_regression.rb} +14 -16
  131. data/tensor_stream.gemspec +9 -8
  132. metadata +89 -19
  133. data/data_1.json +0 -4764
  134. data/data_2.json +0 -4764
  135. data/data_actual.json +0 -28
  136. data/data_expected.json +0 -28
  137. data/data_input.json +0 -28
  138. data/samples/error.graphml +0 -2755
  139. data/samples/gradient_sample.graphml +0 -1255
  140. data/samples/linear_regression.rb +0 -69
  141. data/samples/multigpu.rb +0 -73
  142. data/samples/raw_neural_net_sample.rb +0 -112
data/lib/tensor_stream/ops/ones_like.rb
@@ -0,0 +1,19 @@
+ TensorStream::OpMaker.define_operation :ones_like do |op|
+   op.what_it_does "Creates a tensor with all elements set to 1."
+   op.what_it_does "Given a single tensor (tensor), this operation returns a"
+   op.what_it_does "tensor of the same type and shape as tensor with all elements set to 1."
+   op.what_it_does "Optionally, you can specify a new type (dtype) for the returned tensor."
+
+
+   op.parameter :input, "A tensor"
+   op.option :dtype, "Optional new data type to cast into", :nil, alias: :data_type
+   op.option :name, "Optional name", :nil
+
+   op.define_shape do |tensor|
+     tensor.inputs[0].shape.shape
+   end
+
+   op.define_gradient do |grad, node, params|
+     nil # non differentiable
+   end
+ end
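
For orientation, a minimal sketch of calling the generated ones_like stub through the default Ruby evaluator (the ts and sess names and sample values are illustrative, not part of the diff):

    require "tensor_stream"

    ts = TensorStream
    sess = ts.session
    a = ts.constant([[1.0, 2.0], [3.0, 4.0]])
    # ones_like mirrors the input's shape and data type
    puts sess.run(ts.ones_like(a)).inspect  # => [[1.0, 1.0], [1.0, 1.0]]
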
data/lib/tensor_stream/ops/pow.rb
@@ -0,0 +1,25 @@
+ TensorStream::OpMaker.define_operation :pow do |op|
+   op.what_it_does "Computes the power of one value to another X^Y element wise"
+
+   op.parameter :input_a, "tensor X"
+   op.parameter :input_b, "tensor Y"
+
+   op.apply_data_type_coercion!
+   op.supports_broadcasting!
+
+   op.option :name, "Optional name", :nil
+
+   op.define_gradient do |grad, node, params|
+     x, y = params
+     z = node
+     sx = ts.shape(x)
+     sy = ts.shape(y)
+     rx, ry = _broadcast_gradient_args(sx, sy)
+     gx = ts.reduce_sum(grad * y * ts.pow(x, y - 1), rx)
+
+     log_x = ts.where(x > 0, ts.log(x), ts.zeros_like(x))
+     gy = ts.reduce_sum(grad * z * log_x, ry)
+
+     [gx, gy]
+   end
+ end
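
The gradient block encodes the textbook rules d(x^y)/dx = y * x^(y-1) and d(x^y)/dy = x^y * log(x), with reduce_sum folding broadcast dimensions back to the operand shapes. A quick numeric check, assuming the Ruby evaluator (variable names are illustrative):

    require "tensor_stream"

    ts = TensorStream
    x = ts.constant(3.0)
    y = ts.constant(2.0)
    z = ts.pow(x, y)
    # Expect dz/dx = y * x**(y - 1) = 6.0 and dz/dy = z * log(x) = 9.0 * Math.log(3)
    puts ts.session.run(*ts.gradients(z, [x, y])).inspect
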
data/lib/tensor_stream/ops/prod.rb
@@ -0,0 +1,60 @@
+ TensorStream::OpMaker.define_operation :prod do |op|
+   op.other_names %w(reduce_prod)
+   op.what_it_does "Computes the product of elements across dimensions of a tensor."
+   op.what_it_does "Reduces input_tensor along the dimensions given in axis. Unless keepdims is true, the rank of the"
+   op.what_it_does "tensor is reduced by 1 for each entry in axis. If keepdims is true, the reduced dimensions are"
+   op.what_it_does "retained with length 1."
+   op.what_it_does "If axis has no entries, all dimensions are reduced, and a tensor with a single element is returned."
+
+   op.parameter :input_a, "tensor X"
+   op.parameter :axis, "tensor X", :nil, validate: 'INTEGER_TYPES'
+
+   op.option :name, "Optional name", :nil
+   op.option :keepdims, "If true, retains reduced dimensions with length 1.", :false
+
+   op.add_custom "input_a = TensorStream.convert_to_tensor(input_a)"
+   op.add_custom "return input_a if input_a.shape.scalar?"
+   op.add_custom "axis = cast_axis(input_a, axis)"
+
+   op.define_gradient do |grad, node, params|
+     x, y = params
+     input_shape = ts.shape(x)
+     y = ts.range(0, ts.rank(x)) if y.nil?
+     reduction_indices = ts.reshape(y, [-1])
+
+     output_shape_kept_dims = ts.reduced_shape(input_shape, y)
+     tile_scaling = _safe_shape_div(input_shape, output_shape_kept_dims)
+     grad = ts.reshape(grad, output_shape_kept_dims)
+     grad = ts.tile(grad, tile_scaling)
+
+     perm, reduced_num, other_num = ts.device("/cpu:0") {
+       rank = ts.rank(x)
+       reduction_indices = (reduction_indices + rank) % rank
+       reduced = ts.cast(reduction_indices, :int32)
+       idx = ts.range(0, rank)
+       other, = ts.setdiff1d(idx, reduced)
+       [ts.concat([reduced, other], 0),
+        ts.reduce_prod(ts.gather(input_shape, reduced)),
+        ts.reduce_prod(ts.gather(input_shape, other)),]
+     }
+
+     permuted = ts.transpose(x, perm)
+     permuted_shape = ts.shape(permuted)
+
+     reshaped = ts.reshape(permuted, [reduced_num, other_num])
+
+     # Calculate product, leaving out the current entry
+     left = ts.cumprod(reshaped, axis: 0, exclusive: true)
+     right = ts.cumprod(reshaped, axis: 0, exclusive: true, reverse: true)
+     y = ts.reshape(left * right, permuted_shape)
+
+     # Invert the transpose and reshape operations.
+     # Make sure to set the statically known shape information through a reshape.
+     out = grad * ts.transpose(y, ts.invert_permutation(perm))
+     [ts.reshape(out, input_shape, name: "prod"), nil]
+   end
+
+   op.define_shape do |tensor|
+     _infer_reduction_op_shape(tensor)
+   end
+ end
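
The paired exclusive cumprods compute, for each element, the product of every other entry along the reduced axis (entries to its left times entries to its right), which is exactly the partial derivative of a product. A small numeric sketch, assuming the Ruby evaluator:

    require "tensor_stream"

    ts = TensorStream
    x = ts.constant([2.0, 3.0, 4.0])
    p = ts.reduce_prod(x)  # => 24.0
    # Each partial derivative is the product of the other entries: [12.0, 8.0, 6.0]
    puts ts.session.run(ts.gradients(p, [x]).first).inspect
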
data/lib/tensor_stream/ops/random_uniform.rb
@@ -0,0 +1,18 @@
+ TensorStream::OpMaker.define_operation :random_uniform do |op|
+   op.what_it_does "Outputs random values from a uniform distribution."
+
+   op.parameter :shape, "A 1-D integer Tensor or array. The shape of the output tensor."
+
+   op.option :name, "Optional name", :nil
+   op.option :dtype, "The type of the output: float16, float32, float64, int32, or int64", ":float32"
+   op.option :minval, "A 0-D Tensor or ruby value of type dtype. The lower bound on the range of random values to generate. Defaults to 0.", 0
+   op.option :maxval, "A 0-D Tensor or ruby value of type dtype. The upper bound on the range of random values to generate. Defaults to 1 if dtype is floating point.", 1
+   op.option :seed, " A ruby integer. Used to create a random seed for the distribution. See set_random_seed for behavior.", :nil
+
+   op.define_shape do |tensor|
+     a_shape = tensor.inputs[0] ? tensor.inputs[0].const_value : tensor.options[:shape]
+     next nil if a_shape.nil?
+
+     a_shape.is_a?(Array) ? a_shape : [a_shape]
+   end
+ end
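
A minimal usage sketch, assuming the Ruby evaluator (the seed keeps runs repeatable):

    require "tensor_stream"

    ts = TensorStream
    # 2x3 float32 tensor drawn uniformly from [-1, 1)
    r = ts.random_uniform([2, 3], minval: -1, maxval: 1, seed: 42)
    puts ts.session.run(r).inspect
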
data/lib/tensor_stream/ops/range.rb
@@ -0,0 +1,20 @@
+ TensorStream::OpMaker.define_operation :range do |op|
+   op.what_it_does "Creates a sequence of numbers."
+   op.what_it_does "Creates a sequence of numbers that begins at start and extends by increments of delta up to but not including limit."
+
+   op.parameter :start, "Acts as first entry in the range if limit is not nil; otherwise, acts as range limit and first entry defaults to 0.", "0"
+   op.parameter :limit, "Upper limit of sequence, exclusive. If nil, defaults to the value of start while the first entry of the range defaults to 0.", "0"
+   op.parameter :delta, "Number that increments start. Defaults to 1.", 1
+
+   op.option :name, " A name for the operation. Defaults to \"range\".", "\"range\""
+   op.option :dtype, "The type of the elements of the resulting tensor.", :nil
+   op.option :output_type, "Output data type defaults to int32", ":int32"
+
+   op.define_gradient do |grad, node, params|
+     nil # non differentiable
+   end
+
+   op.define_shape do |tensor|
+     nil
+   end
+ end
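
A usage sketch of the exclusive-limit behavior described above, assuming the Ruby evaluator:

    require "tensor_stream"

    ts = TensorStream
    sess = ts.session
    puts sess.run(ts.range(0, 5)).inspect      # => [0, 1, 2, 3, 4]
    puts sess.run(ts.range(3, 18, 3)).inspect  # => [3, 6, 9, 12, 15]
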
data/lib/tensor_stream/ops/rank.rb
@@ -0,0 +1,13 @@
+ TensorStream::OpMaker.define_operation :rank do |op|
+   op.what_it_does "Returns the rank of a tensor"
+
+   op.parameter :input, "A tensor"
+   op.option :name, "Optional name", :nil
+
+   op.add_custom "input = convert_to_tensor(input)"
+   op.add_custom "return cons(input.shape.ndims) if input.shape.known?"
+
+   op.define_shape do |tensor|
+     []
+   end
+ end
data/lib/tensor_stream/ops/reshape.rb
@@ -0,0 +1,24 @@
+ TensorStream::OpMaker.define_operation :reshape do |op|
+   op.what_it_does "Reshapes a tensor."
+   op.what_it_does "Given tensor, this operation returns a tensor that has the same values as tensor with shape shape."
+
+   op.parameter :input, "A tensor"
+   op.parameter :shape, "A new tensor shape"
+   op.option :name, "Optional name", :nil
+
+   op.define_gradient do |grad, node, params|
+     [ts.reshape(grad, ts.shape(node.inputs[0])), nil]
+   end
+
+   op.define_shape do |tensor|
+     new_shape = tensor.inputs[1]&.const_value ? tensor.inputs[1].const_value : nil
+     next nil if new_shape.nil?
+     next nil if tensor.inputs[0].shape.nil?
+
+     input_shape = tensor.inputs[0].shape.shape
+     next new_shape if input_shape.nil? && !new_shape.include?(-1) && !new_shape.include?(nil)
+     next nil if input_shape.nil? || input_shape.include?(nil)
+
+     TensorStream::TensorShape.fix_inferred_elements(new_shape, input_shape.reduce(:*))
+   end
+ end
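
The shape block resolves a -1 placeholder dimension from the element count via fix_inferred_elements; a usage sketch, assuming the Ruby evaluator:

    require "tensor_stream"

    ts = TensorStream
    a = ts.constant([1, 2, 3, 4, 5, 6])
    # -1 asks reshape to infer the remaining dimension from the element count
    puts ts.session.run(ts.reshape(a, [2, -1])).inspect  # => [[1, 2, 3], [4, 5, 6]]
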
data/lib/tensor_stream/ops/round.rb
@@ -0,0 +1,15 @@
+ TensorStream::OpMaker.define_operation :round do |op|
+   op.what_it_does "Rounds the values of a tensor to the nearest integer, element-wise"
+
+   op.parameter :input_a, "tensor X", validate: 'FLOATING_POINT_TYPES'
+
+   op.option :name, "Optional name", :nil
+
+   op.define_gradient do |grad, node, params|
+     nil
+   end
+
+   op.define_shape do |tensor|
+     tensor.inputs[0].shape.shape
+   end
+ end
data/lib/tensor_stream/ops/shape.rb
@@ -0,0 +1,14 @@
+ TensorStream::OpMaker.define_operation :shape do |op|
+   op.what_it_does "This operation returns a 1-D integer tensor representing the shape of input"
+
+   op.parameter :input, "A tensor"
+   op.option :name, "Optional name", :nil
+   op.option :out_type, "Optional output type", ":int32"
+
+   op.add_custom 'return constant(shape_eval(input, out_type), dtype: out_type, name: "Shape/#{name}") if input.is_a?(Array) && !input[0].is_a?(Tensor)'
+   op.add_custom 'return constant(input.shape.shape, dtype: out_type, name: "Shape/#{input.name}_c") if shape_full_specified(input)'
+
+   op.define_shape do |tensor|
+     tensor.inputs[0].shape.shape ? [tensor.inputs[0].shape.shape.size] : nil
+   end
+ end
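
When the input's static shape is fully specified, the second add_custom line short-circuits to a constant instead of adding a runtime op; a sketch assuming the Ruby evaluator:

    require "tensor_stream"

    ts = TensorStream
    a = ts.constant([[1, 2, 3], [4, 5, 6]])
    # A fully specified static shape comes back as a plain constant
    puts ts.session.run(ts.shape(a)).inspect  # => [2, 3]
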
data/lib/tensor_stream/ops/sigmoid.rb
@@ -0,0 +1,10 @@
+ TensorStream::OpMaker.define_operation :sigmoid do |op|
+   op.what_it_does "Computes sigmoid of x element-wise."
+
+   op.parameter :input_a, "tensor X", validate: 'FLOATING_POINT_TYPES'
+   op.option :name, "Optional name", :nil
+
+   op.define_gradient do |grad, _node, params|
+     i_op(:sigmoid_grad, params[0], grad)
+   end
+ end
data/lib/tensor_stream/ops/sign.rb
@@ -0,0 +1,12 @@
+ TensorStream::OpMaker.define_operation :sign do |op|
+   op.what_it_does "Computes sign of input element-wise."
+   op.what_it_does_code "y = sign(x) = -1 if x < 0; 0 if x == 0 or tf.is_nan(x); 1 if x > 0."
+   op.what_it_does "Zero is returned for NaN inputs."
+
+   op.parameter :input_a, "tensor X"
+   op.option :name, "Optional name", :nil
+
+   op.define_gradient do |grad, node, params|
+     ts.zeros(ts.shape(params[0]), dtype: params[0].data_type)
+   end
+ end
data/lib/tensor_stream/ops/sin.rb
@@ -0,0 +1,10 @@
+ TensorStream::OpMaker.define_operation :sin do |op|
+   op.what_it_does "Computes sin of input element-wise."
+
+   op.parameter :input_a, "tensor X", validate: 'FLOATING_POINT_TYPES'
+   op.option :name, "Optional name", :nil
+
+   op.define_gradient do |grad, node, params|
+     grad * ts.cos(params[0])
+   end
+ end
data/lib/tensor_stream/ops/size.rb
@@ -0,0 +1,16 @@
+ TensorStream::OpMaker.define_operation :size do |op|
+   op.what_it_does "Returns the size of a tensor."
+   op.what_it_does "Returns a 0-D Tensor representing the number of elements in input of type out_type. Defaults to :int32."
+
+   op.parameter :input, "A tensor"
+   op.option :name, "Optional name", :nil
+   op.option :out_type, "Optional output type", ":int32"
+
+   op.define_gradient do |grad, node, params|
+     nil # non differentiable
+   end
+
+   op.define_shape do |tensor|
+     []
+   end
+ end
data/lib/tensor_stream/ops/sub.rb
@@ -0,0 +1,24 @@
+ TensorStream::OpMaker.define_operation :sub do |op|
+   op.other_names %w(subtract)
+   op.what_it_does "Returns x - y element-wise."
+
+   op.parameter :input_a, "tensor X"
+   op.parameter :input_b, "tensor Y"
+
+   op.apply_data_type_coercion!
+   op.supports_broadcasting!
+
+   op.option :name, "Optional name", :nil
+
+   op.define_gradient do |grad, node, params|
+     x, y = params
+     next [grad, -grad] if shapes_fully_specified_and_equal(x, y)
+
+     sx = ts.shape(x, name: "sub/shape_x")
+     sy = ts.shape(y, name: "sub/shape_y")
+     rx, ry = _broadcast_gradient_args(sx, sy)
+
+     [ts.reshape(ts.reduce_sum(grad, rx, name: "add/reduce_sub_x"), sx),
+      -ts.reshape(ts.reduce_sum(grad, ry, name: "add/reduce_sub_y"), sy),]
+   end
+ end
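
When the operand shapes differ, _broadcast_gradient_args picks the axes along which each operand was broadcast so reduce_sum can fold the gradient back to the operand's shape. A sketch, assuming the Ruby evaluator and its implicit all-ones output gradient:

    require "tensor_stream"

    ts = TensorStream
    x = ts.constant([[1.0, 2.0], [3.0, 4.0]])
    y = ts.constant(10.0)  # scalar broadcast against a 2x2 matrix
    z = ts.sub(x, y)
    # The scalar's gradient is summed over the broadcast axes: -1.0 * 4 = -4.0
    puts ts.session.run(*ts.gradients(z, [x, y])).inspect
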
data/lib/tensor_stream/ops/sum.rb
@@ -0,0 +1,27 @@
+ TensorStream::OpMaker.define_operation :sum do |op|
+   op.other_names %w(reduce_sum)
+   op.what_it_does "Computes the sum of elements across dimensions of a tensor."
+   op.what_it_does "Reduces input_tensor along the dimensions given in axis. Unless keepdims is true, the rank of the"
+   op.what_it_does "tensor is reduced by 1 for each entry in axis. If keepdims is true, the reduced dimensions are"
+   op.what_it_does "retained with length 1."
+   op.what_it_does "If axis has no entries, all dimensions are reduced, and a tensor with a single element is returned."
+
+   op.parameter :input_a, "tensor X"
+   op.parameter :axis, "tensor X", :nil, validate: 'INTEGER_TYPES'
+
+   op.option :name, "Optional name", :nil
+   op.option :keepdims, "If true, retains reduced dimensions with length 1.", :false
+
+   op.add_custom "input_a = TensorStream.convert_to_tensor(input_a)"
+   op.add_custom "return input_a if input_a.shape.scalar?"
+   op.add_custom "axis = cast_axis(input_a, axis)"
+
+   op.define_gradient do |grad, node, params|
+     x, y = params
+     _sum_grad(x, y, grad)
+   end
+
+   op.define_shape do |tensor|
+     _infer_reduction_op_shape(tensor)
+   end
+ end
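
A usage sketch via the reduce_sum alias registered by other_names, assuming the Ruby evaluator:

    require "tensor_stream"

    ts = TensorStream
    a = ts.constant([[1, 1, 1], [1, 1, 1]])
    sess = ts.session
    puts sess.run(ts.reduce_sum(a)).inspect                     # => 6
    puts sess.run(ts.reduce_sum(a, 0)).inspect                  # => [2, 2, 2]
    puts sess.run(ts.reduce_sum(a, 1, keepdims: true)).inspect  # => [[3], [3]]
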
data/lib/tensor_stream/ops/tan.rb
@@ -0,0 +1,12 @@
+ TensorStream::OpMaker.define_operation :tan do |op|
+   op.what_it_does "Computes tan of input element-wise."
+
+   op.parameter :input_a, "tensor X", validate: 'FLOATING_POINT_TYPES'
+   op.option :name, "Optional name", :nil
+
+   op.define_gradient do |grad, node, params|
+     secx = ts.reciprocal(ts.cos(params[0]))
+     secx2 = ts.square(secx)
+     grad * secx2
+   end
+ end
data/lib/tensor_stream/ops/tanh.rb
@@ -0,0 +1,10 @@
+ TensorStream::OpMaker.define_operation :tanh do |op|
+   op.what_it_does "Computes tanh of input element-wise."
+
+   op.parameter :input_a, "tensor X", validate: 'FLOATING_POINT_TYPES'
+   op.option :name, "Optional name", :nil
+
+   op.define_gradient do |grad, node, params|
+     grad * i_op(:tanh_grad, params[0])
+   end
+ end
data/lib/tensor_stream/ops/tile.rb
@@ -0,0 +1,19 @@
+ TensorStream::OpMaker.define_operation :tile do |op|
+   op.what_it_does "Constructs a tensor by tiling a given tensor."
+   op.what_it_does "This operation creates a new tensor by replicating input multiples times."
+   op.what_it_does "The output tensor's i'th dimension has input.dims(i) * multiples[i] elements,"
+   op.what_it_does "and the values of input are replicated multiples[i] times along the 'i'th dimension. For example, tiling [a b c d] by [2] produces [a b c d a b c d]."
+
+   op.parameter :input, "A tensor"
+   op.parameter :multiples, "Must be one of the following types: int32, int64. 1-D. Length must be the same as the number of dimensions in input"
+   op.option :name, "Optional name", :nil
+
+
+   op.define_gradient do |grad, node, params|
+     nil # non differentiable
+   end
+
+   op.define_shape do |tensor|
+     nil
+   end
+ end
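
A tiling sketch matching the description above, assuming the Ruby evaluator:

    require "tensor_stream"

    ts = TensorStream
    a = ts.constant([[1, 2], [3, 4]])
    # multiples [2, 1]: repeat rows twice, columns once
    puts ts.session.run(ts.tile(a, [2, 1])).inspect
    # => [[1, 2], [3, 4], [1, 2], [3, 4]]
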
data/lib/tensor_stream/ops/zeros.rb
@@ -0,0 +1,15 @@
+ TensorStream::OpMaker.define_operation :zeros do |op|
+   op.what_it_does "Creates a tensor with all elements set to zero"
+
+   op.parameter :shape, "A 1-D integer Tensor or ruby array. The shape of the output tensor."
+
+   op.option :dtype, "Optional name", ":float32"
+   op.option :name, "Optional name", :nil
+
+   op.define_shape do |tensor|
+     a_shape = tensor.inputs[0] ? tensor.inputs[0].const_value : tensor.options[:shape]
+     next nil if a_shape.nil?
+
+     a_shape.is_a?(Array) ? a_shape : [a_shape]
+   end
+ end
data/lib/tensor_stream/placeholder.rb
@@ -10,12 +10,12 @@ module TensorStream
      @value = nil
      @is_const = false

-     @name = [@graph.get_name_scope, options[:name] || build_name].compact.reject(&:empty?).join('/')
+     @name = [@graph.get_name_scope, options[:name] || build_name].compact.reject(&:empty?).join("/")
      @op = Graph.get_default_graph.add_op!(:placeholder, data_type: @data_type, shape: @shape, internal_name: @name)
    end

    def inspect
-     "Placeholder(#{@name} shape: #{@shape || '?'} data_type: #{@data_type})"
+     "Placeholder(#{@name} shape: #{@shape || "?"} data_type: #{@data_type})"
    end

    private
data/lib/tensor_stream/profile/report_tool.rb
@@ -4,9 +4,9 @@ module TensorStream
    class ReportTool
      def self.profile_for(session, order_by: :slowest)
        context = session.last_session_context
-       eval_times = context[:profile][:operations].map do |name, profile|
+       eval_times = context[:profile][:operations].map { |name, profile|
          [name, profile[:op], profile[:eval_time], profile[:shape]]
-       end
+       }

        if order_by == :slowest
          eval_times.sort_by { |a| a[2] }.reverse!
@@ -15,4 +15,4 @@ module TensorStream
        end
      end
    end
- end
+ end
data/lib/tensor_stream/session.rb
@@ -19,16 +19,16 @@ module TensorStream

    def get_evaluator_classes(evaluators)
      @evaluator_classes = if evaluators.is_a?(Array)
-                            if evaluators.empty?
-                              TensorStream::Evaluator.default_evaluators
-                            else
-                              evaluators.collect { |name| Object.const_get("TensorStream::Evaluator::#{camelize(name.to_s)}") }
-                            end
-                          elsif evaluators.nil?
-                            TensorStream::Evaluator.default_evaluators
-                          else
-                            [Object.const_get("TensorStream::Evaluator::#{camelize(evaluators.to_s)}")]
-                          end
+       if evaluators.empty?
+         TensorStream::Evaluator.default_evaluators
+       else
+         evaluators.collect { |name| Object.const_get("TensorStream::Evaluator::#{camelize(name.to_s)}") }
+       end
+     elsif evaluators.nil?
+       TensorStream::Evaluator.default_evaluators
+     else
+       [Object.const_get("TensorStream::Evaluator::#{camelize(evaluators.to_s)}")]
+     end
    end

    def clear_session_cache
@@ -41,10 +41,10 @@ module TensorStream

    def run(*args)
      options = if args.last.is_a?(Hash)
-                 args.pop
-               else
-                 {}
-               end
+       args.pop
+     else
+       {}
+     end

      @evaluator_options[:thread_pool] = @thread_pool
      @evaluator_options[:log_intermediates] = options[:log_intermediates]
@@ -52,25 +52,23 @@ module TensorStream
      context = {
        _cache: @session_cache,
        _options: options.merge(@evaluator_options),
-       profile: { step: 0, operations: {} },
+       profile: {step: 0, operations: {}},
      }

      # scan for placeholders and assign value
-     if options[:feed_dict]
-       options[:feed_dict].each_key do |k|
-         if k.is_a?(Placeholder)
-           context[k.name.to_sym] = options[:feed_dict][k]
-         elsif k.is_a?(String)
-           target_graph = args[0].graph
-           node = target_graph.get_node(k)
-           raise "Cannot find placeholder with the name of #{k}" if node.operation != :placeholder
-
-           context[k.to_sym] = options[:feed_dict][k]
-         elsif k.is_a?(Operation) && k.operation == :placeholder
-           context[k.name.to_sym] = options[:feed_dict][k]
-         else
-           raise "Invalid placeholder type passed key must be a string or a placeholder type"
-         end
+     options[:feed_dict]&.each_key do |k|
+       if k.is_a?(Placeholder)
+         context[k.name.to_sym] = options[:feed_dict][k]
+       elsif k.is_a?(String)
+         target_graph = args[0].graph
+         node = target_graph.get_node(k)
+         raise "Cannot find placeholder with the name of #{k}" if node.operation != :placeholder
+
+         context[k.to_sym] = options[:feed_dict][k]
+       elsif k.is_a?(Operation) && k.operation == :placeholder
+         context[k.name.to_sym] = options[:feed_dict][k]
+       else
+         raise "Invalid placeholder type passed key must be a string or a placeholder type"
       end
     end

@@ -82,21 +80,21 @@ module TensorStream
          puts "#{k} : #{v[0].name}"
        end
      end
-     result = args.collect do |e|
+     result = args.collect { |e|
        next e.value if e.is_a?(Tensor) && e.is_const && e.value

        value = delegate_to_evaluator(e, context, {})
        recursive_eval(value)
-     end
+     }
      args.size == 1 ? result.first : result
    end

    def list_devices
-     TensorStream::Evaluator.evaluators.collect do |_k, v|
+     TensorStream::Evaluator.evaluators.collect { |_k, v|
        v[:class].query_supported_devices.collect do |device|
          device
        end
-     end.flatten
+     }.flatten
    end

    def close
@@ -117,11 +115,11 @@ module TensorStream

    def dump_ops(tensor, selector)
      graph = tensor.graph
-     graph.nodes.select { |k, v| selector.call(k, v) }.collect do |k, node|
+     graph.nodes.select { |k, v| selector.call(k, v) }.collect { |k, node|
        next unless @last_session_context[node.name]

        "#{k} #{node.to_math(true, 1)} = #{@last_session_context[node.name]}"
-     end.compact
+     }.compact
    end

    def graph_ml(tensor, filename)
@@ -143,12 +141,12 @@ module TensorStream
    end

    def assign_evaluator(tensor)
-     device = @evaluator_classes.map do |klass|
+     device = @evaluator_classes.map { |klass|
        next nil if tensor.is_a?(Operation) && !klass.ops.include?(tensor.operation.to_sym)
        next klass.default_device if tensor.device.nil?

        klass.query_device(tensor.device)
-     end.compact.first
+     }.compact.first

      raise "no evaluator available to execute #{tensor.operation}" if device.nil?
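
Tying the session changes together, a minimal sketch of feeding a placeholder through Session#run, which the rewritten feed_dict scan above accepts as a Placeholder object or its string name (names and values here are illustrative), assuming the Ruby evaluator:

    require "tensor_stream"

    ts = TensorStream
    x = ts.placeholder(:float32, shape: [2], name: "x")
    y = x * 2
    sess = ts.session
    # Placeholder objects (or their string names) serve as feed_dict keys
    puts sess.run(y, feed_dict: { x => [1.0, 2.0] }).inspect  # => [2.0, 4.0]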