tensor_stream 1.0.0 → 1.0.1

This diff shows the contents of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in the public registry.
Files changed (142)
  1. checksums.yaml +4 -4
  2. data/.gitignore +1 -0
  3. data/.rubocop.yml +1 -0
  4. data/Gemfile +1 -1
  5. data/LICENSE.txt +1 -1
  6. data/README.md +34 -34
  7. data/Rakefile +3 -3
  8. data/USAGE_GUIDE.md +235 -0
  9. data/bin/stubgen +20 -0
  10. data/exe/model_utils +2 -2
  11. data/lib/tensor_stream.rb +45 -44
  12. data/lib/tensor_stream/constant.rb +2 -2
  13. data/lib/tensor_stream/control_flow.rb +1 -1
  14. data/lib/tensor_stream/debugging/debugging.rb +2 -2
  15. data/lib/tensor_stream/dynamic_stitch.rb +2 -2
  16. data/lib/tensor_stream/evaluator/base_evaluator.rb +18 -18
  17. data/lib/tensor_stream/evaluator/buffer.rb +1 -1
  18. data/lib/tensor_stream/evaluator/evaluator.rb +2 -2
  19. data/lib/tensor_stream/evaluator/operation_helpers/array_ops_helper.rb +41 -41
  20. data/lib/tensor_stream/evaluator/operation_helpers/math_helper.rb +1 -1
  21. data/lib/tensor_stream/evaluator/ruby/array_ops.rb +39 -39
  22. data/lib/tensor_stream/evaluator/ruby/check_ops.rb +2 -2
  23. data/lib/tensor_stream/evaluator/ruby/images_ops.rb +18 -18
  24. data/lib/tensor_stream/evaluator/ruby/math_ops.rb +13 -14
  25. data/lib/tensor_stream/evaluator/ruby/nn_ops.rb +33 -36
  26. data/lib/tensor_stream/evaluator/ruby/random_ops.rb +20 -21
  27. data/lib/tensor_stream/evaluator/ruby_evaluator.rb +36 -49
  28. data/lib/tensor_stream/exceptions.rb +1 -1
  29. data/lib/tensor_stream/generated_stub/ops.rb +691 -0
  30. data/lib/tensor_stream/generated_stub/stub_file.erb +24 -0
  31. data/lib/tensor_stream/graph.rb +18 -18
  32. data/lib/tensor_stream/graph_builder.rb +17 -17
  33. data/lib/tensor_stream/graph_deserializers/protobuf.rb +97 -97
  34. data/lib/tensor_stream/graph_deserializers/yaml_loader.rb +1 -1
  35. data/lib/tensor_stream/graph_keys.rb +3 -3
  36. data/lib/tensor_stream/graph_serializers/graphml.rb +33 -33
  37. data/lib/tensor_stream/graph_serializers/packer.rb +23 -23
  38. data/lib/tensor_stream/graph_serializers/pbtext.rb +38 -42
  39. data/lib/tensor_stream/graph_serializers/serializer.rb +3 -2
  40. data/lib/tensor_stream/graph_serializers/yaml.rb +5 -5
  41. data/lib/tensor_stream/helpers/infer_shape.rb +56 -56
  42. data/lib/tensor_stream/helpers/op_helper.rb +8 -9
  43. data/lib/tensor_stream/helpers/string_helper.rb +15 -15
  44. data/lib/tensor_stream/helpers/tensor_mixins.rb +17 -17
  45. data/lib/tensor_stream/images.rb +1 -1
  46. data/lib/tensor_stream/initializer.rb +1 -1
  47. data/lib/tensor_stream/math_gradients.rb +28 -187
  48. data/lib/tensor_stream/monkey_patches/array.rb +1 -1
  49. data/lib/tensor_stream/monkey_patches/float.rb +1 -1
  50. data/lib/tensor_stream/monkey_patches/integer.rb +1 -1
  51. data/lib/tensor_stream/monkey_patches/op_patch.rb +5 -5
  52. data/lib/tensor_stream/monkey_patches/patch.rb +1 -1
  53. data/lib/tensor_stream/nn/nn_ops.rb +17 -15
  54. data/lib/tensor_stream/op_maker.rb +180 -0
  55. data/lib/tensor_stream/operation.rb +17 -17
  56. data/lib/tensor_stream/ops.rb +95 -384
  57. data/lib/tensor_stream/ops/add.rb +23 -0
  58. data/lib/tensor_stream/ops/argmax.rb +14 -0
  59. data/lib/tensor_stream/ops/argmin.rb +14 -0
  60. data/lib/tensor_stream/ops/case.rb +17 -0
  61. data/lib/tensor_stream/ops/cast.rb +15 -0
  62. data/lib/tensor_stream/ops/ceil.rb +15 -0
  63. data/lib/tensor_stream/ops/const.rb +0 -0
  64. data/lib/tensor_stream/ops/cos.rb +10 -0
  65. data/lib/tensor_stream/ops/div.rb +21 -0
  66. data/lib/tensor_stream/ops/equal.rb +15 -0
  67. data/lib/tensor_stream/ops/expand_dims.rb +17 -0
  68. data/lib/tensor_stream/ops/fill.rb +19 -0
  69. data/lib/tensor_stream/ops/floor.rb +15 -0
  70. data/lib/tensor_stream/ops/floor_div.rb +15 -0
  71. data/lib/tensor_stream/ops/greater.rb +11 -0
  72. data/lib/tensor_stream/ops/greater_equal.rb +11 -0
  73. data/lib/tensor_stream/ops/less_equal.rb +15 -0
  74. data/lib/tensor_stream/ops/log.rb +14 -0
  75. data/lib/tensor_stream/ops/mat_mul.rb +60 -0
  76. data/lib/tensor_stream/ops/max.rb +15 -0
  77. data/lib/tensor_stream/ops/min.rb +15 -0
  78. data/lib/tensor_stream/ops/mod.rb +23 -0
  79. data/lib/tensor_stream/ops/mul.rb +21 -0
  80. data/lib/tensor_stream/ops/negate.rb +14 -0
  81. data/lib/tensor_stream/ops/ones_like.rb +19 -0
  82. data/lib/tensor_stream/ops/pow.rb +25 -0
  83. data/lib/tensor_stream/ops/prod.rb +60 -0
  84. data/lib/tensor_stream/ops/random_uniform.rb +18 -0
  85. data/lib/tensor_stream/ops/range.rb +20 -0
  86. data/lib/tensor_stream/ops/rank.rb +13 -0
  87. data/lib/tensor_stream/ops/reshape.rb +24 -0
  88. data/lib/tensor_stream/ops/round.rb +15 -0
  89. data/lib/tensor_stream/ops/shape.rb +14 -0
  90. data/lib/tensor_stream/ops/sigmoid.rb +10 -0
  91. data/lib/tensor_stream/ops/sign.rb +12 -0
  92. data/lib/tensor_stream/ops/sin.rb +10 -0
  93. data/lib/tensor_stream/ops/size.rb +16 -0
  94. data/lib/tensor_stream/ops/sub.rb +24 -0
  95. data/lib/tensor_stream/ops/sum.rb +27 -0
  96. data/lib/tensor_stream/ops/tan.rb +12 -0
  97. data/lib/tensor_stream/ops/tanh.rb +10 -0
  98. data/lib/tensor_stream/ops/tile.rb +19 -0
  99. data/lib/tensor_stream/ops/zeros.rb +15 -0
  100. data/lib/tensor_stream/placeholder.rb +2 -2
  101. data/lib/tensor_stream/profile/report_tool.rb +3 -3
  102. data/lib/tensor_stream/session.rb +36 -38
  103. data/lib/tensor_stream/tensor.rb +2 -2
  104. data/lib/tensor_stream/tensor_shape.rb +4 -4
  105. data/lib/tensor_stream/train/adadelta_optimizer.rb +8 -8
  106. data/lib/tensor_stream/train/adagrad_optimizer.rb +3 -3
  107. data/lib/tensor_stream/train/adam_optimizer.rb +11 -11
  108. data/lib/tensor_stream/train/learning_rate_decay.rb +2 -2
  109. data/lib/tensor_stream/train/momentum_optimizer.rb +7 -7
  110. data/lib/tensor_stream/train/optimizer.rb +9 -9
  111. data/lib/tensor_stream/train/rmsprop_optimizer.rb +16 -16
  112. data/lib/tensor_stream/train/saver.rb +14 -14
  113. data/lib/tensor_stream/train/slot_creator.rb +6 -6
  114. data/lib/tensor_stream/train/utils.rb +12 -12
  115. data/lib/tensor_stream/trainer.rb +10 -10
  116. data/lib/tensor_stream/types.rb +1 -1
  117. data/lib/tensor_stream/utils.rb +33 -32
  118. data/lib/tensor_stream/utils/freezer.rb +5 -5
  119. data/lib/tensor_stream/variable.rb +5 -5
  120. data/lib/tensor_stream/variable_scope.rb +1 -1
  121. data/lib/tensor_stream/version.rb +1 -1
  122. data/samples/{iris.data → datasets/iris.data} +0 -0
  123. data/samples/jupyter_notebooks/linear_regression.ipynb +463 -0
  124. data/samples/{iris.rb → neural_networks/iris.rb} +21 -23
  125. data/samples/{mnist_data.rb → neural_networks/mnist_data.rb} +8 -8
  126. data/samples/neural_networks/raw_neural_net_sample.rb +112 -0
  127. data/samples/{rnn.rb → neural_networks/rnn.rb} +28 -31
  128. data/samples/{nearest_neighbor.rb → others/nearest_neighbor.rb} +12 -12
  129. data/samples/regression/linear_regression.rb +63 -0
  130. data/samples/{logistic_regression.rb → regression/logistic_regression.rb} +14 -16
  131. data/tensor_stream.gemspec +9 -8
  132. metadata +89 -19
  133. data/data_1.json +0 -4764
  134. data/data_2.json +0 -4764
  135. data/data_actual.json +0 -28
  136. data/data_expected.json +0 -28
  137. data/data_input.json +0 -28
  138. data/samples/error.graphml +0 -2755
  139. data/samples/gradient_sample.graphml +0 -1255
  140. data/samples/linear_regression.rb +0 -69
  141. data/samples/multigpu.rb +0 -73
  142. data/samples/raw_neural_net_sample.rb +0 -112
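
The headline change in 1.0.1 is the switch to generated op stubs: each operation now has its own definition under data/lib/tensor_stream/ops/ (add.rb, argmax.rb, mat_mul.rb, and so on), the new bin/stubgen renders them through generated_stub/stub_file.erb into generated_stub/ops.rb, and the hand-written bodies in lib/tensor_stream/ops.rb shrink accordingly (+95 -384). The samples are also reorganized into datasets/, neural_networks/, regression/ and others/ folders, and a USAGE_GUIDE.md and a Jupyter notebook are added. The calling convention of the stubbed ops looks unchanged from 1.0.0; as a rough sketch (method names and aliases are taken from the generated file reproduced below, while the require path and session usage are assumed from the gem's README):

    require "tensor_stream"

    ts = TensorStream
    a = ts.constant([[1.0, 2.0], [3.0, 4.0]])
    b = ts.constant([[0.5, 0.5], [0.5, 0.5]])

    c = ts.add(a, b)          # generated stub: add
    d = ts.matmul(a, b)       # alias_method :matmul, :mat_mul
    total = ts.reduce_sum(d)  # alias_method :reduce_sum, :sum

    ts.session do |sess|
      sess.run(total)         # evaluate the graph with the default Ruby evaluator
    end

Two of the per-file diffs are reproduced below: the trivial change to exceptions.rb and the newly generated generated_stub/ops.rb.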
data/lib/tensor_stream/exceptions.rb
@@ -4,4 +4,4 @@ module TensorStream
  class ValueError < TensorStreamError; end
  class InvalidArgumentError < TensorStreamError; end
  class NotImplementedError < TensorStreamError; end
- end
+ end
data/lib/tensor_stream/generated_stub/ops.rb
@@ -0,0 +1,691 @@
+ # This file has been automatically generated by stubgen
+ # DO NOT EDIT
+ #
+ module TensorStream
+   module OpStub
+
+     ##
+     # Returns x + y element-wise.
+     #
+     # This operation supports broadcasting
+     #
+     # Params:
+     # +input_a+:: tensor X
+     # +input_b+:: tensor Y
+     #
+     # Options:
+     # +:name+:: Optional name
+     def add(input_a, input_b, name: nil)
+       input_a, input_b = apply_data_type_coercion(input_a, input_b)
+       _op(:add, input_a, input_b, name: name)
+     end
+
+
+     ##
+     # Returns the index with the largest value across axes of a tensor.
+     #
+     #
+     # Params:
+     # +input_a+:: tensor X (of type NUMERIC_TYPES)
+     # +axis+:: Describes which axis of the input tensor to reduce across. For vectors, use axis = 0 (of type INTEGER_TYPES)
+     #
+     # Options:
+     # +:name+:: Optional name
+     # +:dimension+:: Same as axis
+     # +:output_type+:: Output data type defaults to int32 default (:int32)
+     def argmax(input_a, axis = nil, name: nil, dimension: nil, output_type: :int32)
+       check_allowed_types(input_a, TensorStream::Ops::NUMERIC_TYPES)
+       check_allowed_types(axis, TensorStream::Ops::INTEGER_TYPES)
+       _op(:argmax, input_a, axis, name: name, dimension: dimension, output_type: output_type)
+     end
+
+
+     ##
+     # Returns the index with the smallest value across axes of a tensor.
+     #
+     #
+     # Params:
+     # +input_a+:: tensor X (of type NUMERIC_TYPES)
+     # +axis+:: Describes which axis of the input tensor to reduce across. For vectors, use axis = 0 (of type INTEGER_TYPES)
+     #
+     # Options:
+     # +:name+:: Optional name
+     # +:dimension+:: Same as axis
+     # +:output_type+:: Output data type defaults to int32 default (:int32)
+     def argmin(input_a, axis = nil, name: nil, dimension: nil, output_type: :int32)
+       check_allowed_types(input_a, TensorStream::Ops::NUMERIC_TYPES)
+       check_allowed_types(axis, TensorStream::Ops::INTEGER_TYPES)
+       _op(:argmin, input_a, axis, name: name, dimension: dimension, output_type: output_type)
+     end
+
+
+     ##
+     # Returns element-wise smallest integer not less than x.
+     #
+     #
+     # Params:
+     # +input_a+:: tensor X (of type FLOATING_POINT_TYPES)
+     #
+     # Options:
+     # +:name+:: Optional name
+     def ceil(input_a, name: nil)
+       check_allowed_types(input_a, TensorStream::Ops::FLOATING_POINT_TYPES)
+       _op(:ceil, input_a, name: name)
+     end
+
+
+     ##
+     # Computes cos of input element-wise.
+     #
+     #
+     # Params:
+     # +input_a+:: tensor X (of type FLOATING_POINT_TYPES)
+     #
+     # Options:
+     # +:name+:: Optional name
+     def cos(input_a, name: nil)
+       check_allowed_types(input_a, TensorStream::Ops::FLOATING_POINT_TYPES)
+       _op(:cos, input_a, name: name)
+     end
+
+
+     ##
+     # Returns x / y element-wise.
+     #
+     # This operation supports broadcasting
+     #
+     # Params:
+     # +input_a+:: tensor X
+     # +input_b+:: tensor Y
+     #
+     # Options:
+     # +:name+:: Optional name
+     def div(input_a, input_b, name: nil)
+       input_a, input_b = apply_data_type_coercion(input_a, input_b)
+       _op(:div, input_a, input_b, name: name)
+     end
+
+
+     ##
+     # Returns the truth value of (x == y) element-wise.
+     #
+     # This operation supports broadcasting
+     #
+     # Params:
+     # +input_a+:: tensor X
+     # +input_b+:: tensor Y
+     #
+     # Options:
+     # +:name+:: Optional name
+     def equal(input_a, input_b, name: nil)
+       input_a, input_b = apply_data_type_coercion(input_a, input_b)
+       _op(:equal, input_a, input_b, name: name)
+     end
+
+
+     ##
+     # Inserts a dimension of 1 into a tensor's shape.
+     # Given a tensor input, this operation inserts a dimension of 1 at the dimension index axis of input's shape. The
+     # dimension index axis starts at zero; if you specify a negative number for axis it is counted backward from the end.
+     #
+     #
+     # Params:
+     # +input+:: A tensor
+     # +axis+:: Specifies the dimension index at which to expand the shape of input. Must be in the range [-rank(input) - 1, rank(input)].
+     #
+     # Options:
+     # +:name+:: Optional name
+     def expand_dims(input, axis, name: nil)
+       _op(:expand_dims, input, axis, name: name)
+     end
+
+
+     ##
+     # This operation creates a tensor of shape dims and fills it with value.
+     #
+     #
+     # Params:
+     # +dims+:: tensor shape
+     # +value+:: scalar value to fill with
+     #
+     # Options:
+     # +:name+:: Optional name
+     def fill(dims, value, name: nil)
+       _op(:fill, dims, value, name: name)
+     end
+
+
+     ##
+     # Returns element-wise largest integer not greater than x.
+     #
+     #
+     # Params:
+     # +input_a+:: tensor X (of type FLOATING_POINT_TYPES)
+     #
+     # Options:
+     # +:name+:: Optional name
+     def floor(input_a, name: nil)
+       check_allowed_types(input_a, TensorStream::Ops::FLOATING_POINT_TYPES)
+       _op(:floor, input_a, name: name)
+     end
+
+
+     ##
+     # Returns element-wise integer division.
+     #
+     # This operation supports broadcasting
+     #
+     # Params:
+     # +input_a+:: tensor X
+     # +input_b+:: tensor Y
+     #
+     # Options:
+     # +:name+:: Optional name
+     def floor_div(input_a, input_b, name: nil)
+       input_a, input_b = apply_data_type_coercion(input_a, input_b)
+       _op(:floor_div, input_a, input_b, name: name)
+     end
+
+
+     ##
+     # Returns the truth value of (x > y) element-wise.
+     #
+     # This operation supports broadcasting
+     #
+     # Params:
+     # +input_a+:: tensor X
+     # +input_b+:: tensor Y
+     #
+     # Options:
+     # +:name+:: Optional name
+     def greater(input_a, input_b, name: nil)
+       input_a, input_b = apply_data_type_coercion(input_a, input_b)
+       _op(:greater, input_a, input_b, name: name)
+     end
+
+
+     ##
+     # Returns the truth value of (x >= y) element-wise.
+     #
+     # This operation supports broadcasting
+     #
+     # Params:
+     # +input_a+:: tensor X
+     # +input_b+:: tensor Y
+     #
+     # Options:
+     # +:name+:: Optional name
+     def greater_equal(input_a, input_b, name: nil)
+       input_a, input_b = apply_data_type_coercion(input_a, input_b)
+       _op(:greater_equal, input_a, input_b, name: name)
+     end
+
+
+     ##
+     # Returns the truth value of (x <= y) element-wise.
+     #
+     # This operation supports broadcasting
+     #
+     # Params:
+     # +input_a+:: tensor X
+     # +input_b+:: tensor Y
+     #
+     # Options:
+     # +:name+:: Optional name
+     def less_equal(input_a, input_b, name: nil)
+       input_a, input_b = apply_data_type_coercion(input_a, input_b)
+       _op(:less_equal, input_a, input_b, name: name)
+     end
+
+
+     ##
+     # Computes natural logarithm of x element-wise.
+     #
+     #
+     # Params:
+     # +input+:: tensor X
+     #
+     # Options:
+     # +:name+:: Optional name
+     def log(input, name: nil)
+       _op(:log, input, name: name)
+     end
+
+
+     ##
+     # Multiplies matrix a by matrix b, producing a * b. The inputs must, following any transpositions, be tensors of rank 2.
+     #
+     # This operation supports broadcasting
+     #
+     # Params:
+     # +input_a+:: tensor X
+     # +input_b+:: tensor Y
+     #
+     # Options:
+     # +:transpose_a+:: Transpose matrix A first default (false)
+     # +:transpose_b+:: Transpose matrix B first default (false)
+     # +:name+:: Optional name
+     def mat_mul(input_a, input_b, transpose_a: false, transpose_b: false, name: nil)
+       input_a, input_b = apply_data_type_coercion(input_a, input_b)
+       _op(:mat_mul, input_a, input_b, transpose_a: transpose_a, transpose_b: transpose_b, name: name)
+     end
+
+     alias_method :matmul, :mat_mul
+
+     ##
+     # Returns the max of x and y (i.e. x > y ? x : y) element-wise.
+     #
+     # This operation supports broadcasting
+     #
+     # Params:
+     # +input_a+:: tensor X (of type NUMERIC_TYPES)
+     # +input_b+:: tensor Y (of type NUMERIC_TYPES)
+     #
+     # Options:
+     # +:name+:: Optional name
+     def max(input_a, input_b, name: nil)
+       check_allowed_types(input_a, TensorStream::Ops::NUMERIC_TYPES)
+       check_allowed_types(input_b, TensorStream::Ops::NUMERIC_TYPES)
+       input_a, input_b = apply_data_type_coercion(input_a, input_b)
+       _op(:max, input_a, input_b, name: name)
+     end
+
+
+     ##
+     # Returns the min of x and y (i.e. x < y ? x : y) element-wise.
+     #
+     # This operation supports broadcasting
+     #
+     # Params:
+     # +input_a+:: tensor X (of type NUMERIC_TYPES)
+     # +input_b+:: tensor Y (of type NUMERIC_TYPES)
+     #
+     # Options:
+     # +:name+:: Optional name
+     def min(input_a, input_b, name: nil)
+       check_allowed_types(input_a, TensorStream::Ops::NUMERIC_TYPES)
+       check_allowed_types(input_b, TensorStream::Ops::NUMERIC_TYPES)
+       input_a, input_b = apply_data_type_coercion(input_a, input_b)
+       _op(:min, input_a, input_b, name: name)
+     end
+
+
+     ##
+     # Returns element-wise remainder of division.
+     #
+     # This operation supports broadcasting
+     #
+     # Params:
+     # +input_a+:: tensor X
+     # +input_b+:: tensor Y
+     #
+     # Options:
+     # +:name+:: Optional name
+     def mod(input_a, input_b, name: nil)
+       input_a, input_b = apply_data_type_coercion(input_a, input_b)
+       _op(:mod, input_a, input_b, name: name)
+     end
+
+
+     ##
+     # Returns x * y element-wise.
+     #
+     # This operation supports broadcasting
+     #
+     # Params:
+     # +input_a+:: tensor X
+     # +input_b+:: tensor Y
+     #
+     # Options:
+     # +:name+:: Optional name
+     def mul(input_a, input_b, name: nil)
+       input_a, input_b = apply_data_type_coercion(input_a, input_b)
+       _op(:mul, input_a, input_b, name: name)
+     end
+
+
+     ##
+     # Computes numerical negative value element-wise.
+     #
+     #
+     # Params:
+     # +input+:: tensor X
+     #
+     # Options:
+     # +:name+:: Optional name
+     def negate(input, name: nil)
+       _op(:negate, input, name: name)
+     end
+
+
+     ##
+     # Creates a tensor with all elements set to 1.
+     # Given a single tensor (tensor), this operation returns a
+     # tensor of the same type and shape as tensor with all elements set to 1.
+     # Optionally, you can specify a new type (dtype) for the returned tensor.
+     #
+     #
+     # Params:
+     # +input+:: A tensor
+     #
+     # Options:
+     # +:dtype+:: Optional new data type to cast into
+     # +:name+:: Optional name
+     def ones_like(input, dtype: nil, name: nil)
+       _op(:ones_like, input, data_type: dtype, name: name)
+     end
+
+
+     ##
+     # Computes the power of one value to another X^Y element-wise
+     #
+     # This operation supports broadcasting
+     #
+     # Params:
+     # +input_a+:: tensor X
+     # +input_b+:: tensor Y
+     #
+     # Options:
+     # +:name+:: Optional name
+     def pow(input_a, input_b, name: nil)
+       input_a, input_b = apply_data_type_coercion(input_a, input_b)
+       _op(:pow, input_a, input_b, name: name)
+     end
+
+
+     ##
+     # Computes the product of elements across dimensions of a tensor.
+     # Reduces input_tensor along the dimensions given in axis. Unless keepdims is true, the rank of the
+     # tensor is reduced by 1 for each entry in axis. If keepdims is true, the reduced dimensions are
+     # retained with length 1.
+     # If axis has no entries, all dimensions are reduced, and a tensor with a single element is returned.
+     #
+     #
+     # Params:
+     # +input_a+:: tensor X
+     # +axis+:: tensor X (of type INTEGER_TYPES)
+     #
+     # Options:
+     # +:name+:: Optional name
+     # +:keepdims+:: If true, retains reduced dimensions with length 1. default (false)
+     def prod(input_a, axis = nil, name: nil, keepdims: false)
+       check_allowed_types(axis, TensorStream::Ops::INTEGER_TYPES)
+       input_a = TensorStream.convert_to_tensor(input_a)
+       return input_a if input_a.shape.scalar?
+       axis = cast_axis(input_a, axis)
+       _op(:prod, input_a, axis, name: name, keepdims: keepdims)
+     end
+
+     alias_method :reduce_prod, :prod
+
+     ##
+     # Outputs random values from a uniform distribution.
+     #
+     #
+     # Params:
+     # +shape+:: A 1-D integer Tensor or array. The shape of the output tensor.
+     #
+     # Options:
+     # +:name+:: Optional name
+     # +:dtype+:: The type of the output: float16, float32, float64, int32, or int64 default (:float32)
+     # +:minval+:: A 0-D Tensor or ruby value of type dtype. The lower bound on the range of random values to generate. Defaults to 0. default (0)
+     # +:maxval+:: A 0-D Tensor or ruby value of type dtype. The upper bound on the range of random values to generate. Defaults to 1 if dtype is floating point. default (1)
+     # +:seed+:: A ruby integer. Used to create a random seed for the distribution. See set_random_seed for behavior.
+     def random_uniform(shape, name: nil, dtype: :float32, minval: 0, maxval: 1, seed: nil)
+       _op(:random_uniform, shape, name: name, dtype: dtype, minval: minval, maxval: maxval, seed: seed)
+     end
+
+
+     ##
+     # Creates a sequence of numbers.
+     # Creates a sequence of numbers that begins at start and extends by increments of delta up to but not including limit.
+     #
+     #
+     # Params:
+     # +start+:: Acts as first entry in the range if limit is not nil; otherwise, acts as range limit and first entry defaults to 0.
+     # +limit+:: Upper limit of sequence, exclusive. If nil, defaults to the value of start while the first entry of the range defaults to 0.
+     # +delta+:: Number that increments start. Defaults to 1.
+     #
+     # Options:
+     # +:name+:: A name for the operation. Defaults to "range". default ("range")
+     # +:dtype+:: The type of the elements of the resulting tensor.
+     # +:output_type+:: Output data type defaults to int32 default (:int32)
+     def range(start = 0, limit = 0, delta = 1, name: "range", dtype: nil, output_type: :int32)
+       _op(:range, start, limit, delta, name: name, dtype: dtype, output_type: output_type)
+     end
+
+
+     ##
+     # Returns the rank of a tensor
+     #
+     #
+     # Params:
+     # +input+:: A tensor
+     #
+     # Options:
+     # +:name+:: Optional name
+     def rank(input, name: nil)
+       input = convert_to_tensor(input)
+       return cons(input.shape.ndims) if input.shape.known?
+       _op(:rank, input, name: name)
+     end
+
+
+     ##
+     # Reshapes a tensor.
+     # Given tensor, this operation returns a tensor that has the same values as tensor with shape shape.
+     #
+     #
+     # Params:
+     # +input+:: A tensor
+     # +shape+:: A new tensor shape
+     #
+     # Options:
+     # +:name+:: Optional name
+     def reshape(input, shape, name: nil)
+       _op(:reshape, input, shape, name: name)
+     end
+
+
+     ##
+     # Rounds the values of a tensor to the nearest integer, element-wise
+     #
+     #
+     # Params:
+     # +input_a+:: tensor X (of type FLOATING_POINT_TYPES)
+     #
+     # Options:
+     # +:name+:: Optional name
+     def round(input_a, name: nil)
+       check_allowed_types(input_a, TensorStream::Ops::FLOATING_POINT_TYPES)
+       _op(:round, input_a, name: name)
+     end
+
+
+     ##
+     # This operation returns a 1-D integer tensor representing the shape of input
+     #
+     #
+     # Params:
+     # +input+:: A tensor
+     #
+     # Options:
+     # +:name+:: Optional name
+     # +:out_type+:: Optional output type default (:int32)
+     def shape(input, name: nil, out_type: :int32)
+       return constant(shape_eval(input, out_type), dtype: out_type, name: "Shape/#{name}") if input.is_a?(Array) && !input[0].is_a?(Tensor)
+       return constant(input.shape.shape, dtype: out_type, name: "Shape/#{input.name}_c") if shape_full_specified(input)
+       _op(:shape, input, name: name, out_type: out_type)
+     end
+
+
+     ##
+     # Computes sigmoid of x element-wise.
+     #
+     #
+     # Params:
+     # +input_a+:: tensor X (of type FLOATING_POINT_TYPES)
+     #
+     # Options:
+     # +:name+:: Optional name
+     def sigmoid(input_a, name: nil)
+       check_allowed_types(input_a, TensorStream::Ops::FLOATING_POINT_TYPES)
+       _op(:sigmoid, input_a, name: name)
+     end
+
+
+     ##
+     # Computes sign of input element-wise.
+     # <tt>y = sign(x) = -1 if x < 0; 0 if x == 0 or tf.is_nan(x); 1 if x > 0.</tt>
+     # Zero is returned for NaN inputs.
+     #
+     #
+     # Params:
+     # +input_a+:: tensor X
+     #
+     # Options:
+     # +:name+:: Optional name
+     def sign(input_a, name: nil)
+       _op(:sign, input_a, name: name)
+     end
+
+
+     ##
+     # Computes sin of input element-wise.
+     #
+     #
+     # Params:
+     # +input_a+:: tensor X (of type FLOATING_POINT_TYPES)
+     #
+     # Options:
+     # +:name+:: Optional name
+     def sin(input_a, name: nil)
+       check_allowed_types(input_a, TensorStream::Ops::FLOATING_POINT_TYPES)
+       _op(:sin, input_a, name: name)
+     end
+
+
+     ##
+     # Returns the size of a tensor.
+     # Returns a 0-D Tensor representing the number of elements in input of type out_type. Defaults to :int32.
+     #
+     #
+     # Params:
+     # +input+:: A tensor
+     #
+     # Options:
+     # +:name+:: Optional name
+     # +:out_type+:: Optional output type default (:int32)
+     def size(input, name: nil, out_type: :int32)
+       _op(:size, input, name: name, out_type: out_type)
+     end
+
+
+     ##
+     # Returns x - y element-wise.
+     #
+     # This operation supports broadcasting
+     #
+     # Params:
+     # +input_a+:: tensor X
+     # +input_b+:: tensor Y
+     #
+     # Options:
+     # +:name+:: Optional name
+     def sub(input_a, input_b, name: nil)
+       input_a, input_b = apply_data_type_coercion(input_a, input_b)
+       _op(:sub, input_a, input_b, name: name)
+     end
+
+     alias_method :subtract, :sub
+
+     ##
+     # Computes the sum of elements across dimensions of a tensor.
+     # Reduces input_tensor along the dimensions given in axis. Unless keepdims is true, the rank of the
+     # tensor is reduced by 1 for each entry in axis. If keepdims is true, the reduced dimensions are
+     # retained with length 1.
+     # If axis has no entries, all dimensions are reduced, and a tensor with a single element is returned.
+     #
+     #
+     # Params:
+     # +input_a+:: tensor X
+     # +axis+:: tensor X (of type INTEGER_TYPES)
+     #
+     # Options:
+     # +:name+:: Optional name
+     # +:keepdims+:: If true, retains reduced dimensions with length 1. default (false)
+     def sum(input_a, axis = nil, name: nil, keepdims: false)
+       check_allowed_types(axis, TensorStream::Ops::INTEGER_TYPES)
+       input_a = TensorStream.convert_to_tensor(input_a)
+       return input_a if input_a.shape.scalar?
+       axis = cast_axis(input_a, axis)
+       _op(:sum, input_a, axis, name: name, keepdims: keepdims)
+     end
+
+     alias_method :reduce_sum, :sum
+
+     ##
+     # Computes tan of input element-wise.
+     #
+     #
+     # Params:
+     # +input_a+:: tensor X (of type FLOATING_POINT_TYPES)
+     #
+     # Options:
+     # +:name+:: Optional name
+     def tan(input_a, name: nil)
+       check_allowed_types(input_a, TensorStream::Ops::FLOATING_POINT_TYPES)
+       _op(:tan, input_a, name: name)
+     end
+
+
+     ##
+     # Computes tanh of input element-wise.
+     #
+     #
+     # Params:
+     # +input_a+:: tensor X (of type FLOATING_POINT_TYPES)
+     #
+     # Options:
+     # +:name+:: Optional name
+     def tanh(input_a, name: nil)
+       check_allowed_types(input_a, TensorStream::Ops::FLOATING_POINT_TYPES)
+       _op(:tanh, input_a, name: name)
+     end
+
+
+     ##
+     # Constructs a tensor by tiling a given tensor.
+     # This operation creates a new tensor by replicating input multiples times.
+     # The output tensor's i'th dimension has input.dims(i) * multiples[i] elements,
+     # and the values of input are replicated multiples[i] times along the 'i'th dimension. For example, tiling [a b c d] by [2] produces [a b c d a b c d].
+     #
+     #
+     # Params:
+     # +input+:: A tensor
+     # +multiples+:: Must be one of the following types: int32, int64. 1-D. Length must be the same as the number of dimensions in input
+     #
+     # Options:
+     # +:name+:: Optional name
+     def tile(input, multiples, name: nil)
+       _op(:tile, input, multiples, name: name)
+     end
+
+
+     ##
+     # Creates a tensor with all elements set to zero
+     #
+     #
+     # Params:
+     # +shape+:: A 1-D integer Tensor or ruby array. The shape of the output tensor.
+     #
+     # Options:
+     # +:dtype+:: Optional data type default (:float32)
+     # +:name+:: Optional name
+     def zeros(shape, dtype: :float32, name: nil)
+       _op(:zeros, shape, dtype: dtype, name: name)
+     end
+
+
+   end
+ end
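
For reference, the reduction and introspection stubs above include small graph-construction shortcuts: prod and sum return a scalar input untouched, and rank and shape fold to a constant when the tensor's shape is already fully specified. A rough sketch of the resulting behaviour, assuming the block form of TensorStream.session; the expected values follow from the docstrings above and have not been re-verified against 1.0.1:

    require "tensor_stream"

    ts = TensorStream
    x  = ts.constant([[1, 2, 3], [4, 5, 6]])

    ts.session do |sess|
      sess.run(ts.reduce_sum(x))                     # all axes reduced      => 21
      sess.run(ts.reduce_sum(x, 1, keepdims: true))  # rank retained         => [[6], [15]]
      sess.run(ts.argmax(x, 1))                      # index of row-wise max => [2, 2]
      sess.run(ts.shape(x))                          # folded to a constant  => [2, 3]
    end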