tensor_stream 0.9.8 → 0.9.9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46)
  1. checksums.yaml +4 -4
  2. data/README.md +31 -14
  3. data/lib/tensor_stream.rb +4 -0
  4. data/lib/tensor_stream/constant.rb +41 -0
  5. data/lib/tensor_stream/control_flow.rb +2 -1
  6. data/lib/tensor_stream/dynamic_stitch.rb +3 -1
  7. data/lib/tensor_stream/evaluator/operation_helpers/array_ops_helper.rb +4 -4
  8. data/lib/tensor_stream/evaluator/ruby/array_ops.rb +74 -23
  9. data/lib/tensor_stream/evaluator/ruby/math_ops.rb +45 -43
  10. data/lib/tensor_stream/evaluator/ruby/nn_ops.rb +31 -30
  11. data/lib/tensor_stream/evaluator/ruby/random_ops.rb +6 -6
  12. data/lib/tensor_stream/evaluator/ruby_evaluator.rb +46 -111
  13. data/lib/tensor_stream/graph.rb +61 -12
  14. data/lib/tensor_stream/graph_builder.rb +3 -3
  15. data/lib/tensor_stream/graph_deserializers/yaml_loader.rb +38 -0
  16. data/lib/tensor_stream/graph_serializers/packer.rb +8 -0
  17. data/lib/tensor_stream/graph_serializers/pbtext.rb +62 -27
  18. data/lib/tensor_stream/graph_serializers/serializer.rb +2 -2
  19. data/lib/tensor_stream/graph_serializers/yaml.rb +27 -0
  20. data/lib/tensor_stream/helpers/infer_shape.rb +15 -9
  21. data/lib/tensor_stream/helpers/op_helper.rb +17 -6
  22. data/lib/tensor_stream/helpers/string_helper.rb +32 -1
  23. data/lib/tensor_stream/helpers/tensor_mixins.rb +135 -0
  24. data/lib/tensor_stream/math_gradients.rb +19 -12
  25. data/lib/tensor_stream/monkey_patches/float.rb +7 -0
  26. data/lib/tensor_stream/monkey_patches/integer.rb +7 -0
  27. data/lib/tensor_stream/monkey_patches/patch.rb +8 -8
  28. data/lib/tensor_stream/nn/nn_ops.rb +1 -1
  29. data/lib/tensor_stream/operation.rb +98 -36
  30. data/lib/tensor_stream/ops.rb +65 -13
  31. data/lib/tensor_stream/placeholder.rb +2 -2
  32. data/lib/tensor_stream/session.rb +15 -3
  33. data/lib/tensor_stream/tensor.rb +15 -172
  34. data/lib/tensor_stream/tensor_shape.rb +3 -1
  35. data/lib/tensor_stream/train/saver.rb +12 -10
  36. data/lib/tensor_stream/trainer.rb +7 -2
  37. data/lib/tensor_stream/utils.rb +13 -11
  38. data/lib/tensor_stream/utils/freezer.rb +37 -0
  39. data/lib/tensor_stream/variable.rb +17 -11
  40. data/lib/tensor_stream/variable_scope.rb +3 -1
  41. data/lib/tensor_stream/version.rb +1 -1
  42. data/samples/iris.rb +3 -4
  43. data/samples/linear_regression.rb +9 -5
  44. data/samples/logistic_regression.rb +11 -9
  45. data/samples/mnist_data.rb +8 -10
  46. metadata +8 -4
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: 0ad97bf7cefbf069baabd2c694489a08117110ab8ce5b6cfecd0af6d43ffd6f2
-  data.tar.gz: c102c1e238d3f914cce40164f65f4dde86735543b8a05c979ff2a49f503ddab4
+  metadata.gz: d311558fd0e0503d46170a6e0d206cfa9eb3d3612143a1b42ef760b548155640
+  data.tar.gz: 5539763e003def63ee40417b9801b10c2694f08cf09f1be10ea3d7fd03cc0a4d
 SHA512:
-  metadata.gz: f14b080e5541f1f34fabc7a46b64cc43ada2a3be667a41f036001baeb4209b13e162bd81af15ca195b18bc39372565ea578af87ed916a85e9aca68e0457c6c20
-  data.tar.gz: 81ee1c521615fb36ded4cdbb94eb855350ca64f1332d579ec866dbc85250fb92711057019ba3a7b74bc4a482ff2d5ed6d130db53baa6becd01ebb9ceff69a05b
+  metadata.gz: 1918a785288e1f25fd75798f5743c924d71e4d7bd63012acdb18df41c5d3ee36b15a66a65fff18740885454b8683479d3e733d4111cbbd3e11fd81cec4fc4765
+  data.tar.gz: fe48b102afb40c36d77f92a3a53ffc155db108a705c01d5fd3cfab5e84d88dfd5c1b7a41f19322430e43335d225a526cf63c757030f7e4ac27f1cc2440797f2f
data/README.md CHANGED
@@ -2,17 +2,16 @@
 
 # TensorStream
 
-A reimplementation of TensorFlow for ruby. This is a ground up implementation with no dependency on TensorFlow. Effort has been made to make the programming style as near to TensorFlow as possible, comes with a pure ruby evaluator by default with support for an opencl evaluator for large models and datasets.
+An open source machine learning framework for ruby. Designed to run on a wide variety of ruby implementations (JRuby, TruffleRuby, MRI), with an option for high performance computation (OpenCL).
 
-The goal of this gem is to have a high performance machine learning and compute solution for ruby with support for a wide range of hardware and software configuration.
+This framework is heavily influenced by tensorflow and aims to be familiar to tensorflow users. It is a ground up implementation with no dependency on TensorFlow. Effort has been made to make the programming style as near to TensorFlow as possible, and it comes with a pure ruby evaluator by default with support for an opencl evaluator for large models and datasets.
 
-## Features
+## Goals & Features
 
+- Easy to use - improve model readability
 - Replicates most of the commonly used low-level tensorflow ops (tf.add, tf.constant, tf.placeholder, tf.matmul, tf.sin etc...)
-- Supports auto-differentiation
-- Provision to use your own opcode evaluator (opencl, sciruby and tensorflow backends planned)
-- Goal is to be as close to TensorFlow in behavior but with some freedom to add ruby specific enhancements (with lots of test cases)
-- (08-08-2018) Load pbtext files from tensorflow (Graph.parse_from_string)
+- Supports auto-differentiation using formal derivation
+- Extensible - use your own opcode evaluator (OpenCL and pure ruby currently supported)
 
 ## Compatibility
 
@@ -55,7 +54,8 @@ Or install it yourself as:
 
 ## Usage
 
-Usage is similar to how you would use TensorFlow except with ruby syntax
+Usage is similar to how you would use TensorFlow except with ruby syntax.
+There are also enhancements to the syntax to make it as concise as possible.
 
 Linear regression sample:
 
@@ -75,18 +75,24 @@ train_Y = [1.7,2.76,2.09,3.19,1.694,1.573,3.366,2.596,2.53,1.221,
 
 n_samples = train_X.size
 
-X = tf.placeholder("float")
-Y = tf.placeholder("float")
+# X = tf.placeholder("float")
+X = Float.placeholder
+
+# Y = tf.placeholder("float")
+Y = Float.placeholder
 
 # Set model weights
-W = tf.variable(rand, name: "weight")
-b = tf.variable(rand, name: "bias")
+# W = tf.variable(rand, name: "weight")
+W = rand.t.var name: "weight"
+
+# b = tf.variable(rand, name: "bias")
+b = rand.t.var name: "bias"
 
 # Construct a linear model
 pred = X * W + b
 
 # Mean squared error
-cost = ((pred - Y) ** 2).reduce(:+) / ( 2 * n_samples)
+cost = ((pred - Y) ** 2).reduce / ( 2 * n_samples)
 
 # optimizer = TensorStream::Train::MomentumOptimizer.new(learning_rate, momentum, use_nesterov: true).minimize(cost)
 # optimizer = TensorStream::Train::AdamOptimizer.new(learning_rate).minimize(cost)
@@ -134,7 +140,7 @@ Not all ops are available. Available ops are defined in lib/tensor_stream/ops.rb
 
 There are also certain differences with regards to naming conventions, and named parameters:
 
-# Variables
+# Variables and Constants
 
 To make referencing python examples easier it is recommended to use "tf" as the TensorStream namespace
 
@@ -157,8 +163,15 @@ Ruby
 
 ```ruby
 w = ts.variable(0, name: 'weights')
+c = ts.constant(1.0)
+
+# concise way when initializing using a constant
+w = 0.t.var name: 'weights'
+c = 1.0.t
 ```
 
+Calling .t on Integer, Array and Float types converts them into a tensor.
+
 # Shapes
 
 Python
@@ -173,6 +186,10 @@ Ruby
 ```ruby
 x = ts.placeholder(:float32, shape: [1024, 1024])
 x = ts.placeholder(:float32, shape: [nil, 1024])
+
+# another, a bit more terse way
+x = Float.placeholder shape: [1024, 1024]
+y = Float.placeholder shape: [nil, 1024]
 ```
 
 For debugging, each operation or tensor supports the to_math method
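Taken together, the README changes above amount to a terser model-definition syntax. A minimal sketch combining the new forms, grounded entirely in the README snippets above (`Float.placeholder`, `.t`, and `.var` are assumed to behave exactly as shown there):

```ruby
require 'tensor_stream'

ts = TensorStream

# terse forms introduced in 0.9.9; the long forms they replace are in comments
x = Float.placeholder                # ts.placeholder("float")
w = rand.t.var name: "weight"        # ts.variable(rand, name: "weight")
b = rand.t.var name: "bias"          # ts.variable(rand, name: "bias")

pred = x * w + b                     # a linear model, as in the README sample
```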
data/lib/tensor_stream.rb CHANGED
@@ -13,7 +13,9 @@ require 'tensor_stream/graph'
 require 'tensor_stream/device'
 require 'tensor_stream/session'
 require 'tensor_stream/tensor_shape'
+require 'tensor_stream/helpers/tensor_mixins'
 require 'tensor_stream/tensor'
+require 'tensor_stream/constant'
 require 'tensor_stream/variable'
 require 'tensor_stream/variable_scope'
 require 'tensor_stream/operation'
@@ -25,8 +27,10 @@ require 'tensor_stream/evaluator/evaluator'
 require 'tensor_stream/graph_serializers/packer'
 require 'tensor_stream/graph_serializers/serializer'
 require 'tensor_stream/graph_deserializers/protobuf'
+require 'tensor_stream/graph_deserializers/yaml_loader'
 require 'tensor_stream/graph_serializers/pbtext'
 require 'tensor_stream/graph_serializers/graphml'
+require 'tensor_stream/graph_serializers/yaml'
 require 'tensor_stream/math_gradients'
 require "tensor_stream/debugging/debugging"
 require 'tensor_stream/utils'
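The two new requires wire in YAML graph serialization (graph_serializers/yaml) and deserialization (graph_deserializers/yaml_loader). A hypothetical round trip; the class names are inferred from the file paths, and `get_string` / `load_from_string` are assumptions modeled on the existing Pbtext serializer interface rather than confirmed API:

```ruby
ts = TensorStream

a = ts.constant(1.0)
b = ts.constant(2.0)
f = a + b

# serialize the default graph to YAML (get_string mirrors the Pbtext
# serializer; the method name here is an assumption)
yaml = TensorStream::Yaml.new.get_string(ts.get_default_graph)

# ...and load it back through the new loader (load_from_string is
# likewise assumed)
graph = TensorStream::YamlLoader.new.load_from_string(yaml)
```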
data/lib/tensor_stream/constant.rb ADDED
@@ -0,0 +1,41 @@
+module TensorStream
+  # Class that defines a TensorStream constant
+  class Constant < Tensor
+    def initialize(data_type, rank, shape, options = {})
+      setup_initial_state(options)
+      @data_type = data_type
+      @rank = rank
+      @breakpoint = false
+      @shape = TensorShape.new(shape, rank)
+      @value = nil
+      @options = options
+      @is_const = true
+      @internal = options[:internal]
+      @name = [@graph.get_name_scope, options[:name] || build_name].compact.reject(&:empty?).join('/')
+      @given_name = @name
+
+      if options[:value]
+        if options[:value].is_a?(Array)
+          # check if a single dimension array is passed
+          options[:value] = _reshape(options[:value], shape.reverse.dup) if shape.size >= 2 && !options[:value].empty? && !options[:value][0].is_a?(Array)
+
+          @value = options[:value].map { |v| v.is_a?(Tensor) ? Tensor.cast_dtype(v, @data_type) : v }
+        elsif !shape.empty?
+          @value = _reshape(Tensor.cast_dtype(options[:value], @data_type), shape.dup)
+        else
+          @value = Tensor.cast_dtype(options[:value], @data_type)
+        end
+        @shape = TensorShape.new(shape_eval(@value))
+      end
+
+      @op = Graph.get_default_graph.add_op!(:const, value: @value, data_type: @data_type, internal_name: @name, shape: @shape)
+      @name = @op.name
+    end
+
+    protected
+
+    def build_name
+      "Const"
+    end
+  end
+end
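The new Constant class gives constants their own node type (previously folded into Tensor) and immediately registers a :const op in the default graph. Both constant spellings from the README end up here; a small sketch of the user-facing behavior (the exact evaluated result is an assumption based on element-wise multiply semantics):

```ruby
ts = TensorStream
sess = ts.session

a = ts.constant([1.0, 2.0, 3.0])   # builds a Constant backed by a :const op
b = 2.0.t                          # the terse spelling from the README diff

sess.run(a * b)                    # => [2.0, 4.0, 6.0]
```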
data/lib/tensor_stream/control_flow.rb CHANGED
@@ -5,11 +5,12 @@ module TensorStream
 
   def initialize(flow_type, inputs, ops = nil, options = {})
     setup_initial_state(options)
-
+    @options = options
     @operation = :"flow_#{flow_type}"
     @inputs = inputs
     @name = [@graph.get_name_scope, options[:name] || set_name].compact.join('/')
     @ops = ops
+    @consumers = Set.new
     @shape = TensorShape.new([inputs.size])
     @graph.add_node(self)
   end
data/lib/tensor_stream/dynamic_stitch.rb CHANGED
@@ -7,8 +7,10 @@ module TensorStream
     setup_initial_state(options)
 
     @operation = :"flow_#{flow_type}"
-    @inputs = inputs
+    @options = options.merge(n: inputs[0].size)
+    @inputs = inputs.flatten(1).map { |i| TensorStream.convert_to_tensor(i) }.map { |i| i ? i.op : nil }
 
+    @consumers = Set.new
     @data_type = Tensor.detect_type(inputs[1])
     @name = [@graph.get_name_scope, options[:name] || set_name].compact.join('/')
     @ops = ops
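The constructor now records the number of index groups in options[:n] and flattens indices and data into a single op-input list, which is what lets the evaluator (see the ruby/array_ops.rb hunks below) split them back apart. A sketch of the op's interleaving semantics, which mirror tf.dynamic_stitch (the ts.dynamic_stitch wrapper and its argument order are assumptions):

```ruby
ts = TensorStream
sess = ts.session

indices = [[0, 2], [1, 3]]
data    = [[10.0, 30.0], [20.0, 40.0]]

# each data value is routed to the output slot named by its index,
# so the two groups interleave into one flat result
merged = ts.dynamic_stitch(indices, data)
sess.run(merged)  # => [10.0, 20.0, 30.0, 40.0]
```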
data/lib/tensor_stream/evaluator/operation_helpers/array_ops_helper.rb CHANGED
@@ -16,10 +16,10 @@ module TensorStream
       start_index = start.shift
       current_size = size.shift
       dimen_size = if current_size == -1
-        input.size - 1
-      else
-        start_index + current_size - 1
-      end
+                     input.size - 1
+                   else
+                     start_index + current_size - 1
+                   end
 
       input[start_index..dimen_size].collect do |item|
         if item.is_a?(Array)
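The slice_tensor helper above treats a size of -1 as "everything from start_index to the end of that dimension". A sketch of how that surfaces through the public slice op (assuming ts.slice mirrors tf.slice's input/start/size signature):

```ruby
ts = TensorStream
sess = ts.session

m = ts.constant([[1, 2, 3], [4, 5, 6]])

sess.run(ts.slice(m, [0, 1], [2, 2]))   # rows 0..1, cols 1..2 => [[2, 3], [5, 6]]
sess.run(ts.slice(m, [1, 0], [1, -1]))  # -1 takes the rest of the row: [[4, 5, 6]]
```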
data/lib/tensor_stream/evaluator/ruby/array_ops.rb CHANGED
@@ -11,10 +11,14 @@ module TensorStream
           slice_tensor(input, start.dup, size.dup)
         end
 
-        register_op %i[flow_dynamic_stitch dynamic_stitch] do |_context, _tensor, inputs|
-          indexes, data = inputs
+        register_op %i[flow_dynamic_stitch dynamic_stitch] do |context, tensor, inputs|
+          number_of_indexes = tensor.options[:n]
+          indexes = inputs[0...number_of_indexes]
+          data = inputs[number_of_indexes...inputs.size]
+
           merged = []
-          merge_dynamic_stitch(merged, indexes, data)
+
+          merge_dynamic_stitch(merged, indexes, data, context)
           merged
         end
 
@@ -77,8 +81,6 @@ module TensorStream
         end
 
         register_op :unstack do |_context, tensor, inputs|
-          value = inputs[0]
-
           axis = tensor.options[:axis] || 0
           new_shape = shape_eval(inputs[0])
           rank = new_shape.size - 1
@@ -133,11 +135,9 @@
 
           if !axis.empty?
             axis.each do |x|
-              if shape[x] == 1
-                shape[x] = nil
-              else
-                raise TensorStream::ValueError, "unable to squeeze dimension that does not have a size of 1"
-              end
+              raise TensorStream::ValueError, "unable to squeeze dimension that does not have a size of 1" if shape[x] != 1
+
+              shape[x] = nil
            end
          else
            shape = shape.map { |s| s == 1 ? nil : s }
@@ -319,7 +319,7 @@
           end
         end
 
-        register_op :pad do |context, tensor, inputs|
+        register_op :pad do |_context, tensor, inputs|
           arr_pad(inputs[0], inputs[1], tensor.data_type)
         end
 
@@ -333,19 +333,9 @@
           tile.nil? ? [] : tile
         end
 
-        register_op :cond, noop: true do |context, tensor, inputs|
-          pred = global_eval(tensor, tensor.options[:pred], context)
-
-          if all_true?(pred)
-            global_eval(tensor, inputs[0], context)
-          else
-            global_eval(tensor, inputs[1], context)
-          end
-        end
-
         register_op %i[select where] do |context, tensor, inputs|
-          pred = complete_eval(tensor.options[:pred], context)
-          call_3way_vector_op(pred, inputs[0], inputs[1], context, ->(t, u, v) { t ? u : v })
+          pred = inputs[0]
+          call_3way_vector_op(pred, inputs[1], inputs[2], context, ->(t, u, v) { t ? u : v })
         end
 
         register_op :shape do |_context, tensor, inputs|
@@ -358,6 +348,67 @@ module TensorStream
           end
           TensorStream::Evaluator::OutputGroup.new(shapes, shapes.map { tensor.options[:out_type] })
         end
+
+        register_op :transpose do |_context, _tensor, inputs|
+          shape = shape_eval(inputs[0])
+          rank = get_rank(inputs[0])
+          perm = inputs[1] || (0...rank).to_a.reverse
+
+          if rank == 2 && perm.nil? # use native transpose for general case
+            inputs[0].transpose
+          else
+            arr = inputs[0].flatten
+
+            new_shape = perm.map { |p| shape[p] }
+            new_arr = Array.new(shape.reduce(:*)) { 0 }
+            transpose_with_perm(arr, new_arr, shape, new_shape, perm)
+            TensorShape.reshape(new_arr, new_shape)
+          end
+        end
+
+        register_op :case, noop: true do |context, tensor, _inputs|
+          pred = global_eval(tensor, tensor.inputs[0], context)
+          result = nil
+
+          if tensor.options[:exclusive]
+            p_true = pred.each_with_index.collect { |p, index| [p, index] }.select { |a| a[0] }
+            raise TensorStream::ValueError, "more than one predicate returns true pos #{p_true.map { |a| a[1] }.join(',')}" if p_true.size > 1
+          end
+
+          pred.each_with_index do |p, index|
+            next unless p
+
+            result = global_eval(tensor, tensor.inputs[2 + index], context)
+          end
+
+          result = global_eval(tensor, tensor.inputs[1], context) if result.nil?
+
+          result
+        end
+
+        register_op :case_grad do |_context, tensor, inputs|
+          index, pred, func, grad = inputs
+          if index < 0 && !pred.find { |p| !!p }
+            grad
+          elsif index >= 0 && pred[index]
+            grad
+          else
+            func = -> { int_type?(tensor.data_type) ? 0 : 0.0 }
+            shape = shape_eval(func)
+            generate_vector(shape, generator: func)
+          end
+        end
+
+        def merge_dynamic_stitch(merged, indexes, data, context)
+          indexes.each_with_index do |ind, m|
+            if ind.is_a?(Array)
+              merge_dynamic_stitch(merged, ind, data[m], context)
+            else
+              ind = ind.is_a?(Tensor) ? complete_eval(ind, context) : ind
+              merged[ind] = data[m]
+            end
+          end
+        end
       end
     end
   end
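Of the ops added above, :transpose is the most broadly useful: with no permutation it reverses the axes, and an explicit perm reorders them, as in tf.transpose. (Note that the `rank == 2 && perm.nil?` fast path can never fire as written, since `perm` is defaulted on the line above it.) A sketch, assuming the public ts.transpose wrapper accepts a perm argument like tf.transpose:

```ruby
ts = TensorStream
sess = ts.session

m = ts.constant([[1, 2, 3], [4, 5, 6]])

sess.run(ts.transpose(m))          # => [[1, 4], [2, 5], [3, 6]]
sess.run(ts.transpose(m, [1, 0]))  # explicit permutation, same result for rank 2
```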
data/lib/tensor_stream/evaluator/ruby/math_ops.rb CHANGED
@@ -2,24 +2,20 @@ module TensorStream
   module MathOps
     def MathOps.included(klass)
       klass.class_eval do
-        register_op :tanh, no_eval: true do |context, tensor, inputs|
-          call_op(tensor, inputs[0], context, ->(t, _b) { Math.tanh(t) })
+        register_op :tanh, no_eval: true do |context, _tensor, inputs|
+          call_op(inputs[0], context, ->(t, _b) { Math.tanh(t) })
         end
 
         register_op :tan, no_eval: true do |context, tensor, inputs|
-          call_op(tensor, inputs[0], context, ->(t, _b) { Math.tan(t) })
+          call_op(inputs[0], context, ->(t, _b) { Math.tan(t) })
         end
 
-        register_op :atan, no_eval: true do |context, tensor, inputs|
-          call_op(tensor, inputs[0], context, ->(t, _b) { Math.atan(t) })
+        register_op :atan, no_eval: true do |context, _tensor, inputs|
+          call_op(inputs[0], context, ->(t, _b) { Math.atan(t) })
         end
 
-        register_op :sec, no_eval: true do |context, tensor, inputs|
-          call_op(tensor, inputs[0], context, ->(t, _b) { Math.sec(t) })
-        end
-
-        register_op :sin, no_eval: true do |context, tensor, inputs|
-          call_op(tensor, inputs[0], context, ->(t, _b) { Math.sin(t) })
+        register_op :sin, no_eval: true do |context, _tensor, inputs|
+          call_op(inputs[0], context, ->(t, _b) { Math.sin(t) })
         end
 
         register_op :add, no_eval: true do |context, tensor, inputs|
@@ -76,67 +72,67 @@ module TensorStream
         end
 
         register_op :round, no_eval: true do |context, _tensor, inputs|
-          call_op(:round, inputs[0], context, ->(t, _b) { t.round })
+          call_op(inputs[0], context, ->(t, _b) { t.round })
         end
 
-        register_op :abs, no_eval: true do |context, tensor, inputs|
-          call_op(tensor, inputs[0], context, ->(t, _b) { t.abs })
+        register_op :abs, no_eval: true do |context, _tensor, inputs|
+          call_op(inputs[0], context, ->(t, _b) { t.abs })
         end
 
-        register_op :asin, no_eval: true do |context, tensor, inputs|
-          call_op(tensor, inputs[0], context, ->(t, _b) { Math.asin(t) })
+        register_op :asin, no_eval: true do |context, _tensor, inputs|
+          call_op(inputs[0], context, ->(t, _b) { Math.asin(t) })
         end
 
-        register_op :acos, no_eval: true do |context, tensor, inputs|
-          call_op(tensor, inputs[0], context, ->(t, _b) { Math.acos(t) })
+        register_op :acos, no_eval: true do |context, _tensor, inputs|
+          call_op(inputs[0], context, ->(t, _b) { Math.acos(t) })
        end
 
         register_op :cos, no_eval: true do |context, tensor, inputs|
-          call_op(tensor, inputs[0], context, ->(t, _b) { Math.cos(t) })
+          call_op(inputs[0], context, ->(t, _b) { Math.cos(t) })
         end
 
-        register_op :log1p, no_eval: true do |context, tensor, inputs|
-          call_op(tensor, inputs[0], context, ->(t, _b) { Math.log(1 + t) })
+        register_op :log1p, no_eval: true do |context, _tensor, inputs|
+          call_op(inputs[0], context, ->(t, _b) { Math.log(1 + t) })
         end
 
-        register_op :log, no_eval: true do |context, tensor, inputs|
-          call_op(tensor, inputs[0], context, ->(t, _b) { t < 0 ? Float::NAN : Math.log(t) })
+        register_op :log, no_eval: true do |context, _tensor, inputs|
+          call_op(inputs[0], context, ->(t, _b) { t < 0 ? Float::NAN : Math.log(t) })
         end
 
-        register_op :exp, no_eval: true do |context, tensor, inputs|
-          call_op(tensor, inputs[0], context, ->(t, _b) { Math.exp(t) })
+        register_op :exp, no_eval: true do |context, _tensor, inputs|
+          call_op(inputs[0], context, ->(t, _b) { Math.exp(t) })
         end
 
-        register_op :sigmoid, no_eval: true do |context, tensor, inputs|
-          call_op(tensor, inputs[0], context, ->(t, _b) { sigmoid(t) })
+        register_op :sigmoid, no_eval: true do |context, _tensor, inputs|
+          call_op(inputs[0], context, ->(t, _b) { sigmoid(t) })
         end
 
-        register_op :sqrt, no_eval: true do |context, tensor, inputs|
-          call_op(tensor, inputs[0], context, ->(t, _b) { Math.sqrt(t) })
+        register_op :sqrt, no_eval: true do |context, _tensor, inputs|
+          call_op(inputs[0], context, ->(t, _b) { Math.sqrt(t) })
         end
 
-        register_op :floor, no_eval: true do |context, tensor, inputs|
-          call_op(tensor, inputs[0], context, ->(t, _b) { t.floor })
+        register_op :floor, no_eval: true do |context, _tensor, inputs|
+          call_op(inputs[0], context, ->(t, _b) { t.floor })
         end
 
-        register_op :ceil, no_eval: true do |context, tensor, inputs|
-          call_op(tensor, inputs[0], context, ->(t, _b) { t.ceil })
+        register_op :ceil, no_eval: true do |context, _tensor, inputs|
+          call_op(inputs[0], context, ->(t, _b) { t.ceil })
        end
 
-        register_op :square, no_eval: true do |context, tensor, inputs|
-          call_op(tensor, inputs[0], context, ->(t, _b) { t * t })
+        register_op :square, no_eval: true do |context, _tensor, inputs|
+          call_op(inputs[0], context, ->(t, _b) { t * t })
         end
 
-        register_op :reciprocal, no_eval: true do |context, tensor, inputs|
-          call_op(tensor, inputs[0], context, ->(t, _b) { 1 / t })
+        register_op :reciprocal, no_eval: true do |context, _tensor, inputs|
+          call_op(inputs[0], context, ->(t, _b) { 1 / t })
         end
 
         register_op %i[neg negate], no_eval: true do |context, tensor, inputs|
           call_vector_op(tensor, :negate, inputs[0], nil, context, ->(t, _u) { -t })
         end
 
-        register_op :tanh_grad, no_eval: true do |context, tensor, inputs|
-          call_op(tensor, inputs[0], context, ->(t, _b) { 1 - Math.tanh(t) * Math.tanh(t) })
+        register_op :tanh_grad, no_eval: true do |context, _tensor, inputs|
+          call_op(inputs[0], context, ->(t, _b) { 1 - Math.tanh(t) * Math.tanh(t) })
         end
 
         register_op(%i[argmax arg_max]) do |_context, tensor, inputs|
@@ -170,16 +166,13 @@
         end
 
         register_op :cumprod do |context, tensor, inputs|
-          x = inputs[0]
           c = fp_type?(tensor.data_type) ? 1.0 : 1
           reverse_option = tensor.options[:reverse]
           exclusive = tensor.options[:exclusive]
-
           func = lambda do |arr|
             return c if arr.nil?
-            count = arr.size
-
 
+            count = arr.size
             arr = arr.reverse if reverse_option
             arr = [1] + arr if exclusive
 
@@ -207,6 +200,7 @@
 
         register_op :prod, noop: true do |context, tensor, _inputs|
           c = fp_type?(tensor.data_type) ? 1.0 : 1
+
           func = lambda do |arr|
             return c if arr.nil?
 
@@ -264,6 +258,14 @@
         register_op %i[min minimum], noop: true do |context, tensor, inputs|
           call_vector_op(tensor, :min, inputs[0], inputs[1], context, ->(t, u) { [t, u].min })
         end
+
+        def reduction(child_context, tensor, func)
+          val = global_eval(tensor, tensor.inputs[0], child_context)
+          axis = global_eval(tensor, tensor.inputs[1], child_context)
+          keep_dims = global_eval(tensor, tensor.options[:keepdims], child_context)
+
+          reduce(val, axis, keep_dims, func)
+        end
       end
     end
   end
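The new reduction helper centralizes what every reduce op needs: evaluate the input, the axis, and the keepdims option, then hand off to reduce. From the user's side this is the plumbing behind calls like the following sketch (the reduce_sum signature with a keepdims: keyword is an assumption, inferred from the options[:keepdims] read above):

```ruby
ts = TensorStream
sess = ts.session

x = ts.constant([[1.0, 2.0], [3.0, 4.0]])

sess.run(ts.reduce_sum(x))                     # all axes     => 10.0
sess.run(ts.reduce_sum(x, 0))                  # down columns => [4.0, 6.0]
sess.run(ts.reduce_sum(x, 1, keepdims: true))  # keep rank    => [[3.0], [7.0]]
```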