tensor_stream 0.1.1 → 0.1.2

This diff shows the changes between two publicly released versions of the package, as published to their public registry. It is provided for informational purposes only.
@@ -1,7 +1,8 @@
 module TensorStream
+  # A class that defines a TensorStream graph
   class Graph
     attr_accessor :nodes, :collections, :eager_execution
-
+
     def initialize
       @eager_execution = false
       @nodes = {}
@@ -25,7 +26,7 @@ module TensorStream
       Thread.current[:tensor_stream_current_graph] = TensorStream::Graph.new
     end
 
-    def get_collection(name, options = {})
+    def get_collection(name, _options = {})
       @collections[name.to_sym]
     end
 
@@ -35,15 +36,10 @@ module TensorStream
     end
 
     def add_node(node)
-      fail "Placeholder cannot be used when eager_execution is enabled" if @eager_execution && node.is_a?(Placeholder)
-      if @nodes[node.name]
-        node.name = uniqunify(node.name)
-      end
-
+      raise 'Placeholder cannot be used when eager_execution is enabled' if @eager_execution && node.is_a?(Placeholder)
+      node.name = uniqunify(node.name) if @nodes[node.name]
       @nodes[node.name] = node
-      if @eager_execution
-        node.value = node.eval
-      end
+      node.value = node.eval if @eager_execution
     end
 
     def node_added?(name)
@@ -60,15 +56,15 @@ module TensorStream
     end
 
     def add_variable(node, options = {})
-      fail "duplicate variable detected #{node.name} and reuse=false in current scope" if @nodes[node.name] && !options[:reuse]
+      raise "duplicate variable detected #{node.name} and reuse=false in current scope" if @nodes[node.name] && !options[:reuse]
 
       add_to_collection(GraphKeys::GLOBAL_VARIABLES, node)
 
       add_node(node)
     end
 
-    def control_dependencies(dependencies = [], &block)
-
+    def control_dependencies(_dependencies = [], &_block)
+      raise 'not implemented'
     end
 
     def enable_eager_execution
@@ -78,21 +74,58 @@ module TensorStream
     def disable_eager_execution
       @eager_execution = false
     end
-
+
     def executing_eagerly?
       @eager_execution
     end
 
+    def get_operation_counter
+      @op_counter ||= 0
+
+      name = @op_counter.zero? ? '' : "_#{@op_counter}"
+
+      @op_counter += 1
+
+      name
+    end
+
+    def get_placeholder_counter
+      @placeholder_counter ||= 0
+      @placeholder_counter += 1
+
+      return '' if @placeholder_counter == 1
+      "_#{@placeholder_counter}"
+    end
+
+    def get_var_counter
+      @var_counter ||= 0
+      @var_counter += 1
+
+      return '' if @var_counter == 1
+      "_#{@var_counter}"
+    end
+
+    def get_const_counter
+      @const_counter ||= 0
+
+      name = @const_counter.zero? ? '' : "_#{@const_counter}"
+
+      @const_counter += 1
+      name
+    end
+
     protected
 
     def uniqunify(name)
       counter = 0
       new_name = name
-      begin
-        counter +=1
+      Kernel.loop do
+        counter += 1
         new_name = "#{name}_#{counter}"
-      end while @nodes[new_name]
+
+        break unless @nodes.key?(new_name)
+      end
       new_name
     end
   end
-end
+end
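
Aside: the four counter methods added above replace the class-level `@@op_counter` that 0.1.1 kept on `Operation` (its removal appears at the bottom of this diff), so name suffixes are now tracked per `Graph` instance. A minimal sketch of the resulting behavior, assuming the 0.1.2 method bodies exactly as shown:

    graph = TensorStream::Graph.new
    graph.get_operation_counter    # => ""   (first name gets no suffix)
    graph.get_operation_counter    # => "_1"
    graph.get_placeholder_counter  # => ""   (increments before formatting,
    graph.get_placeholder_counter  # => "_2"  so this sequence skips "_1")

Note the small asymmetry: `get_operation_counter` and `get_const_counter` increment after formatting (suffixes run `_1, _2, ...`), while the placeholder and variable counters increment first (suffixes run `_2, _3, ...`).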
@@ -1,5 +1,5 @@
 module TensorStream
   class GraphKeys
-    GLOBAL_VARIABLES = 'variables'
+    GLOBAL_VARIABLES = 'variables'.freeze
   end
-end
+end
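
Aside: the added `.freeze` is a frozen-string hardening; a frozen constant cannot be mutated in place by code that looks it up. A quick sketch of what it prevents:

    GLOBAL_VARIABLES = 'variables'.freeze
    GLOBAL_VARIABLES << '_tmp'  # raises FrozenError (RuntimeError before Ruby 2.5)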
@@ -1,12 +1,13 @@
 module TensorStream
+  # module that contains helper functions useful for ops
   module OpHelper
-    def op(code, a, b = nil, options = {})
-      Operation.new(code.to_sym, a, b, options)
+    def op(code, t_a, t_b = nil, options = {})
+      Operation.new(code.to_sym, t_a, t_b, options)
     end
 
     # same as op but with a marker that it was internal generated
-    def i_op(code, a, b = nil, options = {})
-      Operation.new(code.to_sym, a, b, options.merge(internal: true))
+    def i_op(code, t_a, t_b = nil, options = {})
+      Operation.new(code.to_sym, t_a, t_b, options.merge(internal: true))
     end
 
     def cons(value, options = {})
@@ -17,13 +18,13 @@ module TensorStream
       TensorStream.constant(value, options.merge(internal: true))
     end
 
-    def shape_eval(input)
-      return [] unless input.kind_of?(Array)
+    def shape_eval(input, output_type = :int32)
+      return [] unless input.is_a?(Array)
       arr = []
       arr_ptr = input
 
       Kernel.loop do
-        arr << arr_ptr.size
+        arr << (TensorStream::Ops::FLOATING_POINT_TYPES.include?(output_type) ? arr_ptr.size.to_f : arr_ptr.size)
        arr_ptr = arr_ptr[0]
 
         break unless arr_ptr.is_a?(Array)
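
Aside: `shape_eval` gains an `output_type` parameter; when it names a floating-point type, each dimension size is emitted as a Float instead of an Integer. A sketch with hypothetical inputs, assuming `:float32` is listed in `TensorStream::Ops::FLOATING_POINT_TYPES`:

    include TensorStream::OpHelper

    shape_eval([[1, 2, 3], [4, 5, 6]])            # => [2, 3]
    shape_eval([[1, 2, 3], [4, 5, 6]], :float32)  # => [2.0, 3.0]
    shape_eval(42)                                # => [] (scalars have no shape)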
@@ -32,27 +33,30 @@ module TensorStream
       arr
     end
 
-    def dtype_eval(dtype, rank, value)
-      dtype = Tensor.detect_type(value[0])
-      rank+=1 if dtype == :array
+    def dtype_eval(rank, value)
+      dtype = Tensor.detect_type(value[0])
 
-      [dtype, rank, value[0], value.size]
-    end
+      rank += 1 if dtype == :array
 
-    def val_to_dtype(value, rank = 0)
-      dtype = if value.is_a?(String)
-        :string
-      elsif value.is_a?(Float)
-        :float32
-      elsif value.is_a?(Integer)
-        :int32
-      elsif value.is_a?(Array)
-        rank += 1
-        :array
-      else
-        :float32
-      end
-      dtype
-    end
+      [dtype, rank, value[0], value.size]
+    end
+
+    def val_to_dtype(value)
+      if value.is_a?(String)
+        :string
+      elsif value.is_a?(Float)
+        :float32
+      elsif value.is_a?(Integer)
+        :int32
+      elsif value.is_a?(Array)
+        :array
+      else
+        :float32
+      end
+    end
+
+    def fp_type?(type)
+      TensorStream::Ops::FLOATING_POINT_TYPES.include?(type)
+    end
   end
 end
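
Aside: `val_to_dtype` drops the `rank` accumulator it never meaningfully returned (rank tracking lives in `dtype_eval`), and the new `fp_type?` predicate names the membership test that `shape_eval` performs inline above. Hypothetical results of the simplified mapping:

    val_to_dtype('a')     # => :string
    val_to_dtype(1.5)     # => :float32
    val_to_dtype(2)       # => :int32
    val_to_dtype([1, 2])  # => :array
    fp_type?(:float32)    # => true, assuming :float32 is in FLOATING_POINT_TYPES
    fp_type?(:int32)      # => false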
@@ -3,19 +3,19 @@ module TensorStream
   class MathGradients
     extend TensorStream::OpHelper
 
-    def self.derivative(tensor, dx, options = {})
-      gradient_program_name = "_grad_#{tensor.name}_#{dx.name}"
+    def self.derivative(tensor, wrt_dx, options = {})
+      gradient_program_name = "_grad_#{tensor.name}_#{wrt_dx.name}"
       return options[:graph].get_node(gradient_program_name) if options[:graph] && options[:graph].node_added?(gradient_program_name)
 
       constant_options = { dtype: options[:dtype] }
       constant_options_1 = { dtype: options[:dtype] || tensor.data_type }
 
-      return i_op(:ones_like, dx, constant_options_1) if tensor.equal?(dx)
+      return i_op(:ones_like, wrt_dx, constant_options_1) if tensor.equal?(wrt_dx)
       return i_cons(0, constant_options) if options[:stop_gradients] && _include?(options[:stop_gradients], tensor)
 
       if tensor.is_a?(Operation)
-        grad = derivative(tensor.items[0], dx, options) if tensor.items[0]
-        grad2 = derivative(tensor.items[1], dx, options) if tensor.items[1]
+        grad = derivative(tensor.items[0], wrt_dx, options) if tensor.items[0]
+        grad2 = derivative(tensor.items[1], wrt_dx, options) if tensor.items[1]
 
         case tensor.operation
         when :max
@@ -51,11 +51,11 @@ module TensorStream
         when :cos
           -i_op(:sin, tensor.items[0]) * grad
         when :add
-          grad_with_broadcast(tensor, dx, ->(a,b) { i_op(:add, a, b, name: 'grad_sum') } , options)
+          grad_with_broadcast(tensor, wrt_dx, ->(a, b) { i_op(:add, a, b, name: 'grad_sum') }, options)
         when :sub
-          grad_with_broadcast(tensor, dx, ->(a,b) { i_op(:sub, a, b, name: 'grad_sub') } , options)
+          grad_with_broadcast(tensor, wrt_dx, ->(a, b) { i_op(:sub, a, b, name: 'grad_sub') }, options)
         when :pow
-          gx = _ds(tensor.items[1])*( _ds(tensor.items[0])**(_ds(tensor.items[1]) - 1)) * grad
+          gx = _ds(tensor.items[1]) * (_ds(tensor.items[0])**(_ds(tensor.items[1]) - 1)) * grad
 
           log_x = i_op(:where, i_op(:log, tensor.items[0], nil, name: 'log_pow_grad'), i_op(:zeros_like, tensor.items[0]), pred: tensor.items[0] > 0)
           gy = _ds(tensor.items[0])**_ds(tensor.items[1]) * log_x * grad2
@@ -74,18 +74,15 @@ module TensorStream
           input_size = i_op(:reduce_prod, i_op(:shape, tensor.items[0]))
           output_size = i_op(:reduce_prod, i_op(:shape, tensor))
           factor = input_size / output_size
-
+
           (grad / i_op(:cast, factor, data_type: grad.dtype))
         when :reduce_sum
           grad
         when :stop_gradient
           return i_cons(0, constant_options)
         when :matmul
-          tensor_shape1 = tensor.items[1].shape ? tensor.items[1].shape.shape : nil
-          tensor_shape0 = tensor.items[0].shape ? tensor.items[0].shape.shape : nil
-
-          derivative_a = derivative(tensor.items[0], dx)
-          derivative_b = derivative(tensor.items[1], dx)
+          derivative_a = derivative(tensor.items[0], wrt_dx)
+          derivative_b = derivative(tensor.items[1], wrt_dx)
 
           s0 = i_op(:shape, tensor.items[0])
           s1 = i_op(:shape, tensor.items[1])
@@ -94,13 +91,13 @@ module TensorStream
           identity_1 = i_op(:ones, [s0[0], s1[1]], nil, data_type: tensor.items[1].data_type)
 
           matmul_da = i_op(:matmul, identity_0, tensor.items[1], transpose_b: true,
-                                                                  pad_zeros: true,
-                                                                  name: 'matrix_dx')
+                                                                 pad_zeros: true,
+                                                                 name: 'matrix_dx')
           matmul_db = i_op(:matmul, tensor.items[0], identity_1, transpose_a: true,
-                                                                  pad_zeros: true,
-                                                                  name: 'matrix_dy')
+                                                                 pad_zeros: true,
+                                                                 name: 'matrix_dy')
 
-          zero_vect = i_op(:zeros_like, dx, nil, name: 'zero_vect')
+          zero_vect = i_op(:zeros_like, wrt_dx, nil, name: 'zero_vect')
 
           # matmul_db = op(:transpose, matmul_db, nil).first
 
@@ -143,9 +140,9 @@ module TensorStream
       end
     end
 
-    def self.grad_with_broadcast(tensor, dx, func, options)
-      grad = derivative(tensor.items[0], dx, options)
-      grad2 = derivative(tensor.items[1], dx, options)
+    def self.grad_with_broadcast(tensor, wrt_dx, func, options)
+      grad = derivative(tensor.items[0], wrt_dx, options)
+      grad2 = derivative(tensor.items[1], wrt_dx, options)
       elements1 = i_op(:reduce_prod, i_op(:shape, tensor.items[0]), data_type: :float32)
       elements2 = i_op(:reduce_prod, i_op(:shape, tensor.items[1]), data_type: :float32)
       multiplier = elements1 / elements2
@@ -157,4 +154,4 @@ module TensorStream
       false
     end
   end
-end
+end
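
Aside: the substantive change in this file is the rename of `dx` to `wrt_dx`, making explicit that the second argument is the tensor the derivative is taken with respect to. A hypothetical usage sketch (the placeholder constructor is assumed, not shown in this diff):

    x = TensorStream.placeholder(:float32)
    f = x * x
    df = TensorStream::MathGradients.derivative(f, x)  # symbolic graph for d(f)/d(x)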
@@ -1,7 +1,7 @@
 module TensorStream
   # High level machine learning functions
   class NN
-    def self.softmax(logits, options = {})
+    def self.softmax(logits, _options = {})
       TensorStream.exp(logits) / TensorStream.reduce_sum(TensorStream.exp(logits))
     end
 
@@ -14,4 +14,4 @@ module TensorStream
   def self.nn
     TensorStream::NN
   end
-end
+end
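
Aside: `NN.softmax` builds the textbook definition out of graph ops, softmax(z)[i] = exp(z[i]) / sum of exp(z[j]). The same arithmetic in plain Ruby on a concrete vector:

    logits = [1.0, 2.0, 3.0]
    exps   = logits.map { |l| Math.exp(l) }
    total  = exps.sum
    exps.map { |e| e / total }  # => approx [0.090, 0.245, 0.665]

Neither the graph version nor this sketch subtracts the max logit first, so very large logits can overflow to Infinity.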
@@ -1,35 +1,32 @@
 module TensorStream
+  # TensorStream class that defines an operation
   class Operation < Tensor
     attr_accessor :name, :operation, :items, :rank, :options
 
-    def initialize(operation, a, b, options = {})
+    def initialize(operation, input_a, input_b, options = {})
+      @graph = options[:graph] || TensorStream.get_default_graph
+
       @operation = operation
       @rank = options[:rank] || 0
       @name = options[:name] || set_name
       @internal = options[:internal]
       @given_name = @name
-      @source = set_source(caller_locations)
+      @source = format_source(caller_locations)
 
-      @graph = options[:graph] || TensorStream.get_default_graph
       @options = options
 
-
-      @items = [a, b].map { |i| options[:preserve_params_type] ? i : auto_wrap(i) }
+      @items = [input_a, input_b].map { |i| options[:preserve_params_type] ? i : auto_wrap(i) }
       @data_type = set_data_type(options[:data_type])
 
-      if options[:shape]
-        @shape = TensorShape.new(options[:shape], options[:shape].size || 0)
-      end
+      @shape = TensorShape.new(options[:shape], options[:shape].size || 0) if options[:shape]
+
       @graph.add_node(self)
     end
+
     def to_s
       @name
     end
 
-    def self.reset_counters
-      @@op_counter = 0
-    end
-
     def to_h
       {
         op: operation,
@@ -38,18 +35,18 @@ module TensorStream
       }
     end
 
-    def self.empty_matrix?(m)
-      if m.kind_of?(Array)
-        m.each do |item|
-          if item.kind_of?(Array)
-            return false if !empty_matrix?(item)
-          else
-            return false if item!=0 || item!=0.0
+    def self.empty_matrix?(input)
+      if input.is_a?(Array)
+        input.each do |item|
+          if item.is_a?(Array)
+            return false unless empty_matrix?(item)
+          elsif item != 0 || item != 0.0
+            return false
           end
         end
       end
 
-      return true
+      true
     end
 
     def set_data_type(passed_data_type)
@@ -64,7 +61,7 @@ module TensorStream
     end
 
     def to_math(name_only = false, max_depth = 99)
-      return @name if max_depth == 0
+      return @name if max_depth.zero?
 
       sub_item = auto_math(items[0], name_only, max_depth - 1)
 
@@ -78,15 +75,15 @@ module TensorStream
       when :slice
         "#{sub_item}[#{auto_math(items[1], name_only, max_depth - 1)}]"
       when :assign_sub
-        "(#{items[0] ? items[0].name : "self"} -= #{auto_math(items[1], name_only)})"
+        "(#{items[0] ? items[0].name : 'self'} -= #{auto_math(items[1], name_only)})"
       when :assign_add
-        "(#{items[0] ? items[0].name : "self"} += #{auto_math(items[1], name_only)})"
+        "(#{items[0] ? items[0].name : 'self'} += #{auto_math(items[1], name_only)})"
       when :assign
-        "(#{items[0] ? items[0].name : "self"} = #{auto_math(items[1], name_only)})"
+        "(#{items[0] ? items[0].name : 'self'} = #{auto_math(items[1], name_only)})"
       when :sin, :cos, :tanh
         "#{operation}(#{sub_item})"
       when :add
-        "(#{sub_item} + #{auto_math(items[1], name_only, max_depth - 1)})"
+        "(#{sub_item} + #{auto_math(items[1], name_only, max_depth - 1)})"
       when :sub
         "(#{sub_item} - #{auto_math(items[1], name_only, max_depth - 1)})"
       when :pow
@@ -126,7 +123,7 @@ module TensorStream
       when :ones_like
         "ones_like(#{sub_item})"
       when :flow_group
-        "flow_group(#{items.collect { |i| auto_math(i)}.join(',')})"
+        "flow_group(#{items.collect { |i| auto_math(i) }.join(',')})"
       when :zeros
         "zeros(#{sub_item})"
       when :reshape
@@ -158,38 +155,24 @@ module TensorStream
       when :zeros_like
         "zeros_like(#{sub_item})"
       when :where
-        "where(#{auto_math(options[:pred] , name_only, max_depth - 1)},#{auto_math(items[0])},#{auto_math(items[1])})"
+        "where(#{auto_math(options[:pred], name_only, max_depth - 1)},#{auto_math(items[0])},#{auto_math(items[1])})"
       when :max
         "max(#{auto_math(sub_item)},#{auto_math(items[1])})"
       when :cast
         "cast(#{auto_math(sub_item)}, #{data_type})"
       else
-        fail "math form for #{operation}"
+        raise "math form for #{operation}"
       end
     end
 
     def run
-      self.eval
+      eval
     end
 
     private
 
-    def self.operation_counter
-      @@op_counter ||= 0
-
-      name = if @@op_counter == 0
-               ""
-             else
-               "_#{@@op_counter}"
-             end
-
-      @@op_counter += 1
-
-      name
-    end
-
     def set_name
-      "#{@operation}#{Operation.operation_counter}:#{@rank}"
+      "#{@operation}#{graph.get_operation_counter}:#{@rank}"
     end
   end
-end
+end
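
Aside: the reordering in `initialize` is load-bearing: `@name = options[:name] || set_name` calls `set_name`, which now reads `graph.get_operation_counter`, so `@graph` must be assigned before the name is computed (in 0.1.1 the counter was a class variable and no graph was needed). A sketch of the names this produces, assuming rank 0 and the counter behavior shown earlier in this diff:

    g = TensorStream::Graph.new
    # name format: "#{operation}#{counter_suffix}:#{rank}"
    "add#{g.get_operation_counter}:0"  # => "add:0"
    "add#{g.get_operation_counter}:0"  # => "add_1:0"
    "sub#{g.get_operation_counter}:0"  # => "sub_2:0"

The counter is shared across all operation types on a graph, so the first `sub` here still gets `_2` rather than starting its own sequence.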