tensor_stream 0.1.0

Files changed (41)
  1. checksums.yaml +7 -0
  2. data/.gitignore +12 -0
  3. data/.rake_tasks~ +0 -0
  4. data/.rspec +2 -0
  5. data/.travis.yml +5 -0
  6. data/CODE_OF_CONDUCT.md +74 -0
  7. data/Gemfile +4 -0
  8. data/LICENSE.txt +21 -0
  9. data/README.md +123 -0
  10. data/Rakefile +6 -0
  11. data/bin/console +14 -0
  12. data/bin/setup +8 -0
  13. data/lib/tensor_stream.rb +138 -0
  14. data/lib/tensor_stream/control_flow.rb +23 -0
  15. data/lib/tensor_stream/evaluator/evaluator.rb +7 -0
  16. data/lib/tensor_stream/evaluator/operation_helpers/random_gaussian.rb +32 -0
  17. data/lib/tensor_stream/evaluator/ruby_evaluator.rb +749 -0
  18. data/lib/tensor_stream/graph.rb +98 -0
  19. data/lib/tensor_stream/graph_keys.rb +5 -0
  20. data/lib/tensor_stream/helpers/op_helper.rb +58 -0
  21. data/lib/tensor_stream/math_gradients.rb +161 -0
  22. data/lib/tensor_stream/monkey_patches/integer.rb +0 -0
  23. data/lib/tensor_stream/nn/nn_ops.rb +17 -0
  24. data/lib/tensor_stream/operation.rb +195 -0
  25. data/lib/tensor_stream/ops.rb +225 -0
  26. data/lib/tensor_stream/placeholder.rb +21 -0
  27. data/lib/tensor_stream/session.rb +66 -0
  28. data/lib/tensor_stream/tensor.rb +317 -0
  29. data/lib/tensor_stream/tensor_shape.rb +25 -0
  30. data/lib/tensor_stream/train/gradient_descent_optimizer.rb +23 -0
  31. data/lib/tensor_stream/train/saver.rb +61 -0
  32. data/lib/tensor_stream/trainer.rb +7 -0
  33. data/lib/tensor_stream/types.rb +17 -0
  34. data/lib/tensor_stream/variable.rb +52 -0
  35. data/lib/tensor_stream/version.rb +7 -0
  36. data/samples/iris.data +150 -0
  37. data/samples/iris.rb +117 -0
  38. data/samples/linear_regression.rb +55 -0
  39. data/samples/raw_neural_net_sample.rb +54 -0
  40. data/tensor_stream.gemspec +40 -0
  41. metadata +185 -0
data/lib/tensor_stream/graph.rb
@@ -0,0 +1,98 @@
+ module TensorStream
+   class Graph
+     attr_accessor :nodes, :collections, :eager_execution
+
+     def initialize
+       @eager_execution = false
+       @nodes = {}
+       @collections = {
+         :"#{GraphKeys::GLOBAL_VARIABLES}" => []
+       }
+     end
+
+     def reset
+       @nodes = {}
+       @collections = {
+         :"#{GraphKeys::GLOBAL_VARIABLES}" => []
+       }
+     end
+
+     def self.get_default_graph
+       Thread.current[:tensor_stream_current_graph] || create_default
+     end
+
+     def self.create_default
+       Thread.current[:tensor_stream_current_graph] = TensorStream::Graph.new
+     end
+
+     def get_collection(name, options = {})
+       @collections[name.to_sym]
+     end
+
+     def add_to_collection(collection_name, val)
+       @collections[collection_name.to_sym] ||= []
+       @collections[collection_name.to_sym] << val
+     end
+
+     def add_node(node)
+       fail "Placeholder cannot be used when eager_execution is enabled" if @eager_execution && node.is_a?(Placeholder)
+       if @nodes[node.name]
+         node.name = uniquify(node.name)
+       end
+
+       @nodes[node.name] = node
+       if @eager_execution
+         node.value = node.eval
+       end
+     end
+
+     def node_added?(name)
+       @nodes.key?(name)
+     end
+
+     def get_node(name)
+       @nodes[name]
+     end
+
+     def add_node!(name, node)
+       @nodes[name] = node
+       node
+     end
+
+     def add_variable(node, options = {})
+       fail "duplicate variable detected #{node.name} and reuse=false in current scope" if @nodes[node.name] && !options[:reuse]
+
+       add_to_collection(GraphKeys::GLOBAL_VARIABLES, node)
+
+       add_node(node)
+     end
+
+     # placeholder: control dependencies are not implemented yet
+     def control_dependencies(dependencies = [], &block)
+     end
+
+     def enable_eager_execution
+       @eager_execution = true
+     end
+
+     def disable_eager_execution
+       @eager_execution = false
+     end
+
+     def executing_eagerly?
+       @eager_execution
+     end
+
+     protected
+
+     # appends _1, _2, ... until the name no longer collides
+     def uniquify(name)
+       counter = 0
+       new_name = name
+       begin
+         counter += 1
+         new_name = "#{name}_#{counter}"
+       end while @nodes[new_name]
+       new_name
+     end
+   end
+ end
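
Graph stores the default graph in a thread-local, so tensor constructors can register nodes without passing a graph around explicitly. A minimal usage sketch (hypothetical, using only the Graph and GraphKeys classes from this changeset):

    graph = TensorStream::Graph.get_default_graph   # lazily created per thread
    graph.add_to_collection(TensorStream::GraphKeys::GLOBAL_VARIABLES, :my_var)
    graph.get_collection(TensorStream::GraphKeys::GLOBAL_VARIABLES)  # => [:my_var]

    graph.enable_eager_execution   # nodes now evaluate as soon as they are added
    graph.executing_eagerly?       # => true; placeholders are rejected in this mode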
data/lib/tensor_stream/graph_keys.rb
@@ -0,0 +1,5 @@
+ module TensorStream
+   class GraphKeys
+     GLOBAL_VARIABLES = 'variables'
+   end
+ end
data/lib/tensor_stream/helpers/op_helper.rb
@@ -0,0 +1,58 @@
+ module TensorStream
+   module OpHelper
+     def op(code, a, b = nil, options = {})
+       Operation.new(code.to_sym, a, b, options)
+     end
+
+     # same as op, but flagged as internally generated
+     def i_op(code, a, b = nil, options = {})
+       Operation.new(code.to_sym, a, b, options.merge(internal: true))
+     end
+
+     def cons(value, options = {})
+       TensorStream.constant(value, options)
+     end
+
+     def i_cons(value, options = {})
+       TensorStream.constant(value, options.merge(internal: true))
+     end
+
+     # infers a shape by walking the first element at each nesting level
+     def shape_eval(input)
+       return [] unless input.kind_of?(Array)
+       arr = []
+       arr_ptr = input
+
+       Kernel.loop do
+         arr << arr_ptr.size
+         arr_ptr = arr_ptr[0]
+
+         break unless arr_ptr.is_a?(Array)
+       end
+
+       arr
+     end
+
+     # the passed dtype is recomputed from the first element of value
+     def dtype_eval(dtype, rank, value)
+       dtype = Tensor.detect_type(value[0])
+       rank += 1 if dtype == :array
+
+       [dtype, rank, value[0], value.size]
+     end
+
+     # NOTE: rank is a local copy here, so the increment below
+     # does not propagate back to the caller
+     def val_to_dtype(value, rank = 0)
+       if value.is_a?(String)
+         :string
+       elsif value.is_a?(Float)
+         :float32
+       elsif value.is_a?(Integer)
+         :int32
+       elsif value.is_a?(Array)
+         rank += 1
+         :array
+       else
+         :float32
+       end
+     end
+   end
+ end
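
Note that shape_eval only inspects the first element at each nesting level, so ragged arrays are reported by their leading row. Expected results, as a sketch:

    include TensorStream::OpHelper

    shape_eval(5)                      # => [] (scalars have an empty shape)
    shape_eval([1, 2, 3])              # => [3]
    shape_eval([[1, 2, 3], [4, 5, 6]]) # => [2, 3]
    shape_eval([[1, 2], [3]])          # => [2, 2] (only the first row is walked)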
data/lib/tensor_stream/math_gradients.rb
@@ -0,0 +1,161 @@
+ module TensorStream
+   # Class that provides auto-differentiation
+   class MathGradients
+     extend TensorStream::OpHelper
+
+     def self.derivative(tensor, dx, options = {})
+       gradient_program_name = "_grad_#{tensor.name}_#{dx.name}"
+       return options[:graph].get_node(gradient_program_name) if options[:graph] && options[:graph].node_added?(gradient_program_name)
+
+       target_shape = options[:target_shape]
+       constant_options = { dtype: options[:dtype] }
+       constant_options_1 = { dtype: options[:dtype] || tensor.data_type, shape: target_shape }
+
+       return i_cons(1, constant_options_1) if tensor.equal?(dx)
+       return i_cons(0, constant_options) if options[:stop_gradients] && _include?(options[:stop_gradients], tensor)
+
+       if tensor.is_a?(Operation)
+         grad = derivative(tensor.items[0], dx, options) if tensor.items[0]
+         grad2 = derivative(tensor.items[1], dx, options) if tensor.items[1]
+
+         case tensor.operation
+         when :max
+           x_mask = i_op(:where, i_op(:ones_like, tensor.items[0]), i_op(:zeros_like, tensor.items[1]), pred: tensor.items[0] > tensor.items[1])
+           y_mask = i_op(:where, i_op(:zeros_like, tensor.items[0]), i_op(:ones_like, tensor.items[1]), pred: tensor.items[0] < tensor.items[1])
+           x_mask * grad + y_mask * grad2
+         when :where
+           x_mask = i_op(:where, i_op(:ones_like, tensor.items[0]), i_op(:zeros_like, tensor.items[1]), pred: tensor.options[:pred])
+           y_mask = i_op(:where, i_op(:zeros_like, tensor.items[0]), i_op(:ones_like, tensor.items[1]), pred: tensor.options[:pred])
+           x_mask * grad + y_mask * grad2
+         when :cond
+           i_op(:cond, grad, grad2, pred: tensor.options[:pred])
+         when :identity, :print, :pad
+           grad
+         when :negate
+           i_cons(-1, constant_options_1) * grad
+         when :abs
+           grad * i_op(:sign, _ds(tensor.items[0]))
+         when :square
+           i_cons(2, constant_options_1) * _ds(tensor.items[0]) * grad
+         when :exp
+           i_op(:exp, tensor.items[0]) * grad
+         when :log
+           (i_cons(1, constant_options_1) / _ds(tensor.items[0])) * grad
+         when :tanh
+           (i_cons(1, constant_options_1) - (i_op(:tanh, _ds(tensor.items[0]))**2)) * grad
+         when :tan
+           (i_cons(1, constant_options_1) / (i_op(:cos, _ds(tensor.items[0]))**2)) * grad
+         when :sin
+           i_op(:cos, tensor.items[0]) * grad
+         when :sqrt
+           i_cons(1, constant_options_1) / (i_cons(2, constant_options_1) * i_op(:sqrt, _ds(tensor.items[0]))) * grad
+         when :cos
+           -i_op(:sin, tensor.items[0]) * grad
+         when :add
+           grad_with_broadcast(tensor, dx, ->(a, b) { i_op(:add, a, b, name: 'grad_sum') }, options)
+         when :sub
+           grad_with_broadcast(tensor, dx, ->(a, b) { i_op(:sub, a, b, name: 'grad_sub') }, options)
+         when :pow
+           gx = _ds(tensor.items[1]) * (_ds(tensor.items[0])**(_ds(tensor.items[1]) - 1)) * grad
+
+           log_x = i_op(:where, i_op(:log, tensor.items[0], nil, name: 'log_pow_grad'), i_op(:zeros_like, tensor.items[0]), pred: tensor.items[0] > 0)
+           gy = _ds(tensor.items[0])**_ds(tensor.items[1]) * log_x * grad2
+
+           gx + gy
+         when :div
+           # apply the quotient rule
+           gx = i_op(:div, grad, _ds(tensor.items[1]))
+           gy = grad2 * i_op(:div, i_op(:div, -_ds(tensor.items[0]), _ds(tensor.items[1])), _ds(tensor.items[1]))
+
+           gx + gy
+         when :mul
+           # apply the product rule
+           grad * _ds(tensor.items[1]) + _ds(tensor.items[0]) * grad2
+         when :reduce_mean
+           input_size = i_op(:reduce_prod, i_op(:shape, tensor.items[0]))
+           output_size = i_op(:reduce_prod, i_op(:shape, tensor))
+           factor = input_size / output_size
+
+           grad / i_op(:cast, factor, data_type: grad.dtype)
+         when :reduce_sum
+           grad
+         when :stop_gradient
+           return i_cons(0, constant_options)
+         when :matmul
+           tensor_shape1 = tensor.items[1].shape ? tensor.items[1].shape.shape : nil
+           tensor_shape0 = tensor.items[0].shape ? tensor.items[0].shape.shape : nil
+
+           derivative_a = derivative(tensor.items[0], dx, target_shape: target_shape)
+           derivative_b = derivative(tensor.items[1], dx, target_shape: target_shape)
+
+           s0 = i_op(:shape, tensor.items[0])
+           s1 = i_op(:shape, tensor.items[1])
+
+           identity_0 = i_op(:ones, [s0[0], s1[1]], nil, data_type: tensor.items[0].data_type)
+           identity_1 = i_op(:ones, [s0[0], s1[1]], nil, data_type: tensor.items[1].data_type)
+
+           matmul_da = i_op(:matmul, identity_0, tensor.items[1], transpose_b: true,
+                            pad_zeros: true,
+                            name: 'matrix_dx')
+           matmul_db = i_op(:matmul, tensor.items[0], identity_1, transpose_a: true,
+                            pad_zeros: true,
+                            name: 'matrix_dy')
+
+           zero_vect = i_op(:zeros, target_shape, nil, name: 'zero_vect')
+
+           # matmul_db = op(:transpose, matmul_db, nil).first
+
+           # begin_a = op(:zeros, op(:rank, matmul_db), nil, data_type: :int32, name: 'begin_a')
+           # matmul_b_shape = op(:shape, matmul_db)
+           # end_a = [matmul_b_shape[0], 1]
+
+           matmul_da = i_op(:cond, matmul_da[0], matmul_da, pred: op(:rank, derivative_a) > 0)
+
+           # matmul_da = op(:cond, matmul_da[0], matmul_da, pred: op(:rank, derivative_a) > 0)
+           norm_a = i_op(:mul, derivative_a, matmul_da, name: 'grad_a_norm_mul_da')
+           norm_b = i_op(:mul, derivative_b, matmul_db, name: 'grad_b_norm_mul_db')
+
+           # norm_a = i_op(:cond, norm_a[0], norm_a, pred: i_op(:rank, matmul_da) > i_op(:rank, derivative_a))
+           # norm_b = i_op(:cond, norm_b[0], norm_b, pred: i_op(:rank, matmul_db) > i_op(:rank, derivative_b))
+
+           i_op(:cond, norm_a, zero_vect, pred: i_op(:reduce_sum, norm_a) != 0) + i_op(:cond, norm_b, zero_vect, pred: i_op(:reduce_sum, nor_b = norm_b) != 0)
+         else
+           raise "no derivative implementation found for op #{tensor.operation}"
+         end
+       elsif tensor.is_a?(TensorStream::Variable)
+         i_cons(0, constant_options)
+       elsif tensor.is_a?(TensorStream::Placeholder)
+         i_cons(0, constant_options)
+       else
+         i_cons(0, constant_options)
+       end.tap do |ops|
+         options[:graph].add_node!(gradient_program_name, ops) if options[:graph]
+       end
+     end
+
+     # unwraps a reduce_sum so its operand is differentiated directly
+     def self._ds(tensor)
+       return tensor unless tensor.is_a?(Operation)
+
+       case tensor.operation
+       when :reduce_sum
+         tensor.items[0]
+       else
+         tensor
+       end
+     end
+
+     def self.grad_with_broadcast(tensor, dx, func, options)
+       grad = derivative(tensor.items[0], dx, options)
+       grad2 = derivative(tensor.items[1], dx, options)
+       elements1 = i_op(:reduce_prod, i_op(:shape, tensor.items[0]), data_type: :float32)
+       elements2 = i_op(:reduce_prod, i_op(:shape, tensor.items[1]), data_type: :float32)
+       multiplier = elements1 / elements2
+       func.call(grad, grad2 * multiplier)
+     end
+
+     # identity-based include check (Array#include? would use ==)
+     def self._include?(arr, obj)
+       arr.each { |a| return true if a.equal?(obj) }
+       false
+     end
+   end
+ end
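
derivative walks the expression tree and applies the chain rule case by case; for example, x * x hits the :mul branch, whose product rule emits 1 * x + x * 1, i.e. 2x. A hypothetical sketch (assuming the constant helper from lib/tensor_stream.rb and Tensor's operator overloads, both part of this changeset but not shown here):

    x = TensorStream.constant(3.0, name: 'x')
    dydx = TensorStream::MathGradients.derivative(x * x, x)
    # evaluating dydx in a session should yield 6.0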
data/lib/tensor_stream/monkey_patches/integer.rb
File without changes
data/lib/tensor_stream/nn/nn_ops.rb
@@ -0,0 +1,17 @@
+ module TensorStream
+   # High level machine learning functions
+   class NN
+     def self.softmax(logits, options = {})
+       TensorStream.exp(logits) / TensorStream.reduce_sum(TensorStream.exp(logits))
+     end
+
+     def self.relu(features, name: nil)
+       TensorStream.max(features, 0, name: "relu_#{name}")
+     end
+   end
+
+   # tensorflow compatibility
+   def self.nn
+     TensorStream::NN
+   end
+ end
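
The softmax here is the direct textbook form, exp(l_i) / Σ_j exp(l_j), with no max-subtraction for numerical stability, so very large logits can overflow. A usage sketch via the tensorflow-style alias (constant and session helpers assumed from elsewhere in this changeset):

    logits = TensorStream.constant([1.0, 2.0, 3.0])
    probs = TensorStream.nn.softmax(logits)
    # evaluating probs should give values summing to 1, roughly [0.09, 0.24, 0.67]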
data/lib/tensor_stream/operation.rb
@@ -0,0 +1,195 @@
+ module TensorStream
+   class Operation < Tensor
+     attr_accessor :name, :operation, :items, :rank, :options
+
+     def initialize(operation, a, b, options = {})
+       @operation = operation
+       @rank = options[:rank] || 0
+       @name = options[:name] || set_name
+       @internal = options[:internal]
+       @given_name = @name
+       @source = set_source(caller_locations)
+
+       @graph = options[:graph] || TensorStream.get_default_graph
+       @options = options
+
+       @items = [a, b].map { |i| options[:preserve_params_type] ? i : auto_wrap(i) }
+       @data_type = set_data_type(options[:data_type])
+
+       if options[:shape]
+         @shape = TensorShape.new(options[:shape], options[:shape].size || 0)
+       end
+       @graph.add_node(self)
+     end
+
+     def to_s
+       @name
+     end
+
+     def self.reset_counters
+       @@op_counter = 0
+     end
+
+     def to_h
+       {
+         op: operation,
+         name: name,
+         operands: hashify_tensor(items)
+       }
+     end
+
+     def self.empty_matrix?(m)
+       if m.kind_of?(Array)
+         m.each do |item|
+           if item.kind_of?(Array)
+             return false unless empty_matrix?(item)
+           else
+             return false if item != 0
+           end
+         end
+       end
+
+       true
+     end
+
+     def set_data_type(passed_data_type)
+       case operation
+       when :greater, :less, :equal
+         :boolean
+       when :shape, :rank
+         :int32
+       else
+         passed_data_type || (@items[0] ? @items[0].data_type : :unknown)
+       end
+     end
+
+     def to_math(name_only = false, max_depth = 99)
+       return @name if max_depth == 0
+
+       sub_item = auto_math(items[0], name_only, max_depth - 1)
+
+       case operation
+       when :argmax
+         "argmax(#{auto_math(items[0])},#{options[:axis]})"
+       when :negate
+         "-#{sub_item}"
+       when :index
+         "#{sub_item}[#{auto_math(items[1], name_only, max_depth - 1)}]"
+       when :slice
+         "#{sub_item}[#{auto_math(items[1], name_only, max_depth - 1)}]"
+       when :assign_sub
+         "(#{items[0] ? items[0].name : 'self'} -= #{auto_math(items[1], name_only)})"
+       when :assign_add
+         "(#{items[0] ? items[0].name : 'self'} += #{auto_math(items[1], name_only)})"
+       when :assign
+         "(#{items[0] ? items[0].name : 'self'} = #{auto_math(items[1], name_only)})"
+       when :sin, :cos, :tanh
+         "#{operation}(#{sub_item})"
+       when :add
+         "(#{sub_item} + #{auto_math(items[1], name_only, max_depth - 1)})"
+       when :sub
+         "(#{sub_item} - #{auto_math(items[1], name_only, max_depth - 1)})"
+       when :pow
+         "(#{sub_item}^#{auto_math(items[1], name_only, max_depth - 1)})"
+       when :div
+         "(#{sub_item} / #{auto_math(items[1], name_only, max_depth - 1)})"
+       when :mul
+         if auto_math(items[0]) == 1
+           auto_math(items[1], name_only, max_depth - 1)
+         elsif auto_math(items[1]) == 1
+           sub_item
+         else
+           "(#{sub_item} * #{auto_math(items[1], name_only, max_depth - 1)})"
+         end
+       when :reduce_sum
+         "reduce_sum(|#{sub_item}|)"
+       when :reduce_mean
+         "reduce_mean(|#{sub_item}|)"
+       when :reduce_prod
+         "reduce_prod(|#{sub_item}|)"
+       when :gradients
+         "gradient(#{sub_item})"
+       when :stop_gradient
+         sub_item
+       when :matmul
+         "#{sub_item}.matmul(#{auto_math(items[1], name_only, max_depth - 1)})"
+       when :eye
+         "eye(#{sub_item})"
+       when :transpose
+         "transpose(#{sub_item})"
+       when :shape
+         "#{sub_item}.shape"
+       when :exp
+         "e^(#{sub_item})"
+       when :ones
+         "ones(#{sub_item})"
+       when :ones_like
+         "ones_like(#{sub_item})"
+       when :flow_group
+         "flow_group(#{items.collect { |i| auto_math(i) }.join(',')})"
+       when :zeros
+         "zeros(#{sub_item})"
+       when :reshape
+         "reshape(#{sub_item},#{auto_math(items[1], name_only, max_depth - 1)})"
+       when :rank
+         "#{sub_item}.rank"
+       when :cond
+         "(#{auto_math(options[:pred])} ? #{sub_item} : #{auto_math(items[1], name_only, max_depth - 1)})"
+       when :less
+         "#{sub_item} < #{auto_math(items[1], name_only, max_depth - 1)}"
+       when :greater
+         "#{sub_item} > #{auto_math(items[1], name_only, max_depth - 1)}"
+       when :square
+         "#{sub_item}\u00B2"
+       when :log
+         "log(#{sub_item})"
+       when :identity
+         "identity(#{sub_item})"
+       when :print
+         "print(#{sub_item})"
+       when :pad
+         "pad(#{sub_item},#{auto_math(options[:paddings])})"
+       when :equal
+         "#{sub_item} == #{auto_math(items[1], name_only, max_depth - 1)}"
+       when :not_equal
+         "#{sub_item} != #{auto_math(items[1], name_only, max_depth - 1)}"
+       when :sqrt
+         "sqrt(#{sub_item})"
+       when :zeros_like
+         "zeros_like(#{sub_item})"
+       when :where
+         "where(#{auto_math(options[:pred], name_only, max_depth - 1)},#{auto_math(items[0])},#{auto_math(items[1])})"
+       when :max
+         "max(#{sub_item},#{auto_math(items[1])})"
+       when :cast
+         "cast(#{sub_item}, #{data_type})"
+       else
+         fail "no math form found for op #{operation}"
+       end
+     end
+
+     def run
+       self.eval
+     end
+
+     # class-level counter used to generate unique default names;
+     # declared above `private` since `private` does not affect singleton methods
+     def self.operation_counter
+       @@op_counter ||= 0
+
+       name = if @@op_counter == 0
+                ''
+              else
+                "_#{@@op_counter}"
+              end
+
+       @@op_counter += 1
+
+       name
+     end
+
+     private
+
+     def set_name
+       "#{@operation}#{Operation.operation_counter}:#{@rank}"
+     end
+   end
+ end
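
to_math renders an operation tree as a readable expression, which is handy when inspecting auto-generated gradient graphs. A sketch, assuming Tensor's operator overloads from tensor.rb:

    a = TensorStream.constant(1.0)
    b = TensorStream.constant(2.0)
    (a + b).to_math   # => something like "(1.0 + 2.0)", depending on how
                      #    auto_math renders constants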