tensor_stream 0.1.2 → 0.1.3

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz: b5b9fcecbe7b27fadeaf403d886ebd0bbdaff531
-  data.tar.gz: b33ee60d3a4fc3c182a2984b8caaac80b7559435
+  metadata.gz: adb3c1e45383170db2d5c9f5f3568c251cdddb64
+  data.tar.gz: 99cd0f23353e8f72af2bda3bb84cf4b61c615445
 SHA512:
-  metadata.gz: f847e4d908f8c1b77f331caa45ddfec15554c44076d8e49b1d5520a61bdfe4294542c5a50433699cfa9e8e88eee3ae5780c00f02d2451282f1143fb5a92822be
-  data.tar.gz: 0ce481db0db30fc53921f011be4d4f294b7f9b78d9fc6a91690f694a912d4d3cbd4cf452290162d8af88b4916bc0a3a7738cf58a5f2082292e833bb8325c92b4
+  metadata.gz: 62eb911e080ba894d6c349870d86286fecbd02f8ad0b762fe33ca311ffbd500f18f78f80520d5e875dc821b1c0f8d556f45fe913142bc02ae33d931af0fb840c
+  data.tar.gz: d599f839bd0d6ba706d3c7fafb3e903a17070af7dfd5dbe733ce21e2641bc4428f8e0d204f649f7f80c843aa48ff2e3c98880c36822a071279de5cd62a39b3dc
data/README.md CHANGED
@@ -6,10 +6,11 @@ The goal of this gem is to have a high performance machine learning and compute
 
  ## Features
 
- - Replicates most of the commonly used low-level tensorflow ops
+ - Replicates most of the commonly used low-level tensorflow ops (tf.add, tf.constant, tf.placeholder, tf.matmul, tf.sin, etc.)
  - Supports auto-differentiation via tf.gradients (mostly)
  - Provision to use your own opcode evaluator (opencl, sciruby and tensorflow backends planned)
  - Goal is to be as close to TensorFlow in behavior but with some freedom to add ruby specific enhancements (with lots of test cases)
+ - Eager execution (experimental)
 
  Since this is a pure ruby implementation for now, performance is not there yet. However it should be a good enough environment to learn about tensorflow and experiment with some models.
 
@@ -93,6 +94,76 @@ tf.session do |sess|
  end
  ```
 
+ ## Python to Ruby guide
+
+ Not all ops are available. Available ops are defined in lib/tensor_stream/ops.rb; the corresponding gradients are found in lib/tensor_stream/math_gradients.
+
+ There are also some differences with regard to naming conventions and named parameters:
+
+ ### Variables
+
+ To make referencing Python examples easier, it is recommended to use "tf" as the TensorStream namespace.
+
+ At the beginning:
+
+ ```ruby
+ tf = TensorStream # recommended, since most sample models on the net use tf
+ ts = TensorStream # use this if you plan to use TensorStream-only features, so other devs will know
+ ```
+
+ Note the difference in named and optional parameters.
+
+ Python:
+
+ ```python
+ w = tf.Variable(0, name='weights')
+ w = tf.Variable(0, 'weights')
+ ```
+
+ Ruby:
+
+ ```ruby
+ w = tf.variable(0, name: 'weights')
+ ```
+
+ ### Shapes
+
+ Python:
+
+ ```python
+ x = tf.placeholder(tf.float32, shape=(1024, 1024))
+ x = tf.placeholder(tf.float32, shape=(None, 1024))
+ ```
+
+ Ruby supports symbols for specifying data types; nil can be used in place of None.
+
+ Ruby:
+
+ ```ruby
+ x = tf.placeholder(:float32, shape: [1024, 1024])
+ x = tf.placeholder(:float32, shape: [nil, 1024])
+ ```
+
+ For debugging, each operation or tensor supports the to_math method:
+
+ ```ruby
+ X = tf.placeholder("float")
+ Y = tf.placeholder("float")
+ W = tf.variable(rand, name: "weight")
+ b = tf.variable(rand, name: "bias")
+ pred = X * W + b
+ cost = tf.reduce_sum(tf.pow(pred - Y, 2)) / (2 * 10)
+ cost.to_math # "(reduce_sum(|((((Placeholder: * weight) + bias) - Placeholder_2:)^2)|) / 10.0)"
+ ```
+
+ Breakpoints can also be set; the block will be evaluated during computation.
+
+ ```ruby
+ a = tf.constant([2,2])
+ b = tf.constant([3,3])
+
+ f = tf.matmul(a, b).breakpoint! { |tensor, a, b, result_value| binding.pry }
+
+ tf.session.run(f)
+ ```
+
 
  ## Roadmap
 
  - Docs
@@ -175,8 +175,7 @@ module TensorStream
 
   generate_vector(tensor.options[:shape], generator: generator)
  when :flow_group
- threads = tensor.items.collect { |item| Concurrent::Future.execute(executor: @thread_pool) { run(item, child_context) } }
- threads.collect(&:value)
+ tensor.items.collect { |item| run(item, child_context) }
  when :assign
   assign = tensor.items[0] || tensor
   assign.value = complete_eval(tensor.items[1], child_context)
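The change above drops the thread pool for :flow_group and evaluates the items sequentially on the calling thread. A rough sketch of the before/after patterns in plain Ruby (illustrative only, not the gem's internals; the "before" case assumes the concurrent-ruby gem):

```ruby
# Illustration only; not TensorStream code. Requires the concurrent-ruby gem.
require 'concurrent'

items = [1, 2, 3]
pool  = Concurrent::FixedThreadPool.new(4)

# Before: each item scheduled on the pool; #value blocks until that future resolves.
futures = items.map { |n| Concurrent::Future.execute(executor: pool) { n * n } }
parallel_results = futures.map(&:value)       # => [1, 4, 9]

# After: the same work done in order on the calling thread.
sequential_results = items.map { |n| n * n }  # => [1, 4, 9]
```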
@@ -634,7 +633,16 @@ module TensorStream
   raise FullEvalNotPossible.new, "full eval not possible for #{eval_vector.name}" if eval_vector.is_a?(Tensor) || constant.is_a?(Tensor)
 
   eval_vector.each_with_index.collect do |item, index|
- c = constant.is_a?(Array) ? constant[index] : constant
+ c = if constant.is_a?(Array)
+       if index < constant.size
+         constant[index]
+       else
+         raise "incompatible tensor shapes used during op" if constant.size != 1
+         constant[0]
+       end
+     else
+       constant
+     end
   if item.is_a?(Array)
     constant_op(item, c, child_context, op, switch)
   elsif item.respond_to?(:value)
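The added branch broadcasts a single-element constant across the vector and rejects any other length mismatch. A minimal standalone sketch of the same rule (plain Ruby with a hypothetical helper name; not the gem's API):

```ruby
# Hypothetical helper illustrating the broadcast rule above; not part of tensor_stream.
def pick_operand(constant, index)
  return constant unless constant.is_a?(Array)   # scalars apply to every element
  return constant[index] if index < constant.size
  raise "incompatible tensor shapes used during op" if constant.size != 1
  constant[0]                                     # single-element arrays broadcast
end

pick_operand([10, 20, 30], 1) # => 20
pick_operand([5], 2)          # => 5  (broadcast)
pick_operand(7, 0)            # => 7  (scalar)
# pick_operand([1, 2], 5)     # raises: incompatible tensor shapes used during op
```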
@@ -51,9 +51,9 @@ module TensorStream
  when :cos
   -i_op(:sin, tensor.items[0]) * grad
  when :add
- grad_with_broadcast(tensor, wrt_dx, ->(a, b) { i_op(:add, a, b, name: 'grad_sum') }, options)
+ _grad_with_broadcast(tensor, wrt_dx, ->(a, b) { i_op(:add, a, b, name: 'grad_sum') }, options)
  when :sub
- grad_with_broadcast(tensor, wrt_dx, ->(a, b) { i_op(:sub, a, b, name: 'grad_sub') }, options)
+ _grad_with_broadcast(tensor, wrt_dx, ->(a, b) { i_op(:sub, a, b, name: 'grad_sub') }, options)
  when :pow
   gx = _ds(tensor.items[1]) * (_ds(tensor.items[0])**(_ds(tensor.items[1]) - 1)) * grad
 
@@ -66,10 +66,10 @@ module TensorStream
   gx = i_op(:div, grad, _ds(tensor.items[1]))
   gy = grad2 * i_op(:div, i_op(:div, -_ds(tensor.items[0]), _ds(tensor.items[1])), _ds(tensor.items[1]))
 
- gx + gy
+ _reduce_when_necessary(gx + gy, wrt_dx)
  when :mul
   # apply the product rule
- grad * _ds(tensor.items[1]) + _ds(tensor.items[0]) * grad2
+ _reduce_when_necessary(grad * _ds(tensor.items[1]) + _ds(tensor.items[0]) * grad2, wrt_dx)
  when :reduce_mean
   input_size = i_op(:reduce_prod, i_op(:shape, tensor.items[0]))
   output_size = i_op(:reduce_prod, i_op(:shape, tensor))
@@ -115,6 +115,7 @@ module TensorStream
   # norm_b = i_op(:cond, norm_b[0], norm_b, pred: i_op(:rank, matmul_db) > i_op(:rank, derivative_b))
 
   i_op(:cond, norm_a, zero_vect, pred: i_op(:reduce_sum, norm_a) != 0) + i_op(:cond, norm_b, zero_vect, pred: i_op(:reduce_sum, norm_b) != 0)
+ # m.breakpoint! { |t, a, b, v| binding.pry }
  else
   raise "no derivative implementation found for op #{tensor.operation}"
  end
@@ -140,18 +141,25 @@ module TensorStream
   end
  end
 
- def self.grad_with_broadcast(tensor, wrt_dx, func, options)
+ def self._grad_with_broadcast(tensor, wrt_dx, func, options)
   grad = derivative(tensor.items[0], wrt_dx, options)
   grad2 = derivative(tensor.items[1], wrt_dx, options)
   elements1 = i_op(:reduce_prod, i_op(:shape, tensor.items[0]), data_type: :float32)
   elements2 = i_op(:reduce_prod, i_op(:shape, tensor.items[1]), data_type: :float32)
   multiplier = elements1 / elements2
- func.call(grad, grad2 * multiplier)
+ _reduce_when_necessary(func.call(grad, grad2 * multiplier), wrt_dx)
  end
 
  def self._include?(arr, obj)
   arr.each { |a| return true if a.equal?(obj) }
   false
  end
+
+ def self._reduce_when_necessary(tensor, wrt_dx)
+   rank = op(:rank, tensor)
+   dx_rank = op(:rank, wrt_dx)
+   reduced = op(:reduce_sum, tensor, nil, axis: 0)
+   op(:cond, ->{ reduced }, tensor, pred: rank > dx_rank)
+ end
  end
  end
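The new _reduce_when_necessary helper sums a gradient over axis 0 whenever its rank exceeds the rank of the tensor the derivative is taken with respect to, which is the usual fix for gradients of broadcast ops (e.g. a bias added across a batch). A conceptual sketch of that behavior on plain Ruby arrays (illustrative only; the real helper builds :rank, :reduce_sum and :cond graph ops instead):

```ruby
# Conceptual illustration using nested arrays; not the gem's graph-based implementation.
def rank(value)
  value.is_a?(Array) ? 1 + rank(value.first) : 0
end

def reduce_when_necessary(grad, wrt)
  return grad unless rank(grad) > rank(wrt)
  # reduce_sum along axis 0: collapse the broadcast (e.g. batch) dimension
  grad.transpose.map { |column| column.reduce(:+) }
end

reduce_when_necessary([[1, 2], [3, 4]], [0, 0]) # => [4, 6]
reduce_when_necessary([1, 2], [0, 0])           # => [1, 2] (ranks already match)
```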
@@ -1,5 +1,5 @@
  module TensorStream
- VERSION = '0.1.2'.freeze
+ VERSION = '0.1.3'.freeze
 
   def self.version
     VERSION
@@ -83,7 +83,7 @@ biases = {
  # Create model
  def neural_net(x, weights, biases)
   # Hidden fully connected layer with 256 neurons
- layer_1 = TensorStream.add(TensorStream.matmul(x, weights[:h1]), biases[:b1] , name: 'layer1_add')
+ layer_1 = TensorStream.add(TensorStream.matmul(x, weights[:h1]), biases[:b1], name: 'layer1_add')
   # Hidden fully connected layer with 256 neurons
   layer_2 = TensorStream.add(TensorStream.matmul(layer_1, weights[:h2]), biases[:b2], name: 'layer2_add')
   # Output fully connected layer with a neuron for each class
metadata CHANGED
@@ -1,7 +1,7 @@
  --- !ruby/object:Gem::Specification
  name: tensor_stream
  version: !ruby/object:Gem::Version
- version: 0.1.2
+ version: 0.1.3
  platform: ruby
  authors:
  - Joseph Emmanuel Dayo
@@ -193,7 +193,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
   version: '0'
  requirements: []
  rubyforge_project:
- rubygems_version: 2.6.11
+ rubygems_version: 2.6.8
  signing_key:
  specification_version: 4
  summary: A Pure ruby tensorflow implementation