tensor_stream 1.0.0 → 1.0.1

This diff shows the changes between publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the package contents as they appear in their respective public registries.
Files changed (142)
  1. checksums.yaml +4 -4
  2. data/.gitignore +1 -0
  3. data/.rubocop.yml +1 -0
  4. data/Gemfile +1 -1
  5. data/LICENSE.txt +1 -1
  6. data/README.md +34 -34
  7. data/Rakefile +3 -3
  8. data/USAGE_GUIDE.md +235 -0
  9. data/bin/stubgen +20 -0
  10. data/exe/model_utils +2 -2
  11. data/lib/tensor_stream.rb +45 -44
  12. data/lib/tensor_stream/constant.rb +2 -2
  13. data/lib/tensor_stream/control_flow.rb +1 -1
  14. data/lib/tensor_stream/debugging/debugging.rb +2 -2
  15. data/lib/tensor_stream/dynamic_stitch.rb +2 -2
  16. data/lib/tensor_stream/evaluator/base_evaluator.rb +18 -18
  17. data/lib/tensor_stream/evaluator/buffer.rb +1 -1
  18. data/lib/tensor_stream/evaluator/evaluator.rb +2 -2
  19. data/lib/tensor_stream/evaluator/operation_helpers/array_ops_helper.rb +41 -41
  20. data/lib/tensor_stream/evaluator/operation_helpers/math_helper.rb +1 -1
  21. data/lib/tensor_stream/evaluator/ruby/array_ops.rb +39 -39
  22. data/lib/tensor_stream/evaluator/ruby/check_ops.rb +2 -2
  23. data/lib/tensor_stream/evaluator/ruby/images_ops.rb +18 -18
  24. data/lib/tensor_stream/evaluator/ruby/math_ops.rb +13 -14
  25. data/lib/tensor_stream/evaluator/ruby/nn_ops.rb +33 -36
  26. data/lib/tensor_stream/evaluator/ruby/random_ops.rb +20 -21
  27. data/lib/tensor_stream/evaluator/ruby_evaluator.rb +36 -49
  28. data/lib/tensor_stream/exceptions.rb +1 -1
  29. data/lib/tensor_stream/generated_stub/ops.rb +691 -0
  30. data/lib/tensor_stream/generated_stub/stub_file.erb +24 -0
  31. data/lib/tensor_stream/graph.rb +18 -18
  32. data/lib/tensor_stream/graph_builder.rb +17 -17
  33. data/lib/tensor_stream/graph_deserializers/protobuf.rb +97 -97
  34. data/lib/tensor_stream/graph_deserializers/yaml_loader.rb +1 -1
  35. data/lib/tensor_stream/graph_keys.rb +3 -3
  36. data/lib/tensor_stream/graph_serializers/graphml.rb +33 -33
  37. data/lib/tensor_stream/graph_serializers/packer.rb +23 -23
  38. data/lib/tensor_stream/graph_serializers/pbtext.rb +38 -42
  39. data/lib/tensor_stream/graph_serializers/serializer.rb +3 -2
  40. data/lib/tensor_stream/graph_serializers/yaml.rb +5 -5
  41. data/lib/tensor_stream/helpers/infer_shape.rb +56 -56
  42. data/lib/tensor_stream/helpers/op_helper.rb +8 -9
  43. data/lib/tensor_stream/helpers/string_helper.rb +15 -15
  44. data/lib/tensor_stream/helpers/tensor_mixins.rb +17 -17
  45. data/lib/tensor_stream/images.rb +1 -1
  46. data/lib/tensor_stream/initializer.rb +1 -1
  47. data/lib/tensor_stream/math_gradients.rb +28 -187
  48. data/lib/tensor_stream/monkey_patches/array.rb +1 -1
  49. data/lib/tensor_stream/monkey_patches/float.rb +1 -1
  50. data/lib/tensor_stream/monkey_patches/integer.rb +1 -1
  51. data/lib/tensor_stream/monkey_patches/op_patch.rb +5 -5
  52. data/lib/tensor_stream/monkey_patches/patch.rb +1 -1
  53. data/lib/tensor_stream/nn/nn_ops.rb +17 -15
  54. data/lib/tensor_stream/op_maker.rb +180 -0
  55. data/lib/tensor_stream/operation.rb +17 -17
  56. data/lib/tensor_stream/ops.rb +95 -384
  57. data/lib/tensor_stream/ops/add.rb +23 -0
  58. data/lib/tensor_stream/ops/argmax.rb +14 -0
  59. data/lib/tensor_stream/ops/argmin.rb +14 -0
  60. data/lib/tensor_stream/ops/case.rb +17 -0
  61. data/lib/tensor_stream/ops/cast.rb +15 -0
  62. data/lib/tensor_stream/ops/ceil.rb +15 -0
  63. data/lib/tensor_stream/ops/const.rb +0 -0
  64. data/lib/tensor_stream/ops/cos.rb +10 -0
  65. data/lib/tensor_stream/ops/div.rb +21 -0
  66. data/lib/tensor_stream/ops/equal.rb +15 -0
  67. data/lib/tensor_stream/ops/expand_dims.rb +17 -0
  68. data/lib/tensor_stream/ops/fill.rb +19 -0
  69. data/lib/tensor_stream/ops/floor.rb +15 -0
  70. data/lib/tensor_stream/ops/floor_div.rb +15 -0
  71. data/lib/tensor_stream/ops/greater.rb +11 -0
  72. data/lib/tensor_stream/ops/greater_equal.rb +11 -0
  73. data/lib/tensor_stream/ops/less_equal.rb +15 -0
  74. data/lib/tensor_stream/ops/log.rb +14 -0
  75. data/lib/tensor_stream/ops/mat_mul.rb +60 -0
  76. data/lib/tensor_stream/ops/max.rb +15 -0
  77. data/lib/tensor_stream/ops/min.rb +15 -0
  78. data/lib/tensor_stream/ops/mod.rb +23 -0
  79. data/lib/tensor_stream/ops/mul.rb +21 -0
  80. data/lib/tensor_stream/ops/negate.rb +14 -0
  81. data/lib/tensor_stream/ops/ones_like.rb +19 -0
  82. data/lib/tensor_stream/ops/pow.rb +25 -0
  83. data/lib/tensor_stream/ops/prod.rb +60 -0
  84. data/lib/tensor_stream/ops/random_uniform.rb +18 -0
  85. data/lib/tensor_stream/ops/range.rb +20 -0
  86. data/lib/tensor_stream/ops/rank.rb +13 -0
  87. data/lib/tensor_stream/ops/reshape.rb +24 -0
  88. data/lib/tensor_stream/ops/round.rb +15 -0
  89. data/lib/tensor_stream/ops/shape.rb +14 -0
  90. data/lib/tensor_stream/ops/sigmoid.rb +10 -0
  91. data/lib/tensor_stream/ops/sign.rb +12 -0
  92. data/lib/tensor_stream/ops/sin.rb +10 -0
  93. data/lib/tensor_stream/ops/size.rb +16 -0
  94. data/lib/tensor_stream/ops/sub.rb +24 -0
  95. data/lib/tensor_stream/ops/sum.rb +27 -0
  96. data/lib/tensor_stream/ops/tan.rb +12 -0
  97. data/lib/tensor_stream/ops/tanh.rb +10 -0
  98. data/lib/tensor_stream/ops/tile.rb +19 -0
  99. data/lib/tensor_stream/ops/zeros.rb +15 -0
  100. data/lib/tensor_stream/placeholder.rb +2 -2
  101. data/lib/tensor_stream/profile/report_tool.rb +3 -3
  102. data/lib/tensor_stream/session.rb +36 -38
  103. data/lib/tensor_stream/tensor.rb +2 -2
  104. data/lib/tensor_stream/tensor_shape.rb +4 -4
  105. data/lib/tensor_stream/train/adadelta_optimizer.rb +8 -8
  106. data/lib/tensor_stream/train/adagrad_optimizer.rb +3 -3
  107. data/lib/tensor_stream/train/adam_optimizer.rb +11 -11
  108. data/lib/tensor_stream/train/learning_rate_decay.rb +2 -2
  109. data/lib/tensor_stream/train/momentum_optimizer.rb +7 -7
  110. data/lib/tensor_stream/train/optimizer.rb +9 -9
  111. data/lib/tensor_stream/train/rmsprop_optimizer.rb +16 -16
  112. data/lib/tensor_stream/train/saver.rb +14 -14
  113. data/lib/tensor_stream/train/slot_creator.rb +6 -6
  114. data/lib/tensor_stream/train/utils.rb +12 -12
  115. data/lib/tensor_stream/trainer.rb +10 -10
  116. data/lib/tensor_stream/types.rb +1 -1
  117. data/lib/tensor_stream/utils.rb +33 -32
  118. data/lib/tensor_stream/utils/freezer.rb +5 -5
  119. data/lib/tensor_stream/variable.rb +5 -5
  120. data/lib/tensor_stream/variable_scope.rb +1 -1
  121. data/lib/tensor_stream/version.rb +1 -1
  122. data/samples/{iris.data → datasets/iris.data} +0 -0
  123. data/samples/jupyter_notebooks/linear_regression.ipynb +463 -0
  124. data/samples/{iris.rb → neural_networks/iris.rb} +21 -23
  125. data/samples/{mnist_data.rb → neural_networks/mnist_data.rb} +8 -8
  126. data/samples/neural_networks/raw_neural_net_sample.rb +112 -0
  127. data/samples/{rnn.rb → neural_networks/rnn.rb} +28 -31
  128. data/samples/{nearest_neighbor.rb → others/nearest_neighbor.rb} +12 -12
  129. data/samples/regression/linear_regression.rb +63 -0
  130. data/samples/{logistic_regression.rb → regression/logistic_regression.rb} +14 -16
  131. data/tensor_stream.gemspec +9 -8
  132. metadata +89 -19
  133. data/data_1.json +0 -4764
  134. data/data_2.json +0 -4764
  135. data/data_actual.json +0 -28
  136. data/data_expected.json +0 -28
  137. data/data_input.json +0 -28
  138. data/samples/error.graphml +0 -2755
  139. data/samples/gradient_sample.graphml +0 -1255
  140. data/samples/linear_regression.rb +0 -69
  141. data/samples/multigpu.rb +0 -73
  142. data/samples/raw_neural_net_sample.rb +0 -112
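
Most of this release is a reorganization rather than a behavior change: the monolithic ops.rb shrinks (+95 -384), individual ops gain their own files under data/lib/tensor_stream/ops/, a stub generator (bin/stubgen plus op_maker.rb) produces data/lib/tensor_stream/generated_stub/ops.rb, and the samples are regrouped into datasets/, neural_networks/, regression/ and others/. The user-facing, TensorFlow-style API appears unchanged; the snippet below is a minimal usage sketch based on the gem's README, included only for orientation and not part of the diff itself:

# Minimal sketch of the tensor_stream public API (per the gem's README).
# Illustrative only; the :float32 placeholders and values are examples, not taken from this diff.
require "tensor_stream"

ts = TensorStream

a = ts.placeholder(:float32)
b = ts.placeholder(:float32)
c = ts.add(a, b) # backed by the per-op definition in data/lib/tensor_stream/ops/add.rb

sess = ts.session
puts sess.run(c, feed_dict: { a => 1.0, b => 2.5 }) # => 3.5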

data/lib/tensor_stream/evaluator/ruby/check_ops.rb

@@ -1,6 +1,6 @@
  module TensorStream
  module CheckOps
- def CheckOps.included(klass)
+ def self.included(klass)
  klass.class_eval do
  register_op :assert_equal do |context, tensor, inputs|
  result = call_vector_op(tensor, :equal, inputs[0], inputs[1], context) { |t, u| t == u }
@@ -14,4 +14,4 @@ module TensorStream
  end
  end
  end
- end
+ end

data/lib/tensor_stream/evaluator/ruby/images_ops.rb

@@ -1,8 +1,8 @@
- require 'chunky_png'
+ require "chunky_png"

  module TensorStream
  module ImagesOps
- def ImagesOps.included(klass)
+ def self.included(klass)
  klass.class_eval do
  register_op :decode_png do |_context, tensor, inputs|
  content = inputs[0]
@@ -26,26 +26,26 @@ module TensorStream
  end
  end

- image_data = image.pixels.collect do |pixel|
+ image_data = image.pixels.collect { |pixel|
  color_values = if channels == 4
- [ChunkyPNG::Color.r(pixel),
- ChunkyPNG::Color.g(pixel),
- ChunkyPNG::Color.b(pixel),
- ChunkyPNG::Color.a(pixel)]
- elsif channels == 3
- [ChunkyPNG::Color.r(pixel),
- ChunkyPNG::Color.g(pixel),
- ChunkyPNG::Color.b(pixel)]
- elsif channels == 1
- [ChunkyPNG::Color.r(pixel)]
- else
- raise "Invalid channel value #{channels}"
- end
+ [ChunkyPNG::Color.r(pixel),
+ ChunkyPNG::Color.g(pixel),
+ ChunkyPNG::Color.b(pixel),
+ ChunkyPNG::Color.a(pixel),]
+ elsif channels == 3
+ [ChunkyPNG::Color.r(pixel),
+ ChunkyPNG::Color.g(pixel),
+ ChunkyPNG::Color.b(pixel),]
+ elsif channels == 1
+ [ChunkyPNG::Color.r(pixel)]
+ else
+ raise "Invalid channel value #{channels}"
+ end

  color_values.map!(&:to_f) if fp_type?(tensor.data_type)

  color_values
- end
+ }
  TensorShape.reshape(image_data, [image.height, image.width, channels])
  end

@@ -85,4 +85,4 @@ module TensorStream
  end
  end
  end
- end
+ end

data/lib/tensor_stream/evaluator/ruby/math_ops.rb

@@ -1,13 +1,13 @@
  module TensorStream
  module MathOps
- def MathOps.included(klass)
+ def self.included(klass)
  klass.class_eval do
  register_op :tanh, no_eval: true do |context, _tensor, inputs|
  call_op(inputs[0], context) { |t, _b| Math.tanh(t) }
  end

  register_op :tan, no_eval: true do |context, tensor, inputs|
- call_op(inputs[0], context) { |t, _b| Math.tan(t) }
+ call_op(inputs[0], context) { |t, _b| Math.tan(t) }
  end

  register_op :atan, no_eval: true do |context, _tensor, inputs|
@@ -20,7 +20,7 @@ module TensorStream

  register_op :add, no_eval: true do |context, tensor, inputs|
  a, b = inputs
- call_vector_op(tensor, :add, a, b, context) { |t, u| t + u }
+ call_vector_op(tensor, :add, a, b, context) { |t, u| t + u }
  end

  register_op :add_n, no_eval: true do |context, tensor, inputs|
@@ -58,7 +58,7 @@ module TensorStream

  register_op :mul, no_eval: true do |context, tensor, inputs|
  a, b = inputs
- call_vector_op(tensor, :mul, a, b, context) { |t, u| t * u }
+ call_vector_op(tensor, :mul, a, b, context) { |t, u| t * u }
  end

  register_op :pow, no_eval: true do |context, tensor, inputs|
@@ -141,13 +141,13 @@ module TensorStream
  raise TensorStream::InvalidArgumentError, "Expected dimension in the range [#{-rank},#{rank}) but got #{axis}" if axis < -rank || axis >= rank

  new_shape = shape_eval(inputs[0])
- ns = new_shape.each_with_index.collect do |shape, index|
+ ns = new_shape.each_with_index.collect { |shape, index|
  next nil if index == axis

  shape
- end.compact
+ }.compact

- Tensor.cast_dtype(TensorShape.reshape(get_op_with_axis(inputs[0], axis, 0, :max), ns), tensor.data_type)
+ Tensor.cast_dtype(TensorShape.reshape(get_op_with_axis(inputs[0], axis, 0, :max), ns), tensor.options[:output_type])
  end

  register_op(%i[argmin arg_min]) do |_context, tensor, inputs|
@@ -156,13 +156,13 @@ module TensorStream
  raise TensorStream::InvalidArgumentError, "Expected dimension in the range [#{-rank},#{rank}) but got #{axis}" if axis < -rank || axis >= rank

  new_shape = shape_eval(inputs[0])
- ns = new_shape.each_with_index.collect do |shape, index|
+ ns = new_shape.each_with_index.collect { |shape, index|
  next nil if index == axis

  shape
- end.compact
+ }.compact

- Tensor.cast_dtype(TensorShape.reshape(get_op_with_axis(inputs[0], axis, 0, :min), ns), tensor.data_type)
+ Tensor.cast_dtype(TensorShape.reshape(get_op_with_axis(inputs[0], axis, 0, :min), ns), tensor.options[:output_type])
  end

  register_op :cumprod do |context, tensor, inputs|
@@ -179,9 +179,9 @@ module TensorStream
  arr = [1] + arr if exclusive

  start_prod = arr[0]
- mapped = arr[1...count].map do |v|
+ mapped = arr[1...count].map { |v|
  start_prod = vector_op(start_prod, v) { |a, b| a * b }
- end
+ }

  arr = [arr[0]] + mapped
  reverse_option ? arr.reverse : arr
@@ -190,7 +190,6 @@ module TensorStream
  end

  register_op :sum, noop: true do |context, tensor, _inputs|
-
  reduction(context, tensor) do |arr|
  reduced_val = arr[0]
  arr[1..arr.size].each do |v|
@@ -269,4 +268,4 @@ module TensorStream
  end
  end
  end
- end
+ end

data/lib/tensor_stream/evaluator/ruby/nn_ops.rb

@@ -1,7 +1,7 @@
  module TensorStream
  ## Collection of machine learning related ops
  module NNOps
- def NNOps.included(klass)
+ def self.included(klass)
  klass.class_eval do
  register_op :apply_gradient_descent do |context, tensor, inputs|
  target_var, learning_rate, delta = inputs
@@ -17,10 +17,10 @@ module TensorStream
  assign_acc = tensor.inputs[1]
  assign_acc.container = multi_array_op(->(t, u) { t * momentum + u }, momentum_var, grad)
  assign.container = if tensor.options[:use_nesterov]
- multi_array_op(->(v, g, acc) { v - (g * learning_rate + acc * momentum * learning_rate) }, target_var, grad, momentum_var)
- else
- multi_array_op(->(v, acc) { v - acc * learning_rate }, target_var, momentum_var)
- end
+ multi_array_op(->(v, g, acc) { v - (g * learning_rate + acc * momentum * learning_rate) }, target_var, grad, momentum_var)
+ else
+ multi_array_op(->(v, acc) { v - acc * learning_rate }, target_var, momentum_var)
+ end

  assign.container
  end
@@ -52,9 +52,9 @@ module TensorStream
  assign_m = tensor.inputs[1]
  assign_v = tensor.inputs[2]

- assign_m.container = multi_array_op(->(u_d , g) { u_d + (g - u_d) * (1.0 - beta1_t) }, m, grad)
- assign_v.container = multi_array_op(->(u_d , v_d) { u_d + (v_d**2 - u_d) * (1.0 - beta2_t)}, v, grad)
- assign.container = multi_array_op(->(t, m_d , v_d) { t - ((m_d * alpha) / (Math.sqrt(v_d) + epsilon_t)) }, target_var, assign_m.container, assign_v.container)
+ assign_m.container = multi_array_op(->(u_d, g) { u_d + (g - u_d) * (1.0 - beta1_t) }, m, grad)
+ assign_v.container = multi_array_op(->(u_d, v_d) { u_d + (v_d**2 - u_d) * (1.0 - beta2_t)}, v, grad)
+ assign.container = multi_array_op(->(t, m_d, v_d) { t - ((m_d * alpha) / (Math.sqrt(v_d) + epsilon_t)) }, target_var, assign_m.container, assign_v.container)
  assign.container
  end

@@ -123,11 +123,11 @@ module TensorStream
  labels = last_axis(inputs[1])
  num_classes = input_shape.last

- labels = labels.map do |l|
+ labels = labels.map { |l|
  one_hot = Array.new(num_classes) { 0 }
  one_hot[l] = 1
  one_hot
- end
+ }

  func = lambda { |logits, label|
  c = logits.max
@@ -173,9 +173,9 @@ module TensorStream
  if input_shape.size == 1
  func.call(last_dimen_list)
  else
- arr = last_dimen_list.collect do |list|
+ arr = last_dimen_list.collect { |list|
  func.call(list)
- end
+ }
  TensorShape.reshape(arr, input_shape)
  end
  end
@@ -202,9 +202,9 @@ module TensorStream
  if input_shape.size == 1
  func.call(last_dimen_list, last_grad_list)
  else
- arr = last_dimen_list.zip(last_grad_list).collect do |list, last_grad|
+ arr = last_dimen_list.zip(last_grad_list).collect { |list, last_grad|
  func.call(list, last_grad)
- end
+ }
  TensorShape.reshape(arr, input_shape)
  end
  end
@@ -226,32 +226,31 @@ module TensorStream

  _batch, height, width, _channels = shape_eval(inputs[0])
  padding = conv2d_padding_options(padding_option, filter_shape, height, width, height_stride, width_stride)
- inputs[0].collect do |image|
+ inputs[0].collect { |image|
  f_height, f_width, _input_channels, _output_channels = filter_shape
- (-padding[0]...height).step(height_stride).map do |y|
+ (-padding[0]...height).step(height_stride).map { |y|
  next if (y + f_height) > (height + padding[2])

- (-padding[1]...width).step(width_stride).map do |x|
+ (-padding[1]...width).step(width_stride).map { |x|
  next if (x + f_width) > (width + padding[3])

- filter_result = (0...f_height).map do |f_y|
- (0...f_width).map do |f_x|
+ filter_result = (0...f_height).map { |f_y|
+ (0...f_width).map { |f_x|
  f_element = filter[f_y][f_x]

  next if (x + f_x >= width) || (x + f_x < 0)
  next if (y + f_y >= height) || (y + f_y < 0)

-
  image[y + f_y][x + f_x].zip(f_element).map do |image_channel, filter_channels|
  filter_channels.map { |c| image_channel * c }
  end
- end.compact
- end.flatten(2)
+ }.compact
+ }.flatten(2)

  filter_result.transpose.map { |e| e.reduce(:+) }
- end.compact
- end.compact
- end.compact
+ }.compact
+ }.compact
+ }.compact
  end

  register_op :conv2d_backprop_input do |_context, tensor, inputs|
@@ -285,15 +284,14 @@ module TensorStream
  img_grad = grad[b][(y + padding[0]) / height_stride][(x + padding[1]) / width_stride]

  channels.times.each do |c|
- g = Array.new(output_channels) do |o_c|
+ g = Array.new(output_channels) { |o_c|
  filter[f_y][f_x][c][o_c] * img_grad[o_c]
- end.reduce(:+)
+ }.reduce(:+)

  image_gradient[y + f_y][x + f_x][c] += g
  end
  end
  end
-
  end
  end

@@ -319,19 +317,19 @@ module TensorStream

  ((0 - padding[0])...height).step(height_stride).each do |y|
  ((0 - padding[1])...width).step(width_stride).each do |x|
- filter_result = (0...f_height).map do |f_y|
+ filter_result = (0...f_height).map { |f_y|
  (0...f_width).map do |f_x|
  next Array.new(input_channels * output_channels) { 0.0 } if x + f_x >= width || (x + f_x < 0) || ((x + f_width) > (width + padding[3]))
  next Array.new(input_channels * output_channels) { 0.0 } if y + f_y >= height || (y + f_y < 0) || ((y + f_height) > (height + padding[2]))

- image_grad = grad[index][(y + padding[0]) / height_stride][(x + padding[1])/ width_stride]
+ image_grad = grad[index][(y + padding[0]) / height_stride][(x + padding[1]) / width_stride]
  image[y + f_y][x + f_x].map do |image_channel|
  Array.new(output_channels) do |o_c|
  image_channel * image_grad[o_c]
  end
  end
  end
- end.flatten
+ }.flatten

  filter_gradient_sum = multi_array_op(->(a, b) { a + b }, filter_gradient_sum, filter_result)
  end
@@ -341,17 +339,16 @@ module TensorStream
  TensorShape.reshape(filter_gradient_sum, filter_shape)
  end

-
  def conv2d_padding_options(padding_option, filter_shape, height, width, h_stride, w_stride)
  case padding_option
- when 'SAME'
+ when "SAME"
  [
  calc_pad(height, h_stride, filter_shape[0]),
  calc_pad(width, w_stride, filter_shape[1]),
  calc_pad(height, h_stride, filter_shape[0], true),
- calc_pad(width, w_stride, filter_shape[1], true)
+ calc_pad(width, w_stride, filter_shape[1], true),
  ]
- when 'VALID'
+ when "VALID"
  [0, 0, 0, 0]
  else
  raise TensorStream::ValueError, "Unsupported padding value #{padding_option}, valid values 'SAME', 'VALID'"
@@ -369,4 +366,4 @@ module TensorStream
  end
  end
  end
- end
+ end

data/lib/tensor_stream/evaluator/ruby/random_ops.rb

@@ -1,7 +1,7 @@
  module TensorStream
  ## Collection of machine learning related ops
  module RandomOps
- def RandomOps.included(klass)
+ def self.included(klass)
  klass.class_eval do
  register_op :glorot_uniform, no_eval: true do |_context, tensor, _inputs|
  seed = tensor.options[:seed]
@@ -9,12 +9,12 @@ module TensorStream

  shape = tensor.options[:shape] || tensor.shape.shape
  fan_in, fan_out = if shape.size.zero?
- [1, 1]
- elsif shape.size == 1
- [1, shape[0]]
- else
- [shape[0], shape.last]
- end
+ [1, 1]
+ elsif shape.size == 1
+ [1, shape[0]]
+ else
+ [shape[0], shape.last]
+ end

  limit = Math.sqrt(6.0 / (fan_in + fan_out))

@@ -46,7 +46,6 @@ module TensorStream
  generate_vector(shape, generator: generator)
  end

-
  register_op :truncated_normal, no_eval: true do |_context, tensor, inputs|
  seed = tensor.options[:seed]
  random = _get_randomizer(tensor, seed)
@@ -54,14 +53,14 @@ module TensorStream
  random = _get_randomizer(tensor, seed)
  generator = -> { r.rand }
  shape = inputs[0] || tensor.shape.shape
- random_values = Array.new(shape.reduce(:*) || 1) do
+ random_values = Array.new(shape.reduce(:*) || 1) {
  generator.call
- end
+ }
  mean = random_values.reduce(:+) / random_values.size

  # standard deviation

- stddev = Math.sqrt( random_values.map { |v| ( v - mean )**2 }.reduce(:+) / (random_values.size - 1) )
+ stddev = Math.sqrt(random_values.map { |v| (v - mean)**2 }.reduce(:+) / (random_values.size - 1))
  minval = random_values.min
  maxval = random_values.max
  max_iterations = 100
@@ -73,14 +72,14 @@ module TensorStream
  maxval = a
  stddev = -stddev
  end
-
- norm_min = (minval - mean) / stddev;
- norm_max = (maxval - mean) / stddev;
- sqrt_factor = Math.sqrt((norm_min * norm_min) + 4.0);
- cutoff = 2.0 * Math.exp( 0.5 + (norm_min * (norm_min - sqrt_factor)) / 4.0 ) / (norm_min + sqrt_factor)
- diff = norm_max - norm_min;

- val = random_values.map do |v|
+ norm_min = (minval - mean) / stddev
+ norm_max = (maxval - mean) / stddev
+ sqrt_factor = Math.sqrt((norm_min * norm_min) + 4.0)
+ cutoff = 2.0 * Math.exp(0.5 + (norm_min * (norm_min - sqrt_factor)) / 4.0) / (norm_min + sqrt_factor)
+ diff = norm_max - norm_min
+
+ val = random_values.map { |v|
  iterations = 0
  pick = v
  while (pick > norm_max) || (pick < norm_min)
@@ -92,12 +91,12 @@ module TensorStream
  end
  end

- pick
- end
+ pick
+ }

  TensorShape.reshape(val, shape)
  end
  end
  end
  end
- end
+ end

data/lib/tensor_stream/evaluator/ruby_evaluator.rb

@@ -1,13 +1,13 @@
- require 'tensor_stream/evaluator/operation_helpers/random_gaussian'
- require 'tensor_stream/evaluator/operation_helpers/array_ops_helper'
- require 'tensor_stream/evaluator/operation_helpers/math_helper'
- require 'tensor_stream/evaluator/base_evaluator'
- require 'tensor_stream/evaluator/ruby/math_ops'
- require 'tensor_stream/evaluator/ruby/nn_ops'
- require 'tensor_stream/evaluator/ruby/array_ops'
- require 'tensor_stream/evaluator/ruby/random_ops'
- require 'tensor_stream/evaluator/ruby/images_ops'
- require 'tensor_stream/evaluator/ruby/check_ops'
+ require "tensor_stream/evaluator/operation_helpers/random_gaussian"
+ require "tensor_stream/evaluator/operation_helpers/array_ops_helper"
+ require "tensor_stream/evaluator/operation_helpers/math_helper"
+ require "tensor_stream/evaluator/base_evaluator"
+ require "tensor_stream/evaluator/ruby/math_ops"
+ require "tensor_stream/evaluator/ruby/nn_ops"
+ require "tensor_stream/evaluator/ruby/array_ops"
+ require "tensor_stream/evaluator/ruby/random_ops"
+ require "tensor_stream/evaluator/ruby/images_ops"
+ require "tensor_stream/evaluator/ruby/check_ops"

  module TensorStream
  module Evaluator
@@ -49,12 +49,12 @@ module TensorStream

  child_context = execution_context.dup
  res = if tensor.is_a?(Operation)
- eval_operation(tensor, child_context)
- elsif !tensor.is_a?(Tensor)
- tensor
- else
- tensor.op
- end
+ eval_operation(tensor, child_context)
+ elsif !tensor.is_a?(Tensor)
+ tensor
+ else
+ tensor.op
+ end
  execution_context.deep_merge!(returns: child_context[:returns])
  res
  end
@@ -84,6 +84,7 @@ module TensorStream

  def prepare_input(tensor, context, options = {})
  return nil unless tensor
+
  if options[:noop]
  tensor
  elsif options[:no_eval]
@@ -102,10 +103,10 @@ module TensorStream
  end

  register_op(:cast) do |context, tensor, inputs|
- call_op(inputs[0], context) { |t, _b| Tensor.cast_dtype(t, tensor.data_type) }
+ call_op(inputs[0], context) { |t, _b| Tensor.cast_dtype(t, tensor.data_type) }
  end

- register_op(:sign) do |context, tensor, inputs|
+ register_op(:sign) do |context, _tensor, inputs|
  call_op(inputs[0], context) do |x, _b|
  if x.zero? || (x.is_a?(Float) && x.nan?)
  0
@@ -114,7 +115,7 @@ module TensorStream
  elsif x > 0
  1
  else
- raise 'assert: cannot be here'
+ raise "assert: cannot be here"
  end
  end
  end
@@ -213,7 +214,7 @@ module TensorStream
  end

  register_op :print do |_context, tensor, inputs|
- puts "#{tensor.options.fetch(:message, '')} #{inputs[1]}"
+ puts "#{tensor.options.fetch(:message, "")} #{inputs[1]}"
  inputs[0]
  end

@@ -245,12 +246,12 @@ module TensorStream
  val = savable.container
  packed_data = Zlib::Deflate.deflate(TensorStream::Packer.pack(val, savable.data_type))
  variables[savable.name] = {
- 'shape' => shape_eval(val),
- 'data' => Base64.strict_encode64(packed_data)
+ "shape" => shape_eval(val),
+ "data" => Base64.strict_encode64(packed_data),
  }
  end

- File.write(outputfile, { 'variables' => variables }.to_yaml)
+ File.write(outputfile, {"variables" => variables}.to_yaml)
  nil
  end

@@ -262,10 +263,10 @@ module TensorStream
  input_dump = YAML.safe_load(File.read(filename), [Symbol])
  vars = tensor.graph.get_collection(GraphKeys::GLOBAL_VARIABLES)

- vars.select! { |v| input_dump['variables'].key?(v.name) && tensor_names.include?(v.name) }
+ vars.select! { |v| input_dump["variables"].key?(v.name) && tensor_names.include?(v.name) }
  vars.each do |variable|
- data = TensorStream::Packer.unpack(Zlib::Inflate.inflate(Base64.decode64(input_dump['variables'][variable.name]['data'])), variable.data_type)
- shape = input_dump['variables'][variable.name]['shape']
+ data = TensorStream::Packer.unpack(Zlib::Inflate.inflate(Base64.decode64(input_dump["variables"][variable.name]["data"])), variable.data_type)
+ shape = input_dump["variables"][variable.name]["shape"]
  variable.buffer = nil
  variable.value = TensorShape.reshape(data, shape)
  end
@@ -277,6 +278,7 @@ module TensorStream
  message = tensor.options[:message]
  call_op(inputs[0], context) do |t, _b|
  raise TensorStream::InvalidArgumentError, "#{message} Invalid argument" if t.nan? || t.infinite?
+
  t
  end
  end
@@ -289,9 +291,7 @@ module TensorStream
  # puts "result done ruby #{object_id}: #{tensor.name}"
  # assertions to make sure inferred shapes == actual evaluated shapes
  if tensor.shape.known? && (result.is_a?(Array) || result.is_a?(Float) || result.is_a?(Integer))
- if shape_eval(result) != tensor.shape.shape
- raise "assert error #{tensor.name} #{shape_eval(result)} != #{tensor.shape.shape}"
- end
+ raise "assert error #{tensor.name} #{shape_eval(result)} != #{tensor.shape.shape}" if shape_eval(result) != tensor.shape.shape
  end

  if tensor.breakpoint
@@ -308,7 +308,7 @@ module TensorStream
  shape: shape_eval(result),
  source: tensor.source,
  description: tensor.to_math(true, 1),
- value: result
+ value: result,
  }
  end
  @context[tensor.name] = result
@@ -317,22 +317,9 @@ module TensorStream
  raise e, "error #{e.message} while evaluating #{tensor.name} defined at #{tensor.source}"
  rescue TensorStreamError => e
  raise e, "error #{e.message} while evaluating #{tensor.name} defined at #{tensor.source}"
- rescue StandardError => e
- puts e.message
+ rescue => e
+ puts e.message
  puts e.backtrace.join("\n")
- # shape_a = a.shape.shape if a
- # shape_b = b.shape.shape if b
- # dtype_a = a.data_type if a
- # dtype_b = b.data_type if b
- # a = complete_eval(a, child_context)
- # b = complete_eval(b, child_context)
- # puts "name: #{tensor.given_name}"
- # # puts "op: #{tensor.to_math(true, 1)}"
- # puts "A #{shape_a} #{dtype_a}: #{a}" if a
- # puts "B #{shape_b} #{dtype_b}: #{b}" if b
- # dump_intermediates if @log_intermediates
- # File.write('/home/jedld/workspace/tensor_stream/samples/error.graphml', TensorStream::Graphml.new.get_string(tensor, @session))
- # File.write('/Users/josephemmanueldayo/workspace/gradients.graphml', TensorStream::Graphml.new.get_string(tensor, @session))
  raise EvaluatorExcecutionException.new(e, tensor), "error #{e.message} while evaluating #{tensor.name} : #{tensor.to_math(true, 1)} defined at #{tensor.source}"
  end

@@ -367,7 +354,7 @@ module TensorStream
  TensorStream.send(op.to_sym, a, b)
  end

- def process_vector_math_op(tensor, a, b, child_context, &block)
+ def process_vector_math_op(tensor, a, b, child_context, &block)
  eval_a = global_eval(tensor, a, child_context) unless a.nil?
  eval_b = global_eval(tensor, b, child_context) unless b.nil?

@@ -390,7 +377,7 @@ module TensorStream
  # multi array ops on ruby arrays with same sizes
  def multi_array_op(func, *args)
  elem = args[0]
- if (elem.is_a?(Array))
+ if elem.is_a?(Array)
  elem.each_with_index.collect do |_item, index|
  indexed_args = args.collect { |a| a[index] }
  multi_array_op(func, *indexed_args)
@@ -497,10 +484,10 @@ module TensorStream
  end
  arr << "============== end ====================="
  str = arr.join("\n")
- File.write('/tmp/intermediates.txt', str)
+ File.write("/tmp/intermediates.txt", str)
  end
  end
  end

- TensorStream::Evaluator.register_evaluator(TensorStream::Evaluator::RubyEvaluator, 'ruby')
+ TensorStream::Evaluator.register_evaluator(TensorStream::Evaluator::RubyEvaluator, "ruby")