tensor_stream 0.9.7 → 0.9.8

data/data_actual.json ADDED
@@ -0,0 +1,28 @@
+
+
+
+
+
+ **********
+ ********************
+ ********************
+ ********************
+ ********************
+ ********************
+ ********************
+ ***************
+ **********
+ **********
+ **********
+ **********
+ ***********
+ ***********
+ **********
+ ***********
+ **********
+ ***********
+ **********
+ **********
+ *********
+ *********
+ *********
data/data_expected.json ADDED
@@ -0,0 +1,28 @@
+
+
+
+
+
+ **********
+ ********************
+ ********************
+ ********************
+ ********************
+ ********************
+ ********************
+ ***************
+ **********
+ **********
+ **********
+ **********
+ ***********
+ ***********
+ **********
+ ***********
+ **********
+ ***********
+ **********
+ **********
+ *********
+ *********
+ *********
data/data_input.json ADDED
@@ -0,0 +1,28 @@
+
+
+
+
+
+
+
+ ******
+ ****************
+ ****************
+ ***********
+ ****
+ ****
+ ****
+ ****
+ ****
+ ****
+ ***
+ ****
+ ****
+ *****
+ ****
+ *****
+ ****
+ *****
+ *****
+ ****
+
@@ -214,25 +214,32 @@ module TensorStream
 
       register_op :conv2d do |_context, tensor, inputs|
         filter = inputs[1]
+
        filter_shape = shape_eval(filter)
        strides = tensor.options[:strides]
+        padding_option = tensor.options[:padding]
        height_stride = strides[1]
        width_stride = strides[2]
 
        raise TensorStream::ValueError, " Current implementation does not yet support strides in the batch and depth dimensions." if strides[0] != 1 || strides[3] != 1
 
+        _batch, height, width, _channels = shape_eval(inputs[0])
+        padding = conv2d_padding_options(padding_option, filter_shape, height, width, height_stride, width_stride)
        inputs[0].collect do |image|
-          height, width, _channels = shape_eval(image)
          f_height, f_width, _input_channels, _output_channels = filter_shape
+          (-padding[0]...height).step(height_stride).map do |y|
+            next if (y + f_height) > (height + padding[2])
+
+            (-padding[1]...width).step(width_stride).map do |x|
+              next if (x + f_width) > (width + padding[3])
 
-          (0...height).step(height_stride).map do |y|
-            (0...width).step(width_stride).map do |x|
              filter_result = (0...f_height).map do |f_y|
                (0...f_width).map do |f_x|
                  f_element = filter[f_y][f_x]
 
-                  next if x + f_x >= width
-                  next if y + f_y >= height
+                  next if (x + f_x >= width) || (x + f_x < 0)
+                  next if (y + f_y >= height) || (y + f_y < 0)
+
 
                  image[y + f_y][x + f_x].zip(f_element).map do |image_channel, filter_channels|
                    filter_channels.map { |c| image_channel * c }
@@ -241,16 +248,15 @@ module TensorStream
              end.flatten(2)
 
              filter_result.transpose.map { |e| e.reduce(:+) }
-            end
-          end
-        end
+            end.compact
+          end.compact
+        end.compact
      end
 
      register_op :conv2d_backprop_input do |_context, tensor, inputs|
        image_shape, filter, grad = inputs
-
        strides = tensor.options[:strides]
-
+        padding_option = tensor.options[:padding]
        height_stride = strides[1]
        width_stride = strides[2]
 
@@ -259,25 +265,34 @@ module TensorStream
        f_height, f_width, _input_channels, output_channels = filter_shape
        batch, height, width, channels = image_shape
 
+        padding = conv2d_padding_options(padding_option, filter_shape, height, width, height_stride, width_stride)
+
        Array.new(batch) do |b|
          image_gradient = TensorShape.reshape(Array.new(height * width * channels) { 0.0 }, [height, width, channels])
 
-          (0...height).step(height_stride).each do |y|
-            (0...width).step(width_stride).each do |x|
-              img_grad = grad[b][y/height_stride][x/width_stride]
+          ((0 - padding[0])...height).step(height_stride).each do |y|
+            next if (y + f_height) > (height + padding[2])
+
+            ((0 - padding[1])...width).step(width_stride).each do |x|
+              next if (x + f_width) > (width + padding[3])
 
              (0...f_height).each do |f_y|
                (0...f_width).each do |f_x|
-                  next if x + f_x >= width
-                  next if y + f_y >= height
+                  next if (y + f_y) < 0 || (y + f_y) >= height
+                  next if (x + f_x) < 0 || (x + f_x) >= width
+
+                  img_grad = grad[b][(y + padding[0]) / height_stride][(x + padding[1]) / width_stride]
 
                  channels.times.each do |c|
-                    image_gradient[y + f_y][x + f_x][c] += Array.new(output_channels) do |o_c|
+                    g = Array.new(output_channels) do |o_c|
                      filter[f_y][f_x][c][o_c] * img_grad[o_c]
                    end.reduce(:+)
+
+                    image_gradient[y + f_y][x + f_x][c] += g
                  end
                end
              end
+
            end
          end
 
@@ -289,25 +304,28 @@ module TensorStream
        images, filter_shape, grad = inputs
 
        strides = tensor.options[:strides]
+        padding_option = tensor.options[:padding]
        height_stride = strides[1]
        width_stride = strides[2]
 
        filter_gradient_sum = Array.new(filter_shape.reduce(:*)) { 0.0 }
 
+        _batch, height, width, _channels = shape_eval(images)
+        padding = conv2d_padding_options(padding_option, filter_shape, height, width, height_stride, width_stride)
+
        images.each_with_index.map do |image, index|
-          height, width, _channels = shape_eval(image)
          f_height, f_width, input_channels, output_channels = filter_shape
 
-          (0...height).step(height_stride).each do |y|
-            (0...width).step(width_stride).each do |x|
-              image_grad = grad[index][y/height_stride][x/width_stride]
+          ((0 - padding[0])...height).step(height_stride).each do |y|
+            ((0 - padding[1])...width).step(width_stride).each do |x|
              filter_result = (0...f_height).map do |f_y|
                (0...f_width).map do |f_x|
-                  next Array.new(input_channels * output_channels) { 0.0 } if x + f_x >= width
-                  next Array.new(input_channels * output_channels) { 0.0 } if y + f_y >= height
+                  next Array.new(input_channels * output_channels) { 0.0 } if x + f_x >= width || (x + f_x < 0) || ((x + f_width) > (width + padding[3]))
+                  next Array.new(input_channels * output_channels) { 0.0 } if y + f_y >= height || (y + f_y < 0) || ((y + f_height) > (height + padding[2]))
 
-                  image[y + f_y][x + f_x].each_with_index.map do |image_channel, c_channel|
-                    output_channels.times.map do |o_c|
+                  image_grad = grad[index][(y + padding[0]) / height_stride][(x + padding[1])/ width_stride]
+                  image[y + f_y][x + f_x].map do |image_channel|
+                    Array.new(output_channels) do |o_c|
                      image_channel * image_grad[o_c]
                    end
                  end
@@ -321,6 +339,32 @@ module TensorStream
 
        TensorShape.reshape(filter_gradient_sum, filter_shape)
      end
+
+
+      def conv2d_padding_options(padding_option, filter_shape, height, width, h_stride, w_stride)
+        case padding_option
+        when 'SAME'
+          [
+            calc_pad(height, h_stride, filter_shape[0]),
+            calc_pad(width, w_stride, filter_shape[1]),
+            calc_pad(height, h_stride, filter_shape[0], true),
+            calc_pad(width, w_stride, filter_shape[1], true)
+          ]
+        when 'VALID'
+          [0, 0, 0, 0]
+        else
+          raise TensorStream::ValueError, "Unsupported padding value #{padding_option}, valid values 'SAME', 'VALID'"
+        end
+      end
+
+      def calc_pad(w, stride, f_shape, ceil = false)
+        r = ((w / stride - 1) * stride - w + f_shape)
+        if ceil
+          r.odd? ? r / 2 + 1 : r / 2
+        else
+          r / 2
+        end
+      end
    end
  end
end
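The two helpers above carry all of the new padding arithmetic: `conv2d_padding_options` returns [pad_top, pad_left, pad_bottom, pad_right], and the conv2d loops shift their start coordinates by the first two entries. As a worked example of `calc_pad` (the method body is copied from the hunk above; the input sizes are hypothetical, chosen only for illustration):

  # calc_pad copied from the diff above; example sizes are hypothetical.
  def calc_pad(w, stride, f_shape, ceil = false)
    r = ((w / stride - 1) * stride - w + f_shape)
    if ceil
      r.odd? ? r / 2 + 1 : r / 2
    else
      r / 2
    end
  end

  # 28-pixel input, stride 1, 5-wide filter: pad 2 on each side,
  # so the 'SAME' output stays 28 pixels wide.
  calc_pad(28, 1, 5)        # => 2 (padding before)
  calc_pad(28, 1, 5, true)  # => 2 (padding after)

  # With an even 4-wide filter the leftover odd pixel goes to the 'after' side.
  calc_pad(28, 1, 4)        # => 1
  calc_pad(28, 1, 4, true)  # => 2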
@@ -45,6 +45,58 @@ module TensorStream
        shape = inputs[0] || tensor.shape.shape
        generate_vector(shape, generator: generator)
      end
+
+
+      register_op :truncated_normal, no_eval: true do |_context, tensor, inputs|
+        seed = tensor.options[:seed]
+        random = _get_randomizer(tensor, seed)
+        r = RandomGaussian.new(tensor.options.fetch(:mean), tensor.options.fetch(:stddev), -> { random.rand })
+        random = _get_randomizer(tensor, seed)
+        generator = -> { r.rand }
+        shape = inputs[0] || tensor.shape.shape
+        random_values = Array.new(shape.reduce(:*) || 1) {
+          generator.call
+        }
+        mean = random_values.reduce(:+) / random_values.size
+
+        # standard deviation
+
+        stddev = Math.sqrt( random_values.map { |v| ( v - mean )**2 }.reduce(:+) / (random_values.size - 1) )
+        minval = random_values.min
+        maxval = random_values.max
+        max_iterations = 100
+
+        if (minval.infinite? && minval < 0.0) || (maxval < mean)
+          # Reverse all calculations. normMin and normMax will be flipped.
+          a = minval
+          minval = maxval
+          maxval = a
+          stddev = -stddev
+        end
+
+        norm_min = (minval - mean) / stddev;
+        norm_max = (maxval - mean) / stddev;
+        sqrt_factor = Math.sqrt((norm_min * norm_min) + 4.0);
+        cutoff = 2.0 * Math.exp( 0.5 + (norm_min * (norm_min - sqrt_factor)) / 4.0 ) / (norm_min + sqrt_factor)
+        diff = norm_max - norm_min;
+
+        val = random_values.map { |v|
+          iterations = 0
+          pick = v
+          while ( (pick > norm_max) || (pick < norm_min) )
+            pick = generator.call
+            iterations += 1
+            if iterations > 100
+              pick = v
+              break
+            end
+          end
+
+          pick
+        }
+
+        TensorShape.reshape(val, shape)
+      end
    end
  end
end
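Stripped of TensorStream's plumbing, the evaluator above performs rejection resampling: any draw that falls outside the normalized min/max bounds is redrawn, and after 100 attempts the original draw is kept. A minimal standalone sketch of that idea (the Box-Muller `gaussian` lambda and the fixed [-2, 2] bounds below are illustrative assumptions, not the gem's code):

  # Illustrative rejection-resampling sketch, not the gem's implementation.
  rng = Random.new(42)
  gaussian = lambda do
    # Box-Muller transform for a standard normal draw
    Math.sqrt(-2.0 * Math.log(1.0 - rng.rand)) * Math.cos(2.0 * Math::PI * rng.rand)
  end

  lo, hi = -2.0, 2.0
  truncated = Array.new(16) { gaussian.call }.map do |v|
    pick = v
    attempts = 0
    while pick > hi || pick < lo
      pick = gaussian.call
      attempts += 1
      if attempts > 100
        pick = v # give up and keep the original draw, mirroring the cap above
        break
      end
    end
    pick
  end
  # truncated now holds 16 values confined (with overwhelming probability) to [-2.0, 2.0]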
@@ -289,7 +289,8 @@ module TensorStream
      # assertions to make sure inferred shapes == actual evaluated shapes
      if tensor.shape.known? && (result.is_a?(Array) || result.is_a?(Float) || result.is_a?(Integer))
        if shape_eval(result) != tensor.shape.shape
-          # raise "assert error #{tensor.name} #{shape_eval(result)} != #{tensor.shape.shape}"
+
+          raise "assert error #{tensor.name} #{shape_eval(result)} != #{tensor.shape.shape}"
        end
      end
 
@@ -66,14 +66,16 @@ module TensorStream
        return nil if tensor.inputs[0].shape.nil?
 
        input_shape = tensor.inputs[0].shape.shape
-        return new_shape if input_shape.nil?
-        return nil if input_shape.include?(nil)
+        return new_shape if input_shape.nil? && !new_shape.include?(-1) && !new_shape.include?(nil)
+        return nil if input_shape.nil? || input_shape.include?(nil)
+
        TensorShape.fix_inferred_elements(new_shape, input_shape.reduce(:*))
      when :flow_group
        []
-      when :zeros, :ones, :fill, :random_standard_normal, :random_uniform
+      when :zeros, :ones, :fill, :random_standard_normal, :random_uniform, :truncated_normal
        a_shape = tensor.inputs[0] ? tensor.inputs[0].const_value : tensor.options[:shape]
        return nil if a_shape.nil?
+
        a_shape.is_a?(Array) ? a_shape : [a_shape]
      when :zeros_like, :ones_like
        tensor.inputs[0].shape.shape
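For context, the `TensorShape.fix_inferred_elements` call guarded above implements the usual reshape rule where a single -1 dimension absorbs whatever element count is left over; the tightened conditions simply refuse to answer before that count is known. A hypothetical standalone version of the rule (`infer_reshape` below is illustrative, not the gem's helper):

  # Hypothetical sketch of inferring a -1 dimension from the element count.
  def infer_reshape(new_shape, total_elements)
    return new_shape unless new_shape.include?(-1)

    known = new_shape.reject { |d| d == -1 }.reduce(1, :*)
    new_shape.map { |d| d == -1 ? total_elements / known : d }
  end

  infer_reshape([-1, 4], 24) # => [6, 4]
  infer_reshape([3, 8], 24)  # => [3, 8] (nothing to infer)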
@@ -177,6 +179,21 @@ module TensorStream
 
        new_shape = tensor.inputs[0].shape.shape.dup
        new_shape[3] = tensor.inputs[1].shape.shape[3]
+
+        # account for stride and padding options
+        strides = tensor.options[:strides]
+
+        case tensor.options[:padding]
+        when 'SAME'
+          new_shape[1] /= strides[1]
+          new_shape[2] /= strides[2]
+        when 'VALID'
+          new_shape[1] = (new_shape[1] - tensor.inputs[1].shape.shape[0]) / strides[1] + 1
+          new_shape[2] = (new_shape[2] - tensor.inputs[1].shape.shape[1]) / strides[2] + 1
+        else
+          raise TensorStream::ValueError, "Invalid padding option only 'SAME', 'VALID' accepted"
+        end
+
        new_shape
      when :conv2d_backprop_input
        return nil unless tensor.inputs[0].value
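A quick check of the arithmetic in the new case statement, using a hypothetical 28x28 input, a 3x3 filter, and spatial strides of 2 (values chosen only for illustration):

  # Hypothetical 28x28 input, 3x3 filter, strides [1, 2, 2, 1]
  height, width = 28, 28    # new_shape[1], new_shape[2] before adjustment
  f_h, f_w = 3, 3           # filter spatial dims from tensor.inputs[1]
  strides = [1, 2, 2, 1]

  # 'SAME': spatial dims shrink only by the stride
  [height / strides[1], width / strides[2]]                         # => [14, 14]

  # 'VALID': the filter must fit entirely inside the unpadded input
  [(height - f_h) / strides[1] + 1, (width - f_w) / strides[2] + 1] # => [13, 13]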
@@ -71,7 +71,7 @@ module TensorStream
      return false if tensor.shape.nil?
      return false if tensor.shape.shape.nil?
 
-      tensor.shape.shape.each { |s| return false if s.nil? }
+      tensor.shape.shape.each { |s| return false if s.nil? || (s < 0) }
      true
    end
 
@@ -46,7 +46,7 @@ module TensorStream
    def infer_const
      return false if breakpoint
      case operation
-      when :random_standard_normal, :random_uniform, :glorot_uniform, :print, :check_numerics
+      when :random_standard_normal, :random_uniform, :truncated_normal, :glorot_uniform, :print, :check_numerics
        false
      else
        non_const = @inputs.compact.find { |input| !input.is_const }
@@ -62,7 +62,7 @@ module TensorStream
        :boolean
      when :shape, :rank, :shape_n
        options[:out_type] || :int32
-      when :random_standard_normal, :random_uniform, :glorot_uniform
+      when :random_standard_normal, :random_uniform, :glorot_uniform, :truncated_normal
        passed_data_type || :float32
      when :concat
        @inputs[1].data_type
@@ -92,6 +92,13 @@ module TensorStream
      _op(:random_standard_normal, shape, nil, options)
    end
 
+    ##
+    # Outputs random values from a truncated normal distribution.
+    def truncated_normal(shape, dtype: :float32, mean: 0.0, stddev: 1.0, seed: nil, name: nil)
+      options = { dtype: dtype, mean: mean, stddev: stddev, seed: seed, name: name }
+      _op(:truncated_normal, shape, nil, options)
+    end
+
    ##
    # Stops gradient computation.
    #
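With the evaluator, shape and type inference, and the wrapper above in place, the new op is called like the other random initializers. A minimal session example (usage assumed from the gem's TensorFlow-style API; the shape and parameter values are arbitrary):

  require 'tensor_stream'

  ts = TensorStream

  # 2x3 tensor drawn from the new truncated normal op
  weights = ts.truncated_normal([2, 3], mean: 0.0, stddev: 0.1, seed: 1)

  sess = ts.session
  p sess.run(weights) # => 2x3 nested array of floats clustered near 0.0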
@@ -114,7 +121,7 @@ module TensorStream
    # This operation returns a 1-D integer tensor representing the shape of input
    def shape(input, name: nil, out_type: :int32)
      return constant(shape_eval(input, out_type), dtype: out_type, name: "Shape/#{name}") if input.is_a?(Array) && !input[0].is_a?(Tensor)
-      return constant(input.shape.shape, dtype: out_type, name: "Shape/#{input.name}") if shape_full_specified(input)
+      return constant(input.shape.shape, dtype: out_type, name: "Shape/#{input.name}_c") if shape_full_specified(input)
 
      _op(:shape, input, name: name, out_type: out_type)
    end
@@ -1,5 +1,5 @@
 module TensorStream
-  VERSION = '0.9.7'.freeze
+  VERSION = '0.9.8'.freeze
 
  def self.version
    VERSION
metadata CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: tensor_stream
 version: !ruby/object:Gem::Version
-  version: 0.9.7
+  version: 0.9.8
 platform: ruby
 authors:
 - Joseph Emmanuel Dayo
 autorequire:
 bindir: exe
 cert_chain: []
-date: 2018-11-19 00:00:00.000000000 Z
+date: 2018-11-25 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: bundler
@@ -248,6 +248,11 @@ files:
 - benchmark_ryzen_amd.txt
 - bin/console
 - bin/setup
+- data_1.json
+- data_2.json
+- data_actual.json
+- data_expected.json
+- data_input.json
 - lib/tensor_stream.rb
 - lib/tensor_stream/control_flow.rb
 - lib/tensor_stream/debugging/debugging.rb