tensor_stream 0.9.5 → 0.9.6

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
-   metadata.gz: 51cb6686663dece94714073ff12b13ad7e57de1aadbee44506897c69a2d5fd67
-   data.tar.gz: 47152a908b2cc966cba6721da8f0c3170e3198a70f416fd224caee7d35a1fca2
+   metadata.gz: 4059c6bb82fc83c5aee32090e9c58231f9fd421afb3aa8bea1cbff4857edeb18
+   data.tar.gz: b7ceee8509455146402c3b3cdf9fa0a63eeec3dfee5512dd6e097221f1247481
  SHA512:
-   metadata.gz: 7fff67042fd35c651409e04e890feafab25e618d88d4c3f69c97b39e839a943c64a4e05be388a4464e32328a777f6ee56dc5ede0a6c69c977cc94bcb7cd38bad
-   data.tar.gz: a30230994e81062626eaae5ca7a13ece36d2a5ea6210cfe1e3e465b5e5e24adad525547223f27e15991f74728d53d8b02e7cac0710fe16bf3c602402f5b65b5c
+   metadata.gz: 329eb6875b1add417656537beac95d67c1c061fa3eba3b04031d3ac6f1a20f7e26bf12e66873b70ad575fe7d9f0482c02888f0b544405ba9d73d5b325070966f
+   data.tar.gz: 639a4d90dc1525e063bcdac3b30805a38697b009634d17cb745c99189b88b10b95ce49a9136d5c9c0617fbf3ca3ee1a74429a8579437b7896954ff6529952e79
data/CHANGELOG.md CHANGED
@@ -4,6 +4,12 @@ All notable changes to this project will be documented in this file.
  The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/)
  and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
 
+ ## [0.9.5] - 2018-11-05
+ - [NEW OP] assert_equal, relu6
+ - [TRAINING] learning_rate_decay, dropout
+ - [BUG FIX] argmin, argmax now work properly
+ - [BUG FIX] shape inference fixes
+
  ## [0.9.2] - 2018-10-19
  - Add profiling support
  - Make sure sparse ruby arrays are caught
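A rough usage sketch of the 0.9.5 additions, based on the nn_ops hunks further down this diff (the session and nn accessors are assumed from the library's TensorFlow-style API):

    ts = TensorStream
    sess = ts.session
    x = ts.constant([[-1.0, 2.0, 7.5]])
    sess.run(ts.nn.relu6(x))          # => [[0.0, 2.0, 6.0]], values clamped to the range [0, 6]
    sess.run(ts.nn.dropout(x, 0.8))   # zeroes roughly 20% of elements and scales the rest by 1/0.8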
@@ -7,11 +7,25 @@ module TensorStream
      register_op :decode_png do |_context, tensor, inputs|
        content = inputs[0]
        channels = tensor.options[:channels]
+       resample_new_shape = tensor.options[:new_shape]
+       resample_method = tensor.options[:resample_method] || :bilinear
        channels = 4 if channels.zero?
 
        image = ChunkyPNG::Image.from_blob(content)
 
        image.grayscale! if channels == 1
+
+       if resample_new_shape
+         case resample_method
+         when :bilinear
+           image.resample_bilinear!(resample_new_shape[1], resample_new_shape[0]) # width, height
+         when :nearest_neighbor
+           image.resample_nearest_neighbor!(resample_new_shape[1], resample_new_shape[0])
+         else
+           raise TensorStream::ValueError, "invalid resample method provided #{resample_method}. Available (:bilinear, :nearest_neighbor)"
+         end
+       end
+
        image_data = image.pixels.collect do |pixel|
          color_values = if channels == 4
                           [ChunkyPNG::Color.r(pixel),
@@ -35,10 +49,13 @@ module TensorStream
        TensorShape.reshape(image_data, [image.height, image.width, channels])
      end
 
-     register_op :encode_png do |_context, _tensor, inputs|
+     register_op :encode_png do |_context, tensor, inputs|
        image_data = inputs[0]
        height, width, channels = shape_eval(image_data)
 
+       resample_new_shape = tensor.options[:new_shape]
+       resample_method = tensor.options[:resample_method] || :bilinear
+
        png = ChunkyPNG::Image.new(width, height)
        image_data.each_with_index do |rows, h_index|
          rows.each_with_index do |p_data, w_index|
@@ -51,6 +68,18 @@ module TensorStream
            end
          end
        end
+
+       if resample_new_shape
+         case resample_method
+         when :bilinear
+           png.resample_bilinear!(resample_new_shape[1], resample_new_shape[0]) # width, height
+         when :nearest_neighbor
+           png.resample_nearest_neighbor!(resample_new_shape[1], resample_new_shape[0])
+         else
+           raise TensorStream::ValueError, "invalid resample method provided #{resample_method}. Available (:bilinear, :nearest_neighbor)"
+         end
+       end
+
        png.to_s
      end
    end
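A hedged sketch of the resize-on-decode behavior added above (the ts.image namespace and session API are assumed from the rest of the library; only new_shape and resample_method come from this hunk):

    ts = TensorStream
    png_blob = File.read("input.png")   # hypothetical file
    img = ts.image.decode_png(png_blob, channels: 3, new_shape: [224, 224])
    ts.session.run(img)                 # decoded pixels, bilinear-resampled to 224x224 by default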
@@ -211,6 +211,114 @@ module TensorStream
      register_op :relu6 do |context, tensor, inputs|
        call_vector_op(tensor, :relu6, inputs[0], inputs[1], context, ->(t, u) { [[t, 0].max, 6].min })
      end
+
+     register_op :conv2d do |_context, tensor, inputs|
+       filter = inputs[1]
+       filter_shape = shape_eval(filter)
+       strides = tensor.options[:strides]
+       height_stride = strides[1]
+       width_stride = strides[2]
+
+       raise TensorStream::ValueError, " Current implementation does not yet support strides in the batch and depth dimensions." if strides[0] != 1 || strides[3] != 1
+
+       inputs[0].collect do |image|
+         height, width, _channels = shape_eval(image)
+         f_height, f_width, _input_channels, _output_channels = filter_shape
+
+         (0...height).step(height_stride).map do |y|
+           (0...width).step(width_stride).map do |x|
+             filter_result = (0...f_height).map do |f_y|
+               (0...f_width).map do |f_x|
+                 f_element = filter[f_y][f_x]
+
+                 next if x + f_x >= width
+                 next if y + f_y >= height
+
+                 image[y + f_y][x + f_x].zip(f_element).map do |image_channel, filter_channels|
+                   filter_channels.map { |c| image_channel * c }
+                 end
+               end.compact
+             end.flatten(2)
+
+             filter_result.transpose.map { |e| e.reduce(:+) }
+           end
+         end
+       end
+     end
+
+     register_op :conv2d_backprop_input do |_context, tensor, inputs|
+       image_shape, filter, grad = inputs
+
+       strides = tensor.options[:strides]
+
+       height_stride = strides[1]
+       width_stride = strides[2]
+
+       filter_shape = shape_eval(filter)
+
+       f_height, f_width, _input_channels, output_channels = filter_shape
+       batch, height, width, channels = image_shape
+
+       Array.new(batch) do |b|
+         image_gradient = TensorShape.reshape(Array.new(height * width * channels) { 0.0 }, [height, width, channels])
+
+         (0...height).step(height_stride).each do |y|
+           (0...width).step(width_stride).each do |x|
+             (0...f_height).each do |f_y|
+               (0...f_width).each do |f_x|
+                 next if x + f_x >= width
+                 next if y + f_y >= height
+
+                 channels.times.each do |c|
+                   image_gradient[y + f_y][x + f_x][c] += Array.new(output_channels) do |o_c|
+                     filter[f_y][f_x][c][o_c] * grad[b][(y/height_stride) + f_y][(x/width_stride) + f_x][o_c]
+                   end.reduce(:+)
+                 end
+               end
+             end
+           end
+         end
+
+         image_gradient
+       end
+     end
+
+     register_op :conv2d_backprop_filter do |_context, tensor, inputs|
+       images, filter_shape, grad = inputs
+
+       strides = tensor.options[:strides]
+       height_stride = strides[1]
+       width_stride = strides[2]
+
+       filter_gradient_sum = Array.new(filter_shape.reduce(:*)) { 0.0 }
+
+       images.each_with_index.map do |image, index|
+         height, width, _channels = shape_eval(image)
+         f_height, f_width, input_channels, output_channels = filter_shape
+
+         (0...height).step(height_stride).each do |y|
+           (0...width).step(width_stride).each do |x|
+             filter_result = (0...f_height).map do |f_y|
+               (0...f_width).map do |f_x|
+                 next Array.new(input_channels * output_channels) { 0.0 } if x + f_x >= width
+                 next Array.new(input_channels * output_channels) { 0.0 } if y + f_y >= height
+
+                 image[y + f_y][x + f_x].each_with_index.map do |image_channel, c_channel|
+                   output_channels.times.map do |o_c|
+                     image_channel * grad[index][(y/height_stride) + f_y][(x/width_stride) + f_x][o_c]
+                   end
+                 end
+               end
+             end.flatten
+
+             filter_gradient_sum = multi_array_op(->(a, b) { a + b }, filter_gradient_sum, filter_result)
+           end
+         end
+       end
+
+       TensorShape.reshape(filter_gradient_sum, filter_shape)
+     end
    end
  end
end
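The pure-Ruby :conv2d kernel above slides the filter over each image and, for every output position, sums the element-wise products across the filter window and input channels. A rough shape and arithmetic sketch under the constraint it enforces (batch and depth strides must be 1):

    # input batch: [batch, height, width, in_channels]        e.g. [1, 28, 28, 3]
    # filter:      [f_height, f_width, in_channels, out_ch]   e.g. [3, 3, 3, 8]
    # strides:     [1, height_stride, width_stride, 1]        e.g. [1, 1, 1, 1]
    # output[y][x][o] ~= sum over f_y, f_x, c of input[y + f_y][x + f_x][c] * filter[f_y][f_x][c][o]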
@@ -364,7 +364,7 @@ module TensorStream
 
    def get_op_with_axis(a, target_axis, current_axis, op)
      rank = get_rank(a)
-     return a.index(a.send(:"#{op}")) if rank == 1
+     return a.send(:"#{op}_index") if rank == 1
 
      if current_axis == target_axis
        compare_items = a.collect(&:flatten).transpose
@@ -372,7 +372,7 @@ module TensorStream
      elsif a[0].is_a?(Array)
        a.map { |item| get_op_with_axis(item, target_axis, current_axis + 1, op) }
      else
-       return a.index(a.send(:"#{op}"))
+       return a.send(:"#{op}_index")
      end
    end
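This is the argmin/argmax fix from the changelog: instead of a.index(a.max), which trips over NaN comparisons, the evaluator now calls the Array#max_index / #min_index helpers added later in this diff. A minimal sketch (the session accessor is assumed from the library's API):

    sess = TensorStream.session
    sess.run(TensorStream.argmax([1.0, Float::NAN, 3.0]))   # => 2, NaN entries are skipped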
 
@@ -171,6 +171,13 @@ module TensorStream
          axis = rank + axis if axis < 0
          rotated_shape = Array.new(axis + 1) { new_shape.shift }
          rotated_shape.rotate!(-1) + new_shape
+       when :conv2d
+         return nil unless tensor.inputs[0].shape.known?
+         return nil unless tensor.inputs[1].shape.known?
+
+         new_shape = tensor.inputs[0].shape.shape.dup
+         new_shape[3] = tensor.inputs[1].shape.shape[3]
+         new_shape
        else
          return nil if tensor.inputs[0].nil?
          return tensor.inputs[0].shape.shape if tensor.inputs.size == 1
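A worked example of the new :conv2d shape-inference branch: the inferred output keeps the input's batch and spatial dimensions and swaps the last axis for the filter's output channels.

    # image  shape: [32, 28, 28, 3]
    # filter shape: [5, 5, 3, 16]
    # inferred conv2d output shape: [32, 28, 28, 16]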
@@ -3,14 +3,14 @@ module TensorStream
    extend OpHelper
    extend TensorStream::Utils
 
-   def self.decode_png(contents, channels: 0, dtype: :uint8, name: nil)
-     _op(:decode_png, contents, channels: channels, data_type: dtype, name: name)
+   def self.decode_png(contents, channels: 0, dtype: :uint8, name: nil, new_shape: nil)
+     _op(:decode_png, contents, channels: channels, data_type: dtype, name: name, new_shape: new_shape)
    end
 
-   def self.encode_png(contents, compression: -1, name: nil)
+   def self.encode_png(contents, compression: -1, name: nil, new_shape: nil, resample_method: nil)
      check_allowed_types(contents, %i[uint8 uint16])
      contents = convert_to_tensor(contents, dtype: :uint16)
-     _op(:encode_png, contents, compression: compression, name: name)
+     _op(:encode_png, contents, compression: compression, name: name, new_shape: new_shape, resample_method: resample_method)
    end
  end
end
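Note that in this wrapper decode_png only gains new_shape, while encode_png also exposes resample_method. A hedged sketch of the extended encode_png call (the ts.image accessor is assumed; image_tensor is a hypothetical [height, width, channels] uint8/uint16 tensor):

    resized_png = TensorStream.image.encode_png(image_tensor, new_shape: [64, 64], resample_method: :nearest_neighbor)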
@@ -305,6 +305,8 @@ module TensorStream
          Array.new(node.inputs.size) { |i| res[i] }
        when :unstack
          ts.stack(grad, axis: node.options[:axis])
+       when :conv2d
+         _Conv2DGrad(node, grad)
        when :cast
          t = %i[float16 float32 float64]
          src_type = node.inputs[0].data_type
@@ -416,5 +418,33 @@ module TensorStream
        out_grads = ts.split(grad, sizes, axis: non_neg_concat_dim, num: op.inputs.size - 1)
        end_value_index <= dim_index ? out_grads + [nil] : [nil] + out_grads
      end
+
+     def self._Conv2DGrad(op, grad)
+       # dilations = op.get_attr("dilations")
+       strides = op.options[:strides]
+       padding = op.options[:padding]
+       use_cudnn_on_gpu = op.options[:use_cudnn_on_gpu]
+       data_format = op.options[:data_format]
+
+       shape_0, shape_1 = ts.shape_n([op.inputs[0], op.inputs[1]])
+       [
+         _op(:conv2d_backprop_input,
+             shape_0,
+             op.inputs[1],
+             grad,
+             strides: strides,
+             padding: padding,
+             use_cudnn_on_gpu: use_cudnn_on_gpu,
+             data_format: data_format),
+         _op(:conv2d_backprop_filter,
+             op.inputs[0],
+             shape_1,
+             grad,
+             strides: strides,
+             padding: padding,
+             use_cudnn_on_gpu: use_cudnn_on_gpu,
+             data_format: data_format)
+       ]
+     end
    end
  end
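With :conv2d wired into the gradient table, ts.gradients can now differentiate through a convolution. A minimal sketch, assuming constant inputs and the public nn.conv2d wrapper added near the end of this diff (image_batch and filter_values are hypothetical nested arrays of the right shapes):

    ts    = TensorStream
    img   = ts.constant(image_batch)    # [batch, h, w, in_channels]
    kern  = ts.constant(filter_values)  # [f_h, f_w, in_channels, out_channels]
    out   = ts.nn.conv2d(img, kern, [1, 1, 1, 1], 'SAME')
    grads = ts.gradients(ts.reduce_sum(out), [kern])  # routed through _Conv2DGrad above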
@@ -12,4 +12,40 @@ class Array
    def **(other)
      TensorStream.convert_to_tensor(self)**other
    end
+
+   def max_index
+     if first.is_a?(Float)
+       highest = first
+       highest_index = 0
+       each_with_index do |item, index|
+         next if item.nan?
+
+         if item > highest
+           highest = item
+           highest_index = index
+         end
+       end
+       highest_index
+     else
+       index(max)
+     end
+   end
+
+   def min_index
+     if first.is_a?(Float)
+       highest = first
+       highest_index = 0
+       each_with_index do |item, index|
+         next if item.nan?
+
+         if item < highest
+           highest = item
+           highest_index = index
+         end
+       end
+       highest_index
+     else
+       index(min)
+     end
+   end
  end
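These helpers back the argmin/argmax fix: plain Array#max raises ArgumentError when a Float::NAN is present (NaN breaks <=>), so the float path scans manually and skips NaN entries, while non-float arrays fall back to index(max) / index(min). A quick sketch:

    [2.0, Float::NAN, 9.0].max_index   # => 2
    [2.0, Float::NAN, 9.0].min_index   # => 0
    [5, 1, 3].max_index                # => 0, falls back to index(max)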
@@ -15,6 +15,10 @@ module TensorStream
      end
    end
 
+   def shape
+     TensorStream.shape_eval(self)
+   end
+
    def t(name = nil)
      TensorStream.convert_to_tensor(self, name: name)
    end
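The new #shape helper simply delegates to shape_eval, so nested Ruby arrays report their dimensions directly:

    [[1, 2, 3], [4, 5, 6]].shape   # => [2, 3]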
@@ -3,131 +3,137 @@ module TensorStream
  class NN
    extend TensorStream::OpHelper
 
-   def self.softmax(logits, axis: nil, name: nil)
-     _op(:softmax, logits, nil, axis: axis, name: name)
-   end
+   class << self
+     def softmax(logits, axis: nil, name: nil)
+       _op(:softmax, logits, nil, axis: axis, name: name)
+     end
 
-   def self.relu(features, name: nil)
-     TensorStream.max(features, 0, name: "relu_#{name}")
-   end
+     def relu(features, name: nil)
+       TensorStream.max(features, 0, name: "relu_#{name}")
+     end
 
-   def self.relu6(features, name: nil)
-     TensorStream.name_scope(name, "Relu6", values: [features]) do
-       features = TensorStream.convert_to_tensor(features, name: "features")
-       _op(:relu6, features, name: name)
+     def relu6(features, name: nil)
+       TensorStream.name_scope(name, "Relu6", values: [features]) do
+         features = TensorStream.convert_to_tensor(features, name: "features")
+         _op(:relu6, features, name: name)
+       end
      end
-   end
 
-   ##
-   # Computes dropout.
-   #
-   # With probability keep_prob, outputs the input element scaled up by 1 / keep_prob, otherwise outputs 0. The scaling is so that the expected sum is unchanged.
-   def self.dropout(x, keep_prob, noise_shape: nil, seed: nil, name: nil)
-     TensorStream.name_scope(name, "dropout", values: [x]) do
-       x = TensorStream.convert_to_tensor(x, name: "x")
-       raise TensorStream::ValueError, "x has to be a floating point tensor since it's going to be scaled. Got a #{x.data_type} tensor instead." unless fp_type?(x.data_type)
-       raise TensorStream::ValueError, "keep_prob must be a scalar tensor or a float in the range (0, 1], got #{keep_prob}" unless (0 < keep_prob && keep_prob <= 1)
+     ##
+     # Computes dropout.
+     #
+     # With probability keep_prob, outputs the input element scaled up by 1 / keep_prob, otherwise outputs 0. The scaling is so that the expected sum is unchanged.
+     def dropout(x, keep_prob, noise_shape: nil, seed: nil, name: nil)
+       TensorStream.name_scope(name, "dropout", values: [x]) do
+         x = TensorStream.convert_to_tensor(x, name: "x")
+         raise TensorStream::ValueError, "x has to be a floating point tensor since it's going to be scaled. Got a #{x.data_type} tensor instead." unless fp_type?(x.data_type)
+         raise TensorStream::ValueError, "keep_prob must be a scalar tensor or a float in the range (0, 1], got #{keep_prob}" if keep_prob.is_a?(Float) && !(0 < keep_prob && keep_prob <= 1)
 
-       return x if keep_prob.is_a?(Float) && keep_prob.to_f == 1.0
+         return x if keep_prob.is_a?(Float) && keep_prob.to_f == 1.0
 
-       keep_prob = TensorStream.convert_to_tensor(keep_prob, dtype: x.dtype, name: "keep_prob")
-       return x if keep_prob.value == 1.0
+         keep_prob = TensorStream.convert_to_tensor(keep_prob, dtype: x.dtype, name: "keep_prob")
+         return x if keep_prob.value == 1.0
 
-       noise_shape = if noise_shape.nil?
-                       TensorStream.shape(x)
-                     else
-                       noise_shape
-                     end
+         noise_shape = if noise_shape.nil?
+                         TensorStream.shape(x)
+                       else
+                         noise_shape
+                       end
 
-       random_tensor = keep_prob
-       random_tensor += TensorStream.random_uniform(noise_shape, seed: seed, dtype: x.dtype)
+         random_tensor = keep_prob
+         random_tensor += TensorStream.random_uniform(noise_shape, seed: seed, dtype: x.dtype)
 
-       binary_tensor = TensorStream.floor(random_tensor)
-       TensorStream.div(x, keep_prob) * binary_tensor
+         binary_tensor = TensorStream.floor(random_tensor)
+         TensorStream.div(x, keep_prob) * binary_tensor
+       end
      end
-   end
 
-   def self.sigmoid(input, name: nil)
-     TensorStream.sigmoid(input, name: name)
-   end
+     def sigmoid(input, name: nil)
+       TensorStream.sigmoid(input, name: name)
+     end
 
-   def self.softmax_cross_entropy_with_logits(labels: nil, logits: nil, name: nil)
-     softmax_cross_entropy_with_logits_v2(labels: labels, logits: logits, name: name)
-   end
+     def softmax_cross_entropy_with_logits(labels: nil, logits: nil, name: nil)
+       softmax_cross_entropy_with_logits_v2(labels: labels, logits: logits, name: name)
+     end
 
-   def self.softmax_cross_entropy_with_logits_v2(labels: nil, logits: nil, name: nil)
-     TensorStream.name_scope(name, default: 'softmax_cross_entropy_with_logits', values: [logits, labels]) do
-       ts = TensorStream
-       logits = ts.convert_to_tensor(logits, name: 'logits')
-       labels = ts.convert_to_tensor(labels, name: 'labels')
-       labels = ts.cast(labels, logits.dtype)
+     def softmax_cross_entropy_with_logits_v2(labels: nil, logits: nil, name: nil)
+       TensorStream.name_scope(name, default: 'softmax_cross_entropy_with_logits', values: [logits, labels]) do
+         ts = TensorStream
+         logits = ts.convert_to_tensor(logits, name: 'logits')
+         labels = ts.convert_to_tensor(labels, name: 'labels')
+         labels = ts.cast(labels, logits.dtype)
 
-       output = _op(:softmax_cross_entropy_with_logits_v2, logits, labels)
-       output[0]
+         output = _op(:softmax_cross_entropy_with_logits_v2, logits, labels)
+         output[0]
+       end
      end
-   end
 
-   def self.sparse_softmax_cross_entropy_with_logits(labels: nil, logits: nil, name: nil)
-     TensorStream.name_scope(name, default: "SparseSoftmaxCrossEntropyWithLogits", values: [logits, labels]) do
-       tf = TensorStream
-       labels = tf.convert_to_tensor(labels)
-       logits = tf.convert_to_tensor(logits)
-       precise_logits = logits.data_type == :float16 ? tf.cast(logits, :float32) : logits
-
-       labels_static_shape = labels.shape
-       labels_shape = tf.shape(labels)
-       static_shapes_fully_defined = labels_static_shape.known? && logits.shape.known?
-
-       raise TensorStream::ValueError, "Logits cannot be scalars - received shape #{logits.shape.shape}." if logits.shape.known? && logits.shape.scalar?
-       raise TensorStream::ValueError, "Rank mismatch: Rank of labels (received #{labels_static_shape.ndims}) " +
-         "should equal rank of logits minus 1 (received #{logits.shape.ndims})." if logits.shape.known? && (labels_static_shape.known? && labels_static_shape.ndims != logits.shape.ndims - 1)
-       if logits.shape.ndims == 2
-         cost = _op(:sparse_softmax_cross_entropy_with_logits,
-                    precise_logits, labels, name: name)
-         if logits.data_type == :float16
-           return tf.cast(cost[0], :float16)
-         else
-           return cost[0]
+     def sparse_softmax_cross_entropy_with_logits(labels: nil, logits: nil, name: nil)
+       TensorStream.name_scope(name, default: "SparseSoftmaxCrossEntropyWithLogits", values: [logits, labels]) do
+         tf = TensorStream
+         labels = tf.convert_to_tensor(labels)
+         logits = tf.convert_to_tensor(logits)
+         precise_logits = logits.data_type == :float16 ? tf.cast(logits, :float32) : logits
+
+         labels_static_shape = labels.shape
+         labels_shape = tf.shape(labels)
+         static_shapes_fully_defined = labels_static_shape.known? && logits.shape.known?
+
+         raise TensorStream::ValueError, "Logits cannot be scalars - received shape #{logits.shape.shape}." if logits.shape.known? && logits.shape.scalar?
+         raise TensorStream::ValueError, "Rank mismatch: Rank of labels (received #{labels_static_shape.ndims}) " +
+           "should equal rank of logits minus 1 (received #{logits.shape.ndims})." if logits.shape.known? && (labels_static_shape.known? && labels_static_shape.ndims != logits.shape.ndims - 1)
+         if logits.shape.ndims == 2
+           cost = _op(:sparse_softmax_cross_entropy_with_logits,
+                      precise_logits, labels, name: name)
+           if logits.data_type == :float16
+             return tf.cast(cost[0], :float16)
+           else
+             return cost[0]
+           end
          end
-       end
 
-       shape_checks = []
+         shape_checks = []
 
-       shape_checks << tf.assert_equal(tf.rank(labels), tf.rank(logits) - 1) unless static_shapes_fully_defined
+         shape_checks << tf.assert_equal(tf.rank(labels), tf.rank(logits) - 1) unless static_shapes_fully_defined
 
-       tf.control_dependencies(shape_checks) do
-         num_classes = tf.shape(logits)[tf.rank(logits) - 1]
-         precise_logits = tf.reshape(precise_logits, [-1, num_classes])
-         labels = tf.reshape(labels, [-1])
-         cost = _op(:sparse_softmax_cross_entropy_with_logits, precise_logits, labels, name: name)
-         cost = tf.reshape(cost[0], labels_shape)
+         tf.control_dependencies(shape_checks) do
+           num_classes = tf.shape(logits)[tf.rank(logits) - 1]
+           precise_logits = tf.reshape(precise_logits, [-1, num_classes])
+           labels = tf.reshape(labels, [-1])
+           cost = _op(:sparse_softmax_cross_entropy_with_logits, precise_logits, labels, name: name)
+           cost = tf.reshape(cost[0], labels_shape)
 
-         if logits.data_type == :float16
-           tf.cast(cost, :float16)
-         else
-           cost
+           if logits.data_type == :float16
+             tf.cast(cost, :float16)
+           else
+             cost
+           end
          end
        end
      end
-   end
 
-   # Computes log softmax activations.
-   def self.log_softmax(logits, axis: -1, name: nil)
-     _op(:log_softmax, logits, axis: axis, name: name)
-   end
+     # Computes log softmax activations.
+     def log_softmax(logits, axis: -1, name: nil)
+       _op(:log_softmax, logits, axis: axis, name: name)
+     end
+
+     def sigmoid_cross_entropy_with_logits(labels: nil, logits: nil, name: nil)
+       TensorStream.name_scope(name, default: 'logistic_loss', values: [logits, labels]) do |_name|
+         tf = TensorStream
+         logits = tf.convert_to_tensor(logits, name: 'logits')
+         labels = tf.convert_to_tensor(labels, name: 'labels')
+         zeros = tf.zeros_like(logits, dtype: logits.dtype)
+         cond = (logits >= zeros)
+         relu_logits = tf.where(cond, logits, zeros)
+         neg_abs_logits = tf.where(cond, -logits, logits)
+
+         tf.add(relu_logits - logits * labels,
+                tf.log1p(tf.exp(neg_abs_logits)), name: name)
+       end
+     end
 
-   def self.sigmoid_cross_entropy_with_logits(labels: nil, logits: nil, name: nil)
-     TensorStream.name_scope(name, default: 'logistic_loss', values: [logits, labels]) do |_name|
-       tf = TensorStream
-       logits = tf.convert_to_tensor(logits, name: 'logits')
-       labels = tf.convert_to_tensor(labels, name: 'labels')
-       zeros = tf.zeros_like(logits, dtype: logits.dtype)
-       cond = (logits >= zeros)
-       relu_logits = tf.where(cond, logits, zeros)
-       neg_abs_logits = tf.where(cond, -logits, logits)
-
-       tf.add(relu_logits - logits * labels,
-              tf.log1p(tf.exp(neg_abs_logits)), name: name)
+     def conv2d(input, filter, strides, padding, name: nil)
+       _op(:conv2d, input, filter, strides: strides, padding: padding, name: name)
      end
    end
  end
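The new public nn.conv2d wrapper mirrors the TensorFlow signature with positional strides and padding. A small hedged usage sketch (shapes as in the kernel hunk earlier; padding is accepted here even though the pure-Ruby kernel shown above does not yet act on it):

    conv = TensorStream.nn.conv2d(image_batch, filter, [1, 2, 2, 1], 'SAME')
    # image_batch: [batch, h, w, in_channels], filter: [f_h, f_w, in_channels, out_channels]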
@@ -1,4 +1,5 @@
  require 'json'
+ require "zlib"
 
  module TensorStream
    module Train
@@ -12,11 +13,12 @@ module TensorStream
             write_meta_graph: true,
             write_state: true,
             strip_default_attrs: false)
-     vars = TensorStream::Graph.get_default_graph.get_collection(GraphKeys::GLOBAL_VARIABLES)
+     graph = TensorStream::Graph.get_default_graph
+     vars = graph.get_collection(GraphKeys::GLOBAL_VARIABLES)
 
      variables = {}
-     graph = {}
-     gs = eval_global_step(session, global_step)
+
+     gs = eval_global_step(session, global_step)
      output_dump = {
        'variables' => variables,
        'global_step' => gs
@@ -24,7 +26,7 @@ module TensorStream
 
      vars.each do |variable|
        val = variable.read_value
-       packed_data = TensorStream::Packer.pack(val, variable.data_type)
+       packed_data = Zlib::Deflate.deflate(TensorStream::Packer.pack(val, variable.data_type))
        variables[variable.name] = {
          'shape' => shape_eval(val),
          'data' => Base64.strict_encode64(packed_data)
@@ -36,18 +38,21 @@ module TensorStream
 
      new_filename = File.join(path, [basename, gs].compact.join('-'))
      File.write(new_filename, output_dump.to_yaml)
-
+     if write_meta_graph
+       graph_filename = "#{basename}.pbtext"
+       TensorStream.train.write_graph(graph, path, graph_filename)
+     end
      path
    end
 
    def restore(_session, inputfile)
-     input_dump = YAML.load(File.read(inputfile))
+     input_dump = YAML.safe_load(File.read(inputfile))
 
      vars = TensorStream::Graph.get_default_graph.get_collection(GraphKeys::GLOBAL_VARIABLES)
      vars.each do |variable|
        next unless input_dump['variables'].key?(variable.name)
 
-       data = TensorStream::Packer.unpack(Base64.decode64(input_dump['variables'][variable.name]['data']), variable.data_type)
+       data = TensorStream::Packer.unpack(Zlib::Inflate.inflate(Base64.decode64(input_dump['variables'][variable.name]['data'])), variable.data_type)
        shape = input_dump['variables'][variable.name]['shape']
        variable.buffer = nil
        variable.value = TensorShape.reshape(data, shape)
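The saver now deflates each variable's packed data with Zlib before Base64-encoding it, writes a "#{basename}.pbtext" graph file when write_meta_graph is true, and restores via YAML.safe_load. A hedged sketch (the Saver class name, argument order, and dump filename are assumptions drawn from the surrounding file, not shown verbatim in this diff):

    saver = TensorStream::Train::Saver.new
    saver.save(sess, "/tmp/ckpt")             # variable dump (Zlib + Base64) plus a .pbtext graph file
    saver.restore(sess, "/tmp/ckpt/model-0")  # hypothetical dump filename; data is inflated on load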
@@ -1,5 +1,5 @@
  module TensorStream
-   VERSION = '0.9.5'.freeze
+   VERSION = '0.9.6'.freeze
 
    def self.version
      VERSION
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: tensor_stream
  version: !ruby/object:Gem::Version
-   version: 0.9.5
+   version: 0.9.6
  platform: ruby
  authors:
  - Joseph Emmanuel Dayo
  autorequire:
  bindir: exe
  cert_chain: []
- date: 2018-11-04 00:00:00.000000000 Z
+ date: 2018-11-18 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
    name: bundler