tensor_stream 0.9.0 → 0.9.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +5 -5
- data/CHANGELOG.md +6 -0
- data/lib/tensor_stream/dynamic_stitch.rb +1 -1
- data/lib/tensor_stream/evaluator/base_evaluator.rb +13 -7
- data/lib/tensor_stream/evaluator/ruby/array_ops.rb +2 -2
- data/lib/tensor_stream/evaluator/ruby/math_ops.rb +1 -1
- data/lib/tensor_stream/evaluator/ruby/nn_ops.rb +0 -1
- data/lib/tensor_stream/evaluator/ruby_evaluator.rb +23 -3
- data/lib/tensor_stream/graph_serializers/packer.rb +60 -0
- data/lib/tensor_stream/graph_serializers/serializer.rb +1 -5
- data/lib/tensor_stream/math_gradients.rb +6 -6
- data/lib/tensor_stream/operation.rb +82 -16
- data/lib/tensor_stream/ops.rb +8 -8
- data/lib/tensor_stream/tensor.rb +9 -2
- data/lib/tensor_stream/tensor_shape.rb +3 -1
- data/lib/tensor_stream/train/saver.rb +18 -9
- data/lib/tensor_stream/types.rb +12 -0
- data/lib/tensor_stream/version.rb +1 -1
- data/lib/tensor_stream.rb +1 -0
- metadata +4 -3
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
-
-  metadata.gz:
-  data.tar.gz:
+SHA1:
+  metadata.gz: 9c6649e7cb423246d4886776bf93a8422e3be936
+  data.tar.gz: c62ffe26455cff685925a5a9a27c49babae6d17e
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 9dd8519a7c4c34fc0d0c8f4900aa2bec73e6a0dfefe15db4f4c6703188e55080cd2dcce4c8df02993beaa1d6f753b2309fabda623f5ab6a0218c3bd4ce382c0c
+  data.tar.gz: 2e2f051ada324f4f220db702762a179f1ee20985ecbde51d5c8fc976ad50ff32a315459b97a2366c7cf9a718672b42910466b8e1e81ea388a878035807bae1e0
data/CHANGELOG.md
CHANGED
@@ -4,6 +4,12 @@ All notable changes to this project will be documented in this file.
 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/)
 and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
 
+## [0.9.1] - 2018-10-19
+- Bug fix release
+
+## [0.9.0] - 2018-10-05
+- Bug fix release for OpenCL gem
+
 ## [0.8.6] - 2018-09-11
 
 ### Added

data/lib/tensor_stream/dynamic_stitch.rb
CHANGED
@@ -12,7 +12,7 @@ module TensorStream
       @data_type = Tensor.detect_type(inputs[1])
       @name = [@graph.get_name_scope, options[:name] || set_name].compact.join('/')
       @ops = ops
-      @shape = TensorShape.new(
+      @shape = TensorShape.new(nil)
       @graph.add_node(self)
     end
 

data/lib/tensor_stream/evaluator/base_evaluator.rb
CHANGED
@@ -126,18 +126,24 @@ module TensorStream
 
     protected
 
+    ##
+    # called when passing control to another evaluator
+    def perform_transition(tensor, input, _next_evaluator)
+      cache_key = "#{tensor.graph.object_id}_#{input.name}:#{object_id}"
+      return @context[:_cache][cache_key] if @context[:_cache].key?(cache_key)
+
+      result = @session.delegate_to_evaluator(input, @context, execution_context)
+      convert_from_buffer(input, result).tap do |buffer|
+        @context[:_cache][cache_key] = buffer if input.is_const
+      end
+    end
+
     def global_eval(tensor, input, execution_context, op_options = {})
       return nil unless input
       return input unless input.is_a?(Tensor)
       @context[:_cache][:placement][input.name] = @session.assign_evaluator(input) if @context[:_cache][:placement][input.name].nil?
       if object_id != @context[:_cache][:placement][input.name][1].object_id # tensor is on another device or evaluator
-        cache_key = "#{tensor.graph.object_id}_#{input.name}:#{object_id}"
-        return @context[:_cache][cache_key] if @context[:_cache].key?(cache_key)
-
-        result = @session.delegate_to_evaluator(input, @context, execution_context)
-        convert_from_buffer(input, result).tap do |buffer|
-          @context[:_cache][cache_key] = buffer if input.is_const
-        end
+        perform_transition(tensor, input, @context[:_cache][:placement][input.name][1])
       else
         prepare_input(input, execution_context, op_options)
       end
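
The refactor above extracts the cross-evaluator handoff into a protected perform_transition hook. A minimal sketch of how a custom evaluator might build on it (the subclass, its logging, and the TensorStream::Evaluator::BaseEvaluator constant path are assumptions for illustration; only the hook name and signature come from the diff):

class TracingEvaluator < TensorStream::Evaluator::BaseEvaluator
  protected

  # Log every handoff, then fall back to the stock delegation logic via super.
  def perform_transition(tensor, input, next_evaluator)
    puts "delegating #{input.name} to #{next_evaluator.class}"
    super
  end
end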

data/lib/tensor_stream/evaluator/ruby/array_ops.rb
CHANGED
@@ -158,8 +158,8 @@ module TensorStream
       TensorShape.reshape([val], new_shape)
     end
 
-    register_op :fill do |_context,
-      shape = inputs[0]
+    register_op :fill do |_context, tensor, inputs|
+      shape = inputs[0] || tensor.shape.shape
       value = inputs[1]
 
       func = -> { value }

data/lib/tensor_stream/evaluator/ruby/math_ops.rb
CHANGED
@@ -237,7 +237,7 @@ module TensorStream
       matrix_b = matrix_b.transpose if tensor.options[:transpose_b]
 
       # check matrix dimensions
-      raise "incompatible shape sizes for matrix multiplication (#{matrix_a[0].size} != #{matrix_b.size}) #{shape_eval(matrix_a)} vs #{shape_eval(matrix_b)}" if matrix_a[0].size != matrix_b.size
+      raise TensorStream::ValueError, "incompatible shape sizes for matrix multiplication (#{matrix_a[0].size} != #{matrix_b.size}) #{shape_eval(matrix_a)} vs #{shape_eval(matrix_b)}" if matrix_a[0].size != matrix_b.size
 
       (Matrix[*matrix_a] * Matrix[*matrix_b]).to_a
     end
@@ -40,7 +40,6 @@ module TensorStream
|
|
40
40
|
register_op :apply_adagrad do |_context, tensor, inputs|
|
41
41
|
target_var, accum, lr, grad = inputs
|
42
42
|
assign = tensor.inputs[0] || tensor
|
43
|
-
|
44
43
|
assign.value = multi_array_op(->(v, a, g) { v - (g * lr * (1.0 / Math.sqrt(a))) }, target_var, accum, grad)
|
45
44
|
assign.value
|
46
45
|
end
|

data/lib/tensor_stream/evaluator/ruby_evaluator.rb
CHANGED
@@ -250,8 +250,21 @@ module TensorStream
       softmax(inputs[0])
     end
 
-    register_op :
-
+    register_op :save_ts do |_context, tensor, inputs|
+      outputfile = inputs[0]
+      inputs = tensor.inputs.dup
+
+      basename = File.basename(outputfile)
+      path = File.dirname(outputfile)
+
+      new_filename = File.join(path, [basename, gs].compact.join('-'))
+
+      inputs.shift
+      variables = {}
+      inputs.each do |savable|
+        variables[savable.name] = TensorStream::Packer.pack(savable.read_value, savable.data_type)
+      end
+      File.write(new_filename, variables.to_yaml)
     end
 
     register_op :restore_v2 do |context, tensor, inputs|
@@ -270,7 +283,14 @@ module TensorStream
     def eval_operation(tensor, child_context)
       return @context[tensor.name] if @context.key?(tensor.name)
       invoke(tensor, child_context).tap do |result|
-
+
+        # assertions to make sure inferred shapes == actual evaluated shapes
+        if tensor.shape.known? && (result.is_a?(Array) || result.is_a?(Float) || result.is_a?(Integer))
+          if shape_eval(result) != tensor.shape.shape
+            raise "assert error #{tensor.name} #{shape_eval(result)} != #{tensor.shape.shape}"
+          end
+        end
+
 
         if tensor.breakpoint
           a = resolve_placeholder(tensor.inputs[0], child_context) if tensor.inputs && tensor.inputs[0]
           b = resolve_placeholder(tensor.inputs[1], child_context) if tensor.inputs && tensor.inputs[1]

data/lib/tensor_stream/graph_serializers/packer.rb
ADDED
@@ -0,0 +1,60 @@
+require 'base64'
+
+module TensorStream
+  # Utility class to handle data type serialization
+  class Packer
+    def self.pack(value, data_type)
+      value = value.is_a?(Array) ? value.flatten : [value]
+      byte_value = case data_type
+                   when :float64
+                     value.pack('d*')
+                   when :float32, :float16, :float
+                     value.pack('f*')
+                   when :uint32
+                     value.pack('L*')
+                   when :int32, :int
+                     value.pack('l*')
+                   when :int64
+                     value.pack('q*')
+                   when :uint64
+                     value.pack('Q*')
+                   when :uint8
+                     value.pack('C*')
+                   when :boolean
+                     value.map { |v| v ? 1 : 0 }.pack('C*')
+                   end
+
+      byte_value
+    end
+
+    def self.pack_to_str(value, data_type)
+      pack(value, data_type).bytes.map { |b| b.chr =~ /[^[:print:]]/ ? "\\#{sprintf("%o", b).rjust(3, '0')}" : b.chr }.join
+    end
+
+    def self.unpack_from_str(content, data_type)
+      unpacked = eval(%("#{content}"), __FILE__)
+      unpack(unpacked, data_type)
+    end
+
+    def self.unpack(unpacked, data_type)
+      case data_type
+      when :float32, :float, :float16
+        unpacked.unpack('f*')
+      when :float64
+        unpacked.unpack('d*')
+      when :int32, :int
+        unpacked.unpack('L*')
+      when :uint32
+        unpacked.unpack('l*')
+      when :int64
+        unpacked.unpack('q*')
+      when :uint64
+        unpacked.unpack('Q*')
+      when :uint8
+        unpacked.unpack('C*')
+      when :boolean
+        unpacked.unpack('C*').map { |v| v == 1 }
+      end
+    end
+  end
+end
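
For reference, a minimal round trip through the new Packer (values chosen for illustration; note that pack flattens nested arrays, so unpack returns a flat array and the caller must restore the shape, as saver.rb below does with TensorShape.reshape):

require 'tensor_stream'

# Pack a 2x2 float32 tensor to bytes and back.
bytes = TensorStream::Packer.pack([[1.0, 2.0], [3.0, 4.0]], :float32)
# => 16 bytes of native-endian float32 data
TensorStream::Packer.unpack(bytes, :float32)
# => [1.0, 2.0, 3.0, 4.0]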

data/lib/tensor_stream/graph_serializers/serializer.rb
CHANGED
@@ -1,13 +1,9 @@
 module TensorStream
   class Serializer
-    def initialize
-    end
-
     def serialize(filename, tensor, session = nil)
       File.write(filename, get_string(tensor, session))
     end
 
-    def get_string(tensor, session = nil)
-    end
+    def get_string(tensor, session = nil); end
   end
 end

data/lib/tensor_stream/math_gradients.rb
CHANGED
@@ -267,7 +267,7 @@ module TensorStream
       [_broadcast_mul(grad, output[1]), -ts.nn.log_softmax(logits)]
     when :sparse_softmax_cross_entropy_with_logits
       output = node
-
+      [_broadcast_mul(grad, output[1]), nil]
     when :floor, :ceil
       # non differentiable
       nil
@@ -280,16 +280,16 @@ module TensorStream
     when :transpose
       return [ts.transpose(grad, ts.invert_permutation(y)), nil]
     when :index
-      #hack!! not sure how to fix this yet
+      # hack!! not sure how to fix this yet
       return grad if %i[softmax_cross_entropy_with_logits_v2 sparse_softmax_cross_entropy_with_logits].include?(node.inputs[0].operation)
 
       if node.inputs[0].shape.known? && node.inputs[1].value
         multiplier = node.inputs[0].shape.shape[0]
         filler = ts.zeros_like(grad)
 
-        res = Array.new(multiplier)
+        res = Array.new(multiplier) do |index|
           index == node.inputs[1].value ? grad : filler
-
+        end
         [res]
       end
     when :squeeze
@@ -312,9 +312,9 @@ module TensorStream
 
       if t.key?(src_type) && t.key?(dst_type)
         ts.cast(grad, src_type)
-      else
-        nil
       end
+
+      nil
     else
       raise "no derivative op for #{node.operation}"
     end

data/lib/tensor_stream/operation.rb
CHANGED
@@ -221,23 +221,44 @@ module TensorStream
 
     def infer_shape
       case operation
+      when :assign
+        possible_shape = if inputs[0] && inputs[0].shape.shape
+                           inputs[0].shape.shape
+                         else
+                           inputs[1].shape.shape
+                         end
+
+        possible_shape
       when :index
-
-        return nil
-
+        return nil unless inputs[0].is_a?(Tensor)
+        return nil unless inputs[0].const_value
+
+        input_shape = inputs[0].shape
+        return nil unless input_shape.known?
+
+        s = input_shape.shape.dup
+        s.shift
+        s
       when :mean, :prod, :sum
         return [] if inputs[1].nil?
         return nil if inputs[0].nil?
-
-        return nil if input_shape.nil?
-        return nil if inputs[1].is_a?(Tensor) && inputs[1].value.nil?
+        return nil unless inputs[0].shape.known?
 
-
+        input_shape = inputs[0].shape.shape
+        rank = input_shape.size
+
+        axis = inputs[1].const_value
+        return nil if axis.nil?
 
         axis = [axis] unless axis.is_a?(Array)
-
-
-
+        axis = axis.map { |a| a < 0 ? rank - a.abs : a }
+
+        input_shape.each_with_index.map do |item, index|
+          if axis.include?(index)
+            next 1 if options[:keepdims]
+            next nil
+          end
+          item
         end.compact
       when :reshape
         new_shape = inputs[1] && inputs[1].value ? inputs[1].value : nil
@@ -250,16 +271,36 @@ module TensorStream
       TensorShape.fix_inferred_elements(new_shape, input_shape.reduce(:*))
     when :flow_group
       []
-    when :zeros, :ones, :fill
-      inputs[0] ? inputs[0].
+    when :zeros, :ones, :fill, :random_standard_normal, :random_uniform
+      a_shape = inputs[0] ? inputs[0].const_value : options[:shape]
+      return nil if a_shape.nil?
+      a_shape.is_a?(Array) ? a_shape : [a_shape]
     when :zeros_like, :ones_like
       inputs[0].shape.shape
     when :shape
       inputs[0].shape.shape ? [inputs[0].shape.shape.size] : nil
     when :mat_mul
-
-
-      [
+      return nil if inputs[0].shape.shape.nil? || inputs[1].shape.shape.nil?
+      return [] if inputs[0].shape.shape.empty? || inputs[1].shape.shape.empty?
+      return nil if inputs[0].shape.shape.size != 2 || inputs[1].shape.shape.size != 2
+
+      shape1, m = if options[:transpose_a]
+                    [inputs[0].shape.shape[0], inputs[0].shape.shape[1]]
+                  else
+                    [inputs[0].shape.shape[1], inputs[0].shape.shape[0]]
+                  end
+
+      shape2, n = if options[:transpose_b]
+                    [inputs[1].shape.shape[1], inputs[1].shape.shape[0]]
+                  else
+                    [inputs[1].shape.shape[0], inputs[1].shape.shape[1]]
+                  end
+
+      return nil if shape1.nil? || shape2.nil? || shape1 < 0 || shape2 < 0
+
+      raise TensorStream::ValueError, "incompatible shape sizes for matrix multiplication (#{shape1} != #{shape2}) #{inputs[0].shape.shape} vs #{inputs[1].shape.shape}" if shape1 != shape2
+
+      [m, n]
     when :transpose
       return nil unless shape_full_specified(inputs[0])
       return nil if inputs[1].is_a?(Tensor)
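
A worked example of the new :mat_mul inference, with illustrative shapes and no transpose options:

a_shape = [2, 3]                      # inputs[0].shape.shape
b_shape = [3, 4]                      # inputs[1].shape.shape
shape1, m = [a_shape[1], a_shape[0]]  # inner dim 3, rows 2
shape2, n = [b_shape[0], b_shape[1]]  # inner dim 3, cols 4
[m, n] if shape1 == shape2            # => [2, 4]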
@@ -298,10 +339,35 @@ module TensorStream
       nil
     when :tile
       nil
+    when :expand_dims
+      nil
+    when :broadcast_gradient_args
+      nil
+    when :no_op
+      nil
+    when :softmax_cross_entropy_with_logits_v2, :sparse_softmax_cross_entropy_with_logits
+      nil
+    when :decode_png, :flow_dynamic_stitch, :dynamic_stitch, :gather
+      nil
+    when :eye
+      return [inputs[0].const_value, inputs[1].const_value] if inputs[0].const_value && inputs[1].const_value
+
+      nil
+    when :size
+      []
+    when :unstack
+      return nil unless inputs[0].shape.known?
+
+      new_shape = inputs[0].shape.shape.dup
+      rank = new_shape.size - 1
+      axis = options[:axis] || 0
+      axis = rank + axis if axis < 0
+      rotated_shape = Array.new(axis + 1) { new_shape.shift }
+      rotated_shape.rotate!(-1) + new_shape
     else
       return nil if inputs[0].nil?
       return inputs[0].shape.shape if inputs.size == 1
-      TensorShape.infer_shape(inputs[0].shape.shape, inputs[1].shape.shape) if inputs.size == 2 && inputs[0] && inputs[1]
+      TensorShape.infer_shape(inputs[0].shape.shape, inputs[1].shape.shape) if inputs.size == 2 && inputs[0] && inputs[1] && inputs[0].shape.known? && inputs[1].shape.known?
     end
   end
 
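
The :unstack branch rotates the unstacked axis to the front of the inferred shape. Tracing it with an illustrative input shape of [2, 3, 4] and axis: 1:

new_shape = [2, 3, 4]
axis = 1
rotated_shape = Array.new(axis + 1) { new_shape.shift } # => [2, 3]; new_shape is now [4]
rotated_shape.rotate!(-1) + new_shape                   # => [3, 2, 4]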
data/lib/tensor_stream/ops.rb
CHANGED
@@ -6,8 +6,8 @@ module TensorStream
       @op = op
     end
   end
-  FLOATING_POINT_TYPES = %i[float32 float64 float].freeze
-  INTEGER_TYPES = %i[uint8 int32 int int64].freeze
+  FLOATING_POINT_TYPES = %i[float32 float64 float float16].freeze
+  INTEGER_TYPES = %i[uint8 int32 int int16 uint16 int64 uint32 uint64].freeze
   NUMERIC_TYPES = FLOATING_POINT_TYPES + INTEGER_TYPES
 
   ##
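
With the widened constants, dtype checks now accept the new types. A quick illustration (assuming the constants are reachable under TensorStream::Ops, as the file path suggests):

TensorStream::Ops::FLOATING_POINT_TYPES.include?(:float16) # => true
TensorStream::Ops::INTEGER_TYPES.include?(:uint32)         # => true
TensorStream::Ops::NUMERIC_TYPES.include?(:uint16)         # => true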
@@ -99,7 +99,7 @@ module TensorStream
   ##
   # This operation returns a 1-D integer tensor representing the shape of input
   def shape(input, name: nil, out_type: :int32)
-    return constant(shape_eval(input, out_type), dtype: out_type, name: name) if input.is_a?(Array) && !input[0].is_a?(Tensor)
+    return constant(shape_eval(input, out_type), dtype: out_type, name: "Shape/#{name}") if input.is_a?(Array) && !input[0].is_a?(Tensor)
     return constant(input.shape.shape, dtype: out_type, name: "Shape/#{input.name}") if shape_full_specified(input)
 
     _op(:shape, input, name: name, out_type: out_type)
|
|
144
144
|
end
|
145
145
|
|
146
146
|
def constant_initializer(value, dtype: nil, verify_shape: false)
|
147
|
-
TensorStream::Initializer.new(-> { convert_to_tensor(value, dtype: dtype) })
|
147
|
+
TensorStream::Initializer.new(-> { _op(:fill, nil, convert_to_tensor(value, dtype: dtype)) })
|
148
148
|
end
|
149
149
|
|
150
150
|
##
|
@@ -303,17 +303,17 @@ module TensorStream
|
|
303
303
|
|
304
304
|
pieces = if value.shape.known? && num_or_size_splits.is_const && num_or_size_splits.value && axis.is_const
|
305
305
|
if num_or_size_splits.shape.scalar?
|
306
|
-
raise TensorStream::ValueError, "num_or_size_splits must divide dimension #{value.shape.shape[axis.value]} evenly" unless value.shape.shape[axis.value] % num_or_size_splits.value
|
306
|
+
raise TensorStream::ValueError, "num_or_size_splits must divide dimension #{value.shape.shape[axis.value]} evenly" unless (value.shape.shape[axis.value] % num_or_size_splits.value).zero?
|
307
307
|
div = num_or_size_splits.value
|
308
308
|
n = value.shape.shape[axis.value] / div
|
309
309
|
|
310
|
-
Array.new(div)
|
310
|
+
Array.new(div) do |i|
|
311
311
|
new_shape = value.shape.shape.dup
|
312
312
|
new_shape[axis.value] = n
|
313
313
|
new_shape
|
314
|
-
|
314
|
+
end
|
315
315
|
elsif num_or_size_splits.shape.ndims == 1
|
316
|
-
raise TensorStream::ValueError, "Sum of splits do not match total dimen in axis #{value.shape.shape[axis.value]} != #{
|
316
|
+
raise TensorStream::ValueError, "Sum of splits do not match total dimen in axis #{value.shape.shape[axis.value]} != #{num_or_size_splits.value.reduce(:+)}" if value.shape.shape[axis.value] != num_or_size_splits.value.reduce(:+)
|
317
317
|
num_or_size_splits.value.collect do |v|
|
318
318
|
new_shape = value.shape.shape.dup
|
319
319
|
new_shape[axis.value] = v
|
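
Tracing the scalar num_or_size_splits branch above with illustrative values, a [4, 6] tensor split into 3 pieces along axis 1:

value_shape = [4, 6]
div = 3
n = value_shape[1] / div # => 2
pieces = Array.new(div) do |i| # block index i is unused, as in the diff
  new_shape = value_shape.dup
  new_shape[1] = n
  new_shape
end
# => [[4, 2], [4, 2], [4, 2]]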
data/lib/tensor_stream/tensor.rb
CHANGED
@@ -33,6 +33,7 @@ module TensorStream
       else
         @value = Tensor.cast_dtype(options[:value], @data_type)
       end
+      @shape = TensorShape.new(shape_eval(@value))
     end
 
     @graph.add_node(self)
@@ -170,6 +171,12 @@ module TensorStream
       @name
     end
 
+    def const_value
+      return nil unless is_const
+
+      @value
+    end
+
     def op
       @op ||= is_const ? _op(:const, self, nil, name: name) : _op(:variable, self, nil, name: name)
     end
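
The new const_value accessor exposes values only for constants, which the shape-inference code in operation.rb above relies on. A small sketch (TensorStream.constant mirrors tf.constant in this gem):

c = TensorStream.constant([2, 3])
c.const_value # => [2, 3]
# Placeholders and variables are not constants, so const_value returns nil.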
@@ -249,7 +256,7 @@ module TensorStream
       dtype = dtype[:dtype] if dtype.is_a?(Hash)
 
       case dtype.to_sym
-      when :float64, :float32, :float
+      when :float64, :float32, :float16, :float
         if !!val == val
           val ? 1.0 : 0.0
         else
@@ -257,7 +264,7 @@ module TensorStream
         end
       when :string
         val.to_s
-      when :int32, :int16, :uint8, :int
+      when :uint32, :int32, :uint64, :uint16, :int16, :int64, :uint8, :int
        if !!val == val
          val ? 1 : 0
        else

data/lib/tensor_stream/train/saver.rb
CHANGED
@@ -18,30 +18,39 @@ module TensorStream
       graph = {}
       gs = eval_global_step(session, global_step)
       output_dump = {
-        variables
-
-        global_step: gs
+        'variables' => variables,
+        'global_step' => gs
       }
 
       vars.each do |variable|
-
+        val = variable.read_value
+        packed_data = TensorStream::Packer.pack(val, variable.data_type)
+        variables[variable.name] = {
+          'shape' => shape_eval(val),
+          'data' => Base64.strict_encode64(packed_data)
+        }
       end
 
       basename = File.basename(outputfile)
       path = File.dirname(outputfile)
 
       new_filename = File.join(path, [basename, gs].compact.join('-'))
-      File.write(new_filename, output_dump.
+      File.write(new_filename, output_dump.to_yaml)
 
       path
     end
 
     def restore(_session, inputfile)
-      input_dump =
+      input_dump = YAML.load(File.read(inputfile))
 
       vars = TensorStream::Graph.get_default_graph.get_collection(GraphKeys::GLOBAL_VARIABLES)
       vars.each do |variable|
-
+        next unless input_dump['variables'].key?(variable.name)
+
+        data = TensorStream::Packer.unpack(Base64.decode64(input_dump['variables'][variable.name]['data']), variable.data_type)
+        shape = input_dump['variables'][variable.name]['shape']
+        variable.buffer = nil
+        variable.value = TensorShape.reshape(data, shape)
       end
     end
 
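
Given the save path above, a checkpoint is now a plain YAML file with Base64-encoded packed variable data. A sketch of inspecting one by hand (the file name is hypothetical, and :float32 is assumed for brevity; real code should use each variable's own data_type):

require 'yaml'
require 'base64'
require 'tensor_stream'

dump = YAML.load(File.read('model.ckpt-100'))
dump['variables'].each do |name, entry|
  values = TensorStream::Packer.unpack(Base64.decode64(entry['data']), :float32)
  puts "#{name}: shape=#{entry['shape'].inspect}, #{values.size} values"
end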
@@ -70,7 +79,7 @@ module TensorStream
     end
 
     def _add_saveable(saveables, seen_ops, saveable)
-      raise
+      raise TensorStream::ValueError, "The same saveable will be restored with two names: #{saveable.name}" if seen_ops.include?(saveable.op)
       saveables << saveable
       seen_ops << saveable.op
     end
@@ -87,7 +96,7 @@ module TensorStream
           tensor_slices << spec.slice_spec
         end
       end
-      i_op(:
+      i_op(:save_ts, filename_tensor, *tensors)
     end
 
     def eval_global_step(session, global_step)
data/lib/tensor_stream/types.rb
CHANGED
@@ -7,6 +7,14 @@ module TensorStream
       :int16
     end
 
+    def self.uint16
+      :uint16
+    end
+
+    def self.float16
+      :float16
+    end
+
     def self.float32
       :float32
     end
@@ -15,6 +23,10 @@ module TensorStream
       :int32
     end
 
+    def self.uint32
+      :uint32
+    end
+
     def self.uint8
       :uint8
     end
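
The new helpers mirror the existing ones and simply return dtype symbols (assuming the enclosing class is Types, as the file name suggests):

TensorStream::Types.float16 # => :float16
TensorStream::Types.uint16  # => :uint16
TensorStream::Types.uint32  # => :uint32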
data/lib/tensor_stream.rb
CHANGED
@@ -22,6 +22,7 @@ require 'tensor_stream/control_flow'
 require 'tensor_stream/dynamic_stitch'
 require 'tensor_stream/nn/nn_ops'
 require 'tensor_stream/evaluator/evaluator'
+require 'tensor_stream/graph_serializers/packer'
 require 'tensor_stream/graph_serializers/serializer'
 require 'tensor_stream/graph_deserializers/protobuf'
 require 'tensor_stream/graph_serializers/pbtext'
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: tensor_stream
 version: !ruby/object:Gem::Version
-  version: 0.9.0
+  version: 0.9.1
 platform: ruby
 authors:
 - Joseph Emmanuel Dayo
 autorequire:
 bindir: exe
 cert_chain: []
-date: 2018-10-
+date: 2018-10-19 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: bundler
@@ -272,6 +272,7 @@ files:
 - lib/tensor_stream/graph_deserializers/protobuf.rb
 - lib/tensor_stream/graph_keys.rb
 - lib/tensor_stream/graph_serializers/graphml.rb
+- lib/tensor_stream/graph_serializers/packer.rb
 - lib/tensor_stream/graph_serializers/pbtext.rb
 - lib/tensor_stream/graph_serializers/serializer.rb
 - lib/tensor_stream/helpers/op_helper.rb
@@ -335,7 +336,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
     version: '0'
 requirements: []
 rubyforge_project:
-rubygems_version: 2.
+rubygems_version: 2.6.10
 signing_key:
 specification_version: 4
 summary: A Pure ruby tensorflow implementation