tensor_stream 0.9.10 → 1.0.0.pre.rc1

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
-   metadata.gz: e57a53a50a03509858b23387c5b82c114168c519aba871f04b86ddda9d5ed58d
-   data.tar.gz: 7a1e662d373c57caa997e7c1d7031e7757576c93eddda016a3cc714608f88ef3
+   metadata.gz: db2ea87e941f738407781b7f63b7cf120d62a87bc1c950f6f272942fa5eed6a2
+   data.tar.gz: b81949fb1e4ed9bca63d9a7ef07f21ce634e4939b032f299cb96afb3016ad6bb
  SHA512:
-   metadata.gz: c284011b3fc7ff7bafeb82a994bffbe4963d0300fb771b5f16fa3bedbc5afeef1c4f44f55e8f9801f9b6410866b5b93fcddabdc5ce797931c599be57f6dda539
-   data.tar.gz: 8a83b49f4d7bde1612466a1db8348114a899058b5dc744041478649972005b59ce773b84714d0023761615b04f1966ae37afd0f1cc25ab3d4a9686be4c73ab6f
+   metadata.gz: fb5f081aac256b4d17222850727bbdd55ce327fd20f384d960c823abc0b7c6aa8f09888136df71e530af7bac835c57383cbe5cef88a959c55b02afb91bc21b13
+   data.tar.gz: 9dd9111eca29f8f3d3f99b94f92a2b8ffbdb2cad686cc9acc9eeda737b0e88483667072191dc77958b92948dd570015720ee2e410f6fdeb7afcfce90b1b561f5
data/README.md CHANGED
@@ -365,34 +365,45 @@ File.write("model.pbtext", result.graph.as_graph_def)
  ## Performance notes

  Comparative performance with respect to other ruby libraries has not yet been measured. However it is
- notable that TruffleRuby and ruby-2.6.0-preview2 with the --jit flag performs considerably better with respect
+ notable that TruffleRuby and ruby-2.6.0 perform considerably better with respect
  to previous versions of ruby (< 2.6)

- Benchmarks running samples/linear_regression.rb on an Intel(R) Core(TM) i5-6200U CPU @ 2.30GHz
+ Benchmarks running samples/linear_regression.rb with tensor_stream 1.0.0 on an AMD(R) Ryzen(TM) 3 1300X CPU

- ruby 2.4
+ ruby 2.5

  ```
  $ ruby -v
- ruby 2.4.0p0 (2016-12-24 revision 57164) [x86_64-linux]
+ ruby 2.5.1p57 (2018-03-29 revision 63029) [x86_64-linux]
  $ ruby samples/linear_regression.rb
- 495 seconds 10000 epochs
+ 296 seconds 3000 epochs
  ```

- ruby 2.6.0-preview2
+ ruby 2.6.0

  ```
  $ ruby -v
- ruby 2.6.0preview2 (2018-05-31 trunk 63539) [x86_64-linux]
- $ ruby --jit samples/linear_regression.rb
- 394 seconds 10000 epochs
+ ruby 2.6.0p0 (2018-12-25 revision 66547) [x86_64-linux]
+ $ ruby samples/linear_regression.rb
+ 232 seconds 10000 epochs
+
+ $ ruby --jit samples/linear_regression.rb
+ 222 seconds 10000 epochs
  ```

  truffleruby
  ```
  $ ruby -v
- truffleruby 1.0.0-rc5, like ruby 2.4.4, GraalVM CE Native [x86_64-linux]
- 219 seconds 10000 epochs
+ truffleruby 1.0.0-rc10, like ruby 2.4.4, GraalVM CE Native [x86_64-linux]
+ 246 seconds 10000 epochs
+ ```
+
+ jruby
+ ```
+ $ ruby -v
+ jruby 9.2.0.0 (2.5.0) 2018-05-24 81156a8 OpenJDK 64-Bit Server VM 25.191-b12 on 1.8.0_191-8u191-b12-0ubuntu0.18.04.1-b12 +jit [linux-x86_64]
+ 205 seconds 10000 epochs
  ```

  For training large networks that work on images, the opencl evaluator is the only way to go.
data/exe/model_utils ADDED
@@ -0,0 +1,24 @@
+ #!/usr/bin/env ruby
+
+ require "bundler/setup"
+ require "tensor_stream"
+ require 'tensor_stream/utils/freezer'
+
+ if ARGV[0].nil?
+   puts "source checkpoint folder not specified"
+   puts "usage: model_utils <checkpoint folder> <target yaml>"
+   puts "example: model_utils sample_model/ frozen.yml"
+   exit(1)
+ end
+
+ if ARGV[1].nil?
+   puts "dest YAML file for frozen model not specified"
+   puts "usage: model_utils <checkpoint folder> <target yaml>"
+   puts "example: model_utils sample_model/ frozen.yml"
+   exit(1)
+ end
+
+ sess = TensorStream.session
+ freezer = TensorStream::Freezer.new
+ freezer.convert(sess, ARGV[0], ARGV[1])
+ exit(0)
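The new `model_utils` executable is a thin CLI over `TensorStream::Freezer`. A minimal programmatic equivalent, assuming a checkpoint folder produced by `Saver#save` (containing `model.yaml` plus a checkpoint file):

```ruby
require 'tensor_stream'
require 'tensor_stream/utils/freezer'

# same effect as: model_utils sample_model/ frozen.yml
sess = TensorStream.session
TensorStream::Freezer.new.convert(sess, 'sample_model/', 'frozen.yml')
```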
@@ -100,18 +100,18 @@ module TensorStream
  end

  # handle 2 tensor math operations
- def vector_op(vector, vector2, op = ->(a, b) { a + b }, switch = false, safe = true)
+ def vector_op(vector, vector2, switch = false, safe = true, &block)
    if get_rank(vector) < get_rank(vector2) # upgrade rank of A
      duplicated = Array.new(vector2.size) do
        vector
      end
-     return vector_op(duplicated, vector2, op, switch)
+     return vector_op(duplicated, vector2, switch, &block)
    end

-   return op.call(vector, vector2) unless vector.is_a?(Array)
+   return yield(vector, vector2) unless vector.is_a?(Array)

    vector.each_with_index.collect do |input, index|
-     next vector_op(input, vector2, op, switch) if input.is_a?(Array) && get_rank(vector) > get_rank(vector2)
+     next vector_op(input, vector2, switch, &block) if input.is_a?(Array) && get_rank(vector) > get_rank(vector2)

      if safe && vector2.is_a?(Array)
        next nil if vector2.size != 1 && index >= vector2.size
@@ -129,9 +129,9 @@ module TensorStream
      end

      if input.is_a?(Array)
-       vector_op(input, z, op, switch)
+       vector_op(input, z, switch, &block)
      else
-       switch ? op.call(z, input) : op.call(input, z)
+       switch ? yield(z, input) : yield(input, z)
      end
    end.compact
  end
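Every caller migrates from passing a lambda to passing a block. A sketch of the call-site change, using array literals in place of evaluated tensor buffers and assuming the unchanged middle of `vector_op` pairs elements as it did in 0.9.x:

```ruby
# before: vector_op(a, b, ->(t, u) { t + u })
# after:  the operation arrives as a block and is invoked via yield
vector_op([1, 2], [[1, 2], [3, 4]]) { |t, u| t + u }
# => [[2, 4], [4, 6]]  (the lower-rank operand is broadcast up first)
```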
@@ -165,12 +165,12 @@ module TensorStream
      end
    end

-   def process_function_op(a, op)
+   def process_function_op(a, &block)
      # ruby scalar
      if (a.is_a?(Tensor) && a.shape.rank > 0) || a.is_a?(Array)
-       vector_op(a, 0, op)
+       vector_op(a, 0, &block)
      else
-       op.call(a, 0)
+       yield a, 0
      end
    end

@@ -264,11 +264,11 @@ module TensorStream
      [new_arr, new_shape]
    end

-   def reduce_axis(current_axis, axis, val, keep_dims, f)
+   def reduce_axis(current_axis, axis, val, keep_dims, &block)
      return val unless val.is_a?(Array)

      r = val.collect do |v|
-       reduce_axis(current_axis + 1, axis, v, keep_dims, f)
+       reduce_axis(current_axis + 1, axis, v, keep_dims, &block)
      end

      should_reduce_axis = axis.nil? || (axis.is_a?(Array) && axis.include?(current_axis)) || (current_axis == axis)
@@ -276,9 +276,13 @@ module TensorStream
      if should_reduce_axis
        reduced_val = r[0]
        if r.size > 1
-         reduced_val = f.call(r[0..val.size])
+         if block_given?
+           reduced_val = yield(r[0..val.size])
+         else
+           reduced_val = r[0..val.size].reduce(:+)
+         end
        elsif r.empty?
-         reduced_val = f.call(nil)
+         reduced_val = yield(nil)
        end
        keep_dims ? [reduced_val] : reduced_val
      else
@@ -286,18 +290,10 @@ module TensorStream
      end
    end

-   def reduce(val, axis, keep_dims, func = nil)
+   def reduce(val, axis, keep_dims, &block)
      rank = get_rank(val)
      return val if axis && axis.is_a?(Array) && axis.empty?

-     func = lambda do |arr|
-       reduced_val = arr[0]
-       arr[1..arr.size].each do |v|
-         reduced_val = vector_op(reduced_val, v, ->(t, u) { t + u })
-       end
-       reduced_val
-     end if func.nil?
-
      axis = if axis.nil?
               nil
             elsif axis.is_a?(Array)
@@ -308,7 +304,7 @@ module TensorStream
               axis < 0 ? rank - axis.abs : axis
             end

-     reduce_axis(0, axis, val, keep_dims, func)
+     reduce_axis(0, axis, val, keep_dims, &block)
    end

    def arr_pad(arr, paddings, data_type = :float32, rank = 0)
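With the default summation lambda deleted from `reduce`, the fallback now lives in `reduce_axis` (`r[0..val.size].reduce(:+)` when no block is given); any other reduction is supplied as a block. A sketch over plain scalars:

```ruby
reduce([1, 2, 4], 0, false)                          # => 7, block-less sum fallback
reduce([1, 2, 4], 0, false) { |arr| arr.reduce(:*) } # => 8, custom reduction via block
```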
@@ -335,7 +335,7 @@ module TensorStream

  register_op %i[select where] do |context, tensor, inputs|
    pred = inputs[0]
-   call_3way_vector_op(pred, inputs[1], inputs[2], context, ->(t, u, v) { t ? u : v })
+   call_3way_vector_op(pred, inputs[1], inputs[2], context) { |t, u, v| t ? u : v }
  end

  register_op :shape do |_context, tensor, inputs|
@@ -3,7 +3,7 @@ module TensorStream
  def CheckOps.included(klass)
    klass.class_eval do
      register_op :assert_equal do |context, tensor, inputs|
-       result = call_vector_op(tensor, :equal, inputs[0], inputs[1], context, ->(t, u) { t == u })
+       result = call_vector_op(tensor, :equal, inputs[0], inputs[1], context) { |t, u| t == u }

        result = result.is_a?(Array) ? result.flatten.uniq : [result]
        prefix = tensor.options[:message] || ""
@@ -3,24 +3,24 @@ module TensorStream
  def MathOps.included(klass)
    klass.class_eval do
      register_op :tanh, no_eval: true do |context, _tensor, inputs|
-       call_op(inputs[0], context, ->(t, _b) { Math.tanh(t) })
+       call_op(inputs[0], context) { |t, _b| Math.tanh(t) }
      end

      register_op :tan, no_eval: true do |context, tensor, inputs|
-       call_op(inputs[0], context, ->(t, _b) { Math.tan(t) })
+       call_op(inputs[0], context) { |t, _b| Math.tan(t) }
      end

      register_op :atan, no_eval: true do |context, _tensor, inputs|
-       call_op(inputs[0], context, ->(t, _b) { Math.atan(t) })
+       call_op(inputs[0], context) { |t, _b| Math.atan(t) }
      end

      register_op :sin, no_eval: true do |context, _tensor, inputs|
-       call_op(inputs[0], context, ->(t, _b) { Math.sin(t) })
+       call_op(inputs[0], context) { |t, _b| Math.sin(t) }
      end

      register_op :add, no_eval: true do |context, tensor, inputs|
        a, b = inputs
-       call_vector_op(tensor, :add, a, b, context, ->(t, u) { t + u })
+       call_vector_op(tensor, :add, a, b, context) { |t, u| t + u }
      end

      register_op :add_n, no_eval: true do |context, tensor, inputs|
@@ -31,7 +31,7 @@ module TensorStream
        a = inputs.pop
        until inputs.empty?
          b = inputs.pop
-         a = call_vector_op(tensor, :add, a, b, context, ->(t, u) { t + u })
+         a = call_vector_op(tensor, :add, a, b, context) { |t, u| t + u }
        end
        a
      end
@@ -39,100 +39,100 @@ module TensorStream

      register_op :sub, no_eval: true do |context, tensor, inputs|
        a, b = inputs
-       call_vector_op(tensor, :sub, a, b, context, ->(t, u) { t - u })
+       call_vector_op(tensor, :sub, a, b, context) { |t, u| t - u }
      end

      register_op %i[floor_mod mod], no_eval: true do |context, tensor, inputs|
        a, b = inputs
-       call_vector_op(tensor, :mod, a, b, context, ->(t, u) { t % u })
+       call_vector_op(tensor, :mod, a, b, context) { |t, u| t % u }
      end

      register_op %i[floor_div], no_eval: true do |context, tensor, inputs|
        a, b = inputs
        if fp_type?(tensor.data_type)
-         call_vector_op(tensor, :div, a, b, context, ->(t, u) { (t / u).to_i.to_f })
+         call_vector_op(tensor, :div, a, b, context) { |t, u| (t / u).to_i.to_f }
        else
-         call_vector_op(tensor, :div, a, b, context, ->(t, u) { t / u })
+         call_vector_op(tensor, :div, a, b, context) { |t, u| t / u }
        end
      end

      register_op :mul, no_eval: true do |context, tensor, inputs|
        a, b = inputs
-       call_vector_op(tensor, :mul, a, b, context, ->(t, u) { t * u })
+       call_vector_op(tensor, :mul, a, b, context) { |t, u| t * u }
      end

      register_op :pow, no_eval: true do |context, tensor, inputs|
        a, b = inputs
-       call_vector_op(tensor, :pow, a, b, context, ->(t, u) { t**u })
+       call_vector_op(tensor, :pow, a, b, context) { |t, u| t**u }
      end

      register_op :squared_difference, no_eval: true do |context, tensor, inputs|
        a, b = inputs
-       call_vector_op(tensor, :squared_difference, a, b, context, ->(t, u) { (t - u) * (t - u) })
+       call_vector_op(tensor, :squared_difference, a, b, context) { |t, u| (t - u) * (t - u) }
      end

      register_op :round, no_eval: true do |context, _tensor, inputs|
-       call_op(inputs[0], context, ->(t, _b) { t.round })
+       call_op(inputs[0], context) { |t, _b| t.round }
      end

      register_op :abs, no_eval: true do |context, _tensor, inputs|
-       call_op(inputs[0], context, ->(t, _b) { t.abs })
+       call_op(inputs[0], context) { |t, _b| t.abs }
      end

      register_op :asin, no_eval: true do |context, _tensor, inputs|
-       call_op(inputs[0], context, ->(t, _b) { Math.asin(t) })
+       call_op(inputs[0], context) { |t, _b| Math.asin(t) }
      end

      register_op :acos, no_eval: true do |context, _tensor, inputs|
-       call_op(inputs[0], context, ->(t, _b) { Math.acos(t) })
+       call_op(inputs[0], context) { |t, _b| Math.acos(t) }
      end

      register_op :cos, no_eval: true do |context, tensor, inputs|
-       call_op(inputs[0], context, ->(t, _b) { Math.cos(t) })
+       call_op(inputs[0], context) { |t, _b| Math.cos(t) }
      end

      register_op :log1p, no_eval: true do |context, _tensor, inputs|
-       call_op(inputs[0], context, ->(t, _b) { Math.log(1 + t) })
+       call_op(inputs[0], context) { |t, _b| Math.log(1 + t) }
      end

      register_op :log, no_eval: true do |context, _tensor, inputs|
-       call_op(inputs[0], context, ->(t, _b) { t < 0 ? Float::NAN : Math.log(t) })
+       call_op(inputs[0], context) { |t, _b| t < 0 ? Float::NAN : Math.log(t) }
      end

      register_op :exp, no_eval: true do |context, _tensor, inputs|
-       call_op(inputs[0], context, ->(t, _b) { Math.exp(t) })
+       call_op(inputs[0], context) { |t, _b| Math.exp(t) }
      end

      register_op :sigmoid, no_eval: true do |context, _tensor, inputs|
-       call_op(inputs[0], context, ->(t, _b) { sigmoid(t) })
+       call_op(inputs[0], context) { |t, _b| sigmoid(t) }
      end

      register_op :sqrt, no_eval: true do |context, _tensor, inputs|
-       call_op(inputs[0], context, ->(t, _b) { Math.sqrt(t) })
+       call_op(inputs[0], context) { |t, _b| Math.sqrt(t) }
      end

      register_op :floor, no_eval: true do |context, _tensor, inputs|
-       call_op(inputs[0], context, ->(t, _b) { t.floor })
+       call_op(inputs[0], context) { |t, _b| t.floor }
      end

      register_op :ceil, no_eval: true do |context, _tensor, inputs|
-       call_op(inputs[0], context, ->(t, _b) { t.ceil })
+       call_op(inputs[0], context) { |t, _b| t.ceil }
      end

      register_op :square, no_eval: true do |context, _tensor, inputs|
-       call_op(inputs[0], context, ->(t, _b) { t * t })
+       call_op(inputs[0], context) { |t, _b| t * t }
      end

      register_op :reciprocal, no_eval: true do |context, _tensor, inputs|
-       call_op(inputs[0], context, ->(t, _b) { 1 / t })
+       call_op(inputs[0], context) { |t, _b| 1 / t }
      end

      register_op %i[neg negate], no_eval: true do |context, tensor, inputs|
-       call_vector_op(tensor, :negate, inputs[0], nil, context, ->(t, _u) { -t })
+       call_vector_op(tensor, :negate, inputs[0], nil, context) { |t, _u| -t }
      end

      register_op :tanh_grad, no_eval: true do |context, _tensor, inputs|
-       call_op(inputs[0], context, ->(t, _b) { 1 - Math.tanh(t) * Math.tanh(t) })
+       call_op(inputs[0], context) { |t, _b| 1 - Math.tanh(t) * Math.tanh(t) }
      end

      register_op(%i[argmax arg_max]) do |_context, tensor, inputs|
@@ -169,70 +169,70 @@ module TensorStream
      c = fp_type?(tensor.data_type) ? 1.0 : 1
      reverse_option = tensor.options[:reverse]
      exclusive = tensor.options[:exclusive]
-     func = lambda do |arr|
-       return c if arr.nil?
-
-       count = arr.size
-       arr = arr.reverse if reverse_option
-       arr = [1] + arr if exclusive

-       start_prod = arr[0]
-       mapped = arr[1...count].map do |v|
-         start_prod = vector_op(start_prod, v, ->(a, b) { a * b })
+     reduction(context, tensor) do |arr|
+       if arr.nil?
+         c
+       else
+         count = arr.size
+         arr = arr.reverse if reverse_option
+         arr = [1] + arr if exclusive
+
+         start_prod = arr[0]
+         mapped = arr[1...count].map do |v|
+           start_prod = vector_op(start_prod, v) { |a, b| a * b }
+         end
+
+         arr = [arr[0]] + mapped
+         reverse_option ? arr.reverse : arr
        end
-
-       arr = [arr[0]] + mapped
-       reverse_option ? arr.reverse : arr
      end
-     reduction(context, tensor, func)
    end

    register_op :sum, noop: true do |context, tensor, _inputs|
-     func = lambda do |arr|
+     reduction(context, tensor) do |arr|
        reduced_val = arr[0]
        arr[1..arr.size].each do |v|
-         reduced_val = vector_op(reduced_val, v, ->(t, u) { t + u })
+         reduced_val = vector_op(reduced_val, v) { |t, u| t + u }
        end
        reduced_val
      end
-     reduction(context, tensor, func)
    end

    register_op :prod, noop: true do |context, tensor, _inputs|
      c = fp_type?(tensor.data_type) ? 1.0 : 1
-
-     func = lambda do |arr|
-       return c if arr.nil?
-
-       reduced_val = arr[0]
-       arr[1..arr.size].each do |v|
-         reduced_val = vector_op(reduced_val, v, ->(a, b) { a * b })
+     reduction(context, tensor) do |arr|
+       if arr.nil?
+         c
+       else
+         reduced_val = arr[0]
+         arr[1..arr.size].each do |v|
+           reduced_val = vector_op(reduced_val, v) { |a, b| a * b }
+         end
+         reduced_val
        end
-       reduced_val
      end
-
-     reduction(context, tensor, func)
    end

    register_op :sigmoid_grad, no_eval: true do |context, tensor, inputs|
      a, b = inputs
-     call_vector_op(tensor, :sigmoid_grad, a, b, context, ->(t, u) { u * sigmoid(t) * (1 - sigmoid(t)) })
+     call_vector_op(tensor, :sigmoid_grad, a, b, context) { |t, u| u * sigmoid(t) * (1 - sigmoid(t)) }
    end

    register_op :mean, noop: true do |context, tensor, _inputs|
      c = fp_type?(tensor.data_type) ? 0.0 : 0
-     func = lambda do |arr|
+     reduction(context, tensor) do |arr|
        return c if arr.nil?

        reduced_val = arr[0]
        arr[1..arr.size].each do |v|
-         reduced_val = vector_op(reduced_val, v, ->(a, b) { a + b })
+         reduced_val = vector_op(reduced_val, v) { |a, b| a + b }
        end

-       vector_op(reduced_val, nil, ->(a, _b) { a / arr.size })
+       vector_op(reduced_val, nil) { |a, _b| a / arr.size }
      end
-
-     reduction(context, tensor, func)
    end

    register_op :mat_mul do |_context, tensor, inputs|
@@ -252,19 +252,19 @@ module TensorStream
    end

    register_op %i[max maximum], noop: true do |context, tensor, inputs|
-     call_vector_op(tensor, :max, inputs[0], inputs[1], context, ->(t, u) { [t, u].max })
+     call_vector_op(tensor, :max, inputs[0], inputs[1], context) { |t, u| [t, u].max }
    end

    register_op %i[min minimum], noop: true do |context, tensor, inputs|
-     call_vector_op(tensor, :min, inputs[0], inputs[1], context, ->(t, u) { [t, u].min })
+     call_vector_op(tensor, :min, inputs[0], inputs[1], context) { |t, u| [t, u].min }
    end

-   def reduction(child_context, tensor, func)
+   def reduction(child_context, tensor, &block)
      val = global_eval(tensor, tensor.inputs[0], child_context)
      axis = global_eval(tensor, tensor.inputs[1], child_context)
      keep_dims = global_eval(tensor, tensor.options[:keepdims], child_context)

-     reduce(val, axis, keep_dims, func)
+     reduce(val, axis, keep_dims, &block)
    end
  end
end
@@ -7,7 +7,7 @@ module TensorStream
    target_var, learning_rate, delta = inputs
    assign = tensor.inputs[0] || tensor

-   assign.container = process_vector_math_op(tensor, target_var, delta, context, ->(t, u) { t - u * learning_rate })
+   assign.container = process_vector_math_op(tensor, target_var, delta, context) { |t, u| t - u * learning_rate }
    assign.container
  end

@@ -111,7 +111,7 @@ module TensorStream
    end
    reshaped_losses = TensorShape.reshape(losses.flatten, input_shape)
    reshaped_backprops = TensorShape.reshape(backprobs.flatten, input_shape)
-   reshaped_losses = reduce(reshaped_losses, rank, false)
+   reshaped_losses = reduce(reshaped_losses, rank, false) { |a| a.reduce(:+) }
    TensorStream::Evaluator::OutputGroup.new([reshaped_losses, reshaped_backprops], [tensor.inputs[0].data_type, tensor.inputs[0].data_type])
  end
end
@@ -210,7 +210,7 @@ module TensorStream
  end

  register_op :relu6 do |context, tensor, inputs|
-   call_vector_op(tensor, :relu6, inputs[0], inputs[1], context, ->(t, u) { [[t, 0].max, 6].min })
+   call_vector_op(tensor, :relu6, inputs[0], inputs[1], context) { |t, u| [[t, 0].max, 6].min }
  end

  register_op :conv2d do |_context, tensor, inputs|
@@ -102,11 +102,11 @@ module TensorStream
  end

  register_op(:cast) do |context, tensor, inputs|
-   call_op(inputs[0], context, ->(t, _b) { Tensor.cast_dtype(t, tensor.data_type) })
+   call_op(inputs[0], context) { |t, _b| Tensor.cast_dtype(t, tensor.data_type) }
  end

  register_op(:sign) do |context, tensor, inputs|
-   func = lambda { |x, _b|
+   call_op(inputs[0], context) do |x, _b|
      if x.zero? || (x.is_a?(Float) && x.nan?)
        0
      elsif x < 0
@@ -116,21 +116,19 @@ module TensorStream
      else
        raise 'assert: cannot be here'
      end
-   }
-
-   call_op(inputs[0], context, func)
+   end
  end

  register_op(:logical_and) do |context, tensor, inputs|
-   call_vector_op(tensor, :logical_and, inputs[0], inputs[1], context, ->(t, u) { t && u })
+   call_vector_op(tensor, :logical_and, inputs[0], inputs[1], context) { |t, u| t && u }
  end

  register_op(:equal) do |context, tensor, inputs|
-   call_vector_op(tensor, :equal, inputs[0], inputs[1], context, ->(t, u) { t == u })
+   call_vector_op(tensor, :equal, inputs[0], inputs[1], context) { |t, u| t == u }
  end

  register_op(:not_equal) do |context, tensor, inputs|
-   call_vector_op(tensor, :not_equal, inputs[0], inputs[1], context, ->(t, u) { t != u })
+   call_vector_op(tensor, :not_equal, inputs[0], inputs[1], context) { |t, u| t != u }
  end

  register_op :placeholder, no_eval: true do |context, tensor, _inputs|
@@ -175,35 +173,35 @@ module TensorStream
  register_op :assign_add, noop: true do |context, tensor, _inputs|
    assign = tensor.inputs[0] || tensor

-   assign.container = process_vector_math_op(tensor, tensor.inputs[0], tensor.inputs[1], context, ->(t, u) { t + u })
+   assign.container = process_vector_math_op(tensor, tensor.inputs[0], tensor.inputs[1], context) { |t, u| t + u }
    assign.container
  end

  register_op :assign_sub, noop: true do |context, tensor, _inputs|
    assign = tensor.inputs[0] || tensor

-   assign.container = process_vector_math_op(tensor, tensor.inputs[0], tensor.inputs[1], context, ->(t, u) { t - u })
+   assign.container = process_vector_math_op(tensor, tensor.inputs[0], tensor.inputs[1], context) { |t, u| t - u }
    assign.container
  end

  register_op :less do |context, tensor, inputs|
    a, b = inputs
-   call_vector_op(tensor, :less, a, b, context, ->(t, u) { t < u })
+   call_vector_op(tensor, :less, a, b, context) { |t, u| t < u }
  end

  register_op :greater do |context, tensor, inputs|
    a, b = inputs
-   call_vector_op(tensor, :greater, a, b, context, ->(t, u) { t > u })
+   call_vector_op(tensor, :greater, a, b, context) { |t, u| t > u }
  end

  register_op :greater_equal do |context, tensor, inputs|
    a, b = inputs
-   call_vector_op(tensor, :greater_equal, a, b, context, ->(t, u) { t >= u })
+   call_vector_op(tensor, :greater_equal, a, b, context) { |t, u| t >= u }
  end

  register_op :less_equal do |context, tensor, inputs|
    a, b = inputs
-   call_vector_op(tensor, :greater_equal, a, b, context, ->(t, u) { t <= u })
+   call_vector_op(tensor, :less_equal, a, b, context) { |t, u| t <= u }
  end

  register_op :broadcast_transform do |_context, _tensor, inputs|
@@ -220,7 +218,7 @@ module TensorStream
  end

  register_op %i[div real_div], noop: true do |context, tensor, inputs|
-   process_vector_math_op(tensor, inputs[0], inputs[1], context, ->(t, u) { t / u })
+   process_vector_math_op(tensor, inputs[0], inputs[1], context) { |t, u| t / u }
  end

  register_op :broadcast_gradient_args do |_context, tensor, inputs|
@@ -241,30 +239,46 @@ module TensorStream
    outputfile = inputs[0]
    inputs = tensor.inputs.dup

-   basename = File.basename(outputfile)
-   path = File.dirname(outputfile)
-
-   new_filename = File.join(path, [basename, gs].compact.join('-'))
-
    inputs.shift
    variables = {}
    inputs.each do |savable|
-     variables[savable.name] = TensorStream::Packer.pack(savable.read_value, savable.data_type)
+     val = savable.container
+     packed_data = Zlib::Deflate.deflate(TensorStream::Packer.pack(val, savable.data_type))
+     variables[savable.name] = {
+       'shape' => shape_eval(val),
+       'data' => Base64.strict_encode64(packed_data)
+     }
    end
-   File.write(new_filename, variables.to_yaml)
+
+   File.write(outputfile, { 'variables' => variables }.to_yaml)
+   nil
  end

- register_op :restore_v2 do |context, tensor, inputs|
-   # prefix, tensor_names, shape_and_slices = inputs[0..3]
+ register_op :restore_ts do |_context, tensor, inputs|
+   inputs = inputs.dup
+   filename = inputs.shift
+   tensor_names = inputs
+
+   input_dump = YAML.safe_load(File.read(filename), [Symbol])
+   vars = tensor.graph.get_collection(GraphKeys::GLOBAL_VARIABLES)
+
+   vars.select! { |v| input_dump['variables'].key?(v.name) && tensor_names.include?(v.name) }
+   vars.each do |variable|
+     data = TensorStream::Packer.unpack(Zlib::Inflate.inflate(Base64.decode64(input_dump['variables'][variable.name]['data'])), variable.data_type)
+     shape = input_dump['variables'][variable.name]['shape']
+     variable.buffer = nil
+     variable.value = TensorShape.reshape(data, shape)
+   end
+
+   nil
  end

  register_op :check_numerics do |context, tensor, inputs|
    message = tensor.options[:message]
-   f = lambda { |t, _b|
+   call_op(inputs[0], context) do |t, _b|
      raise TensorStream::InvalidArgumentError, "#{message} Invalid argument" if t.nan? || t.infinite?
      t
-   }
-   call_op(inputs[0], context, f)
+   end
  end

  def eval_operation(tensor, child_context)
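The `:save_ts` op serializes each variable by packing, deflating, and Base64-encoding it; `:restore_ts` reverses the pipeline. A standalone sketch of that round trip (the value and dtype are illustrative):

```ruby
require 'zlib'
require 'base64'

val = [1.0, 2.0, 3.0]
# encode, as :save_ts does above
packed  = TensorStream::Packer.pack(val, :float32)
encoded = Base64.strict_encode64(Zlib::Deflate.deflate(packed))

# decode, as :restore_ts does above
decoded = TensorStream::Packer.unpack(Zlib::Inflate.inflate(Base64.decode64(encoded)), :float32)
```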
@@ -342,18 +356,18 @@ module TensorStream
    end
  end

- def call_op(a, child_context, func)
+ def call_op(a, child_context, &block)
    a = complete_eval(a, child_context)
-   process_function_op(a, func)
+   process_function_op(a, &block)
  end

- def call_vector_op(tensor, op, a, b, child_context, func)
-   process_vector_math_op(tensor, a, b, child_context, func)
+ def call_vector_op(tensor, op, a, b, child_context, &block)
+   process_vector_math_op(tensor, a, b, child_context, &block)
  rescue FullEvalNotPossible
    TensorStream.send(op.to_sym, a, b)
  end

- def process_vector_math_op(tensor, a, b, child_context, op)
+ def process_vector_math_op(tensor, a, b, child_context, &block)
    eval_a = global_eval(tensor, a, child_context) unless a.nil?
    eval_b = global_eval(tensor, b, child_context) unless b.nil?

@@ -361,7 +375,7 @@ module TensorStream

    # ruby scalar
    eval_a, eval_b = broadcast(eval_a, eval_b)
-   vector_op(eval_a, eval_b, op)
+   vector_op(eval_a, eval_b, &block)
    # if get_rank(eval_a).zero?
    #   if get_rank(eval_b).zero?
    #     op.call(eval_a, eval_b)
@@ -411,16 +425,16 @@ module TensorStream
  end

  # handle 3 tensor math operations
- def call_3way_vector_op(v_a, v_b, v_c, child_context, op = ->(a, b, c) { a + b + c })
-   return op.call(v_a, v_b, v_c) unless v_a.is_a?(Array)
+ def call_3way_vector_op(v_a, v_b, v_c, child_context, &block)
+   return yield(v_a, v_b, v_c) unless v_a.is_a?(Array)

    v_a.each_with_index.collect do |v1, index|
      v2 = v_b[index]
      v3 = v_c.is_a?(Array) ? v_c[index] : v_c
      if v1.is_a?(Array)
-       call_3way_vector_op(v1, v2, v3, child_context, op)
+       call_3way_vector_op(v1, v2, v3, child_context, &block)
      else
-       op.call(v1, v2, v3)
+       yield(v1, v2, v3)
      end
    end
  end
@@ -118,6 +118,10 @@ module TensorStream
    get_node(name)
  end

+ def [](name)
+   get_node(name)
+ end
+
  def add_node!(name, node)
    @nodes[name] = node
    node
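The new `Graph#[]` alias makes node lookup read like hash access; the reworked `Saver` below leans on it to find or lazily create its filename placeholder:

```ruby
graph = TensorStream::Graph.get_default_graph
filename = graph['ts_filename'] ||
           TensorStream.placeholder(:string, name: 'ts_filename', shape: [])
```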
@@ -219,6 +223,7 @@ module TensorStream
    @placeholder_counter += 1

    return '' if @placeholder_counter == 1
+
    "_#{@placeholder_counter}"
  end

@@ -1,15 +1,36 @@
module TensorStream
+ ##
+ # Class for deserialization from a YAML file
  class YamlLoader
    def initialize(graph = nil)
      @graph = graph || TensorStream.get_default_graph
    end

+   ##
+   # Loads a model YAML file and builds the model from it
+   #
+   # Args:
+   #   filename: String - Location of the YAML file
+   #
+   # Returns: Graph where the model is restored to
+   def load_from_file(filename)
+     load_from_string(File.read(filename))
+   end
+
+   ##
+   # Loads a model from a YAML string and builds the model from it
+   #
+   # Args:
+   #   buffer: String - String in YAML format of the model
+   #
+   # Returns: Graph where the model is restored to
    def load_from_string(buffer)
-     serialized_ops = YAML.safe_load(buffer, [Symbol])
+     serialized_ops = YAML.safe_load(buffer, [Symbol], [], true)
      serialized_ops.each do |op_def|
        inputs = op_def[:inputs].map { |i| @graph.get_tensor_by_name(i) }
        options = {}

+       new_var = nil
        if op_def.dig(:attrs, :container)
          new_var = Variable.new(op_def.dig(:attrs, :data_type))
          var_shape = op_def.dig(:attrs, :container, :shape)
@@ -30,9 +51,11 @@ module TensorStream
        new_op.data_type = new_op.set_data_type(op_def.dig(:attrs, :data_type))
        new_op.is_const = new_op.infer_const
        new_op.given_name = new_op.name
+       new_var.op = new_op if new_var

        @graph.add_node(new_op)
      end
+     @graph
    end
  end
end
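Since `load_from_string` now returns the graph, restoring a serialized model becomes a one-liner. A minimal sketch, assuming a `model.yaml` written by `write_graph`; the node name is hypothetical:

```ruby
graph = TensorStream::YamlLoader.new.load_from_file('sample_model/model.yaml')
prediction = graph['prediction'] # hypothetical node name, via the new Graph#[]
```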
@@ -91,7 +91,6 @@ module TensorStream
          @lines << "#{spaces}}"
        when 'TensorStream::Variable'
        else
-         binding.pry
          raise "unknown type #{val.class}"
        end
      end
@@ -0,0 +1,68 @@
+ module TensorStream
+   module OpPatch
+     def self.included(klass)
+       ops = if klass == Array
+               { :+ => 'add', :- => 'sub', :* => 'mul' }
+             else
+               { :+ => 'add', :- => 'sub', :/ => 'div', :% => 'mod', :* => 'mul', :** => 'pow' }
+             end
+
+       ops.each do |m, name|
+         klass.send(:alias_method, :"_tensor_stream_#{name}_orig", m)
+         klass.send(:remove_method, m)
+       end
+     end
+
+     def +(other)
+       if other.is_a?(TensorStream::Tensor)
+         TensorStream.convert_to_tensor(self, dtype: other.data_type) + other
+       else
+         _tensor_stream_add_orig(other)
+       end
+     end
+
+     def -(other)
+       if other.is_a?(TensorStream::Tensor)
+         TensorStream.convert_to_tensor(self, dtype: other.data_type) - other
+       else
+         _tensor_stream_sub_orig(other)
+       end
+     end
+
+     def *(other)
+       if other.is_a?(TensorStream::Tensor)
+         TensorStream.convert_to_tensor(self, dtype: other.data_type) * other
+       else
+         _tensor_stream_mul_orig(other)
+       end
+     end
+
+     def /(other)
+       if other.is_a?(TensorStream::Tensor)
+         TensorStream.convert_to_tensor(self, dtype: other.data_type) / other
+       else
+         _tensor_stream_div_orig(other)
+       end
+     end
+
+     def %(other)
+       if other.is_a?(TensorStream::Tensor)
+         TensorStream.convert_to_tensor(self, dtype: other.data_type) % other
+       else
+         _tensor_stream_mod_orig(other)
+       end
+     end
+
+     def **(other)
+       if other.is_a?(TensorStream::Tensor)
+         TensorStream.convert_to_tensor(self, dtype: other.data_type)**other
+       else
+         _tensor_stream_pow_orig(other)
+       end
+     end
+   end
+ end
+
+ Integer.include TensorStream::OpPatch
+ Float.include TensorStream::OpPatch
+ Array.include TensorStream::OpPatch
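With the operator patches extracted into `OpPatch`, plain numbers and arrays cooperate with tensors from either side of an expression. A sketch (results assume the usual broadcasting semantics):

```ruby
a = TensorStream.constant([1.0, 2.0])
b = 2.0 * a        # Float#* sees a Tensor and converts itself first
c = [1.0, 1.0] + a # Array#+ likewise builds an :add op

sess = TensorStream.session
sess.run(b) # => [2.0, 4.0]
sess.run(c) # => [2.0, 3.0]
```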
@@ -2,19 +2,6 @@
module TensorStream
  # various monkey patches to FixNum types
  module MonkeyPatch
-   def self.included(klass)
-     ops = if klass == Array
-             {:+ => 'add', :- => 'sub', :* => 'mul'}
-           else
-             {:+ => 'add', :- => 'sub', :/ => 'div', :% => 'mod', :* => 'mul', :** => 'pow' }
-           end
-
-     ops.each do |m, name|
-       klass.send(:alias_method, :"_tensor_stream_#{name}_orig", m)
-       klass.send(:remove_method, m)
-     end
-   end
-
    def shape
      TensorStream.shape_eval(self)
    end
@@ -22,53 +9,5 @@ module TensorStream
    def t(name = nil, dtype: nil)
      TensorStream.convert_to_tensor(self, name: name, dtype: dtype)
    end
-
-   def +(other)
-     if other.is_a?(TensorStream::Tensor)
-       TensorStream.convert_to_tensor(self, dtype: other.data_type) + other
-     else
-       _tensor_stream_add_orig(other)
-     end
-   end
-
-   def -(other)
-     if other.is_a?(TensorStream::Tensor)
-       TensorStream.convert_to_tensor(self, dtype: other.data_type) - other
-     else
-       _tensor_stream_sub_orig(other)
-     end
-   end
-
-   def *(other)
-     if other.is_a?(TensorStream::Tensor)
-       TensorStream.convert_to_tensor(self, dtype: other.data_type) * other
-     else
-       _tensor_stream_mul_orig(other)
-     end
-   end
-
-   def /(other)
-     if other.is_a?(TensorStream::Tensor)
-       TensorStream.convert_to_tensor(self, dtype: other.data_type) * other
-     else
-       _tensor_stream_div_orig(other)
-     end
-   end
-
-   def %(other)
-     if other.is_a?(TensorStream::Tensor)
-       TensorStream.convert_to_tensor(self, dtype: other.data_type) % other
-     else
-       _tensor_stream_mod_orig(other)
-     end
-   end
-
-   def **(other)
-     if other.is_a?(TensorStream::Tensor)
-       TensorStream.convert_to_tensor(self, dtype: other.data_type)**other
-     else
-       _tensor_stream_pow_orig(other)
-     end
-   end
  end
end
@@ -137,7 +137,7 @@ module TensorStream
    end

    if shapes_known
-     inputs.collect { |input| cons(input.shape.shape, dtype: out_type) }
+     inputs.collect { |input| cons(input.shape.shape, dtype: out_type).op }
    else
      res = _op(:shape_n, *inputs, out_type: out_type, name: name)
      Array.new(inputs.size) do |index|
@@ -5,13 +5,13 @@ module TensorStream
  def self.profile_for(session, order_by: :slowest)
    context = session.last_session_context
    eval_times = context[:profile][:operations].map do |name, profile|
-     [name, profile[:eval_time], profile[:shape], profile[:tensor].source]
+     [name, profile[:op], profile[:eval_time], profile[:shape]]
    end

    if order_by == :slowest
-     eval_times.sort_by { |a| a[1] }.reverse!
+     eval_times.sort_by { |a| a[2] }.reverse!
    else
-     eval_times.sort_by { |a| a[1] }
+     eval_times.sort_by { |a| a[2] }
    end
  end
end
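Profile rows now carry the op symbol before the timing, so the eval time moves to index 2. A sketch of consuming the reordered tuples (the enclosing utility module is not named in this hunk, so the bare `profile_for` call is a stand-in):

```ruby
# each row is [name, op, eval_time, shape]
profile_for(session).first(5).each do |name, op, eval_time, shape|
  puts format('%-30s %-15s %10.5f %s', name, op, eval_time, shape.inspect)
end
```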
@@ -51,7 +51,8 @@ module TensorStream

    context = {
      _cache: @session_cache,
-     _options: options.merge(@evaluator_options)
+     _options: options.merge(@evaluator_options),
+     profile: { step: 0, operations: {} },
    }

    # scan for placeholders and assign value
@@ -62,18 +63,17 @@ module TensorStream
      elsif k.is_a?(String)
        target_graph = args[0].graph
        node = target_graph.get_node(k)
-       if node.operation == :placeholder
-         context[k.to_sym] = options[:feed_dict][k]
-       else
-         raise "Cannot find placeholder with the name of #{k}"
-       end
+       raise "Cannot find placeholder with the name of #{k}" if node.operation != :placeholder
+
+       context[k.to_sym] = options[:feed_dict][k]
+     elsif k.is_a?(Operation) && k.operation == :placeholder
+       context[k.name.to_sym] = options[:feed_dict][k]
      else
        raise "Invalid placeholder type passed, key must be a string or a placeholder type"
      end
    end
  end
-
  args.each { |t| prepare_evaluators(t, context) }
  @last_session_context = context

@@ -119,6 +119,7 @@ module TensorStream
    graph = tensor.graph
    graph.nodes.select { |k, v| selector.call(k, v) }.collect do |k, node|
      next unless @last_session_context[node.name]
+
      "#{k} #{node.to_math(true, 1)} = #{@last_session_context[node.name]}"
    end.compact
  end
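Feed keys may now be the placeholder op itself rather than only its string name, since `run` resolves both forms into the context. A sketch, assuming `TensorStream.placeholder` returns the placeholder op as the new `elsif` branch expects:

```ruby
x = TensorStream.placeholder(:float32, name: 'x')
sess = TensorStream.session
sess.run(x * 2.0, feed_dict: { 'x' => 3.0 }) # string key, resolved via get_node
sess.run(x * 2.0, feed_dict: { x => 3.0 })   # placeholder key, new in this change
```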
@@ -7,6 +7,16 @@ module TensorStream
  class Saver
    include TensorStream::OpHelper

+   def initialize
+     graph = TensorStream::Graph.get_default_graph
+     vars = graph.get_collection(GraphKeys::GLOBAL_VARIABLES)
+
+     @filename = graph['ts_filename'] || TensorStream.placeholder(:string, name: 'ts_filename', shape: [])
+
+     @save_op = _op(:save_ts, @filename, *vars)
+     @restore_op = _op(:restore_ts, @filename, *vars.map(&:name))
+   end
+
    def save(session, outputdir, global_step: nil,
             latest_filename: nil,
             meta_graph_suffix: 'meta',
@@ -19,25 +29,13 @@ module TensorStream
      variables = {}

      gs = eval_global_step(session, global_step)
-     output_dump = {
-       'variables' => variables,
-       'global_step' => gs
-     }
-
-     vars.each do |variable|
-       val = variable.read_value
-       packed_data = Zlib::Deflate.deflate(TensorStream::Packer.pack(val, variable.data_type))
-       variables[variable.name] = {
-         'shape' => shape_eval(val),
-         'data' => Base64.strict_encode64(packed_data)
-       }
-     end

      FileUtils.mkdir_p(outputdir)
      basename = 'model'
      File.write(File.join(outputdir, "#{basename}.meta"), { "gs" => gs }.to_json)
      new_filename = File.join(outputdir, [basename, gs, '.ckpt'].compact.join('-'))
-     File.write(new_filename, output_dump.to_yaml)
+     session.run(@save_op, feed_dict: { @filename => new_filename })
+
      if write_meta_graph
        graph_filename = "#{basename}.yaml"
        TensorStream.train.write_graph(graph, outputdir, graph_filename, serializer: :yaml)
@@ -45,20 +43,15 @@ module TensorStream
      outputdir
    end

-   def restore(_session, modelpath)
-     meta_data = JSON.parse(File.read(File.join(modelpath, "model.meta")))
-     gs = meta_data['gs']
-     input_dump = YAML.safe_load(File.read(File.join(modelpath, ['model', gs, '.ckpt'].compact.join('-'))))
+   def restore(session, modelpath)
+     meta_file = File.join(modelpath, "model.meta")
+     return unless File.exist?(meta_file)

-     vars = TensorStream::Graph.get_default_graph.get_collection(GraphKeys::GLOBAL_VARIABLES)
-     vars.each do |variable|
-       next unless input_dump['variables'].key?(variable.name)
+     meta_data = JSON.parse(File.read(meta_file))
+     gs = meta_data['gs']
+     filename = File.join(modelpath, ['model', gs, '.ckpt'].compact.join('-'))

-       data = TensorStream::Packer.unpack(Zlib::Inflate.inflate(Base64.decode64(input_dump['variables'][variable.name]['data'])), variable.data_type)
-       shape = input_dump['variables'][variable.name]['shape']
-       variable.buffer = nil
-       variable.value = TensorShape.reshape(data, shape)
-     end
+     session.run(@restore_op, feed_dict: { @filename => filename })
    end

    private
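Saving and restoring now run through the graph's `:save_ts`/`:restore_ts` ops rather than writing YAML directly in the saver. A sketch of the new flow (paths illustrative):

```ruby
saver = TensorStream::Train::Saver.new # builds the :save_ts / :restore_ts ops
sess = TensorStream.session
sess.run(TensorStream.global_variables_initializer)

saver.save(sess, 'sample_model/')    # writes model.meta plus a model...ckpt file
saver.restore(sess, 'sample_model/') # silently returns if model.meta is absent
```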
@@ -1,18 +1,24 @@
module TensorStream
  class Freezer
-   include OpHelper
+   include TensorStream::OpHelper

    ##
    # Utility class to convert variables to constants for production deployment
    #
-   def convert(model_file, checkpoint_file, output_file)
+   def convert(session, checkpoint_folder, output_file)
+     model_file = File.join(checkpoint_folder, 'model.yaml')
      TensorStream.graph.as_default do |current_graph|
        YamlLoader.new.load_from_string(File.read(model_file))
        saver = TensorStream::Train::Saver.new
-       saver.restore(nil, checkpoint_file)
+       saver.restore(session, checkpoint_folder)
+
+       # collect all assign ops and remove them from the graph
+       remove_nodes = Set.new(current_graph.nodes.values.select { |op| op.is_a?(TensorStream::Operation) && op.operation == :assign }.map { |op| op.consumers.to_a }.flatten.uniq)
+
        output_buffer = TensorStream::Yaml.new.get_string(current_graph) do |graph, node_key|
          node = graph.get_tensor_by_name(node_key)
-         if node.operation == :variable_v2
+         case node.operation
+         when :variable_v2
            value = node.container
            options = {
              value: value,
@@ -26,8 +32,10 @@ module TensorStream
            const_op.shape = TensorShape.new(shape_eval(value))

            const_op
+         when :assign
+           nil
          else
-           node
+           remove_nodes.include?(node.name) ? nil : node
          end
        end
        File.write(output_file, output_buffer)
@@ -1,5 +1,5 @@
module TensorStream
- VERSION = '0.9.10'.freeze
+ VERSION = '1.0.0-rc1'.freeze

  def self.version
    VERSION
@@ -35,12 +35,13 @@ Gem::Specification.new do |spec|
  spec.add_development_dependency "rspec", "~> 3.0"
  spec.add_development_dependency "awesome_print"
  spec.add_development_dependency "rubocop"
- spec.add_development_dependency "pry-byebug"
- spec.add_development_dependency "byepry"
+ if RUBY_ENGINE == 'ruby'
+   spec.add_development_dependency "pry-byebug"
+   spec.add_development_dependency "byepry"
+ end
  spec.add_development_dependency "colorize"
  spec.add_development_dependency "rspec_junit_formatter"
  spec.add_development_dependency "mnist-learn"
- spec.add_development_dependency "opencl_ruby_ffi"
  spec.add_development_dependency "simplecov"
  spec.add_dependency "deep_merge"
  spec.add_dependency "concurrent-ruby"
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: tensor_stream
  version: !ruby/object:Gem::Version
-   version: 0.9.10
+   version: 1.0.0.pre.rc1
  platform: ruby
  authors:
  - Joseph Emmanuel Dayo
  autorequire:
  bindir: exe
  cert_chain: []
- date: 2019-01-02 00:00:00.000000000 Z
+ date: 2019-01-06 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
    name: bundler
@@ -150,20 +150,6 @@ dependencies:
      - - ">="
        - !ruby/object:Gem::Version
          version: '0'
- - !ruby/object:Gem::Dependency
-   name: opencl_ruby_ffi
-   requirement: !ruby/object:Gem::Requirement
-     requirements:
-     - - ">="
-       - !ruby/object:Gem::Version
-         version: '0'
-   type: :development
-   prerelease: false
-   version_requirements: !ruby/object:Gem::Requirement
-     requirements:
-     - - ">="
-       - !ruby/object:Gem::Version
-         version: '0'
  - !ruby/object:Gem::Dependency
    name: simplecov
    requirement: !ruby/object:Gem::Requirement
@@ -226,7 +212,8 @@ description: A reimplementation of TensorFlow for ruby. This is a ground up impl
    well with support for an opencl evaluator.
  email:
  - joseph.dayo@gmail.com
- executables: []
+ executables:
+ - model_utils
  extensions: []
  extra_rdoc_files: []
  files:
@@ -253,6 +240,7 @@ files:
  - data_actual.json
  - data_expected.json
  - data_input.json
+ - exe/model_utils
  - lib/tensor_stream.rb
  - lib/tensor_stream/constant.rb
  - lib/tensor_stream/control_flow.rb
@@ -293,6 +281,7 @@ files:
  - lib/tensor_stream/monkey_patches/array.rb
  - lib/tensor_stream/monkey_patches/float.rb
  - lib/tensor_stream/monkey_patches/integer.rb
+ - lib/tensor_stream/monkey_patches/op_patch.rb
  - lib/tensor_stream/monkey_patches/patch.rb
  - lib/tensor_stream/nn/nn_ops.rb
  - lib/tensor_stream/operation.rb
@@ -348,9 +337,9 @@ required_ruby_version: !ruby/object:Gem::Requirement
      version: '0'
  required_rubygems_version: !ruby/object:Gem::Requirement
    requirements:
-   - - ">="
+   - - ">"
      - !ruby/object:Gem::Version
-     version: '0'
+     version: 1.3.1
  requirements: []
  rubygems_version: 3.0.1
  signing_key: