tensor_stream 0.6.1 → 0.7.0
- checksums.yaml +4 -4
- data/.rubocop.yml +10 -0
- data/CHANGELOG.md +8 -0
- data/README.md +40 -1
- data/benchmark/benchmark.rb +4 -1
- data/lib/tensor_stream.rb +5 -0
- data/lib/tensor_stream/debugging/debugging.rb +4 -2
- data/lib/tensor_stream/device.rb +2 -1
- data/lib/tensor_stream/evaluator/base_evaluator.rb +43 -32
- data/lib/tensor_stream/evaluator/evaluator.rb +0 -1
- data/lib/tensor_stream/evaluator/opencl/kernels/acos.cl +8 -0
- data/lib/tensor_stream/evaluator/opencl/kernels/apply_gradient.cl +9 -0
- data/lib/tensor_stream/evaluator/opencl/kernels/asin.cl +9 -0
- data/lib/tensor_stream/evaluator/opencl/kernels/floor_mod.cl +3 -0
- data/lib/tensor_stream/evaluator/opencl/kernels/log_softmax.cl +26 -0
- data/lib/tensor_stream/evaluator/opencl/kernels/max.cl +5 -5
- data/lib/tensor_stream/evaluator/opencl/kernels/min.cl +46 -0
- data/lib/tensor_stream/evaluator/opencl/kernels/real_div.cl +3 -0
- data/lib/tensor_stream/evaluator/opencl/kernels/softmax_cross.cl +27 -0
- data/lib/tensor_stream/evaluator/opencl/kernels/softmax_cross_grad.cl +28 -0
- data/lib/tensor_stream/evaluator/opencl/opencl_buffer.rb +5 -6
- data/lib/tensor_stream/evaluator/opencl/opencl_evaluator.rb +200 -265
- data/lib/tensor_stream/evaluator/operation_helpers/array_ops_helper.rb +4 -8
- data/lib/tensor_stream/evaluator/ruby_evaluator.rb +193 -122
- data/lib/tensor_stream/exceptions.rb +6 -0
- data/lib/tensor_stream/graph.rb +21 -6
- data/lib/tensor_stream/graph_builder.rb +67 -0
- data/lib/tensor_stream/graph_deserializers/protobuf.rb +271 -0
- data/lib/tensor_stream/graph_keys.rb +1 -0
- data/lib/tensor_stream/graph_serializers/pbtext.rb +11 -10
- data/lib/tensor_stream/helpers/op_helper.rb +7 -33
- data/lib/tensor_stream/helpers/string_helper.rb +16 -0
- data/lib/tensor_stream/math_gradients.rb +67 -44
- data/lib/tensor_stream/nn/nn_ops.rb +7 -1
- data/lib/tensor_stream/operation.rb +14 -27
- data/lib/tensor_stream/ops.rb +82 -29
- data/lib/tensor_stream/session.rb +4 -0
- data/lib/tensor_stream/tensor.rb +30 -12
- data/lib/tensor_stream/tensor_shape.rb +1 -1
- data/lib/tensor_stream/train/gradient_descent_optimizer.rb +37 -4
- data/lib/tensor_stream/train/saver.rb +46 -0
- data/lib/tensor_stream/train/utils.rb +37 -0
- data/lib/tensor_stream/trainer.rb +2 -0
- data/lib/tensor_stream/utils.rb +24 -14
- data/lib/tensor_stream/variable.rb +5 -11
- data/lib/tensor_stream/variable_scope.rb +15 -0
- data/lib/tensor_stream/version.rb +1 -1
- data/samples/iris.rb +8 -4
- data/samples/linear_regression.rb +1 -1
- data/samples/multigpu.rb +73 -0
- data/samples/nearest_neighbor.rb +3 -3
- data/tensor_stream.gemspec +1 -1
- data/test_samples/raw_neural_net_sample.rb +4 -1
- metadata +21 -6
data/lib/tensor_stream/evaluator/operation_helpers/array_ops_helper.rb:

```diff
@@ -167,15 +167,11 @@ module TensorStream
     end
 
     def last_axis(arr)
-
-
-
-
-      arr.each do |sub|
-        all_items += last_axis(sub)
-      end
+      return arr if get_rank(arr) <= 2
+
+      arr.inject([]) do |rows, sub|
+        rows + last_axis(sub)
       end
-      all_items
     end
 
     def softmax(arr)
```
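The rewrite turns `last_axis` from accumulator bookkeeping into a recursive fold. A standalone sketch of the same walk; `get_rank` here is a simplified stand-in for the gem's own rank helper:

```ruby
# Minimal sketch: collect the rank-2 "rows" at the bottom of a nested array.
def get_rank(arr)
  arr.is_a?(Array) ? 1 + get_rank(arr.first) : 0
end

def last_axis(arr)
  return arr if get_rank(arr) <= 2

  arr.inject([]) { |rows, sub| rows + last_axis(sub) }
end

p last_axis([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
# => [[1, 2], [3, 4], [5, 6], [7, 8]]
```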
data/lib/tensor_stream/evaluator/ruby_evaluator.rb:

```diff
@@ -31,9 +31,7 @@ module TensorStream
       include TensorStream::MathHelper
 
       def run(tensor, execution_context)
-        if tensor.is_a?(Array) && tensor.
-          return tensor.map { |t| run(t, execution_context) }
-        end
+        return tensor.map { |t| run(t, execution_context) } if tensor.is_a?(Array) && !tensor.empty? && tensor[0].is_a?(Tensor)
 
         tensor = tensor.call if tensor.is_a?(Proc)
 
```
```diff
@@ -44,6 +42,8 @@ module TensorStream
           eval_variable(tensor, child_context)
         elsif tensor.is_a?(Placeholder)
           resolve_placeholder(tensor, child_context)
+        elsif tensor.is_a?(OutputGroup)
+          tensor.outputs[0]
         else
           eval_tensor(tensor, child_context)
         end
```
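`OutputGroup` is new in 0.7.0: it wraps the multiple results of an op such as `broadcast_gradient_args`, and single-value callers like `run` simply take `outputs[0]`. A hypothetical minimal stand-in for the idea:

```ruby
# Hypothetical stand-in: a value object holding an op's multiple outputs.
OutputGroup = Struct.new(:outputs)

group = OutputGroup.new([[1], [0, 2]])
group.outputs[0] # => [1]    (what run/complete_eval return)
group.outputs[1] # => [0, 2] (reachable via the :index op)
```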
```diff
@@ -65,9 +65,11 @@ module TensorStream
 
         tensor = tensor.map { |t| complete_eval(t, context) } if tensor.is_a?(Array) && !tensor.empty? && tensor[0].is_a?(Tensor)
 
-
-
+          break if old_tensor.equal?(tensor)
+          break unless tensor.is_a?(Tensor)
         end
+
+        tensor.is_a?(OutputGroup) ? tensor.outputs[0] : tensor
       end
 
       protected
```
```diff
@@ -86,9 +88,8 @@ module TensorStream
 
       def eval_variable(tensor, child_context)
         value = tensor.read_value
-        if value.nil?
-
-        end
+        raise "variable #{tensor.name} not initalized" if value.nil?
+
         eval_tensor(value, child_context).tap do |val|
           child_context[:returns] ||= {}
           child_context[:returns][:vars] ||= []
```
```diff
@@ -100,17 +101,21 @@ module TensorStream
        inputs
      end
 
-     register_op(:const) do |
+     register_op(:const) do |_context, _tensor, inputs|
        inputs[0]
      end
 
-     register_op(
+     register_op(%i[argmax arg_max]) do |_context, tensor, inputs|
        axis = tensor.options[:axis] || 0
+       rank = get_rank(inputs[0])
+       raise TensorStream::InvalidArgumentError, "Expected dimension in the range [#{-rank},#{rank}) but got #{axis}" if axis < -rank || axis >= rank
        get_op_with_axis(inputs[0], axis, 0, tensor.data_type)
      end
 
-     register_op(
+     register_op(%i[argmin arg_min]) do |_context, tensor, inputs|
        axis = tensor.options[:axis] || 0
+       rank = get_rank(inputs[0])
+       raise TensorStream::InvalidArgumentError, "Expected dimension in the range [#{-rank},#{rank}) but got #{axis}" if axis < -rank || axis >= rank
        get_op_with_axis(inputs[0], axis, 0, tensor.data_type, ->(a, b) { a < b })
      end
 
```
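The new bounds check mirrors the TensorFlow contract: for a rank-`r` input, a valid axis lies in the half-open interval `[-r, r)`. A minimal sketch of just the check, with `ArgumentError` standing in for `TensorStream::InvalidArgumentError`:

```ruby
def check_axis!(rank, axis)
  return if axis >= -rank && axis < rank

  raise ArgumentError,
        "Expected dimension in the range [#{-rank},#{rank}) but got #{axis}"
end

check_axis!(2, -2) # ok: -2 is within [-2, 2)
check_axis!(2, 2)  # raises: Expected dimension in the range [-2,2) but got 2
```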
```diff
@@ -118,7 +123,7 @@ module TensorStream
        call_op(:cast, inputs[0], context, ->(t, _b) { Tensor.cast_dtype(t, tensor.data_type) })
      end
 
-     register_op(:sign) do |context,
+     register_op(:sign) do |context, _tensor, inputs|
        func = lambda { |x, _b|
          if x.zero? || (x.is_a?(Float) && x.nan?)
            0
```
```diff
@@ -134,22 +139,26 @@ module TensorStream
        call_op(:sign, inputs[0], context, func)
      end
 
-     register_op(:logical_and) do |context,
+     register_op(:logical_and) do |context, _tensor, inputs|
        call_vector_op(:logical_and, inputs[0], inputs[1], context, ->(t, u) { t && u })
      end
 
-     register_op(:equal) do |context,
+     register_op(:equal) do |context, _tensor, inputs|
        call_vector_op(:equal, inputs[0], inputs[1], context, ->(t, u) { t == u })
      end
 
-     register_op(:not_equal) do |context,
+     register_op(:not_equal) do |context, _tensor, inputs|
        call_vector_op(:not_equal, inputs[0], inputs[1], context, ->(t, u) { t != u })
      end
 
-     register_op :index, no_eval: true do |
+     register_op :index, no_eval: true do |_context, _tensor, inputs|
        f = inputs[0]
        index = inputs[1]
-       f
+       if f.is_a?(OutputGroup)
+         f.outputs[index]
+       else
+         f[index]
+       end
      end
 
      register_op :slice do |context, tensor, inputs|
```
```diff
@@ -170,7 +179,7 @@ module TensorStream
        end
      end
 
-     register_op
+     register_op %i[flow_dynamic_stitch dynamic_stitch], noop: true do |_context, _tensor, inputs|
        indexes, data = inputs
        merged = []
        merge_dynamic_stitch(merged, indexes, data)
```
```diff
@@ -182,7 +191,7 @@ module TensorStream
        Tensor.cast_dtype(input.flatten.size, tensor.options[:out_type])
      end
 
-     register_op
+     register_op %i[neg negate], no_eval: true do |context, _tensor, inputs|
        call_vector_op(:negate, inputs[0], nil, context, ->(t, _u) { -t })
      end
 
```
```diff
@@ -191,24 +200,38 @@ module TensorStream
        call_vector_op(:add, a, b, context, ->(t, u) { t + u })
      end
 
+     register_op :add_n, no_eval: true do |context, _tensor, inputs|
+       if inputs.size == 1
+         complete_eval(inputs[0], context)
+       elsif inputs.size > 1
+
+         a = inputs.pop
+         until inputs.empty?
+           b = inputs.pop
+           a = call_vector_op(:add, a, b, context, ->(t, u) { t + u })
+         end
+         a
+       end
+     end
+
      register_op :sub, no_eval: true do |context, _tensor, inputs|
        a, b = inputs
        call_vector_op(:sub, a, b, context, ->(t, u) { t - u })
      end
 
-     register_op
+     register_op %i[floor_mod mod], no_eval: true do |context, _tensor, inputs|
        a, b = inputs
        call_vector_op(:sub, a, b, context, ->(t, u) { t % u })
      end
 
-     register_op
+     register_op %i[floor_div real_div], no_eval: true do |context, tensor, inputs|
        a, b = inputs
        if fp_type?(tensor.data_type)
          call_vector_op(:sub, a, b, context, ->(t, u) { (t / u).to_i.to_f })
        else
          call_vector_op(:sub, a, b, context, ->(t, u) { t / u })
        end
-     end
+     end
 
      register_op :mul, no_eval: true do |context, _tensor, inputs|
        a, b = inputs
```
```diff
@@ -225,55 +248,63 @@ module TensorStream
        call_vector_op(:squared_difference, a, b, context, ->(t, u) { (t - u) * (t - u) })
      end
 
-     register_op
+     register_op %i[concat concat_v2] do |_context, tensor, inputs|
        concat_array(inputs[0], tensor.options[:axis])
      end
 
-     register_op :round, no_eval: true do |context,
+     register_op :round, no_eval: true do |context, _tensor, inputs|
        call_op(:round, inputs[0], context, ->(t, _b) { t.round })
      end
 
-     register_op :abs, no_eval: true do |context,
+     register_op :abs, no_eval: true do |context, _tensor, inputs|
        call_op(:abs, inputs[0], context, ->(t, _b) { t.abs })
      end
 
-     register_op :tanh, no_eval: true do |context,
+     register_op :tanh, no_eval: true do |context, _tensor, inputs|
        call_op(:tanh, inputs[0], context, ->(t, _b) { Math.tanh(t) })
      end
 
-     register_op :tan, no_eval: true do |context,
+     register_op :tan, no_eval: true do |context, _tensor, inputs|
        call_op(:tan, inputs[0], context, ->(t, _b) { Math.tan(t) })
      end
 
-     register_op :sec, no_eval: true do |context,
+     register_op :sec, no_eval: true do |context, _tensor, inputs|
        call_op(:sec, inputs[0], context, ->(t, _b) { Math.sec(t) })
      end
 
-     register_op :sin, no_eval: true do |context,
+     register_op :sin, no_eval: true do |context, _tensor, inputs|
        call_op(:sin, inputs[0], context, ->(t, _b) { Math.sin(t) })
      end
 
-     register_op :
+     register_op :asin, no_eval: true do |context, _tensor, inputs|
+       call_op(:asin, inputs[0], context, ->(t, _b) { Math.asin(t) })
+     end
+
+     register_op :acos, no_eval: true do |context, _tensor, inputs|
+       call_op(:acos, inputs[0], context, ->(t, _b) { Math.acos(t) })
+     end
+
+     register_op :cos, no_eval: true do |context, _tensor, inputs|
        call_op(:cos, inputs[0], context, ->(t, _b) { Math.cos(t) })
      end
 
-     register_op :log1p, no_eval: true do |context,
+     register_op :log1p, no_eval: true do |context, _tensor, inputs|
        call_op(:log1p, inputs[0], context, ->(t, _b) { Math.log(1 + t) })
      end
 
-     register_op :log, no_eval: true
+     register_op :log, no_eval: true do |context, _tensor, inputs|
        call_op(:log, inputs[0], context, ->(t, _b) { t < 0 ? Float::NAN : Math.log(t) })
      end
 
-     register_op :exp, no_eval: true
+     register_op :exp, no_eval: true do |context, _tensor, inputs|
        call_op(:exp, inputs[0], context, ->(t, _b) { Math.exp(t) })
      end
 
-     register_op :sigmoid, no_eval: true
+     register_op :sigmoid, no_eval: true do |context, _tensor, inputs|
        call_op(:sigmoid, inputs[0], context, ->(t, _b) { sigmoid(t) })
      end
 
-     register_op :sqrt, no_eval: true
+     register_op :sqrt, no_eval: true do |context, _tensor, inputs|
        call_op(:sqrt, inputs[0], context, ->(t, _b) { Math.sqrt(t) })
      end
 
```
```diff
@@ -285,21 +316,21 @@ module TensorStream
        call_op(:ceil, inputs[0], context, ->(t, _b) { t.ceil })
      end
 
-     register_op :square, no_eval: true
-       call_op(:square, inputs[0], context, ->(t, _b) {
+     register_op :square, no_eval: true do |context, _tensor, inputs|
+       call_op(:square, inputs[0], context, ->(t, _b) { t * t })
      end
 
-     register_op :reciprocal, no_eval: true
-       call_op(:reciprocal, inputs[0], context,
+     register_op :reciprocal, no_eval: true do |context, _tensor, inputs|
+       call_op(:reciprocal, inputs[0], context, ->(t, _b) { 1 / t })
      end
 
-     register_op :stop_gradient, no_eval: true
+     register_op :stop_gradient, no_eval: true do |_context, _tensor, inputs|
        inputs[0]
      end
 
      register_op :sigmoid_grad, no_eval: true do |context, _tensor, inputs|
        a, b = inputs
-       call_vector_op(:sigmoid_grad, a, b, context, ->(t, u) { u * sigmoid(t) * (1 - sigmoid(t))}
+       call_vector_op(:sigmoid_grad, a, b, context, ->(t, u) { u * sigmoid(t) * (1 - sigmoid(t)) })
      end
 
      register_op :random_uniform, no_eval: true do |_context, tensor, _inputs|
```
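The repaired `sigmoid_grad` lambda multiplies the incoming gradient by the sigmoid derivative, sigmoid'(t) = sigmoid(t) * (1 - sigmoid(t)). Checking the arithmetic directly:

```ruby
def sigmoid(x)
  1.0 / (1.0 + Math.exp(-x))
end

t = 0.5 # input value
u = 2.0 # incoming gradient
p u * sigmoid(t) * (1 - sigmoid(t)) # => 0.47000... (2 * 0.23500...)
```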
```diff
@@ -313,7 +344,7 @@ module TensorStream
        generate_vector(shape, generator: generator)
      end
 
-     register_op :
+     register_op :random_standard_normal, no_eval: true do |_context, tensor, _inputs|
        seed = tensor.options[:seed]
        random = _get_randomizer(tensor, seed)
        r = RandomGaussian.new(tensor.options.fetch(:mean), tensor.options.fetch(:stddev), -> { random.rand })
```
```diff
@@ -345,18 +376,18 @@ module TensorStream
        generate_vector(shape, generator: generator)
      end
 
-     register_op :assign, noop: true do |context, tensor,
+     register_op :assign, noop: true do |context, tensor, _inputs|
        assign = tensor.inputs[0] || tensor
        assign.value = complete_eval(tensor.inputs[1], context)
        assign.value
      end
 
-     register_op :assign_add, noop: true do |context, tensor,
+     register_op :assign_add, noop: true do |context, tensor, _inputs|
        tensor.inputs[0].value = process_vector_math_op(tensor.inputs[0], tensor.inputs[1], context, ->(t, u) { t + u })
        tensor.inputs[0].value
      end
 
-     register_op :assign_sub, noop: true do |context, tensor,
+     register_op :assign_sub, noop: true do |context, tensor, _inputs|
        tensor.inputs[0].value = process_vector_math_op(tensor.inputs[0], tensor.inputs[1], context, ->(t, u) { t - u })
        tensor.inputs[0].value
      end
```
```diff
@@ -378,7 +409,14 @@ module TensorStream
      end
 
      register_op :sum, noop: true do |context, tensor, _inputs|
-
+       # axis = complete_eval(tensor.inputs[1], context)
+       # # fast path
+       # if axis.nil? && !tensor.options[:keepdims]
+       #   arr = complete_eval(tensor.inputs[0], context)
+       #   next arr unless arr.is_a?(Array)
+       #   next arr.flatten.reduce(:+)
+       # end
+
        func = lambda do |arr|
          reduced_val = arr[0]
          arr[1..arr.size].each do |v|
```
```diff
@@ -386,11 +424,19 @@ module TensorStream
          end
          reduced_val
        end
-
+
        reduction(context, tensor, func)
      end
 
      register_op :prod, noop: true do |context, tensor, _inputs|
+       # axis = complete_eval(tensor.inputs[1], context)
+       # # fast path
+       # if axis.nil? && !tensor.options[:keepdims]
+       #   arr = complete_eval(tensor.inputs[0], context)
+       #   next arr unless arr.is_a?(Array)
+       #   next arr.flatten.reduce(:*)
+       # end
+
        c = fp_type?(tensor.data_type) ? 1.0 : 1
        func = lambda do |arr|
          return c if arr.nil?
```
```diff
@@ -405,13 +451,12 @@ module TensorStream
        reduction(context, tensor, func)
      end
 
-     register_op :range do |
+     register_op :range do |_context, _tensor, inputs|
        start, limit, delta = inputs
        raise " delta !=0 " if delta.zero?
        raise " Requires start <= limit when delta > 0" if (start > limit) && delta > 0
        raise " Requires start >= limit when delta < 0" if (start < limit) && delta < 0
-
-
+
        cur_step = start
        r = []
        Kernel.loop do
```
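The `range` op now validates `delta` and the start/limit ordering up front. The stepping loop itself is truncated in this view; one plausible plain-Ruby equivalent of the whole op, under that assumption:

```ruby
# Sketch only: the loop body is not visible in the hunk above.
def ts_range(start, limit, delta)
  raise "delta != 0" if delta.zero?
  raise "Requires start <= limit when delta > 0" if start > limit && delta > 0
  raise "Requires start >= limit when delta < 0" if start < limit && delta < 0

  r = []
  cur_step = start
  loop do
    break if delta > 0 ? cur_step >= limit : cur_step <= limit
    r << cur_step
    cur_step += delta
  end
  r
end

p ts_range(0, 10, 3) # => [0, 3, 6, 9]
```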
```diff
@@ -446,6 +491,18 @@ module TensorStream
        end
      end
 
+     register_op :expand_dims do |context, tensor, inputs|
+       val, axis = inputs
+       axis = axis.nil? ? 0 : axis
+
+       shape = shape_eval(val)
+       axis = -axis if axis == shape.size
+
+       new_shape = shape.dup.insert(axis, 1).compact
+
+       TensorShape.reshape([val].flatten, new_shape)
+     end
+
      register_op :cond, noop: true do |context, tensor, inputs|
        pred = complete_eval(tensor.options[:pred], context)
 
```
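`expand_dims` builds the output shape by inserting a 1 at the requested axis, then reshapes the flattened values. The shape arithmetic in isolation:

```ruby
shape = [2, 3]
p shape.dup.insert(0, 1) # => [1, 2, 3]  axis 0 adds a leading dimension
p shape.dup.insert(1, 1) # => [2, 1, 3]  axis 1 inserts in the middle
```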
```diff
@@ -456,7 +513,7 @@ module TensorStream
        end
      end
 
-     register_op
+     register_op %i[select where] do |context, tensor, inputs|
        pred = complete_eval(tensor.options[:pred], context)
        call_3way_vector_op(pred, inputs[0], inputs[1], context, ->(t, u, v) { t ? u : v })
      end
```
```diff
@@ -481,7 +538,7 @@ module TensorStream
        call_vector_op(:greater_equal, a, b, context, ->(t, u) { t <= u })
      end
 
-     register_op :fill do |_context,
+     register_op :fill do |_context, _tensor, inputs|
        shape = inputs[0]
        value = inputs[1]
 
```
```diff
@@ -528,7 +585,7 @@ module TensorStream
        shape_eval(inputs[0], tensor.options[:out_type])
      end
 
-     register_op :
+     register_op :mat_mul do |_context, tensor, inputs|
        matrix_a, matrix_b = inputs
        rank_a = get_rank(matrix_a)
        rank_b = get_rank(matrix_b)
```
```diff
@@ -538,10 +595,6 @@ module TensorStream
        matrix_a = matrix_a.transpose if tensor.options[:transpose_a]
        matrix_b = matrix_b.transpose if tensor.options[:transpose_b]
 
-       # handle matrix multiplication with constants like 1 or 0
-       matrix_a = matmul_const_transform(matrix_a, matrix_b, tensor)
-       matrix_b = matmul_const_transform(matrix_b, matrix_a, tensor)
-
        # check matrix dimensions
        raise "incompatible shape sizes for matrix multiplication (#{matrix_a[0].size} != #{matrix_b.size}) #{shape_eval(matrix_a)} vs #{shape_eval(matrix_b)}" if matrix_a[0].size != matrix_b.size
 
```
```diff
@@ -593,12 +646,25 @@ module TensorStream
        arr_pad(inputs[0], p, tensor.data_type)
      end
 
-     register_op
+     register_op %i[max maximum], noop: true do |context, _tensor, inputs|
        call_vector_op(:max, inputs[0], inputs[1], context, ->(t, u) { [t, u].max })
      end
 
+     register_op %i[min minimum], noop: true do |context, _tensor, inputs|
+       call_vector_op(:min, inputs[0], inputs[1], context, ->(t, u) { [t, u].min })
+     end
+
+     register_op :apply_gradient_descent do |context, tensor, inputs|
+       target_var, learning_rate, delta = inputs
+       assign = tensor.inputs[0] || tensor
+
+       assign.value = process_vector_math_op(target_var, delta, context, ->(t, u) { t - u * learning_rate })
+       assign.value
+     end
+
      register_op :broadcast_gradient_args do |_context, _tensor, inputs|
-       get_broadcast_gradient_args(inputs[0], inputs[1])
+       rx, ry = get_broadcast_gradient_args(inputs[0], inputs[1])
+       OutputGroup.new([rx, ry])
      end
 
      register_op :tile do |_context, _tensor, inputs|
```
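`apply_gradient_descent` lands the classic SGD step, `w ← w − learning_rate · delta`, as a single op. On flat Ruby arrays the update is just:

```ruby
w             = [0.5, -1.0]
delta         = [0.2, -0.4]
learning_rate = 0.1

w = w.zip(delta).map { |t, u| t - u * learning_rate }
p w # => [0.48, -0.96]
```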
```diff
@@ -620,6 +686,14 @@ module TensorStream
        softmax(inputs[0])
      end
 
+     register_op :save_v2 do |context, tensor, inputs|
+       prefix, tensor_names, shape_and_slices = inputs[0..3]
+     end
+
+     register_op :restore_v2 do |context, tensor, inputs|
+       prefix, tensor_names, shape_and_slices = inputs[0..3]
+     end
+
      register_op :softmax_grad do |_context, _tensor, inputs|
        input, grad = inputs
        softmax_input = softmax(input)
```
```diff
@@ -628,10 +702,9 @@ module TensorStream
        last_dimen_list = last_axis(softmax_input)
        last_grad_list = last_axis(grad)
 
-       func =
-
+       func = lambda { |list, last_grad|
          f_grad = softmax_grad(list)
-         f_grad.transpose.
+         f_grad.transpose.each.collect do |row|
            sum = 0.0
            row.each_with_index do |r, g_index|
              sum += r * last_grad[g_index]
```
```diff
@@ -641,55 +714,75 @@ module TensorStream
        }
 
        if input_shape.size == 1
-         func.(last_dimen_list, last_grad_list)
+         func.call(last_dimen_list, last_grad_list)
        else
          arr = last_dimen_list.zip(last_grad_list).collect do |list, last_grad|
-           func.(list, last_grad)
+           func.call(list, last_grad)
+         end
+         TensorShape.reshape(arr.flatten, input_shape)
+       end
+     end
+
+     register_op :log_softmax do |_context, _tensor, inputs|
+       input_shape = shape_eval(inputs[0])
+       last_dimen_list = last_axis(inputs[0])
+
+       func = lambda { |logits|
+         c = logits.max
+         transformed_logits = logits.map { |l| l - c }
+         sum = transformed_logits.map { |x| Math.exp(x) }.reduce(:+)
+         transformed_logits.map { |x| x - Math.log(sum) }
+       }
+
+       if input_shape.size == 1
+         func.call(last_dimen_list)
+       else
+         arr = last_dimen_list.collect do |list|
+           func.call(list)
          end
          TensorShape.reshape(arr.flatten, input_shape)
        end
-
      end
 
-     register_op
+     register_op %i[softmax_cross_entropy_with_logits_v2 softmax_cross_entropy_with_logits] do |_context, _tensor, inputs|
        last_dimen_list = last_axis(inputs[0])
        input_shape = shape_eval(inputs[0])
        labels = last_axis(inputs[1])
-       func =
+       func = lambda { |logits, label|
          c = logits.max
-         transformed_logits = logits.map { |l| l - c}
+         transformed_logits = logits.map { |l| l - c }
          sum = transformed_logits.map { |x| Math.exp(x) }.reduce(:+)
          transformed_logits.zip(label).map { |x, y| (Math.log(sum) - x) * y }
        }
 
        if input_shape.size == 1
-         func.(last_dimen_list, labels)
+         func.call(last_dimen_list, labels)
        else
          arr = last_dimen_list.zip(labels).collect do |list, label|
-           func.(list, label)
+           func.call(list, label)
          end
          TensorShape.reshape(arr.flatten, input_shape)
        end
      end
 
-     register_op :softmax_cross_entropy_with_logits_v2_grad do |
+     register_op :softmax_cross_entropy_with_logits_v2_grad do |_context, _tensor, inputs|
        last_dimen_list = last_axis(inputs[0])
        labels = last_axis(inputs[1])
        passed_grads = last_axis(inputs[2])
        input_shape = shape_eval(inputs[0])
 
-       func =
+       func = lambda { |logits, label, grad|
          c = logits.max
          transformed_logits = logits.map { |l| Math.exp(l - c) }
          e_sum = transformed_logits.reduce(:+)
-         transformed_logits.zip(label).zip(grad).map { |(x, y), g|
+         transformed_logits.zip(label).zip(grad).map { |(x, y), g| (x / e_sum) * g - y }
        }
 
        if input_shape.size == 1
-         func.(last_dimen_list, labels, passed_grads)
+         func.call(last_dimen_list, labels, passed_grads)
        else
-         arr = last_dimen_list.zip(labels).zip(passed_grads).collect do |
-           func.(list, label, passed_grad)
+         arr = last_dimen_list.zip(labels).zip(passed_grads).collect do |(list, label), passed_grad|
+           func.call(list, label, passed_grad)
          end
          TensorShape.reshape(arr.flatten, input_shape)
        end
```
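Both the new `log_softmax` op and the cross-entropy handlers shift the logits by their maximum before exponentiating, the standard overflow guard: `log_softmax(x) = (x - c) - log(sum(exp(x - c)))` with `c = max(x)`. Worked on logits where a naive `Math.exp` would overflow:

```ruby
logits  = [1000.0, 1001.0, 1002.0] # Math.exp(1000.0) => Infinity
c       = logits.max
shifted = logits.map { |l| l - c } # => [-2.0, -1.0, 0.0]
log_sum = Math.log(shifted.map { |x| Math.exp(x) }.reduce(:+))
p shifted.map { |x| x - log_sum }
# => [-2.4076..., -1.4076..., -0.4076...]
```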
```diff
@@ -697,7 +790,10 @@ module TensorStream
 
      register_op :check_numerics do |context, tensor, inputs|
        message = tensor.options[:message]
-       f =
+       f = lambda { |t, _b|
+         raise "#{message} Invalid argument" if t.nan? || t.infinite?
+         t
+       }
        call_op(:check_numerics, inputs[0], context, f)
      end
 
```
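The filled-in `check_numerics` lambda raises on NaN or infinite elements and passes finite ones through unchanged. The same check standalone:

```ruby
f = lambda do |t, message|
  raise "#{message} Invalid argument" if t.nan? || t.infinite?
  t
end

f.call(1.5, "logits")              # => 1.5
f.call(Float::INFINITY, "logits")  # raises RuntimeError
```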
```diff
@@ -731,17 +827,19 @@ module TensorStream
        end
      rescue EvaluatorExcecutionException => e
        raise e
+     rescue TensorStreamError => e
+       raise e
      rescue StandardError => e
-       a = resolve_placeholder(tensor.inputs[0], child_context) if tensor.inputs && tensor.inputs[0]
-       b = resolve_placeholder(tensor.inputs[1], child_context) if tensor.inputs && tensor.inputs[1]
+       # a = resolve_placeholder(tensor.inputs[0], child_context) if tensor.inputs && tensor.inputs[0]
+       # b = resolve_placeholder(tensor.inputs[1], child_context) if tensor.inputs && tensor.inputs[1]
        puts e.message
        puts e.backtrace.join("\n")
        # shape_a = a.shape.shape if a
        # shape_b = b.shape.shape if b
        # dtype_a = a.data_type if a
        # dtype_b = b.data_type if b
-       a = complete_eval(a, child_context)
-       b = complete_eval(b, child_context)
+       # a = complete_eval(a, child_context)
+       # b = complete_eval(b, child_context)
        # puts "name: #{tensor.given_name}"
        # # puts "op: #{tensor.to_math(true, 1)}"
        # puts "A #{shape_a} #{dtype_a}: #{a}" if a
```
```diff
@@ -749,7 +847,7 @@ module TensorStream
        # dump_intermediates if @log_intermediates
        # File.write('/home/jedld/workspace/tensor_stream/samples/error.graphml', TensorStream::Graphml.new.get_string(tensor, @session))
        # File.write('/Users/josephemmanueldayo/workspace/gradients.graphml', TensorStream::Graphml.new.get_string(tensor, @session))
-       raise EvaluatorExcecutionException.new(e, tensor), "error #{e.message} while evaluating #{tensor.name} : #{tensor.to_math(true,1)} defined at #{tensor.source}"
+       raise EvaluatorExcecutionException.new(e, tensor), "error #{e.message} while evaluating #{tensor.name} : #{tensor.to_math(true, 1)} defined at #{tensor.source}"
      end
 
      def eval_tensor(tensor, child_context)
```
```diff
@@ -771,7 +869,7 @@ module TensorStream
        end
      end
 
-     def convert_from_buffer(
+     def convert_from_buffer(_tensor, result)
        result.buffer
      end
 
```
```diff
@@ -818,14 +916,14 @@ module TensorStream
        return val if axis && axis.is_a?(Array) && axis.empty?
 
        axis = if axis.nil?
-
-
-
+                nil
+              elsif axis.is_a?(Array)
+                return val if axis.empty?
 
-
-
-
-
+                axis.map { |a| a < 0 ? rank - a.abs : a }
+              else
+                axis < 0 ? rank - axis.abs : axis
+              end
 
        reduce_axis(0, axis, val, keep_dims, func)
      end
```
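The rewritten axis handling normalizes negative axes with `rank - a.abs`, so `-1` addresses the last dimension of a rank-`rank` input:

```ruby
rank = 3
p [-1, -2, 0, 2].map { |a| a < 0 ? rank - a.abs : a }
# => [2, 1, 0, 2]
```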
```diff
@@ -855,17 +953,6 @@ module TensorStream
        end
      end
 
-     def matmul_const_transform(mat, mat_b, tensor)
-       if !mat.is_a?(Array)
-         compat_shape = shape_eval(mat_b).reverse
-         func = -> { tensor.data_type == :int32 ? mat.to_i : mat.to_f }
-
-         generate_vector(compat_shape, generator: func)
-       else
-         mat
-       end
-     end
-
      def call_op(op, a, child_context, func)
        a = complete_eval(a, child_context)
        process_function_op(a, func)
```
```diff
@@ -899,22 +986,6 @@ module TensorStream
        # end
      end
 
-     # determine possible reduction axis to be used
-     def _broadcast_gradient_op(vector_shape1, vector_shape2, level)
-       va_rank = _rank_from_shape(vector_shape1)
-       vb_rank = _rank_from_shape(vector_shape2)
-       return [] if vector_shape1 == vector_shape2 # same shape so no reductions
-
-       shape2_r = vector_shape2.reverse
-
-       vector_shape1.reverse.each_with_index.collect do |s, index|
-         next va_rank - index - 1 if index >= shape2_r.size
-         next nil if shape2_r[index] == s
-         next nil if shape2_r[index] > s
-         va_rank - index - 1
-       end.compact
-     end
-
      def _rank_from_shape(shape)
        shape.is_a?(Array) ? shape.size : 0
      end
```
```diff
@@ -967,10 +1038,10 @@ module TensorStream
          reduced_val = r[0]
          if r.size > 1
            reduced_val = f.call(r[0..val.size])
-          elsif r.
+          elsif r.empty?
            reduced_val = f.call(nil)
          end
-          keep_dims ? [
+          keep_dims ? [reduced_val] : reduced_val
        else
          r
        end
```
```diff
@@ -1037,7 +1108,7 @@ module TensorStream
      def dump_intermediates
        arr = []
        arr << "============== start ==================="
-       @context[:compute_history].each_with_index do |history,
+       @context[:compute_history].each_with_index do |history, _index|
          arr << "------------------------------------"
          arr << history[:name]
          arr << "#{history[:type]} #{history[:shape]}"
```
```diff
@@ -1049,10 +1120,10 @@ module TensorStream
        end
        arr << "============== end ====================="
        str = arr.join("\n")
-       File.write(
+       File.write('/tmp/intermediates.txt', str)
      end
    end
  end
 end
 
-TensorStream::Evaluator.register_evaluator(TensorStream::Evaluator::RubyEvaluator,
+TensorStream::Evaluator.register_evaluator(TensorStream::Evaluator::RubyEvaluator, 'ruby')
```