tensor_stream 0.5.1 → 0.6.0

@@ -86,6 +86,8 @@ class OpenclTemplateHelper
       '-'
     when 'mul'
       '*'
+    when 'mod'
+      '%'
     else
       raise "unsupported op #{op}"
     end
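
Note: the OpenCL template helper now maps the 'mod' operation to C's '%' operator when rendering kernel source. A minimal sketch of the lookup pattern this hunk extends (the method name and the 'sub' branch are illustrative; only the 'mod' → '%' and 'mul' → '*' mappings are taken from the diff):

    def operator_for(op)          # illustrative name, not the gem's helper
      case op
      when 'sub' then '-'
      when 'mul' then '*'
      when 'mod' then '%'         # new in 0.6.0
      else raise "unsupported op #{op}"
      end
    end

    operator_for('mod') # => "%"
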
@@ -23,7 +23,7 @@ module TensorStream
       slice_tensor(input, start, target_shape)
     end
 
-    def reduced_shape(input_shape, axes)
+    def _reduced_shape(input_shape, axes)
       return [] if axes.nil? # reduce to scalar
       axes = [ axes ] unless axes.is_a?(Array)
       return input_shape if axes.empty?
@@ -166,25 +166,35 @@ module TensorStream
       get_rank(value[0], rank + 1)
     end
 
+    def last_axis(arr)
+      all_items = []
+      if get_rank(arr) <=2
+        return arr
+      else
+        arr.each do |sub|
+          all_items += last_axis(sub)
+        end
+      end
+      all_items
+    end
+
     def softmax(arr)
       return arr if arr.empty?
 
-      sum = if !arr[0].is_a?(Array)
-        arr.map { |a| Math.exp(a - arr.max) }.reduce(:+)
-      end
-
-      arr.collect do |input|
-        if input.is_a?(Array)
-          softmax(input)
-        else
-          Math.exp(input - arr.max) / sum
+      if !arr[0].is_a?(Array)
+        c = arr.max
+        arr = arr.map { |a| Math.exp(a - c) }
+        sum = arr.reduce(:+)
+        arr.collect do |input|
+          input / sum
         end
+      else
+        arr.collect { |input| softmax(input) }
       end
     end
 
     def softmax_grad(arr)
       return arr if arr.empty?
-
       arr.each_with_index.collect do |input, index|
         if input.is_a?(Array)
           softmax_grad(input)
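
Note: softmax is rewritten in the numerically stable form, subtracting the row maximum before exponentiating (the result is unchanged, but Math.exp can no longer overflow on large logits), and the new last_axis helper collects the innermost rows of a nested array so row-wise functions can be applied to higher-rank tensors. A self-contained sketch of the stabilized row softmax in plain Ruby:

    def stable_softmax(row)
      c = row.max
      exps = row.map { |a| Math.exp(a - c) }
      sum = exps.reduce(:+)
      exps.map { |e| e / sum }
    end

    stable_softmax([1.0, 2.0, 3.0])
    # => [0.0900..., 0.2447..., 0.6652...]
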
@@ -160,6 +160,28 @@ module TensorStream
       slice_tensor(input, start, size)
     end
 
+    def merge_dynamic_stitch(merged, indexes, data)
+      indexes.each_with_index do |ind, m|
+        if ind.is_a?(Array)
+          merge_dynamic_stitch(merged, ind, data[m])
+        else
+          merged[ind] = data[m]
+        end
+      end
+    end
+
+    register_op :flow_dynamic_stitch, noop: true do |context, tensor, inputs|
+      indexes, data = inputs
+      merged = []
+      merge_dynamic_stitch(merged, indexes, data)
+      merged
+    end
+
+    register_op :size do |_context, tensor, inputs|
+      input = inputs[0]
+      Tensor.cast_dtype(input.flatten.size, tensor.options[:out_type])
+    end
+
     register_op :negate, no_eval: true do |context, _tensor, inputs|
       call_vector_op(:negate, inputs[0], nil, context, ->(t, _u) { -t })
     end
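
Note: merge_dynamic_stitch scatters values from data into merged at the positions given by indexes, recursing when an index group is itself an array; :flow_dynamic_stitch exposes it as an op, and :size returns the flattened element count cast to the requested dtype. Behavior sketch, calling the helper directly on plain Ruby arrays:

    merged = []
    merge_dynamic_stitch(merged, [[0, 2], [1]], [[10, 30], [20]])
    merged # => [10, 20, 30]
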
@@ -174,6 +196,20 @@ module TensorStream
       call_vector_op(:sub, a, b, context, ->(t, u) { t - u })
     end
 
+    register_op :mod, no_eval: true do |context, _tensor, inputs|
+      a, b = inputs
+      call_vector_op(:sub, a, b, context, ->(t, u) { t % u })
+    end
+
+    register_op :floor_div, no_eval: true do |context, tensor, inputs|
+      a, b = inputs
+      if fp_type?(tensor.data_type)
+        call_vector_op(:sub, a, b, context, ->(t, u) { (t / u).to_i.to_f })
+      else
+        call_vector_op(:sub, a, b, context, ->(t, u) { t / u })
+      end
+    end
+
     register_op :mul, no_eval: true do |context, _tensor, inputs|
       a, b = inputs
       call_vector_op(:mul, a, b, context, ->(t, u) { t * u })
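
Note: the new :mod and :floor_div ops reuse call_vector_op with element-wise lambdas (the :sub symbol passed as the op name looks like a carry-over from the sub op; the lambda is what does the work). For floating-point tensors floor_div truncates the quotient back to a whole float. The lambdas reduce to plain Ruby arithmetic:

    7 % 3                  # => 1     (:mod, t % u)
    (7.0 / 2.0).to_i.to_f  # => 3.0   (:floor_div on float types)
    7 / 2                  # => 3     (:floor_div on integer types)
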
@@ -184,6 +220,11 @@ module TensorStream
       call_vector_op(:pow, a, b, context, ->(t, u) { t**u })
     end
 
+    register_op :squared_difference, no_eval: true do |context, _tensor, inputs|
+      a, b = inputs
+      call_vector_op(:squared_difference, a, b, context, ->(t, u) { (t - u) * (t - u) })
+    end
+
     register_op :concat do |_context, tensor, inputs|
       concat_array(inputs[0], tensor.options[:axis])
     end
@@ -345,7 +386,7 @@ module TensorStream
       end
       reduced_val
     end
-
+
     reduction(context, tensor, func)
   end
 
@@ -364,6 +405,25 @@ module TensorStream
       reduction(context, tensor, func)
     end
 
+    register_op :range do |context, tensor, inputs|
+      start, limit, delta = inputs
+      raise " delta !=0 " if delta.zero?
+      raise " Requires start <= limit when delta > 0" if (start > limit) && delta > 0
+      raise " Requires start >= limit when delta < 0" if (start < limit) && delta < 0
+
+
+      cur_step = start
+      r = []
+      Kernel.loop do
+        break if start == limit
+        break if (start < limit) && (cur_step >= limit)
+        break if (start > limit) && (cur_step <= limit)
+        r << cur_step
+        cur_step += delta
+      end
+      r
+    end
+
     register_op :tanh_grad, no_eval: true do |context, _tensor, inputs|
       call_op(:tanh_grad, inputs[0], context, ->(t, _b) { 1 - Math.tanh(t) * Math.tanh(t) })
     end
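
Note: the :range op steps from start toward limit by delta and stops before reaching the limit, mirroring tf.range. A standalone sketch of the same loop (range_values is a hypothetical name, not part of the gem):

    def range_values(start, limit, delta)
      cur_step = start
      result = []
      loop do
        break if start == limit
        break if (start < limit) && (cur_step >= limit)
        break if (start > limit) && (cur_step <= limit)
        result << cur_step
        cur_step += delta
      end
      result
    end

    range_values(3, 18, 3) # => [3, 6, 9, 12, 15]
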
@@ -386,12 +446,13 @@ module TensorStream
       end
     end
 
-    register_op :cond do |context, tensor, inputs|
+    register_op :cond, noop: true do |context, tensor, inputs|
       pred = complete_eval(tensor.options[:pred], context)
+
       if all_true?(pred)
-        inputs[0]
+        complete_eval(inputs[0], context)
       else
-        inputs[1]
+        complete_eval(inputs[1], context)
       end
     end
 
@@ -420,25 +481,39 @@ module TensorStream
       call_vector_op(:greater_equal, a, b, context, ->(t, u) { t <= u })
     end
 
-    register_op %i[zeros ones zeros_like ones_like] do |_context, tensor, inputs|
-      shape = if %i[zeros_like ones_like].include?(tensor.operation)
-        shape_eval(inputs[0])
+    register_op :fill do |_context, tensor, inputs|
+      shape = inputs[0]
+      value = inputs[1]
+
+      func = -> { value }
+
+      if shape.is_a?(Array) && shape.size.zero?
+        func.call
       else
-        inputs[0] || tensor.shape.shape
+        shape = [shape.to_i] unless shape.is_a?(Array)
+        generate_vector(shape, generator: func)
       end
+    end
+
+    register_op %i[zeros ones zeros_like ones_like] do |_context, tensor, inputs|
+      shape = if %i[zeros_like ones_like].include?(tensor.operation)
+                shape_eval(inputs[0])
+              else
+                inputs[0] || tensor.shape.shape
+              end
 
       func = if %i[zeros zeros_like].include?(tensor.operation)
-        -> { tensor.data_type == :int32 ? 0 : 0.0 }
-      else
-        -> { tensor.data_type == :int32 ? 1 : 1.0 }
-      end
+               -> { int_type?(tensor.data_type) ? 0 : 0.0 }
+             else
+               -> { int_type?(tensor.data_type) ? 1 : 1.0 }
+             end
 
       if shape.is_a?(Array) && shape.size.zero?
         func.call
       else
         shape = [shape.to_i] unless shape.is_a?(Array)
 
-        cache_key = "#{tensor.operation}_#{shape.to_s}"
+        cache_key = "#{tensor.operation}_#{shape}"
         if @context[:_cache].key?(cache_key)
           @context[:_cache][cache_key]
         else
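
Note: the new :fill op broadcasts a single value over a shape (a scalar shape returns the bare value), and the zeros/ones family now checks int_type? so every integer dtype, not only :int32, gets integer fill values. Expected results, assuming generate_vector builds a nested array of the given shape:

    # fill([2, 3], 7) -> [[7, 7, 7], [7, 7, 7]]
    # fill([], 7)     -> 7   (scalar shape returns the value itself)
    # zeros with dtype :int64 now yield 0 instead of 0.0
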
@@ -527,22 +602,10 @@ module TensorStream
       get_broadcast_gradient_args(inputs[0], inputs[1])
     end
 
-    register_op :reduced_shape do |context, _tensor, inputs|
-      input_shape, axes = inputs
-
-      return [] if axes.nil? # reduce to scalar
-      axes = [ axes ] unless axes.is_a?(Array)
-      return input_shape if axes.empty?
-
-      axes.each do |dimen|
-        input_shape[dimen] = 1
-      end
-      input_shape
-    end
-
-    register_op :tile do |context, _tensor, inputs|
+    register_op :tile do |_context, _tensor, inputs|
       input, multiples = inputs
       rank = get_rank(input)
+
       raise '1D or higher tensor required' if rank.zero?
       raise "invalid multiple size passed #{rank} != #{multiples.size}" if rank != multiples.size
 
@@ -554,21 +617,82 @@ module TensorStream
       inputs.collect { |input| run(input, context) }
     end
 
-    register_op :softmax do |context, _tensor, inputs|
+    register_op :softmax do |_context, _tensor, inputs|
       softmax(inputs[0])
     end
 
     register_op :softmax_grad do |_context, _tensor, inputs|
       input, grad = inputs
-
       softmax_input = softmax(input)
-      f_grad = softmax_grad(softmax_input)
-      f_grad.transpose.each_with_index.collect do |row, index|
-        sum = 0.0
-        row.each_with_index do |r, g_index|
-          sum += r * grad[g_index]
+      input_shape = shape_eval(input)
+
+      last_dimen_list = last_axis(softmax_input)
+      last_grad_list = last_axis(grad)
+
+      func = -> (list, last_grad) {
+
+        f_grad = softmax_grad(list)
+        f_grad.transpose.each_with_index.collect do |row, index|
+          sum = 0.0
+          row.each_with_index do |r, g_index|
+            sum += r * last_grad[g_index]
+          end
+          sum
+        end
+      }
+
+      if input_shape.size == 1
+        func.(last_dimen_list, last_grad_list)
+      else
+        arr = last_dimen_list.zip(last_grad_list).collect do |list, last_grad|
+          func.(list, last_grad)
         end
-        sum
+        TensorShape.reshape(arr.flatten, input_shape)
+      end
+
+    end
+
+    register_op :softmax_cross_entropy_with_logits_v2 do |context, tensor, inputs|
+      last_dimen_list = last_axis(inputs[0])
+      input_shape = shape_eval(inputs[0])
+      labels = last_axis(inputs[1])
+      func = -> (logits, label) {
+        c = logits.max
+        transformed_logits = logits.map { |l| l - c }
+        sum = transformed_logits.map { |x| Math.exp(x) }.reduce(:+)
+        transformed_logits.zip(label).map { |x, y| (Math.log(sum) - x) * y }
+      }
+
+      if input_shape.size == 1
+        func.(last_dimen_list, labels)
+      else
+        arr = last_dimen_list.zip(labels).collect do |list, label|
+          func.(list, label)
+        end
+        TensorShape.reshape(arr.flatten, input_shape)
+      end
+    end
+
+    register_op :softmax_cross_entropy_with_logits_v2_grad do |context, tensor, inputs|
+      last_dimen_list = last_axis(inputs[0])
+      labels = last_axis(inputs[1])
+      passed_grads = last_axis(inputs[2])
+      input_shape = shape_eval(inputs[0])
+
+      func = -> (logits, label, grad) {
+        c = logits.max
+        transformed_logits = logits.map { |l| Math.exp(l - c) }
+        e_sum = transformed_logits.reduce(:+)
+        transformed_logits.zip(label).zip(grad).map { |(x, y), g| (x / e_sum) * g - y }
+      }
+
+      if input_shape.size == 1
+        func.(last_dimen_list, labels, passed_grads)
+      else
+        arr = last_dimen_list.zip(labels).zip(passed_grads).collect do |(list, label), passed_grad|
+          func.(list, label, passed_grad)
+        end
+        TensorShape.reshape(arr.flatten, input_shape)
       end
     end
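
Note: the cross-entropy lambda computes sum_i(label_i * (log(sum_j e^x_j) - x_i)) on max-shifted logits, which is exactly -sum_i(label_i * log(softmax(x)_i)). A quick numeric check in plain Ruby (values chosen only for illustration):

    logits = [1.0, 2.0, 3.0]
    label  = [0.0, 0.0, 1.0]   # one-hot
    c = logits.max
    shifted = logits.map { |l| l - c }
    sum = shifted.map { |x| Math.exp(x) }.reduce(:+)
    loss = shifted.zip(label).map { |x, y| (Math.log(sum) - x) * y }.reduce(:+)
    loss # => ~0.4076, i.e. -Math.log(0.6652...), the softmax probability of class 2
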
@@ -580,9 +704,13 @@ module TensorStream
 
     def eval_operation(tensor, child_context)
       return @context[tensor.name] if @context.key?(tensor.name)
-
-      # puts tensor.name
       invoke(tensor, child_context).tap do |result|
+        # puts tensor.name
+        # if tensor.name.start_with?('softmax_cross_entropy_with_logits')
+        #   puts result.inspect
+        # end
+        # result.flatten.each do |a|
+        # end if result.is_a?(Array)
         if tensor.breakpoint
           a = resolve_placeholder(tensor.inputs[0], child_context) if tensor.inputs && tensor.inputs[0]
           b = resolve_placeholder(tensor.inputs[1], child_context) if tensor.inputs && tensor.inputs[1]
@@ -792,17 +920,6 @@ module TensorStream
       shape.is_a?(Array) ? shape.size : 0
     end
 
-    def get_broadcast_gradient_args(input_a, input_b)
-      return [] if get_rank(input_b).zero? && get_rank(input_a).zero?
-      return nil if get_rank(input_b).zero?
-      # ruby scalar
-      if get_rank(input_a).zero?
-        _broadcast_gradient_op(input_b, input_a, 0, true)
-      elsif get_rank(input_a) > 0
-        _broadcast_gradient_op(input_a, input_b, 0)
-      end
-    end
-
     def concat_array(values, axis)
       combined_array = values.shift
       axis = get_rank(combined_array) - 1 if axis == -1
@@ -40,11 +40,19 @@ module TensorStream
     private
 
     def process_options(node)
+      return if node.options.nil?
       node.options.each do |k, v|
         next if %w[name].include?(k.to_s)
         @lines << " attr {"
         @lines << " key: \"#{k}\""
         @lines << " value {"
+        if (v.is_a?(TrueClass) || v.is_a?(FalseClass))
+          @lines << " b: #{v.to_s}"
+        elsif (v.is_a?(Integer))
+          @lines << " int_val: #{v}"
+        elsif (v.is_a?(Float))
+          @lines << " float_val: #{v}"
+        end
         @lines << " }"
         @lines << " }"
       end
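
Note: the pbtext serializer now skips nodes without options and writes typed attr values (b:, int_val:, float_val:) instead of an empty value block. Illustrative output for an integer option such as axis: 1 (exact indentation depends on the literal strings above):

    attr {
      key: "axis"
      value {
        int_val: 1
      }
    }
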
@@ -1,8 +1,8 @@
 module TensorStream
   # module that contains helper functions useful for ops
   module OpHelper
-    def _op(code, t_a, t_b = nil, options = {})
-      op = Operation.new(code.to_sym, t_a, t_b, options)
+    def _op(code, *args)
+      op = Operation.new(code.to_sym, *args)
       if !TensorStream.get_default_graph.get_dependency_scope.nil?
         i_op(:identity, op, TensorStream.get_default_graph.get_dependency_scope, name: [op.name, 'tuple', 'control_dependency'].join('/'))
       else
@@ -11,8 +11,15 @@ module TensorStream
     end
 
     # same as op but with a marker that it was internal generated
-    def i_op(code, t_a, t_b = nil, options = {})
-      Operation.new(code.to_sym, t_a, t_b, options.merge(internal: true))
+    def i_op(code, *args)
+      options = if args.last.is_a?(Hash)
+                  args.pop
+                else
+                  {}
+                end
+
+      args << options.merge(internal: true)
+      Operation.new(code.to_sym, *args)
     end
 
     def cons(value, options = {})
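
Note: _op and i_op now accept any number of positional inputs; i_op pops a trailing Hash off the argument list as options before tagging it internal: true, so ops are no longer limited to two operands. A standalone sketch of that option-splitting step (split_op_args is a hypothetical name):

    def split_op_args(*args)
      options = args.last.is_a?(Hash) ? args.pop : {}
      [args, options.merge(internal: true)]
    end

    split_op_args(:input_a, :input_b, name: 'x')
    # => [[:input_a, :input_b], {name: 'x', internal: true}]
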
@@ -68,10 +75,40 @@ module TensorStream
       TensorStream::Ops::FLOATING_POINT_TYPES.include?(type)
     end
 
+    def int_type?(type)
+      TensorStream::Ops::INTEGER_TYPES.include?(type)
+    end
+
     def format_source(trace)
       grad_source = trace.select { |c| c.to_s.include?(File.join('lib', 'tensor_stream', 'math_gradients')) }.first
       source = trace.reject { |c| c.to_s.include?(File.join('lib', 'tensor_stream')) }.first
       [grad_source, source].compact.join("\n")
     end
+
+    def shapes_fully_specified_and_equal(x, y)
+      return false if !shape_full_specified(x) || !shape_full_specified(y)
+      return false if x.shape.shape != y.shape.shape
+
+      true
+    end
+
+    def shape_full_specified(tensor)
+      return false if tensor.shape.nil?
+      return false if tensor.shape.shape.nil?
+
+      tensor.shape.shape.each { |s| return false if s.nil? }
+      true
+    end
+
+    def reduced_shape(input_shape, axes)
+      input_shape = TensorStream.convert_to_tensor(input_shape)
+      axes = TensorStream.convert_to_tensor(axes)
+      input_rank = i_op(:size, input_shape)
+      axes = TensorStream.range(0, input_rank) if axes.nil?
+      axes = (axes + input_rank) % input_rank
+      axes_shape = i_op(:shape, axes)
+      TensorStream.dynamic_stitch([TensorStream.range(0, input_rank), axes],
+                                  [input_shape, i_op(:fill, axes_shape, 1)])
+    end
   end
 end
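
Note: reduced_shape is now expressed with graph ops (:size, range, dynamic_stitch, :fill) instead of being computed eagerly, so it also works when the shape is only known at run time. A plain-array walkthrough of the stitched result for input_shape [2, 3, 4] reduced over axis 1 (variable names are illustrative):

    input_shape = [2, 3, 4]
    axes        = [1]
    rank        = input_shape.size                    # :size  -> 3
    axes        = axes.map { |a| (a + rank) % rank }  # normalize negative axes
    ones        = [1] * axes.size                     # :fill(shape(axes), 1)

    # dynamic_stitch([range(0, rank), axes], [input_shape, ones]):
    merged = []
    (0...rank).each_with_index { |ind, m| merged[ind] = input_shape[m] }
    axes.each_with_index       { |ind, m| merged[ind] = ones[m] }
    merged # => [2, 1, 4]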