tensor_stream 0.1.3 → 0.1.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/README.md +9 -3
- data/lib/tensor_stream/evaluator/operation_helpers/array_ops_helper.rb +100 -0
- data/lib/tensor_stream/evaluator/ruby_evaluator.rb +112 -123
- data/lib/tensor_stream/graph.rb +6 -0
- data/lib/tensor_stream/math_gradients.rb +41 -10
- data/lib/tensor_stream/operation.rb +13 -2
- data/lib/tensor_stream/ops.rb +5 -1
- data/lib/tensor_stream/tensor.rb +24 -2
- data/lib/tensor_stream/version.rb +1 -1
- data/samples/test.py +44 -0
- data/tensor_stream.gemspec +2 -0
- metadata +33 -3
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: a25008802340137903308c5ec471a1ff4bedfc42
+  data.tar.gz: ba3549b627248f16d1afcea678d844473765d399
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 912fc4724db50b4b45a4cf7af11cd28a1df8dc9589a2347fb9c5374ada51cc34c11c179c70fc392ef0bd5c8c6dc41802d8ee4457df47cf7776ee78d92d1f0b74
+  data.tar.gz: 506e86fbd0877c10b51b6106529284c33c6158576e53f2fe720b85213f2ec1577156c614e85a587320e2d8a063dcefee51a59f6325cc1e24a27e8a2e99114ebf
data/README.md
CHANGED
@@ -1,3 +1,5 @@
+[![Gem Version](https://badge.fury.io/rb/tensor_stream.svg)](https://badge.fury.io/rb/tensor_stream)
+
 # TensorStream
 
 A reimplementation of TensorFlow for ruby. This is a ground up implementation with no dependency on TensorFlow. Effort has been made to make the programming style as near to TensorFlow as possible, comes with a pure ruby evaluator by default as well with support for an opencl evaluator.
@@ -94,7 +96,11 @@ tf.session do |sess|
 end
 ```
 
-
+You can take a look at spec/tensor_stream/operation_spec.rb for a list of supported ops and various examples and test cases used. Of course these contain only a
+sliver of what TensorFlow can do, so feel free to file a PR to add requested
+ops and test cases.
+
 ## Python to Ruby guide
 
 Not all ops are available. Available ops are defined in lib/tensor_stream/ops.rb, corresponding gradients are found at lib/tensor_stream/math_gradients.
@@ -122,7 +128,7 @@ w = tf.Variable(0, 'weights')
 Ruby
 
 ```ruby
-w =tf.variable(0, name: 'weights')
+w = tf.variable(0, name: 'weights')
 ```
 
 # Shapes
@@ -150,7 +156,7 @@ W = tf.variable(rand, name: "weight")
 b = tf.variable(rand, name: "bias")
 pred = X * W + b
 cost = tf.reduce_sum(tf.pow(pred - Y, 2)) / ( 2 * 10)
-cost.to_math # "(reduce_sum(|((((Placeholder: * weight) + bias) - Placeholder_2:)^2)|) /
+cost.to_math # "(reduce_sum(|((((Placeholder: * weight) + bias) - Placeholder_2:)^2)|) / 20.0)"
 ```
 
 breakpoints can also be set, block will be evaluated during computation
data/lib/tensor_stream/evaluator/operation_helpers/array_ops_helper.rb
ADDED
@@ -0,0 +1,100 @@
+module TensorStream
+  # various utility functions for array processing
+  module ArrayOpsHelper
+    def broadcast(input_a, input_b)
+      sa = shape_eval(input_a)
+      sb = shape_eval(input_b)
+
+      return [input_a, input_b] if sa == sb
+
+      # descalar
+      if sa.empty?
+        input_a = [input_a]
+        sa = [1]
+      end
+
+      if sb.empty?
+        input_b = [input_b]
+        sb = [1]
+      end
+
+      target_shape = shape_diff(sa, sb)
+
+      if target_shape
+        input_b = broadcast_dimensions(input_b, target_shape)
+      else
+        target_shape = shape_diff(sb, sa)
+        raise "Incompatible shapes for op #{shape_eval(input_a)} vs #{shape_eval(input_b)}" if target_shape.nil?
+
+        input_a = broadcast_dimensions(input_a, target_shape)
+      end
+
+      [input_a, input_b]
+    end
+
+    # explicit broadcasting helper
+    def broadcast_dimensions(input, dims = [])
+      return input if dims.empty?
+
+      d = dims.shift
+
+      if input.is_a?(Array) && (get_rank(input) - 1) == dims.size
+        row_to_dup = input.collect do |item|
+          broadcast_dimensions(item, dims.dup)
+        end
+
+        row_to_dup + Array.new(d) { row_to_dup }.flatten(1)
+      elsif input.is_a?(Array)
+        Array.new(d) { broadcast_dimensions(input, dims.dup) }
+      else
+        Array.new(d + 1) { input }
+      end
+    end
+
+    # handle 2 tensor math operations
+    def vector_op(vector, vector2, op = ->(a, b) { a + b }, switch = false)
+      if get_rank(vector) < get_rank(vector2) # upgrade rank of A
+        duplicated = Array.new(vector2.size) do
+          vector
+        end
+        return vector_op(duplicated, vector2, op, switch)
+      end
+
+      return op.call(vector, vector2) unless vector.is_a?(Array)
+
+      vector.each_with_index.collect do |item, index|
+        next vector_op(item, vector2, op, switch) if item.is_a?(Array) && get_rank(vector) > get_rank(vector2)
+
+        z = if vector2.is_a?(Array)
+              if index < vector2.size
+                vector2[index]
+              else
+                raise 'incompatible tensor shapes used during op' if vector2.size != 1
+                vector2[0]
+              end
+            else
+              vector2
+            end
+
+        if item.is_a?(Array)
+          vector_op(item, z, op, switch)
+        else
+          switch ? op.call(z, item) : op.call(item, z)
+        end
+      end
+    end
+
+    def shape_diff(shape_a, shape_b)
+      return nil if shape_b.size > shape_a.size
+
+      reversed_a = shape_a.reverse
+      reversed_b = shape_b.reverse
+
+      reversed_a.each_with_index.collect do |s, index|
+        next s if index >= reversed_b.size
+        return nil if reversed_b[index] > s
+        s - reversed_b[index]
+      end.reverse
+    end
+  end
+end
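To see what the new helpers do in practice, here is a minimal sketch. `shape_eval` and `get_rank` are normally supplied by the evaluator that mixes this module in; the stand-ins below are assumptions added so the snippet runs on its own.

```ruby
require 'tensor_stream/evaluator/operation_helpers/array_ops_helper'

class BroadcastDemo
  include TensorStream::ArrayOpsHelper

  # Stand-in: shape of a nested Ruby array, e.g. [[1, 2], [3, 4]] => [2, 2]
  def shape_eval(arr)
    return [] unless arr.is_a?(Array)
    [arr.size] + shape_eval(arr.first)
  end

  # Stand-in: rank is just the depth of nesting
  def get_rank(arr)
    shape_eval(arr).size
  end
end

demo = BroadcastDemo.new
a, b = demo.broadcast([[1, 2], [3, 4]], [10, 20])
# b is expanded to [[10, 20], [10, 20]] so both operands line up
p demo.vector_op(a, b, ->(x, y) { x + y })
# => [[11, 22], [13, 24]]
```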
data/lib/tensor_stream/evaluator/ruby_evaluator.rb
CHANGED
@@ -1,4 +1,5 @@
 require 'tensor_stream/evaluator/operation_helpers/random_gaussian'
+require 'tensor_stream/evaluator/operation_helpers/array_ops_helper'
 require 'tensor_stream/math_gradients'
 
 module TensorStream
@@ -25,6 +26,7 @@ module TensorStream
     attr_accessor :retain
 
     include TensorStream::OpHelper
+    include TensorStream::ArrayOpsHelper
 
     def initialize(session, context, thread_pool: nil)
       @session = session
@@ -37,6 +39,8 @@ module TensorStream
       return tensor.map { |t| run(t, execution_context) } if tensor.is_a?(Array)
 
       return tensor if retain.include?(tensor) # if var is in retain don't eval to value
+
+      tensor = tensor.call() if tensor.is_a?(Proc)
 
       child_context = execution_context.dup
       res = if tensor.is_a?(Operation)
@@ -107,6 +111,11 @@ module TensorStream
        }
 
        call_op(:sign, a, child_context, func)
+      when :logical_and
+        a = complete_eval(a, child_context)
+        b = complete_eval(b, child_context)
+
+        call_vector_op(:greater, a, b, child_context, ->(t, u) { t && u })
      when :equal
        a = complete_eval(a, child_context)
        b = complete_eval(b, child_context)
@@ -188,35 +197,40 @@ module TensorStream
          tensor.items[0].value
        when :reduce_mean
          c = fp_type?(tensor.data_type) ? 0.0 : 0
-         func = lambda
-         if
-
-
-
+         func = lambda do |arr|
+           return c if arr.nil?
+
+           reduced_val = arr[0]
+           arr[1..arr.size].each do |v|
+             reduced_val = vector_op(reduced_val, v, ->(a, b) { a + b })
            end
-
+
+           vector_op(reduced_val, nil, ->(a, _b) { a / arr.size })
+         end
 
          reduction(child_context, tensor, func)
        when :reduce_sum
          c = fp_type?(tensor.data_type) ? 0.0 : 0
-         func = lambda
-
-
-
-           v
+         func = lambda do |arr|
+           reduced_val = arr[0]
+           arr[1..arr.size].each do |v|
+             reduced_val = vector_op(reduced_val, v, ->(t, u) { t + u })
            end
-
+           reduced_val
+         end
 
          reduction(child_context, tensor, func)
        when :reduce_prod
          c = fp_type?(tensor.data_type) ? 1.0 : 1
-         func = lambda
-         if
-
-
-
+         func = lambda do |arr|
+           return c if arr.nil?
+
+           reduced_val = arr[0]
+           arr[1..arr.size].each do |v|
+             reduced_val = vector_op(reduced_val, v, ->(a, b) { a * b })
            end
-
+           reduced_val
+         end
 
          reduction(child_context, tensor, func)
        when :transpose
@@ -316,6 +330,10 @@ module TensorStream
          (Matrix[*matrix_a] * Matrix[*matrix_b]).to_a
        when :gradients
          raise 'not implemented in evaluator' # see TensorStream.gradients instead.
+       when :broadcast_transform
+         a = complete_eval(a, child_context)
+         b = complete_eval(b, child_context)
+         broadcast(a, b)
        when :identity
          complete_eval(a, child_context)
        when :print
@@ -348,6 +366,11 @@ module TensorStream
          b = complete_eval(b, child_context)
 
          call_vector_op(:max, a, b, child_context, ->(t, u) { [t, u].max })
+       when :broadcast_gradient_args
+         a = complete_eval(a, child_context)
+         b = complete_eval(b, child_context)
+
+         get_broadcast_gradient_args(a, b)
        else
          raise "unknown op #{tensor.operation}"
        end.tap do |result|
@@ -362,15 +385,15 @@ module TensorStream
     rescue EvaluatorExcecutionException => e
       raise e
     rescue StandardError => e
-
-
-
-
-
-
-
+      a = complete_eval(a, child_context)
+      b = complete_eval(b, child_context)
+      puts "name: #{tensor.given_name}"
+      puts "op: #{tensor.to_math(true, 1)}"
+      puts "A: #{a}" if a
+      puts "B: #{b}" if b
+
       puts e.backtrace.join("\n")
-      raise EvaluatorExcecutionException.new(e, tensor), "error #{e.message} while evaluating #{tensor.name} : #{tensor.to_math} defined at #{tensor.source}"
+      raise EvaluatorExcecutionException.new(e, tensor), "error #{e.message} while evaluating #{tensor.name} : #{tensor.to_math(true,1)} defined at #{tensor.source}"
     end
 
     def eval_tensor(tensor, child_context)
@@ -425,19 +448,22 @@ module TensorStream
 
     def reduction(child_context, tensor, func)
       val = complete_eval(tensor.items[0], child_context)
-      axis = tensor.options[:axis]
-      keep_dims = tensor.options[:keepdims]
+      axis = complete_eval(tensor.options[:axis], child_context)
+      keep_dims = complete_eval(tensor.options[:keepdims], child_context)
+      rank = get_rank(val)
+      return val if axis && axis.is_a?(Array) && axis.empty?
 
-
-
-
-
+      axis = if axis.nil?
+               nil
+             elsif axis.is_a?(Array)
+               return val if axis.empty?
 
-
-
-
-
-
+               axis.map { |a| a < 0 ? rank - a.abs : a }
+             else
+               axis < 0 ? rank - axis.abs : axis
+             end
+
+      reduce_axis(0, axis, val, keep_dims, func)
     end
 
     def arr_pad(arr, paddings, data_type = :float32, rank = 0)
@@ -537,14 +563,41 @@ module TensorStream
        if get_rank(eval_b).zero?
          op.call(eval_a, eval_b)
        else
-
+         vector_op(eval_b, eval_a, op, true)
        end
      elsif get_rank(eval_a) > 0
-
-
-
-
-
+       vector_op(eval_a, eval_b, op)
+     end
+   end
+
+   # determine possible reduction axis to be used
+   def _broadcast_gradient_op(vector_shape1, vector_shape2, level)
+     va_rank = _rank_from_shape(vector_shape1)
+     vb_rank = _rank_from_shape(vector_shape2)
+     return [] if vector_shape1 == vector_shape2 # same shape so no reductions
+
+     shape2_r = vector_shape2.reverse
+
+     vector_shape1.reverse.each_with_index.collect do |s, index|
+       next va_rank - index - 1 if index >= shape2_r.size
+       next nil if shape2_r[index] == s
+       next nil if shape2_r[index] > s
+       va_rank - index - 1
+     end.compact
+   end
+
+   def _rank_from_shape(shape)
+     shape.is_a?(Array) ? shape.size : 0
+   end
+
+   def get_broadcast_gradient_args(input_a, input_b)
+     return [] if get_rank(input_b).zero? && get_rank(input_a).zero?
+     return nil if get_rank(input_b).zero?
+     # ruby scalar
+     if get_rank(input_a).zero?
+       _broadcast_gradient_op(input_b, input_a, 0, true)
+     elsif get_rank(input_a) > 0
+       _broadcast_gradient_op(input_a, input_b, 0)
     end
   end
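The axis selection in `_broadcast_gradient_op` can be checked in isolation. The sketch below restates its logic under a hypothetical name (and drops the unused `vb_rank`) so it runs without instantiating the evaluator: given two broadcast-compatible shapes, it returns the axes along which the gradient of the smaller operand has to be summed back.

```ruby
# Restatement of _broadcast_gradient_op for illustration only.
def broadcast_gradient_axes(shape1, shape2)
  rank1 = shape1.is_a?(Array) ? shape1.size : 0
  return [] if shape1 == shape2 # same shape, nothing to reduce

  shape2_r = shape2.reverse
  shape1.reverse.each_with_index.collect do |s, index|
    next rank1 - index - 1 if index >= shape2_r.size # missing leading dim
    next nil if shape2_r[index] == s                 # dims match, no reduction
    next nil if shape2_r[index] > s                  # not broadcastable this way
    rank1 - index - 1                                # broadcast dim, reduce it
  end.compact
end

p broadcast_gradient_axes([2, 3], [3])    # => [0]  sum over the rows
p broadcast_gradient_axes([2, 3], [2, 1]) # => [1]  sum over the columns
```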
@@ -578,7 +631,7 @@ module TensorStream
    def process_function_op(a, child_context, op)
      # ruby scalar
      if (a.is_a?(Tensor) && a.shape.rank > 0) || a.is_a?(Array)
-
+       vector_op(a, 0, op)
      elsif !a.is_a?(Tensor) || a.shape.rank.zero?
        v = run(a, child_context)
        raise FullEvalNotPossible.new, "full eval not possible for #{v.name}" if v.is_a?(Tensor) && !v.is_const
@@ -605,54 +658,29 @@ module TensorStream
      Tensor.cast_dtype(var, placeholder.data_type)
    end
 
-   def reduce_axis(axis, val, keep_dims,
-     val
-     return val.is_a?(Array) ? op.call(val.flatten) : val if axis.nil?
-     return val.transpose.collect { |v| keep_dims ? [op.call(v)] : op.call(v) } if axis.zero?
-     return val.collect { |v| keep_dims ? [op.call(v)] : op.call(v) } if axis == 1
-
-     raise "can't handle with axis > 1 :("
-   end
+   def reduce_axis(current_axis, axis, val, keep_dims, f = ->(a, b) { a + b })
+     return val unless val.is_a?(Array)
 
-
-
-     if item.is_a?(Array)
-       constant_add(item, constant)
-     elsif item.respond_to?(:value)
-       item.value + constant
-     else
-       item + constant
-     end
+     r = val.collect do |v|
+       reduce_axis(current_axis + 1, axis, v, keep_dims, f)
      end
-   end
 
-
-     eval_vector = complete_eval(vector, child_context)
-     constant = complete_eval(constant, child_context)
+     should_reduce_axis = axis.nil? || (axis.is_a?(Array) && axis.include?(current_axis)) || (current_axis == axis)
 
-
-
-
-
-
-
-     else
-       raise "incompatible tensor shapes used during op" if constant.size != 1
-       constant[0]
-     end
-   else
-     constant
-   end
-     if item.is_a?(Array)
-       constant_op(item, c, child_context, op, switch)
-     elsif item.respond_to?(:value)
-       switch ? op.call(c, item.value) : op.call(item.value, c)
-     else
-       switch ? op.call(c, item) : op.call(item, c)
+     if should_reduce_axis
+       reduced_val = r[0]
+       if r.size > 1
+         reduced_val = f.call(r[0..val.size])
+       elsif r.size == 0
+         reduced_val = f.call(nil)
       end
+       keep_dims ? [ reduced_val ] : reduced_val
+     else
+       r
     end
   end
 
+   # handle 3 tensor math operations
    def call_3way_vector_op(v_a, v_b, v_c, child_context, op = ->(a, b, c) { a + b + c })
      return op.call(v_a, v_b, v_c) unless v_a.is_a?(Array)
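The new `reduce_axis` walks one nesting level per call and folds a level only when it matches the requested axis. A condensed standalone restatement (the reducer receives a whole slice, mirroring the lambdas built for :reduce_sum and friends in the hunks above):

```ruby
def reduce_axis(current_axis, axis, val, keep_dims, f)
  return val unless val.is_a?(Array)

  # descend first, so inner axes are handled before this level is folded
  r = val.collect { |v| reduce_axis(current_axis + 1, axis, v, keep_dims, f) }

  should_reduce = axis.nil? ||
                  (axis.is_a?(Array) && axis.include?(current_axis)) ||
                  (current_axis == axis)
  return r unless should_reduce

  reduced = r.size > 1 ? f.call(r) : (r.empty? ? f.call(nil) : r[0])
  keep_dims ? [reduced] : reduced
end

# element-wise sum over a whole slice, like the :reduce_sum lambda
sum = lambda do |arr|
  return 0 if arr.nil?
  arr.reduce { |a, b| a.is_a?(Array) ? a.zip(b).map { |x, y| x + y } : a + b }
end

m = [[1, 2, 3], [4, 5, 6]]
p reduce_axis(0, 0, m, false, sum) # => [5, 7, 9]  fold the rows together
p reduce_axis(0, 1, m, false, sum) # => [6, 15]    fold within each row
```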
@@ -667,30 +695,6 @@ module TensorStream
      end
    end
 
-   def vector_op(vector, vector2, child_context, op = ->(a, b) { a + b })
-     v_a = run(vector, child_context)
-     v_b = run(vector2, child_context)
-
-     if get_rank(v_a) < get_rank(v_b) # upgrade rank of A
-       duplicated = Array.new(v_b.size) do
-         v_a
-       end
-       return vector_op(duplicated, v_b, child_context, op)
-     end
-
-     v_a.each_with_index.collect do |item, index|
-       next vector_op(item, v_b, child_context, op) if item.is_a?(Array) && get_rank(v_a) > get_rank(v_b)
-
-       z = index < v_b.size ? v_b[index] : v_b[0]
-
-       if item.is_a?(Array)
-         constant_op(item, z, child_context, op)
-       else
-         item.respond_to?(:value) ? op.call(item.value, z.value) : op.call(item, z)
-       end
-     end
-   end
-
    def all_true?(arr)
      if arr.is_a?(Array)
        arr.each do |a|
@@ -702,21 +706,6 @@ module TensorStream
      !!arr
    end
 
-   def vector_add(vector, vector2, child_context)
-     v_a = run(vector, child_context)
-     v_b = run(vector2, child_context)
-
-     v_a.each_with_index.collect do |item, index|
-       if item.is_a?(Array)
-         constant_add(item, constant)
-       elsif item.respond_to?(:value)
-         item.value + v_b[index].value
-       else
-         item + v_b[index]
-       end
-     end
-   end
-
    def generate_vector(shape, dtype: :float32, generator:)
      if shape.is_a?(Integer)
        Array.new(shape) do
data/lib/tensor_stream/graph.rb
CHANGED
@@ -12,6 +12,11 @@ module TensorStream
    end
 
    def reset
+     @placeholder_counter = 0
+     @const_counter = 0
+     @var_counter = 0
+     @op_counter = 0
+
      @nodes = {}
      @collections = {
        :"#{GraphKeys::GLOBAL_VARIABLES}" => []
@@ -39,6 +44,7 @@ module TensorStream
      raise 'Placeholder cannot be used when eager_execution is enabled' if @eager_execution && node.is_a?(Placeholder)
      node.name = uniqunify(node.name) if @nodes[node.name]
      @nodes[node.name] = node
+     node.send(:propagate_consumer, node)
      node.value = node.eval if @eager_execution
    end
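A side effect worth noting: because reset now zeroes the name counters, rebuilding the same graph (for example between specs) yields the same auto-generated node names. A small sketch, assuming the usual top-level API; the exact generated names depend on the naming scheme:

```ruby
require 'tensor_stream'

tf = TensorStream
a = tf.constant(1.0)
first_name = a.name

tf.get_default_graph.reset # counters restart along with the node table

b = tf.constant(1.0)
puts b.name == first_name # expected: true, names are generated from scratch
```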
data/lib/tensor_stream/math_gradients.rb
CHANGED
@@ -41,17 +41,31 @@ module TensorStream
      when :log
        (i_cons(1, constant_options_1) / _ds(tensor.items[0])) * grad
      when :tanh
-       (i_cons(1, constant_options_1) - (i_op(:tanh, _ds(tensor.items[0]))**2))
+       i_op(:mul, (i_cons(1, constant_options_1) - (i_op(:tanh, _ds(tensor.items[0]))**2)), grad, name: 'grad_tanh')
      when :tan
        (i_cons(1, constant_options_1) / (i_op(:cos, _ds(tensor.items[0]))**2)) * grad
      when :sin
-       i_op(:cos, tensor.items[0])
+       i_op(:mul, i_op(:cos, tensor.items[0]), grad, name: 'grad_sin')
      when :sqrt
        i_cons(1, constant_options_1) / (i_cons(2, constant_options_1) * i_op(:sqrt, _ds(tensor.items[0]))) * grad
      when :cos
        -i_op(:sin, tensor.items[0]) * grad
      when :add
-
+       # rx = op(:shape, tensor.items[0])
+       # ry = op(:shape, tensor.items[1])
+
+       # ones_a = op(:ones_like, tensor.items[0])
+       # ones_b = op(:ones_like, tensor.items[1])
+       # inputs = _broadcast_transform(grad * ones_a, grad2 * ones_b)
+       # sx, sy = _broadcast_gradient_args(rx, ry)
+
+       # keep_dims_x = op(:rank, inputs[0]) == op(:rank, tensor.items[0])
+       # keep_dims_y = op(:rank, inputs[1]) == op(:rank, tensor.items[1])
+
+       # add_x = op(:reduce_sum, inputs[0], nil, axis: sy, keepdims: keep_dims_x)
+       # add_y = op(:reduce_sum, inputs[1], nil, axis: sx, keepdims: keep_dims_y)
+       # _filtered_sum(add_x, add_y, wrt_dx)
+       _grad_with_broadcast(tensor, wrt_dx, ->(a, b) { i_op(:add, a, b, name: 'grad_add') }, options)
      when :sub
        _grad_with_broadcast(tensor, wrt_dx, ->(a, b) { i_op(:sub, a, b, name: 'grad_sub') }, options)
      when :pow
@@ -69,7 +83,15 @@ module TensorStream
        _reduce_when_necessary(gx + gy, wrt_dx)
      when :mul
        # apply the product rule
-
+       rx = op(:shape, tensor.items[0])
+       ry = op(:shape, tensor.items[1])
+       sx, sy = _broadcast_gradient_args(rx, ry)
+       inputs = _broadcast_transform(tensor.items[0], tensor.items[1])
+       keep_dims_x = op(:rank, inputs[0]) == op(:rank, tensor.items[0])
+       keep_dims_y = op(:rank, inputs[1]) == op(:rank, tensor.items[1])
+
+       _filtered_sum(op(:reduce_sum, grad * _ds(inputs[1]), nil, axis: sy, keepdims: keep_dims_x),
+                     op(:reduce_sum, _ds(inputs[0]) * grad2, nil, axis: sx, keepdims: keep_dims_y), wrt_dx)
      when :reduce_mean
        input_size = i_op(:reduce_prod, i_op(:shape, tensor.items[0]))
        output_size = i_op(:reduce_prod, i_op(:shape, tensor))
@@ -96,9 +118,6 @@ module TensorStream
        matmul_db = i_op(:matmul, tensor.items[0], identity_1, transpose_a: true,
                         pad_zeros: true,
                         name: 'matrix_dy')
-
-       zero_vect = i_op(:zeros_like, wrt_dx, nil, name: 'zero_vect')
-
        # matmul_db = op(:transpose, matmul_db, nil).first
 
        # begin_a = op(:zeros, op(:rank, matmul_db), nil, data_type: :int32, name: 'begin_a')
@@ -113,9 +132,7 @@ module TensorStream
 
        # norm_a = i_op(:cond, norm_a[0], norm_a, pred: i_op(:rank, matmul_da) > i_op(:rank, derivative_a))
        # norm_b = i_op(:cond, norm_b[0], norm_b, pred: i_op(:rank, matmul_db) > i_op(:rank, derivative_b))
-
-       i_op(:cond, norm_a, zero_vect, pred: i_op(:reduce_sum, norm_a) != 0) + i_op(:cond, norm_b, zero_vect, pred: i_op(:reduce_sum, norm_b) != 0)
-       # m.breakpoint! { |t, a, b, v| binding.pry }
+       _filtered_sum(norm_a, norm_b, wrt_dx)
      else
        raise "no derivative implementation found for op #{tensor.operation}"
      end
@@ -161,5 +178,19 @@ module TensorStream
      reduced = op(:reduce_sum, tensor, nil, axis: 0)
      op(:cond, ->{ reduced }, tensor, pred: rank > dx_rank)
    end
+
+   def self._broadcast_gradient_args(input_a, input_b)
+     [op(:broadcast_gradient_args, input_a, input_b), op(:broadcast_gradient_args, input_b, input_a)]
+   end
+
+   def self._broadcast_transform(input_a, input_b)
+     op(:broadcast_transform, input_a, input_b)
+   end
+
+   # filter out zero arrays
+   def self._filtered_sum(input_a, input_b, wrt_dx)
+     zero_vect = op(:zeros_like, wrt_dx)
+     (i_op(:cond, input_a, zero_vect, pred: i_op(:reduce_sum, input_a) != 0) + i_op(:cond, input_b, zero_vect, pred: i_op(:reduce_sum, input_b) != 0))
+   end
  end
 end
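The net effect of the :mul rewrite is that gradients of broadcast operands are summed back to their original shape. A sketch, assuming the TensorFlow-style TensorStream.gradients entry point that the bundled Python script further down cross-checks:

```ruby
require 'tensor_stream'

tf = TensorStream
a = tf.constant([[1.0, 2.0], [3.0, 4.0]])
b = tf.constant([2.0, 1.0]) # broadcast across a's rows

prod = a * b
grads = tf.gradients(prod, [a, b])

tf.session do |sess|
  # d(prod)/db collapses over the broadcast axis, which is exactly what
  # _broadcast_gradient_args plus reduce_sum arrange above
  p sess.run(grads)
end
```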
data/lib/tensor_stream/operation.rb
CHANGED
@@ -2,6 +2,7 @@ module TensorStream
  # TensorStream class that defines an operation
  class Operation < Tensor
    attr_accessor :name, :operation, :items, :rank, :options
+   attr_reader :outputs
 
    def initialize(operation, input_a, input_b, options = {})
      @graph = options[:graph] || TensorStream.get_default_graph
@@ -51,7 +52,7 @@ module TensorStream
 
    def set_data_type(passed_data_type)
      case operation
-     when :greater, :less, :equal
+     when :greater, :less, :equal, :not_equal, :greater_equal, :less_equal
        :boolean
      when :shape, :rank
        :int32
@@ -150,6 +151,8 @@ module TensorStream
        "#{sub_item} == #{auto_math(items[1], name_only, max_depth - 1)}"
      when :not_equal
        "#{sub_item} != #{auto_math(items[1], name_only, max_depth - 1)}"
+     when :logical_and
+       "#{sub_item} && #{auto_math(items[1], name_only, max_depth - 1)}"
      when :sqrt
        "sqrt(#{sub_item})"
      when :zeros_like
@@ -161,7 +164,7 @@ module TensorStream
      when :cast
        "cast(#{auto_math(sub_item)}, #{data_type})"
      else
-       raise "math form for #{operation}"
+       raise "no math form for #{operation} defined"
      end
    end
 
@@ -171,6 +174,14 @@ module TensorStream
 
    private
 
+   def propagate_consumer(consumer)
+     super(consumer)
+
+     @items.compact.each do |item|
+       item.send(:propagate_consumer, consumer) if item.name != self.name
+     end
+   end
+
    def set_name
      "#{@operation}#{graph.get_operation_counter}:#{@rank}"
    end
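With the new :logical_and branch, to_math can now print a boolean expression instead of raising. A rough sketch using the existing comparison ops together with the Tensor#and helper added in this release (the rendered string format is approximate):

```ruby
require 'tensor_stream'

tf = TensorStream
a = tf.constant(1.0)
b = tf.constant(2.0)
c = tf.constant(3.0)

cond = tf.less(a, b).and(tf.less(b, c))
puts cond.to_math # something like "(1.0 < 2.0) && (2.0 < 3.0)"
```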
data/lib/tensor_stream/ops.rb
CHANGED
@@ -49,7 +49,7 @@ module TensorStream
    end
 
    def eye(num_rows, num_columns: nil, dtype: :float32, name: nil)
-     op(:eye, num_rows, num_columns || num_rows, data_type: dtype, name: name
+     op(:eye, num_rows, num_columns || num_rows, data_type: dtype, name: name)
    end
 
    def shape(input, name: nil, out_type: :int32)
@@ -80,6 +80,10 @@ module TensorStream
      op(:less, input_a, input_b, name: name)
    end
 
+   def logical_and(input_a, input_b, name: nil)
+     op(:logical_and, input_a, input_b, name: name)
+   end
+
    def greater(input_a, input_b, name: nil)
      op(:greater, input_a, input_b, name: name)
    end
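End to end, the new op composes with the existing comparison ops. A minimal sketch:

```ruby
require 'tensor_stream'

tf = TensorStream
x = tf.constant([1.0, 2.0, 3.0])

inside = tf.logical_and(tf.greater(x, tf.constant(1.0)),
                        tf.less(x, tf.constant(3.0)))

tf.session do |sess|
  p sess.run(inside) # expected: [false, true, false]
end
```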
data/lib/tensor_stream/tensor.rb
CHANGED
@@ -5,7 +5,9 @@ module TensorStream
  class Tensor
    include OpHelper
 
-   attr_accessor :name, :data_type, :shape, :rank, :native_buffer, :is_const,
+   attr_accessor :name, :data_type, :shape, :rank, :native_buffer, :is_const,
+                 :value, :breakpoint, :internal, :source, :given_name, :graph,
+                 :consumers
 
    def initialize(data_type, rank, shape, options = {})
      @data_type = data_type
@@ -104,6 +106,14 @@ module TensorStream
      op(:less_equal, self, other)
    end
 
+   def and(other)
+     op(:logical_and, self, other)
+   end
+
+   def matmul(other)
+     op(:matmul, self, other)
+   end
+
    def collect(&block)
      @value.collect(&block)
    end
@@ -204,12 +214,24 @@ module TensorStream
    end
 
    def breakpoint!(&block)
-     @breakpoint = block
      self
    end
 
+   def print!(message)
+     op(:print, self, self, message: message)
+   end
+
    protected
 
+   def add_consumer(consumer)
+     @consumers ||= []
+     @consumers << consumer.name if !@consumers.include?(consumer.name) && consumer.name != self.name
+   end
+
+   def propagate_consumer(consumer)
+     add_consumer(consumer)
+   end
+
    def format_source(trace)
      trace.reject { |c| c.to_s.include?(File.join('lib', 'tensor_stream')) }.first
    end
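Two of the additions here pair up nicely: matmul gives an instance-method spelling for matrix products, and the consumers list records which downstream ops use a tensor once propagate_consumer has run. A sketch (generated node names vary):

```ruby
require 'tensor_stream'

tf = TensorStream
a = tf.constant([[1.0, 2.0]])
b = tf.constant([[3.0], [4.0]])

c = a.matmul(b)

# registering c in the graph walks its inputs, so the constants now
# list the matmul node among their consumers
p a.consumers

tf.session do |sess|
  p sess.run(c) # => [[11.0]]
end
```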
data/samples/test.py
ADDED
@@ -0,0 +1,44 @@
+import tensorflow as tf
+
+test_inputs = [
+    [0.5937, 0.2343, 1.4332, 0.4395],
+    [-1.0227, -0.6915, 1.2367, 0.3452],
+    [-0.5675, 1.0374, 1.0429, 0.8839],
+    [-0.1066, -0.0469, -1.6317, -1.4836],
+    [0.7835, -3.0105, 1.713, -0.4536],
+    [-0.3076, 1.3662, -0.6537, 0.0905],
+    [-0.2459, 0.2243, -2.7048, 0.848],
+]
+
+num_inputs = 4
+num_neurons = 5
+inputs = tf.placeholder("float", shape=(None, num_inputs))
+biases = tf.constant([0.5012, 1.302, -1.6217, 0.669, 0.1494], name='b1')
+biases2 = tf.constant([0.2012, 1.102, -1.5217, 0.469, 0.0494], name='b2')
+
+weights = tf.constant([
+    [-0.9135, 1.0376, 0.8537, 0.4376, 1.3255],
+    [-0.5921, -1.4081, 1.0614, -0.5283, 1.1832],
+    [0.7285, -0.7844, 0.1793, -0.5275, -0.4426],
+    [-1.4976, 0.4433, 2.2317, -2.0479, 0.7791]], name='w')
+
+weights_layer2 = tf.constant([
+    [-1.0465, -0.8766, 1.6849, -0.6625, 0.7928],
+    [2.0412, 1.3564, 0.7905, 0.6434, -2.5495],
+    [2.4276, -0.6893, -1.5917, 0.0911, 0.9112],
+    [-0.012, 0.0794, 1.3829, -1.018, -0.9328],
+    [0.061, 0.9791, -2.1727, -0.9553, -1.434]], name='w2')
+
+
+sess = tf.Session()
+
+layer_1 = tf.matmul(inputs, weights) + biases
+neural_net = tf.matmul(layer_1, weights_layer2) + biases2
+
+output = sess.run(neural_net, feed_dict={inputs: test_inputs})
+
+g = tf.gradients(neural_net, [weights, biases])
+g2 = tf.gradients(neural_net, [weights_layer2, biases2])
+
+weight_gradient, biases_gradient = sess.run(g, feed_dict={inputs: test_inputs})
+weight_gradient2, biases_gradient2 = sess.run(g2, feed_dict={inputs: test_inputs})
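The script above is plain TensorFlow, presumably kept as a reference for cross-checking the gem's matmul and gradient output against known-good numbers. A rough tensor_stream rendition of its first layer, using the Tensor#matmul helper added in this release (a constant stands in for the Python placeholder):

```ruby
require 'tensor_stream'

tf = TensorStream
inputs = tf.constant([[0.5937, 0.2343, 1.4332, 0.4395]])
weights = tf.constant([[-0.9135, 1.0376, 0.8537, 0.4376, 1.3255],
                       [-0.5921, -1.4081, 1.0614, -0.5283, 1.1832],
                       [0.7285, -0.7844, 0.1793, -0.5275, -0.4426],
                       [-1.4976, 0.4433, 2.2317, -2.0479, 0.7791]])
biases = tf.constant([0.5012, 1.302, -1.6217, 0.669, 0.1494])

layer_1 = inputs.matmul(weights) + biases # bias is broadcast across the row

tf.session do |sess|
  p sess.run(layer_1)
end
```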
data/tensor_stream.gemspec
CHANGED
@@ -36,6 +36,8 @@ Gem::Specification.new do |spec|
   spec.add_development_dependency "awesome_print"
   spec.add_development_dependency "rubocop"
   spec.add_development_dependency "pry-byebug"
+  spec.add_development_dependency "byepry"
+  spec.add_development_dependency "colorize"
   spec.add_dependency "deep_merge"
   spec.add_dependency "concurrent-ruby"
 end
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: tensor_stream
 version: !ruby/object:Gem::Version
-  version: 0.1.3
+  version: 0.1.4
 platform: ruby
 authors:
 - Joseph Emmanuel Dayo
 autorequire:
 bindir: exe
 cert_chain: []
-date: 2018-05-
+date: 2018-05-15 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: bundler
@@ -94,6 +94,34 @@ dependencies:
   - - ">="
     - !ruby/object:Gem::Version
       version: '0'
+- !ruby/object:Gem::Dependency
+  name: byepry
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: '0'
+  type: :development
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: '0'
+- !ruby/object:Gem::Dependency
+  name: colorize
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: '0'
+  type: :development
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: '0'
 - !ruby/object:Gem::Dependency
   name: deep_merge
   requirement: !ruby/object:Gem::Requirement
@@ -147,6 +175,7 @@ files:
 - lib/tensor_stream.rb
 - lib/tensor_stream/control_flow.rb
 - lib/tensor_stream/evaluator/evaluator.rb
+- lib/tensor_stream/evaluator/operation_helpers/array_ops_helper.rb
 - lib/tensor_stream/evaluator/operation_helpers/random_gaussian.rb
 - lib/tensor_stream/evaluator/ruby_evaluator.rb
 - lib/tensor_stream/graph.rb
@@ -171,6 +200,7 @@ files:
 - samples/iris.rb
 - samples/linear_regression.rb
 - samples/raw_neural_net_sample.rb
+- samples/test.py
 - tensor_stream.gemspec
 homepage: http://www.github.com/jedld/tensor_stream
 licenses:
@@ -193,7 +223,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
     version: '0'
 requirements: []
 rubyforge_project:
-rubygems_version: 2.6.
+rubygems_version: 2.6.10
 signing_key:
 specification_version: 4
 summary: A Pure ruby tensorflow implementation