tensor_stream 0.1.4 → 0.1.5
- checksums.yaml +4 -4
- data/.circleci/config.yml +57 -0
- data/README.md +2 -0
- data/lib/tensor_stream.rb +74 -10
- data/lib/tensor_stream/control_flow.rb +2 -2
- data/lib/tensor_stream/device.rb +8 -0
- data/lib/tensor_stream/evaluator/ruby_evaluator.rb +104 -40
- data/lib/tensor_stream/graph.rb +53 -5
- data/lib/tensor_stream/graph_keys.rb +1 -0
- data/lib/tensor_stream/graph_serializers/graphml.rb +91 -0
- data/lib/tensor_stream/graph_serializers/pbtext.rb +71 -0
- data/lib/tensor_stream/helpers/op_helper.rb +7 -1
- data/lib/tensor_stream/initializer.rb +16 -0
- data/lib/tensor_stream/math_gradients.rb +37 -30
- data/lib/tensor_stream/nn/nn_ops.rb +17 -0
- data/lib/tensor_stream/operation.rb +92 -31
- data/lib/tensor_stream/ops.rb +87 -53
- data/lib/tensor_stream/placeholder.rb +1 -1
- data/lib/tensor_stream/session.rb +26 -4
- data/lib/tensor_stream/tensor.rb +29 -33
- data/lib/tensor_stream/tensor_shape.rb +52 -2
- data/lib/tensor_stream/train/gradient_descent_optimizer.rb +1 -4
- data/lib/tensor_stream/variable.rb +23 -7
- data/lib/tensor_stream/version.rb +1 -1
- data/samples/logistic_regression.rb +76 -0
- data/tensor_stream.gemspec +3 -0
- metadata +50 -2
data/lib/tensor_stream/nn/nn_ops.rb
CHANGED
@@ -8,6 +8,23 @@ module TensorStream
     def self.relu(features, name: nil)
       TensorStream.max(features, 0, name: "relu_#{name}")
     end
+
+    def self.sigmoid_cross_entropy_with_logits(labels: nil, logits: nil, name: nil)
+      TensorStream.name_scope(name, default: 'logistic_loss', values: [logits, labels]) do |name|
+        tf = TensorStream
+        logits = tf.convert_to_tensor(logits, name: 'logits')
+        labels = tf.convert_to_tensor(labels, name: 'labels')
+        zeros = tf.zeros_like(logits, dtype: logits.dtype)
+        cond = (logits >= zeros)
+        relu_logits = tf.where(cond, logits, zeros)
+        neg_abs_logits = tf.where(cond, -logits, logits)
+
+        return tf.add(
+          relu_logits - logits * labels,
+          tf.log1p(tf.exp(neg_abs_logits)),
+          name: name)
+      end
+    end
   end
 
   # tensorflow compatibility
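The new sigmoid_cross_entropy_with_logits mirrors TensorFlow's numerically stable formulation: for logits x and labels z it computes max(x, 0) - x * z + log(1 + exp(-|x|)), which never exponentiates a large positive logit. A minimal usage sketch (hand-checked values; assumes the method lives on TensorStream::NN per the file path, and that ts.constant / ts.session behave as in the README):

require 'tensor_stream'

ts = TensorStream
labels = ts.constant([1.0, 0.0, 1.0])
logits = ts.constant([2.0, -1.0, 0.5])
loss = TensorStream::NN.sigmoid_cross_entropy_with_logits(labels: labels, logits: logits)

# Expected per-element losses: roughly [0.1269, 0.3133, 0.4741]
puts ts.session.run(loss).inspect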
data/lib/tensor_stream/operation.rb
CHANGED
@@ -9,18 +9,17 @@ module TensorStream
 
       @operation = operation
       @rank = options[:rank] || 0
-      @name = options[:name] || set_name
+      @name = [@graph.get_name_scope, options[:name] || set_name].compact.join('/')
      @internal = options[:internal]
       @given_name = @name
       @source = format_source(caller_locations)
 
       @options = options
 
-      @items = [input_a, input_b].map { |i| options[:preserve_params_type] ? i :
+      @items = [input_a, input_b].map { |i| options[:preserve_params_type] ? i : TensorStream.convert_to_tensor(i) }
       @data_type = set_data_type(options[:data_type])
 
-      @shape = TensorShape.new(
-
+      @shape = TensorShape.new(infer_shape)
       @graph.add_node(self)
     end
 
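Operation names are now prefixed with the graph's current name scope, joined with '/', which is what the new graphml and pbtext serializers key off. A sketch of the intended effect (assumes TensorStream.name_scope pushes a scope onto the graph, as the nn_ops change above relies on):

require 'tensor_stream'

ts = TensorStream
sum = nil
ts.name_scope('layer1') do
  a = ts.constant(1.0, name: 'a')
  b = ts.constant(2.0, name: 'b')
  sum = ts.add(a, b, name: 'sum')  # op name becomes "layer1/sum"
end

puts sum.name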
@@ -57,47 +56,55 @@ module TensorStream
       when :shape, :rank
         :int32
       else
-        passed_data_type
+        return passed_data_type if passed_data_type
+        if @items[0]
+          @items[0].data_type
+        elsif @items[1]
+          @items[1].data_type
+        else
+          :unknown
+        end
       end
     end
 
-    def to_math(name_only = false, max_depth = 99)
+    def to_math(name_only = false, max_depth = 99, _cur_depth = 0)
       return @name if max_depth.zero?
 
-      sub_item = auto_math(items[0], name_only, max_depth - 1)
+      sub_item = auto_math(items[0], name_only, max_depth - 1, _cur_depth + 1)
+      sub_item2 = auto_math(items[1], name_only, max_depth - 1, _cur_depth + 1) if items[1]
 
-      case operation
+      out = case operation
       when :argmax
-        "argmax(#{
+        "argmax(#{sub_item},#{options[:axis]})"
       when :negate
         "-#{sub_item}"
       when :index
-        "#{sub_item}[#{
+        "#{sub_item}[#{sub_item2}]"
       when :slice
-        "#{sub_item}[#{
+        "#{sub_item}[#{sub_item2}]"
       when :assign_sub
-        "(#{items[0] ? items[0].name : 'self'} -= #{auto_math(items[1], name_only)})"
+        "(#{items[0] ? items[0].name : 'self'} -= #{auto_math(items[1], name_only, 1)})"
       when :assign_add
-        "(#{items[0] ? items[0].name : 'self'} += #{auto_math(items[1], name_only)})"
+        "(#{items[0] ? items[0].name : 'self'} += #{auto_math(items[1], name_only, 1)})"
       when :assign
-        "(#{items[0] ? items[0].name : 'self'} = #{auto_math(items[1], name_only)})"
+        "(#{items[0] ? items[0].name : 'self'} = #{auto_math(items[1], name_only, 1)})"
       when :sin, :cos, :tanh
         "#{operation}(#{sub_item})"
       when :add
-        "(#{sub_item} + #{
+        "(#{sub_item} + #{sub_item2})"
       when :sub
-        "(#{sub_item} - #{
+        "(#{sub_item} - #{sub_item2})"
       when :pow
-        "(#{sub_item}^#{
+        "(#{sub_item}^#{sub_item2})"
       when :div
-        "(#{sub_item} / #{
+        "(#{sub_item} / #{sub_item2})"
       when :mul
         if auto_math(items[0]) == 1
-
+          sub_item2
         elsif auto_math(items[1]) == 1
           sub_item
         else
-          "(#{sub_item} * #{
+          "(#{sub_item} * #{sub_item2})"
         end
       when :reduce_sum
         "reduce_sum(|#{sub_item}|)"
@@ -110,7 +117,7 @@ module TensorStream
       when :stop_gradient
         sub_item
       when :matmul
-        "#{sub_item}.matmul(#{
+        "#{sub_item}.matmul(#{sub_item2})"
       when :eye
         "eye(#{sub_item})"
       when :transpose
@@ -128,15 +135,19 @@ module TensorStream
       when :zeros
         "zeros(#{sub_item})"
       when :reshape
-        "reshape(#{sub_item},#{
+        "reshape(#{sub_item},#{sub_item2})"
       when :rank
         "#{sub_item}.rank"
       when :cond
-        "(#{auto_math(options[:pred])} ? #{sub_item} : #{
+        "(#{auto_math(options[:pred], name_only, max_depth - 1, _cur_depth)} ? #{sub_item} : #{sub_item2})"
       when :less
-        "#{sub_item} < #{
+        "#{sub_item} < #{sub_item2}"
+      when :less_equal
+        "#{sub_item} <= #{sub_item2}"
       when :greater
-        "#{sub_item} > #{
+        "#{sub_item} > #{sub_item2}"
+      when :greater_equal
+        "#{sub_item} >= #{sub_item2}"
       when :square
         "#{sub_item}\u00B2"
       when :log
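to_math now renders both operands (sub_item and sub_item2) for every binary operation instead of a truncated interpolation, and the new :less_equal and :greater_equal branches keep the printed form in sync with the matching ops added in ops.rb below. An illustrative sketch (exact rendering depends on auto_math's name handling and on the _cur_depth indentation added further down):

require 'tensor_stream'

ts = TensorStream
a = ts.constant(1.0, name: 'a')
b = ts.constant(2.0, name: 'b')

puts ts.less_equal(a, b).to_math(true)  # renders roughly as "a <= b"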
@@ -148,32 +159,82 @@ module TensorStream
       when :pad
         "pad(#{sub_item},#{auto_math(options[:paddings])})"
       when :equal
-        "#{sub_item} == #{
+        "#{sub_item} == #{sub_item2}"
       when :not_equal
-        "#{sub_item} != #{
+        "#{sub_item} != #{sub_item2}"
       when :logical_and
-        "#{sub_item} && #{
+        "#{sub_item} && #{sub_item2}"
       when :sqrt
         "sqrt(#{sub_item})"
       when :zeros_like
         "zeros_like(#{sub_item})"
       when :where
-        "where(#{auto_math(options[:pred], name_only, max_depth - 1)}
+        "where(#{auto_math(options[:pred], name_only, max_depth - 1, _cur_depth)}, #{sub_item}, #{sub_item2})"
       when :max
-        "max(#{
+        "max(#{sub_item},#{sub_item2})"
       when :cast
-        "cast(#{
+        "cast(#{sub_item}, #{data_type})"
       else
         raise "no math form for #{operation} defined"
       end
+      ["\n", (_cur_depth + 1).times.collect { ' ' }, out].flatten.join
     end
 
     def run
       eval
     end
 
+    def op
+      self
+    end
+
     private
 
+    def infer_shape
+      case operation
+      when :index
+        item_shape = items[0].shape.shape
+        return nil if item_shape.nil?
+        return item_shape[1, item_shape.size]
+      when :reduce_mean, :reduce_prod, :reduce_sum
+        return [] if options[:axis].nil?
+        item_shape = items[0].shape.shape
+        return nil if item_shape.nil?
+        axis = options[:axis]
+
+        axis = [axis] unless axis.is_a?(Array)
+        return item_shape.each_with_index.map do |s, index|
+          next nil if axis.include?(index)
+          s
+        end.compact
+      when :reshape
+        new_shape = items[1] && items[1].value ? items[1].value : nil
+        return nil if new_shape.nil?
+
+        item_shape = items[0].shape.shape
+        return new_shape if item_shape.nil?
+
+        return TensorShape.fix_inferred_elements(new_shape, item_shape.reduce(:*))
+      when :flow_group
+        return []
+      when :zeros, :ones
+        return items[0] ? items[0].value : options[:shape]
+      when :shape
+        return items[0].shape.shape ? [items[0].shape.shape.size] : nil
+      when :matmul
+        shape1 = items[0].shape.shape.nil? ? nil : items[0].shape.shape[0]
+        shape2 = items[1].shape.shape.nil? ? nil : items[1].shape.shape[1]
+        return [shape1, shape2]
+      else
+        return items[0].shape.shape if items.size == 1
+        if items.size == 2 && items[0] && items[1]
+          return TensorShape.infer_shape(items[0].shape.shape, items[1].shape.shape)
+        end
+      end
+
+      nil
+    end
+
     def propagate_consumer(consumer)
       super(consumer)
 
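Because the constructor now calls TensorShape.new(infer_shape), static shapes become available at graph-construction time rather than only at evaluation. For example, matmul maps [m, n] x [n, p] to [m, p], and a reduction drops its reduced axes. A sketch (assumes ts.constant infers shapes from nested arrays, per the tensor.rb changes):

require 'tensor_stream'

ts = TensorStream
a = ts.constant([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])  # shape [3, 2]
b = ts.constant([[1.0], [2.0]])                        # shape [2, 1]

puts ts.matmul(a, b).shape.shape.inspect     # => [3, 1]
puts ts.reduce_sum(a, 0).shape.shape.inspect # => [2] (axis 0 dropped)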
data/lib/tensor_stream/ops.rb
CHANGED
@@ -5,7 +5,7 @@ module TensorStream
     NUMERIC_TYPES = %w[int32 int64 float32 float64].map(&:to_sym)
 
     def argmax(input, axis = nil, name: nil, dimension: nil, output_type: :int32)
-
+      _op(:argmax, input, nil, axis: axis, name: name, dimension: dimension, data_type: output_type)
     end
 
     def gradients(input, wrt_xs, grad_ys: nil,
@@ -24,10 +24,12 @@ module TensorStream
       tensor_program = if input.graph.node_added?(gradient_program_name)
                          input.graph.get_node(gradient_program_name)
                        else
-
-
-
-
+                         input.graph.name_scope("gradient_wrt_#{x.name}") do
+                           derivative_ops = TensorStream::MathGradients.derivative(input, x, graph: input.graph,
+                                                                                   stop_gradients: stop_gradients)
+                           unit_matrix = _op(:ones_like, x)
+                           input.graph.add_node!(gradient_program_name, unit_matrix * derivative_ops)
+                         end
                        end
       tensor_program
     end
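gradients now builds the derivative graph once per target/variable pair, caches it under gradient_program_name, and wraps it in a gradient_wrt_<x> name scope so serialized graphs stay readable. A usage sketch (assumes the TF-style ts.variable and ts.global_variables_initializer helpers and that gradients yields one tensor per entry in wrt_xs):

require 'tensor_stream'

ts = TensorStream
x = ts.variable(3.0, name: 'x')
f = x * x + x * 2.0

grad = ts.gradients(f, [x]).first

sess = ts.session
sess.run(ts.global_variables_initializer)
puts sess.run(grad)  # => 8.0, since d(x^2 + 2x)/dx = 2x + 2 at x = 3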
@@ -36,183 +38,204 @@ module TensorStream
 
     def random_uniform(shape, dtype: :float32, minval: 0, maxval: 1, seed: nil, name: nil)
       options = { shape: shape, dtype: dtype, minval: minval, maxval: maxval, seed: seed, name: name }
-
+      _op(:random_uniform, nil, nil, options)
     end
 
     def random_normal(shape, dtype: :float32, mean: 0.0, stddev: 1.0, seed: nil, name: nil)
       options = { shape: shape, dtype: dtype, mean: mean, stddev: stddev, seed: seed, name: name }
-
+      _op(:random_normal, nil, nil, options)
     end
 
     def stop_gradient(tensor, options = {})
-
+      _op(:stop_gradient, tensor, nil, options)
     end
 
     def eye(num_rows, num_columns: nil, dtype: :float32, name: nil)
-
+      _op(:eye, num_rows, num_columns || num_rows, data_type: dtype, name: name)
     end
 
     def shape(input, name: nil, out_type: :int32)
-
+      _op(:shape, input, nil, name: name, out_type: out_type)
     end
 
     def rank(input, name: nil)
-
+      _op(:rank, input, name: name)
     end
 
     def zeros_initializer(options = {})
-
+      _op(:zeros, nil, nil, options)
+    end
+
+    def glorot_uniform_initializer(seed: nil, dtype: :float32)
+      TensorStream::Initializer.new(-> { _op(:glorot_uniform, nil, nil, seed: seed, data_type: dtype) })
+    end
+
+    def random_uniform_initializer(minval: 0, maxval: 1, seed: nil, dtype: nil)
+      TensorStream::Initializer.new(-> { _op(:random_uniform, nil, nil, minval: 0, maxval: 1, seed: seed, data_type: dtype) })
     end
 
     def slice(input, start, size, name: nil)
-
+      _op(:slice, input, start, size: size, name: name)
     end
 
     def zeros(shape, dtype: :float32, name: nil)
-
+      _op(:zeros, shape, nil, data_type: dtype, name: name)
     end
 
     def ones(shape, dtype: :float32, name: nil)
-
+      _op(:ones, shape, nil, data_type: dtype, name: name)
     end
 
     def less(input_a, input_b, name: nil)
-
+      _op(:less, input_a, input_b, name: name)
     end
 
     def logical_and(input_a, input_b, name: nil)
-
+      _op(:logical_and, input_a, input_b, name: name)
     end
 
     def greater(input_a, input_b, name: nil)
-
+      _op(:greater, input_a, input_b, name: name)
     end
 
     def greater_equal(input_a, input_b, name: nil)
-
+      _op(:greater_equal, input_a, input_b, name: name)
     end
 
     def less_equal(input_a, input_b, name: nil)
-
+      _op(:less_equal, input_a, input_b, name: name)
     end
 
     def reduce_mean(input_tensor, axis = nil, keepdims: false, name: nil)
-
+      _op(:reduce_mean, input_tensor, nil, axis: axis, keepdims: keepdims, name: name)
     end
 
     def reduce_sum(input_tensor, axis = nil, keepdims: false, name: nil)
-
+      _op(:reduce_sum, input_tensor, nil, axis: axis, keepdims: keepdims, name: name)
     end
 
     def reduce_prod(input, axis = nil, keepdims: false, name: nil)
-
+      _op(:reduce_prod, input, nil, axis: axis, keepdims: keepdims, name: name)
     end
 
     def concat(values, axis, name: 'concat')
-
+      _op(:concat, values, nil, axis: axis, name: name)
     end
 
     def reshape(tensor, shape, name: nil)
-
+      _op(:reshape, tensor, shape, name: name)
     end
 
     def square(tensor, name: nil)
-
+      _op(:square, tensor, nil, name: name)
+    end
+
+    def round(tensor, name: nil)
+      check_allowed_types(tensor, FLOATING_POINT_TYPES)
+      _op(:round, tensor, nil, name: name)
+    end
+
+    def reciprocal(tensor, name: nil)
+      _op(:reciprocal, tensor, nil, name: name)
     end
 
     def cond(pred, true_fn, false_fn, name: nil)
-
+      _op(:cond, true_fn, false_fn, pred: pred, name: name)
     end
 
     def where(condition, true_t = nil, false_t = nil, name: nil)
-
+      _op(:where, true_t, false_t, pred: condition, name: name)
     end
 
     def add(input_a, input_b, name: nil)
-
+      _op(:add, input_a, input_b, name: name)
     end
 
     def sub(input_a, input_b, name: nil)
-
+      _op(:sub, input_a, input_b, name: name)
     end
 
     def max(input_a, input_b, name: nil)
       check_allowed_types(input_a, NUMERIC_TYPES)
       check_allowed_types(input_b, NUMERIC_TYPES)
 
-
+      _op(:max, input_a, input_b, name: name)
     end
 
+    def maximum(input_a, input_b, name: nil)
+      max(input_a, input_b, name: name)
+    end
+
     def cast(input, dtype, name: nil)
-
+      _op(:cast, input, nil, data_type: dtype, name: name)
     end
 
     def print(input, data, message: nil, name: nil)
-
+      _op(:print, input, data, message: message, name: name)
     end
 
     def negate(input, options = {})
-
+      _op(:negate, input, nil, options)
     end
 
     def equal(input_a, input_b, name: nil)
-
+      _op(:equal, input_a, input_b, name: name)
     end
 
     def not_equal(input_a, input_b, name: nil)
-
+      _op(:not_equal, input_a, input_b, name: name)
     end
 
     def zeros_like(tensor, dtype: nil, name: nil)
-
+      _op(:zeros_like, tensor, nil, data_type: dtype, name: name)
     end
 
     def ones_like(tensor, dtype: nil, name: nil)
-
+      _op(:ones_like, tensor, nil, data_type: dtype, name: name)
     end
 
     def identity(input, name: nil)
-
+      _op(:identity, input, nil, name: name)
     end
 
     def multiply(input_a, input_b, name: nil)
-
+      _op(:mul, input_a, input_b, name: name)
     end
 
     def pow(input_a, input_e, name: nil)
-
+      _op(:pow, input_a, input_e, name: name)
     end
 
     def abs(input, name: nil)
-
+      _op(:abs, input, nil, name: name)
     end
 
     def sign(input, name: nil)
-
+      _op(:sign, input, nil, name: name)
     end
 
     def sin(input, options = {})
       options[:data_type] ||= :float32
       check_allowed_types(input, FLOATING_POINT_TYPES)
-
+      _op(:sin, input, nil, options)
     end
 
     def cos(input, options = {})
       options[:data_type] ||= :float32
       check_allowed_types(input, FLOATING_POINT_TYPES)
-
+      _op(:cos, input, nil, options)
     end
 
     def tan(input, options = {})
       options[:data_type] ||= :float32
       check_allowed_types(input, FLOATING_POINT_TYPES)
-
+      _op(:tan, input, nil, options)
     end
 
     def tanh(input, options = {})
       options[:data_type] ||= :float32
       check_allowed_types(input, FLOATING_POINT_TYPES)
-
+      _op(:tanh, input, nil, options)
     end
 
     def sqrt(input, name: nil)
@@ -221,33 +244,44 @@ module TensorStream
         name: name
       }
       check_allowed_types(input, FLOATING_POINT_TYPES)
-
+      _op(:sqrt, input, nil, options)
     end
 
     def log(input, options = {})
       options[:data_type] ||= :float32
       check_allowed_types(input, FLOATING_POINT_TYPES)
-
+      _op(:log, input, nil, options)
+    end
+
+    def log1p(input, options = {})
+      options[:data_type] ||= :float32
+      check_allowed_types(input, FLOATING_POINT_TYPES)
+      _op(:log1p, input, nil, options)
     end
 
     def exp(input, options = {})
       options[:data_type] ||= :float32
       check_allowed_types(input, FLOATING_POINT_TYPES)
-
+      _op(:exp, input, nil, options)
+    end
+
+    def sigmoid(input, name: nil)
+      check_allowed_types(input, FLOATING_POINT_TYPES)
+      _op(:sigmoid, input, nil, name: name)
     end
 
     def matmul(input_a, input_b, transpose_a: false,
                transpose_b: false,
                name: nil)
-
+      _op(:matmul, input_a, input_b, transpose_a: transpose_a, transpose_b: transpose_b, name: name)
     end
 
     def transpose(tensor, perm: nil, name: 'transpose')
-
+      _op(:transpose, tensor, nil, perm: perm, name: name)
     end
 
     def pad(tensor, paddings, mode: 'CONSTANT', name: nil)
-
+      _op(:pad, tensor, nil, paddings: paddings, mode: mode, name: name)
     end
   end
 end
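Taken together, the new sigmoid, log1p, maximum, round, and reciprocal ops plus the Initializer-backed glorot_uniform_initializer are the pieces exercised by the new samples/logistic_regression.rb. A condensed, hypothetical single training step (assumes the Train::GradientDescentOptimizer exposes minimize and that Session#run accepts feed_dict, following the TensorFlow API this gem mirrors):

require 'tensor_stream'

ts = TensorStream
x = ts.placeholder(:float32)
labels = ts.placeholder(:float32)
w = ts.variable(0.1, name: 'weight')
b = ts.variable(0.0, name: 'bias')

logits = x * w + b
loss = ts.reduce_mean(TensorStream::NN.sigmoid_cross_entropy_with_logits(labels: labels, logits: logits))
train_step = TensorStream::Train::GradientDescentOptimizer.new(0.01).minimize(loss)

sess = ts.session
sess.run(ts.global_variables_initializer)
sess.run(train_step, feed_dict: { x => [0.2, 0.4, 0.8], labels => [0.0, 0.0, 1.0] })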