tensorflow 0.1.1 → 0.1.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -224,8 +224,10 @@ module TensorFlow
         RawOps.log1p(x: x)
       end
 
-      # def log_sigmoid
-      # end
+      def log_sigmoid(x)
+        x = TensorFlow.convert_to_tensor(x)
+        negative(RawOps.softplus(features: -x))
+      end
 
       def log_softmax(logits)
         RawOps.log_softmax(logits: logits)
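
The new log_sigmoid computes log(sigmoid(x)) through the identity log(sigmoid(x)) = -softplus(-x) = -log(1 + e**-x), which avoids the overflow of calling log on sigmoid directly. A minimal usage sketch, assuming the methods above are exposed on the gem's Math module (e.g. TensorFlow::Math):

  require "tensorflow"

  # Hypothetical call path; the method converts plain Ruby arrays itself via convert_to_tensor.
  TensorFlow::Math.log_sigmoid([-2.0, 0.0, 2.0])  # ≈ [-2.1269, -0.6931, -0.1269]
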
@@ -243,8 +245,9 @@ module TensorFlow
         RawOps.logical_or(x: x, y: y)
       end
 
-      # def logical_xor
-      # end
+      def logical_xor(x, y)
+        logical_and(logical_or(x, y), logical_not(logical_and(x, y)))
+      end
 
       def maximum(x, y)
         RawOps.maximum(x: x, y: y)
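
logical_xor is composed from the existing primitives: the result is true exactly where the inputs differ, i.e. (x OR y) AND NOT (x AND y). A sketch under the same TensorFlow::Math assumption, with illustrative inputs:

  a = TensorFlow.convert_to_tensor([true, true, false, false])
  b = TensorFlow.convert_to_tensor([true, false, true, false])
  TensorFlow::Math.logical_xor(a, b)  # => [false, true, true, false]
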
@@ -262,11 +265,13 @@ module TensorFlow
         RawOps.mul(x: x, y: y)
       end
 
-      # def multiply_no_nan
-      # end
+      def multiply_no_nan(x, y)
+        RawOps.mul_no_nan(x: x, y: y)
+      end
 
-      # def negative
-      # end
+      def negative(x)
+        RawOps.neg(x: x)
+      end
 
       # def nextafter
       # end
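
multiply_no_nan maps to the MulNoNan kernel, which returns 0 wherever y is 0 even if the matching x is NaN or infinite, unlike a plain multiply. A sketch, still assuming a TensorFlow::Math call path:

  x = TensorFlow.convert_to_tensor([Float::NAN, 2.0])
  y = TensorFlow.convert_to_tensor([0.0, 3.0])
  TensorFlow::Math.multiply_no_nan(x, y)  # => [0.0, 6.0]
  TensorFlow::Math.negative(x)            # elementwise negation via RawOps.neg
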
@@ -300,8 +305,11 @@ module TensorFlow
       # def reduce_all
       # end
 
-      # def reduce_any
-      # end
+      def reduce_any(input_tensor, axis: nil, keepdims: false)
+        input_tensor = TensorFlow.convert_to_tensor(input_tensor)
+        axis ||= reduction_dims(input_tensor)
+        RawOps.any(input: input_tensor, reduction_indices: axis, keep_dims: keepdims)
+      end
 
       # def reduce_euclidean_norm
       # end
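
reduce_any is a logical OR across the chosen axes; with axis: nil it falls back to the new reduction_dims helper (defined later in this diff) and reduces every dimension. A sketch with illustrative inputs, same TensorFlow::Math assumption:

  m = [[true, false], [false, false]]
  TensorFlow::Math.reduce_any(m)           # => true
  TensorFlow::Math.reduce_any(m, axis: 1)  # => [true, false]  (per row)
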
@@ -309,26 +317,46 @@ module TensorFlow
       # def reduce_logsumexp
       # end
 
-      # def reduce_max
-      # end
+      def reduce_max(input_tensor, axis: nil, keepdims: false)
+        input_tensor = TensorFlow.convert_to_tensor(input_tensor)
+        axis ||= reduction_dims(input_tensor)
+        RawOps.max(input: input_tensor, reduction_indices: axis, keep_dims: keepdims)
+      end
 
-      # def reduce_mean
-      # end
+      def reduce_mean(input_tensor, axis: nil, keepdims: false)
+        input_tensor = TensorFlow.convert_to_tensor(input_tensor)
+        axis ||= reduction_dims(input_tensor)
+        RawOps.mean(input: input_tensor, reduction_indices: axis, keep_dims: keepdims)
+      end
 
-      # def reduce_min
-      # end
+      def reduce_min(input_tensor, axis: nil, keepdims: false)
+        input_tensor = TensorFlow.convert_to_tensor(input_tensor)
+        axis ||= reduction_dims(input_tensor)
+        RawOps.min(input: input_tensor, reduction_indices: axis, keep_dims: keepdims)
+      end
 
-      # def reduce_prod
-      # end
+      def reduce_prod(input_tensor, axis: nil, keepdims: false)
+        input_tensor = TensorFlow.convert_to_tensor(input_tensor)
+        axis ||= reduction_dims(input_tensor)
+        RawOps.prod(input: input_tensor, reduction_indices: axis, keep_dims: keepdims)
+      end
 
-      # def reduce_std
-      # end
+      def reduce_std(input_tensor, axis: nil, keepdims: false)
+        variance = reduce_variance(input_tensor, axis: axis, keepdims: keepdims)
+        sqrt(variance)
+      end
 
-      # def reduce_sum
-      # end
+      def reduce_sum(input_tensor, axis: nil, keepdims: false)
+        input_tensor = TensorFlow.convert_to_tensor(input_tensor)
+        axis ||= reduction_dims(input_tensor)
+        RawOps.sum(input: input_tensor, reduction_indices: axis, keep_dims: keepdims)
+      end
 
-      # def reduce_variance
-      # end
+      def reduce_variance(input_tensor, axis: nil, keepdims: false)
+        means = reduce_mean(input_tensor, axis: axis, keepdims: true)
+        squared_deviations = RawOps.square(x: input_tensor - means)
+        reduce_mean(squared_deviations, axis: axis, keepdims: keepdims)
+      end
 
       def rint(x)
         RawOps.rint(x: x)
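
reduce_std and reduce_variance compose the other reductions: the variance is the mean of squared deviations from the mean, and the standard deviation is its square root. A worked sketch, assuming the TensorFlow::Math namespace:

  x = [1.0, 2.0, 3.0, 4.0]
  TensorFlow::Math.reduce_mean(x)      # => 2.5
  TensorFlow::Math.reduce_variance(x)  # => 1.25   (mean of squared deviations from 2.5)
  TensorFlow::Math.reduce_std(x)       # => ~1.118 (sqrt of the variance)
  TensorFlow::Math.reduce_sum(x, keepdims: true)  # keeps the reduced axis as a size-1 dimension
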
@@ -460,6 +488,13 @@ module TensorFlow
       def zeta(x, q)
         RawOps.zeta(x: x, q: q)
       end
+
+      private
+
+      def reduction_dims(input_tensor)
+        rank = RawOps.rank(input: input_tensor).value
+        (0...rank).to_a
+      end
     end
   end
 end
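
The new private reduction_dims helper asks the runtime for the tensor's rank and returns [0, 1, ..., rank - 1], so omitting axis: reduces over every dimension. A sketch of the resulting defaults (TensorFlow::Math assumed):

  m = TensorFlow.convert_to_tensor([[1, 2, 3], [4, 5, 6]])  # rank 2, so default axes are [0, 1]
  TensorFlow::Math.reduce_sum(m)           # => 21
  TensorFlow::Math.reduce_sum(m, axis: 0)  # => [5, 7, 9]  (column sums)
  TensorFlow::Math.reduce_sum(m, axis: 1)  # => [6, 15]    (row sums)
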
@@ -0,0 +1,284 @@
+module TensorFlow
+  module NN
+    class << self
+      def all_candidate_sampler(true_classes, num_true: nil, num_sampled: nil, unique: nil, seed: nil, seed2: nil)
+        RawOps.all_candidate_sampler(true_classes: true_classes, num_true: num_true, num_sampled: num_sampled, unique: unique, seed: seed, seed2: seed2)
+      end
+
+      # def atrous_conv2d
+      # end
+
+      # def atrous_conv2d_transpose
+      # end
+
+      def avg_pool(value, ksize: nil, strides: nil, padding: nil, data_format: nil)
+        RawOps.avg_pool(value: value, ksize: ksize, strides: strides, padding: padding, data_format: data_format)
+      end
+
+      # def avg_pool1d
+      # end
+
+      # def avg_pool2d
+      # end
+
+      def avg_pool3d(input, ksize: nil, strides: nil, padding: nil, data_format: nil)
+        RawOps.avg_pool3d(input: input, ksize: ksize, strides: strides, padding: padding, data_format: data_format)
+      end
+
+      def batch_norm_with_global_normalization(t, m, v, beta, gamma, variance_epsilon: nil, scale_after_normalization: nil)
+        RawOps.batch_norm_with_global_normalization(t: t, m: m, v: v, beta: beta, gamma: gamma, variance_epsilon: variance_epsilon, scale_after_normalization: scale_after_normalization)
+      end
+
+      # def batch_normalization
+      # end
+
+      def bias_add(value, bias, data_format: nil)
+        RawOps.bias_add(value: value, bias: bias, data_format: data_format)
+      end
+
+      # def collapse_repeated
+      # end
+
+      def compute_accidental_hits(true_classes, sampled_candidates, num_true: nil, seed: nil, seed2: nil)
+        RawOps.compute_accidental_hits(true_classes: true_classes, sampled_candidates: sampled_candidates, num_true: num_true, seed: seed, seed2: seed2)
+      end
+
+      # def compute_average_loss
+      # end
+
+      # def conv1d
+      # end
+
+      # def conv1d_transpose
+      # end
+
+      def conv2d(input, filter, strides: nil, use_cudnn_on_gpu: nil, padding: nil, explicit_paddings: nil, data_format: nil, dilations: nil)
+        RawOps.conv2d(input: input, filter: filter, strides: strides, use_cudnn_on_gpu: use_cudnn_on_gpu, padding: padding, explicit_paddings: explicit_paddings, data_format: data_format, dilations: dilations)
+      end
+
+      # def conv2d_transpose
+      # end
+
+      def conv3d(input, filter, strides: nil, padding: nil, data_format: nil, dilations: nil)
+        RawOps.conv3d(input: input, filter: filter, strides: strides, padding: padding, data_format: data_format, dilations: dilations)
+      end
+
+      # def conv3d_transpose
+      # end
+
+      # def conv_transpose
+      # end
+
+      # def convolution
+      # end
+
+      # def crelu
+      # end
+
+      def ctc_beam_search_decoder(inputs, sequence_length, beam_width: nil, top_paths: nil, merge_repeated: nil)
+        RawOps.ctc_beam_search_decoder(inputs: inputs, sequence_length: sequence_length, beam_width: beam_width, top_paths: top_paths, merge_repeated: merge_repeated)
+      end
+
+      def ctc_greedy_decoder(inputs, sequence_length, merge_repeated: nil)
+        RawOps.ctc_greedy_decoder(inputs: inputs, sequence_length: sequence_length, merge_repeated: merge_repeated)
+      end
+
+      def ctc_loss(inputs, labels_indices, labels_values, sequence_length, preprocess_collapse_repeated: nil, ctc_merge_repeated: nil, ignore_longer_outputs_than_inputs: nil)
+        RawOps.ctc_loss(inputs: inputs, labels_indices: labels_indices, labels_values: labels_values, sequence_length: sequence_length, preprocess_collapse_repeated: preprocess_collapse_repeated, ctc_merge_repeated: ctc_merge_repeated, ignore_longer_outputs_than_inputs: ignore_longer_outputs_than_inputs)
+      end
+
+      # def ctc_unique_labels
+      # end
+
+      def depth_to_space(input, block_size: nil, data_format: nil)
+        RawOps.depth_to_space(input: input, block_size: block_size, data_format: data_format)
+      end
+
+      # def depthwise_conv2d
+      # end
+
+      # def depthwise_conv2d_backprop_filter
+      # end
+
+      # def depthwise_conv2d_backprop_input
+      # end
+
+      def dilation2d(input, filter, strides: nil, rates: nil, padding: nil)
+        RawOps.dilation2d(input: input, filter: filter, strides: strides, rates: rates, padding: padding)
+      end
+
+      # def dropout
+      # end
+
+      def elu(features)
+        RawOps.elu(features: features)
+      end
+
+      # def embedding_lookup
+      # end
+
+      # def embedding_lookup_sparse
+      # end
+
+      # def erosion2d
+      # end
+
+      def fixed_unigram_candidate_sampler(true_classes, num_true: nil, num_sampled: nil, unique: nil, range_max: nil, vocab_file: nil, distortion: nil, num_reserved_ids: nil, num_shards: nil, shard: nil, unigrams: nil, seed: nil, seed2: nil)
+        RawOps.fixed_unigram_candidate_sampler(true_classes: true_classes, num_true: num_true, num_sampled: num_sampled, unique: unique, range_max: range_max, vocab_file: vocab_file, distortion: distortion, num_reserved_ids: num_reserved_ids, num_shards: num_shards, shard: shard, unigrams: unigrams, seed: seed, seed2: seed2)
+      end
+
+      def fractional_avg_pool(value, pooling_ratio: nil, pseudo_random: nil, overlapping: nil, deterministic: nil, seed: nil, seed2: nil)
+        RawOps.fractional_avg_pool(value: value, pooling_ratio: pooling_ratio, pseudo_random: pseudo_random, overlapping: overlapping, deterministic: deterministic, seed: seed, seed2: seed2)
+      end
+
+      def fractional_max_pool(value, pooling_ratio: nil, pseudo_random: nil, overlapping: nil, deterministic: nil, seed: nil, seed2: nil)
+        RawOps.fractional_max_pool(value: value, pooling_ratio: pooling_ratio, pseudo_random: pseudo_random, overlapping: overlapping, deterministic: deterministic, seed: seed, seed2: seed2)
+      end
+
+      def in_top_k(predictions, targets, k: nil)
+        RawOps.in_top_k(predictions: predictions, targets: targets, k: k)
+      end
+
+      def l2_loss(t)
+        RawOps.l2_loss(t: t)
+      end
+
+      # def l2_normalize
+      # end
+
+      def leaky_relu(features, alpha: nil)
+        RawOps.leaky_relu(features: features, alpha: alpha)
+      end
+
+      def learned_unigram_candidate_sampler(true_classes, num_true: nil, num_sampled: nil, unique: nil, range_max: nil, seed: nil, seed2: nil)
+        RawOps.learned_unigram_candidate_sampler(true_classes: true_classes, num_true: num_true, num_sampled: num_sampled, unique: unique, range_max: range_max, seed: seed, seed2: seed2)
+      end
+
+      # def local_response_normalization
+      # end
+
+      # def log_poisson_loss
+      # end
+
+      def log_softmax(logits)
+        RawOps.log_softmax(logits: logits)
+      end
+
+      def lrn(input, depth_radius: nil, bias: nil, alpha: nil, beta: nil)
+        RawOps.lrn(input: input, depth_radius: depth_radius, bias: bias, alpha: alpha, beta: beta)
+      end
+
+      def max_pool(input, ksize: nil, strides: nil, padding: nil, data_format: nil)
+        RawOps.max_pool(input: input, ksize: ksize, strides: strides, padding: padding, data_format: data_format)
+      end
+
+      # def max_pool1d
+      # end
+
+      # def max_pool2d
+      # end
+
+      def max_pool3d(input, ksize: nil, strides: nil, padding: nil, data_format: nil)
+        RawOps.max_pool3d(input: input, ksize: ksize, strides: strides, padding: padding, data_format: data_format)
+      end
+
+      def max_pool_with_argmax(input, ksize: nil, strides: nil, padding: nil, include_batch_in_index: nil)
+        RawOps.max_pool_with_argmax(input: input, ksize: ksize, strides: strides, padding: padding, include_batch_in_index: include_batch_in_index)
+      end
+
+      # def moments
+      # end
+
+      # def nce_loss
+      # end
+
+      # def normalize_moments
+      # end
+
+      # def pool
+      # end
+
+      def relu(features)
+        RawOps.relu(features: features)
+      end
+
+      def relu6(features)
+        RawOps.relu6(features: features)
+      end
+
+      # def safe_embedding_lookup_sparse
+      # end
+
+      # def sampled_softmax_loss
+      # end
+
+      # def scale_regularization_loss
+      # end
+
+      def selu(features)
+        RawOps.selu(features: features)
+      end
+
+      # def separable_conv2d
+      # end
+
+      def sigmoid(x)
+        RawOps.sigmoid(x: x)
+      end
+
+      # def sigmoid_cross_entropy_with_logits
+      # end
+
+      def softmax(logits)
+        RawOps.softmax(logits: logits)
+      end
+
+      def softmax_cross_entropy_with_logits(features, labels)
+        RawOps.softmax_cross_entropy_with_logits(features: features, labels: labels)
+      end
+
+      def softplus(features)
+        RawOps.softplus(features: features)
+      end
+
+      def softsign(features)
+        RawOps.softsign(features: features)
+      end
+
+      def space_to_batch(input, paddings, block_size: nil)
+        RawOps.space_to_batch(input: input, paddings: paddings, block_size: block_size)
+      end
+
+      def space_to_depth(input, block_size: nil, data_format: nil)
+        RawOps.space_to_depth(input: input, block_size: block_size, data_format: data_format)
+      end
+
+      def sparse_softmax_cross_entropy_with_logits(features, labels)
+        RawOps.sparse_softmax_cross_entropy_with_logits(features: features, labels: labels)
+      end
+
+      # def sufficient_statistics
+      # end
+
+      def tanh(x)
+        RawOps.tanh(x: x)
+      end
+
+      def top_k(input, k: nil, sorted: nil)
+        RawOps.top_k(input: input, k: k, sorted: sorted)
+      end
+
+      # def weighted_cross_entropy_with_logits
+      # end
+
+      # def weighted_moments
+      # end
+
+      # def with_space_to_batch
+      # end
+
+      # def zero_fraction
+      # end
+    end
+  end
+end
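
The new file above adds a TensorFlow::NN namespace of thin RawOps wrappers (activations, pooling, convolutions, candidate samplers, and loss kernels). A minimal sketch of the call style; the input values are illustrative only:

  require "tensorflow"

  x = TensorFlow.convert_to_tensor([-1.0, 0.0, 2.0])
  TensorFlow::NN.relu(x)      # => [0.0, 0.0, 2.0]
  TensorFlow::NN.softplus(x)  # log(1 + e**x) elementwise, ≈ [0.3133, 0.6931, 2.1269]
  TensorFlow::NN.softmax(TensorFlow.convert_to_tensor([[1.0, 2.0, 3.0]]))  # each row sums to 1
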
@@ -1,11 +1,12 @@
 # keep in alphabetical order
 module TensorFlow
   module Ops
-    def eye(num_rows, num_columns: nil)
-      num_columns ||= num_rows
-      zeros = self.zeros([num_rows, num_columns])
-      ones = self.ones([num_rows])
-      RawOps.matrix_set_diag(input: zeros, diagonal: ones)
+    def cast(x, dtype)
+      Utils.execute("Cast", [x], DstT: FFI::DataType[dtype])
+    end
+
+    def expand_dims(input, axis)
+      RawOps.expand_dims(input: input, dim: axis)
     end
 
     def fill(dims, value)
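
cast and expand_dims replace the old eye helper in this file. A sketch, assuming this Ops mixin is exposed on the top-level TensorFlow module like the other helpers here, and that dtypes are passed as symbols (e.g. :int32):

  x = TensorFlow.convert_to_tensor([1.1, 2.7])
  TensorFlow.cast(x, :int32)    # => [1, 2]  (float-to-int casts truncate toward zero)
  TensorFlow.expand_dims(x, 0)  # shape [2] becomes shape [1, 2]
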
@@ -16,10 +17,6 @@ module TensorFlow
       RawOps.identity(input: input)
     end
 
-    def matmul(a, b)
-      RawOps.mat_mul(a: a, b: b)
-    end
-
     def ones(dims)
       fill(dims, 1)
     end
@@ -32,6 +29,10 @@ module TensorFlow
       RawOps.range(start: start, limit: limit, delta: delta)
     end
 
+    def squeeze(input, axis: nil)
+      RawOps.squeeze(input: input, squeeze_dims: axis)
+    end
+
     def timestamp
       RawOps.timestamp
     end
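
squeeze is the counterpart of expand_dims above, under the same assumptions about how the Ops mixin is exposed:

  y = TensorFlow.expand_dims(TensorFlow.convert_to_tensor([1.0, 2.0]), 0)  # shape [1, 2]
  TensorFlow.squeeze(y)             # removes every size-1 axis, back to shape [2]
  TensorFlow.squeeze(y, axis: [0])  # or name the axes to drop explicitly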