tensor_stream 0.1.1 → 0.1.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA1:
-   metadata.gz: fdcfc961fddc348440126986de425c0b1784c235
-   data.tar.gz: 35649e38711773b0caf91ed791166cc8a733c2a4
+   metadata.gz: b5b9fcecbe7b27fadeaf403d886ebd0bbdaff531
+   data.tar.gz: b33ee60d3a4fc3c182a2984b8caaac80b7559435
  SHA512:
-   metadata.gz: a2ac76af196eab64d2fadd327e724fd927f3de52d2fd1043896299f95022f92c5c5d833d0d18ed16ce1b5acac1fb29d292bb73f4036a15265ddea662af87a09c
-   data.tar.gz: bd0b0b4078bd9d347e984bc2dde41952cd268b75d5697a1f7b722792fc171ff19bb9aebc41ec0199c092494f1aa9e0e3f9e635d848cf0e7cdb68b132ce66ebd7
+   metadata.gz: f847e4d908f8c1b77f331caa45ddfec15554c44076d8e49b1d5520a61bdfe4294542c5a50433699cfa9e8e88eee3ae5780c00f02d2451282f1143fb5a92822be
+   data.tar.gz: 0ce481db0db30fc53921f011be4d4f294b7f9b78d9fc6a91690f694a912d4d3cbd4cf452290162d8af88b4916bc0a3a7738cf58a5f2082292e833bb8325c92b4
data/.gitignore CHANGED
@@ -7,6 +7,7 @@
  /pkg/
  /spec/reports/
  /tmp/
+ *.gem

  # rspec failure tracking
  .rspec_status
data/.rubocop.yml ADDED
@@ -0,0 +1,74 @@
+ AllCops:
+   Exclude:
+     - samples/*
+     - bin/*
+     - spec/**/*
+     - tensor_stream.gemspec
+     - Rakefile
+
+ Metrics/LineLength:
+   Max: 200
+
+ Metrics/AbcSize:
+   Enabled: false
+
+ Metrics/PerceivedComplexity:
+   Enabled: false
+
+ Metrics/MethodLength:
+   Enabled: false
+
+ Metrics/CyclomaticComplexity:
+   Enabled: false
+
+ Naming/AccessorMethodName:
+   Exclude:
+     - lib/tensor_stream.rb
+     - lib/tensor_stream/control_flow.rb
+     - lib/tensor_stream/graph.rb
+     - lib/tensor_stream/operation.rb
+
+ Style/Documentation:
+   Exclude:
+     - lib/tensor_stream/version.rb
+     - lib/tensor_stream/trainer.rb
+     - lib/tensor_stream/nn/nn_ops.rb
+     - lib/tensor_stream/evaluator/evaluator.rb
+
+ Lint/UnusedMethodArgument:
+   Exclude:
+     - lib/tensor_stream/train/saver.rb
+     - lib/tensor_stream/ops.rb
+
+ Metrics/ParameterLists:
+   Max: 8
+
+ Style/PerlBackrefs:
+   Enabled: false
+
+ Style/RegexpLiteral:
+   Enabled: false
+
+ Naming/MemoizedInstanceVariableName:
+   Enabled: false
+
+ Metrics/ModuleLength:
+   Max: 200
+
+ Metrics/ClassLength:
+   Max: 250
+   Exclude:
+     - lib/tensor_stream/evaluator/ruby_evaluator.rb
+
+ Naming/VariableNumber:
+   Enabled: false
+
+ Style/DoubleNegation:
+   Enabled: false
+
+ Style/TrailingCommaInHashLiteral:
+   Enabled: false
+
+ Naming/UncommunicativeMethodParamName:
+   Exclude:
+     - lib/tensor_stream/evaluator/ruby_evaluator.rb
data/README.md CHANGED
@@ -57,8 +57,8 @@ X = tf.placeholder("float")
  Y = tf.placeholder("float")

  # Set model weights
- W = tf.Variable(rand, name: "weight")
- b = tf.Variable(rand, name: "bias")
+ W = tf.variable(rand, name: "weight")
+ b = tf.variable(rand, name: "bias")

  # Construct a linear model
  pred = X * W + b
@@ -71,7 +71,7 @@ optimizer = TensorStream::Train::GradientDescentOptimizer.new(learning_rate).min
  # Initialize the variables (i.e. assign their default value)
  init = tf.global_variables_initializer()

- tf.Session do |sess|
+ tf.session do |sess|
    start_time = Time.now
    sess.run(init)
    (0..training_epochs).each do |epoch|
@@ -105,6 +105,7 @@ end

  - This is an early preview release and many things still don't work
  - Performance is not great, at least until the opencl and/or sciruby backends are complete
+ - However if you really need an op supported please feel free to file a pull request with the corresponding failing test (see spec/operation_spec.rb)

  ## Development

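The README hunks above track the 0.1.2 rename of the top-level helpers from `tf.Variable`/`tf.Session` to snake_case `tf.variable`/`tf.session` (the corresponding library change appears in the `data/lib/tensor_stream.rb` hunks below). A minimal migration sketch, assuming `tf` is aliased to `TensorStream` as in the README examples:

```ruby
require 'tensor_stream'

tf = TensorStream # alias used by the README examples

# 0.1.1 spelling (removed in 0.1.2):
#   W = tf.Variable(rand, name: "weight")
#   tf.Session do |sess| ... end

# 0.1.2 spelling:
W = tf.variable(rand, name: "weight")
b = tf.variable(rand, name: "bias")

tf.session do |sess|
  sess.run(tf.global_variables_initializer())
end
```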
data/lib/tensor_stream.rb CHANGED
@@ -1,4 +1,4 @@
- require "tensor_stream/version"
+ require 'tensor_stream/version'
  require 'deep_merge'
  require 'matrix'
  require 'concurrent'
@@ -17,9 +17,10 @@ require 'tensor_stream/trainer'
  require 'tensor_stream/nn/nn_ops'
  require 'tensor_stream/evaluator/evaluator'
  # require 'tensor_stream/libraries/layers'
- require "tensor_stream/monkey_patches/integer"
+ require 'tensor_stream/monkey_patches/integer'
  require 'tensor_stream/ops'

+ # module that exposes TensorStream top level functions
  module TensorStream
    extend TensorStream::OpHelper
    extend TensorStream::Ops
@@ -48,8 +49,8 @@
      TensorStream::Graph.get_default_graph.executing_eagerly?
    end

-   def self.Variable(value, options = {})
-     common_options= {
+   def self.variable(value, options = {})
+     common_options = {
        initializer: Operation.new(:assign, nil, value),
        name: options[:name]
      }
@@ -64,16 +65,15 @@
      end
    end

-   def self.Session(evaluator = :ruby_evaluator, thread_pool_class: Concurrent::ImmediateExecutor)
+   def self.session(evaluator = :ruby_evaluator, thread_pool_class: Concurrent::ImmediateExecutor)
      session = TensorStream::Session.new(evaluator, thread_pool_class: thread_pool_class)
-     if block_given?
-       yield session
-     end
+     yield session if block_given?
+
      session
    end

    def self.program(&block)
-     block.(self)
+     block.call(self)
    end

    def self.layers
@@ -94,10 +94,11 @@
      dimensions = []
      value_ptr = value

-     begin
-       dtype, rank, value_ptr, d = dtype_eval(dtype, rank, value_ptr)
+     Kernel.loop do
+       dtype, rank, value_ptr, d = dtype_eval(rank, value_ptr)
        dimensions << d
-     end while dtype == :array
+       break if dtype != :array
+     end

      TensorStream::Tensor.new(dtype, rank, options[:shape] || dimensions, shared_options)
    end
@@ -127,12 +128,10 @@
      TensorStream::Trainer
    end

-   private
-
-   def self.check_allowed_types(t, types)
-     return t unless t.is_a?(Tensor)
-     return t if t.data_type.nil?
+   def self.check_allowed_types(input, types)
+     return input unless input.is_a?(Tensor)
+     return input if input.data_type.nil?

-     fail "Parameter data type #{t.data_type} passed not in #{types.join(',')}" if !types.map(&:to_sym).include?(t.data_type)
+     raise "Parameter data type #{input.data_type} passed not in #{types.join(',')}" unless types.map(&:to_sym).include?(input.data_type)
    end
  end
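The `Kernel.loop` hunk above trades Ruby's post-condition `begin ... end while` form (which RuboCop flags) for `loop` with an explicit `break`. A standalone toy analogue of that rewrite, counting the nesting depth of an array roughly the way the `dtype_eval` loop walks a value (illustrative only, not the gem's code):

```ruby
# Post-condition style removed by the diff:
#   begin
#     rank += 1
#     value_ptr = value_ptr.first
#   end while value_ptr.is_a?(Array)

# Kernel#loop style introduced by the diff:
value_ptr = [[[1.0, 2.0]]]
rank = 0
loop do
  rank += 1
  value_ptr = value_ptr.first
  break unless value_ptr.is_a?(Array)
end
puts rank # => 3
```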
data/lib/tensor_stream/control_flow.rb CHANGED
@@ -1,18 +1,21 @@
  module TensorStream
+   # Defines a TensorStream controlflow op
    class ControlFlow < Operation
      attr_accessor :ops

      def initialize(flow_type, items, ops = nil, options = {})
+       @graph = options[:graph] || TensorStream.get_default_graph
+
        @operation = :"flow_#{flow_type}"
        @items = items
        @name = set_name
        @ops = ops
-       @source = set_source(caller_locations)
-       @graph = options[:graph] || TensorStream.get_default_graph
+       @source = format_source(caller_locations)
+
        @graph.add_node(self)
      end

-     def set_data_type(passed_data_type)
+     def set_data_type(_passed_data_type)
        :unknown
      end

@@ -20,4 +23,4 @@ module TensorStream
        eval
      end
    end
- end
+ end
data/lib/tensor_stream/evaluator/evaluator.rb CHANGED
@@ -4,4 +4,4 @@ require 'tensor_stream/evaluator/ruby_evaluator'
  module TensorStream
    module Evaluator
    end
- end
+ end
data/lib/tensor_stream/evaluator/operation_helpers/random_gaussian.rb CHANGED
@@ -1,6 +1,6 @@
  # http://creativecommons.org/publicdomain/zero/1.0/
  class RandomGaussian
-   def initialize(mean, stddev, rand_helper = lambda { Kernel.rand })
+   def initialize(mean, stddev, rand_helper = -> { Kernel.rand })
      @rand_helper = rand_helper
      @mean = mean
      @stddev = stddev
@@ -9,24 +9,23 @@
    end

    def rand
-     if @valid then
+     if @valid
        @valid = false
-       return @next
+       @next
      else
        @valid = true
        x, y = self.class.gaussian(@mean, @stddev, @rand_helper)
        @next = y
-       return x
+       x
      end
    end

-   private
    def self.gaussian(mean, stddev, rand)
      theta = 2 * Math::PI * rand.call
      rho = Math.sqrt(-2 * Math.log(1 - rand.call))
      scale = stddev * rho
      x = mean + scale * Math.cos(theta)
      y = mean + scale * Math.sin(theta)
-     return x, y
+     [x, y]
    end
- end
+ end
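For context, `RandomGaussian.gaussian` above is the Box-Muller transform: two independent uniform samples u1, u2 are mapped to two independent normal samples, x = mean + stddev * sqrt(-2 ln(1 - u2)) * cos(2 * pi * u1), with the matching sin() term giving y. A usage sketch of the class as defined in this file (the printed values are illustrative only):

```ruby
require 'tensor_stream/evaluator/operation_helpers/random_gaussian'

gen = RandomGaussian.new(0.0, 1.0) # mean, stddev
samples = Array.new(4) { gen.rand } # each Box-Muller draw yields a pair, cached across calls
p samples # e.g. [-0.23, 1.07, 0.42, -1.65]
```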
data/lib/tensor_stream/evaluator/ruby_evaluator.rb CHANGED
@@ -1,4 +1,4 @@
- require "tensor_stream/evaluator/operation_helpers/random_gaussian"
+ require 'tensor_stream/evaluator/operation_helpers/random_gaussian'
  require 'tensor_stream/math_gradients'

  module TensorStream
@@ -57,9 +57,7 @@
          old_tensor = tensor
          tensor = run(tensor, context)

-         if tensor.is_a?(Array) && !tensor.empty? && tensor[0].is_a?(Tensor)
-           tensor = tensor.map { |t| complete_eval(t, context) }
-         end
+         tensor = tensor.map { |t| complete_eval(t, context) } if tensor.is_a?(Array) && !tensor.empty? && tensor[0].is_a?(Tensor)

          return tensor if old_tensor.equal?(tensor)
          return tensor unless tensor.is_a?(Tensor)
@@ -104,7 +102,7 @@
            elsif x > 0
              1
            else
-             fail 'assert: cannot be here'
+             raise 'assert: cannot be here'
            end
          }

@@ -128,7 +126,7 @@
            input = complete_eval(a, child_context)
            start = complete_eval(b, child_context)
            size = complete_eval(tensor.options[:size], child_context)
-           fail "start index and size not of the same shape #{start.size} != #{size.size}" if start.size != size.size
+           raise "start index and size not of the same shape #{start.size} != #{size.size}" if start.size != size.size
            slice_tensor(input, start, size)
          when :negate
            call_vector_op(:negate, a, nil, child_context, ->(t, _u) { -t })
@@ -137,7 +135,7 @@
          when :sub
            call_vector_op(:sub, a, b, child_context, ->(t, u) { t - u })
          when :mul
-           call_vector_op(:mul, a, b, child_context, ->(t, u) { binding.pry if t.nil? || u.nil?; t * u })
+           call_vector_op(:mul, a, b, child_context, ->(t, u) { t * u })
          when :pow
            call_vector_op(:pow, a, b, child_context, ->(t, u) { t**u })
          when :concat
@@ -156,13 +154,13 @@
          when :cos
            call_op(:cos, a, child_context, ->(t, _b) { Math.cos(t) })
          when :log
-           call_op(:log, a, child_context, ->(t, _b) { t < 0 ? Float::NAN : Math.log(t)} )
+           call_op(:log, a, child_context, ->(t, _b) { t < 0 ? Float::NAN : Math.log(t) })
          when :exp
-           call_op(:exp, a, child_context, ->(t, _b) { Math.exp(t) } )
+           call_op(:exp, a, child_context, ->(t, _b) { Math.exp(t) })
          when :sqrt
-           call_op(:exp, a, child_context, ->(t, _b) { Math.sqrt(t) } )
+           call_op(:exp, a, child_context, ->(t, _b) { Math.sqrt(t) })
          when :square
-           call_op(:square, a, child_context, ->(t, _b) { t * t } )
+           call_op(:square, a, child_context, ->(t, _b) { t * t })
          when :stop_gradient
            run(a, child_context)
          when :random_uniform
@@ -184,13 +182,13 @@
            assign.value = complete_eval(tensor.items[1], child_context)
            assign.value
          when :assign_add
-           tensor.items[0].value = process_vector_math_op(tensor.items[0], tensor.items[1], child_context, ->(a,b) { a + b })
+           tensor.items[0].value = process_vector_math_op(tensor.items[0], tensor.items[1], child_context, ->(t, u) { t + u })
            tensor.items[0].value
          when :assign_sub
-           tensor.items[0].value = process_vector_math_op(tensor.items[0], tensor.items[1], child_context, ->(a,b) { a - b })
+           tensor.items[0].value = process_vector_math_op(tensor.items[0], tensor.items[1], child_context, ->(t, u) { t - u })
            tensor.items[0].value
          when :reduce_mean
-           c = tensor.data_type == :float ? 0.0 : 0
+           c = fp_type?(tensor.data_type) ? 0.0 : 0
            func = lambda { |v|
              if v.is_a?(Array)
                v.empty? ? c : (v.reduce(:+) / v.size)
@@ -201,9 +199,9 @@

            reduction(child_context, tensor, func)
          when :reduce_sum
-           c = tensor.data_type == :float ? 0.0 : 0
-           func = ->(v) {
-             if v.kind_of?(Array)
+           c = fp_type?(tensor.data_type) ? 0.0 : 0
+           func = lambda { |v|
+             if v.is_a?(Array)
                v.empty? ? c : v.reduce(:+)
              else
                v
@@ -212,9 +210,9 @@

            reduction(child_context, tensor, func)
          when :reduce_prod
-           c = tensor.data_type == :float ? 1.0 : 1
-           func = ->(v) {
-             if v.kind_of?(Array)
+           c = fp_type?(tensor.data_type) ? 1.0 : 1
+           func = lambda { |v|
+             if v.is_a?(Array)
                v.empty? ? c : v.reduce(:*)
              else
                v
@@ -231,7 +229,7 @@

            Array.new(rows) do |i|
              Array.new(columns) do |col|
-               if tensor.data_type == :float32
+               if fp_type?(tensor.data_type)
                  i == col ? 1.0 : 0.0
                else
                  i == col ? 1 : 0
@@ -241,7 +239,7 @@
          when :cond
            pred = complete_eval(tensor.options[:pred], child_context)

-           if is_all_true(pred)
+           if all_true?(pred)
              complete_eval(a, child_context)
            else
              complete_eval(b, child_context)
@@ -288,15 +286,14 @@
            end

            if shape.is_a?(Array) && shape.size.zero?
-             func.call()
+             func.call
            else
              shape = [shape.to_i] unless shape.is_a?(Array)
              generate_vector(shape, generator: func)
            end
          when :shape
            input = complete_eval(a, child_context)
-
-           shape_eval(input)
+           shape_eval(input, tensor.options[:out_type])
          when :matmul
            matrix_a = complete_eval(a, child_context)
            matrix_b = complete_eval(b, child_context)
@@ -319,7 +316,7 @@

            (Matrix[*matrix_a] * Matrix[*matrix_b]).to_a
          when :gradients
-           fail "not implemented in evaluator"
+           raise 'not implemented in evaluator' # see TensorStream.gradients instead.
          when :identity
            complete_eval(a, child_context)
          when :print
@@ -331,13 +328,13 @@
            a = complete_eval(a, child_context)
            get_rank(a)
          when :div
-           process_vector_math_op(a, b, child_context, ->(a,b) { a/b })
+           process_vector_math_op(a, b, child_context, ->(t, u) { t / u })
          when :reshape
            arr = complete_eval(a, child_context)
            new_shape = complete_eval(b, child_context)

            flat_arr = arr.flatten
-           return flat_arr[0] if new_shape.size == 0 && flat_arr.size == 1
+           return flat_arr[0] if new_shape.size.zero? && flat_arr.size == 1

            new_shape = fix_inferred_elements(new_shape, flat_arr.size)

@@ -353,7 +350,7 @@

            call_vector_op(:max, a, b, child_context, ->(t, u) { [t, u].max })
          else
-           fail "unknown op #{tensor.operation}"
+           raise "unknown op #{tensor.operation}"
          end.tap do |result|
            if tensor.breakpoint
              a = complete_eval(a, child_context)
@@ -412,9 +409,9 @@
          else
            max = nil
            max_index = 0
-           a.each_with_index do |a, index|
-             if max.nil? || a > max
-               max = a
+           a.each_with_index do |x, index|
+             if max.nil? || x > max
+               max = x
                max_index = index
              end
            end
@@ -445,17 +442,17 @@
      end

      def arr_pad(arr, paddings, data_type = :float32, rank = 0)
-       fail "padding #{paddings[rank]} needs to have to elements [before, after]" if paddings[rank].size != 2
+       raise "padding #{paddings[rank]} needs to have to elements [before, after]" if paddings[rank].size != 2

        before = paddings[rank][0]
        after = paddings[rank][1]
-
+       pad_value = fp_type?(data_type) ? 0.0 : 0
        if arr[0].is_a?(Array)
          next_dim_elem = arr.collect { |a| arr_pad(a, paddings, data_type, rank + 1) }
-         padding = deep_dup_array(next_dim_elem[0], data_type == :float32 ? 0.0 : 0)
-         before.times.map { padding } + next_dim_elem + after.times.map { padding }
+         padding = deep_dup_array(next_dim_elem[0], pad_value)
+         Array.new(before) { padding } + next_dim_elem + Array.new(after) { padding }
        else
-         before.times.map { data_type == :float32 ? 0.0 : 0 } + arr + after.times.map { data_type == :float32 ? 0.0 : 0 }
+         Array.new(before) { pad_value } + arr + Array.new(after) { pad_value }
        end
      end

@@ -485,7 +482,7 @@
      def matmul_const_transform(mat, mat_b, tensor)
        if !mat.is_a?(Array)
          compat_shape = shape_eval(mat_b).reverse
-         func = ->() { tensor.data_type == :int32 ? mat.to_i : mat.to_f }
+         func = -> { tensor.data_type == :int32 ? mat.to_i : mat.to_f }

          generate_vector(compat_shape, generator: func)
        else
@@ -496,7 +493,7 @@
      def fix_inferred_elements(shape, total_size)
        return shape if shape.empty?

-       current_size = shape.inject(1) { |product, n| n > 0 ? product * n : product }
+       current_size = shape.inject(1) { |product, n| n > 0 ? product * n : product }
        inferred_size = total_size / current_size
        shape.map { |s| s == -1 ? inferred_size : s }
      end
@@ -506,8 +503,8 @@

        s = new_shape.shift

-       if new_shape.size == 0
-         fail "reshape dimen mismatch #{arr.size} != #{s}" if arr.size != s
+       if new_shape.size.zero?
+         raise "reshape dimen mismatch #{arr.size} != #{s}" if arr.size != s
          return arr
        end

@@ -525,7 +522,7 @@
      end

      def call_vector_op(op, a, b, child_context, func)
-       process_vector_math_op(a, b, child_context, func)
+       process_vector_math_op(a, b, child_context, func)
      rescue FullEvalNotPossible
        TensorStream.send(op.to_sym, a, b)
      end
@@ -534,12 +531,12 @@
        eval_a = complete_eval(a, child_context) unless a.nil?
        eval_b = complete_eval(b, child_context) unless b.nil?

-       fail FullEvalNotPossible.new, "full eval not possible for #{a.name}" if eval_a.is_a?(Tensor) || eval_b.kind_of?(Tensor)
+       raise FullEvalNotPossible.new, "full eval not possible for #{a.name}" if eval_a.is_a?(Tensor) || eval_b.is_a?(Tensor)

        # ruby scalar
-       if get_rank(eval_a) == 0
-         if (get_rank(eval_b)) == 0
-           op.call(eval_a,eval_b)
+       if get_rank(eval_a).zero?
+         if get_rank(eval_b).zero?
+           op.call(eval_a, eval_b)
          else
            constant_op(eval_b, eval_a, child_context, op, true)
          end
@@ -547,14 +544,14 @@
          if get_rank(eval_b) > 0
            vector_op(eval_a, eval_b, child_context, op)
          else
-           constant_op(eval_a, eval_b, child_context, op)
+           constant_op(eval_a, eval_b, child_context, op)
          end
        end
      end

      def get_rank(value, rank = 0)
        return rank unless value.is_a?(Array)
-       return rank + 1 if value.size == 0
+       return rank + 1 if value.empty?

        get_rank(value[0], rank + 1)
      end
@@ -570,7 +567,7 @@
      end

      def concat(a, b, axis)
-       if axis == 0
+       if axis.zero?
          a + b
        else
          a.each_with_index.collect do |i, index|
@@ -581,80 +578,74 @@

      def process_function_op(a, child_context, op)
        # ruby scalar
-       if (a.kind_of?(Tensor) && a.shape.rank > 0) || a.kind_of?(Array)
+       if (a.is_a?(Tensor) && a.shape.rank > 0) || a.is_a?(Array)
          constant_op(a, 0, child_context, op)
-       elsif !a.kind_of?(Tensor) || a.shape.rank == 0
+       elsif !a.is_a?(Tensor) || a.shape.rank.zero?
          v = run(a, child_context)
-         fail FullEvalNotPossible.new, "full eval not possible for #{v.name}" if v.is_a?(Tensor) && !v.is_const
+         raise FullEvalNotPossible.new, "full eval not possible for #{v.name}" if v.is_a?(Tensor) && !v.is_const

          op.call(v, 0)
        else
-         fail 'cannot be here'
+         raise 'cannot be here'
        end
      end

-     def resolve_placeholder(placeholder, execution_context = {})
+     def resolve_placeholder(placeholder, _execution_context = {})
        return nil if placeholder.nil?
        return placeholder if retain.include?(placeholder)

-       var = if placeholder.kind_of?(Placeholder)
+       var = if placeholder.is_a?(Placeholder)
                @context[placeholder.name.to_sym].tap do |c|
-                 if c.nil?
-                   raise "missing placeholder #{placeholder.name}"
-                 end
+                 raise "missing placeholder #{placeholder.name}" if c.nil?
                end
              else
               placeholder
             end

-       return var unless placeholder.kind_of?(Tensor)
+       return var unless placeholder.is_a?(Tensor)
        Tensor.cast_dtype(var, placeholder.data_type)
      end

-     def reduce_axis(axis, val, keep_dims, child_context, op = ->(v) { v.kind_of?(Array) ? v.reduce(:+) : v })
+     def reduce_axis(axis, val, keep_dims, child_context, op = ->(v) { v.is_a?(Array) ? v.reduce(:+) : v })
        val = run(val, child_context)
        return val.is_a?(Array) ? op.call(val.flatten) : val if axis.nil?
-       return val.transpose.collect { |v| keep_dims ? [op.call(v)] : op.call(v) } if axis == 0
+       return val.transpose.collect { |v| keep_dims ? [op.call(v)] : op.call(v) } if axis.zero?
        return val.collect { |v| keep_dims ? [op.call(v)] : op.call(v) } if axis == 1

-       fail "can't handle with axis > 1 :("
+       raise "can't handle with axis > 1 :("
      end

      def constant_add(vector, constant)
        run(vector).collect do |item|
          if item.is_a?(Array)
            constant_add(item, constant)
+         elsif item.respond_to?(:value)
+           item.value + constant
          else
-           if item.respond_to?(:value)
-             item.value + constant
-           else
-             item + constant
-           end
+           item + constant
          end
        end
      end

-     def constant_op(vector, constant, child_context, op = ->(a,b) { a + b }, switch = false)
+     def constant_op(vector, constant, child_context, op = ->(a, b) { a + b }, switch = false)
        eval_vector = complete_eval(vector, child_context)
        constant = complete_eval(constant, child_context)

-       fail FullEvalNotPossible.new, "full eval not possible for #{eval_vector.name}" if eval_vector.kind_of?(Tensor) || constant.kind_of?(Tensor)
+       raise FullEvalNotPossible.new, "full eval not possible for #{eval_vector.name}" if eval_vector.is_a?(Tensor) || constant.is_a?(Tensor)

        eval_vector.each_with_index.collect do |item, index|
          c = constant.is_a?(Array) ? constant[index] : constant
          if item.is_a?(Array)
            constant_op(item, c, child_context, op, switch)
+         elsif item.respond_to?(:value)
+           switch ? op.call(c, item.value) : op.call(item.value, c)
          else
-           if item.respond_to?(:value)
-             switch ? op.(c, item.value) : op.(item.value, c)
-           else
-             switch ? op.(c, item) : op.(item, c)
-           end
+           switch ? op.call(c, item) : op.call(item, c)
          end
        end
      end

-     def call_3way_vector_op(v_a, v_b, v_c, child_context, op = ->(a,b,c) { a + b + c})
+     def call_3way_vector_op(v_a, v_b, v_c, child_context, op = ->(a, b, c) { a + b + c })
        return op.call(v_a, v_b, v_c) unless v_a.is_a?(Array)

        v_a.each_with_index.collect do |v1, index|
@@ -668,12 +659,12 @@
        end
      end

-     def vector_op(vector, vector2, child_context, op = ->(a,b) { a + b })
+     def vector_op(vector, vector2, child_context, op = ->(a, b) { a + b })
        v_a = run(vector, child_context)
        v_b = run(vector2, child_context)

        if get_rank(v_a) < get_rank(v_b) # upgrade rank of A
-         duplicated = v_b.size.times.collect do
+         duplicated = Array.new(v_b.size) do
            v_a
          end
          return vector_op(duplicated, v_b, child_context, op)
@@ -692,10 +683,10 @@
        end
      end

-     def is_all_true(arr)
+     def all_true?(arr)
        if arr.is_a?(Array)
-         arr.each do |a|
-           return false if !is_all_true(a)
+         arr.each do |a|
+           return false unless all_true?(a)
          end
          return true
        end
@@ -710,33 +701,31 @@
        v_a.each_with_index.collect do |item, index|
          if item.is_a?(Array)
            constant_add(item, constant)
+         elsif item.respond_to?(:value)
+           item.value + v_b[index].value
          else
-           if item.respond_to?(:value)
-             item.value + v_b[index].value
-           else
-             item + v_b[index]
-           end
+           item + v_b[index]
          end
        end
      end

-     def generate_vector(shape, dtype: :float32, generator: )
+     def generate_vector(shape, dtype: :float32, generator:)
        if shape.is_a?(Integer)
-         shape.times.collect do
+         Array.new(shape) do
            generator.call
          end
        elsif shape.size > 1
-         shape[0].times.collect do
+         Array.new(shape[0]) do
            generate_vector(shape[1..shape.size], generator: generator, dtype: dtype)
          end
        elsif shape.size == 1
-         shape[0].times.collect do
+         Array.new(shape[0]) do
            generator.call
          end
-       elsif shape.size == 0
+       elsif shape.size.zero?
          generator.call
        end
      end
    end
  end
- end
+ end
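A recurring edit throughout this file replaces literal `tensor.data_type == :float` / `== :float32` checks with an `fp_type?` predicate, so that other floating-point dtypes also get floating-point reduce seeds and pad values. The helper itself is not part of this diff; the following is a hypothetical sketch of the behavior being relied on, not the gem's actual implementation:

```ruby
# Hypothetical stand-in for the fp_type? helper referenced above.
def fp_type?(dtype)
  %i[float float32 float64].include?(dtype)
end

fp_type?(:float64) ? 0.0 : 0 # => 0.0 (reduce seed / pad value for float tensors)
fp_type?(:int32)   ? 0.0 : 0 # => 0   (integer tensors keep integer seeds)
```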