tensor_stream 0.9.2 → 0.9.5

Files changed (39)
  1. checksums.yaml +4 -4
  2. data/lib/tensor_stream/evaluator/base_evaluator.rb +3 -0
  3. data/lib/tensor_stream/evaluator/operation_helpers/array_ops_helper.rb +25 -0
  4. data/lib/tensor_stream/evaluator/ruby/array_ops.rb +24 -24
  5. data/lib/tensor_stream/evaluator/ruby/check_ops.rb +8 -0
  6. data/lib/tensor_stream/evaluator/ruby/images_ops.rb +16 -18
  7. data/lib/tensor_stream/evaluator/ruby/math_ops.rb +20 -4
  8. data/lib/tensor_stream/evaluator/ruby/nn_ops.rb +9 -5
  9. data/lib/tensor_stream/evaluator/ruby/random_ops.rb +4 -4
  10. data/lib/tensor_stream/evaluator/ruby_evaluator.rb +16 -61
  11. data/lib/tensor_stream/graph_builder.rb +1 -0
  12. data/lib/tensor_stream/graph_serializers/graphml.rb +1 -1
  13. data/lib/tensor_stream/graph_serializers/pbtext.rb +1 -0
  14. data/lib/tensor_stream/helpers/infer_shape.rb +182 -0
  15. data/lib/tensor_stream/helpers/op_helper.rb +2 -2
  16. data/lib/tensor_stream/images.rb +1 -1
  17. data/lib/tensor_stream/math_gradients.rb +1 -1
  18. data/lib/tensor_stream/monkey_patches/array.rb +15 -0
  19. data/lib/tensor_stream/monkey_patches/float.rb +3 -0
  20. data/lib/tensor_stream/monkey_patches/integer.rb +3 -0
  21. data/lib/tensor_stream/monkey_patches/patch.rb +70 -0
  22. data/lib/tensor_stream/nn/nn_ops.rb +43 -9
  23. data/lib/tensor_stream/operation.rb +2 -153
  24. data/lib/tensor_stream/ops.rb +71 -56
  25. data/lib/tensor_stream/profile/report_tool.rb +3 -3
  26. data/lib/tensor_stream/tensor_shape.rb +9 -6
  27. data/lib/tensor_stream/train/adadelta_optimizer.rb +1 -1
  28. data/lib/tensor_stream/train/adagrad_optimizer.rb +1 -1
  29. data/lib/tensor_stream/train/adam_optimizer.rb +2 -2
  30. data/lib/tensor_stream/train/learning_rate_decay.rb +29 -0
  31. data/lib/tensor_stream/train/optimizer.rb +7 -6
  32. data/lib/tensor_stream/train/saver.rb +1 -0
  33. data/lib/tensor_stream/train/slot_creator.rb +2 -2
  34. data/lib/tensor_stream/trainer.rb +3 -0
  35. data/lib/tensor_stream/utils.rb +2 -2
  36. data/lib/tensor_stream/version.rb +1 -1
  37. data/lib/tensor_stream.rb +5 -1
  38. data/samples/rnn.rb +108 -0
  39. metadata +8 -2
data/lib/tensor_stream/profile/report_tool.rb CHANGED
@@ -5,13 +5,13 @@ module TensorStream
     def self.profile_for(session, order_by: :slowest)
       context = session.last_session_context
       eval_times = context[:profile][:operations].map do |name, profile|
-        [name, profile[:eval_time], profile[:tensor].source]
+        [name, profile[:eval_time], profile[:shape], profile[:tensor].source]
       end
 
       if order_by == :slowest
-        eval_times.sort_by { |a, b| b[1] <=> a[1] }
+        eval_times.sort_by { |a| a[1] }.reverse!
       else
-        eval_times.sort_by { |a, b| a[1] <=> b[1] }
+        eval_times.sort_by { |a| a[1] }
       end
     end
   end
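With this change each profile row also carries the operation's output shape, and the :slowest ordering now sorts on the recorded eval time directly. A minimal usage sketch, assuming profiling is enabled through the session's profile_enabled option and that the module containing profile_for is TensorStream::ReportTool as the file name suggests (both are assumptions inferred from the release, not stated in this diff):

    require 'tensor_stream'

    tf = TensorStream
    a = tf.constant([[1.0, 2.0], [3.0, 4.0]])
    b = tf.constant([[5.0, 6.0], [7.0, 8.0]])

    sess = tf.session(profile_enabled: true) # assumed option name
    sess.run(tf.matmul(a, b))

    # each row: [op name, eval time, shape, source location], slowest first
    TensorStream::ReportTool.profile_for(sess, order_by: :slowest).each do |row|
      puts row.inspect
    end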
data/lib/tensor_stream/tensor_shape.rb CHANGED
@@ -20,7 +20,7 @@ module TensorStream
     end
 
     def ndims
-      shape.size
+      shape ? shape.size : nil
     end
 
     def scalar?
@@ -29,20 +29,21 @@ module TensorStream
 
     def known?
       return false if shape.nil?
-
+
      a_shape = shape.is_a?(Array) ? shape : [shape]
       a_shape.each { |s| return false if s.nil? || s < 0 }
 
       true
     end
 
-    def is_fully_defined?
+    def fully_defined?
       known?
     end
 
     def self.infer_shape(shape_a, shape_b)
-      return shape_a if shape_b.nil?
-      return shape_b if shape_a.nil?
+      return nil if shape_a.nil? || shape_b.nil?
+      return shape_a if shape_b.empty?
+      return shape_b if shape_a.empty?
       return shape_a if shape_a == shape_b
       return shape_b if shape_b.size > shape_a.size
       return shape_a if shape_a.size > shape_b.size
@@ -55,13 +56,14 @@ module TensorStream
         next nil if s.nil? || reversed_b[index].nil?
         next nil if s.is_a?(Tensor) || reversed_b[index].is_a?(Tensor)
         next reversed_b[index] if reversed_b[index] > s
+
         s
       end.reverse
     end
 
     def self.reshape(arr, new_shape)
       arr = arr.is_a?(Array) ? arr.flatten : [arr]
-
+      new_shape = new_shape.is_a?(TensorShape) ? new_shape.shape : new_shape
       new_shape = TensorShape.fix_inferred_elements(new_shape, arr.size)
       return arr[0] if arr.size == 1 && new_shape.empty?
 
@@ -71,6 +73,7 @@ module TensorStream
 
       if new_shape.size.zero?
         raise "reshape dimen mismatch #{arr.size} != #{s}" if arr.size != s
+
         return arr
       end
 
data/lib/tensor_stream/train/adadelta_optimizer.rb CHANGED
@@ -11,7 +11,7 @@ module TensorStream
         @learning_rate = learning_rate
         @rho = rho
         @epsilon = epsilon
-
+
         # Tensor versions of the constructor arguments, created in _prepare().
         @learning_rate_tensor = nil
         @rho_t = nil
data/lib/tensor_stream/train/adagrad_optimizer.rb CHANGED
@@ -7,7 +7,7 @@ module TensorStream
       attr_accessor :learning_rate
 
       def initialize(learning_rate, initial_accumulator_value = 0.1,
-          use_locking: false, name: "Adagrad")
+                     use_locking: false, name: "Adagrad")
         @learning_rate = learning_rate
         @initial_accumulator_value = initial_accumulator_value
         @learning_rate_tensor = nil
data/lib/tensor_stream/train/adam_optimizer.rb CHANGED
@@ -43,8 +43,8 @@ module TensorStream
 
       def get_beta_accumulators
         graph = TensorStream.get_default_graph
-        [ get_non_slot_variable("beta1_power", graph: graph),
-          get_non_slot_variable("beta2_power", graph: graph)]
+        [get_non_slot_variable("beta1_power", graph: graph),
+         get_non_slot_variable("beta2_power", graph: graph)]
       end
 
       def prepare
data/lib/tensor_stream/train/learning_rate_decay.rb ADDED
@@ -0,0 +1,29 @@
+##
+# Ruby port of tensorflow's learning rate decay functions
+module TensorStream
+  module Train
+    module LearningRateDecay
+      include TensorStream::Utils
+      include TensorStream::OpHelper
+      include TensorStream::Ops
+
+      ##
+      # Applies exponential decay to the learning rate
+      def exponential_decay(learning_rate, global_step, decay_steps, decay_rate, staircase: false, name: nil)
+        raise TensorStream::ValueError, "global_step is required for exponential_decay." if global_step.nil?
+
+        name_scope(name, default: 'ExponentialDecay', values: [learning_rate, global_step, decay_steps, decay_rate]) do
+          learning_rate = convert_to_tensor(learning_rate, name: "learning_rate")
+          data_type = learning_rate.data_type
+          decay_steps = cast(decay_steps, data_type)
+          decay_rate = cast(decay_rate, data_type)
+
+          global_step_recomp = cast(global_step, data_type)
+          p = global_step_recomp / decay_steps
+          p = floor(p) if staircase
+          multiply(learning_rate, pow(decay_rate, p), name: name)
+        end
+      end
+    end
+  end
+end
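The decayed value follows learning_rate * decay_rate ** (global_step / decay_steps); with staircase: true the exponent is floored so the rate drops in discrete steps. A usage sketch, assuming the module is reachable through TensorStream::Trainer (it is extended there in trainer.rb below) and that the variable options used here exist in this release; the 0.96 decay rate and 100-step interval are arbitrary example values:

    require 'tensor_stream'

    tf = TensorStream
    global_step = tf.variable(0, trainable: false, dtype: :int32, name: 'global_step')

    # 0.1 * 0.96 ** (global_step / 100), stepping down every 100 iterations
    learning_rate = TensorStream::Trainer.exponential_decay(0.1, global_step, 100, 0.96, staircase: true)

    optimizer = TensorStream::Train::GradientDescentOptimizer.new(learning_rate)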
data/lib/tensor_stream/train/optimizer.rb CHANGED
@@ -11,6 +11,7 @@ module TensorStream
         @name = name
         @use_locking = use_locking
         raise TensorStream::ValueError, "Must specify the optimizer name" unless @name
+
         @slots = {}
         @non_slots = {}
       end
@@ -51,6 +52,7 @@ module TensorStream
       def compute_gradients(loss, var_list: nil, grad_loss: nil)
         trainable_vars = if var_list
                            raise "var_list must be an array" unless var_list.is_a?(Array)
+
                            var_list.each_with_index { |var, index| raise "var #{index} not a Variable" unless var.is_a?(Variable) }
 
                            var_list
@@ -66,6 +68,7 @@ module TensorStream
       def get_slot(var, name)
         named_slots = @slots.fetch(name, nil)
         return nil if named_slots.nil?
+
         named_slots.fetch(var_key(var), nil)
       end
 
@@ -100,7 +103,7 @@ module TensorStream
       # op_name: string - Name to use when scoping the Variable that needs to be created
       def zeros_slot(var, slot_name, op_name)
         named_slots = slot_dict(slot_name)
-        if !named_slots.key?(var_key(var))
+        unless named_slots.key?(var_key(var))
           named_slots[var_key(var)] = create_zeros_slot(var, op_name)
         end
         named_slots[var_key(var)]
@@ -132,10 +135,9 @@ module TensorStream
       end
 
       def call_if_callable(param)
-        param.kind_of?(Proc) ? param.call : param
+        param.is_a?(Proc) ? param.call : param
       end
 
-
       def create_non_slot_variable(initial_value, name, colocate_with)
         graph = colocate_with.graph
 
@@ -152,9 +154,8 @@ module TensorStream
       # Find or create a slot for a variable, using an Initializer.
       def get_or_make_slot_with_initializer(var, initializer, shape, dtype, slot_name, op_name)
         named_slots = slot_dict(slot_name)
-        if !named_slots.key?(var_key(var))
-          new_slot_variable = create_slot_with_initializer(
-            var, initializer, shape, dtype, op_name)
+        unless named_slots.key?(var_key(var))
+          new_slot_variable = create_slot_with_initializer(var, initializer, shape, dtype, op_name)
           named_slots[var_key(var)] = new_slot_variable
         end
         named_slots[var_key(var)]
data/lib/tensor_stream/train/saver.rb CHANGED
@@ -80,6 +80,7 @@ module TensorStream
 
       def _add_saveable(saveables, seen_ops, saveable)
         raise TensorStream::ValueError, "The same saveable will be restored with two names: #{saveable.name}" if seen_ops.include?(saveable.op)
+
         saveables << saveable
         seen_ops << saveable.op
       end
data/lib/tensor_stream/train/slot_creator.rb CHANGED
@@ -5,7 +5,7 @@ module TensorStream
 
       ##
       # Helper function for creating a slot variable.
-      def create_slot_var(primary, val, scope, shape)
+      def create_slot_var(_primary, val, scope, shape)
        slot = get_variable(scope, initializer: val, trainable: false, shape: shape,
                            validate_shape: val.shape && val.shape.known?)
        slot
@@ -53,7 +53,7 @@ module TensorStream
      def create_zeros_slot(primary, name, dtype: nil, colocate_with_primary: true)
        dtype = primary.data_type if dtype.nil?
        slot_shape = primary.shape
-       slot_shape = if slot_shape.is_fully_defined?
+       slot_shape = if slot_shape.fully_defined?
                       slot_shape.shape
                     else
                       TensorStream.shape(primary.initialized_value)
data/lib/tensor_stream/trainer.rb CHANGED
@@ -7,13 +7,16 @@ require 'tensor_stream/train/adadelta_optimizer'
 require 'tensor_stream/train/adagrad_optimizer'
 require 'tensor_stream/train/rmsprop_optimizer'
 require 'tensor_stream/train/saver'
+require 'tensor_stream/train/learning_rate_decay'
 
 module TensorStream
   module Trainer
     extend TensorStream::Train::Utils
+    extend TensorStream::Train::LearningRateDecay
 
     def self.write_graph(graph, path, filename, as_text: true, serializer: TensorStream::Pbtext)
       raise "only supports as_text=true for now" unless as_text
+
       new_filename = File.join(path, filename)
       File.write(new_filename, serializer.new.get_string(graph))
     end
data/lib/tensor_stream/utils.rb CHANGED
@@ -104,13 +104,13 @@ module TensorStream
       get_default_graph.device(device_uri, &block)
     end
 
-    def name_scope(name, default: nil, values: nil)
+    def name_scope(name, default_name = nil, default: nil, values: nil)
       if values
         graph_count = values.select { |v| v.is_a?(Tensor) }.map(&:graph).map(&:object_id).uniq.size
         raise "values are not on the same graph" if graph_count > 1
       end
 
-      get_default_graph.name_scope(name || default) do |scope|
+      get_default_graph.name_scope(name || default_name || default) do |scope|
         yield scope if block_given?
       end
     end
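name_scope now also accepts a positional default_name in addition to the existing default: keyword (which exponential_decay above uses), mirroring the TensorFlow calling convention. A brief sketch of the fallback behaviour, assuming the usual top-level TensorStream helpers; the add op inside is only illustrative:

    tf = TensorStream

    # name is nil here, so the positional default_name is used for the scope
    tf.name_scope(nil, 'my_scope') do |scope|
      total = tf.add(tf.constant(1.0), tf.constant(2.0), name: 'total')
      puts scope      # expected to print "my_scope"
      puts total.name # op name is prefixed with the scope
    end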
data/lib/tensor_stream/version.rb CHANGED
@@ -1,5 +1,5 @@
 module TensorStream
-  VERSION = '0.9.2'.freeze
+  VERSION = '0.9.5'.freeze
 
   def self.version
     VERSION
data/lib/tensor_stream.rb CHANGED
@@ -32,12 +32,16 @@ require "tensor_stream/debugging/debugging"
32
32
  require 'tensor_stream/utils'
33
33
  require 'tensor_stream/train/utils'
34
34
  require 'tensor_stream/images'
35
- require 'tensor_stream/trainer'
35
+
36
36
  require 'tensor_stream/profile/report_tool'
37
37
 
38
38
  # require 'tensor_stream/libraries/layers'
39
+ require 'tensor_stream/monkey_patches/patch'
39
40
  require 'tensor_stream/monkey_patches/integer'
41
+ require 'tensor_stream/monkey_patches/float'
42
+ require 'tensor_stream/monkey_patches/array'
40
43
  require 'tensor_stream/ops'
44
+ require 'tensor_stream/trainer'
41
45
 
42
46
  # module that exposes TensorStream top level functions
43
47
  module TensorStream
data/samples/rnn.rb ADDED
@@ -0,0 +1,108 @@
+# RNN sample
+#
+# Ruby port Example based on article by Erik Hallström
+# https://medium.com/@erikhallstrm/hello-world-rnn-83cd7105b767
+#
+#
+
+require "bundler/setup"
+require 'tensor_stream'
+# require 'tensor_stream/opencl'
+require 'pry-byebug'
+
+
+tf = TensorStream
+
+num_epochs = 100
+total_series_length = 50000
+truncated_backprop_length = 15
+state_size = 4
+num_classes = 2
+echo_step = 3
+batch_size = 5
+num_batches = total_series_length / batch_size / truncated_backprop_length
+randomizer = TensorStream.random_uniform([total_series_length], minval: 0, maxval: 2)
+
+
+def generate_data(randomizer, total_series_length, batch_size, echo_step)
+  x = randomizer.eval
+  y = x.rotate(-echo_step)
+
+  y[echo_step] = 0
+
+  x = TensorStream::TensorShape.reshape(x, [batch_size, -1]) # The first index changing slowest, subseries as rows
+  y = TensorStream::TensorShape.reshape(y, [batch_size, -1])
+  [x, y]
+end
+
+batchX_placeholder = tf.placeholder(:float32, shape: [batch_size, truncated_backprop_length], name: 'batch_x')
+batchY_placeholder = tf.placeholder(:int32, shape: [batch_size, truncated_backprop_length], name: 'batch_y')
+
+init_state = tf.placeholder(:float32, shape: [batch_size, state_size], name: 'init_state')
+
+
+W = tf.variable(tf.random_uniform([state_size+1, state_size]), dtype: :float32, name: 'W')
+b = tf.variable(tf.zeros([state_size]), dtype: :float32, name: 'b')
+
+W2 = tf.variable(tf.random_uniform([state_size, num_classes]), dtype: :float32, name: 'W2')
+b2 = tf.variable(tf.zeros([num_classes]), dtype: :float32, name: 'b2')
+
+
+inputs_series = tf.unpack(batchX_placeholder, axis: 1)
+labels_series = tf.unpack(batchY_placeholder, axis: 1)
+
+current_state = init_state
+states_series = []
+
+inputs_series.each do |current_input|
+  current_input = tf.reshape(current_input, [batch_size, 1])
+  input_and_state_concatenated = tf.concat([current_input, current_state], 1) # Increasing number of columns
+  next_state = tf.tanh(tf.matmul(input_and_state_concatenated, W) + b) # Broadcasted addition
+  states_series << next_state
+  current_state = next_state
+end
+
+logits_series = states_series.collect do |state|
+  tf.matmul(state, W2) + b2
+end
+
+predictions_series = logits_series.collect do |logits|
+  tf.nn.softmax(logits)
+end
+
+losses = logits_series.zip(labels_series).collect do |logits, labels|
+  tf.nn.sparse_softmax_cross_entropy_with_logits(logits: logits, labels: labels)
+end
+total_loss = tf.reduce_mean(losses)
+
+train_step = TensorStream::Train::AdagradOptimizer.new(0.1).minimize(total_loss)
+
+puts "#{tf.get_default_graph.nodes.keys.size} nodes created"
+zeros_state = tf.zeros([batch_size, state_size]).eval
+tf.session do |sess|
+  sess.run(tf.global_variables_initializer)
+  (0..num_epochs).each do |epoch_idx|
+    x, y = generate_data(randomizer, total_series_length, batch_size, echo_step)
+    _current_state = zeros_state
+    print("New data, epoch", epoch_idx, "\n")
+    (0..num_batches - 1).each do |batch_idx|
+      start_idx = batch_idx * truncated_backprop_length
+      end_idx = start_idx + truncated_backprop_length
+
+      batchX = x.map { |x| x[start_idx...end_idx] }
+      batchY = y.map { |y| y[start_idx...end_idx] }
+
+      _total_loss, _train_step, _current_state, _predictions_series = sess.run(
+        [total_loss, train_step, current_state, predictions_series],
+        feed_dict: {
+          batchX_placeholder => batchX,
+          batchY_placeholder => batchY,
+          init_state => _current_state
+        })
+
+      if batch_idx%10 == 0
+        print("Step",batch_idx, " Loss ", _total_loss, "\n")
+      end
+    end
+  end
+end
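The task this sample learns: y is x rotated by echo_step positions, so the network must reproduce each input bit three steps later. A plain-Ruby sketch of the data relationship, with an arbitrary example sequence (no TensorStream involved):

    echo_step = 3
    x = [1, 0, 1, 1, 0, 0, 1, 0]
    y = x.rotate(-echo_step) # => [0, 1, 0, 1, 0, 1, 1, 0]
    # y[i] == x[i - echo_step] for i >= echo_step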
metadata CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: tensor_stream
 version: !ruby/object:Gem::Version
-  version: 0.9.2
+  version: 0.9.5
 platform: ruby
 authors:
 - Joseph Emmanuel Dayo
 autorequire:
 bindir: exe
 cert_chain: []
-date: 2018-10-21 00:00:00.000000000 Z
+date: 2018-11-04 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: bundler
@@ -275,12 +275,16 @@ files:
 - lib/tensor_stream/graph_serializers/packer.rb
 - lib/tensor_stream/graph_serializers/pbtext.rb
 - lib/tensor_stream/graph_serializers/serializer.rb
+- lib/tensor_stream/helpers/infer_shape.rb
 - lib/tensor_stream/helpers/op_helper.rb
 - lib/tensor_stream/helpers/string_helper.rb
 - lib/tensor_stream/images.rb
 - lib/tensor_stream/initializer.rb
 - lib/tensor_stream/math_gradients.rb
+- lib/tensor_stream/monkey_patches/array.rb
+- lib/tensor_stream/monkey_patches/float.rb
 - lib/tensor_stream/monkey_patches/integer.rb
+- lib/tensor_stream/monkey_patches/patch.rb
 - lib/tensor_stream/nn/nn_ops.rb
 - lib/tensor_stream/operation.rb
 - lib/tensor_stream/ops.rb
@@ -293,6 +297,7 @@ files:
 - lib/tensor_stream/train/adagrad_optimizer.rb
 - lib/tensor_stream/train/adam_optimizer.rb
 - lib/tensor_stream/train/gradient_descent_optimizer.rb
+- lib/tensor_stream/train/learning_rate_decay.rb
 - lib/tensor_stream/train/momentum_optimizer.rb
 - lib/tensor_stream/train/optimizer.rb
 - lib/tensor_stream/train/rmsprop_optimizer.rb
@@ -315,6 +320,7 @@ files:
 - samples/multigpu.rb
 - samples/nearest_neighbor.rb
 - samples/raw_neural_net_sample.rb
+- samples/rnn.rb
 - tensor_stream.gemspec
 homepage: http://www.github.com/jedld/tensor_stream
 licenses: