tensor_stream 0.8.0 → 0.8.1

This diff represents the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the package versions as they appear in their respective public registries.
@@ -55,7 +55,8 @@ module TensorStream
  end
  tensor_program
  end
- TensorStream.group(gs)
+
+ gs
  end

  ##
@@ -71,7 +71,7 @@ module TensorStream
  value = delegate_to_evaluator(e, context, {})
  recursive_eval(value)
  end
- result.size == 1 ? result.first : result
+ args.size == 1 ? result.first : result
  end

  def list_devices
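
The hunk above makes Session#run unwrap its return value based on how many fetches were passed in rather than how many results came back, so a single Array fetch stays an Array. A minimal sketch of the intended behavior (hypothetical tensors, assuming the standard TensorStream session API):

  require 'tensor_stream'

  sess = TensorStream.session
  a = TensorStream.constant(1.0)
  b = TensorStream.constant(2.0)

  sess.run(a)     # one fetch   => bare value, 1.0
  sess.run(a, b)  # two fetches => [1.0, 2.0]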
@@ -111,17 +111,26 @@ module TensorStream
  end

  def delegate_to_evaluator(tensor_arr, session_context, context)
- arr = tensor_arr.is_a?(Array) ? tensor_arr : [tensor_arr]
- result = arr.collect do |tensor|
- session_context[:_cache][:placement][tensor.name] = assign_evaluator(tensor) if session_context[:_cache][:placement][tensor.name].nil?
-
- session_context[:_cache][:placement][tensor.name][1].run_with_buffer(tensor, session_context, context)
+ if tensor_arr.is_a?(Array)
+ tensor_arr.collect do |tensor|
+ if tensor.is_a?(Array)
+ delegate_to_evaluator(tensor, session_context, context)
+ else
+ run_with_session_context(tensor, session_context, context)
+ end
+ end
+ else
+ run_with_session_context(tensor_arr, session_context, context)
  end
- result.size == 1 ? result.first : result
  end

  protected

+ def run_with_session_context(tensor, session_context, context)
+ session_context[:_cache][:placement][tensor.name] = assign_evaluator(tensor) if session_context[:_cache][:placement][tensor.name].nil?
+ session_context[:_cache][:placement][tensor.name][1].run_with_buffer(tensor, session_context, context)
+ end
+
  def recursive_eval(value, depth = 2)
  if value.is_a?(Array) && depth > 0
  value.collect { |v| recursive_eval(v, depth - 1) }
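
With the recursion above, fetch arguments may now be nested Arrays: delegate_to_evaluator descends into inner Arrays and preserves their shape instead of assuming a flat list. A hedged sketch (hypothetical tensors a and b as before):

  # nested fetch structures are evaluated element-wise, keeping the nesting
  sess.run(a, [b, [a, b]])  # => roughly [1.0, [2.0, [1.0, 2.0]]]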
@@ -151,7 +160,7 @@ module TensorStream
  def prepare_evaluators(tensor_arr, context)
  context[:_cache][:placement] ||= {}

- tensor_arr = tensor_arr.is_a?(Array) ? tensor_arr : [tensor_arr]
+ tensor_arr = tensor_arr.is_a?(Array) ? tensor_arr.flatten : [tensor_arr]
  tensor_arr.each do |tensor|
  next if context[:_cache][:placement][tensor.name]

@@ -0,0 +1,87 @@
+ module TensorStream
+ module Train
+ # High Level implementation of the gradient descent algorithm
+ class AdamOptimizer < Optimizer
+ include TensorStream::OpHelper
+
+ attr_accessor :learning_rate
+
+ def initialize(learning_rate = 0.001, beta1=0.9, beta2=0.999, epsilon = 1e-8,
+ use_locking: false, name: "Adam")
+ @learning_rate = learning_rate
+ @beta1 = beta1
+ @beta2 = beta2
+ @epsilon = epsilon
+
+ # Tensor versions of the constructor arguments, created in _prepare().
+ @lr_t = nil
+ @beta1_t = nil
+ @beta2_t = nil
+ @epsilon_t = nil
+
+ # Created in SparseApply if needed.
+ @updated_lr = nil
+ super(name: name, use_locking: use_locking)
+ end
+
+ protected
+
+ def get_beta_accumulators
+ graph = TensorStream.get_default_graph
+ [ get_non_slot_variable("beta1_power", graph: graph),
+ get_non_slot_variable("beta2_power", graph: graph)]
+ end
+
+ def prepare
+ lr = call_if_callable(@learning_rate)
+ beta1 = call_if_callable(@beta1)
+ beta2 = call_if_callable(@beta2)
+ epsilon = call_if_callable(@epsilon)
+
+ @lr_t = TensorStream.convert_to_tensor(lr, name: "learning_rate")
+ @beta1_t = TensorStream.convert_to_tensor(beta1, name: "beta1")
+ @beta2_t = TensorStream.convert_to_tensor(beta2, name: "beta2")
+ @epsilon_t = TensorStream.convert_to_tensor(epsilon, name: "epsilon")
+ end
+
+ def create_slots(var_list)
+ first_var = var_list.min_by(&:name)
+ create_non_slot_variable(@beta1, "beta1_power", first_var)
+ create_non_slot_variable(@beta2, "beta2_power", first_var)
+
+ # Create slots for the first and second moments.
+ var_list.each do |v|
+ zeros_slot(v, "m", @name)
+ zeros_slot(v, "v", @name)
+ end
+ end
+
+ def apply_dense(grad, var)
+ m = get_slot(var, "m")
+ v = get_slot(var, "v")
+ beta1_power, beta2_power = get_beta_accumulators
+ _op(:apply_adam,
+ var, m, v,
+ TensorStream.cast(beta1_power, var.data_type),
+ TensorStream.cast(beta2_power, var.data_type),
+ TensorStream.cast(@lr_t, var.data_type),
+ TensorStream.cast(@beta1_t, var.data_type),
+ TensorStream.cast(@beta2_t, var.data_type),
+ TensorStream.cast(@epsilon_t, var.data_type),
+ grad, use_locking: @use_locking)
+ end
+
+ def finish(update_ops, name_scope)
+ TensorStream.control_dependencies(update_ops) do
+ beta1_power, beta2_power = get_beta_accumulators
+ update_beta1 = nil, update_beta2 = nil
+ TensorStream.colocate_with(beta1_power) do
+ update_beta1 = beta1_power.assign(beta1_power * @beta1_t, use_locking: @use_locking)
+ update_beta2 = beta2_power.assign(beta2_power * @beta2_t, use_locking: @use_locking)
+ end
+ TensorStream.group(update_ops + [update_beta1, update_beta2], name: name_scope)
+ end
+ end
+ end
+ end
+ end
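
The new AdamOptimizer plugs into the same minimize flow as the existing optimizers. A minimal usage sketch (cost, sess, and the feed tensors are assumed to exist, as in the samples further down):

  # defaults follow the signature above: lr 0.001, beta1 0.9, beta2 0.999, epsilon 1e-8
  optimizer = TensorStream::Train::AdamOptimizer.new(0.001, 0.9, 0.999, 1e-8)
  train_op  = optimizer.minimize(cost)
  sess.run(train_op, feed_dict: { X => batch_x, Y => batch_y })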
@@ -6,8 +6,9 @@ module TensorStream

  attr_accessor :learning_rate

- def initialize(learning_rate, _options = {})
+ def initialize(learning_rate, use_locking: false, name: "GradientDescent")
  @learning_rate = learning_rate
+ super(name: name, use_locking: use_locking)
  end

  protected
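
GradientDescentOptimizer now takes keywords instead of a throwaway options hash and registers itself with the base Optimizer. A hedged construction sketch showing the new call shape:

  # keywords shown are the defaults from the signature above
  TensorStream::Train::GradientDescentOptimizer.new(0.01, name: "GradientDescent", use_locking: false)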
@@ -12,6 +12,7 @@ module TensorStream
  @use_locking = use_locking
  raise TensorStream::ValueError, "Must specify the optimizer name" unless @name
  @slots = {}
+ @non_slots = {}
  end

  def minimize(loss, var_list: nil, grad_loss: nil, global_step: nil, name: nil)
@@ -86,7 +87,7 @@ module TensorStream
  # no implementation
  end

- def apply_dense(grad, var)
+ def apply_dense(_grad, _var)
  raise TensorStream::NotImplementedError, "not implemented"
  end

@@ -95,7 +96,7 @@ module TensorStream
  #
  # Args:
  # var: Variable - A Variable object
- # slot_name: string - Name fot the slot
+ # slot_name: string - Name for the slot
  # op_name: string - Name to use when scoping the Variable that needs to be created
  def zeros_slot(var, slot_name, op_name)
  named_slots = slot_dict(slot_name)
@@ -124,6 +125,28 @@ module TensorStream
  def var_key(var)
  [var.op.graph, var.op.name]
  end
+
+ def get_non_slot_variable(name, graph: nil)
+ non_slot = @non_slots.fetch([name, graph], nil)
+ non_slot
+ end
+
+ def call_if_callable(param)
+ param.kind_of?(Proc) ? param.call : param
+ end
+
+
+ def create_non_slot_variable(initial_value, name, colocate_with)
+ graph = colocate_with.graph
+
+ key = [name, graph]
+ v = @non_slots.fetch(key, nil)
+ if v.nil?
+ v = TensorStream.variable(initial_value, name: name, trainable: false)
+ @non_slots[key] = v
+ end
+ v
+ end
  end
  end
  end
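
call_if_callable means hyperparameters can be supplied as Procs that are only resolved when the optimizer prepares its tensors. A hedged sketch (hypothetical lambda):

  # the learning rate is resolved via call_if_callable inside prepare
  lr = -> { 0.001 }
  optimizer = TensorStream::Train::AdamOptimizer.new(lr)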
@@ -23,7 +23,7 @@ module TensorStream
  #
  # Returns: A `Variable` object
  def create_slot(primary, val, name, colocate_with_primary: true)
- TensorStream.variable_scope(primary.op.name + "/" + name) do
+ TensorStream.variable_scope(nil, primary.op.name + "/" + name) do
  if colocate_with_primary
  TensorStream.colocate_with(primary) do
  return create_slot_var(primary, val, "")
@@ -2,6 +2,7 @@ require 'tensor_stream/train/slot_creator'
  require 'tensor_stream/train/optimizer'
  require 'tensor_stream/train/gradient_descent_optimizer'
  require 'tensor_stream/train/momentum_optimizer'
+ require 'tensor_stream/train/adam_optimizer'
  require 'tensor_stream/train/saver'

  module TensorStream
@@ -66,9 +66,25 @@ module TensorStream
  tensor
  end

- def variable_scope(scope = nil, reuse: nil, initializer: nil)
- Thread.current[:tensor_stream_variable_scope] ||= []
+ ##
+ # Defines a variable context manager
+ def variable_scope(scope = nil, default_name = nil, reuse: nil, initializer: nil)
+ Thread.current[:tensor_stream_variable_scope] ||= [ VariableScope.new ]
+
+ # uniquenifier
+ if scope.nil? && default_name
+ same_names = get_variable_scope.used_names.select { |s| s.start_with?(default_name) }
+ new_name = default_name
+ index = 1
+ while same_names.include?(new_name)
+ new_name = "#{default_name}_#{index}"
+ index += 1
+ end
+ scope = new_name
+ end
+
  variable_scope = VariableScope.new(name: scope, reuse: reuse, initializer: initializer)
+ get_variable_scope.register_name(scope || "")
  Thread.current[:tensor_stream_variable_scope] << variable_scope
  scope_name = __v_scope_name
  if block_given?
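
With the new default_name parameter, an anonymous scope is uniquified against the enclosing scope's used_names, which is how the slot creator above avoids name collisions. A sketch of the expected naming, assuming both calls happen under the same enclosing scope:

  TensorStream.variable_scope(nil, "slot") { }  # scope name "slot"
  TensorStream.variable_scope(nil, "slot") { }  # scope name "slot_1"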
@@ -100,8 +116,13 @@ module TensorStream
  end

  def get_variable_scope
- return VariableScope.new unless Thread.current[:tensor_stream_variable_scope]
- Thread.current[:tensor_stream_variable_scope].last || VariableScope.new
+ if !Thread.current[:tensor_stream_variable_scope]
+ variable_scope = VariableScope.new
+ Thread.current[:tensor_stream_variable_scope] = [variable_scope]
+ return variable_scope
+ end
+
+ Thread.current[:tensor_stream_variable_scope].last
  end

  def __v_scope_name
@@ -40,7 +40,7 @@ module TensorStream
  init_op
  end

- def assign(value, name: nil)
+ def assign(value, name: nil, use_locking: false)
  _a, value = TensorStream.check_data_types(self, value)
  _op(:assign, self, value, name: name)
  end
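
Note that Variable#assign now accepts a use_locking keyword for call-site parity with the optimizers, but this hunk does not forward it to the :assign op; a call like the hypothetical sketch below is accepted, with locking not yet wired through:

  var.assign(new_value, use_locking: true)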
@@ -1,15 +1,21 @@
  module TensorStream
  class VariableScope
  attr_accessor :name, :reuse, :initializer
+ attr_reader :used_names

- def initialize(name: '', reuse: nil, initializer: nil)
+ def initialize(name: nil, reuse: nil, initializer: nil)
  @name = name
  @reuse = reuse
  @initializer = initializer
+ @used_names = []
  end

  def get_variable(name, dtype: nil, shape: nil, initializer: nil, trainable: true, collections: nil, validate_shape: false)
  TensorStream::Variable.new(dtype || :float32, nil, shape, self, collections: collections, name: name, initializer: initializer, trainable: trainable)
  end
+
+ def register_name(name)
+ @used_names << name unless @used_names.include?(name)
+ end
  end
  end
@@ -1,5 +1,5 @@
  module TensorStream
- VERSION = '0.8.0'.freeze
+ VERSION = '0.8.1'.freeze

  def self.version
  VERSION
data/samples/iris.rb CHANGED
@@ -1,6 +1,5 @@
- require "bundler/setup"
  require 'tensor_stream'
- # require 'tensor_stream/evaluator/opencl/opencl_evaluator'
+ require 'tensor_stream/evaluator/opencl/opencl_evaluator'

  # This neural network will predict the species of an iris based on sepal and petal size
  # Dataset: http://en.wikipedia.org/wiki/Iris_flower_data_set
@@ -66,8 +65,8 @@ end
  x_size = x_train[0].size
  y_size = y_train[0].size
  h_size = 256
- X = tf.placeholder(:float, shape: [nil, x_size])
- y = tf.placeholder(:float, shape: [nil, y_size])
+ X = tf.placeholder(:float32, shape: [nil, x_size])
+ y = tf.placeholder(:float32, shape: [nil, y_size])

  # Weight initializations
  w_1 = init_weights([x_size, h_size])
@@ -79,13 +78,17 @@ predict = tf.argmax(yhat, 1)

  # Backward propagation
  cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels: y, logits: yhat))
- updates = TensorStream::Train::MomentumOptimizer.new(0.01, 0.1, use_nesterov: true).minimize(cost)
+
+ updates = TensorStream::Train::GradientDescentOptimizer.new(0.01).minimize(cost)
+ # updates = TensorStream::Train::MomentumOptimizer.new(0.01, 0.5, use_nesterov: true).minimize(cost)
+ # updates = TensorStream::Train::AdamOptimizer.new.minimize(cost)

  # Run SGD
  sess = tf.session
  init = tf.global_variables_initializer
  sess.run(init)
-
+ loss = sess.run(cost, feed_dict: { X => x_test, y => y_test })
+ puts "loss test data set #{loss}"
  loss = sess.run(cost, feed_dict: { X => x_train, y => y_train })
  puts "Testing the untrained network..."
  puts loss
@@ -1,8 +1,8 @@
- require "bundler/setup"
+ # Linear Regression sample, using SGD and auto-differentiation
+
  require 'tensor_stream'
- require 'benchmark'

- tf = TensorStream
+ tf = TensorStream # use tf to make it look like TensorFlow

  learning_rate = 0.01
  momentum = 0.5
@@ -29,7 +29,9 @@ pred = X * W + b
  # Mean squared error
  cost = ((pred - Y) ** 2).reduce(:+) / ( 2 * n_samples)

- # optimizer = TensorStream::Train::MomentumOptimizer.new(learning_rate, momentum).minimize(cost)
+ # Other possible Optimizers
+ # optimizer = TensorStream::Train::MomentumOptimizer.new(learning_rate, momentum, use_nesterov: true).minimize(cost)
+ # optimizer = TensorStream::Train::AdamOptimizer.new(learning_rate).minimize(cost)
  optimizer = TensorStream::Train::GradientDescentOptimizer.new(learning_rate).minimize(cost)

  # Initialize the variables (i.e. assign their default value)
@@ -6,9 +6,9 @@ This example is using the MNIST database of handwritten digits
  Author: Aymeric Damien
  Project: https://github.com/aymericdamien/TensorFlow-Examples/

- Make sure to install the mnist-learn gem
+ Make sure to install the mnist-learn gem !!
  '''
- require "bundler/setup"
+
  require 'tensor_stream'
  require 'mnist-learn'
  require 'tensor_stream/evaluator/opencl/opencl_evaluator'
@@ -9,12 +9,12 @@ Links:

  Author: Aymeric Damien
  Project: https://github.com/aymericdamien/TensorFlow-Examples/
+
+ The mnist-learn gem is required as well as an OpenCL compatible device with drivers correctly installed
  """
- require "bundler/setup"
  require 'tensor_stream'
  require 'mnist-learn'
  require 'tensor_stream/evaluator/opencl/opencl_evaluator'
- require 'pry-byebug'

  tf = TensorStream
  # Import MNIST data
@@ -23,8 +23,9 @@ mnist = Mnist.read_data_sets('/tmp/data', one_hot: true)
  puts "downloading finished"

  # Parameters
- learning_rate = 0.1
- num_steps = 500
+ learning_rate = 0.001
+ momentum = 0.01
+ num_steps = 100
  batch_size = 128
  display_step = 5

@@ -71,7 +72,7 @@ prediction = tf.nn.softmax(logits)
  loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(
  logits: logits, labels: Y))

- optimizer = TensorStream::Train::GradientDescentOptimizer.new(learning_rate)
+ optimizer = TensorStream::Train::MomentumOptimizer.new(learning_rate, momentum, use_nesterov: true)
  train_op = optimizer.minimize(loss_op)

  # Evaluate model
@@ -88,28 +89,24 @@ tf.session do |sess|
  # Run the initializer
  sess.run(init)

- # print("Testing Accuracy:", \
- # sess.run(accuracy, feed_dict: { X => mnist.test.images,
- # Y => mnist.test.labels}))
+ print("Testing Accuracy:", \
+ sess.run(accuracy, feed_dict: { X => mnist.test.images,
+ Y => mnist.test.labels}))

  (1..num_steps+1).each do |step|
  batch_x, batch_y = mnist.train.next_batch(batch_size)
  # Run optimization op (backprop)
- puts "."
  sess.run(train_op, feed_dict: { X => batch_x, Y => batch_y })
  if step % display_step == 0 || step == 1
- # Calculate batch loss and accuracy
- loss, acc = sess.run([loss_op, accuracy], feed_dict: { X => batch_x, Y => batch_y})
- print("Step " + step.to_s + ", Minibatch Loss= " + \
- loss.to_s + ", Training Accuracy= " + \
- acc.to_s)
+ # Calculate batch loss and accuracy
+ loss, acc = sess.run([loss_op, accuracy], feed_dict: { X => batch_x, Y => batch_y})
+ print("\nStep " + step.to_s + ", Minibatch Loss= " + \
+ loss.to_s + ", Training Accuracy= " + \
+ acc.to_s)
  end
  end
-
- print("Optimization Finished!")
-
- # Calculate accuracy for MNIST test images
- print("Testing Accuracy:", \
- sess.run(accuracy, feed_dict: { X => mnist.test.images,
+ print("\nOptimization Finished!")
+ print("\nTesting Accuracy after optimization:", \
+ sess.run(accuracy, feed_dict: { X => mnist.test.images,
  Y => mnist.test.labels}))
  end