tensor_stream 0.8.5 → 0.8.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -68,6 +68,7 @@ module TensorStream
    end
   end
   result = args.collect do |e|
+   next e.value if e.is_a?(Tensor) && e.is_const && e.value
    value = delegate_to_evaluator(e, context, {})
    recursive_eval(value)
  end
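
The new guard line short-circuits fetch arguments that are already-materialized constants, returning their cached value instead of delegating to an evaluator. A minimal sketch of the effect, assuming the gem's `tf.constant` and `Session#run` API used in the bundled samples:

    require 'tensor_stream'

    tf = TensorStream
    c = tf.constant([1, 2, 3])

    tf.session do |sess|
      # A constant with a cached value is returned directly,
      # skipping delegate_to_evaluator entirely.
      p sess.run(c)  # => [1, 2, 3]
    end
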
@@ -27,9 +27,7 @@ module TensorStream
   # check if single dimension array is passed
   options[:value] = reshape(options[:value], shape.reverse.dup) if shape.size >= 2 && !options[:value].empty? && !options[:value][0].is_a?(Array)

-  @value = options[:value].collect do |v|
-    v.is_a?(Tensor) ? Tensor.cast_dtype(v, @data_type) : v
-  end
+  @value = options[:value].map { |v| v.is_a?(Tensor) ? Tensor.cast_dtype(v, @data_type) : v }
  elsif !shape.empty?
   @value = reshape(Tensor.cast_dtype(options[:value], @data_type), shape.dup)
  else
@@ -88,8 +86,7 @@ module TensorStream
   end

   def %(other)
-    _a, other = TensorStream.check_data_types(self, other)
-    _op(:mod, self, TensorStream.convert_to_tensor(other, dtype: data_type))
+    TensorStream.mod(self, other)
   end

   def floor
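
`%` now simply delegates to `TensorStream.mod`, so the operator and the module-level helper build the same op. A short sketch (the constant operands are illustrative only):

    require 'tensor_stream'

    tf = TensorStream
    a = tf.constant(10)
    b = tf.constant(3)

    tf.session do |sess|
      p sess.run(a % b)         # operator form, routed through TensorStream.mod
      p sess.run(tf.mod(a, b))  # equivalent module-level form
    end
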
@@ -292,8 +289,8 @@ module TensorStream
   end

   def add_consumer(consumer)
-    @consumers ||= []
-    @consumers << consumer.name if !@consumers.include?(consumer.name) && consumer.name != name
+    @consumers ||= Set.new
+    @consumers << consumer.name if consumer.name != name
   end

   def setup_output(consumer)
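
Switching `@consumers` to a `Set` makes the explicit `include?` guard unnecessary, because duplicate insertions are ignored (this assumes `require 'set'` is loaded somewhere in the gem). The Ruby stdlib semantics being relied on:

    require 'set'

    consumers = Set.new
    consumers << "add_1"
    consumers << "add_1"  # duplicate insert is a no-op
    consumers.to_a        # => ["add_1"]
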
@@ -23,6 +23,10 @@ module TensorStream
   shape.size
   end

+  def scalar?
+    shape.size.zero?
+  end
+
   def known?
   return false if shape.nil?
   shape.each { |s| return false if s.nil? }
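
The new `scalar?` predicate just reports a rank-0 shape. A hedged usage sketch, assuming `Tensor#shape` returns a `TensorShape` as it does elsewhere in this gem:

    require 'tensor_stream'

    tf = TensorStream
    tf.constant(3.0).shape.scalar?        # => true  (rank 0)
    tf.constant([1.0, 2.0]).shape.scalar? # => false (rank 1)
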
@@ -54,8 +58,13 @@ module TensorStream
   end

   def self.reshape(arr, new_shape)
-    return arr if new_shape.empty?
+    arr = arr.is_a?(Array) ? arr.flatten : [arr]
+
+    new_shape = TensorShape.fix_inferred_elements(new_shape, arr.size)
+    return arr[0] if arr.size == 1 && new_shape.empty?
+
     new_shape = new_shape.dup
+
     s = new_shape.shift

     if new_shape.size.zero?
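
`TensorShape.reshape` now flattens its input and resolves inferred (-1) dimensions through `fix_inferred_elements` before rebuilding the nested array, which is what lets the new rnn.rb sample pass a `[batch_size, -1]` target shape. A small sketch of the expected behavior:

    require 'tensor_stream'

    # Six elements into 2 rows; the -1 column count is inferred as 3.
    TensorStream::TensorShape.reshape([1, 2, 3, 4, 5, 6], [2, -1])
    # => [[1, 2, 3], [4, 5, 6]]

    # A single-element array with an empty target shape collapses to a scalar.
    TensorStream::TensorShape.reshape([42], [])
    # => 42
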
lib/tensor_stream/train/adagrad_optimizer.rb ADDED
@@ -0,0 +1,46 @@
+ module TensorStream
+   module Train
+     # High Level implementation of the Adagrad algorithm
+     class AdagradOptimizer < Optimizer
+       include TensorStream::OpHelper
+
+       attr_accessor :learning_rate
+
+       def initialize(learning_rate, initial_accumulator_value = 0.1,
+                      use_locking: false, name: "Adagrad")
+         @learning_rate = learning_rate
+         @initial_accumulator_value = initial_accumulator_value
+         @learning_rate_tensor = nil
+         super(name: name, use_locking: use_locking)
+       end
+
+       protected
+
+       def create_slots(var_list)
+         var_list.each do |v|
+           dtype = v.data_type
+           init = nil
+           if v.shape.known?
+             init = TensorStream.constant_initializer(@initial_accumulator_value, dtype: dtype)
+           else
+             init_constant = TensorStream.fill(TensorStream.shape(v), @initial_accumulator_value)
+             init = TensorStream.cast(init_constant, dtype)
+           end
+           get_or_make_slot_with_initializer(v, init, v.shape, dtype, "accumulator", @name)
+         end
+       end
+
+       def prepare
+         learning_rate = call_if_callable(@learning_rate)
+         @learning_rate_tensor = TensorStream.convert_to_tensor(learning_rate, name: "learning_rate")
+       end
+
+       def apply_dense(grad, var)
+         acc = get_slot(var, "accumulator")
+         _op(:apply_adagrad,
+             var, acc, TensorStream.cast(@learning_rate_tensor, var.data_type),
+             grad, use_locking: @use_locking)
+       end
+     end
+   end
+ end
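
For context, a minimal usage sketch of the new optimizer in the style of the bundled samples (the quadratic cost and variable below are illustrative only):

    require 'tensor_stream'

    tf = TensorStream
    x = tf.variable(5.0, dtype: :float32, name: 'x')
    cost = x * x  # toy cost to minimize

    # 0.3 is the learning rate; the per-variable "accumulator" slot created in
    # create_slots is what gives Adagrad its adaptive step sizes.
    train_step = TensorStream::Train::AdagradOptimizer.new(0.3).minimize(cost)

    tf.session do |sess|
      sess.run(tf.global_variables_initializer)
      50.times { sess.run(train_step) }
      puts sess.run(x)  # should move toward 0
    end
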
@@ -147,6 +147,18 @@ module TensorStream
     end
     v
   end
+
+  ##
+  # Find or create a slot for a variable, using an Initializer.
+  def get_or_make_slot_with_initializer(var, initializer, shape, dtype, slot_name, op_name)
+    named_slots = slot_dict(slot_name)
+    if !named_slots.key?(var_key(var))
+      new_slot_variable = create_slot_with_initializer(
+        var, initializer, shape, dtype, op_name)
+      named_slots[var_key(var)] = new_slot_variable
+    end
+    named_slots[var_key(var)]
+  end
   end
  end
 end
lib/tensor_stream/train/rmsprop_optimizer.rb ADDED
@@ -0,0 +1,84 @@
+ module TensorStream
+   module Train
+     # High Level implementation of the RMSProp algorithm
+     # This is a straight port from TensorFlow's rmsprop.py
+     class RMSPropOptimizer < Optimizer
+       include TensorStream::OpHelper
+
+       attr_accessor :learning_rate
+
+       ##
+       # Optimizer that implements the RMSProp algorithm.
+       #
+       # [paper](http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf).
+       def initialize(learning_rate, decay = 0.9, momentum = 0.0, epsilon = 1e-10, centered: false,
+                      use_locking: false, name: "RMSProp")
+         @learning_rate = learning_rate
+         @decay = decay
+         @momentum = momentum
+         @epsilon = epsilon
+         @centered = centered
+
+         # Tensor versions of the constructor arguments, created in _prepare().
+         @learning_rate_tensor = nil
+         @decay_tensor = nil
+         @momentum_tensor = nil
+         @epsilon_tensor = nil
+
+         super(name: name, use_locking: use_locking)
+       end
+
+       protected
+
+       def prepare
+         lr = call_if_callable(@learning_rate)
+         decay = call_if_callable(@decay)
+         momentum = call_if_callable(@momentum)
+         epsilon = call_if_callable(@epsilon)
+
+         @learning_rate_tensor = TensorStream.convert_to_tensor(lr, name: "learning_rate")
+         @decay_tensor = TensorStream.convert_to_tensor(decay, name: "decay")
+         @momentum_tensor = TensorStream.convert_to_tensor(momentum, name: "momentum")
+         @epsilon_tensor = TensorStream.convert_to_tensor(epsilon, name: "epsilon")
+       end
+
+       def create_slots(var_list)
+         # Create slots for the first and second moments.
+         var_list.each do |v|
+           init_rms = if v.shape.known?
+                        TensorStream.ones_initializer(dtype: v.data_type)
+                      else
+                        TensorStream.ones_like(v)
+                      end
+
+           get_or_make_slot_with_initializer(v, init_rms, v.shape, v.data_type, "rms", @name)
+
+           zeros_slot(v, "mg", @name) if @centered
+           zeros_slot(v, "momentum", @name)
+         end
+       end
+
+       def apply_dense(grad, var)
+         rms = get_slot(var, "rms")
+         mom = get_slot(var, "momentum")
+
+         if @centered
+           mg = get_slot(var, "mg")
+           _op(:apply_centered_rms_prop, var, mg, rms, mom,
+               TensorStream.cast(@learning_rate_tensor, var.data_type),
+               TensorStream.cast(@decay_tensor, var.data_type),
+               TensorStream.cast(@momentum_tensor, var.data_type),
+               TensorStream.cast(@epsilon_tensor, var.data_type),
+               grad, use_locking: @use_locking)
+         else
+           _op(:apply_rms_prop, var, rms, mom,
+               TensorStream.cast(@learning_rate_tensor, var.data_type),
+               TensorStream.cast(@decay_tensor, var.data_type),
+               TensorStream.cast(@momentum_tensor, var.data_type),
+               TensorStream.cast(@epsilon_tensor, var.data_type),
+               grad, use_locking: @use_locking)
+         end
+       end
+     end
+   end
+ end
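
The iris and linear-regression samples below show the intended usage; a condensed, self-contained sketch (the cost function here is illustrative only):

    require 'tensor_stream'

    tf = TensorStream
    w = tf.variable(2.0, dtype: :float32, name: 'w')
    cost = (w - 5.0) ** 2

    # Non-centered RMSProp, as enabled in samples/iris.rb.
    updates = TensorStream::Train::RMSPropOptimizer.new(0.01).minimize(cost)
    # The centered variant additionally maintains the "mg" (mean gradient) slot:
    # updates = TensorStream::Train::RMSPropOptimizer.new(0.01, centered: true).minimize(cost)

    tf.session do |sess|
      sess.run(tf.global_variables_initializer)
      100.times { sess.run(updates) }
      puts sess.run(w)  # should move toward 5.0
    end
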
@@ -5,9 +5,9 @@ module TensorStream

   ##
   # Helper function for creating a slot variable.
-  def create_slot_var(primary, val, scope)
-    slot = get_variable(scope, initializer: val, trainable: false,
-                        validate_shape: val.shape.is_fully_defined?)
+  def create_slot_var(primary, val, scope, shape)
+    slot = get_variable(scope, initializer: val, trainable: false, shape: shape,
+                        validate_shape: val.shape && val.shape.known?)
     slot
   end

@@ -24,16 +24,21 @@ module TensorStream
   # Returns: A `Variable` object
   def create_slot(primary, val, name, colocate_with_primary: true)
    TensorStream.variable_scope(nil, primary.op.name + "/" + name) do
-     if colocate_with_primary
-       TensorStream.colocate_with(primary) do
-         return create_slot_var(primary, val, "")
-       end
-     else
-       return create_slot_var(primary, val, "")
+     return create_slot_var(primary, val, "", nil) if colocate_with_primary
+
+     TensorStream.colocate_with(primary) do
+       return create_slot_var(primary, val, "", nil)
      end
    end
   end

+  def create_slot_with_initializer(primary, initializer, shape, dtype, name, colocate_with_primary: true)
+    prefix = primary.op.name
+    TensorStream.variable_scope(nil, prefix + "/" + name) do
+      create_slot_var(primary, initializer, "", shape.shape)
+    end
+  end
+
   ##
   # Create a slot initialized to 0 with same shape as the primary object.
   #
@@ -4,6 +4,8 @@ require 'tensor_stream/train/gradient_descent_optimizer'
  require 'tensor_stream/train/momentum_optimizer'
  require 'tensor_stream/train/adam_optimizer'
  require 'tensor_stream/train/adadelta_optimizer'
+ require 'tensor_stream/train/adagrad_optimizer'
+ require 'tensor_stream/train/rmsprop_optimizer'
  require 'tensor_stream/train/saver'

  module TensorStream
@@ -225,13 +225,15 @@ module TensorStream
   end

   def convert_to_tensor(value, dtype: nil, name: nil, preferred_dtype: nil)
+    return value if value.is_a?(Tensor)
     return convert_to_tensor(value.call) if value.is_a?(Proc)
+    if value.is_a?(Array) && value[0].is_a?(Tensor)
+      return TensorStream.stack(value) if value.size > 1

-    if !value.is_a?(Tensor)
-      i_cons(value, dtype: dtype || Tensor.detect_type(value), name: name)
-    else
-      value
+      return TensorStream.expand_dims(value[0], 0)
     end
+
+    i_cons(value, dtype: dtype || Tensor.detect_type(value), name: name)
   end

   def check_allowed_types(input, types)
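
`convert_to_tensor` now short-circuits Tensor inputs and converts an Array of Tensor objects by stacking them (or expanding a single-element array along axis 0); plain literals still go through `i_cons`. A hedged sketch of the new behavior:

    require 'tensor_stream'

    tf = TensorStream
    a = tf.constant([1, 2])
    b = tf.constant([3, 4])

    stacked = tf.convert_to_tensor([a, b])  # array of tensors -> TensorStream.stack
    lit     = tf.convert_to_tensor(3.0)     # literal -> i_cons, as before

    tf.session do |sess|
      p sess.run(stacked)  # => [[1, 2], [3, 4]]
      p sess.run(lit)      # => 3.0
    end
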
@@ -1,5 +1,5 @@
  module TensorStream
-   VERSION = '0.8.5'.freeze
+   VERSION = '0.8.6'.freeze

   def self.version
     VERSION
data/samples/iris.rb CHANGED
@@ -1,5 +1,6 @@
+ require "bundler/setup"
  require 'tensor_stream'
- require 'tensor_stream/evaluator/opencl/opencl_evaluator'
+ # require 'tensor_stream/evaluator/opencl/opencl_evaluator'

  # This neural network will predict the species of an iris based on sepal and petal size
  # Dataset: http://en.wikipedia.org/wiki/Iris_flower_data_set
@@ -79,9 +80,9 @@ predict = tf.argmax(yhat, 1)
  # Backward propagation
  cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels: y, logits: yhat))

- updates = TensorStream::Train::GradientDescentOptimizer.new(0.01).minimize(cost)
+ # updates = TensorStream::Train::GradientDescentOptimizer.new(0.01).minimize(cost)
  # updates = TensorStream::Train::MomentumOptimizer.new(0.01, 0.5, use_nesterov: true).minimize(cost)
- # updates = TensorStream::Train::AdamOptimizer.new.minimize(cost)
+ updates = TensorStream::Train::RMSPropOptimizer.new(0.01).minimize(cost)

  # Run SGD
  sess = tf.session
@@ -31,9 +31,12 @@ pred = X * W + b
  cost = ((pred - Y) ** 2).reduce(:+) / ( 2 * n_samples)

  # Other possible Optimizers
+
  # optimizer = TensorStream::Train::MomentumOptimizer.new(learning_rate, momentum, use_nesterov: true).minimize(cost)
  # optimizer = TensorStream::Train::AdamOptimizer.new(learning_rate).minimize(cost)
  # optimizer = TensorStream::Train::AdadeltaOptimizer.new(1.0).minimize(cost)
+ # optimizer = TensorStream::Train::AdagradOptimizer.new(0.01).minimize(cost)
+ # optimizer = TensorStream::Train::RMSPropOptimizer.new(0.01, centered: true).minimize(cost)
  optimizer = TensorStream::Train::GradientDescentOptimizer.new(learning_rate).minimize(cost)

data/samples/rnn.rb ADDED
@@ -0,0 +1,105 @@
+ # RNN sample
+ #
+ # Ruby port Example based on article by Erik Hallström
+ # https://medium.com/@erikhallstrm/hello-world-rnn-83cd7105b767
+ #
+ #
+
+ require "bundler/setup"
+ require 'tensor_stream'
+
+ tf = TensorStream
+
+ num_epochs = 100
+ total_series_length = 50000
+ truncated_backprop_length = 15
+ state_size = 4
+ num_classes = 2
+ echo_step = 3
+ batch_size = 5
+ num_batches = total_series_length / batch_size / truncated_backprop_length
+ randomizer = TensorStream.random_uniform([total_series_length], minval: 0, maxval: 2)
+
+
+ def generate_data(randomizer, total_series_length, batch_size, echo_step)
+   x = randomizer.eval
+   y = x.rotate(-echo_step)
+
+   y[echo_step] = 0
+
+   x = TensorStream::TensorShape.reshape(x, [batch_size, -1]) # The first index changing slowest, subseries as rows
+   y = TensorStream::TensorShape.reshape(y, [batch_size, -1])
+   [x, y]
+ end
+
+ batchX_placeholder = tf.placeholder(:float32, shape: [batch_size, truncated_backprop_length], name: 'batch_x')
+ batchY_placeholder = tf.placeholder(:int32, shape: [batch_size, truncated_backprop_length], name: 'batch_y')
+
+ init_state = tf.placeholder(:float32, shape: [batch_size, state_size], name: 'init_state')
+
+
+ W = tf.variable(tf.random_uniform([state_size+1, state_size]), dtype: :float32, name: 'W')
+ b = tf.variable(tf.zeros([state_size]), dtype: :float32, name: 'b')
+
+ W2 = tf.variable(tf.random_uniform([state_size, num_classes]), dtype: :float32, name: 'W2')
+ b2 = tf.variable(tf.zeros([num_classes]), dtype: :float32, name: 'b2')
+
+
+ inputs_series = tf.unpack(batchX_placeholder, axis: 1)
+ labels_series = tf.unpack(batchY_placeholder, axis: 1)
+
+ current_state = init_state
+ states_series = []
+
+ inputs_series.each do |current_input|
+   current_input = tf.reshape(current_input, [batch_size, 1])
+   input_and_state_concatenated = tf.concat([current_input, current_state], 1) # Increasing number of columns
+   next_state = tf.tanh(tf.matmul(input_and_state_concatenated, W) + b) # Broadcasted addition
+   states_series << next_state
+   current_state = next_state
+ end
+
+ logits_series = states_series.collect do |state|
+   tf.matmul(state, W2) + b2
+ end
+
+ predictions_series = logits_series.collect do |logits|
+   tf.nn.softmax(logits)
+ end
+
+ losses = logits_series.zip(labels_series).collect do |logits, labels|
+   tf.nn.sparse_softmax_cross_entropy_with_logits(logits: logits, labels: labels)
+ end
+ total_loss = tf.reduce_mean(losses)
+
+ train_step = TensorStream::Train::AdagradOptimizer.new(0.3).minimize(total_loss)
+
+ puts "#{tf.get_default_graph.nodes.keys.size} nodes created"
+ zeros_state = tf.zeros([batch_size, state_size]).eval
+ tf.session do |sess|
+   sess.run(tf.global_variables_initializer)
+   (0..num_epochs).each do |epoch_idx|
+     x, y = generate_data(randomizer, total_series_length, batch_size, echo_step)
+     _current_state = zeros_state
+     print("New data, epoch", epoch_idx, "\n")
+     (0..num_batches - 1).each do |batch_idx|
+       start_idx = batch_idx * truncated_backprop_length
+       end_idx = start_idx + truncated_backprop_length
+
+       batchX = x.map { |x| x[start_idx...end_idx] }
+       batchY = y.map { |y| y[start_idx...end_idx] }
+
+       _total_loss, _train_step, _current_state, _predictions_series = sess.run(
+         [total_loss, train_step, current_state, predictions_series],
+         feed_dict: {
+           batchX_placeholder => batchX,
+           batchY_placeholder => batchY,
+           init_state => _current_state
+         })
+
+       if batch_idx%100 == 0
+         print("Step",batch_idx, " Loss ", _total_loss, "\n")
+       end
+     end
+   end
+ end
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: tensor_stream
  version: !ruby/object:Gem::Version
-   version: 0.8.5
+   version: 0.8.6
  platform: ruby
  authors:
  - Joseph Emmanuel Dayo
  autorequire:
  bindir: exe
  cert_chain: []
- date: 2018-09-06 00:00:00.000000000 Z
+ date: 2018-09-11 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
    name: bundler
@@ -261,6 +261,7 @@ files:
  - lib/tensor_stream/evaluator/operation_helpers/math_helper.rb
  - lib/tensor_stream/evaluator/operation_helpers/random_gaussian.rb
  - lib/tensor_stream/evaluator/ruby/array_ops.rb
+ - lib/tensor_stream/evaluator/ruby/check_ops.rb
  - lib/tensor_stream/evaluator/ruby/images_ops.rb
  - lib/tensor_stream/evaluator/ruby/math_ops.rb
  - lib/tensor_stream/evaluator/ruby/nn_ops.rb
@@ -288,10 +289,12 @@ files:
  - lib/tensor_stream/tensor.rb
  - lib/tensor_stream/tensor_shape.rb
  - lib/tensor_stream/train/adadelta_optimizer.rb
+ - lib/tensor_stream/train/adagrad_optimizer.rb
  - lib/tensor_stream/train/adam_optimizer.rb
  - lib/tensor_stream/train/gradient_descent_optimizer.rb
  - lib/tensor_stream/train/momentum_optimizer.rb
  - lib/tensor_stream/train/optimizer.rb
+ - lib/tensor_stream/train/rmsprop_optimizer.rb
  - lib/tensor_stream/train/saver.rb
  - lib/tensor_stream/train/slot_creator.rb
  - lib/tensor_stream/train/utils.rb
@@ -311,6 +314,7 @@ files:
  - samples/multigpu.rb
  - samples/nearest_neighbor.rb
  - samples/raw_neural_net_sample.rb
+ - samples/rnn.rb
  - tensor_stream.gemspec
  homepage: http://www.github.com/jedld/tensor_stream
  licenses: