tensor_stream 0.9.8 → 0.9.9
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/README.md +31 -14
- data/lib/tensor_stream.rb +4 -0
- data/lib/tensor_stream/constant.rb +41 -0
- data/lib/tensor_stream/control_flow.rb +2 -1
- data/lib/tensor_stream/dynamic_stitch.rb +3 -1
- data/lib/tensor_stream/evaluator/operation_helpers/array_ops_helper.rb +4 -4
- data/lib/tensor_stream/evaluator/ruby/array_ops.rb +74 -23
- data/lib/tensor_stream/evaluator/ruby/math_ops.rb +45 -43
- data/lib/tensor_stream/evaluator/ruby/nn_ops.rb +31 -30
- data/lib/tensor_stream/evaluator/ruby/random_ops.rb +6 -6
- data/lib/tensor_stream/evaluator/ruby_evaluator.rb +46 -111
- data/lib/tensor_stream/graph.rb +61 -12
- data/lib/tensor_stream/graph_builder.rb +3 -3
- data/lib/tensor_stream/graph_deserializers/yaml_loader.rb +38 -0
- data/lib/tensor_stream/graph_serializers/packer.rb +8 -0
- data/lib/tensor_stream/graph_serializers/pbtext.rb +62 -27
- data/lib/tensor_stream/graph_serializers/serializer.rb +2 -2
- data/lib/tensor_stream/graph_serializers/yaml.rb +27 -0
- data/lib/tensor_stream/helpers/infer_shape.rb +15 -9
- data/lib/tensor_stream/helpers/op_helper.rb +17 -6
- data/lib/tensor_stream/helpers/string_helper.rb +32 -1
- data/lib/tensor_stream/helpers/tensor_mixins.rb +135 -0
- data/lib/tensor_stream/math_gradients.rb +19 -12
- data/lib/tensor_stream/monkey_patches/float.rb +7 -0
- data/lib/tensor_stream/monkey_patches/integer.rb +7 -0
- data/lib/tensor_stream/monkey_patches/patch.rb +8 -8
- data/lib/tensor_stream/nn/nn_ops.rb +1 -1
- data/lib/tensor_stream/operation.rb +98 -36
- data/lib/tensor_stream/ops.rb +65 -13
- data/lib/tensor_stream/placeholder.rb +2 -2
- data/lib/tensor_stream/session.rb +15 -3
- data/lib/tensor_stream/tensor.rb +15 -172
- data/lib/tensor_stream/tensor_shape.rb +3 -1
- data/lib/tensor_stream/train/saver.rb +12 -10
- data/lib/tensor_stream/trainer.rb +7 -2
- data/lib/tensor_stream/utils.rb +13 -11
- data/lib/tensor_stream/utils/freezer.rb +37 -0
- data/lib/tensor_stream/variable.rb +17 -11
- data/lib/tensor_stream/variable_scope.rb +3 -1
- data/lib/tensor_stream/version.rb +1 -1
- data/samples/iris.rb +3 -4
- data/samples/linear_regression.rb +9 -5
- data/samples/logistic_regression.rb +11 -9
- data/samples/mnist_data.rb +8 -10
- metadata +8 -4
data/lib/tensor_stream/variable.rb CHANGED
@@ -1,16 +1,21 @@
 module TensorStream
   # Class that defines a TensorStream variable
   class Variable < Tensor
-    attr_accessor :trainable, :options, :buffer
-
-      setup_initial_state(options)
+    attr_accessor :trainable, :options, :buffer, :op
+    attr_writer :value
 
-
-      }
+    def initialize(data_type)
       @data_type = data_type
+      @options = {}
+      @is_const = false
+    end
+
+    def prepare(rank, shape, variable_scope, options = {})
+      setup_initial_state(options)
+
       @rank = rank
       @value = nil
-
+
       scope_name = variable_scope ? variable_scope.name : nil
       variable_scope_initializer = variable_scope ? variable_scope.initializer : nil
       @name = [scope_name, options[:name] || build_name].compact.reject(&:empty?).join('/')
@@ -19,7 +24,6 @@ module TensorStream
 
       @shape = TensorShape.new(shape, rank)
       @trainable = options.fetch(:trainable, true)
-      @graph.add_variable(self, options)
     end
 
     def trainable?
@@ -55,10 +59,6 @@ module TensorStream
       _op(:assign_add, self, value, data_type: data_type, name: name)
     end
 
-    def op
-      @op ||= _op(:variable, self, data_type: data_type)
-    end
-
    def to_math(_tensor, _name_only = false, _max_depth = 99, _unused = 0)
       @name
     end
@@ -75,5 +75,11 @@ module TensorStream
     def self.global_variables_initializer
       variables_initializer(TensorStream::GraphKeys::GLOBAL_VARIABLES)
     end
+
+    protected
+
+    def build_name
+      "Variable#{graph.get_var_counter}:#{@rank}"
+    end
   end
 end
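The public construction path is unaffected by this refactor: `initialize` now only records the data type, while `prepare` does the shape and scope setup and the graph itself registers the variable. A minimal usage sketch (not part of the diff; the shape and names are illustrative):

require 'tensor_stream'

ts = TensorStream

# Public construction still goes through TensorStream.variable; internally the
# 0.9.9 Variable is built in two phases (initialize, then prepare) and the
# graph, not the variable, performs the registration.
weights = ts.variable(ts.random_normal([2, 2]), name: 'weights')

ts.session do |sess|
  sess.run(ts.global_variables_initializer)
  p sess.run(weights)
end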
data/lib/tensor_stream/variable_scope.rb CHANGED
@@ -1,5 +1,6 @@
 module TensorStream
   class VariableScope
+    include OpHelper
     attr_accessor :name, :reuse, :initializer
     attr_reader :used_names
 
@@ -12,7 +13,8 @@ module TensorStream
 
     def get_variable(name, dtype: nil, shape: nil, initializer: nil, trainable: true, collections: nil, validate_shape: false)
       raise TensorStream::ValueError, "validate_shape=true and initializer does not have a defined shape" if validate_shape && !shape.nil && initializer.is_a?(Tensor)
-
+
+      i_var(dtype || :float32, nil, shape, self, collections: collections, name: name, initializer: initializer, trainable: trainable)
     end
 
     def register_name(name)
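`get_variable` now builds the variable through `i_var`, defaulting the dtype to `:float32`. A short sketch of how this is typically reached from user code, assuming the top-level `TensorStream.get_variable` helper forwards to the scope method shown above (scope and variable names are illustrative):

require 'tensor_stream'

ts = TensorStream

# Inside a scope, get_variable hands its arguments to i_var; dtype defaults
# to :float32 when not given.
ts.variable_scope('dense_1') do
  kernel = ts.get_variable('kernel', shape: [4, 2], dtype: :float32)
  bias   = ts.get_variable('bias', shape: [2], dtype: :float32)
end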
data/samples/iris.rb CHANGED
@@ -52,15 +52,14 @@ end
 
 def init_weights(shape)
   # Weight initialization
-
-  TensorStream.variable(weights)
+  TensorStream.random_normal(shape, stddev: 0.1).var
 end
 
 def forwardprop(x, w_1, w_2)
   # Forward-propagation.
   # IMPORTANT: yhat is not softmax since TensorFlow's softmax_cross_entropy_with_logits() does that internally.
-  h = TensorStream.nn.sigmoid(
-
+  h = TensorStream.nn.sigmoid(x.matmul w_1) # The \sigma function
+  h.matmul w_2 # The \varphi function
 end
 
 x_size = x_train[0].size
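The sample now builds its weight variable fluently with `.var` instead of wrapping a tensor in `TensorStream.variable`. A small sketch of the two spellings side by side (not part of the diff; equivalence is assumed based on the updated sample):

require 'tensor_stream'

ts = TensorStream

# Classic form: wrap an initial-value tensor in a variable explicitly.
w_classic = ts.variable(ts.random_normal([4, 3], stddev: 0.1))

# Fluent form used by the updated sample; .var is assumed to be equivalent
# and comes from the new tensor mixins.
w_fluent = ts.random_normal([4, 3], stddev: 0.1).var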
data/samples/linear_regression.rb CHANGED
@@ -16,19 +16,19 @@ train_Y = [1.7,2.76,2.09,3.19,1.694,1.573,3.366,2.596,2.53,1.221,
 
 n_samples = train_X.size
 
-X =
-Y =
+X = Float.placeholder
+Y = Float.placeholder
 
 # Set model weights
 
-W =
-b =
+W = rand.t.var name: "weight"
+b = rand.t.var name: "bias"
 
 # Construct a linear model
 pred = X * W + b
 
 # Mean squared error
-cost = ((pred - Y) ** 2).reduce
+cost = ((pred - Y) ** 2).reduce / ( 2 * n_samples)
 
 # Other possible Optimizers
 
@@ -42,6 +42,8 @@ optimizer = TensorStream::Train::GradientDescentOptimizer.new(learning_rate).min
 
 # Initialize the variables (i.e. assign their default value)
 init = tf.global_variables_initializer()
+# Add ops to save and restore all the variables.
+saver = tf::Train::Saver.new
 
 tf.session do |sess|
   start_time = Time.now
@@ -52,6 +54,8 @@ tf.session do |sess|
   end
 
   if (epoch+1) % display_step == 0
+    # Save the variables to disk.
+    save_path = saver.save(sess, "/tmp/lg_model")
     c = sess.run(cost, feed_dict: {X => train_X, Y => train_Y})
     puts("Epoch:", '%04d' % (epoch+1), "cost=", c, \
       "W=", sess.run(W), "b=", sess.run(b))
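Two things change in this sample: placeholders come from the new `Float.placeholder` monkey patch, and a `tf::Train::Saver` checkpoints the variables during training. A condensed sketch combining both (not part of the diff; the expression and checkpoint path are illustrative):

require 'tensor_stream'

tf = TensorStream

# Placeholder via the new Float monkey patch; the sample uses it in place of
# an explicit tf.placeholder(:float32).
x = Float.placeholder
w = rand.t.var name: "weight"

# Checkpointing as in the updated sample.
saver = tf::Train::Saver.new

tf.session do |sess|
  sess.run(tf.global_variables_initializer)
  sess.run(x * w, feed_dict: { x => 2.0 })
  saver.save(sess, "/tmp/lg_model")
end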
data/samples/logistic_regression.rb CHANGED
@@ -39,19 +39,19 @@ test_x = transformed_data[51..100].map { |x| x[0..3].map(&:to_f) }
 test_y = iris[51..100].map { |x| x[4] == 'Iris-setosa' ? 0.0 : 1.0 }
 
 
-A = tf.
-b = tf.
+A = tf.random_normal([4, 1]).var
+b = tf.random_normal([1, 1]).var
 
 init = tf.global_variables_initializer
 sess = tf.session
 sess.run(init)
 
-data =
-target =
+data = Float.placeholder shape: [nil, 4]
+target = Float.placeholder shape: [nil, 1]
 
-mod = data.
+mod = data.matmul(A) + b
 
-loss = tf.
+loss = tf.nn.sigmoid_cross_entropy_with_logits(logits: mod, labels: target).reduce :mean
 
 learning_rate = 0.003
 batch_size = 30
@@ -59,11 +59,13 @@ iter_num = 1500
 
 optimizer = TensorStream::Train::GradientDescentOptimizer.new(learning_rate)
 goal = optimizer.minimize(loss)
-prediction = tf.
+prediction = tf.sigmoid(mod).round
+
 # Bool into float32 type
-correct =
+correct = (prediction == target).cast
+
 # Average
-accuracy =
+accuracy = correct.reduce :mean
 
 loss_trace = []
 train_acc = []
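The rewritten sample leans on the fluent tensor methods: `==` builds an equality op, `.cast` converts the booleans (to float32 by default, we assume), and `.reduce :mean` averages. A self-contained sketch of the same accuracy chain on constant data (not part of the diff):

require 'tensor_stream'

tf = TensorStream

labels = tf.constant([[1.0], [0.0], [1.0]])
logits = tf.constant([[2.5], [-1.0], [0.3]])

# Round the sigmoid output, compare with the labels, cast the booleans and
# take the mean, mirroring the updated sample.
accuracy = (tf.sigmoid(logits).round == labels).cast.reduce(:mean)

tf.session do |sess|
  puts sess.run(accuracy)
end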
data/samples/mnist_data.rb CHANGED
@@ -20,22 +20,20 @@ puts "downloading minst data"
 mnist = Mnist.read_data_sets('/tmp/data', one_hot: true)
 puts "downloading finished"
 
-x =
-w = tf.
-b = tf.
-
-
+x = Float.placeholder shape: [nil, 784]
+w = tf.zeros([784, 10]).var
+b = tf.zeros([10]).var
 
 # model
-y = tf.nn.softmax(
+y = tf.nn.softmax(x.reshape([-1, 784]).matmul(w) + b)
 
-y_ =
+y_ = Float.placeholder shape: [nil, 10]
 
 # loss function
-cross_entropy = -
+cross_entropy = -(y_ * y.log).reduce
 
-is_correct = tf.
-accuracy =
+is_correct = tf.argmax(y, 1) == tf.argmax(y_, 1)
+accuracy = is_correct.cast.reduce :mean
 
 optimizer = TensorStream::Train::AdamOptimizer.new
 train_step = optimizer.minimize(cross_entropy)
metadata CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: tensor_stream
 version: !ruby/object:Gem::Version
-  version: 0.9.8
+  version: 0.9.9
 platform: ruby
 authors:
 - Joseph Emmanuel Dayo
 autorequire:
 bindir: exe
 cert_chain: []
-date: 2018-
+date: 2018-12-28 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: bundler
@@ -254,6 +254,7 @@ files:
 - data_expected.json
 - data_input.json
 - lib/tensor_stream.rb
+- lib/tensor_stream/constant.rb
 - lib/tensor_stream/control_flow.rb
 - lib/tensor_stream/debugging/debugging.rb
 - lib/tensor_stream/device.rb
@@ -275,14 +276,17 @@ files:
 - lib/tensor_stream/graph.rb
 - lib/tensor_stream/graph_builder.rb
 - lib/tensor_stream/graph_deserializers/protobuf.rb
+- lib/tensor_stream/graph_deserializers/yaml_loader.rb
 - lib/tensor_stream/graph_keys.rb
 - lib/tensor_stream/graph_serializers/graphml.rb
 - lib/tensor_stream/graph_serializers/packer.rb
 - lib/tensor_stream/graph_serializers/pbtext.rb
 - lib/tensor_stream/graph_serializers/serializer.rb
+- lib/tensor_stream/graph_serializers/yaml.rb
 - lib/tensor_stream/helpers/infer_shape.rb
 - lib/tensor_stream/helpers/op_helper.rb
 - lib/tensor_stream/helpers/string_helper.rb
+- lib/tensor_stream/helpers/tensor_mixins.rb
 - lib/tensor_stream/images.rb
 - lib/tensor_stream/initializer.rb
 - lib/tensor_stream/math_gradients.rb
@@ -312,6 +316,7 @@ files:
 - lib/tensor_stream/trainer.rb
 - lib/tensor_stream/types.rb
 - lib/tensor_stream/utils.rb
+- lib/tensor_stream/utils/freezer.rb
 - lib/tensor_stream/variable.rb
 - lib/tensor_stream/variable_scope.rb
 - lib/tensor_stream/version.rb
@@ -347,8 +352,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
 - !ruby/object:Gem::Version
   version: '0'
 requirements: []
-
-rubygems_version: 2.7.7
+rubygems_version: 3.0.1
 signing_key:
 specification_version: 4
 summary: A Pure ruby tensorflow implementation