tensor_stream 0.7.0 → 0.8.0
- checksums.yaml +5 -5
- data/.rubocop.yml +6 -1
- data/CHANGELOG.md +10 -0
- data/README.md +35 -0
- data/lib/tensor_stream.rb +2 -2
- data/lib/tensor_stream/debugging/debugging.rb +2 -1
- data/lib/tensor_stream/dynamic_stitch.rb +23 -24
- data/lib/tensor_stream/evaluator/base_evaluator.rb +27 -18
- data/lib/tensor_stream/evaluator/opencl/kernels/apply_momentum.cl +16 -0
- data/lib/tensor_stream/evaluator/opencl/kernels/pack.cl +24 -0
- data/lib/tensor_stream/evaluator/opencl/kernels/softmax_cross.cl +6 -1
- data/lib/tensor_stream/evaluator/opencl/opencl_buffer.rb +6 -6
- data/lib/tensor_stream/evaluator/opencl/opencl_evaluator.rb +237 -107
- data/lib/tensor_stream/evaluator/operation_helpers/array_ops_helper.rb +97 -7
- data/lib/tensor_stream/evaluator/ruby_evaluator.rb +230 -123
- data/lib/tensor_stream/exceptions.rb +1 -0
- data/lib/tensor_stream/graph_builder.rb +2 -3
- data/lib/tensor_stream/graph_deserializers/protobuf.rb +22 -23
- data/lib/tensor_stream/graph_serializers/graphml.rb +26 -29
- data/lib/tensor_stream/graph_serializers/pbtext.rb +22 -19
- data/lib/tensor_stream/helpers/string_helper.rb +4 -5
- data/lib/tensor_stream/math_gradients.rb +141 -77
- data/lib/tensor_stream/nn/nn_ops.rb +4 -6
- data/lib/tensor_stream/operation.rb +139 -120
- data/lib/tensor_stream/ops.rb +36 -3
- data/lib/tensor_stream/session.rb +7 -11
- data/lib/tensor_stream/tensor.rb +3 -3
- data/lib/tensor_stream/tensor_shape.rb +5 -0
- data/lib/tensor_stream/train/gradient_descent_optimizer.rb +4 -37
- data/lib/tensor_stream/train/momentum_optimizer.rb +48 -0
- data/lib/tensor_stream/train/optimizer.rb +129 -0
- data/lib/tensor_stream/train/saver.rb +0 -1
- data/lib/tensor_stream/train/slot_creator.rb +62 -0
- data/lib/tensor_stream/train/utils.rb +11 -12
- data/lib/tensor_stream/trainer.rb +3 -0
- data/lib/tensor_stream/utils.rb +18 -11
- data/lib/tensor_stream/variable.rb +19 -12
- data/lib/tensor_stream/variable_scope.rb +1 -1
- data/lib/tensor_stream/version.rb +1 -1
- data/samples/iris.rb +2 -1
- data/samples/linear_regression.rb +3 -1
- data/samples/nearest_neighbor.rb +2 -0
- data/test_samples/neural_network_raw.py +101 -0
- data/test_samples/raw_neural_net_sample.rb +6 -4
- data/test_samples/test2.py +73 -27
- metadata +9 -3
data/lib/tensor_stream/tensor_shape.rb CHANGED

```diff
@@ -30,6 +30,10 @@ module TensorStream
       true
     end
 
+    def is_fully_defined?
+      known?
+    end
+
     def self.infer_shape(shape_a, shape_b)
       return shape_a if shape_b.nil?
       return shape_b if shape_a.nil?
@@ -69,6 +73,7 @@ module TensorStream
 
     def self.fix_inferred_elements(shape, total_size)
       return shape if shape.empty?
+      return nil if shape[0].is_a?(Tensor)
 
       current_size = shape.inject(1) { |product, n| n > 0 ? product * n : product }
      inferred_size = total_size.nil? ? nil : total_size / current_size
```
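The new `TensorShape#is_fully_defined?` simply delegates to `known?`, letting callers (such as the slot creator further down) ask whether every dimension of a shape is concrete before relying on it. A minimal sketch of how it might be exercised, assuming the usual top-level helpers (`TensorStream.zeros`, `TensorStream.placeholder`) behave as elsewhere in this gem:

```ruby
require 'tensor_stream'

zeros = TensorStream.zeros([2, 3])          # every dimension is known
ph    = TensorStream.placeholder(:float32)  # no shape supplied

zeros.shape.is_fully_defined?  # => true
ph.shape.is_fully_defined?     # => false, delegates to known?
```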
data/lib/tensor_stream/train/gradient_descent_optimizer.rb CHANGED

```diff
@@ -1,7 +1,7 @@
 module TensorStream
   module Train
     # High Level implementation of the gradient descent algorithm
-    class GradientDescentOptimizer
+    class GradientDescentOptimizer < Optimizer
       include TensorStream::OpHelper
 
       attr_accessor :learning_rate
@@ -10,43 +10,10 @@ module TensorStream
         @learning_rate = learning_rate
       end
 
-      def minimize(loss, var_list: nil, grad_loss: nil, global_step: nil)
-        grads_and_vars = compute_gradients(loss, var_list: var_list, grad_loss: grad_loss)
-        apply_gradients(grads_and_vars, global_step: global_step)
-      end
-
-      ##
-      # Apply gradients to variables.
-      # This is the second part of minimize(). It returns an Operation that applies gradients.
-      def apply_gradients(grads_and_vars, global_step: nil)
-        apply_ops = grads_and_vars.map do |grad, var|
-          i_op(:apply_gradient_descent, var, TensorStream.cast(@learning_rate, grad.data_type), grad)
-        end
-
-        if global_step.nil?
-          apply_ops
-        else
-          apply_ops + [global_step.assign_add(1)]
-        end
-      end
-
-      ##
-      # Compute gradients of loss for the variables in var_list.
-      #
-      # This is the first part of minimize(). It returns a list of (gradient, variable) pairs where "gradient" is the gradient for "variable".
-      def compute_gradients(loss, var_list: nil, grad_loss: nil)
-        trainable_vars = if var_list
-                           raise "var_list must be an array" unless var_list.is_a?(Array)
-                           var_list.each_with_index { |var, index| raise "var #{index} not a Variable" unless var.is_a?(Variable) }
+      protected
 
-                           var_list
-                         else
-                           loss.graph.get_collection(TensorStream::GraphKeys::TRAINABLE_VARIABLES)
-                         end
-        all_grads = grad_loss || TensorStream.gradients(loss, trainable_vars)
-        trainable_vars.each_with_index.collect do |var, index|
-          [all_grads[index], var]
+      def apply_dense(grad, var)
+        i_op(:apply_gradient_descent, var, TensorStream.cast(@learning_rate, grad.data_type), grad)
       end
     end
   end
```
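With the shared `minimize`/`apply_gradients`/`compute_gradients` machinery moved into the new `Optimizer` base class (next file), `GradientDescentOptimizer` only has to provide `apply_dense`. The public entry point is unchanged; a hedged usage sketch, with the loss construction purely illustrative:

```ruby
require 'tensor_stream'

ts = TensorStream

x = ts.placeholder(:float32)
y = ts.placeholder(:float32)
w = ts.variable(0.1, name: 'weight')
b = ts.variable(0.1, name: 'bias')

pred = w * x + b
loss = ts.reduce_sum(ts.pow(pred - y, 2))

# minimize now runs through Optimizer#compute_gradients / #apply_gradients,
# while the subclass only contributes the :apply_gradient_descent op
train_op = TensorStream::Train::GradientDescentOptimizer.new(0.01).minimize(loss)

ts.session do |sess|
  sess.run(ts.global_variables_initializer)
  sess.run(train_op, feed_dict: { x => 2.0, y => 4.0 })
end
```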
data/lib/tensor_stream/train/momentum_optimizer.rb ADDED

```diff
@@ -0,0 +1,48 @@
+module TensorStream
+  module Train
+    # Optimizer that implements the Momentum algorithm. loosely based on the tensorflow implementation.
+    class MomentumOptimizer < Optimizer
+      include OpHelper
+
+      ##
+      # Construct a new Momentum optimizer.
+      #
+      # Args:
+      #   learning_rate: A Tensor or a floating point value that indicates the learning rate
+      #   momentum: A Tensor or a floating point value for the momentum
+      #   name: Optional name prefix
+      #   use_nesterov: boolean - Flag that indicates if nesterov momentum is to be used. http://jmlr.org/proceedings/papers/v28/sutskever13.pdf
+      #   use_locking: boolean - filler argument for compatibility, not used at the moment
+      def initialize(learning_rate, momentum, name: 'momentum', use_nesterov: false, use_locking: false)
+        @learning_rate = learning_rate
+        @momentum = momentum
+        @use_nesterov = use_nesterov
+        super(name: name, use_locking: use_locking)
+      end
+
+      protected
+
+      def prepare
+        @learning_rate_tensor = TensorStream.convert_to_tensor(@learning_rate, name: "learning_rate")
+        @momentum_tensor = TensorStream.convert_to_tensor(@momentum, name: "momentum")
+      end
+
+      def create_slots(var_list)
+        var_list.each do |v|
+          zeros_slot(v, "momentum", @name)
+        end
+      end
+
+      def apply_dense(grad, var)
+        mom = get_slot(var, "momentum")
+
+        _op(:apply_momentum, var, mom,
+            TensorStream.cast(@learning_rate_tensor, var.data_type),
+            grad,
+            TensorStream.cast(@momentum_tensor, var.data_type),
+            use_locking: @use_locking,
+            use_nesterov: @use_nesterov)
+      end
+    end
+  end
+end
```
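`MomentumOptimizer` keeps one "momentum" slot per variable (via `zeros_slot` in `create_slots`) and emits a single `:apply_momentum` op per update. Assuming a `loss` tensor built as in the previous sketch, usage would look roughly like:

```ruby
# classic momentum
train_op = TensorStream::Train::MomentumOptimizer.new(0.01, 0.9).minimize(loss)

# Nesterov variant, toggled by the use_nesterov flag
nesterov_train_op = TensorStream::Train::MomentumOptimizer
                    .new(0.01, 0.9, use_nesterov: true)
                    .minimize(loss)
```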
data/lib/tensor_stream/train/optimizer.rb ADDED

```diff
@@ -0,0 +1,129 @@
+module TensorStream
+  module Train
+    # Base class for an optimizer
+    # This is a straight up port from the python version
+    class Optimizer
+      include SlotCreator
+
+      attr_reader :name
+
+      def initialize(name:, use_locking:)
+        @name = name
+        @use_locking = use_locking
+        raise TensorStream::ValueError, "Must specify the optimizer name" unless @name
+        @slots = {}
+      end
+
+      def minimize(loss, var_list: nil, grad_loss: nil, global_step: nil, name: nil)
+        grads_and_vars = compute_gradients(loss, var_list: var_list, grad_loss: grad_loss)
+        apply_gradients(grads_and_vars, global_step: global_step, name: name)
+      end
+
+      ##
+      # Apply gradients to variables.
+      # This is the second part of minimize(). It returns an Operation that applies gradients.
+      def apply_gradients(grads_and_vars, global_step: nil, name: nil)
+        varlist = grads_and_vars.map { |_grad, var| var }
+        create_slots(varlist)
+        TensorStream.name_scope(name, default: @name) do
+          prepare
+          apply_ops = grads_and_vars.map do |grad, var|
+            TensorStream.name_scope("update_" + var.op.name) do
+              apply_dense(grad, var)
+            end
+          end
+
+          if global_step.nil?
+            finish(apply_ops, name)
+          else
+            TensorStream.control_dependencies([finish(apply_ops, "update")]) do
+              global_step.assign_add(1)
+            end
+          end
+        end
+      end
+
+      ##
+      # Compute gradients of loss for the variables in var_list.
+      #
+      # This is the first part of minimize(). It returns a list of (gradient, variable) pairs where "gradient" is the gradient for "variable".
+      def compute_gradients(loss, var_list: nil, grad_loss: nil)
+        trainable_vars = if var_list
+                           raise "var_list must be an array" unless var_list.is_a?(Array)
+                           var_list.each_with_index { |var, index| raise "var #{index} not a Variable" unless var.is_a?(Variable) }
+
+                           var_list
+                         else
+                           loss.graph.get_collection(TensorStream::GraphKeys::TRAINABLE_VARIABLES)
+                         end
+        all_grads = grad_loss || TensorStream.gradients(loss, trainable_vars)
+        trainable_vars.each_with_index.collect do |var, index|
+          [all_grads[index], var]
+        end
+      end
+
+      def get_slot(var, name)
+        named_slots = @slots.fetch(name, nil)
+        return nil if named_slots.nil?
+        named_slots.fetch(var_key(var), nil)
+      end
+
+      def get_slot_names
+        @slots.keys.sort
+      end
+
+      protected
+
+      def finish(update_ops, name_scope)
+        TensorStream.group(update_ops, name: name_scope)
+      end
+
+      def create_slots(var_list)
+        # no implementation
+      end
+
+      def prepare
+        # no implementation
+      end
+
+      def apply_dense(grad, var)
+        raise TensorStream::NotImplementedError, "not implemented"
+      end
+
+      ##
+      # Find or create a slot initialized with 0.0.
+      #
+      # Args:
+      #   var: Variable - A Variable object
+      #   slot_name: string - Name fot the slot
+      #   op_name: string - Name to use when scoping the Variable that needs to be created
+      def zeros_slot(var, slot_name, op_name)
+        named_slots = slot_dict(slot_name)
+        if !named_slots.key?(var_key(var))
+          named_slots[var_key(var)] = create_zeros_slot(var, op_name)
+        end
+        named_slots[var_key(var)]
+      end
+
+      ##
+      # Returns a dict for caching slots created under the given name.
+      #
+      # Args:
+      #   slot_name string Name for the slot
+      #
+      # Returns: A dict that maps primary 'Variable' objects to the slot created
+      def slot_dict(slot_name)
+        named_slots = @slots.fetch(slot_name, nil)
+        if named_slots.nil?
+          named_slots = {}
+          @slots[slot_name] = named_slots
+        end
+        named_slots
+      end
+
+      def var_key(var)
+        [var.op.graph, var.op.name]
+      end
+    end
+  end
+end
```
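The base class owns the whole flow: `minimize` calls `compute_gradients`, then `apply_gradients` creates slots, calls `prepare`, wraps each variable update in an `update_<var>` name scope via `apply_dense`, and finally groups the ops (optionally chaining `global_step.assign_add(1)` behind them). A subclass therefore only needs `apply_dense`, plus `prepare`/`create_slots` if it keeps state. The class below is a hypothetical illustration of those extension points, not part of this release:

```ruby
module TensorStream
  module Train
    # Hypothetical optimizer: gradient descent with a constant extra scale factor,
    # written only to show where Optimizer expects subclasses to plug in.
    class ScaledGradientDescentOptimizer < Optimizer
      include TensorStream::OpHelper

      def initialize(learning_rate, scale: 1.0, name: 'scaled_gradient_descent', use_locking: false)
        @learning_rate = learning_rate
        @scale = scale
        super(name: name, use_locking: use_locking)
      end

      protected

      # called once inside apply_gradients, within the optimizer's name scope
      def prepare
        @lr_tensor = TensorStream.convert_to_tensor(@learning_rate * @scale, name: 'learning_rate')
      end

      # called once per (gradient, variable) pair to build the update op
      def apply_dense(grad, var)
        i_op(:apply_gradient_descent, var, TensorStream.cast(@lr_tensor, grad.data_type), grad)
      end
    end
  end
end

# train_op = TensorStream::Train::ScaledGradientDescentOptimizer.new(0.01, scale: 0.5).minimize(loss)
```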
data/lib/tensor_stream/train/slot_creator.rb ADDED

```diff
@@ -0,0 +1,62 @@
+module TensorStream
+  module Train
+    module SlotCreator
+      include TensorStream::Utils
+
+      ##
+      # Helper function for creating a slot variable.
+      def create_slot_var(primary, val, scope)
+        slot = get_variable(scope, initializer: val, trainable: false,
+                                   validate_shape: val.shape.is_fully_defined?)
+        slot
+      end
+
+      ##
+      # Create a slot initialized to the given value
+      #
+      # Args:
+      #   primary: Variable - The primary 'Variable' or 'Tensor'
+      #   val: Tensor - A `Tensor` specifying the initial value of the slot
+      #   name: String - Name to use for the slot variable
+      #   colocate_with_primary: Boolean - If true the slot is located
+      #                                    on the same device as `primary`
+      #
+      # Returns: A `Variable` object
+      def create_slot(primary, val, name, colocate_with_primary: true)
+        TensorStream.variable_scope(primary.op.name + "/" + name) do
+          if colocate_with_primary
+            TensorStream.colocate_with(primary) do
+              return create_slot_var(primary, val, "")
+            end
+          else
+            return create_slot_var(primary, val, "")
+          end
+        end
+      end
+
+      ##
+      # Create a slot initialized to 0 with same shape as the primary object.
+      #
+      # Args:
+      #   primary: The pirmary variable or Tensor
+      #   name: String - Name to use for the slot variable
+      #   dtype: Symbol - Type of the slot variable
+      #   colocate_with_primary: boolean - If true the slot is located on the same device as primary
+      #
+      # Returns:
+      #   A `Variable` object
+      def create_zeros_slot(primary, name, dtype: nil, colocate_with_primary: true)
+        dtype = primary.data_type if dtype.nil?
+        slot_shape = primary.shape
+        slot_shape = if slot_shape.is_fully_defined?
+                       slot_shape.shape
+                     else
+                       TensorStream.shape(primary.initialized_value)
+                     end
+        val = TensorStream.zeros(slot_shape, dtype: dtype)
+        create_slot(primary, val, name,
+                    colocate_with_primary: colocate_with_primary)
+      end
+    end
+  end
+end
```
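`SlotCreator` is mixed into `Optimizer` and is how per-variable optimizer state gets allocated: `create_zeros_slot` builds a non-trainable variable shaped like the primary variable (falling back to `TensorStream.shape(primary.initialized_value)` when the static shape is not fully defined), scoped under `<primary op name>/<slot name>`. A rough sketch of exercising it directly; including the module outside an optimizer like this is hypothetical and only for illustration:

```ruby
require 'tensor_stream'

# hypothetical host class, just to gain access to the mixin's helpers
class SlotPlayground
  include TensorStream::Train::SlotCreator
  include TensorStream::Utils
end

weights = TensorStream.variable(TensorStream.zeros([3, 2]), name: 'weights')

# a non-trainable, zero-initialized variable with the same shape/dtype as `weights`
momentum_slot = SlotPlayground.new.create_zeros_slot(weights, 'momentum')
```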
data/lib/tensor_stream/train/utils.rb CHANGED

```diff
@@ -6,24 +6,23 @@ module TensorStream
       target_graph = graph || TensorStream.get_default_graph
       raise TensorStream::ValueError, '"global_step" already exists.' unless get_global_step(target_graph).nil?
 
-      TensorStream.variable_scope.get_variable(
-        TensorStream::GraphKeys::GLOBAL_STEP, shape: [],
-        dtype: :int64,
-        initializer: TensorStream.zeros_initializer,
-        trainable: false,
-        collections: [TensorStream::GraphKeys::GLOBAL_VARIABLES,
-                      TensorStream::GraphKeys::GLOBAL_STEP])
+      TensorStream.variable_scope.get_variable(TensorStream::GraphKeys::GLOBAL_STEP, shape: [],
+                                               dtype: :int64,
+                                               initializer: TensorStream.zeros_initializer,
+                                               trainable: false,
+                                               collections: [TensorStream::GraphKeys::GLOBAL_VARIABLES,
+                                                             TensorStream::GraphKeys::GLOBAL_STEP])
     end
 
     def get_global_step(graph = nil)
       target_graph = graph || TensorStream.get_default_graph
       global_step_tensors = target_graph.get_collection(TensorStream::GraphKeys::GLOBAL_STEP)
       global_step_tensor = if global_step_tensors.nil? || global_step_tensors.empty?
-
-
-
-
-
+                             begin
+                               target_graph.get_tensor_by_name('global_step:0')
+                             rescue TensorStream::KeyError
+                               nil
+                             end
                            elsif global_step_tensors.size == 1
                              global_step_tensors[0]
                            else
```
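`create_global_step` now registers the counter as a scalar `:int64`, zero-initialized, non-trainable variable under the `GLOBAL_STEP` and `GLOBAL_VARIABLES` collections, and `get_global_step` additionally falls back to looking up a tensor named `global_step:0`. Combined with the optimizer changes above, a hedged sketch of the intended pattern, building an equivalent step variable by hand and assuming a `loss` as in the earlier sketch:

```ruby
ts = TensorStream

global_step = ts.get_variable('global_step', shape: [], dtype: :int64,
                              initializer: ts.zeros_initializer,
                              trainable: false)

# when global_step is passed, Optimizer#apply_gradients chains
# global_step.assign_add(1) behind the grouped update ops
train_op = TensorStream::Train::GradientDescentOptimizer
           .new(0.01)
           .minimize(loss, global_step: global_step)
```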
data/lib/tensor_stream/utils.rb CHANGED

```diff
@@ -54,14 +54,14 @@ module TensorStream
       trainable: trainable
     }
     tensor = if value.is_a?(String)
-
-
-
-
-
-
-
-
+               TensorStream::Variable.new(dtype || :string, 0, [], get_variable_scope, common_options)
+             elsif value.is_a?(Integer)
+               TensorStream::Variable.new(dtype || :int32, 0, [], get_variable_scope, common_options)
+             elsif value.is_a?(Float)
+               TensorStream::Variable.new(dtype || :float32, 0, [], get_variable_scope, common_options)
+             else
+               TensorStream::Variable.new(dtype || :float32, 0, nil, get_variable_scope, common_options)
+             end
     op.inputs[0] = tensor
     tensor
   end
@@ -115,8 +115,13 @@ module TensorStream
     session
   end
 
-  def program
-    yield self
+  def colocate_with(op, ignore_existing: false)
+    # noop for now
+    yield
+  end
+
+  def program
+    yield self
   end
 
   def layers
@@ -153,7 +158,7 @@ module TensorStream
     TensorStream::DynamicStitch.new(:dynamic_stitch, [indices, data], name: name)
   end
 
-  def get_variable(name, dtype: nil, shape: nil, initializer: nil, trainable: true, collections: nil)
+  def get_variable(name, dtype: nil, shape: nil, initializer: nil, trainable: true, collections: nil, validate_shape: false)
     get_variable_scope.get_variable(name, dtype: dtype, shape: shape, initializer: initializer, trainable: trainable, collections: collections)
   end
 
@@ -184,6 +189,8 @@ module TensorStream
     TensorStream.get_default_graph.get_collection(TensorStream::GraphKeys::TRAINABLE_VARIABLES)
   end
 
+  ##
+  # Sets random seed to use for the default graph
   def set_random_seed(seed)
     TensorStream.get_default_graph.random_seed = seed
   end
```
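Two smaller changes here: `TensorStream.variable` now picks the variable's dtype from the Ruby value when `dtype:` is not given, and `colocate_with` is introduced as a no-op context (it simply yields) so code ported from TensorFlow, like the slot creator above, can call it. A short sketch:

```ruby
ts = TensorStream

a = ts.variable(1,    name: 'a')   # Integer -> :int32
b = ts.variable(1.0,  name: 'b')   # Float   -> :float32
c = ts.variable('hi', name: 'c')   # String  -> :string
d = ts.variable(2, dtype: :int64, name: 'd')  # explicit dtype still wins

# colocate_with is currently a no-op: the block is just executed
ts.colocate_with(a.op) do
  e = ts.variable(0.0, name: 'e')
end
```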
data/lib/tensor_stream/variable.rb CHANGED

```diff
@@ -14,10 +14,9 @@ module TensorStream
       scope_name = variable_scope ? variable_scope.name : nil
       variable_scope_initializer = variable_scope ? variable_scope.initializer : nil
       @name = [scope_name, options[:name] || build_name].compact.reject(&:empty?).join('/')
-      @initalizer_tensor = options[:initializer]
-      if shape.nil? && @initalizer_tensor && @initalizer_tensor.shape
-        shape = @initalizer_tensor.shape.shape
-      end
+      @initalizer_tensor = options[:initializer] || variable_scope_initializer || TensorStream.glorot_uniform_initializer
+      shape = @initalizer_tensor.shape.shape if shape.nil? && @initalizer_tensor && @initalizer_tensor.shape
+
       @shape = TensorShape.new(shape, rank)
       @trainable = options.fetch(:trainable, true)
       @graph.add_variable(self, options)
@@ -34,22 +33,30 @@ module TensorStream
       assign(init_op)
     end
 
+    def initialized_value
+      init_op = @initalizer_tensor.op
+      init_op.shape = @shape || init_op.shape
+      init_op.data_type = @data_type || init_op.data_type
+      init_op
+    end
+
     def assign(value, name: nil)
       _a, value = TensorStream.check_data_types(self, value)
-
+      _op(:assign, self, value, name: name)
     end
 
     def read_value
-      if buffer
-        @value = buffer.to_ruby
-      end
-
+      @value = buffer.to_ruby if buffer
       @value
     end
 
-    def assign_add(value)
+    def assign_add(value, name: nil)
       _a, value = TensorStream.check_data_types(self, value)
-
+      _op(:assign_add, self, value, data_type: data_type, name: name)
+    end
+
+    def op
+      @op ||= _op(:variable, self, data_type: data_type)
     end
 
     def to_math(_tensor, _name_only = false, _max_depth = 99, _unused = 0)
@@ -58,7 +65,7 @@ module TensorStream
 
     def assign_sub(value)
      _a, value = TensorStream.check_data_types(self, value)
-
+      _op(:assign_sub, self, value)
     end
 
     def self.variables_initializer(collection)
```
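On `Variable` itself: `assign_add` gains a `name:` and stamps the data type on the op, a memoized `op` method wraps the variable in a `:variable` op (which `Optimizer#apply_gradients` relies on for its `update_<name>` scopes and `var_key`), and `initialized_value` exposes the initializer op with the variable's shape and dtype applied, used by `create_zeros_slot` when the static shape is unknown. A hedged sketch:

```ruby
ts = TensorStream

counter = ts.variable(0, name: 'counter')

increment = counter.assign_add(1, name: 'increment_counter')  # named :assign_add op

counter.op                 # memoized :variable op; counter.op.name feeds the update scopes
counter.initialized_value  # initializer op carrying counter's shape and data type

ts.session do |sess|
  sess.run(ts.global_variables_initializer)
  sess.run(increment)   # counter is now 1
end
```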