tensor_stream 1.0.0 → 1.0.1

This diff shows the changes between publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the package contents as they appear in their respective public registries.
Files changed (142)
  1. checksums.yaml +4 -4
  2. data/.gitignore +1 -0
  3. data/.rubocop.yml +1 -0
  4. data/Gemfile +1 -1
  5. data/LICENSE.txt +1 -1
  6. data/README.md +34 -34
  7. data/Rakefile +3 -3
  8. data/USAGE_GUIDE.md +235 -0
  9. data/bin/stubgen +20 -0
  10. data/exe/model_utils +2 -2
  11. data/lib/tensor_stream.rb +45 -44
  12. data/lib/tensor_stream/constant.rb +2 -2
  13. data/lib/tensor_stream/control_flow.rb +1 -1
  14. data/lib/tensor_stream/debugging/debugging.rb +2 -2
  15. data/lib/tensor_stream/dynamic_stitch.rb +2 -2
  16. data/lib/tensor_stream/evaluator/base_evaluator.rb +18 -18
  17. data/lib/tensor_stream/evaluator/buffer.rb +1 -1
  18. data/lib/tensor_stream/evaluator/evaluator.rb +2 -2
  19. data/lib/tensor_stream/evaluator/operation_helpers/array_ops_helper.rb +41 -41
  20. data/lib/tensor_stream/evaluator/operation_helpers/math_helper.rb +1 -1
  21. data/lib/tensor_stream/evaluator/ruby/array_ops.rb +39 -39
  22. data/lib/tensor_stream/evaluator/ruby/check_ops.rb +2 -2
  23. data/lib/tensor_stream/evaluator/ruby/images_ops.rb +18 -18
  24. data/lib/tensor_stream/evaluator/ruby/math_ops.rb +13 -14
  25. data/lib/tensor_stream/evaluator/ruby/nn_ops.rb +33 -36
  26. data/lib/tensor_stream/evaluator/ruby/random_ops.rb +20 -21
  27. data/lib/tensor_stream/evaluator/ruby_evaluator.rb +36 -49
  28. data/lib/tensor_stream/exceptions.rb +1 -1
  29. data/lib/tensor_stream/generated_stub/ops.rb +691 -0
  30. data/lib/tensor_stream/generated_stub/stub_file.erb +24 -0
  31. data/lib/tensor_stream/graph.rb +18 -18
  32. data/lib/tensor_stream/graph_builder.rb +17 -17
  33. data/lib/tensor_stream/graph_deserializers/protobuf.rb +97 -97
  34. data/lib/tensor_stream/graph_deserializers/yaml_loader.rb +1 -1
  35. data/lib/tensor_stream/graph_keys.rb +3 -3
  36. data/lib/tensor_stream/graph_serializers/graphml.rb +33 -33
  37. data/lib/tensor_stream/graph_serializers/packer.rb +23 -23
  38. data/lib/tensor_stream/graph_serializers/pbtext.rb +38 -42
  39. data/lib/tensor_stream/graph_serializers/serializer.rb +3 -2
  40. data/lib/tensor_stream/graph_serializers/yaml.rb +5 -5
  41. data/lib/tensor_stream/helpers/infer_shape.rb +56 -56
  42. data/lib/tensor_stream/helpers/op_helper.rb +8 -9
  43. data/lib/tensor_stream/helpers/string_helper.rb +15 -15
  44. data/lib/tensor_stream/helpers/tensor_mixins.rb +17 -17
  45. data/lib/tensor_stream/images.rb +1 -1
  46. data/lib/tensor_stream/initializer.rb +1 -1
  47. data/lib/tensor_stream/math_gradients.rb +28 -187
  48. data/lib/tensor_stream/monkey_patches/array.rb +1 -1
  49. data/lib/tensor_stream/monkey_patches/float.rb +1 -1
  50. data/lib/tensor_stream/monkey_patches/integer.rb +1 -1
  51. data/lib/tensor_stream/monkey_patches/op_patch.rb +5 -5
  52. data/lib/tensor_stream/monkey_patches/patch.rb +1 -1
  53. data/lib/tensor_stream/nn/nn_ops.rb +17 -15
  54. data/lib/tensor_stream/op_maker.rb +180 -0
  55. data/lib/tensor_stream/operation.rb +17 -17
  56. data/lib/tensor_stream/ops.rb +95 -384
  57. data/lib/tensor_stream/ops/add.rb +23 -0
  58. data/lib/tensor_stream/ops/argmax.rb +14 -0
  59. data/lib/tensor_stream/ops/argmin.rb +14 -0
  60. data/lib/tensor_stream/ops/case.rb +17 -0
  61. data/lib/tensor_stream/ops/cast.rb +15 -0
  62. data/lib/tensor_stream/ops/ceil.rb +15 -0
  63. data/lib/tensor_stream/ops/const.rb +0 -0
  64. data/lib/tensor_stream/ops/cos.rb +10 -0
  65. data/lib/tensor_stream/ops/div.rb +21 -0
  66. data/lib/tensor_stream/ops/equal.rb +15 -0
  67. data/lib/tensor_stream/ops/expand_dims.rb +17 -0
  68. data/lib/tensor_stream/ops/fill.rb +19 -0
  69. data/lib/tensor_stream/ops/floor.rb +15 -0
  70. data/lib/tensor_stream/ops/floor_div.rb +15 -0
  71. data/lib/tensor_stream/ops/greater.rb +11 -0
  72. data/lib/tensor_stream/ops/greater_equal.rb +11 -0
  73. data/lib/tensor_stream/ops/less_equal.rb +15 -0
  74. data/lib/tensor_stream/ops/log.rb +14 -0
  75. data/lib/tensor_stream/ops/mat_mul.rb +60 -0
  76. data/lib/tensor_stream/ops/max.rb +15 -0
  77. data/lib/tensor_stream/ops/min.rb +15 -0
  78. data/lib/tensor_stream/ops/mod.rb +23 -0
  79. data/lib/tensor_stream/ops/mul.rb +21 -0
  80. data/lib/tensor_stream/ops/negate.rb +14 -0
  81. data/lib/tensor_stream/ops/ones_like.rb +19 -0
  82. data/lib/tensor_stream/ops/pow.rb +25 -0
  83. data/lib/tensor_stream/ops/prod.rb +60 -0
  84. data/lib/tensor_stream/ops/random_uniform.rb +18 -0
  85. data/lib/tensor_stream/ops/range.rb +20 -0
  86. data/lib/tensor_stream/ops/rank.rb +13 -0
  87. data/lib/tensor_stream/ops/reshape.rb +24 -0
  88. data/lib/tensor_stream/ops/round.rb +15 -0
  89. data/lib/tensor_stream/ops/shape.rb +14 -0
  90. data/lib/tensor_stream/ops/sigmoid.rb +10 -0
  91. data/lib/tensor_stream/ops/sign.rb +12 -0
  92. data/lib/tensor_stream/ops/sin.rb +10 -0
  93. data/lib/tensor_stream/ops/size.rb +16 -0
  94. data/lib/tensor_stream/ops/sub.rb +24 -0
  95. data/lib/tensor_stream/ops/sum.rb +27 -0
  96. data/lib/tensor_stream/ops/tan.rb +12 -0
  97. data/lib/tensor_stream/ops/tanh.rb +10 -0
  98. data/lib/tensor_stream/ops/tile.rb +19 -0
  99. data/lib/tensor_stream/ops/zeros.rb +15 -0
  100. data/lib/tensor_stream/placeholder.rb +2 -2
  101. data/lib/tensor_stream/profile/report_tool.rb +3 -3
  102. data/lib/tensor_stream/session.rb +36 -38
  103. data/lib/tensor_stream/tensor.rb +2 -2
  104. data/lib/tensor_stream/tensor_shape.rb +4 -4
  105. data/lib/tensor_stream/train/adadelta_optimizer.rb +8 -8
  106. data/lib/tensor_stream/train/adagrad_optimizer.rb +3 -3
  107. data/lib/tensor_stream/train/adam_optimizer.rb +11 -11
  108. data/lib/tensor_stream/train/learning_rate_decay.rb +2 -2
  109. data/lib/tensor_stream/train/momentum_optimizer.rb +7 -7
  110. data/lib/tensor_stream/train/optimizer.rb +9 -9
  111. data/lib/tensor_stream/train/rmsprop_optimizer.rb +16 -16
  112. data/lib/tensor_stream/train/saver.rb +14 -14
  113. data/lib/tensor_stream/train/slot_creator.rb +6 -6
  114. data/lib/tensor_stream/train/utils.rb +12 -12
  115. data/lib/tensor_stream/trainer.rb +10 -10
  116. data/lib/tensor_stream/types.rb +1 -1
  117. data/lib/tensor_stream/utils.rb +33 -32
  118. data/lib/tensor_stream/utils/freezer.rb +5 -5
  119. data/lib/tensor_stream/variable.rb +5 -5
  120. data/lib/tensor_stream/variable_scope.rb +1 -1
  121. data/lib/tensor_stream/version.rb +1 -1
  122. data/samples/{iris.data → datasets/iris.data} +0 -0
  123. data/samples/jupyter_notebooks/linear_regression.ipynb +463 -0
  124. data/samples/{iris.rb → neural_networks/iris.rb} +21 -23
  125. data/samples/{mnist_data.rb → neural_networks/mnist_data.rb} +8 -8
  126. data/samples/neural_networks/raw_neural_net_sample.rb +112 -0
  127. data/samples/{rnn.rb → neural_networks/rnn.rb} +28 -31
  128. data/samples/{nearest_neighbor.rb → others/nearest_neighbor.rb} +12 -12
  129. data/samples/regression/linear_regression.rb +63 -0
  130. data/samples/{logistic_regression.rb → regression/logistic_regression.rb} +14 -16
  131. data/tensor_stream.gemspec +9 -8
  132. metadata +89 -19
  133. data/data_1.json +0 -4764
  134. data/data_2.json +0 -4764
  135. data/data_actual.json +0 -28
  136. data/data_expected.json +0 -28
  137. data/data_input.json +0 -28
  138. data/samples/error.graphml +0 -2755
  139. data/samples/gradient_sample.graphml +0 -1255
  140. data/samples/linear_regression.rb +0 -69
  141. data/samples/multigpu.rb +0 -73
  142. data/samples/raw_neural_net_sample.rb +0 -112
data/lib/tensor_stream/tensor.rb
@@ -1,4 +1,4 @@
-require 'ostruct'
+require "ostruct"
 
 module TensorStream
   # Base class that defines a tensor like interface
@@ -8,7 +8,7 @@ module TensorStream
 
     attr_reader :graph, :value
     attr_accessor :name, :data_type, :shape, :rank, :native_buffer, :is_const,
-                  :internal, :source, :given_name, :outputs, :op
+      :internal, :source, :given_name, :outputs, :op
 
     def inspect
     end
data/lib/tensor_stream/tensor_shape.rb
@@ -11,9 +11,9 @@ module TensorStream
     def to_s
       return "?" if @shape.nil?
 
-      dimensions = @shape.collect do |r|
+      dimensions = @shape.collect { |r|
         "Dimension(#{r})"
-      end.join(',')
+      }.join(",")
       "TensorShape([#{dimensions}])"
     end
 
@@ -53,14 +53,14 @@ module TensorStream
       reversed_a = shape_a.reverse
       reversed_b = shape_b.reverse
 
-      reversed_a.each_with_index.collect do |s, index|
+      reversed_a.each_with_index.collect { |s, index|
        next s if index >= reversed_b.size
        next nil if s.nil? || reversed_b[index].nil?
        next nil if s.is_a?(Tensor) || reversed_b[index].is_a?(Tensor)
        next reversed_b[index] if reversed_b[index] > s

        s
-      end.reverse
+      }.reverse
     end
 
     def self.reshape(arr, new_shape)
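
For reference, the collect block in the hunk above derives a broadcast shape by walking both dimension lists from the trailing axis: an unknown (nil) or symbolic (Tensor) dimension makes that axis unknown, and otherwise the larger size wins. A minimal standalone sketch of the same rule, assuming shapes are plain Ruby arrays (broadcast_shape is a hypothetical name, not a gem API):

    # Standalone sketch of the trailing-axis broadcast rule used above.
    # `broadcast_shape` is a hypothetical helper, not part of tensor_stream.
    def broadcast_shape(shape_a, shape_b)
      ra = shape_a.reverse
      rb = shape_b.reverse
      ra.each_with_index.collect { |s, i|
        next s if i >= rb.size            # axis present only in shape_a
        next nil if s.nil? || rb[i].nil?  # unknown dimensions stay unknown
        next rb[i] if rb[i] > s           # the larger size wins (covers 1 vs n)
        s
      }.reverse
    end

    broadcast_shape([3, 1], [5])   # => [3, 5]
    broadcast_shape([nil, 4], [4]) # => [nil, 4]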
data/lib/tensor_stream/train/adadelta_optimizer.rb
@@ -38,14 +38,14 @@ module TensorStream
         accum = get_slot(var, "accum")
         accum_update = get_slot(var, "accum_update")
         _op(:apply_adadelta,
-            var,
-            accum,
-            accum_update,
-            TensorStream.cast(@learning_rate_tensor, var.data_type),
-            TensorStream.cast(@rho_t, var.data_type),
-            TensorStream.cast(@epsilon_t, var.data_type),
-            grad,
-            use_locking: @use_locking)
+          var,
+          accum,
+          accum_update,
+          TensorStream.cast(@learning_rate_tensor, var.data_type),
+          TensorStream.cast(@rho_t, var.data_type),
+          TensorStream.cast(@epsilon_t, var.data_type),
+          grad,
+          use_locking: @use_locking)
       end
     end
   end
data/lib/tensor_stream/train/adagrad_optimizer.rb
@@ -7,7 +7,7 @@ module TensorStream
       attr_accessor :learning_rate
 
       def initialize(learning_rate, initial_accumulator_value = 0.1,
-                     use_locking: false, name: "Adagrad")
+        use_locking: false, name: "Adagrad")
         @learning_rate = learning_rate
         @initial_accumulator_value = initial_accumulator_value
         @learning_rate_tensor = nil
@@ -38,8 +38,8 @@ module TensorStream
       def apply_dense(grad, var)
         acc = get_slot(var, "accumulator")
         _op(:apply_adagrad,
-            var, acc, TensorStream.cast(@learning_rate_tensor, var.data_type),
-            grad, use_locking: @use_locking)
+          var, acc, TensorStream.cast(@learning_rate_tensor, var.data_type),
+          grad, use_locking: @use_locking)
       end
     end
   end
data/lib/tensor_stream/train/adam_optimizer.rb
@@ -22,7 +22,7 @@ module TensorStream
       # name: Optional name for the operations created when applying gradients.
       # Defaults to "Adam".
       def initialize(learning_rate = 0.001, beta1 = 0.9, beta2 = 0.999, epsilon = 1e-8,
-                     use_locking: false, name: "Adam")
+        use_locking: false, name: "Adam")
         @learning_rate = learning_rate
         @beta1 = beta1
         @beta2 = beta2
@@ -44,7 +44,7 @@ module TensorStream
       def get_beta_accumulators
         graph = TensorStream.get_default_graph
         [get_non_slot_variable("beta1_power", graph: graph),
-         get_non_slot_variable("beta2_power", graph: graph)]
+          get_non_slot_variable("beta2_power", graph: graph),]
       end
 
       def prepare
@@ -76,14 +76,14 @@ module TensorStream
         v = get_slot(var, "v")
         beta1_power, beta2_power = get_beta_accumulators
         _op(:apply_adam,
-            var, m, v,
-            TensorStream.cast(beta1_power, var.data_type),
-            TensorStream.cast(beta2_power, var.data_type),
-            TensorStream.cast(@lr_t, var.data_type),
-            TensorStream.cast(@beta1_t, var.data_type),
-            TensorStream.cast(@beta2_t, var.data_type),
-            TensorStream.cast(@epsilon_t, var.data_type),
-            grad, use_locking: @use_locking)
+          var, m, v,
+          TensorStream.cast(beta1_power, var.data_type),
+          TensorStream.cast(beta2_power, var.data_type),
+          TensorStream.cast(@lr_t, var.data_type),
+          TensorStream.cast(@beta1_t, var.data_type),
+          TensorStream.cast(@beta2_t, var.data_type),
+          TensorStream.cast(@epsilon_t, var.data_type),
+          grad, use_locking: @use_locking)
       end
 
       def finish(update_ops, name_scope)
@@ -99,4 +99,4 @@ module TensorStream
       end
     end
   end
-end
+end
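
The optimizer hunks in this release are re-indentation only; behavior is unchanged. For orientation, the optimizers are used TensorFlow-style through minimize. A minimal sketch, assuming the usual tensor_stream session API (the toy loss is illustrative):

    require "tensor_stream"

    ts = TensorStream
    w = ts.variable(0.0, name: "w")
    loss = (w - 3.0) ** 2  # toy quadratic, minimum at w = 3
    train_step = TensorStream::Train::AdamOptimizer.new(0.001).minimize(loss)

    ts.session do |sess|
      sess.run(ts.global_variables_initializer)
      1000.times { sess.run(train_step) }
    end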
data/lib/tensor_stream/train/learning_rate_decay.rb
@@ -12,7 +12,7 @@ module TensorStream
     def exponential_decay(learning_rate, global_step, decay_steps, decay_rate, staircase: false, name: nil)
       raise TensorStream::ValueError, "global_step is required for exponential_decay." if global_step.nil?
 
-      name_scope(name, default: 'ExponentialDecay', values: [learning_rate, global_step, decay_steps, decay_rate]) do
+      name_scope(name, default: "ExponentialDecay", values: [learning_rate, global_step, decay_steps, decay_rate]) do
         learning_rate = convert_to_tensor(learning_rate, name: "learning_rate")
         data_type = learning_rate.data_type
         decay_steps = cast(decay_steps, data_type)
@@ -26,4 +26,4 @@ module TensorStream
     end
   end
-end
+end
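
exponential_decay implements the standard schedule learning_rate * decay_rate ** (global_step / decay_steps), with the exponent floored to an integer when staircase: true. A short usage sketch, assuming the helper is reachable through TensorStream.train like its TensorFlow counterpart:

    ts = TensorStream
    global_step = ts.train.create_global_step
    # 0.1 decayed by a factor of 0.96 every 1000 steps:
    learning_rate = ts.train.exponential_decay(0.1, global_step, 1000, 0.96, staircase: true)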
data/lib/tensor_stream/train/momentum_optimizer.rb
@@ -13,7 +13,7 @@ module TensorStream
       # name: Optional name prefix
       # use_nesterov: boolean - Flag that indicates if nesterov momentum is to be used. http://jmlr.org/proceedings/papers/v28/sutskever13.pdf
       # use_locking: boolean - filler argument for compatibility, not used at the moment
-      def initialize(learning_rate, momentum, name: 'momentum', use_nesterov: false, use_locking: false)
+      def initialize(learning_rate, momentum, name: "momentum", use_nesterov: false, use_locking: false)
         @learning_rate = learning_rate
         @momentum = momentum
         @use_nesterov = use_nesterov
@@ -37,12 +37,12 @@ module TensorStream
         mom = get_slot(var, "momentum")
 
         _op(:apply_momentum, var, mom,
-            TensorStream.cast(@learning_rate_tensor, var.data_type),
-            grad,
-            TensorStream.cast(@momentum_tensor, var.data_type),
-            use_locking: @use_locking,
-            use_nesterov: @use_nesterov)
+          TensorStream.cast(@learning_rate_tensor, var.data_type),
+          grad,
+          TensorStream.cast(@momentum_tensor, var.data_type),
+          use_locking: @use_locking,
+          use_nesterov: @use_nesterov)
       end
     end
   end
-end
+end
data/lib/tensor_stream/train/optimizer.rb
@@ -29,11 +29,11 @@ module TensorStream
         create_slots(varlist)
         TensorStream.name_scope(name, default: @name) do
           prepare
-          apply_ops = grads_and_vars.map do |grad, var|
+          apply_ops = grads_and_vars.map { |grad, var|
             TensorStream.name_scope("update_" + var.op.name) do
               apply_dense(grad, var)
             end
-          end
+          }
 
           if global_step.nil?
             finish(apply_ops, name)
@@ -51,14 +51,14 @@ module TensorStream
       # This is the first part of minimize(). It returns a list of (gradient, variable) pairs where "gradient" is the gradient for "variable".
       def compute_gradients(loss, var_list: nil, grad_loss: nil)
         trainable_vars = if var_list
-                           raise "var_list must be an array" unless var_list.is_a?(Array)
+          raise "var_list must be an array" unless var_list.is_a?(Array)
 
-                           var_list.each_with_index { |var, index| raise "var #{index} not a Variable" unless var.is_a?(Variable) }
+          var_list.each_with_index { |var, index| raise "var #{index} not a Variable" unless var.is_a?(Variable) }
 
-                           var_list
-                         else
-                           loss.graph.get_collection(TensorStream::GraphKeys::TRAINABLE_VARIABLES)
-                         end
+          var_list
+        else
+          loss.graph.get_collection(TensorStream::GraphKeys::TRAINABLE_VARIABLES)
+        end
         all_grads = grad_loss || TensorStream.gradients(loss, trainable_vars)
         trainable_vars.each_with_index.collect do |var, index|
           [all_grads[index], var]
@@ -162,4 +162,4 @@ module TensorStream
       end
     end
   end
-end
+end
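
As the comment in the hunk above notes, compute_gradients is the first half of minimize; apply_gradients is the second, and splitting them lets callers transform the (gradient, variable) pairs in between. A hedged sketch of that pattern (loss is assumed to be defined already, and the clip_by_value step is illustrative):

    ts = TensorStream
    optimizer = TensorStream::Train::GradientDescentOptimizer.new(0.01)
    grads_and_vars = optimizer.compute_gradients(loss)
    # Transform gradients before applying them, e.g. clip them to [-1, 1]:
    clipped = grads_and_vars.map { |grad, var| [ts.clip_by_value(grad, -1.0, 1.0), var] }
    train_step = optimizer.apply_gradients(clipped)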
data/lib/tensor_stream/train/rmsprop_optimizer.rb
@@ -12,7 +12,7 @@ module TensorStream
       #
       # [paper](http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf).
       def initialize(learning_rate, decay = 0.9, momentum = 0.0, epsilon = 1e-10, centered: false,
-                     use_locking: false, name: "RMSProp")
+        use_locking: false, name: "RMSProp")
        @learning_rate = learning_rate
        @decay = decay
        @momentum = momentum
@@ -46,10 +46,10 @@ module TensorStream
        # Create slots for the first and second moments.
        var_list.each do |v|
          init_rms = if v.shape.known?
-                       TensorStream.ones_initializer(dtype: v.data_type)
-                     else
-                       TensorStream.ones_like(v)
-                     end
+            TensorStream.ones_initializer(dtype: v.data_type)
+          else
+            TensorStream.ones_like(v)
+          end

          get_or_make_slot_with_initializer(v, init_rms, v.shape, v.data_type, "rms", @name)

@@ -65,20 +65,20 @@ module TensorStream
        if @centered
          mg = get_slot(var, "mg")
          _op(:apply_centered_rms_prop, var, mg, rms, mom,
-             TensorStream.cast(@learning_rate_tensor, var.data_type),
-             TensorStream.cast(@decay_tensor, var.data_type),
-             TensorStream.cast(@momentum_tensor, var.data_type),
-             TensorStream.cast(@epsilon_tensor, var.data_type),
-             grad, use_locking: @use_locking)
+            TensorStream.cast(@learning_rate_tensor, var.data_type),
+            TensorStream.cast(@decay_tensor, var.data_type),
+            TensorStream.cast(@momentum_tensor, var.data_type),
+            TensorStream.cast(@epsilon_tensor, var.data_type),
+            grad, use_locking: @use_locking)
        else
          _op(:apply_rms_prop, var, rms, mom,
-             TensorStream.cast(@learning_rate_tensor, var.data_type),
-             TensorStream.cast(@decay_tensor, var.data_type),
-             TensorStream.cast(@momentum_tensor, var.data_type),
-             TensorStream.cast(@epsilon_tensor, var.data_type),
-             grad, use_locking: @use_locking)
+            TensorStream.cast(@learning_rate_tensor, var.data_type),
+            TensorStream.cast(@decay_tensor, var.data_type),
+            TensorStream.cast(@momentum_tensor, var.data_type),
+            TensorStream.cast(@epsilon_tensor, var.data_type),
+            grad, use_locking: @use_locking)
        end
      end
    end
  end
-end
+end
data/lib/tensor_stream/train/saver.rb
@@ -1,4 +1,4 @@
-require 'json'
+require "json"
 require "zlib"
 
 module TensorStream
@@ -11,18 +11,18 @@ module TensorStream
        graph = TensorStream::Graph.get_default_graph
        vars = graph.get_collection(GraphKeys::GLOBAL_VARIABLES)
 
-        @filename = graph['ts_filename'] || TensorStream.placeholder(:string, name: 'ts_filename', shape: [])
+        @filename = graph["ts_filename"] || TensorStream.placeholder(:string, name: "ts_filename", shape: [])
 
        @save_op = _op(:save_ts, @filename, *vars)
        @restore_op = _op(:restore_ts, @filename, *vars.map(&:name))
      end
 
      def save(session, outputdir, global_step: nil,
-              latest_filename: nil,
-              meta_graph_suffix: 'meta',
-              write_meta_graph: true,
-              write_state: true,
-              strip_default_attrs: false)
+        latest_filename: nil,
+        meta_graph_suffix: "meta",
+        write_meta_graph: true,
+        write_state: true,
+        strip_default_attrs: false)
        graph = TensorStream::Graph.get_default_graph
        vars = graph.get_collection(GraphKeys::GLOBAL_VARIABLES)
 
@@ -31,10 +31,10 @@ module TensorStream
        gs = eval_global_step(session, global_step)
 
        FileUtils.mkdir_p(outputdir)
-        basename = 'model'
-        File.write(File.join(outputdir, "#{basename}.meta"), { "gs" => gs }.to_json)
-        new_filename = File.join(outputdir, [basename, gs, '.ckpt'].compact.join('-'))
-        session.run(@save_op, feed_dict: { @filename => new_filename })
+        basename = "model"
+        File.write(File.join(outputdir, "#{basename}.meta"), {"gs" => gs}.to_json)
+        new_filename = File.join(outputdir, [basename, gs, ".ckpt"].compact.join("-"))
+        session.run(@save_op, feed_dict: {@filename => new_filename})
 
        if write_meta_graph
          graph_filename = "#{basename}.yaml"
@@ -48,10 +48,10 @@ module TensorStream
        return unless File.exist?(meta_file)
 
        meta_data = JSON.parse(File.read(meta_file))
-        gs = meta_data['gs']
-        filename = File.join(modelpath, ['model', gs, '.ckpt'].compact.join('-'))
+        gs = meta_data["gs"]
+        filename = File.join(modelpath, ["model", gs, ".ckpt"].compact.join("-"))
 
-        session.run(@restore_op, feed_dict: { @filename => filename })
+        session.run(@restore_op, feed_dict: {@filename => filename})
      end
 
      private
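
From the bodies above, save writes <outputdir>/model.meta (a small JSON carrying the global step) plus a checkpoint named ["model", gs, ".ckpt"].compact.join("-"), e.g. model-100-.ckpt when the global step is 100, and restore reverses the lookup via model.meta. A usage sketch, assuming the Saver construction and call shapes shown in this diff:

    ts = TensorStream
    saver = TensorStream::Train::Saver.new
    ts.session do |sess|
      sess.run(ts.global_variables_initializer)
      saver.save(sess, "checkpoints", global_step: 100)  # checkpoints/model.meta + checkpoints/model-100-.ckpt
      saver.restore(sess, "checkpoints")
    end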
data/lib/tensor_stream/train/slot_creator.rb
@@ -54,14 +54,14 @@ module TensorStream
        dtype = primary.data_type if dtype.nil?
        slot_shape = primary.shape
        slot_shape = if slot_shape.fully_defined?
-                       slot_shape.shape
-                     else
-                       TensorStream.shape(primary.initialized_value)
-                     end
+          slot_shape.shape
+        else
+          TensorStream.shape(primary.initialized_value)
+        end
        val = TensorStream.zeros(slot_shape, dtype: dtype)
        create_slot(primary, val, name,
-                    colocate_with_primary: colocate_with_primary)
+          colocate_with_primary: colocate_with_primary)
      end
    end
  end
-end
+end
data/lib/tensor_stream/train/utils.rb
@@ -11,24 +11,24 @@ module TensorStream
          initializer: TensorStream.zeros_initializer,
          trainable: false,
          collections: [TensorStream::GraphKeys::GLOBAL_VARIABLES,
-                        TensorStream::GraphKeys::GLOBAL_STEP])
+            TensorStream::GraphKeys::GLOBAL_STEP,])
      end
 
      def get_global_step(graph = nil)
        target_graph = graph || TensorStream.get_default_graph
        global_step_tensors = target_graph.get_collection(TensorStream::GraphKeys::GLOBAL_STEP)
        global_step_tensor = if global_step_tensors.nil? || global_step_tensors.empty?
-                               begin
-                                 target_graph.get_tensor_by_name('global_step:0')
-                               rescue TensorStream::KeyError
-                                 nil
-                               end
-                             elsif global_step_tensors.size == 1
-                               global_step_tensors[0]
-                             else
-                               TensorStream.logger.error("Multiple tensors in global_step collection.")
-                               nil
-                             end
+          begin
+            target_graph.get_tensor_by_name("global_step:0")
+          rescue TensorStream::KeyError
+            nil
+          end
+        elsif global_step_tensors.size == 1
+          global_step_tensors[0]
+        else
+          TensorStream.logger.error("Multiple tensors in global_step collection.")
+          nil
+        end
        global_step_tensor
      end
    end
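
get_global_step above resolves in a fixed order: a sole entry in the GLOBAL_STEP collection wins; otherwise the graph is searched for a tensor literally named "global_step:0"; several collection entries log an error and yield nil. A short sketch, assuming both helpers are exposed through TensorStream.train:

    ts = TensorStream
    step = ts.train.get_global_step      # nil when no global step exists yet
    step ||= ts.train.create_global_step # int64 variable registered in GLOBAL_STEP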
data/lib/tensor_stream/trainer.rb
@@ -1,13 +1,13 @@
-require 'tensor_stream/train/slot_creator'
-require 'tensor_stream/train/optimizer'
-require 'tensor_stream/train/gradient_descent_optimizer'
-require 'tensor_stream/train/momentum_optimizer'
-require 'tensor_stream/train/adam_optimizer'
-require 'tensor_stream/train/adadelta_optimizer'
-require 'tensor_stream/train/adagrad_optimizer'
-require 'tensor_stream/train/rmsprop_optimizer'
-require 'tensor_stream/train/saver'
-require 'tensor_stream/train/learning_rate_decay'
+require "tensor_stream/train/slot_creator"
+require "tensor_stream/train/optimizer"
+require "tensor_stream/train/gradient_descent_optimizer"
+require "tensor_stream/train/momentum_optimizer"
+require "tensor_stream/train/adam_optimizer"
+require "tensor_stream/train/adadelta_optimizer"
+require "tensor_stream/train/adagrad_optimizer"
+require "tensor_stream/train/rmsprop_optimizer"
+require "tensor_stream/train/saver"
+require "tensor_stream/train/learning_rate_decay"
 
 module TensorStream
   module Trainer
data/lib/tensor_stream/types.rb
@@ -1,4 +1,4 @@
-require 'ostruct'
+require "ostruct"
 
 module TensorStream
   # Convenience class for specifying valid data_types
data/lib/tensor_stream/utils.rb
@@ -33,12 +33,12 @@ module TensorStream
   # Returns:
   # - An array containing the names of those devices
   def list_local_devices
-    local_name = 'job:localhost'
-    TensorStream::Evaluator.evaluators.collect do |k, v|
+    local_name = "job:localhost"
+    TensorStream::Evaluator.evaluators.collect { |k, v|
       v[:class].query_supported_devices.collect do |device_str|
-        [local_name, "ts:#{k}:#{device_str.name}"].join('/')
+        [local_name, "ts:#{k}:#{device_str.name}"].join("/")
       end
-    end.flatten
+    }.flatten
   end
 
   ##
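
Given the join above, each device string has the form job:localhost/ts:<evaluator>:<device>. For example (hedged; the exact entries depend on which evaluator gems are installed):

    TensorStream.list_local_devices
    # => e.g. ["job:localhost/ts:ruby:cpu"] for the stock Ruby evaluator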
@@ -51,17 +51,17 @@ module TensorStream
      name: name,
      graph: graph,
      dtype: dtype,
-      trainable: trainable
+      trainable: trainable,
    }
    tensor = if value.is_a?(String)
-               i_var(dtype || :string, 0, [], get_variable_scope, common_options)
-             elsif value.is_a?(Integer)
-               i_var(dtype || :int32, 0, [], get_variable_scope, common_options)
-             elsif value.is_a?(Float)
-               i_var(dtype || :float32, 0, [], get_variable_scope, common_options)
-             else
-               i_var(dtype || :float32, 0, nil, get_variable_scope, common_options)
-             end
+      i_var(dtype || :string, 0, [], get_variable_scope, common_options)
+    elsif value.is_a?(Integer)
+      i_var(dtype || :int32, 0, [], get_variable_scope, common_options)
+    elsif value.is_a?(Float)
+      i_var(dtype || :float32, 0, [], get_variable_scope, common_options)
+    else
+      i_var(dtype || :float32, 0, nil, get_variable_scope, common_options)
+    end
    op.set_input(0, tensor.op)
    Graph.get_default_graph.add_node(op)
    tensor
@@ -70,7 +70,7 @@ module TensorStream
   ##
   # Defines a variable context manager
   def variable_scope(scope = nil, default_name = nil, reuse: nil, initializer: nil)
-    Thread.current[:tensor_stream_variable_scope] ||= [ VariableScope.new ]
+    Thread.current[:tensor_stream_variable_scope] ||= [VariableScope.new]
 
     # uniquenifier
     if scope.nil? && default_name
@@ -117,7 +117,7 @@ module TensorStream
   end
 
   def get_variable_scope
-    if !Thread.current[:tensor_stream_variable_scope]
+    unless Thread.current[:tensor_stream_variable_scope]
      variable_scope = VariableScope.new
      Thread.current[:tensor_stream_variable_scope] = [variable_scope]
      return variable_scope
@@ -127,7 +127,7 @@ module TensorStream
   end
 
   def __v_scope_name
-    Thread.current[:tensor_stream_variable_scope].map(&:name).compact.reject(&:empty?).join('/')
+    Thread.current[:tensor_stream_variable_scope].map(&:name).compact.reject(&:empty?).join("/")
   end
 
   ##
@@ -160,8 +160,8 @@ module TensorStream
    TensorStream::Layers
  end
 
-  def constant(value, dtype: nil, shape: nil, internal: false, name: 'Const')
-    shared_options = { const: true, value: value, name: name, internal: internal }
+  def constant(value, dtype: nil, shape: nil, internal: false, name: "Const")
+    shared_options = {const: true, value: value, name: name, internal: internal}
 
    if value.is_a?(Float)
      TensorStream::Constant.new(dtype || :float32, 0, shape || [], shared_options)
@@ -275,24 +275,25 @@ module TensorStream
    return input unless input.is_a?(Tensor)
    return input if input.data_type.nil?
 
-    raise "#{input.source}: Parameter data type #{input.data_type} passed not in #{types.join(',')}" unless types.include?(input.data_type.to_sym)
+    raise "#{input.source}: Parameter data type #{input.data_type} passed not in #{types.join(",")}" unless types.include?(input.data_type.to_sym)
  end
 
-  def check_data_types(input_a, input_b)
-    if !input_a.is_a?(Tensor) && input_b.is_a?(Tensor)
-      input_a = convert_to_tensor(input_a, dtype: input_b.data_type)
-    elsif !input_b.is_a?(Tensor) && input_a.is_a?(Tensor)
-      input_b = convert_to_tensor(input_b, dtype: input_a.data_type)
-    else
-      input_a = convert_to_tensor(input_a)
-      input_b = convert_to_tensor(input_b)
-    end
+  def check_data_types(*args)
+    unique_types = args.select { |a| a.is_a?(Tensor) }.map { |a| norm_dtype(a.data_type) }.uniq
 
-    if norm_dtype(input_a.data_type) != norm_dtype(input_b.data_type)
-      raise TensorStream::ValueError, "Value Error: Tensor conversion requested dtype #{input_a.data_type} for tensor type #{input_b.data_type}"
+    if unique_types.size > 1
+      raise TensorStream::ValueError, "Value Error: Tensor conversion requested dtypes are different -> #{unique_types}"
    end
 
-    [input_a, input_b]
+    unique_types.first
+  end
+
+  ##
+  # Auto cast ruby constant data types to the same
+  # tensor types of other operands
+  def apply_data_type_coercion(*args)
+    coerced_type = check_data_types(*args)
+    args.map { |a| a.is_a?(Tensor) ? a : convert_to_tensor(a, dtype: coerced_type) }
  end
 
  def norm_dtype(dtype)
@@ -307,4 +308,4 @@ module TensorStream
    end
  end
-end
+end
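
This last hunk is the one behavioral change in the file: check_data_types goes from a two-argument coercion helper to a variadic check that returns the single normalized dtype shared by all Tensor arguments and raises TensorStream::ValueError when tensors disagree, while the new apply_data_type_coercion converts any plain Ruby operands to that dtype. A sketch of the new contract (the top-level call site is illustrative; inside the gem these are helpers used by the op builders):

    ts = TensorStream
    a = ts.constant(1.0)  # float32 tensor
    b = 2                 # plain Ruby integer, not a Tensor

    ts.check_data_types(a, b)                  # => :float32 (non-tensors are ignored)
    a2, b2 = ts.apply_data_type_coercion(a, b) # b2 becomes a float32 constant

    # Two tensors with different dtypes now raise instead of being silently cast:
    # ts.check_data_types(ts.constant(1.0), ts.constant(1))  #=> TensorStream::ValueError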