torch-rb 0.1.4 → 0.1.5

Files changed (39)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +6 -0
  3. data/README.md +5 -3
  4. data/ext/torch/ext.cpp +22 -548
  5. data/ext/torch/extconf.rb +6 -0
  6. data/ext/torch/nn_functions.cpp +595 -0
  7. data/ext/torch/nn_functions.hpp +6 -0
  8. data/ext/torch/templates.hpp +250 -0
  9. data/ext/torch/tensor_functions.cpp +1860 -0
  10. data/ext/torch/tensor_functions.hpp +6 -0
  11. data/ext/torch/torch_functions.cpp +2875 -0
  12. data/ext/torch/torch_functions.hpp +6 -0
  13. data/lib/torch.rb +68 -129
  14. data/lib/torch/ext.bundle +0 -0
  15. data/lib/torch/native/dispatcher.rb +48 -0
  16. data/lib/torch/native/function.rb +78 -0
  17. data/lib/torch/native/generator.rb +149 -0
  18. data/lib/torch/native/native_functions.yaml +6837 -0
  19. data/lib/torch/native/parser.rb +97 -0
  20. data/lib/torch/nn/bce_with_logits_loss.rb +15 -0
  21. data/lib/torch/nn/conv2d.rb +0 -2
  22. data/lib/torch/nn/cosine_embedding_loss.rb +14 -0
  23. data/lib/torch/nn/functional.rb +55 -16
  24. data/lib/torch/nn/hinge_embedding_loss.rb +14 -0
  25. data/lib/torch/nn/identity.rb +1 -0
  26. data/lib/torch/nn/margin_ranking_loss.rb +14 -0
  27. data/lib/torch/nn/module.rb +59 -12
  28. data/lib/torch/nn/multi_label_margin_loss.rb +13 -0
  29. data/lib/torch/nn/multi_label_soft_margin_loss.rb +13 -0
  30. data/lib/torch/nn/multi_margin_loss.rb +17 -0
  31. data/lib/torch/nn/parameter.rb +4 -0
  32. data/lib/torch/nn/rnn.rb +22 -0
  33. data/lib/torch/nn/rnn_base.rb +154 -0
  34. data/lib/torch/nn/smooth_l1_loss.rb +13 -0
  35. data/lib/torch/nn/soft_margin_loss.rb +13 -0
  36. data/lib/torch/nn/triplet_margin_loss.rb +18 -0
  37. data/lib/torch/tensor.rb +19 -19
  38. data/lib/torch/version.rb +1 -1
  39. metadata +26 -2
data/lib/torch/nn/multi_label_soft_margin_loss.rb ADDED
@@ -0,0 +1,13 @@
+ module Torch
+   module NN
+     class MultiLabelSoftMarginLoss < WeightedLoss
+       def initialize(weight: nil, reduction: "mean")
+         super(weight, reduction)
+       end
+
+       def forward(input, target)
+         F.multilabel_soft_margin_loss(input, target, weight: @weight, reduction: @reduction)
+       end
+     end
+   end
+ end
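
For context, a minimal usage sketch of the new class (the multi-hot target values here are illustrative; Torch.tensor is used as elsewhere in the gem):

  loss_fn = Torch::NN::MultiLabelSoftMarginLoss.new(reduction: "mean")
  input  = Torch.tensor([[0.5, -1.0, 2.0]])   # raw scores for 3 labels
  target = Torch.tensor([[1.0, 0.0, 1.0]])    # multi-hot targets
  loss = loss_fn.forward(input, target)       # scalar tensor under "mean" reduction
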
data/lib/torch/nn/multi_margin_loss.rb ADDED
@@ -0,0 +1,17 @@
+ module Torch
+   module NN
+     class MultiMarginLoss < WeightedLoss
+       def initialize(p: 1, margin: 1.0, weight: nil, reduction: "mean")
+         super(weight, reduction)
+         raise ArgumentError, "only p == 1 and p == 2 supported" if p != 1 && p != 2
+         raise ArgumentError, "weight must be nil or have one dimension" unless weight.nil? || weight.dim == 1
+         @p = p
+         @margin = margin
+       end
+
+       def forward(input, target)
+         F.multi_margin_loss(input, target, p: @p, margin: @margin, weight: @weight, reduction: @reduction)
+       end
+     end
+   end
+ end
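
The constructor validates its arguments before storing them; a short sketch of how the checks above surface to callers:

  Torch::NN::MultiMarginLoss.new(p: 2, margin: 0.5)   # ok: p must be 1 or 2
  begin
    Torch::NN::MultiMarginLoss.new(p: 3)
  rescue ArgumentError => e
    e.message  # => "only p == 1 and p == 2 supported"
  end
  # a non-nil weight must also be one-dimensional (weight.dim == 1)
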
data/lib/torch/nn/parameter.rb CHANGED
@@ -5,6 +5,10 @@ module Torch
          data = Tensor.new unless data
          Tensor._make_subclass(data, requires_grad)
        end
+
+       def inspect
+         "Parameter containing:\n#{super}"
+       end
      end
    end
  end
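
With the new #inspect, a Parameter prints a header line before the tensor representation produced by super. A sketch (the exact tensor formatting comes from Tensor#inspect and may differ):

  w = Torch::NN::Parameter.new(Torch.ones(2, 2))
  puts w.inspect
  # Parameter containing:
  # tensor([[1.0, 1.0],
  #         [1.0, 1.0]], requires_grad: true)
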
data/lib/torch/nn/rnn.rb ADDED
@@ -0,0 +1,22 @@
+ module Torch
+   module NN
+     class RNN < RNNBase
+       def initialize(*args, **options)
+         if options.key?(:nonlinearity)
+           if options[:nonlinearity] == "tanh"
+             mode = "RNN_TANH"
+           elsif options[:nonlinearity] == "relu"
+             mode = "RNN_RELU"
+           else
+             raise ArgumentError, "Unknown nonlinearity: #{options[:nonlinearity]}"
+           end
+           options.delete(:nonlinearity)
+         else
+           mode = "RNN_TANH"
+         end
+
+         super(mode, *args, **options)
+       end
+     end
+   end
+ end
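
The :nonlinearity option is translated into the mode string that RNNBase expects; a usage sketch of the mapping above:

  Torch::NN::RNN.new(10, 20)                                       # defaults to mode "RNN_TANH"
  Torch::NN::RNN.new(10, 20, num_layers: 2, nonlinearity: "relu")  # mode "RNN_RELU"
  Torch::NN::RNN.new(10, 20, nonlinearity: "sigmoid")              # raises ArgumentError
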
data/lib/torch/nn/rnn_base.rb ADDED
@@ -0,0 +1,154 @@
+ module Torch
+   module NN
+     class RNNBase < Module
+       def initialize(mode, input_size, hidden_size, num_layers: 1, bias: true,
+         batch_first: false, dropout: 0.0, bidirectional: false)
+
+         super()
+         @mode = mode
+         @input_size = input_size
+         @hidden_size = hidden_size
+         @num_layers = num_layers
+         @bias = bias
+         @batch_first = batch_first
+         @dropout = dropout.to_f
+         @bidirectional = bidirectional
+         num_directions = bidirectional ? 2 : 1
+
+         if !dropout.is_a?(Numeric) || !(dropout >= 0 && dropout <= 1)
+           raise ArgumentError, "dropout should be a number in range [0, 1] " +
+             "representing the probability of an element being " +
+             "zeroed"
+         end
+         if dropout > 0 && num_layers == 1
+           warn "dropout option adds dropout after all but last " +
+             "recurrent layer, so non-zero dropout expects " +
+             "num_layers greater than 1, but got dropout=#{dropout} and " +
+             "num_layers=#{num_layers}"
+         end
+
+         gate_size =
+           case mode
+           when "LSTM"
+             4 * hidden_size
+           when "GRU"
+             3 * hidden_size
+           when "RNN_TANH"
+             hidden_size
+           when "RNN_RELU"
+             hidden_size
+           else
+             raise ArgumentError, "Unrecognized RNN mode: #{mode}"
+           end
+
+         @all_weights = []
+         num_layers.times do |layer|
+           num_directions.times do |direction|
+             layer_input_size = layer == 0 ? input_size : hidden_size * num_directions
+
+             w_ih = Parameter.new(Torch::Tensor.new(gate_size, layer_input_size))
+             w_hh = Parameter.new(Torch::Tensor.new(gate_size, hidden_size))
+             b_ih = Parameter.new(Torch::Tensor.new(gate_size))
+             # Second bias vector included for CuDNN compatibility. Only one
+             # bias vector is needed in standard definition.
+             b_hh = Parameter.new(Torch::Tensor.new(gate_size))
+             layer_params = [w_ih, w_hh, b_ih, b_hh]
+
+             suffix = direction == 1 ? "_reverse" : ""
+             param_names = ["weight_ih_l%s%s", "weight_hh_l%s%s"]
+             if bias
+               param_names += ["bias_ih_l%s%s", "bias_hh_l%s%s"]
+             end
+             param_names.map! { |x| x % [layer, suffix] }
+
+             param_names.zip(layer_params) do |name, param|
+               instance_variable_set("@#{name}", param)
+             end
+             @all_weights << param_names
+           end
+         end
+
+         flatten_parameters
+         reset_parameters
+       end
+
+       def flatten_parameters
+         # no-op unless module is on the GPU and cuDNN is enabled
+       end
+
+       def _apply(fn)
+         ret = super
+         flatten_parameters
+         ret
+       end
+
+       def reset_parameters
+         stdv = 1.0 / Math.sqrt(@hidden_size)
+         parameters.each do |weight|
+           Init.uniform!(weight, a: -stdv, b: stdv)
+         end
+       end
+
+       def permute_hidden(hx, permutation)
+         raise NotImplementedYet
+       end
+
+       def forward(input, hx: nil)
+         raise NotImplementedYet
+
+         is_packed = false # TODO isinstance(input, PackedSequence)
+         if is_packed
+           input, batch_sizes, sorted_indices, unsorted_indices = input
+           max_batch_size = batch_sizes[0]
+           max_batch_size = max_batch_size.to_i
+         else
+           batch_sizes = nil
+           max_batch_size = @batch_first ? input.size(0) : input.size(1)
+           sorted_indices = nil
+           unsorted_indices = nil
+         end
+
+         if hx.nil?
+           num_directions = @bidirectional ? 2 : 1
+           hx = Torch.zeros(@num_layers * num_directions, max_batch_size,
+             @hidden_size, dtype: input.dtype, device: input.device)
+         else
+           # Each batch of the hidden state should match the input sequence that
+           # the user believes he/she is passing in.
+           hx = permute_hidden(hx, sorted_indices)
+         end
+
+         check_forward_args(input, hx, batch_sizes)
+         _rnn_impls = {
+           "RNN_TANH" => Torch.method(:_rnn_tanh),
+           "RNN_RELU" => Torch.method(:_rnn_relu)
+         }
+         _impl = _rnn_impls[@mode]
+         if batch_sizes.nil?
+           result = _impl.call(input, hx, _get_flat_weights, @bias, @num_layers,
+             @dropout, @training, @bidirectional, @batch_first)
+         else
+           result = _impl.call(input, batch_sizes, hx, _get_flat_weights, @bias,
+             @num_layers, @dropout, @training, @bidirectional)
+         end
+         output = result[0]
+         hidden = result[1]
+
+         if is_packed
+           raise NotImplementedYet
+           # output = PackedSequence(output, batch_sizes, sorted_indices, unsorted_indices)
+         end
+         [output, permute_hidden(hidden, unsorted_indices)]
+       end
+
+       # TODO add more parameters
+       def extra_inspect
+         s = String.new("%{input_size}, %{hidden_size}")
+         if @num_layers != 1
+           s += ", num_layers: %{num_layers}"
+         end
+         format(s, input_size: @input_size, hidden_size: @hidden_size, num_layers: @num_layers)
+       end
+     end
+   end
+ end
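
The constructor registers one weight/bias set per layer and direction, naming them with the format strings above (suffix "_reverse" marks the backward direction); a sketch of what gets set for a two-layer bidirectional RNN:

  rnn = Torch::NN::RNN.new(10, 20, num_layers: 2, bidirectional: true)
  # instance variables created by the loop include:
  #   @weight_ih_l0, @weight_hh_l0, @bias_ih_l0, @bias_hh_l0
  #   @weight_ih_l0_reverse, ..., @bias_hh_l0_reverse
  #   @weight_ih_l1, ..., @bias_hh_l1_reverse
  # @all_weights holds the same names grouped per (layer, direction), and
  # reset_parameters draws each value uniformly from [-1/sqrt(20), 1/sqrt(20)].
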
data/lib/torch/nn/smooth_l1_loss.rb ADDED
@@ -0,0 +1,13 @@
+ module Torch
+   module NN
+     class SmoothL1Loss < Loss
+       def initialize(reduction: "mean")
+         super(reduction)
+       end
+
+       def forward(input, target)
+         F.smooth_l1_loss(input, target, reduction: @reduction)
+       end
+     end
+   end
+ end
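
For reference, smooth L1 behaves quadratically near zero and linearly for large errors. A plain-Ruby sketch of the standard per-element formula the wrapped F.smooth_l1_loss is expected to follow (standard definition, not taken from this diff):

  def smooth_l1_element(diff)
    d = diff.abs
    d < 1 ? 0.5 * d * d : d - 0.5
  end
  # "mean" reduction then averages these per-element values over the tensor
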
data/lib/torch/nn/soft_margin_loss.rb ADDED
@@ -0,0 +1,13 @@
+ module Torch
+   module NN
+     class SoftMarginLoss < Loss
+       def initialize(reduction: "mean")
+         super(reduction)
+       end
+
+       def forward(input, target)
+         F.soft_margin_loss(input, target, reduction: @reduction)
+       end
+     end
+   end
+ end
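
Soft margin loss is the logistic loss over +1/-1 targets. A plain-Ruby sketch of the standard per-element formula behind F.soft_margin_loss (standard definition, not taken from this diff):

  def soft_margin_element(x, y)  # y is +1 or -1
    Math.log(1 + Math.exp(-y * x))
  end
  # "mean" reduction averages these values over all elements
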
data/lib/torch/nn/triplet_margin_loss.rb ADDED
@@ -0,0 +1,18 @@
+ module Torch
+   module NN
+     class TripletMarginLoss < Loss
+       def initialize(margin: 1.0, p: 2.0, eps: 1e-6, swap: false, reduction: "mean")
+         super(reduction)
+         @margin = margin
+         @p = p
+         @eps = eps
+         @swap = swap
+       end
+
+       def forward(anchor, positive, negative)
+         F.triplet_margin_loss(anchor, positive, negative, margin: @margin, p: @p,
+           eps: @eps, swap: @swap, reduction: @reduction)
+       end
+     end
+   end
+ end
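
A usage sketch for the triplet loss, which pushes the anchor-positive distance below the anchor-negative distance by at least the margin (Torch.randn stands in for real embeddings):

  loss_fn = Torch::NN::TripletMarginLoss.new(margin: 1.0, p: 2.0)
  anchor   = Torch.randn(8, 128)
  positive = Torch.randn(8, 128)
  negative = Torch.randn(8, 128)
  loss = loss_fn.forward(anchor, positive, negative)
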
data/lib/torch/tensor.rb CHANGED
@@ -5,12 +5,8 @@ module Torch
 
     alias_method :requires_grad?, :requires_grad
 
-    def self.new(*size)
-      if size.length == 1 && size.first.is_a?(Tensor)
-        size.first
-      else
-        Torch.empty(*size)
-      end
+    def self.new(*args)
+      FloatTensor.new(*args)
     end
 
     def dtype
@@ -28,7 +24,7 @@ module Torch
     end
 
     def to_a
-      reshape_arr(_data, shape)
+      reshape_arr(_flat_data, shape)
     end
 
     # TODO support dtype
@@ -39,7 +35,7 @@ module Torch
 
     def size(dim = nil)
       if dim
-        _size(dim)
+        _size_int(dim)
       else
         shape
       end
@@ -57,7 +53,7 @@ module Torch
       if numel != 1
         raise Error, "only one element tensors can be converted to Ruby scalars"
       end
-      _data.first
+      _flat_data.first
     end
 
     # unsure if this is correct
@@ -73,7 +69,7 @@ module Torch
     def numo
       cls = Torch._dtype_to_numo[dtype]
       raise Error, "Cannot convert #{dtype} to Numo" unless cls
-      cls.cast(_data).reshape(*shape)
+      cls.cast(_flat_data).reshape(*shape)
     end
 
     def new_ones(*size, **options)
@@ -90,25 +86,27 @@ module Torch
       _type(enum)
     end
 
+    # start temp operations
+
     def add!(value = 1, other)
       if other.is_a?(Numeric)
-        _add_scalar!(other * value)
+        _add__scalar(other, value)
       else
         # need to use alpha for sparse tensors instead of multiplying
-        _add_alpha!(other, value)
+        _add__tensor(other, value)
       end
     end
 
     def mul!(other)
       if other.is_a?(Numeric)
-        _mul_scalar!(other)
+        _mul__scalar(other)
       else
-        _mul!(other)
+        _mul__tensor(other)
       end
     end
 
     # operations
-    %w(abs add argmax div dot eq exp gt log log_softmax lt matmul max mean min mul neg norm num numel pow relu remainder reshape sign softmax sqrt sub sum unsqueeze topk).each do |op|
+    %w(log_softmax mean softmax sum topk).each do |op|
       define_method(op) do |*args, **options, &block|
         if options.any?
           Torch.send(op, self, *args, **options, &block)
@@ -118,6 +116,8 @@ module Torch
       end
     end
 
+    # end temp operations
+
     def +(other)
       add(other)
     end
@@ -156,11 +156,11 @@ module Torch
       dim = 0
       indexes.each do |index|
         if index.is_a?(Numeric)
-          result = result._select(dim, index)
+          result = result._select_int(dim, index)
         elsif index.is_a?(Range)
           finish = index.end
           finish += 1 unless index.exclude_end?
-          result = result._slice(dim, index.begin, finish, 1)
+          result = result._slice_tensor(dim, index.begin, finish, 1)
           dim += 1
         elsif index.nil?
           result = result.unsqueeze(dim)
@@ -183,11 +183,11 @@ module Torch
       value = Torch.tensor(value) unless value.is_a?(Tensor)
 
       if index.is_a?(Numeric)
-        copy_to(_select(0, index), value)
+        copy_to(_select_int(0, index), value)
       elsif index.is_a?(Range)
         finish = index.end
         finish += 1 unless index.exclude_end?
-        copy_to(_slice(0, index.begin, finish, 1), value)
+        copy_to(_slice_tensor(0, index.begin, finish, 1), value)
       else
         raise Error, "Unsupported index type: #{index.class.name}"
       end
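
A short sketch of how the renamed native calls surface through the public Tensor API (the _select_int, _slice_tensor, _add__tensor, and _mul__scalar names come from the generated bindings this release introduces):

  x = Torch.tensor([[1, 2, 3], [4, 5, 6]])
  x[1]      # Numeric index -> _select_int(dim, index)
  x[0..1]   # Range index   -> _slice_tensor(dim, start, finish, 1); inclusive ranges add 1 to the end
  y = Torch.ones(2, 3)
  y.add!(2, Torch.ones(2, 3))  # tensor argument  -> _add__tensor(other, alpha)
  y.mul!(3)                    # numeric argument -> _mul__scalar(3)
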
data/lib/torch/version.rb CHANGED
@@ -1,3 +1,3 @@
  module Torch
-   VERSION = "0.1.4"
+   VERSION = "0.1.5"
  end
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: torch-rb
  version: !ruby/object:Gem::Version
-   version: 0.1.4
+   version: 0.1.5
  platform: ruby
  authors:
  - Andrew Kane
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2019-12-01 00:00:00.000000000 Z
+ date: 2019-12-07 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
    name: rice
@@ -106,17 +106,31 @@ files:
  - README.md
  - ext/torch/ext.cpp
  - ext/torch/extconf.rb
+ - ext/torch/nn_functions.cpp
+ - ext/torch/nn_functions.hpp
+ - ext/torch/templates.hpp
+ - ext/torch/tensor_functions.cpp
+ - ext/torch/tensor_functions.hpp
+ - ext/torch/torch_functions.cpp
+ - ext/torch/torch_functions.hpp
  - lib/torch-rb.rb
  - lib/torch.rb
  - lib/torch/ext.bundle
  - lib/torch/inspector.rb
+ - lib/torch/native/dispatcher.rb
+ - lib/torch/native/function.rb
+ - lib/torch/native/generator.rb
+ - lib/torch/native/native_functions.yaml
+ - lib/torch/native/parser.rb
  - lib/torch/nn/alpha_dropout.rb
  - lib/torch/nn/avg_pool2d.rb
  - lib/torch/nn/avg_poolnd.rb
  - lib/torch/nn/bce_loss.rb
+ - lib/torch/nn/bce_with_logits_loss.rb
  - lib/torch/nn/bilinear.rb
  - lib/torch/nn/conv2d.rb
  - lib/torch/nn/convnd.rb
+ - lib/torch/nn/cosine_embedding_loss.rb
  - lib/torch/nn/cosine_similarity.rb
  - lib/torch/nn/cross_entropy_loss.rb
  - lib/torch/nn/ctc_loss.rb
@@ -128,6 +142,7 @@ files:
  - lib/torch/nn/embedding_bag.rb
  - lib/torch/nn/feature_alpha_dropout.rb
  - lib/torch/nn/functional.rb
+ - lib/torch/nn/hinge_embedding_loss.rb
  - lib/torch/nn/identity.rb
  - lib/torch/nn/init.rb
  - lib/torch/nn/kl_div_loss.rb
@@ -136,22 +151,31 @@ files:
  - lib/torch/nn/linear.rb
  - lib/torch/nn/log_softmax.rb
  - lib/torch/nn/loss.rb
+ - lib/torch/nn/margin_ranking_loss.rb
  - lib/torch/nn/max_pool2d.rb
  - lib/torch/nn/max_poolnd.rb
  - lib/torch/nn/module.rb
  - lib/torch/nn/mse_loss.rb
+ - lib/torch/nn/multi_label_margin_loss.rb
+ - lib/torch/nn/multi_label_soft_margin_loss.rb
+ - lib/torch/nn/multi_margin_loss.rb
  - lib/torch/nn/nll_loss.rb
  - lib/torch/nn/pairwise_distance.rb
  - lib/torch/nn/parameter.rb
  - lib/torch/nn/poisson_nll_loss.rb
  - lib/torch/nn/prelu.rb
  - lib/torch/nn/relu.rb
+ - lib/torch/nn/rnn.rb
+ - lib/torch/nn/rnn_base.rb
  - lib/torch/nn/sequential.rb
  - lib/torch/nn/sigmoid.rb
+ - lib/torch/nn/smooth_l1_loss.rb
+ - lib/torch/nn/soft_margin_loss.rb
  - lib/torch/nn/softmax.rb
  - lib/torch/nn/softmax2d.rb
  - lib/torch/nn/softmin.rb
  - lib/torch/nn/softplus.rb
+ - lib/torch/nn/triplet_margin_loss.rb
  - lib/torch/nn/weighted_loss.rb
  - lib/torch/optim/adadelta.rb
  - lib/torch/optim/adagrad.rb