torch-rb 0.1.2 → 0.1.3

data/lib/torch.rb CHANGED
@@ -8,18 +8,40 @@ require "torch/version"
 
 # optim
 require "torch/optim/optimizer"
+require "torch/optim/adadelta"
+require "torch/optim/adagrad"
+require "torch/optim/adam"
+require "torch/optim/adamax"
+require "torch/optim/adamw"
+require "torch/optim/asgd"
+require "torch/optim/rmsprop"
+require "torch/optim/rprop"
 require "torch/optim/sgd"
 
-# nn
+# optim lr_scheduler
+require "torch/optim/lr_scheduler/lr_scheduler"
+require "torch/optim/lr_scheduler/step_lr"
+
+# nn base classes
 require "torch/nn/module"
-require "torch/nn/init"
+require "torch/nn/convnd"
+require "torch/nn/dropoutnd"
+
+# nn
+require "torch/nn/alpha_dropout"
 require "torch/nn/conv2d"
+require "torch/nn/dropout"
+require "torch/nn/dropout2d"
+require "torch/nn/dropout3d"
+require "torch/nn/embedding"
+require "torch/nn/feature_alpha_dropout"
 require "torch/nn/functional"
+require "torch/nn/init"
 require "torch/nn/linear"
+require "torch/nn/mse_loss"
 require "torch/nn/parameter"
-require "torch/nn/sequential"
 require "torch/nn/relu"
-require "torch/nn/mse_loss"
+require "torch/nn/sequential"
 
 # utils
 require "torch/utils/data/data_loader"
@@ -27,6 +49,11 @@ require "torch/utils/data/tensor_dataset"
 
 module Torch
   class Error < StandardError; end
+  class NotImplementedYet < StandardError
+    def message
+      "This feature has not been implemented yet. Consider submitting a PR."
+    end
+  end
 
   # keys: https://pytorch.org/docs/stable/tensor_attributes.html#torch.torch.dtype
   # values: https://github.com/pytorch/pytorch/blob/master/c10/core/ScalarType.h
@@ -75,11 +102,18 @@ module Torch
       obj.is_a?(Tensor)
     end
 
-    # TODO don't copy
     def from_numo(ndarray)
       dtype = _dtype_to_numo.find { |k, v| ndarray.is_a?(v) }
       raise Error, "Cannot convert #{ndarray.class.name} to tensor" unless dtype
-      tensor(ndarray.to_a, dtype: dtype[0])
+      options = tensor_options(device: "cpu", dtype: dtype[0])
+      # TODO pass pointer to array instead of creating string
+      str = ndarray.to_string
+      tensor = _from_blob(str, ndarray.shape, options)
+      # from_blob does not own the data, so we need to keep
+      # a reference to it for duration of tensor
+      # can remove when passing pointer directly
+      tensor.instance_variable_set("@_numo_str", str)
+      tensor
     end
 
     # private
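
To illustrate the new conversion path (it still copies the array bytes through a string buffer, per the TODO above), a short sketch assuming the numo-narray gem is installed and that Numo::SFloat maps to a float32 tensor:

require "torch"
require "numo/narray"

ndarray = Numo::SFloat.new(2, 3).seq
tensor = Torch.from_numo(ndarray)
# the tensor keeps @_numo_str alive so the copied blob is not garbage collected
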
@@ -197,7 +231,7 @@ module Torch
         high = low
         low = 0
       end
-      rand(input.size, like_options(input, options))
+      randint(low, high, input.size, like_options(input, options))
     end
 
     def randn_like(input, **options)
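
This hunk replaces a uniform-float rand call with integer sampling; judging from the low/high handling above, the surrounding method is presumably randint_like. A hypothetical sketch (the method name and argument forms are assumptions, not confirmed by this diff):

# hypothetical usage; integer values with the same shape and options as `base`
base = Torch.zeros(2, 3)
Torch.randint_like(base, 10) # integers in [0, 10)
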
@@ -272,8 +306,13 @@ module Torch
       _min(input)
     end
 
-    def max(input)
-      _max(input)
+    def max(input, dim = nil, keepdim: false, out: nil)
+      if dim
+        raise NotImplementedYet unless out
+        _max_out(out[0], out[1], input, dim, keepdim)
+      else
+        _max(input)
+      end
     end
 
     def exp(input)
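
When a dim is given, only the out: path is wired up so far, so callers must pass preallocated values and indices tensors. A hedged sketch (the tensor factory calls and dtype keyword are assumptions):

x = Torch.rand(3, 4)

Torch.max(x) # overall maximum, as before

# per-dimension max currently requires out: [values, indices]
values = Torch.zeros(3)
indices = Torch.zeros(3, dtype: :int64)
Torch.max(x, 1, out: [values, indices])
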
@@ -284,6 +323,18 @@ module Torch
       _log(input)
     end
 
+    def sign(input)
+      _sign(input)
+    end
+
+    def gt(input, other)
+      _gt(input, other)
+    end
+
+    def lt(input, other)
+      _lt(input, other)
+    end
+
     def unsqueeze(input, dim)
       _unsqueeze(input, dim)
     end
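
These are thin wrappers over the native element-wise ops; a quick sketch of the expected results (comparison results come back as masks, following LibTorch semantics):

a = Torch.tensor([-2.0, 0.0, 3.0])
b = Torch.tensor([1.0, 0.0, 1.0])

Torch.sign(a)  # => -1.0, 0.0, 1.0
Torch.gt(a, b) # element-wise a > b
Torch.lt(a, b) # element-wise a < b
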
@@ -292,6 +343,10 @@ module Torch
       _dot(input, tensor)
     end
 
+    def cat(tensors, dim = 0)
+      _cat(tensors, dim)
+    end
+
     def matmul(input, other)
       _matmul(input, other)
     end
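
cat concatenates tensors along an existing dimension, defaulting to 0:

a = Torch.ones(2, 3)
b = Torch.zeros(2, 3)

Torch.cat([a, b])    # 4x3 tensor
Torch.cat([a, b], 1) # 2x6 tensor
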
@@ -300,6 +355,22 @@ module Torch
       _reshape(input, shape)
     end
 
+    def flatten(input, start_dim: 0, end_dim: -1)
+      _flatten(input, start_dim, end_dim)
+    end
+
+    def sqrt(input)
+      _sqrt(input)
+    end
+
+    def abs(input)
+      _abs(input)
+    end
+
+    def device(str)
+      Device.new(str)
+    end
+
     private
 
     def execute_op(op, input, other, out: nil)
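
A quick sketch of the remaining additions (Torch.device is just a thin wrapper around Device.new, so the accepted strings follow LibTorch, e.g. "cpu"):

x = Torch.tensor([[1.0, -4.0], [9.0, -16.0]])

Torch.abs(x)                   # element-wise absolute value
Torch.sqrt(Torch.abs(x))       # element-wise square root
Torch.flatten(x)               # 1-D tensor with 4 elements
Torch.flatten(x, start_dim: 1) # flattens dims 1..-1 (unchanged for a 2-D tensor)

device = Torch.device("cpu")
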
data/lib/torch/ext.bundle CHANGED
Binary file
data/lib/torch/inspector.rb CHANGED
@@ -1,13 +1,16 @@
 module Torch
   module Inspector
     # TODO make more performance, especially when summarizing
+    # how? only read data that will be displayed
     def inspect
       data =
         if numel == 0
          "[]"
         elsif dim == 0
-          to_a.first
+          item
         else
+          summarize = numel > 1000
+
           values = to_a.flatten
           abs = values.select { |v| v != 0 }.map(&:abs)
           max = abs.max || 1
@@ -36,8 +39,6 @@ module Torch
             fmt = "%#{total}d"
           end
 
-          summarize = numel > 1000
-
           inspect_level(to_a, fmt, dim - 1, 0, summarize)
         end
 
data/lib/torch/nn/alpha_dropout.rb CHANGED
@@ -0,0 +1,9 @@
+module Torch
+  module NN
+    class AlphaDropout < DropoutNd
+      def forward(input)
+        F.alpha_dropout(input, p: @p, training: @training, inplace: @inplace)
+      end
+    end
+  end
+end
data/lib/torch/nn/conv2d.rb CHANGED
@@ -1,36 +1,24 @@
 module Torch
   module NN
-    class Conv2d < Module
+    class Conv2d < ConvNd
       attr_reader :bias, :weight
 
-      def initialize(in_channels, out_channels, kernel_size, stride: 1, padding: 0) #, dilation: 1, groups: 1)
-        @in_channels = in_channels
-        @out_channels = out_channels
-        @kernel_size = pair(kernel_size)
-        @stride = pair(stride)
-        @padding = pair(padding)
-        # @dilation = pair(dilation)
-
-        # TODO divide by groups
-        @weight = Parameter.new(Tensor.new(out_channels, in_channels, *@kernel_size))
-        @bias = Parameter.new(Tensor.new(out_channels))
-
-        reset_parameters
+      def initialize(in_channels, out_channels, kernel_size, stride: 1, padding: 0, dilation: 1, groups: 1, bias: true, padding_mode: "zeros")
+        kernel_size = pair(kernel_size)
+        stride = pair(stride)
+        padding = pair(padding)
+        dilation = pair(dilation)
+        super(in_channels, out_channels, kernel_size, stride, padding, dilation, false, pair(0), groups, bias, padding_mode)
       end
 
-      def reset_parameters
-        Init.kaiming_uniform_(@weight, Math.sqrt(5))
-        if @bias
-          fan_in, _ = Init.calculate_fan_in_and_fan_out(@weight)
-          bound = 1 / Math.sqrt(fan_in)
-          Init.uniform_(@bias, -bound, bound)
+      def forward(input)
+        if @padding_mode == "circular"
+          raise NotImplementedError
         end
+        F.conv2d(input, @weight, @bias, stride: @stride, padding: @padding, dilation: @dilation, groups: @groups)
       end
 
-      def call(input)
-        F.conv2d(input, @weight, @bias, stride: @stride, padding: @padding) #, @dilation, @groups)
-      end
-
+      # TODO add more parameters
       def inspect
         "Conv2d(#{@in_channels}, #{@out_channels}, kernel_size: #{@kernel_size.inspect}, stride: #{@stride.inspect})"
       end
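
Construction now accepts the extra PyTorch-style options and parameter setup moves to ConvNd; a hedged sketch (an NCHW float input is assumed, and Module#call is assumed to dispatch to forward):

conv = Torch::NN::Conv2d.new(3, 16, 3, stride: 1, padding: 1)
input = Torch.rand(1, 3, 28, 28) # batch x channels x height x width
output = conv.call(input)        # 1 x 16 x 28 x 28
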
data/lib/torch/nn/convnd.rb CHANGED
@@ -0,0 +1,41 @@
+module Torch
+  module NN
+    class ConvNd < Module
+      def initialize(in_channels, out_channels, kernel_size, stride, padding, dilation, transposed, output_padding, groups, bias, padding_mode)
+        super()
+        raise ArgumentError, "in_channels must be divisible by groups" if in_channels % groups != 0
+        raise ArgumentError, "out_channels must be divisible by groups" if out_channels % groups != 0
+        @in_channels = in_channels
+        @out_channels = out_channels
+        @kernel_size = kernel_size
+        @stride = stride
+        @padding = padding
+        @dilation = dilation
+        @transposed = transposed
+        @output_padding = output_padding
+        @groups = groups
+        @padding_mode = padding_mode
+        if transposed
+          @weight = Parameter.new(Tensor.new(in_channels, out_channels / groups, *kernel_size))
+        else
+          @weight = Parameter.new(Tensor.new(out_channels, in_channels / groups, *kernel_size))
+        end
+        if bias
+          @bias = Parameter.new(Tensor.new(out_channels))
+        else
+          raise NotImplementedError
+        end
+        reset_parameters
+      end
+
+      def reset_parameters
+        Init.kaiming_uniform!(@weight, Math.sqrt(5))
+        if @bias
+          fan_in, _ = Init.calculate_fan_in_and_fan_out(@weight)
+          bound = 1 / Math.sqrt(fan_in)
+          Init.uniform!(@bias, -bound, bound)
+        end
+      end
+    end
+  end
+end
data/lib/torch/nn/dropout.rb CHANGED
@@ -0,0 +1,9 @@
+module Torch
+  module NN
+    class Dropout < DropoutNd
+      def forward(input)
+        F.dropout(input, p: @p, training: @training, inplace: @inplace)
+      end
+    end
+  end
+end
data/lib/torch/nn/dropout2d.rb CHANGED
@@ -0,0 +1,9 @@
+module Torch
+  module NN
+    class Dropout2d < DropoutNd
+      def forward(input)
+        F.dropout2d(input, p: @p, training: @training, inplace: @inplace)
+      end
+    end
+  end
+end
data/lib/torch/nn/dropout3d.rb CHANGED
@@ -0,0 +1,9 @@
+module Torch
+  module NN
+    class Dropout3d < DropoutNd
+      def forward(input)
+        F.dropout3d(input, p: @p, training: @training, inplace: @inplace)
+      end
+    end
+  end
+end
data/lib/torch/nn/dropoutnd.rb CHANGED
@@ -0,0 +1,15 @@
+module Torch
+  module NN
+    class DropoutNd < Module
+      def initialize(p: 0.5, inplace: false)
+        super()
+        @p = p
+        @inplace = inplace
+      end
+
+      def inspect
+        "#{self.class.name.split("::").last}(p: #{@p.inspect}, inplace: #{@inplace.inspect})"
+      end
+    end
+  end
+end
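
All of the dropout modules above share DropoutNd's p: and inplace: options and differ only in which functional op they call; a minimal sketch (that @training defaults to true on a fresh module, and that Module#call dispatches to forward, are assumptions):

drop = Torch::NN::Dropout.new(p: 0.5)
x = Torch.ones(4, 4)
drop.call(x) # roughly half the entries zeroed while training
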
data/lib/torch/nn/embedding.rb CHANGED
@@ -0,0 +1,52 @@
+# ported from https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/sparse.py
+module Torch
+  module NN
+    class Embedding < Module
+      def initialize(num_embeddings, embedding_dim, padding_idx: nil, max_norm: nil,
+                     norm_type: 2.0, scale_grad_by_freq: false, sparse: false, _weight: nil)
+
+        super()
+        @num_embeddings = num_embeddings
+        @embedding_dim = embedding_dim
+
+        if padding_idx
+          if padding_idx > 0
+            raise ArgumentError, "Padding_idx must be within num_embeddings" unless padding_idx < @num_embeddings
+          elsif padding_idx < 0
+            raise ArgumentError, "Padding_idx must be within num_embeddings" unless padding_idx >= -@num_embeddings
+            padding_idx = @num_embeddings + padding_idx
+          end
+        end
+        @padding_idx = padding_idx
+        @max_norm = max_norm
+        @norm_type = norm_type
+        @scale_grad_by_freq = scale_grad_by_freq
+        if _weight.nil?
+          @weight = Parameter.new(Tensor.new(num_embeddings, embedding_dim))
+          reset_parameters
+        else
+          raise ArgumentError, "Shape of weight does not match num_embeddings and embedding_dim" unless _weight.shape == [num_embeddings, embedding_dim]
+          @weight = Parameter.new(_weight)
+        end
+        @sparse = sparse
+      end
+
+      def reset_parameters
+        Init.normal!(@weight)
+        if @padding_idx
+          Torch.no_grad do
+            @weight[@padding_idx].fill!(0)
+          end
+        end
+      end
+
+      def forward(input)
+        F.embedding(input, @weight, padding_idx: @padding_idx, max_norm: @max_norm, norm_type: @norm_type, scale_grad_by_freq: @scale_grad_by_freq, sparse: @sparse)
+      end
+
+      def inspect
+        "Embedding(#{@num_embeddings}, #{@embedding_dim})"
+      end
+    end
+  end
+end
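
A sketch of looking up embeddings for integer indices (the index dtype keyword and the call-to-forward dispatch are assumptions; the padding_idx row is zeroed during reset_parameters):

embedding = Torch::NN::Embedding.new(10, 3, padding_idx: 0)
indices = Torch.tensor([[1, 2, 4, 0], [4, 3, 2, 9]], dtype: :int64)
embedding.call(indices) # => 2 x 4 x 3 tensor; index 0 maps to the zeroed row
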
data/lib/torch/nn/feature_alpha_dropout.rb CHANGED
@@ -0,0 +1,9 @@
+module Torch
+  module NN
+    class FeatureAlphaDropout < DropoutNd
+      def forward(input)
+        F.feature_alpha_dropout(input, p: @p, training: @training, inplace: @inplace)
+      end
+    end
+  end
+end
data/lib/torch/nn/functional.rb CHANGED
@@ -6,17 +6,9 @@ module Torch
         Torch.relu(input)
       end
 
-        def conv2d(input, weight, bias, stride: 1, padding: 0)
+        def conv2d(input, weight, bias, stride: 1, padding: 0, dilation: 1, groups: 1)
         # TODO pair stride and padding when needed
-          Torch.conv2d(input, weight, bias, stride, padding)
-        end
-
-        def prelu(input, weight)
-          Torch.prelu(input, weight)
-        end
-
-        def leaky_relu(input, negative_slope = 0.01)
-          Torch.leaky_relu(input, negative_slope)
+          Torch.conv2d(input, weight, bias, stride, padding, dilation, groups)
        end
 
        def max_pool2d(input, kernel_size)
@@ -41,14 +33,64 @@ module Torch
         nll_loss(log_softmax(input, 1), target)
       end
 
-       def nll_loss(input, target)
+       def nll_loss(input, target, reduction: "mean")
         # TODO fix for non-1d
-         Torch.nll_loss(input, target)
+         Torch.nll_loss(input, target, reduction)
       end
 
       def log_softmax(input, dim)
         input.log_softmax(dim)
       end
+
+      def dropout(input, p: 0.5, training: true, inplace: false)
+        if inplace
+          Torch._dropout!(input, p, training)
+        else
+          Torch._dropout(input, p, training)
+        end
+      end
+
+      def dropout2d(input, p: 0.5, training: true, inplace: false)
+        raise ArgumentError, "dropout probability has to be between 0 and 1, but got #{p}" if p < 0 || p > 1
+
+        if inplace
+          Torch._feature_dropout!(input, p, training)
+        else
+          Torch._feature_dropout(input, p, training)
+        end
+      end
+
+      def dropout3d(input, p: 0.5, training: true, inplace: false)
+        if inplace
+          Torch._feature_dropout!(input, p, training)
+        else
+          Torch._feature_dropout(input, p, training)
+        end
+      end
+
+      def alpha_dropout(input, p: 0.5, training: true, inplace: false)
+        if inplace
+          Torch._alpha_dropout!(input, p, training)
+        else
+          Torch._alpha_dropout(input, p, training)
+        end
+      end
+
+      def feature_alpha_dropout(input, p: 0.5, training: true, inplace: false)
+        if inplace
+          Torch._feature_alpha_dropout!(input, p, training)
+        else
+          Torch._feature_alpha_dropout(input, p, training)
+        end
+      end
+
+      def embedding(input, weight, padding_idx: nil, max_norm: nil, norm_type: 2.0, scale_grad_by_freq: false, sparse: false)
+        # TODO handle max_norm and norm_type
+        raise NotImplementedYet unless max_norm.nil? && norm_type == 2.0
+
+        padding_idx ||= -1
+        Torch._embedding(input, weight, padding_idx, scale_grad_by_freq, sparse)
+      end
     end
   end
 end
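
The same operations are also exposed at the functional level; a brief sketch (aliasing F to Torch::NN::Functional here mirrors the F used inside the module code above):

F = Torch::NN::Functional

x = Torch.rand(2, 5)
F.dropout(x, p: 0.3)                # returns a new tensor
F.dropout(x, p: 0.3, inplace: true) # mutates x via Torch._dropout!

weight = Torch.rand(10, 3)
ids = Torch.tensor([[1, 2], [3, 4]], dtype: :int64)
F.embedding(ids, weight) # raises NotImplementedYet if max_norm is given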