torch-rb 0.9.1 → 0.9.2

This diff shows the changes between publicly released package versions as they appear in their public registry. It is provided for informational purposes only.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: f5224c74f6e74ed04396dfa0414400af5cb20bc5e654320421116723ffcb8e83
-  data.tar.gz: 6a2881ddacb7610a231ebd5a1c24d0f71a2662f16f360102141dfd0893c13346
+  metadata.gz: bd10ea76fc9cde319cad48a1b3bd298384c4f47812d9be5ee9016c44151458a0
+  data.tar.gz: e8391c2bc4ccae67ca0c2e402e431e99a683023285952edbd3f011a18793dfb6
 SHA512:
-  metadata.gz: 045432235e1c691ce85fb937a0562d93b1d9bc312fc648d40dafcc24857eeec4f84e6ceab397793171b0046ccfd785a6caeba37902925dcff1f73c760dd57cec
-  data.tar.gz: 2520fa17dcd13be52aaf1256f431d2951f8113f862189b8691f877dcb48ac9f71ac70719600e27d8c8972494c0b7f11c0983c3330fed7ca6aeed552e8500ec22
+  metadata.gz: c290b76e1d18e88c46ec2bd544471cc5922de4741a0095786ba00415b2fa6ae52023b0c296365a7af6a81143515c36ccd3dbe69d11aea5138fbcd81b6e687bf6
+  data.tar.gz: bc35fd6f9c9ed38b12caf98158f61a6d9827e967c36f8fbcf46f64331cc96c03de3105f4bc0b5b70c911d25ce223ea0085c18537cff677e84bf3d564ce0058ab
data/CHANGELOG.md CHANGED
@@ -1,3 +1,10 @@
+## 0.9.2 (2022-02-03)
+
+- Added support for setting `nil` gradient
+- Added checks when setting gradient
+- Fixed precision with `Torch.tensor` method
+- Fixed memory issue when creating tensor for `ByteStorage`
+
 ## 0.9.1 (2022-02-02)
 
 - Moved `like` methods to C++
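
The first two entries change how gradients are assigned from Ruby. A minimal sketch of the new behavior (illustrative values, not from the package):

```ruby
x = Torch.tensor([1.0, 2.0, 3.0], requires_grad: true)
x.sum.backward
x.grad       # => a tensor of ones

# new in 0.9.2: assigning nil clears the stored gradient
x.grad = nil
x.grad       # => nil
```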
data/ext/torch/nn.cpp CHANGED
@@ -98,8 +98,11 @@ void init_nn(Rice::Module& m) {
         auto grad = self.grad();
         return grad.defined() ? Object(Rice::detail::To_Ruby<torch::Tensor>().convert(grad)) : Nil;
       })
+    // can't use grad=
+    // assignment methods fail with Ruby 3.0
+    // TODO add checks like Tensor
     .define_method(
-      "grad=",
+      "_set_grad",
       [](Parameter& self, torch::Tensor& grad) {
         self.mutable_grad() = grad;
       })
data/ext/torch/templates.h CHANGED
@@ -41,6 +41,40 @@ using torch::nn::init::NonlinearityType;
 #define RETURN_NIL \
   return Qnil;
 
+namespace Rice::detail
+{
+  template<typename T>
+  struct Type<c10::complex<T>>
+  {
+    static bool verify()
+    {
+      return true;
+    }
+  };
+
+  template<typename T>
+  class To_Ruby<c10::complex<T>>
+  {
+  public:
+    VALUE convert(c10::complex<T> const& x)
+    {
+      return rb_dbl_complex_new(x.real(), x.imag());
+    }
+  };
+
+  template<typename T>
+  class From_Ruby<c10::complex<T>>
+  {
+  public:
+    c10::complex<T> convert(VALUE x)
+    {
+      VALUE real = rb_funcall(x, rb_intern("real"), 0);
+      VALUE imag = rb_funcall(x, rb_intern("imag"), 0);
+      return c10::complex<T>(From_Ruby<T>().convert(real), From_Ruby<T>().convert(imag));
+    }
+  };
+}
+
 namespace Rice::detail
 {
   template<>
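
These specializations let Ruby `Complex` values round-trip through `c10::complex`: `From_Ruby` reads `real`/`imag` from the Ruby object, and `To_Ruby` rebuilds a Ruby `Complex` via `rb_dbl_complex_new`. A rough sketch of what this enables (output formatting is illustrative):

```ruby
t = Torch.tensor([Complex(1.0, 2.0), Complex(3.0, -4.0)], dtype: :complex64)
t.dtype # => :complex64
t.to_a  # => [(1.0+2.0i), (3.0-4.0i)]
```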
data/ext/torch/tensor.cpp CHANGED
@@ -10,28 +10,6 @@
 using namespace Rice;
 using torch::indexing::TensorIndex;
 
-namespace Rice::detail
-{
-  template<typename T>
-  struct Type<c10::complex<T>>
-  {
-    static bool verify()
-    {
-      return true;
-    }
-  };
-
-  template<typename T>
-  class To_Ruby<c10::complex<T>>
-  {
-  public:
-    VALUE convert(c10::complex<T> const& x)
-    {
-      return rb_dbl_complex_new(x.real(), x.imag());
-    }
-  };
-}
-
 template<typename T>
 Array flat_data(Tensor& tensor) {
   Tensor view = tensor.reshape({tensor.numel()});
@@ -189,9 +167,31 @@ void init_tensor(Rice::Module& m, Rice::Class& c, Rice::Class& rb_cTensorOptions
         auto grad = self.grad();
         return grad.defined() ? Object(Rice::detail::To_Ruby<torch::Tensor>().convert(grad)) : Nil;
       })
+    // can't use grad=
+    // assignment methods fail with Ruby 3.0
     .define_method(
-      "grad=",
-      [](Tensor& self, torch::Tensor& grad) {
+      "_set_grad",
+      [](Tensor& self, Rice::Object value) {
+        if (value.is_nil()) {
+          self.mutable_grad().reset();
+          return;
+        }
+
+        const auto& grad = Rice::detail::From_Ruby<torch::Tensor>().convert(value.value());
+
+        // TODO support sparse grad
+        if (!grad.options().type_equal(self.options())) {
+          rb_raise(rb_eArgError, "assigned grad has data of a different type");
+        }
+
+        if (self.is_cuda() && grad.get_device() != self.get_device()) {
+          rb_raise(rb_eArgError, "assigned grad has data located on a different device");
+        }
+
+        if (!self.sizes().equals(grad.sizes())) {
+          rb_raise(rb_eArgError, "assigned grad has data of a different size");
+        }
+
         self.mutable_grad() = grad;
       })
     .define_method(
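
From Ruby, each failed check surfaces as an `ArgumentError` with the message shown above. A hypothetical sketch of the failure modes (the device check assumes a CUDA build):

```ruby
x = Torch.tensor([1.0, 2.0], requires_grad: true)

x.grad = Torch.tensor([1, 2]) # ArgumentError: assigned grad has data of a different type
x.grad = Torch.ones(3)        # ArgumentError: assigned grad has data of a different size
# a CPU gradient assigned to a CUDA tensor raises
# "assigned grad has data located on a different device"
```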
@@ -281,7 +281,7 @@ void init_tensor(Rice::Module& m, Rice::Class& c, Rice::Class& rb_cTensorOptions
       })
     .define_method(
       "_to",
-      [](Tensor& self, torch::Device device, int dtype, bool non_blocking, bool copy) {
+      [](Tensor& self, torch::Device& device, int dtype, bool non_blocking, bool copy) {
        return self.to(device, (torch::ScalarType) dtype, non_blocking, copy);
      });
 
data/ext/torch/torch.cpp CHANGED
@@ -6,6 +6,23 @@
 #include "templates.h"
 #include "utils.h"
 
+template<typename T>
+torch::Tensor make_tensor(Rice::Array a, std::vector<int64_t> size, const torch::TensorOptions &options) {
+  std::vector<T> vec;
+  for (long i = 0; i < a.size(); i++) {
+    vec.push_back(Rice::detail::From_Ruby<T>().convert(a[i].value()));
+  }
+
+  // hack for requires_grad error
+  auto requires_grad = options.requires_grad();
+  torch::Tensor t = torch::tensor(vec, options.requires_grad(c10::nullopt));
+  if (requires_grad) {
+    t.set_requires_grad(true);
+  }
+
+  return t.reshape(size);
+}
+
 void init_torch(Rice::Module& m) {
   m.add_handler<torch::Error>(handle_error);
   add_torch_functions(m);
@@ -61,35 +78,28 @@ void init_torch(Rice::Module& m) {
       "_tensor",
       [](Rice::Array a, std::vector<int64_t> size, const torch::TensorOptions &options) {
         auto dtype = options.dtype();
-        torch::Tensor t;
-        if (dtype == torch::kBool) {
-          std::vector<uint8_t> vec;
-          for (long i = 0; i < a.size(); i++) {
-            vec.push_back(Rice::detail::From_Ruby<bool>().convert(a[i].value()));
-          }
-          t = torch::tensor(vec, options);
-        } else if (dtype == torch::kComplexFloat || dtype == torch::kComplexDouble) {
-          // TODO use template
-          std::vector<c10::complex<double>> vec;
-          Object obj;
-          for (long i = 0; i < a.size(); i++) {
-            obj = a[i];
-            vec.push_back(c10::complex<double>(Rice::detail::From_Ruby<double>().convert(obj.call("real").value()), Rice::detail::From_Ruby<double>().convert(obj.call("imag").value())));
-          }
-          t = torch::tensor(vec, options);
+        if (dtype == torch::kByte) {
+          return make_tensor<uint8_t>(a, size, options);
+        } else if (dtype == torch::kChar) {
+          return make_tensor<int8_t>(a, size, options);
+        } else if (dtype == torch::kShort) {
+          return make_tensor<int16_t>(a, size, options);
+        } else if (dtype == torch::kInt) {
+          return make_tensor<int32_t>(a, size, options);
+        } else if (dtype == torch::kLong) {
+          return make_tensor<int64_t>(a, size, options);
+        } else if (dtype == torch::kFloat) {
+          return make_tensor<float>(a, size, options);
+        } else if (dtype == torch::kDouble) {
+          return make_tensor<double>(a, size, options);
+        } else if (dtype == torch::kBool) {
+          return make_tensor<uint8_t>(a, size, options);
+        } else if (dtype == torch::kComplexFloat) {
+          return make_tensor<c10::complex<float>>(a, size, options);
+        } else if (dtype == torch::kComplexDouble) {
+          return make_tensor<c10::complex<double>>(a, size, options);
         } else {
-          std::vector<float> vec;
-          for (long i = 0; i < a.size(); i++) {
-            vec.push_back(Rice::detail::From_Ruby<float>().convert(a[i].value()));
-          }
-          // hack for requires_grad error
-          if (options.requires_grad()) {
-            t = torch::tensor(vec, options.requires_grad(c10::nullopt));
-            t.set_requires_grad(true);
-          } else {
-            t = torch::tensor(vec, options);
-          }
+          throw std::runtime_error("Unsupported type");
         }
-        return t.reshape(size);
       });
 }
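
This per-dtype dispatch is the changelog's precision fix: previously every dtype other than bool and complex was funneled through `std::vector<float>`, so `:float64` and `:int64` values were silently rounded to float32 precision. An illustrative before/after (the 0.9.1 value shows typical float32 rounding):

```ruby
Torch.tensor([0.1], dtype: :float64).item
# 0.9.2: 0.1 (full float64 precision)
# 0.9.1: ~0.10000000149011612 (0.1 rounded through a float32 vector)

Torch.tensor([2**53 + 1], dtype: :int64).item
# 0.9.2: 9007199254740993 (exact; this integer is not representable as a float)
```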
data/lib/torch/nn/parameter.rb CHANGED
@@ -1,6 +1,9 @@
 module Torch
   module NN
     class Parameter < Tensor
+      # fix for issue w/ assignment methods
+      alias_method :grad=, :_set_grad
+
       def self.new(data = nil, requires_grad: true)
        data = Tensor.new unless data
        _make_subclass(data, requires_grad)
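
The alias exists because setter-style methods (names ending in `=`) defined directly by the C++ extension fail on Ruby 3.0, so the extension defines a plainly named `_set_grad` and Ruby supplies the `grad=` name. The same pattern in plain Ruby (hypothetical names, for illustration only):

```ruby
class Example
  def _set_value(v) # stands in for the natively defined method
    @value = v
  end
  # expose the idiomatic setter without defining `value=` natively
  alias_method :value=, :_set_value
end
```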
data/lib/torch/tensor.rb CHANGED
@@ -8,6 +8,9 @@ module Torch
     alias_method :ndim, :dim
     alias_method :ndimension, :dim
 
+    # fix for issue w/ assignment methods
+    alias_method :grad=, :_set_grad
+
     # use alias_method for performance
     alias_method :+, :add
     alias_method :-, :sub
data/lib/torch/version.rb CHANGED
@@ -1,3 +1,3 @@
 module Torch
-  VERSION = "0.9.1"
+  VERSION = "0.9.2"
 end
data/lib/torch.rb CHANGED
@@ -267,7 +267,7 @@ module Torch
       args.first.send(dtype).to(device)
     elsif args.size == 1 && args.first.is_a?(ByteStorage) && dtype == :uint8
       bytes = args.first.bytes
-      Torch._from_blob(bytes, [bytes.bytesize], TensorOptions.new.dtype(DTYPE_TO_ENUM[dtype]))
+      Torch._from_blob_ref(bytes, [bytes.bytesize], TensorOptions.new.dtype(DTYPE_TO_ENUM[dtype]))
     elsif args.size == 1 && args.first.is_a?(Array)
       Torch.tensor(args.first, dtype: dtype, device: device)
     elsif args.size == 0
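
This one-line change is the changelog's `ByteStorage` memory fix: `_from_blob` wraps the string's bytes without copying them, so the resulting tensor must keep the Ruby string alive (see `_from_blob_ref` below). A rough usage sketch, assuming `ByteStorage.from_buffer` as the way to build byte storage:

```ruby
storage = Torch::ByteStorage.from_buffer("\x01\x02\x03")
t = Torch.tensor(storage, dtype: :uint8)
GC.start
t.to_a # => [1, 2, 3] (safe in 0.9.2; the buffer can no longer be collected mid-use)
```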
@@ -320,12 +320,17 @@
     raise Error, "Cannot convert #{ndarray.class.name} to tensor" unless dtype
     options = tensor_options(device: "cpu", dtype: dtype[0])
     # TODO pass pointer to array instead of creating string
-    str = ndarray.to_string
-    tensor = _from_blob(str, ndarray.shape, options)
+    _from_blob_ref(ndarray.to_string, ndarray.shape, options)
+  end
+
+  # private
+  # TODO use keepAlive in Rice (currently segfaults)
+  def _from_blob_ref(data, size, options)
+    tensor = _from_blob(data, size, options)
     # from_blob does not own the data, so we need to keep
     # a reference to it for duration of tensor
     # can remove when passing pointer directly
-    tensor.instance_variable_set("@_numo_str", str)
+    tensor.instance_variable_set("@_numo_data", data)
     tensor
   end
 
@@ -406,6 +411,12 @@ module Torch
       end
     end
 
+    # TODO check each dimension for consistency in future
+    raise Error, "Inconsistent dimensions" if data.size != size.inject(1, :*)
+
+    # TODO move to C++
+    data = data.map { |v| v ? 1 : 0 } if options[:dtype] == :bool
+
     _tensor(data, size, tensor_options(**options))
   end
 
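Both additions are visible from `Torch.tensor`. A short illustrative sketch:

```ruby
# flattened data must match the inferred shape
Torch.tensor([[1, 2], [3]])
# => raises Torch::Error, "Inconsistent dimensions"

# booleans are mapped to 1/0 before reaching the native _tensor call
Torch.tensor([true, false, true], dtype: :bool)
```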
metadata CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: torch-rb
 version: !ruby/object:Gem::Version
-  version: 0.9.1
+  version: 0.9.2
 platform: ruby
 authors:
 - Andrew Kane