torch-rb 0.9.1 → 0.10.1
This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
- checksums.yaml +4 -4
- data/CHANGELOG.md +16 -0
- data/README.md +3 -1
- data/codegen/function.rb +2 -2
- data/codegen/generate_functions.rb +27 -5
- data/codegen/native_functions.yaml +951 -362
- data/ext/torch/nn.cpp +4 -1
- data/ext/torch/ruby_arg_parser.h +26 -6
- data/ext/torch/sparse_functions.h +6 -0
- data/ext/torch/templates.h +34 -0
- data/ext/torch/tensor.cpp +25 -25
- data/ext/torch/torch.cpp +38 -28
- data/ext/torch/utils.h +7 -0
- data/lib/torch/nn/parameter.rb +3 -0
- data/lib/torch/nn/parameter_list.rb +48 -0
- data/lib/torch/tensor.rb +3 -0
- data/lib/torch/version.rb +1 -1
- data/lib/torch.rb +16 -4
- metadata +5 -3
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: f4665eec43d85fbf02ce75f4b268dbf001bfad7e3ae1ecace0e9911b651e2cc2
+  data.tar.gz: d11ee1386ce7feeea68333de6c361d8737a7164cfd1626abce3a511deecb2963
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: cf346bc03f36d4fc920151b0554c93c33a59d0fecd35c6f110dc862ad8b35c6b8641d306124505ddd012ce9d903363672e99772cc2bd7b981d962c9d00f08d3e
+  data.tar.gz: 265157846417fdc3c024e0f50d0b0a663d345ca423ad9233a7d0e722bf5134a8e15e1afce30248992155787484e80311e484c64c5787945b5f5a88625115479a
data/CHANGELOG.md
CHANGED
@@ -1,3 +1,19 @@
+## 0.10.1 (2022-04-12)
+
+- Fixed `dtype`, `device`, and `layout` for `new_*` and `like_*` methods
+
+## 0.10.0 (2022-03-13)
+
+- Updated LibTorch to 1.11.0
+- Added `ParameterList`
+
+## 0.9.2 (2022-02-03)
+
+- Added support for setting `nil` gradient
+- Added checks when setting gradient
+- Fixed precision with `Torch.tensor` method
+- Fixed memory issue when creating tensor for `ByteStorage`
+
 ## 0.9.1 (2022-02-02)
 
 - Moved `like` methods to C++
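As a rough illustration of the `ParameterList` added in 0.10.0 (the diff adds `data/lib/torch/nn/parameter_list.rb`): a minimal usage sketch, assuming the class mirrors PyTorch's `nn.ParameterList` and can be iterated; the constructor signature and module registration shown here are assumptions, not taken from the diff.

```ruby
require "torch"

# Hypothetical sketch: assumes Torch::NN::ParameterList accepts an array of
# parameters and is enumerable, mirroring PyTorch's nn.ParameterList.
class Stack < Torch::NN::Module
  def initialize
    super()
    @weights = Torch::NN::ParameterList.new(
      3.times.map { Torch::NN::Parameter.new(Torch.randn(4, 4)) }
    )
  end

  def forward(x)
    @weights.each { |w| x = x.matmul(w) }
    x
  end
end
```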
data/README.md
CHANGED
@@ -7,6 +7,7 @@ Check out:
 - [TorchVision](https://github.com/ankane/torchvision) for computer vision tasks
 - [TorchText](https://github.com/ankane/torchtext) for text and NLP tasks
 - [TorchAudio](https://github.com/ankane/torchaudio) for audio tasks
+- [TorchRec](https://github.com/ankane/torchrec-ruby) for recommendation systems
 
 [](https://github.com/ankane/torch.rb/actions)
 
@@ -408,7 +409,8 @@ Here’s the list of compatible versions.
 
 Torch.rb | LibTorch
 --- | ---
-0.9.0+ | 1.10.0+
+0.10.0+ | 1.11.0+
+0.9.0-0.9.2 | 1.10.0-1.10.2
 0.8.0-0.8.3 | 1.9.0-1.9.1
 0.6.0-0.7.0 | 1.8.0-1.8.1
 0.5.0-0.5.3 | 1.7.0-1.7.1
data/codegen/function.rb
CHANGED
@@ -37,7 +37,7 @@ class Function
   private
 
   def parse_func
-    input, output = func.
+    input, _, output = func.rpartition(/\s+->\s+/)
     [generate_params(input), generate_retvals(output)]
   end
 
@@ -52,7 +52,7 @@ class Function
        next
      end
 
-      type, name = i.
+      type, _, name = i.rpartition(/\s+/)
 
      if name.include?("=")
        name, default = name.split("=", 2)
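For readers not familiar with `rpartition`, here is a standalone Ruby illustration (not code from the gem) of what the two parsing changes above do: the string is split on the last match, so the return type and the parameter name are taken from the right-hand side while everything to the left stays intact.

```ruby
func = "add.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor"

input, _, output = func.rpartition(/\s+->\s+/)
input  # => "add.Tensor(Tensor self, Tensor other, *, Scalar alpha=1)"
output # => "Tensor"

type, _, name = "Scalar alpha=1".rpartition(/\s+/)
type # => "Scalar"
name # => "alpha=1"
```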
data/codegen/generate_functions.rb
CHANGED
@@ -14,6 +14,7 @@ def generate_functions
   generate_files("fft", :define_singleton_method, functions[:fft])
   generate_files("linalg", :define_singleton_method, functions[:linalg])
   generate_files("special", :define_singleton_method, functions[:special])
+  generate_files("sparse", :define_singleton_method, functions[:sparse])
 end
 
 def load_functions
@@ -47,6 +48,7 @@ def group_functions(functions)
   linalg_functions, other_functions = other_functions.partition { |f| f.python_module == "linalg" }
   fft_functions, other_functions = other_functions.partition { |f| f.python_module == "fft" }
   special_functions, other_functions = other_functions.partition { |f| f.python_module == "special" }
+  sparse_functions, other_functions = other_functions.partition { |f| f.python_module == "sparse" }
   unexpected_functions, other_functions = other_functions.partition { |f| f.python_module }
   torch_functions = other_functions.select { |f| f.variants.include?("function") }
   tensor_functions = other_functions.select { |f| f.variants.include?("method") }
@@ -62,7 +64,8 @@ def group_functions(functions)
     nn: nn_functions,
     linalg: linalg_functions,
     fft: fft_functions,
-    special: special_functions
+    special: special_functions,
+    sparse: sparse_functions
   }
 end
 
@@ -136,6 +139,7 @@ def generate_attach_def(name, type, def_method)
   ruby_name = ruby_name.sub(/\Afft_/, "") if type == "fft"
   ruby_name = ruby_name.sub(/\Alinalg_/, "") if type == "linalg"
   ruby_name = ruby_name.sub(/\Aspecial_/, "") if type == "special"
+  ruby_name = ruby_name.sub(/\Asparse_/, "") if type == "sparse"
   ruby_name = name if name.start_with?("__")
 
   # cast for Ruby < 2.7 https://github.com/thisMagpie/fftw/issues/22#issuecomment-49508900
@@ -289,7 +293,13 @@ def split_opt_params(params)
 end
 
 def generate_tensor_options(function, opt_params)
-
+  new_function = function.base_name.start_with?("new_")
+  like_function = function.base_name.end_with?("_like")
+
+  code = String.new("")
+  code << "\n auto self = _r.tensor(0);" if like_function
+  code << "\n const auto options = TensorOptions()"
+
   order = ["dtype", "device", "layout", "requires_grad", "pin_memory"]
   opt_params.sort_by { |v| order.index(v[:name]) }.each do |opt|
     i = opt[:position]
@@ -300,12 +310,24 @@ def generate_tensor_options(function, opt_params)
        if function.base_name == "arange"
          "dtype(_r.scalartypeOptional(#{i}))"
        else
-
+          if new_function || like_function
+            "dtype(_r.scalartypeWithDefault(#{i}, self.scalar_type()))"
+          else
+            "dtype(_r.scalartype(#{i}))"
+          end
        end
      when "device"
-
+        if new_function || like_function
+          "device(_r.deviceWithDefault(#{i}, self.device()))"
+        else
+          "device(_r.device(#{i}))"
+        end
      when "layout"
-
+        if new_function || like_function
+          "layout(_r.layoutWithDefault(#{i}, self.layout()))"
+        else
+          "layout(_r.layoutOptional(#{i}))"
+        end
      when "requires_grad"
        "requires_grad(_r.toBool(#{i}))"
      when "pin_memory"
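The `generate_tensor_options` changes above back the 0.10.1 changelog entry: for `new_*` and `*_like` functions, `dtype`, `device`, and `layout` now default to the source tensor's options when not passed explicitly. A rough sketch of the intended user-visible behavior; the exact keyword arguments and methods used below are assumptions rather than something shown in the diff:

```ruby
require "torch"

x = Torch.ones([2, 3], dtype: :float64)

# with the fix, these should inherit float64 (and x's device/layout)
# when no options are given
y = Torch.empty_like(x)
z = x.new_zeros([2, 2])

# explicitly passed options still take precedence
w = Torch.zeros_like(x, dtype: :int32)
```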