torch-rb 0.1.5 → 0.1.6
- checksums.yaml +4 -4
- data/CHANGELOG.md +6 -0
- data/README.md +1 -1
- data/ext/torch/ext.cpp +0 -170
- data/ext/torch/nn_functions.cpp +44 -24
- data/ext/torch/templates.cpp +55 -0
- data/ext/torch/templates.hpp +48 -0
- data/ext/torch/tensor_functions.cpp +76 -16
- data/ext/torch/torch_functions.cpp +165 -65
- data/lib/torch.rb +51 -42
- data/lib/torch/ext.bundle +0 -0
- data/lib/torch/native/dispatcher.rb +1 -1
- data/lib/torch/native/function.rb +36 -5
- data/lib/torch/native/generator.rb +26 -7
- data/lib/torch/native/parser.rb +51 -14
- data/lib/torch/nn/avg_pool1d.rb +18 -0
- data/lib/torch/nn/avg_pool2d.rb +7 -2
- data/lib/torch/nn/avg_pool3d.rb +19 -0
- data/lib/torch/nn/avg_poolnd.rb +1 -1
- data/lib/torch/nn/batch_norm.rb +75 -0
- data/lib/torch/nn/batch_norm1d.rb +11 -0
- data/lib/torch/nn/batch_norm2d.rb +11 -0
- data/lib/torch/nn/batch_norm3d.rb +11 -0
- data/lib/torch/nn/constant_pad1d.rb +10 -0
- data/lib/torch/nn/constant_pad2d.rb +10 -0
- data/lib/torch/nn/constant_pad3d.rb +10 -0
- data/lib/torch/nn/constant_padnd.rb +18 -0
- data/lib/torch/nn/conv1d.rb +22 -0
- data/lib/torch/nn/conv2d.rb +9 -17
- data/lib/torch/nn/conv3d.rb +22 -0
- data/lib/torch/nn/fold.rb +20 -0
- data/lib/torch/nn/functional.rb +320 -100
- data/lib/torch/nn/group_norm.rb +36 -0
- data/lib/torch/nn/gru.rb +49 -0
- data/lib/torch/nn/hardshrink.rb +18 -0
- data/lib/torch/nn/instance_norm.rb +20 -0
- data/lib/torch/nn/instance_norm1d.rb +18 -0
- data/lib/torch/nn/instance_norm2d.rb +11 -0
- data/lib/torch/nn/instance_norm3d.rb +11 -0
- data/lib/torch/nn/layer_norm.rb +35 -0
- data/lib/torch/nn/local_response_norm.rb +21 -0
- data/lib/torch/nn/log_sigmoid.rb +9 -0
- data/lib/torch/nn/lp_pool1d.rb +9 -0
- data/lib/torch/nn/lp_pool2d.rb +9 -0
- data/lib/torch/nn/lp_poolnd.rb +22 -0
- data/lib/torch/nn/lstm.rb +66 -0
- data/lib/torch/nn/max_pool1d.rb +9 -0
- data/lib/torch/nn/max_pool2d.rb +1 -1
- data/lib/torch/nn/max_pool3d.rb +9 -0
- data/lib/torch/nn/max_poolnd.rb +6 -6
- data/lib/torch/nn/max_unpool1d.rb +16 -0
- data/lib/torch/nn/max_unpool2d.rb +16 -0
- data/lib/torch/nn/max_unpool3d.rb +16 -0
- data/lib/torch/nn/max_unpoolnd.rb +9 -0
- data/lib/torch/nn/module.rb +7 -0
- data/lib/torch/nn/reflection_pad1d.rb +10 -0
- data/lib/torch/nn/reflection_pad2d.rb +10 -0
- data/lib/torch/nn/reflection_padnd.rb +13 -0
- data/lib/torch/nn/replication_pad1d.rb +10 -0
- data/lib/torch/nn/replication_pad2d.rb +10 -0
- data/lib/torch/nn/replication_pad3d.rb +10 -0
- data/lib/torch/nn/replication_padnd.rb +13 -0
- data/lib/torch/nn/rnn_base.rb +48 -4
- data/lib/torch/nn/softshrink.rb +18 -0
- data/lib/torch/nn/softsign.rb +9 -0
- data/lib/torch/nn/tanh.rb +9 -0
- data/lib/torch/nn/tanhshrink.rb +9 -0
- data/lib/torch/nn/unfold.rb +19 -0
- data/lib/torch/nn/utils.rb +25 -0
- data/lib/torch/nn/zero_pad2d.rb +9 -0
- data/lib/torch/tensor.rb +14 -25
- data/lib/torch/version.rb +1 -1
- metadata +50 -2
data/ext/torch/templates.hpp
CHANGED
@@ -248,3 +248,51 @@ OptionalTensor from_ruby<OptionalTensor>(Object x)
 {
   return OptionalTensor(x);
 }
+
+class ScalarType {
+  Object value;
+  public:
+    ScalarType(Object o) {
+      value = o;
+    }
+    operator at::ScalarType() {
+      throw std::runtime_error("ScalarType arguments not implemented yet");
+    }
+};
+
+template<>
+inline
+ScalarType from_ruby<ScalarType>(Object x)
+{
+  return ScalarType(x);
+}
+
+class OptionalScalarType {
+  Object value;
+  public:
+    OptionalScalarType(Object o) {
+      value = o;
+    }
+    operator c10::optional<at::ScalarType>() {
+      if (value.is_nil()) {
+        return c10::nullopt;
+      }
+      return ScalarType(value);
+    }
+};
+
+template<>
+inline
+OptionalScalarType from_ruby<OptionalScalarType>(Object x)
+{
+  return OptionalScalarType(x);
+}
+
+typedef torch::Device Device;
+
+Object wrap(std::tuple<torch::Tensor, torch::Tensor> x);
+Object wrap(std::tuple<torch::Tensor, torch::Tensor, torch::Tensor> x);
+Object wrap(std::tuple<torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor> x);
+Object wrap(std::tuple<torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor> x);
+Object wrap(std::tuple<torch::Tensor, torch::Tensor, torch::Tensor, int64_t> x);
+Object wrap(std::tuple<torch::Tensor, torch::Tensor, double, int64_t> x);
data/ext/torch/tensor_functions.cpp
CHANGED
@@ -362,6 +362,16 @@ void add_tensor_functions(Module m) {
       *[](Tensor &self) {
         return self.cosh_();
       })
+    .define_method(
+      "_cumprod",
+      *[](const Tensor &self, int64_t dim, OptionalScalarType dtype) {
+        return self.cumprod(dim, dtype);
+      })
+    .define_method(
+      "_cumsum",
+      *[](const Tensor &self, int64_t dim, OptionalScalarType dtype) {
+        return self.cumsum(dim, dtype);
+      })
     .define_method(
       "_data",
       *[](const Tensor &self) {
@@ -455,7 +465,7 @@ void add_tensor_functions(Module m) {
     .define_method(
       "_eig",
       *[](const Tensor &self, bool eigenvectors) {
-        return self.eig(eigenvectors);
+        return wrap(self.eig(eigenvectors));
       })
     .define_method(
       "_eq__scalar",
@@ -650,7 +660,7 @@ void add_tensor_functions(Module m) {
     .define_method(
       "_geqrf",
       *[](const Tensor &self) {
-        return self.geqrf();
+        return wrap(self.geqrf());
       })
     .define_method(
       "_ger",
@@ -820,7 +830,7 @@ void add_tensor_functions(Module m) {
     .define_method(
       "_kthvalue",
       *[](const Tensor &self, int64_t k, int64_t dim, bool keepdim) {
-        return self.kthvalue(k, dim, keepdim);
+        return wrap(self.kthvalue(k, dim, keepdim));
       })
     .define_method(
       "_le__scalar",
@@ -917,6 +927,11 @@ void add_tensor_functions(Module m) {
       *[](Tensor &self, double mean, double std) {
         return self.log_normal_(mean, std);
       })
+    .define_method(
+      "_log_softmax",
+      *[](const Tensor &self, int64_t dim, OptionalScalarType dtype) {
+        return self.log_softmax(dim, dtype);
+      })
     .define_method(
       "_logdet",
       *[](const Tensor &self) {
@@ -950,7 +965,7 @@ void add_tensor_functions(Module m) {
     .define_method(
      "_lstsq",
       *[](const Tensor &self, const Tensor &A) {
-        return self.lstsq(A);
+        return wrap(self.lstsq(A));
       })
     .define_method(
       "_lt__scalar",
@@ -1030,7 +1045,7 @@ void add_tensor_functions(Module m) {
     .define_method(
       "_max_dim",
       *[](const Tensor &self, int64_t dim, bool keepdim) {
-        return self.max(dim, keepdim);
+        return wrap(self.max(dim, keepdim));
       })
     .define_method(
       "_max_other",
@@ -1042,6 +1057,16 @@ void add_tensor_functions(Module m) {
       *[](const Tensor &self, IntArrayRef dim, bool keepdim) {
         return self.max_values(dim, keepdim);
       })
+    .define_method(
+      "_mean",
+      *[](const Tensor &self, OptionalScalarType dtype) {
+        return self.mean(dtype);
+      })
+    .define_method(
+      "_mean_dim",
+      *[](const Tensor &self, IntArrayRef dim, bool keepdim, OptionalScalarType dtype) {
+        return self.mean(dim, keepdim, dtype);
+      })
     .define_method(
       "_median",
       *[](const Tensor &self) {
@@ -1050,7 +1075,7 @@ void add_tensor_functions(Module m) {
     .define_method(
       "_median_dim",
       *[](const Tensor &self, int64_t dim, bool keepdim) {
-        return self.median(dim, keepdim);
+        return wrap(self.median(dim, keepdim));
       })
     .define_method(
       "_min",
@@ -1060,7 +1085,7 @@ void add_tensor_functions(Module m) {
     .define_method(
       "_min_dim",
       *[](const Tensor &self, int64_t dim, bool keepdim) {
-        return self.min(dim, keepdim);
+        return wrap(self.min(dim, keepdim));
       })
     .define_method(
       "_min_other",
@@ -1080,7 +1105,7 @@ void add_tensor_functions(Module m) {
     .define_method(
       "_mode",
       *[](const Tensor &self, int64_t dim, bool keepdim) {
-        return self.mode(dim, keepdim);
+        return wrap(self.mode(dim, keepdim));
       })
     .define_method(
       "_mul__scalar",
@@ -1257,6 +1282,16 @@ void add_tensor_functions(Module m) {
       *[](const Tensor &self, const Tensor &weight) {
         return self.prelu(weight);
       })
+    .define_method(
+      "_prod",
+      *[](const Tensor &self, OptionalScalarType dtype) {
+        return self.prod(dtype);
+      })
+    .define_method(
+      "_prod_dim_int",
+      *[](const Tensor &self, int64_t dim, bool keepdim, OptionalScalarType dtype) {
+        return self.prod(dim, keepdim, dtype);
+      })
     .define_method(
       "_put_",
       *[](Tensor &self, const Tensor &index, const Tensor &source, bool accumulate) {
@@ -1290,7 +1325,7 @@ void add_tensor_functions(Module m) {
     .define_method(
       "_qr",
       *[](const Tensor &self, bool some) {
-        return self.qr(some);
+        return wrap(self.qr(some));
       })
     .define_method(
       "_qscheme",
@@ -1545,22 +1580,27 @@ void add_tensor_functions(Module m) {
     .define_method(
       "_slogdet",
       *[](const Tensor &self) {
-        return self.slogdet();
+        return wrap(self.slogdet());
       })
     .define_method(
       "_smm",
       *[](const Tensor &self, const Tensor &mat2) {
         return self.smm(mat2);
       })
+    .define_method(
+      "_softmax",
+      *[](const Tensor &self, int64_t dim, OptionalScalarType dtype) {
+        return self.softmax(dim, dtype);
+      })
     .define_method(
       "_solve",
       *[](const Tensor &self, const Tensor &A) {
-        return self.solve(A);
+        return wrap(self.solve(A));
       })
     .define_method(
       "_sort",
       *[](const Tensor &self, int64_t dim, bool descending) {
-        return self.sort(dim, descending);
+        return wrap(self.sort(dim, descending));
       })
     .define_method(
       "_sparse_dim",
@@ -1662,6 +1702,16 @@ void add_tensor_functions(Module m) {
       *[](const Tensor &self, const Tensor &other, Scalar alpha) {
         return self.sub(other, alpha);
       })
+    .define_method(
+      "_sum",
+      *[](const Tensor &self, OptionalScalarType dtype) {
+        return self.sum(dtype);
+      })
+    .define_method(
+      "_sum_dim_intlist",
+      *[](const Tensor &self, IntArrayRef dim, bool keepdim, OptionalScalarType dtype) {
+        return self.sum(dim, keepdim, dtype);
+      })
     .define_method(
       "_sum_to_size",
       *[](const Tensor &self, IntArrayRef size) {
@@ -1670,12 +1720,12 @@ void add_tensor_functions(Module m) {
     .define_method(
       "_svd",
       *[](const Tensor &self, bool some, bool compute_uv) {
-        return self.svd(some, compute_uv);
+        return wrap(self.svd(some, compute_uv));
       })
     .define_method(
       "_symeig",
       *[](const Tensor &self, bool eigenvectors, bool upper) {
-        return self.symeig(eigenvectors, upper);
+        return wrap(self.symeig(eigenvectors, upper));
       })
     .define_method(
       "_t",
@@ -1717,6 +1767,16 @@ void add_tensor_functions(Module m) {
       *[](const Tensor &self) {
         return self.to_dense();
       })
+    .define_method(
+      "_to_device",
+      *[](const Tensor &self, Device device, ScalarType dtype, bool non_blocking, bool copy) {
+        return self.to(device, dtype, non_blocking, copy);
+      })
+    .define_method(
+      "_to_dtype",
+      *[](const Tensor &self, ScalarType dtype, bool non_blocking, bool copy) {
+        return self.to(dtype, non_blocking, copy);
+      })
     .define_method(
       "_to_mkldnn",
       *[](const Tensor &self) {
@@ -1740,7 +1800,7 @@ void add_tensor_functions(Module m) {
     .define_method(
       "_topk",
       *[](const Tensor &self, int64_t k, int64_t dim, bool largest, bool sorted) {
-        return self.topk(k, dim, largest, sorted);
+        return wrap(self.topk(k, dim, largest, sorted));
       })
     .define_method(
       "_trace",
@@ -1760,7 +1820,7 @@ void add_tensor_functions(Module m) {
     .define_method(
       "_triangular_solve",
       *[](const Tensor &self, const Tensor &A, bool upper, bool transpose, bool unitriangular) {
-        return self.triangular_solve(A, upper, transpose, unitriangular);
+        return wrap(self.triangular_solve(A, upper, transpose, unitriangular));
       })
     .define_method(
       "_tril",
data/ext/torch/torch_functions.cpp
CHANGED
@@ -45,7 +45,7 @@ void add_torch_functions(Module m) {
     .define_singleton_method(
       "_adaptive_max_pool1d",
       *[](const Tensor &self, IntArrayRef output_size) {
-        return torch::adaptive_max_pool1d(self, output_size);
+        return wrap(torch::adaptive_max_pool1d(self, output_size));
       })
     .define_singleton_method(
       "_add_out",
@@ -310,7 +310,7 @@ void add_torch_functions(Module m) {
     .define_singleton_method(
       "_batch_norm_backward_reduce",
       *[](const Tensor &grad_out, const Tensor &input, const Tensor &mean, const Tensor &invstd, OptionalTensor weight, bool input_g, bool weight_g, bool bias_g) {
-        return torch::batch_norm_backward_reduce(grad_out, input, mean, invstd, weight, input_g, weight_g, bias_g);
+        return wrap(torch::batch_norm_backward_reduce(grad_out, input, mean, invstd, weight, input_g, weight_g, bias_g));
       })
     .define_singleton_method(
       "_batch_norm_elemt",
@@ -320,22 +320,22 @@ void add_torch_functions(Module m) {
     .define_singleton_method(
       "_batch_norm_gather_stats",
       *[](const Tensor &input, const Tensor &mean, const Tensor &invstd, OptionalTensor running_mean, OptionalTensor running_var, double momentum, double eps, int64_t count) {
-        return torch::batch_norm_gather_stats(input, mean, invstd, running_mean, running_var, momentum, eps, count);
+        return wrap(torch::batch_norm_gather_stats(input, mean, invstd, running_mean, running_var, momentum, eps, count));
       })
     .define_singleton_method(
       "_batch_norm_gather_stats_with_counts",
       *[](const Tensor &input, const Tensor &mean, const Tensor &invstd, OptionalTensor running_mean, OptionalTensor running_var, double momentum, double eps, IntArrayRef counts) {
-        return torch::batch_norm_gather_stats_with_counts(input, mean, invstd, running_mean, running_var, momentum, eps, counts);
+        return wrap(torch::batch_norm_gather_stats_with_counts(input, mean, invstd, running_mean, running_var, momentum, eps, counts));
       })
     .define_singleton_method(
       "_batch_norm_stats",
       *[](const Tensor &input, double eps) {
-        return torch::batch_norm_stats(input, eps);
+        return wrap(torch::batch_norm_stats(input, eps));
       })
     .define_singleton_method(
       "_batch_norm_update_stats",
       *[](const Tensor &input, OptionalTensor running_mean, OptionalTensor running_var, double momentum) {
-        return torch::batch_norm_update_stats(input, running_mean, running_var, momentum);
+        return wrap(torch::batch_norm_update_stats(input, running_mean, running_var, momentum));
       })
     .define_singleton_method(
       "_bernoulli",
@@ -392,6 +392,11 @@ void add_torch_functions(Module m) {
       *[](TensorList tensors) {
         return torch::broadcast_tensors(tensors);
       })
+    .define_singleton_method(
+      "_can_cast",
+      *[](ScalarType from, ScalarType to) {
+        return torch::can_cast(from, to);
+      })
     .define_singleton_method(
       "_cartesian_prod",
       *[](TensorList tensors) {
@@ -630,7 +635,7 @@ void add_torch_functions(Module m) {
     .define_singleton_method(
       "_cudnn_batch_norm",
       *[](const Tensor &input, const Tensor &weight, OptionalTensor bias, OptionalTensor running_mean, OptionalTensor running_var, bool training, double exponential_average_factor, double epsilon) {
-        return torch::cudnn_batch_norm(input, weight, bias, running_mean, running_var, training, exponential_average_factor, epsilon);
+        return wrap(torch::cudnn_batch_norm(input, weight, bias, running_mean, running_var, training, exponential_average_factor, epsilon));
       })
     .define_singleton_method(
       "_cudnn_convolution",
@@ -682,6 +687,26 @@ void add_torch_functions(Module m) {
       *[](const Tensor &self) {
         return torch::cudnn_is_acceptable(self);
       })
+    .define_singleton_method(
+      "_cumprod",
+      *[](const Tensor &self, int64_t dim, OptionalScalarType dtype) {
+        return torch::cumprod(self, dim, dtype);
+      })
+    .define_singleton_method(
+      "_cumprod_out",
+      *[](const Tensor &self, int64_t dim, OptionalScalarType dtype, Tensor &out) {
+        return torch::cumprod_out(out, self, dim, dtype);
+      })
+    .define_singleton_method(
+      "_cumsum",
+      *[](const Tensor &self, int64_t dim, OptionalScalarType dtype) {
+        return torch::cumsum(self, dim, dtype);
+      })
+    .define_singleton_method(
+      "_cumsum_out",
+      *[](const Tensor &self, int64_t dim, OptionalScalarType dtype, Tensor &out) {
+        return torch::cumsum_out(out, self, dim, dtype);
+      })
     .define_singleton_method(
       "_dequantize",
       *[](const Tensor &self) {
@@ -780,12 +805,12 @@ void add_torch_functions(Module m) {
     .define_singleton_method(
       "_eig",
       *[](const Tensor &self, bool eigenvectors) {
-        return torch::eig(self, eigenvectors);
+        return wrap(torch::eig(self, eigenvectors));
       })
     .define_singleton_method(
       "_eig_e",
       *[](const Tensor &self, bool eigenvectors, Tensor &e, Tensor &v) {
-        return torch::eig_out(e, v, self, eigenvectors);
+        return wrap(torch::eig_out(e, v, self, eigenvectors));
       })
     .define_singleton_method(
       "_embedding",
@@ -795,7 +820,7 @@ void add_torch_functions(Module m) {
     .define_singleton_method(
       "_embedding_bag",
       *[](const Tensor &weight, const Tensor &indices, const Tensor &offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, OptionalTensor per_sample_weights) {
-        return torch::embedding_bag(weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights);
+        return wrap(torch::embedding_bag(weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights));
       })
     .define_singleton_method(
       "_embedding_renorm_",
@@ -945,7 +970,7 @@ void add_torch_functions(Module m) {
     .define_singleton_method(
       "_fbgemm_linear_quantize_weight",
       *[](const Tensor &input) {
-        return torch::fbgemm_linear_quantize_weight(input);
+        return wrap(torch::fbgemm_linear_quantize_weight(input));
       })
     .define_singleton_method(
       "_fbgemm_pack_gemm_matrix_fp16",
@@ -1115,12 +1140,12 @@ void add_torch_functions(Module m) {
     .define_singleton_method(
       "_geqrf",
       *[](const Tensor &self) {
-        return torch::geqrf(self);
+        return wrap(torch::geqrf(self));
       })
     .define_singleton_method(
       "_geqrf_a",
       *[](const Tensor &self, Tensor &a, Tensor &tau) {
-        return torch::geqrf_out(a, tau, self);
+        return wrap(torch::geqrf_out(a, tau, self));
       })
     .define_singleton_method(
       "_ger",
@@ -1160,12 +1185,12 @@ void add_torch_functions(Module m) {
     .define_singleton_method(
       "_gru_data",
       *[](const Tensor &data, const Tensor &batch_sizes, const Tensor &hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) {
-        return torch::gru(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional);
+        return wrap(torch::gru(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional));
       })
     .define_singleton_method(
       "_gru_input",
       *[](const Tensor &input, const Tensor &hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
-        return torch::gru(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
+        return wrap(torch::gru(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first));
       })
     .define_singleton_method(
       "_gt_scalar",
@@ -1325,12 +1350,12 @@ void add_torch_functions(Module m) {
     .define_singleton_method(
       "_kthvalue",
       *[](const Tensor &self, int64_t k, int64_t dim, bool keepdim) {
-        return torch::kthvalue(self, k, dim, keepdim);
+        return wrap(torch::kthvalue(self, k, dim, keepdim));
       })
     .define_singleton_method(
       "_kthvalue_values",
       *[](const Tensor &self, int64_t k, int64_t dim, bool keepdim, Tensor &values, Tensor &indices) {
-        return torch::kthvalue_out(values, indices, self, k, dim, keepdim);
+        return wrap(torch::kthvalue_out(values, indices, self, k, dim, keepdim));
       })
     .define_singleton_method(
       "_layer_norm",
@@ -1452,6 +1477,11 @@ void add_torch_functions(Module m) {
       *[](const Tensor &self, Tensor &out) {
         return torch::log_out(out, self);
       })
+    .define_singleton_method(
+      "_log_softmax",
+      *[](const Tensor &self, int64_t dim, OptionalScalarType dtype) {
+        return torch::log_softmax(self, dim, dtype);
+      })
     .define_singleton_method(
       "_logdet",
       *[](const Tensor &self) {
@@ -1495,27 +1525,27 @@ void add_torch_functions(Module m) {
     .define_singleton_method(
       "_lstm_cell",
       *[](const Tensor &input, TensorList hx, const Tensor &w_ih, const Tensor &w_hh, OptionalTensor b_ih, OptionalTensor b_hh) {
-        return torch::lstm_cell(input, hx, w_ih, w_hh, b_ih, b_hh);
+        return wrap(torch::lstm_cell(input, hx, w_ih, w_hh, b_ih, b_hh));
       })
     .define_singleton_method(
       "_lstm_data",
       *[](const Tensor &data, const Tensor &batch_sizes, TensorList hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) {
-        return torch::lstm(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional);
+        return wrap(torch::lstm(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional));
       })
     .define_singleton_method(
       "_lstm_input",
       *[](const Tensor &input, TensorList hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
-        return torch::lstm(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
+        return wrap(torch::lstm(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first));
       })
     .define_singleton_method(
       "_lstsq",
       *[](const Tensor &self, const Tensor &A) {
-        return torch::lstsq(self, A);
+        return wrap(torch::lstsq(self, A));
       })
     .define_singleton_method(
       "_lstsq_x",
       *[](const Tensor &self, const Tensor &A, Tensor &X, Tensor &qr) {
-        return torch::lstsq_out(X, qr, self, A);
+        return wrap(torch::lstsq_out(X, qr, self, A));
       })
     .define_singleton_method(
       "_lt_scalar",
@@ -1610,12 +1640,12 @@ void add_torch_functions(Module m) {
     .define_singleton_method(
       "_max_dim",
       *[](const Tensor &self, int64_t dim, bool keepdim) {
-        return torch::max(self, dim, keepdim);
+        return wrap(torch::max(self, dim, keepdim));
       })
     .define_singleton_method(
       "_max_dim_max",
       *[](const Tensor &self, int64_t dim, bool keepdim, Tensor &max, Tensor &max_values) {
-        return torch::max_out(max, max_values, self, dim, keepdim);
+        return wrap(torch::max_out(max, max_values, self, dim, keepdim));
       })
     .define_singleton_method(
       "_max_other",
@@ -1635,7 +1665,7 @@ void add_torch_functions(Module m) {
     .define_singleton_method(
       "_max_pool1d_with_indices",
       *[](const Tensor &self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) {
-        return torch::max_pool1d_with_indices(self, kernel_size, stride, padding, dilation, ceil_mode);
+        return wrap(torch::max_pool1d_with_indices(self, kernel_size, stride, padding, dilation, ceil_mode));
       })
     .define_singleton_method(
       "_max_pool2d",
@@ -1652,6 +1682,21 @@ void add_torch_functions(Module m) {
       *[](const Tensor &self, IntArrayRef dim, bool keepdim) {
         return torch::max_values(self, dim, keepdim);
       })
+    .define_singleton_method(
+      "_mean",
+      *[](const Tensor &self, OptionalScalarType dtype) {
+        return torch::mean(self, dtype);
+      })
+    .define_singleton_method(
+      "_mean_dim",
+      *[](const Tensor &self, IntArrayRef dim, bool keepdim, OptionalScalarType dtype) {
+        return torch::mean(self, dim, keepdim, dtype);
+      })
+    .define_singleton_method(
+      "_mean_out",
+      *[](const Tensor &self, IntArrayRef dim, bool keepdim, OptionalScalarType dtype, Tensor &out) {
+        return torch::mean_out(out, self, dim, keepdim, dtype);
+      })
     .define_singleton_method(
       "_median",
       *[](const Tensor &self) {
@@ -1660,12 +1705,12 @@ void add_torch_functions(Module m) {
     .define_singleton_method(
       "_median_dim",
       *[](const Tensor &self, int64_t dim, bool keepdim) {
-        return torch::median(self, dim, keepdim);
+        return wrap(torch::median(self, dim, keepdim));
       })
     .define_singleton_method(
       "_median_dim_values",
       *[](const Tensor &self, int64_t dim, bool keepdim, Tensor &values, Tensor &indices) {
-        return torch::median_out(values, indices, self, dim, keepdim);
+        return wrap(torch::median_out(values, indices, self, dim, keepdim));
       })
     .define_singleton_method(
       "_meshgrid",
@@ -1680,12 +1725,12 @@ void add_torch_functions(Module m) {
     .define_singleton_method(
       "_min_dim",
       *[](const Tensor &self, int64_t dim, bool keepdim) {
-        return torch::min(self, dim, keepdim);
+        return wrap(torch::min(self, dim, keepdim));
       })
     .define_singleton_method(
       "_min_dim_min",
       *[](const Tensor &self, int64_t dim, bool keepdim, Tensor &min, Tensor &min_indices) {
-        return torch::min_out(min, min_indices, self, dim, keepdim);
+        return wrap(torch::min_out(min, min_indices, self, dim, keepdim));
       })
     .define_singleton_method(
       "_min_other",
@@ -1705,7 +1750,7 @@ void add_torch_functions(Module m) {
     .define_singleton_method(
       "_miopen_batch_norm",
       *[](const Tensor &input, const Tensor &weight, OptionalTensor bias, OptionalTensor running_mean, OptionalTensor running_var, bool training, double exponential_average_factor, double epsilon) {
-        return torch::miopen_batch_norm(input, weight, bias, running_mean, running_var, training, exponential_average_factor, epsilon);
+        return wrap(torch::miopen_batch_norm(input, weight, bias, running_mean, running_var, training, exponential_average_factor, epsilon));
       })
     .define_singleton_method(
       "_miopen_convolution",
@@ -1760,7 +1805,7 @@ void add_torch_functions(Module m) {
     .define_singleton_method(
       "_miopen_rnn",
       *[](const Tensor &input, TensorList weight, int64_t weight_stride0, const Tensor &hx, OptionalTensor cx, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, IntArrayRef batch_sizes, OptionalTensor dropout_state) {
-        return torch::miopen_rnn(input, weight, weight_stride0, hx, cx, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state);
+        return wrap(torch::miopen_rnn(input, weight, weight_stride0, hx, cx, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state));
       })
     .define_singleton_method(
       "_mkldnn_adaptive_avg_pool2d",
@@ -1780,7 +1825,7 @@ void add_torch_functions(Module m) {
     .define_singleton_method(
       "_mkldnn_convolution_backward_weights",
       *[](IntArrayRef weight_size, const Tensor &grad_output, const Tensor &self, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool bias_defined) {
-        return torch::mkldnn_convolution_backward_weights(weight_size, grad_output, self, padding, stride, dilation, groups, bias_defined);
+        return wrap(torch::mkldnn_convolution_backward_weights(weight_size, grad_output, self, padding, stride, dilation, groups, bias_defined));
       })
     .define_singleton_method(
       "_mkldnn_max_pool2d",
@@ -1800,12 +1845,12 @@ void add_torch_functions(Module m) {
     .define_singleton_method(
       "_mode",
       *[](const Tensor &self, int64_t dim, bool keepdim) {
-        return torch::mode(self, dim, keepdim);
+        return wrap(torch::mode(self, dim, keepdim));
       })
     .define_singleton_method(
       "_mode_values",
       *[](const Tensor &self, int64_t dim, bool keepdim, Tensor &values, Tensor &indices) {
-        return torch::mode_out(values, indices, self, dim, keepdim);
+        return wrap(torch::mode_out(values, indices, self, dim, keepdim));
       })
     .define_singleton_method(
       "_mul_out",
@@ -1855,12 +1900,12 @@ void add_torch_functions(Module m) {
     .define_singleton_method(
       "_native_batch_norm",
      *[](const Tensor &input, OptionalTensor weight, OptionalTensor bias, OptionalTensor running_mean, OptionalTensor running_var, bool training, double momentum, double eps) {
-        return torch::native_batch_norm(input, weight, bias, running_mean, running_var, training, momentum, eps);
+        return wrap(torch::native_batch_norm(input, weight, bias, running_mean, running_var, training, momentum, eps));
       })
     .define_singleton_method(
       "_native_layer_norm",
       *[](const Tensor &input, OptionalTensor weight, OptionalTensor bias, int64_t M, int64_t N, double eps) {
-        return torch::native_layer_norm(input, weight, bias, M, N, eps);
+        return wrap(torch::native_layer_norm(input, weight, bias, M, N, eps));
       })
     .define_singleton_method(
       "_native_norm",
@@ -2057,6 +2102,26 @@ void add_torch_functions(Module m) {
       *[](const Tensor &self, const Tensor &weight) {
         return torch::prelu(self, weight);
       })
+    .define_singleton_method(
+      "_prod",
+      *[](const Tensor &self, OptionalScalarType dtype) {
+        return torch::prod(self, dtype);
+      })
+    .define_singleton_method(
+      "_prod_dim_int",
+      *[](const Tensor &self, int64_t dim, bool keepdim, OptionalScalarType dtype) {
+        return torch::prod(self, dim, keepdim, dtype);
+      })
+    .define_singleton_method(
+      "_prod_int_out",
+      *[](const Tensor &self, int64_t dim, bool keepdim, OptionalScalarType dtype, Tensor &out) {
+        return torch::prod_out(out, self, dim, keepdim, dtype);
+      })
+    .define_singleton_method(
+      "_promote_types",
+      *[](ScalarType type1, ScalarType type2) {
+        return torch::promote_types(type1, type2);
+      })
     .define_singleton_method(
       "_q_per_channel_axis",
       *[](const Tensor &self) {
@@ -2085,12 +2150,22 @@ void add_torch_functions(Module m) {
     .define_singleton_method(
       "_qr",
       *[](const Tensor &self, bool some) {
-        return torch::qr(self, some);
+        return wrap(torch::qr(self, some));
       })
     .define_singleton_method(
       "_qr_q",
       *[](const Tensor &self, bool some, Tensor &Q, Tensor &R) {
-        return torch::qr_out(Q, R, self, some);
+        return wrap(torch::qr_out(Q, R, self, some));
+      })
+    .define_singleton_method(
+      "_quantize_per_channel",
+      *[](const Tensor &self, const Tensor &scales, const Tensor &zero_points, int64_t axis, ScalarType dtype) {
+        return torch::quantize_per_channel(self, scales, zero_points, axis, dtype);
+      })
+    .define_singleton_method(
+      "_quantize_per_tensor",
+      *[](const Tensor &self, double scale, int64_t zero_point, ScalarType dtype) {
+        return torch::quantize_per_tensor(self, scale, zero_point, dtype);
       })
     .define_singleton_method(
       "_quantized_gru_cell",
@@ -2100,17 +2175,22 @@ void add_torch_functions(Module m) {
     .define_singleton_method(
       "_quantized_gru_data",
       *[](const Tensor &data, const Tensor &batch_sizes, const Tensor &hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) {
-        return torch::quantized_gru(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional);
+        return wrap(torch::quantized_gru(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional));
       })
     .define_singleton_method(
       "_quantized_gru_input",
       *[](const Tensor &input, const Tensor &hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
-        return torch::quantized_gru(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
+        return wrap(torch::quantized_gru(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first));
+      })
+    .define_singleton_method(
+      "_quantized_lstm",
+      *[](const Tensor &input, TensorList hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first, OptionalScalarType dtype, bool use_dynamic) {
+        return wrap(torch::quantized_lstm(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first, dtype, use_dynamic));
       })
     .define_singleton_method(
       "_quantized_lstm_cell",
       *[](const Tensor &input, TensorList hx, const Tensor &w_ih, const Tensor &w_hh, const Tensor &b_ih, const Tensor &b_hh, const Tensor &packed_ih, const Tensor &packed_hh, const Tensor &col_offsets_ih, const Tensor &col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) {
-        return torch::quantized_lstm_cell(input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
+        return wrap(torch::quantized_lstm_cell(input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh));
       })
     .define_singleton_method(
       "_quantized_max_pool2d",
@@ -2325,12 +2405,12 @@ void add_torch_functions(Module m) {
     .define_singleton_method(
       "_rnn_relu_data",
       *[](const Tensor &data, const Tensor &batch_sizes, const Tensor &hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) {
-        return torch::rnn_relu(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional);
+        return wrap(torch::rnn_relu(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional));
       })
     .define_singleton_method(
       "_rnn_relu_input",
       *[](const Tensor &input, const Tensor &hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
-        return torch::rnn_relu(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
+        return wrap(torch::rnn_relu(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first));
       })
     .define_singleton_method(
       "_rnn_tanh_cell",
@@ -2340,12 +2420,12 @@ void add_torch_functions(Module m) {
     .define_singleton_method(
       "_rnn_tanh_data",
       *[](const Tensor &data, const Tensor &batch_sizes, const Tensor &hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) {
-        return torch::rnn_tanh(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional);
+        return wrap(torch::rnn_tanh(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional));
       })
     .define_singleton_method(
       "_rnn_tanh_input",
       *[](const Tensor &input, const Tensor &hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
-        return torch::rnn_tanh(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
+        return wrap(torch::rnn_tanh(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first));
       })
     .define_singleton_method(
       "_roll",
@@ -2505,32 +2585,37 @@ void add_torch_functions(Module m) {
     .define_singleton_method(
       "_slogdet",
       *[](const Tensor &self) {
-        return torch::slogdet(self);
+        return wrap(torch::slogdet(self));
       })
     .define_singleton_method(
       "_smm",
       *[](const Tensor &self, const Tensor &mat2) {
         return torch::smm(self, mat2);
       })
+    .define_singleton_method(
+      "_softmax",
+      *[](const Tensor &self, int64_t dim, OptionalScalarType dtype) {
+        return torch::softmax(self, dim, dtype);
+      })
     .define_singleton_method(
       "_solve",
       *[](const Tensor &self, const Tensor &A) {
-        return torch::solve(self, A);
+        return wrap(torch::solve(self, A));
      })
     .define_singleton_method(
       "_solve_solution",
       *[](const Tensor &self, const Tensor &A, Tensor &solution, Tensor &lu) {
-        return torch::solve_out(solution, lu, self, A);
+        return wrap(torch::solve_out(solution, lu, self, A));
       })
     .define_singleton_method(
       "_sort",
       *[](const Tensor &self, int64_t dim, bool descending) {
-        return torch::sort(self, dim, descending);
+        return wrap(torch::sort(self, dim, descending));
       })
     .define_singleton_method(
       "_sort_values",
       *[](const Tensor &self, int64_t dim, bool descending, Tensor &values, Tensor &indices) {
-        return torch::sort_out(values, indices, self, dim, descending);
+        return wrap(torch::sort_out(values, indices, self, dim, descending));
       })
     .define_singleton_method(
       "_split_tensor",
@@ -2600,12 +2685,12 @@ void add_torch_functions(Module m) {
     .define_singleton_method(
       "_std_mean",
       *[](const Tensor &self, bool unbiased) {
-        return torch::std_mean(self, unbiased);
+        return wrap(torch::std_mean(self, unbiased));
       })
     .define_singleton_method(
       "_std_mean_dim",
       *[](const Tensor &self, IntArrayRef dim, bool unbiased, bool keepdim) {
-        return torch::std_mean(self, dim, unbiased, keepdim);
+        return wrap(torch::std_mean(self, dim, unbiased, keepdim));
       })
     .define_singleton_method(
       "_std_out",
@@ -2632,25 +2717,40 @@ void add_torch_functions(Module m) {
       *[](const Tensor &self, const Tensor &other, Scalar alpha) {
         return torch::sub(self, other, alpha);
       })
+    .define_singleton_method(
+      "_sum",
+      *[](const Tensor &self, OptionalScalarType dtype) {
+        return torch::sum(self, dtype);
+      })
+    .define_singleton_method(
+      "_sum_dim_intlist",
+      *[](const Tensor &self, IntArrayRef dim, bool keepdim, OptionalScalarType dtype) {
+        return torch::sum(self, dim, keepdim, dtype);
+      })
+    .define_singleton_method(
+      "_sum_intlist_out",
+      *[](const Tensor &self, IntArrayRef dim, bool keepdim, OptionalScalarType dtype, Tensor &out) {
+        return torch::sum_out(out, self, dim, keepdim, dtype);
+      })
     .define_singleton_method(
       "_svd",
       *[](const Tensor &self, bool some, bool compute_uv) {
-        return torch::svd(self, some, compute_uv);
+        return wrap(torch::svd(self, some, compute_uv));
       })
     .define_singleton_method(
       "_svd_u",
       *[](const Tensor &self, bool some, bool compute_uv, Tensor &U, Tensor &S, Tensor &V) {
-        return torch::svd_out(U, S, V, self, some, compute_uv);
+        return wrap(torch::svd_out(U, S, V, self, some, compute_uv));
       })
     .define_singleton_method(
       "_symeig",
       *[](const Tensor &self, bool eigenvectors, bool upper) {
-        return torch::symeig(self, eigenvectors, upper);
+        return wrap(torch::symeig(self, eigenvectors, upper));
       })
     .define_singleton_method(
       "_symeig_e",
       *[](const Tensor &self, bool eigenvectors, bool upper, Tensor &e, Tensor &V) {
-        return torch::symeig_out(e, V, self, eigenvectors, upper);
+        return wrap(torch::symeig_out(e, V, self, eigenvectors, upper));
       })
     .define_singleton_method(
       "_t",
@@ -2720,12 +2820,12 @@ void add_torch_functions(Module m) {
     .define_singleton_method(
       "_topk",
       *[](const Tensor &self, int64_t k, int64_t dim, bool largest, bool sorted) {
-        return torch::topk(self, k, dim, largest, sorted);
+        return wrap(torch::topk(self, k, dim, largest, sorted));
       })
     .define_singleton_method(
       "_topk_values",
       *[](const Tensor &self, int64_t k, int64_t dim, bool largest, bool sorted, Tensor &values, Tensor &indices) {
-        return torch::topk_out(values, indices, self, k, dim, largest, sorted);
+        return wrap(torch::topk_out(values, indices, self, k, dim, largest, sorted));
       })
     .define_singleton_method(
       "_trace",
@@ -2750,12 +2850,12 @@ void add_torch_functions(Module m) {
     .define_singleton_method(
       "_triangular_solve",
       *[](const Tensor &self, const Tensor &A, bool upper, bool transpose, bool unitriangular) {
-        return torch::triangular_solve(self, A, upper, transpose, unitriangular);
+        return wrap(torch::triangular_solve(self, A, upper, transpose, unitriangular));
       })
     .define_singleton_method(
       "_triangular_solve_x",
       *[](const Tensor &self, const Tensor &A, bool upper, bool transpose, bool unitriangular, Tensor &X, Tensor &M) {
-        return torch::triangular_solve_out(X, M, self, A, upper, transpose, unitriangular);
+        return wrap(torch::triangular_solve_out(X, M, self, A, upper, transpose, unitriangular));
       })
     .define_singleton_method(
       "_tril",
@@ -2805,17 +2905,17 @@ void add_torch_functions(Module m) {
     .define_singleton_method(
       "_unique_consecutive",
       *[](const Tensor &self, bool return_inverse, bool return_counts) {
-        return torch::unique_consecutive(self, return_inverse, return_counts);
+        return wrap(torch::unique_consecutive(self, return_inverse, return_counts));
      })
     .define_singleton_method(
       "_unique_consecutive_dim",
       *[](const Tensor &self, bool return_inverse, bool return_counts, int64_t dim) {
-        return torch::unique_consecutive(self, return_inverse, return_counts, dim);
+        return wrap(torch::unique_consecutive(self, return_inverse, return_counts, dim));
       })
     .define_singleton_method(
       "_unique_dim",
       *[](const Tensor &self, int64_t dim, bool sorted, bool return_inverse, bool return_counts) {
-        return torch::unique_dim(self, dim, sorted, return_inverse, return_counts);
+        return wrap(torch::unique_dim(self, dim, sorted, return_inverse, return_counts));
       })
     .define_singleton_method(
       "_unsqueeze",
@@ -2835,12 +2935,12 @@ void add_torch_functions(Module m) {
     .define_singleton_method(
       "_var_mean",
       *[](const Tensor &self, bool unbiased) {
-        return torch::var_mean(self, unbiased);
+        return wrap(torch::var_mean(self, unbiased));
       })
     .define_singleton_method(
       "_var_mean_dim",
       *[](const Tensor &self, IntArrayRef dim, bool unbiased, bool keepdim) {
-        return torch::var_mean(self, dim, unbiased, keepdim);
+        return wrap(torch::var_mean(self, dim, unbiased, keepdim));
       })
     .define_singleton_method(
       "_var_out",