torch-rb 0.1.1 → 0.1.6

Files changed (142)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +40 -0
  3. data/LICENSE.txt +46 -22
  4. data/README.md +73 -9
  5. data/ext/torch/ext.cpp +148 -315
  6. data/ext/torch/extconf.rb +6 -0
  7. data/ext/torch/nn_functions.cpp +615 -0
  8. data/ext/torch/nn_functions.hpp +6 -0
  9. data/ext/torch/templates.cpp +55 -0
  10. data/ext/torch/templates.hpp +298 -0
  11. data/ext/torch/tensor_functions.cpp +1920 -0
  12. data/ext/torch/tensor_functions.hpp +6 -0
  13. data/ext/torch/torch_functions.cpp +2975 -0
  14. data/ext/torch/torch_functions.hpp +6 -0
  15. data/lib/torch.rb +236 -112
  16. data/lib/torch/ext.bundle +0 -0
  17. data/lib/torch/inspector.rb +52 -25
  18. data/lib/torch/native/dispatcher.rb +48 -0
  19. data/lib/torch/native/function.rb +109 -0
  20. data/lib/torch/native/generator.rb +168 -0
  21. data/lib/torch/native/native_functions.yaml +6837 -0
  22. data/lib/torch/native/parser.rb +134 -0
  23. data/lib/torch/nn/alpha_dropout.rb +9 -0
  24. data/lib/torch/nn/avg_pool1d.rb +18 -0
  25. data/lib/torch/nn/avg_pool2d.rb +19 -0
  26. data/lib/torch/nn/avg_pool3d.rb +19 -0
  27. data/lib/torch/nn/avg_poolnd.rb +9 -0
  28. data/lib/torch/nn/batch_norm.rb +75 -0
  29. data/lib/torch/nn/batch_norm1d.rb +11 -0
  30. data/lib/torch/nn/batch_norm2d.rb +11 -0
  31. data/lib/torch/nn/batch_norm3d.rb +11 -0
  32. data/lib/torch/nn/bce_loss.rb +13 -0
  33. data/lib/torch/nn/bce_with_logits_loss.rb +15 -0
  34. data/lib/torch/nn/bilinear.rb +38 -0
  35. data/lib/torch/nn/constant_pad1d.rb +10 -0
  36. data/lib/torch/nn/constant_pad2d.rb +10 -0
  37. data/lib/torch/nn/constant_pad3d.rb +10 -0
  38. data/lib/torch/nn/constant_padnd.rb +18 -0
  39. data/lib/torch/nn/conv1d.rb +22 -0
  40. data/lib/torch/nn/conv2d.rb +16 -39
  41. data/lib/torch/nn/conv3d.rb +22 -0
  42. data/lib/torch/nn/convnd.rb +41 -0
  43. data/lib/torch/nn/cosine_embedding_loss.rb +14 -0
  44. data/lib/torch/nn/cosine_similarity.rb +15 -0
  45. data/lib/torch/nn/cross_entropy_loss.rb +14 -0
  46. data/lib/torch/nn/ctc_loss.rb +15 -0
  47. data/lib/torch/nn/dropout.rb +9 -0
  48. data/lib/torch/nn/dropout2d.rb +9 -0
  49. data/lib/torch/nn/dropout3d.rb +9 -0
  50. data/lib/torch/nn/dropoutnd.rb +15 -0
  51. data/lib/torch/nn/embedding.rb +52 -0
  52. data/lib/torch/nn/embedding_bag.rb +34 -0
  53. data/lib/torch/nn/feature_alpha_dropout.rb +9 -0
  54. data/lib/torch/nn/fold.rb +20 -0
  55. data/lib/torch/nn/functional.rb +419 -16
  56. data/lib/torch/nn/group_norm.rb +36 -0
  57. data/lib/torch/nn/gru.rb +49 -0
  58. data/lib/torch/nn/hardshrink.rb +18 -0
  59. data/lib/torch/nn/hinge_embedding_loss.rb +14 -0
  60. data/lib/torch/nn/identity.rb +14 -0
  61. data/lib/torch/nn/init.rb +58 -1
  62. data/lib/torch/nn/instance_norm.rb +20 -0
  63. data/lib/torch/nn/instance_norm1d.rb +18 -0
  64. data/lib/torch/nn/instance_norm2d.rb +11 -0
  65. data/lib/torch/nn/instance_norm3d.rb +11 -0
  66. data/lib/torch/nn/kl_div_loss.rb +13 -0
  67. data/lib/torch/nn/l1_loss.rb +13 -0
  68. data/lib/torch/nn/layer_norm.rb +35 -0
  69. data/lib/torch/nn/leaky_relu.rb +20 -0
  70. data/lib/torch/nn/linear.rb +12 -11
  71. data/lib/torch/nn/local_response_norm.rb +21 -0
  72. data/lib/torch/nn/log_sigmoid.rb +9 -0
  73. data/lib/torch/nn/log_softmax.rb +14 -0
  74. data/lib/torch/nn/loss.rb +10 -0
  75. data/lib/torch/nn/lp_pool1d.rb +9 -0
  76. data/lib/torch/nn/lp_pool2d.rb +9 -0
  77. data/lib/torch/nn/lp_poolnd.rb +22 -0
  78. data/lib/torch/nn/lstm.rb +66 -0
  79. data/lib/torch/nn/margin_ranking_loss.rb +14 -0
  80. data/lib/torch/nn/max_pool1d.rb +9 -0
  81. data/lib/torch/nn/max_pool2d.rb +9 -0
  82. data/lib/torch/nn/max_pool3d.rb +9 -0
  83. data/lib/torch/nn/max_poolnd.rb +19 -0
  84. data/lib/torch/nn/max_unpool1d.rb +16 -0
  85. data/lib/torch/nn/max_unpool2d.rb +16 -0
  86. data/lib/torch/nn/max_unpool3d.rb +16 -0
  87. data/lib/torch/nn/max_unpoolnd.rb +9 -0
  88. data/lib/torch/nn/module.rb +191 -19
  89. data/lib/torch/nn/mse_loss.rb +2 -2
  90. data/lib/torch/nn/multi_label_margin_loss.rb +13 -0
  91. data/lib/torch/nn/multi_label_soft_margin_loss.rb +13 -0
  92. data/lib/torch/nn/multi_margin_loss.rb +17 -0
  93. data/lib/torch/nn/nll_loss.rb +14 -0
  94. data/lib/torch/nn/pairwise_distance.rb +16 -0
  95. data/lib/torch/nn/parameter.rb +4 -0
  96. data/lib/torch/nn/poisson_nll_loss.rb +16 -0
  97. data/lib/torch/nn/prelu.rb +19 -0
  98. data/lib/torch/nn/reflection_pad1d.rb +10 -0
  99. data/lib/torch/nn/reflection_pad2d.rb +10 -0
  100. data/lib/torch/nn/reflection_padnd.rb +13 -0
  101. data/lib/torch/nn/relu.rb +8 -3
  102. data/lib/torch/nn/replication_pad1d.rb +10 -0
  103. data/lib/torch/nn/replication_pad2d.rb +10 -0
  104. data/lib/torch/nn/replication_pad3d.rb +10 -0
  105. data/lib/torch/nn/replication_padnd.rb +13 -0
  106. data/lib/torch/nn/rnn.rb +22 -0
  107. data/lib/torch/nn/rnn_base.rb +198 -0
  108. data/lib/torch/nn/sequential.rb +1 -10
  109. data/lib/torch/nn/sigmoid.rb +9 -0
  110. data/lib/torch/nn/smooth_l1_loss.rb +13 -0
  111. data/lib/torch/nn/soft_margin_loss.rb +13 -0
  112. data/lib/torch/nn/softmax.rb +18 -0
  113. data/lib/torch/nn/softmax2d.rb +10 -0
  114. data/lib/torch/nn/softmin.rb +14 -0
  115. data/lib/torch/nn/softplus.rb +19 -0
  116. data/lib/torch/nn/softshrink.rb +18 -0
  117. data/lib/torch/nn/softsign.rb +9 -0
  118. data/lib/torch/nn/tanh.rb +9 -0
  119. data/lib/torch/nn/tanhshrink.rb +9 -0
  120. data/lib/torch/nn/triplet_margin_loss.rb +18 -0
  121. data/lib/torch/nn/unfold.rb +19 -0
  122. data/lib/torch/nn/utils.rb +25 -0
  123. data/lib/torch/nn/weighted_loss.rb +10 -0
  124. data/lib/torch/nn/zero_pad2d.rb +9 -0
  125. data/lib/torch/optim/adadelta.rb +57 -0
  126. data/lib/torch/optim/adagrad.rb +71 -0
  127. data/lib/torch/optim/adam.rb +81 -0
  128. data/lib/torch/optim/adamax.rb +68 -0
  129. data/lib/torch/optim/adamw.rb +82 -0
  130. data/lib/torch/optim/asgd.rb +65 -0
  131. data/lib/torch/optim/lr_scheduler/lr_scheduler.rb +33 -0
  132. data/lib/torch/optim/lr_scheduler/step_lr.rb +17 -0
  133. data/lib/torch/optim/optimizer.rb +62 -0
  134. data/lib/torch/optim/rmsprop.rb +76 -0
  135. data/lib/torch/optim/rprop.rb +68 -0
  136. data/lib/torch/optim/sgd.rb +60 -0
  137. data/lib/torch/random.rb +10 -0
  138. data/lib/torch/tensor.rb +90 -30
  139. data/lib/torch/utils/data/data_loader.rb +15 -0
  140. data/lib/torch/utils/data/tensor_dataset.rb +8 -1
  141. data/lib/torch/version.rb +1 -1
  142. metadata +122 -3
data/ext/torch/nn_functions.hpp
@@ -0,0 +1,6 @@
+ // generated by rake generate:functions
+ // do not edit by hand
+
+ #pragma once
+
+ void add_nn_functions(Module m);
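
This generated header only declares a registration hook; the extension entry point is expected to call it when the library loads. As a minimal sketch of how such a hook gets wired up (the module layout and entry point below are assumptions for illustration, not the ext.cpp from this diff):

#include <torch/torch.h>
#include <rice/Module.hpp>
#include "templates.hpp"     // brings `using namespace Rice;` into scope
#include "nn_functions.hpp"

// Hypothetical entry point; the real ext.cpp in this release differs.
extern "C"
void Init_ext()
{
  Module rb_mTorch = define_module("Torch");
  Module rb_mNN = define_module_under(rb_mTorch, "NN");
  add_nn_functions(rb_mNN); // attach the generated NN bindings
}
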
data/ext/torch/templates.cpp
@@ -0,0 +1,55 @@
+ #include <torch/torch.h>
+ #include <rice/Object.hpp>
+ #include "templates.hpp"
+
+ Object wrap(std::tuple<torch::Tensor, torch::Tensor> x) {
+ Array a;
+ a.push(to_ruby<torch::Tensor>(std::get<0>(x)));
+ a.push(to_ruby<torch::Tensor>(std::get<1>(x)));
+ return Object(a);
+ }
+
+ Object wrap(std::tuple<torch::Tensor, torch::Tensor, torch::Tensor> x) {
+ Array a;
+ a.push(to_ruby<torch::Tensor>(std::get<0>(x)));
+ a.push(to_ruby<torch::Tensor>(std::get<1>(x)));
+ a.push(to_ruby<torch::Tensor>(std::get<2>(x)));
+ return Object(a);
+ }
+
+ Object wrap(std::tuple<torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor> x) {
+ Array a;
+ a.push(to_ruby<torch::Tensor>(std::get<0>(x)));
+ a.push(to_ruby<torch::Tensor>(std::get<1>(x)));
+ a.push(to_ruby<torch::Tensor>(std::get<2>(x)));
+ a.push(to_ruby<torch::Tensor>(std::get<3>(x)));
+ return Object(a);
+ }
+
+ Object wrap(std::tuple<torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor> x) {
+ Array a;
+ a.push(to_ruby<torch::Tensor>(std::get<0>(x)));
+ a.push(to_ruby<torch::Tensor>(std::get<1>(x)));
+ a.push(to_ruby<torch::Tensor>(std::get<2>(x)));
+ a.push(to_ruby<torch::Tensor>(std::get<3>(x)));
+ a.push(to_ruby<torch::Tensor>(std::get<4>(x)));
+ return Object(a);
+ }
+
+ Object wrap(std::tuple<torch::Tensor, torch::Tensor, torch::Tensor, int64_t> x) {
+ Array a;
+ a.push(to_ruby<torch::Tensor>(std::get<0>(x)));
+ a.push(to_ruby<torch::Tensor>(std::get<1>(x)));
+ a.push(to_ruby<torch::Tensor>(std::get<2>(x)));
+ a.push(to_ruby<int64_t>(std::get<3>(x)));
+ return Object(a);
+ }
+
+ Object wrap(std::tuple<torch::Tensor, torch::Tensor, double, int64_t> x) {
+ Array a;
+ a.push(to_ruby<torch::Tensor>(std::get<0>(x)));
+ a.push(to_ruby<torch::Tensor>(std::get<1>(x)));
+ a.push(to_ruby<double>(std::get<2>(x)));
+ a.push(to_ruby<int64_t>(std::get<3>(x)));
+ return Object(a);
+ }
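
Each wrap() overload above flattens one tuple shape that LibTorch can return into a Ruby Array. As a hedged illustration of how the generated bindings use it (the method name below is invented; the real `_sort` binding appears later in this diff), a binding whose C++ result is a std::tuple<Tensor, Tensor> reaches Ruby as a two-element Array:

#include <torch/torch.h>
#include <rice/Module.hpp>
#include "templates.hpp"

// Illustrative sketch, mirroring the generated tensor bindings:
// Tensor#sort yields std::tuple<Tensor, Tensor>, which wrap()
// converts to [values, indices] on the Ruby side.
void add_example_function(Module m) {
  m.define_method(
    "_example_sort",
    *[](const torch::Tensor &self, int64_t dim, bool descending) {
      return wrap(self.sort(dim, descending));
    });
}
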
data/ext/torch/templates.hpp
@@ -0,0 +1,298 @@
+ #pragma once
+
+ #include <rice/Array.hpp>
+ #include <rice/Object.hpp>
+
+ using namespace Rice;
+
+ template<>
+ inline
+ long long from_ruby<long long>(Object x)
+ {
+ return NUM2LL(x);
+ }
+
+ template<>
+ inline
+ Object to_ruby<long long>(long long const & x)
+ {
+ return LL2NUM(x);
+ }
+
+ template<>
+ inline
+ unsigned long long from_ruby<unsigned long long>(Object x)
+ {
+ return NUM2ULL(x);
+ }
+
+ template<>
+ inline
+ Object to_ruby<unsigned long long>(unsigned long long const & x)
+ {
+ return ULL2NUM(x);
+ }
+
+ template<>
+ inline
+ short from_ruby<short>(Object x)
+ {
+ return NUM2SHORT(x);
+ }
+
+ template<>
+ inline
+ Object to_ruby<short>(short const & x)
+ {
+ return INT2NUM(x);
+ }
+
+ template<>
+ inline
+ unsigned short from_ruby<unsigned short>(Object x)
+ {
+ return NUM2USHORT(x);
+ }
+
+ template<>
+ inline
+ Object to_ruby<unsigned short>(unsigned short const & x)
+ {
+ return UINT2NUM(x);
+ }
+
+ // need to wrap torch::IntArrayRef() since
+ // it doesn't own underlying data
+ class IntArrayRef {
+ std::vector<int64_t> vec;
+ public:
+ IntArrayRef(Object o) {
+ Array a = Array(o);
+ for (size_t i = 0; i < a.size(); i++) {
+ vec.push_back(from_ruby<int64_t>(a[i]));
+ }
+ }
+ operator torch::IntArrayRef() {
+ return torch::IntArrayRef(vec);
+ }
+ };
+
+ template<>
+ inline
+ IntArrayRef from_ruby<IntArrayRef>(Object x)
+ {
+ return IntArrayRef(x);
+ }
+
+ // for now
+ class Scalar {
+ torch::Scalar value;
+ public:
+ Scalar(Object o) {
+ // TODO cast based on Ruby type
+ if (o.rb_type() == T_FIXNUM) {
+ value = torch::Scalar(from_ruby<int64_t>(o));
+ } else {
+ value = torch::Scalar(from_ruby<float>(o));
+ }
+ }
+ operator torch::Scalar() {
+ return value;
+ }
+ };
+
+ template<>
+ inline
+ Scalar from_ruby<Scalar>(Object x)
+ {
+ return Scalar(x);
+ }
+
+ class TensorList {
+ std::vector<torch::Tensor> vec;
+ public:
+ TensorList(Object o) {
+ Array a = Array(o);
+ for (size_t i = 0; i < a.size(); i++) {
+ vec.push_back(from_ruby<torch::Tensor>(a[i]));
+ }
+ }
+ operator torch::TensorList() {
+ return torch::TensorList(vec);
+ }
+ };
+
+ template<>
+ inline
+ TensorList from_ruby<TensorList>(Object x)
+ {
+ return TensorList(x);
+ }
+
+ class FanModeType {
+ std::string s;
+ public:
+ FanModeType(Object o) {
+ s = String(o).str();
+ }
+ // TODO switch FanModeType after LibTorch 1.4 release
+ operator torch::nn::init::FanMode() {
+ if (s == "fan_in") {
+ return torch::nn::init::FanMode::FanIn;
+ } else if (s == "fan_out") {
+ return torch::nn::init::FanMode::FanOut;
+ } else {
+ throw std::runtime_error("Unsupported fan mode: " + s);
+ }
+ }
+ };
+
+ template<>
+ inline
+ FanModeType from_ruby<FanModeType>(Object x)
+ {
+ return FanModeType(x);
+ }
+
+ class NonlinearityType {
+ std::string s;
+ public:
+ NonlinearityType(Object o) {
+ s = String(o).str();
+ }
+ // TODO switch NonlinearityType after LibTorch 1.4 release
+ operator torch::nn::init::Nonlinearity() {
+ if (s == "linear") {
+ return torch::nn::init::Nonlinearity::Linear;
+ } else if (s == "conv1d") {
+ return torch::nn::init::Nonlinearity::Conv1D;
+ } else if (s == "conv2d") {
+ return torch::nn::init::Nonlinearity::Conv2D;
+ } else if (s == "conv3d") {
+ return torch::nn::init::Nonlinearity::Conv3D;
+ } else if (s == "conv_transpose1d") {
+ return torch::nn::init::Nonlinearity::ConvTranspose1D;
+ } else if (s == "conv_transpose2d") {
+ return torch::nn::init::Nonlinearity::ConvTranspose2D;
+ } else if (s == "conv_transpose3d") {
+ return torch::nn::init::Nonlinearity::ConvTranspose3D;
+ } else if (s == "sigmoid") {
+ return torch::nn::init::Nonlinearity::Sigmoid;
+ } else if (s == "tanh") {
+ return torch::nn::init::Nonlinearity::Tanh;
+ } else if (s == "relu") {
+ return torch::nn::init::Nonlinearity::ReLU;
+ } else if (s == "leaky_relu") {
+ return torch::nn::init::Nonlinearity::LeakyReLU;
+ } else {
+ throw std::runtime_error("Unsupported nonlinearity type: " + s);
+ }
+ }
+ };
+
+ template<>
+ inline
+ NonlinearityType from_ruby<NonlinearityType>(Object x)
+ {
+ return NonlinearityType(x);
+ }
+
+ class MyReduction {
+ Object value;
+ public:
+ MyReduction(Object o) {
+ value = o;
+ }
+ operator int64_t() {
+ if (value.is_nil()) {
+ return Reduction::None;
+ }
+
+ std::string s = String(value).str();
+ if (s == "mean") {
+ return Reduction::Mean;
+ } else if (s == "sum") {
+ return Reduction::Sum;
+ } else {
+ throw std::runtime_error("Unsupported reduction: " + s);
+ }
+ }
+ };
+
+ template<>
+ inline
+ MyReduction from_ruby<MyReduction>(Object x)
+ {
+ return MyReduction(x);
+ }
+
+ typedef torch::Tensor Tensor;
+
+ class OptionalTensor {
+ Object value;
+ public:
+ OptionalTensor(Object o) {
+ value = o;
+ }
+ operator torch::Tensor() {
+ if (value.is_nil()) {
+ return {};
+ }
+ return from_ruby<torch::Tensor>(value);
+ }
+ };
+
+ template<>
+ inline
+ OptionalTensor from_ruby<OptionalTensor>(Object x)
+ {
+ return OptionalTensor(x);
+ }
+
+ class ScalarType {
+ Object value;
+ public:
+ ScalarType(Object o) {
+ value = o;
+ }
+ operator at::ScalarType() {
+ throw std::runtime_error("ScalarType arguments not implemented yet");
+ }
+ };
+
+ template<>
+ inline
+ ScalarType from_ruby<ScalarType>(Object x)
+ {
+ return ScalarType(x);
+ }
+
+ class OptionalScalarType {
+ Object value;
+ public:
+ OptionalScalarType(Object o) {
+ value = o;
+ }
+ operator c10::optional<at::ScalarType>() {
+ if (value.is_nil()) {
+ return c10::nullopt;
+ }
+ return ScalarType(value);
+ }
+ };
+
+ template<>
+ inline
+ OptionalScalarType from_ruby<OptionalScalarType>(Object x)
+ {
+ return OptionalScalarType(x);
+ }
+
+ typedef torch::Device Device;
+
+ Object wrap(std::tuple<torch::Tensor, torch::Tensor> x);
+ Object wrap(std::tuple<torch::Tensor, torch::Tensor, torch::Tensor> x);
+ Object wrap(std::tuple<torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor> x);
+ Object wrap(std::tuple<torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor> x);
+ Object wrap(std::tuple<torch::Tensor, torch::Tensor, torch::Tensor, int64_t> x);
+ Object wrap(std::tuple<torch::Tensor, torch::Tensor, double, int64_t> x);
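
These from_ruby<> specializations are what let the generated lambdas below declare typed C++ parameters and receive plain Ruby values: Rice calls the matching from_ruby<> for each argument, and the helper classes' conversion operators then hand LibTorch the type it expects. A small sketch under those assumptions (the method name is invented for illustration, not part of this diff):

#include <torch/torch.h>
#include <rice/Module.hpp>
#include "templates.hpp"

// Illustrative only: a Ruby call like Torch._demo_full([2, 3], 0)
// goes through from_ruby<IntArrayRef> and from_ruby<Scalar> above.
void add_demo_functions(Module m) {
  m.define_singleton_method(
    "_demo_full",
    *[](IntArrayRef size, Scalar value) {
      // the conversion operators yield torch::IntArrayRef and
      // torch::Scalar, which torch::full consumes directly
      return torch::full(size, value);
    });
}
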
data/ext/torch/tensor_functions.cpp
@@ -0,0 +1,1920 @@
1
+ // generated by rake generate:functions
2
+ // do not edit by hand
3
+
4
+ #include <torch/torch.h>
5
+ #include <rice/Module.hpp>
6
+ #include "templates.hpp"
7
+
8
+ void add_tensor_functions(Module m) {
9
+ m
10
+ .define_method(
11
+ "_abs",
12
+ *[](const Tensor &self) {
13
+ return self.abs();
14
+ })
15
+ .define_method(
16
+ "_abs_",
17
+ *[](Tensor &self) {
18
+ return self.abs_();
19
+ })
20
+ .define_method(
21
+ "_acos",
22
+ *[](const Tensor &self) {
23
+ return self.acos();
24
+ })
25
+ .define_method(
26
+ "_acos_",
27
+ *[](Tensor &self) {
28
+ return self.acos_();
29
+ })
30
+ .define_method(
31
+ "_add__scalar",
32
+ *[](Tensor &self, Scalar other, Scalar alpha) {
33
+ return self.add_(other, alpha);
34
+ })
35
+ .define_method(
36
+ "_add__tensor",
37
+ *[](Tensor &self, const Tensor &other, Scalar alpha) {
38
+ return self.add_(other, alpha);
39
+ })
40
+ .define_method(
41
+ "_add_scalar",
42
+ *[](const Tensor &self, Scalar other, Scalar alpha) {
43
+ return self.add(other, alpha);
44
+ })
45
+ .define_method(
46
+ "_add_tensor",
47
+ *[](const Tensor &self, const Tensor &other, Scalar alpha) {
48
+ return self.add(other, alpha);
49
+ })
50
+ .define_method(
51
+ "_addbmm",
52
+ *[](const Tensor &self, const Tensor &batch1, const Tensor &batch2, Scalar beta, Scalar alpha) {
53
+ return self.addbmm(batch1, batch2, beta, alpha);
54
+ })
55
+ .define_method(
56
+ "_addbmm_",
57
+ *[](Tensor &self, const Tensor &batch1, const Tensor &batch2, Scalar beta, Scalar alpha) {
58
+ return self.addbmm_(batch1, batch2, beta, alpha);
59
+ })
60
+ .define_method(
61
+ "_addcdiv",
62
+ *[](const Tensor &self, const Tensor &tensor1, const Tensor &tensor2, Scalar value) {
63
+ return self.addcdiv(tensor1, tensor2, value);
64
+ })
65
+ .define_method(
66
+ "_addcdiv_",
67
+ *[](Tensor &self, const Tensor &tensor1, const Tensor &tensor2, Scalar value) {
68
+ return self.addcdiv_(tensor1, tensor2, value);
69
+ })
70
+ .define_method(
71
+ "_addcmul",
72
+ *[](const Tensor &self, const Tensor &tensor1, const Tensor &tensor2, Scalar value) {
73
+ return self.addcmul(tensor1, tensor2, value);
74
+ })
75
+ .define_method(
76
+ "_addcmul_",
77
+ *[](Tensor &self, const Tensor &tensor1, const Tensor &tensor2, Scalar value) {
78
+ return self.addcmul_(tensor1, tensor2, value);
79
+ })
80
+ .define_method(
81
+ "_addmm",
82
+ *[](const Tensor &self, const Tensor &mat1, const Tensor &mat2, Scalar beta, Scalar alpha) {
83
+ return self.addmm(mat1, mat2, beta, alpha);
84
+ })
85
+ .define_method(
86
+ "_addmm_",
87
+ *[](Tensor &self, const Tensor &mat1, const Tensor &mat2, Scalar beta, Scalar alpha) {
88
+ return self.addmm_(mat1, mat2, beta, alpha);
89
+ })
90
+ .define_method(
91
+ "_addmv",
92
+ *[](const Tensor &self, const Tensor &mat, const Tensor &vec, Scalar beta, Scalar alpha) {
93
+ return self.addmv(mat, vec, beta, alpha);
94
+ })
95
+ .define_method(
96
+ "_addmv_",
97
+ *[](Tensor &self, const Tensor &mat, const Tensor &vec, Scalar beta, Scalar alpha) {
98
+ return self.addmv_(mat, vec, beta, alpha);
99
+ })
100
+ .define_method(
101
+ "_addr",
102
+ *[](const Tensor &self, const Tensor &vec1, const Tensor &vec2, Scalar beta, Scalar alpha) {
103
+ return self.addr(vec1, vec2, beta, alpha);
104
+ })
105
+ .define_method(
106
+ "_addr_",
107
+ *[](Tensor &self, const Tensor &vec1, const Tensor &vec2, Scalar beta, Scalar alpha) {
108
+ return self.addr_(vec1, vec2, beta, alpha);
109
+ })
110
+ .define_method(
111
+ "_alias",
112
+ *[](Tensor &self) {
113
+ return self.alias();
114
+ })
115
+ .define_method(
116
+ "_align_as",
117
+ *[](const Tensor &self, const Tensor &other) {
118
+ return self.align_as(other);
119
+ })
120
+ .define_method(
121
+ "_all",
122
+ *[](const Tensor &self) {
123
+ return self.all();
124
+ })
125
+ .define_method(
126
+ "_all_dim",
127
+ *[](const Tensor &self, int64_t dim, bool keepdim) {
128
+ return self.all(dim, keepdim);
129
+ })
130
+ .define_method(
131
+ "_allclose",
132
+ *[](const Tensor &self, const Tensor &other, double rtol, double atol, bool equal_nan) {
133
+ return self.allclose(other, rtol, atol, equal_nan);
134
+ })
135
+ .define_method(
136
+ "_any",
137
+ *[](const Tensor &self) {
138
+ return self.any();
139
+ })
140
+ .define_method(
141
+ "_any_dim",
142
+ *[](const Tensor &self, int64_t dim, bool keepdim) {
143
+ return self.any(dim, keepdim);
144
+ })
145
+ .define_method(
146
+ "_argmax",
147
+ *[](const Tensor &self) {
148
+ return self.argmax();
149
+ })
150
+ .define_method(
151
+ "_argmax_dim",
152
+ *[](const Tensor &self, int64_t dim, bool keepdim) {
153
+ return self.argmax(dim, keepdim);
154
+ })
155
+ .define_method(
156
+ "_argmin",
157
+ *[](const Tensor &self) {
158
+ return self.argmin();
159
+ })
160
+ .define_method(
161
+ "_argmin_dim",
162
+ *[](const Tensor &self, int64_t dim, bool keepdim) {
163
+ return self.argmin(dim, keepdim);
164
+ })
165
+ .define_method(
166
+ "_argsort",
167
+ *[](const Tensor &self, int64_t dim, bool descending) {
168
+ return self.argsort(dim, descending);
169
+ })
170
+ .define_method(
171
+ "_as_strided",
172
+ *[](Tensor &self, IntArrayRef size, IntArrayRef stride) {
173
+ return self.as_strided(size, stride);
174
+ })
175
+ .define_method(
176
+ "_as_strided_",
177
+ *[](Tensor &self, IntArrayRef size, IntArrayRef stride) {
178
+ return self.as_strided_(size, stride);
179
+ })
180
+ .define_method(
181
+ "_as_strided__storage_offset",
182
+ *[](Tensor &self, IntArrayRef size, IntArrayRef stride, int64_t storage_offset) {
183
+ return self.as_strided_(size, stride, storage_offset);
184
+ })
185
+ .define_method(
186
+ "_as_strided_storage_offset",
187
+ *[](Tensor &self, IntArrayRef size, IntArrayRef stride, int64_t storage_offset) {
188
+ return self.as_strided(size, stride, storage_offset);
189
+ })
190
+ .define_method(
191
+ "_asin",
192
+ *[](const Tensor &self) {
193
+ return self.asin();
194
+ })
195
+ .define_method(
196
+ "_asin_",
197
+ *[](Tensor &self) {
198
+ return self.asin_();
199
+ })
200
+ .define_method(
201
+ "_atan",
202
+ *[](const Tensor &self) {
203
+ return self.atan();
204
+ })
205
+ .define_method(
206
+ "_atan2",
207
+ *[](const Tensor &self, const Tensor &other) {
208
+ return self.atan2(other);
209
+ })
210
+ .define_method(
211
+ "_atan2_",
212
+ *[](Tensor &self, const Tensor &other) {
213
+ return self.atan2_(other);
214
+ })
215
+ .define_method(
216
+ "_atan_",
217
+ *[](Tensor &self) {
218
+ return self.atan_();
219
+ })
220
+ .define_method(
221
+ "_backward",
222
+ *[](const Tensor &self, OptionalTensor gradient, bool keep_graph, bool create_graph) {
223
+ return self.backward(gradient, keep_graph, create_graph);
224
+ })
225
+ .define_method(
226
+ "_baddbmm",
227
+ *[](const Tensor &self, const Tensor &batch1, const Tensor &batch2, Scalar beta, Scalar alpha) {
228
+ return self.baddbmm(batch1, batch2, beta, alpha);
229
+ })
230
+ .define_method(
231
+ "_baddbmm_",
232
+ *[](Tensor &self, const Tensor &batch1, const Tensor &batch2, Scalar beta, Scalar alpha) {
233
+ return self.baddbmm_(batch1, batch2, beta, alpha);
234
+ })
235
+ .define_method(
236
+ "_bernoulli",
237
+ *[](const Tensor &self) {
238
+ return self.bernoulli();
239
+ })
240
+ .define_method(
241
+ "_bernoulli__float",
242
+ *[](Tensor &self, double p) {
243
+ return self.bernoulli_(p);
244
+ })
245
+ .define_method(
246
+ "_bernoulli__tensor",
247
+ *[](Tensor &self, const Tensor &p) {
248
+ return self.bernoulli_(p);
249
+ })
250
+ .define_method(
251
+ "_bernoulli_p",
252
+ *[](const Tensor &self, double p) {
253
+ return self.bernoulli(p);
254
+ })
255
+ .define_method(
256
+ "_bincount",
257
+ *[](const Tensor &self, OptionalTensor weights, int64_t minlength) {
258
+ return self.bincount(weights, minlength);
259
+ })
260
+ .define_method(
261
+ "_bitwise_not",
262
+ *[](const Tensor &self) {
263
+ return self.bitwise_not();
264
+ })
265
+ .define_method(
266
+ "_bitwise_not_",
267
+ *[](Tensor &self) {
268
+ return self.bitwise_not_();
269
+ })
270
+ .define_method(
271
+ "_bmm",
272
+ *[](const Tensor &self, const Tensor &mat2) {
273
+ return self.bmm(mat2);
274
+ })
275
+ .define_method(
276
+ "_cauchy_",
277
+ *[](Tensor &self, double median, double sigma) {
278
+ return self.cauchy_(median, sigma);
279
+ })
280
+ .define_method(
281
+ "_ceil",
282
+ *[](const Tensor &self) {
283
+ return self.ceil();
284
+ })
285
+ .define_method(
286
+ "_ceil_",
287
+ *[](Tensor &self) {
288
+ return self.ceil_();
289
+ })
290
+ .define_method(
291
+ "_cholesky",
292
+ *[](const Tensor &self, bool upper) {
293
+ return self.cholesky(upper);
294
+ })
295
+ .define_method(
296
+ "_cholesky_inverse",
297
+ *[](const Tensor &self, bool upper) {
298
+ return self.cholesky_inverse(upper);
299
+ })
300
+ .define_method(
301
+ "_cholesky_solve",
302
+ *[](const Tensor &self, const Tensor &input2, bool upper) {
303
+ return self.cholesky_solve(input2, upper);
304
+ })
305
+ .define_method(
306
+ "_chunk",
307
+ *[](Tensor &self, int64_t chunks, int64_t dim) {
308
+ return self.chunk(chunks, dim);
309
+ })
310
+ .define_method(
311
+ "_clamp_max",
312
+ *[](const Tensor &self, Scalar max) {
313
+ return self.clamp_max(max);
314
+ })
315
+ .define_method(
316
+ "_clamp_max_",
317
+ *[](Tensor &self, Scalar max) {
318
+ return self.clamp_max_(max);
319
+ })
320
+ .define_method(
321
+ "_clamp_min",
322
+ *[](const Tensor &self, Scalar min) {
323
+ return self.clamp_min(min);
324
+ })
325
+ .define_method(
326
+ "_clamp_min_",
327
+ *[](Tensor &self, Scalar min) {
328
+ return self.clamp_min_(min);
329
+ })
330
+ .define_method(
331
+ "_clone",
332
+ *[](const Tensor &self) {
333
+ return self.clone();
334
+ })
335
+ .define_method(
336
+ "_coalesce",
337
+ *[](const Tensor &self) {
338
+ return self.coalesce();
339
+ })
340
+ .define_method(
341
+ "_copy_",
342
+ *[](Tensor &self, const Tensor &src, bool non_blocking) {
343
+ return self.copy_(src, non_blocking);
344
+ })
345
+ .define_method(
346
+ "_cos",
347
+ *[](const Tensor &self) {
348
+ return self.cos();
349
+ })
350
+ .define_method(
351
+ "_cos_",
352
+ *[](Tensor &self) {
353
+ return self.cos_();
354
+ })
355
+ .define_method(
356
+ "_cosh",
357
+ *[](const Tensor &self) {
358
+ return self.cosh();
359
+ })
360
+ .define_method(
361
+ "_cosh_",
362
+ *[](Tensor &self) {
363
+ return self.cosh_();
364
+ })
365
+ .define_method(
366
+ "_cumprod",
367
+ *[](const Tensor &self, int64_t dim, OptionalScalarType dtype) {
368
+ return self.cumprod(dim, dtype);
369
+ })
370
+ .define_method(
371
+ "_cumsum",
372
+ *[](const Tensor &self, int64_t dim, OptionalScalarType dtype) {
373
+ return self.cumsum(dim, dtype);
374
+ })
375
+ .define_method(
376
+ "_data",
377
+ *[](const Tensor &self) {
378
+ return self.data();
379
+ })
380
+ .define_method(
381
+ "_dense_dim",
382
+ *[](const Tensor &self) {
383
+ return self.dense_dim();
384
+ })
385
+ .define_method(
386
+ "_dequantize",
387
+ *[](const Tensor &self) {
388
+ return self.dequantize();
389
+ })
390
+ .define_method(
391
+ "_det",
392
+ *[](const Tensor &self) {
393
+ return self.det();
394
+ })
395
+ .define_method(
396
+ "_detach",
397
+ *[](const Tensor &self) {
398
+ return self.detach();
399
+ })
400
+ .define_method(
401
+ "_detach_",
402
+ *[](Tensor &self) {
403
+ return self.detach_();
404
+ })
405
+ .define_method(
406
+ "_diag",
407
+ *[](const Tensor &self, int64_t diagonal) {
408
+ return self.diag(diagonal);
409
+ })
410
+ .define_method(
411
+ "_diag_embed",
412
+ *[](const Tensor &self, int64_t offset, int64_t dim1, int64_t dim2) {
413
+ return self.diag_embed(offset, dim1, dim2);
414
+ })
415
+ .define_method(
416
+ "_diagflat",
417
+ *[](const Tensor &self, int64_t offset) {
418
+ return self.diagflat(offset);
419
+ })
420
+ .define_method(
421
+ "_diagonal",
422
+ *[](Tensor &self, int64_t offset, int64_t dim1, int64_t dim2) {
423
+ return self.diagonal(offset, dim1, dim2);
424
+ })
425
+ .define_method(
426
+ "_digamma",
427
+ *[](const Tensor &self) {
428
+ return self.digamma();
429
+ })
430
+ .define_method(
431
+ "_digamma_",
432
+ *[](Tensor &self) {
433
+ return self.digamma_();
434
+ })
435
+ .define_method(
436
+ "_dist",
437
+ *[](const Tensor &self, const Tensor &other, Scalar p) {
438
+ return self.dist(other, p);
439
+ })
440
+ .define_method(
441
+ "_div__scalar",
442
+ *[](Tensor &self, Scalar other) {
443
+ return self.div_(other);
444
+ })
445
+ .define_method(
446
+ "_div__tensor",
447
+ *[](Tensor &self, const Tensor &other) {
448
+ return self.div_(other);
449
+ })
450
+ .define_method(
451
+ "_div_scalar",
452
+ *[](const Tensor &self, Scalar other) {
453
+ return self.div(other);
454
+ })
455
+ .define_method(
456
+ "_div_tensor",
457
+ *[](const Tensor &self, const Tensor &other) {
458
+ return self.div(other);
459
+ })
460
+ .define_method(
461
+ "_dot",
462
+ *[](const Tensor &self, const Tensor &tensor) {
463
+ return self.dot(tensor);
464
+ })
465
+ .define_method(
466
+ "_eig",
467
+ *[](const Tensor &self, bool eigenvectors) {
468
+ return wrap(self.eig(eigenvectors));
469
+ })
470
+ .define_method(
471
+ "_eq__scalar",
472
+ *[](Tensor &self, Scalar other) {
473
+ return self.eq_(other);
474
+ })
475
+ .define_method(
476
+ "_eq__tensor",
477
+ *[](Tensor &self, const Tensor &other) {
478
+ return self.eq_(other);
479
+ })
480
+ .define_method(
481
+ "_eq_scalar",
482
+ *[](const Tensor &self, Scalar other) {
483
+ return self.eq(other);
484
+ })
485
+ .define_method(
486
+ "_eq_tensor",
487
+ *[](const Tensor &self, const Tensor &other) {
488
+ return self.eq(other);
489
+ })
490
+ .define_method(
491
+ "_equal",
492
+ *[](const Tensor &self, const Tensor &other) {
493
+ return self.equal(other);
494
+ })
495
+ .define_method(
496
+ "_erf",
497
+ *[](const Tensor &self) {
498
+ return self.erf();
499
+ })
500
+ .define_method(
501
+ "_erf_",
502
+ *[](Tensor &self) {
503
+ return self.erf_();
504
+ })
505
+ .define_method(
506
+ "_erfc",
507
+ *[](const Tensor &self) {
508
+ return self.erfc();
509
+ })
510
+ .define_method(
511
+ "_erfc_",
512
+ *[](Tensor &self) {
513
+ return self.erfc_();
514
+ })
515
+ .define_method(
516
+ "_erfinv",
517
+ *[](const Tensor &self) {
518
+ return self.erfinv();
519
+ })
520
+ .define_method(
521
+ "_erfinv_",
522
+ *[](Tensor &self) {
523
+ return self.erfinv_();
524
+ })
525
+ .define_method(
526
+ "_exp",
527
+ *[](const Tensor &self) {
528
+ return self.exp();
529
+ })
530
+ .define_method(
531
+ "_exp_",
532
+ *[](Tensor &self) {
533
+ return self.exp_();
534
+ })
535
+ .define_method(
536
+ "_expand",
537
+ *[](Tensor &self, IntArrayRef size, bool implicit) {
538
+ return self.expand(size, implicit);
539
+ })
540
+ .define_method(
541
+ "_expand_as",
542
+ *[](const Tensor &self, const Tensor &other) {
543
+ return self.expand_as(other);
544
+ })
545
+ .define_method(
546
+ "_expm1",
547
+ *[](const Tensor &self) {
548
+ return self.expm1();
549
+ })
550
+ .define_method(
551
+ "_expm1_",
552
+ *[](Tensor &self) {
553
+ return self.expm1_();
554
+ })
555
+ .define_method(
556
+ "_exponential_",
557
+ *[](Tensor &self, double lambd) {
558
+ return self.exponential_(lambd);
559
+ })
560
+ .define_method(
561
+ "_fft",
562
+ *[](const Tensor &self, int64_t signal_ndim, bool normalized) {
563
+ return self.fft(signal_ndim, normalized);
564
+ })
565
+ .define_method(
566
+ "_fill__scalar",
567
+ *[](Tensor &self, Scalar value) {
568
+ return self.fill_(value);
569
+ })
570
+ .define_method(
571
+ "_fill__tensor",
572
+ *[](Tensor &self, const Tensor &value) {
573
+ return self.fill_(value);
574
+ })
575
+ .define_method(
576
+ "_fill_diagonal_",
577
+ *[](Tensor &self, Scalar fill_value, bool wrap) {
578
+ return self.fill_diagonal_(fill_value, wrap);
579
+ })
580
+ .define_method(
581
+ "_flatten_using_ints",
582
+ *[](const Tensor &self, int64_t start_dim, int64_t end_dim) {
583
+ return self.flatten(start_dim, end_dim);
584
+ })
585
+ .define_method(
586
+ "_flip",
587
+ *[](const Tensor &self, IntArrayRef dims) {
588
+ return self.flip(dims);
589
+ })
590
+ .define_method(
591
+ "_floor",
592
+ *[](const Tensor &self) {
593
+ return self.floor();
594
+ })
595
+ .define_method(
596
+ "_floor_",
597
+ *[](Tensor &self) {
598
+ return self.floor_();
599
+ })
600
+ .define_method(
601
+ "_fmod__scalar",
602
+ *[](Tensor &self, Scalar other) {
603
+ return self.fmod_(other);
604
+ })
605
+ .define_method(
606
+ "_fmod__tensor",
607
+ *[](Tensor &self, const Tensor &other) {
608
+ return self.fmod_(other);
609
+ })
610
+ .define_method(
611
+ "_fmod_scalar",
612
+ *[](const Tensor &self, Scalar other) {
613
+ return self.fmod(other);
614
+ })
615
+ .define_method(
616
+ "_fmod_tensor",
617
+ *[](const Tensor &self, const Tensor &other) {
618
+ return self.fmod(other);
619
+ })
620
+ .define_method(
621
+ "_frac",
622
+ *[](const Tensor &self) {
623
+ return self.frac();
624
+ })
625
+ .define_method(
626
+ "_frac_",
627
+ *[](Tensor &self) {
628
+ return self.frac_();
629
+ })
630
+ .define_method(
631
+ "_gather",
632
+ *[](const Tensor &self, int64_t dim, const Tensor &index, bool sparse_grad) {
633
+ return self.gather(dim, index, sparse_grad);
634
+ })
635
+ .define_method(
636
+ "_ge__scalar",
637
+ *[](Tensor &self, Scalar other) {
638
+ return self.ge_(other);
639
+ })
640
+ .define_method(
641
+ "_ge__tensor",
642
+ *[](Tensor &self, const Tensor &other) {
643
+ return self.ge_(other);
644
+ })
645
+ .define_method(
646
+ "_ge_scalar",
647
+ *[](const Tensor &self, Scalar other) {
648
+ return self.ge(other);
649
+ })
650
+ .define_method(
651
+ "_ge_tensor",
652
+ *[](const Tensor &self, const Tensor &other) {
653
+ return self.ge(other);
654
+ })
655
+ .define_method(
656
+ "_geometric_",
657
+ *[](Tensor &self, double p) {
658
+ return self.geometric_(p);
659
+ })
660
+ .define_method(
661
+ "_geqrf",
662
+ *[](const Tensor &self) {
663
+ return wrap(self.geqrf());
664
+ })
665
+ .define_method(
666
+ "_ger",
667
+ *[](const Tensor &self, const Tensor &vec2) {
668
+ return self.ger(vec2);
669
+ })
670
+ .define_method(
671
+ "_gt__scalar",
672
+ *[](Tensor &self, Scalar other) {
673
+ return self.gt_(other);
674
+ })
675
+ .define_method(
676
+ "_gt__tensor",
677
+ *[](Tensor &self, const Tensor &other) {
678
+ return self.gt_(other);
679
+ })
680
+ .define_method(
681
+ "_gt_scalar",
682
+ *[](const Tensor &self, Scalar other) {
683
+ return self.gt(other);
684
+ })
685
+ .define_method(
686
+ "_gt_tensor",
687
+ *[](const Tensor &self, const Tensor &other) {
688
+ return self.gt(other);
689
+ })
690
+ .define_method(
691
+ "_hardshrink",
692
+ *[](const Tensor &self, Scalar lambd) {
693
+ return self.hardshrink(lambd);
694
+ })
695
+ .define_method(
696
+ "_histc",
697
+ *[](const Tensor &self, int64_t bins, Scalar min, Scalar max) {
698
+ return self.histc(bins, min, max);
699
+ })
700
+ .define_method(
701
+ "_ifft",
702
+ *[](const Tensor &self, int64_t signal_ndim, bool normalized) {
703
+ return self.ifft(signal_ndim, normalized);
704
+ })
705
+ .define_method(
706
+ "_index_add",
707
+ *[](const Tensor &self, int64_t dim, const Tensor &index, const Tensor &source) {
708
+ return self.index_add(dim, index, source);
709
+ })
710
+ .define_method(
711
+ "_index_add_",
712
+ *[](Tensor &self, int64_t dim, const Tensor &index, const Tensor &source) {
713
+ return self.index_add_(dim, index, source);
714
+ })
715
+ .define_method(
716
+ "_index_copy",
717
+ *[](const Tensor &self, int64_t dim, const Tensor &index, const Tensor &source) {
718
+ return self.index_copy(dim, index, source);
719
+ })
720
+ .define_method(
721
+ "_index_copy_",
722
+ *[](Tensor &self, int64_t dim, const Tensor &index, const Tensor &source) {
723
+ return self.index_copy_(dim, index, source);
724
+ })
725
+ .define_method(
726
+ "_index_fill__scalar",
727
+ *[](Tensor &self, int64_t dim, const Tensor &index, Scalar value) {
728
+ return self.index_fill_(dim, index, value);
729
+ })
730
+ .define_method(
731
+ "_index_fill__tensor",
732
+ *[](Tensor &self, int64_t dim, const Tensor &index, const Tensor &value) {
733
+ return self.index_fill_(dim, index, value);
734
+ })
735
+ .define_method(
736
+ "_index_fill_scalar",
737
+ *[](const Tensor &self, int64_t dim, const Tensor &index, Scalar value) {
738
+ return self.index_fill(dim, index, value);
739
+ })
740
+ .define_method(
741
+ "_index_fill_tensor",
742
+ *[](const Tensor &self, int64_t dim, const Tensor &index, const Tensor &value) {
743
+ return self.index_fill(dim, index, value);
744
+ })
745
+ .define_method(
746
+ "_index_select",
747
+ *[](const Tensor &self, int64_t dim, const Tensor &index) {
748
+ return self.index_select(dim, index);
749
+ })
750
+ .define_method(
751
+ "_indices",
752
+ *[](Tensor &self) {
753
+ return self.indices();
754
+ })
755
+ .define_method(
756
+ "_int_repr",
757
+ *[](const Tensor &self) {
758
+ return self.int_repr();
759
+ })
760
+ .define_method(
761
+ "_inverse",
762
+ *[](const Tensor &self) {
763
+ return self.inverse();
764
+ })
765
+ .define_method(
766
+ "_irfft",
767
+ *[](const Tensor &self, int64_t signal_ndim, bool normalized, bool onesided, IntArrayRef signal_sizes) {
768
+ return self.irfft(signal_ndim, normalized, onesided, signal_sizes);
769
+ })
770
+ .define_method(
771
+ "_is_coalesced",
772
+ *[](const Tensor &self) {
773
+ return self.is_coalesced();
774
+ })
775
+ .define_method(
776
+ "_is_complex",
777
+ *[](const Tensor &self) {
778
+ return self.is_complex();
779
+ })
780
+ .define_method(
781
+ "_is_distributed",
782
+ *[](const Tensor &self) {
783
+ return self.is_distributed();
784
+ })
785
+ .define_method(
786
+ "_is_floating_point",
787
+ *[](const Tensor &self) {
788
+ return self.is_floating_point();
789
+ })
790
+ .define_method(
791
+ "_is_leaf",
792
+ *[](const Tensor &self) {
793
+ return self.is_leaf();
794
+ })
795
+ .define_method(
796
+ "_is_nonzero",
797
+ *[](const Tensor &self) {
798
+ return self.is_nonzero();
799
+ })
800
+ .define_method(
801
+ "_is_pinned",
802
+ *[](const Tensor &self) {
803
+ return self.is_pinned();
804
+ })
805
+ .define_method(
806
+ "_is_same_size",
807
+ *[](const Tensor &self, const Tensor &other) {
808
+ return self.is_same_size(other);
809
+ })
810
+ .define_method(
811
+ "_is_set_to",
812
+ *[](const Tensor &self, const Tensor &tensor) {
813
+ return self.is_set_to(tensor);
814
+ })
815
+ .define_method(
816
+ "_is_signed",
817
+ *[](const Tensor &self) {
818
+ return self.is_signed();
819
+ })
820
+ .define_method(
821
+ "_isclose",
822
+ *[](const Tensor &self, const Tensor &other, double rtol, double atol, bool equal_nan) {
823
+ return self.isclose(other, rtol, atol, equal_nan);
824
+ })
825
+ .define_method(
826
+ "_item",
827
+ *[](const Tensor &self) {
828
+ return self.item();
829
+ })
830
+ .define_method(
831
+ "_kthvalue",
832
+ *[](const Tensor &self, int64_t k, int64_t dim, bool keepdim) {
833
+ return wrap(self.kthvalue(k, dim, keepdim));
834
+ })
835
+ .define_method(
836
+ "_le__scalar",
837
+ *[](Tensor &self, Scalar other) {
838
+ return self.le_(other);
839
+ })
840
+ .define_method(
841
+ "_le__tensor",
842
+ *[](Tensor &self, const Tensor &other) {
843
+ return self.le_(other);
844
+ })
845
+ .define_method(
846
+ "_le_scalar",
847
+ *[](const Tensor &self, Scalar other) {
848
+ return self.le(other);
849
+ })
850
+ .define_method(
851
+ "_le_tensor",
852
+ *[](const Tensor &self, const Tensor &other) {
853
+ return self.le(other);
854
+ })
855
+ .define_method(
856
+ "_lerp__scalar",
857
+ *[](Tensor &self, const Tensor &end, Scalar weight) {
858
+ return self.lerp_(end, weight);
859
+ })
860
+ .define_method(
861
+ "_lerp__tensor",
862
+ *[](Tensor &self, const Tensor &end, const Tensor &weight) {
863
+ return self.lerp_(end, weight);
864
+ })
865
+ .define_method(
866
+ "_lerp_scalar",
867
+ *[](const Tensor &self, const Tensor &end, Scalar weight) {
868
+ return self.lerp(end, weight);
869
+ })
870
+ .define_method(
871
+ "_lerp_tensor",
872
+ *[](const Tensor &self, const Tensor &end, const Tensor &weight) {
873
+ return self.lerp(end, weight);
874
+ })
875
+ .define_method(
876
+ "_lgamma",
877
+ *[](const Tensor &self) {
878
+ return self.lgamma();
879
+ })
880
+ .define_method(
881
+ "_lgamma_",
882
+ *[](Tensor &self) {
883
+ return self.lgamma_();
884
+ })
885
+ .define_method(
886
+ "_log",
887
+ *[](const Tensor &self) {
888
+ return self.log();
889
+ })
890
+ .define_method(
891
+ "_log10",
892
+ *[](const Tensor &self) {
893
+ return self.log10();
894
+ })
895
+ .define_method(
896
+ "_log10_",
897
+ *[](Tensor &self) {
898
+ return self.log10_();
899
+ })
900
+ .define_method(
901
+ "_log1p",
902
+ *[](const Tensor &self) {
903
+ return self.log1p();
904
+ })
905
+ .define_method(
906
+ "_log1p_",
907
+ *[](Tensor &self) {
908
+ return self.log1p_();
909
+ })
910
+ .define_method(
911
+ "_log2",
912
+ *[](const Tensor &self) {
913
+ return self.log2();
914
+ })
915
+ .define_method(
916
+ "_log2_",
917
+ *[](Tensor &self) {
918
+ return self.log2_();
919
+ })
920
+ .define_method(
921
+ "_log_",
922
+ *[](Tensor &self) {
923
+ return self.log_();
924
+ })
925
+ .define_method(
926
+ "_log_normal_",
927
+ *[](Tensor &self, double mean, double std) {
928
+ return self.log_normal_(mean, std);
929
+ })
930
+ .define_method(
931
+ "_log_softmax",
932
+ *[](const Tensor &self, int64_t dim, OptionalScalarType dtype) {
933
+ return self.log_softmax(dim, dtype);
934
+ })
935
+ .define_method(
936
+ "_logdet",
937
+ *[](const Tensor &self) {
938
+ return self.logdet();
939
+ })
940
+ .define_method(
941
+ "_logical_not",
942
+ *[](const Tensor &self) {
943
+ return self.logical_not();
944
+ })
945
+ .define_method(
946
+ "_logical_not_",
947
+ *[](Tensor &self) {
948
+ return self.logical_not_();
949
+ })
950
+ .define_method(
951
+ "_logical_xor",
952
+ *[](const Tensor &self, const Tensor &other) {
953
+ return self.logical_xor(other);
954
+ })
955
+ .define_method(
956
+ "_logical_xor_",
957
+ *[](Tensor &self, const Tensor &other) {
958
+ return self.logical_xor_(other);
959
+ })
960
+ .define_method(
961
+ "_logsumexp",
962
+ *[](const Tensor &self, IntArrayRef dim, bool keepdim) {
963
+ return self.logsumexp(dim, keepdim);
964
+ })
965
+ .define_method(
966
+ "_lstsq",
967
+ *[](const Tensor &self, const Tensor &A) {
968
+ return wrap(self.lstsq(A));
969
+ })
970
+ .define_method(
971
+ "_lt__scalar",
972
+ *[](Tensor &self, Scalar other) {
973
+ return self.lt_(other);
974
+ })
975
+ .define_method(
976
+ "_lt__tensor",
977
+ *[](Tensor &self, const Tensor &other) {
978
+ return self.lt_(other);
979
+ })
980
+ .define_method(
981
+ "_lt_scalar",
982
+ *[](const Tensor &self, Scalar other) {
983
+ return self.lt(other);
984
+ })
985
+ .define_method(
986
+ "_lt_tensor",
987
+ *[](const Tensor &self, const Tensor &other) {
988
+ return self.lt(other);
989
+ })
990
+ .define_method(
991
+ "_lu_solve",
992
+ *[](const Tensor &self, const Tensor &LU_data, const Tensor &LU_pivots) {
993
+ return self.lu_solve(LU_data, LU_pivots);
994
+ })
995
+ .define_method(
996
+ "_masked_fill__scalar",
997
+ *[](Tensor &self, const Tensor &mask, Scalar value) {
998
+ return self.masked_fill_(mask, value);
999
+ })
1000
+ .define_method(
1001
+ "_masked_fill__tensor",
1002
+ *[](Tensor &self, const Tensor &mask, const Tensor &value) {
1003
+ return self.masked_fill_(mask, value);
1004
+ })
1005
+ .define_method(
1006
+ "_masked_fill_scalar",
1007
+ *[](const Tensor &self, const Tensor &mask, Scalar value) {
1008
+ return self.masked_fill(mask, value);
1009
+ })
1010
+ .define_method(
1011
+ "_masked_fill_tensor",
1012
+ *[](const Tensor &self, const Tensor &mask, const Tensor &value) {
1013
+ return self.masked_fill(mask, value);
1014
+ })
1015
+ .define_method(
1016
+ "_masked_scatter",
1017
+ *[](const Tensor &self, const Tensor &mask, const Tensor &source) {
1018
+ return self.masked_scatter(mask, source);
1019
+ })
1020
+ .define_method(
1021
+ "_masked_scatter_",
1022
+ *[](Tensor &self, const Tensor &mask, const Tensor &source) {
1023
+ return self.masked_scatter_(mask, source);
1024
+ })
1025
+ .define_method(
1026
+ "_masked_select",
1027
+ *[](const Tensor &self, const Tensor &mask) {
1028
+ return self.masked_select(mask);
1029
+ })
1030
+ .define_method(
1031
+ "_matmul",
1032
+ *[](const Tensor &self, const Tensor &other) {
1033
+ return self.matmul(other);
1034
+ })
1035
+ .define_method(
1036
+ "_matrix_power",
1037
+ *[](const Tensor &self, int64_t n) {
1038
+ return self.matrix_power(n);
1039
+ })
1040
+ .define_method(
1041
+ "_max",
1042
+ *[](const Tensor &self) {
1043
+ return self.max();
1044
+ })
1045
+ .define_method(
1046
+ "_max_dim",
1047
+ *[](const Tensor &self, int64_t dim, bool keepdim) {
1048
+ return wrap(self.max(dim, keepdim));
1049
+ })
1050
+ .define_method(
1051
+ "_max_other",
1052
+ *[](const Tensor &self, const Tensor &other) {
1053
+ return self.max(other);
1054
+ })
1055
+ .define_method(
1056
+ "_max_values",
1057
+ *[](const Tensor &self, IntArrayRef dim, bool keepdim) {
1058
+ return self.max_values(dim, keepdim);
1059
+ })
1060
+ .define_method(
1061
+ "_mean",
1062
+ *[](const Tensor &self, OptionalScalarType dtype) {
1063
+ return self.mean(dtype);
1064
+ })
1065
+ .define_method(
1066
+ "_mean_dim",
1067
+ *[](const Tensor &self, IntArrayRef dim, bool keepdim, OptionalScalarType dtype) {
1068
+ return self.mean(dim, keepdim, dtype);
1069
+ })
1070
+ .define_method(
1071
+ "_median",
1072
+ *[](const Tensor &self) {
1073
+ return self.median();
1074
+ })
1075
+ .define_method(
1076
+ "_median_dim",
1077
+ *[](const Tensor &self, int64_t dim, bool keepdim) {
1078
+ return wrap(self.median(dim, keepdim));
1079
+ })
1080
+ .define_method(
1081
+ "_min",
1082
+ *[](const Tensor &self) {
1083
+ return self.min();
1084
+ })
1085
+ .define_method(
1086
+ "_min_dim",
1087
+ *[](const Tensor &self, int64_t dim, bool keepdim) {
1088
+ return wrap(self.min(dim, keepdim));
1089
+ })
1090
+ .define_method(
1091
+ "_min_other",
1092
+ *[](const Tensor &self, const Tensor &other) {
1093
+ return self.min(other);
1094
+ })
1095
+ .define_method(
1096
+ "_min_values",
1097
+ *[](const Tensor &self, IntArrayRef dim, bool keepdim) {
1098
+ return self.min_values(dim, keepdim);
1099
+ })
1100
+ .define_method(
1101
+ "_mm",
1102
+ *[](const Tensor &self, const Tensor &mat2) {
1103
+ return self.mm(mat2);
1104
+ })
1105
+ .define_method(
1106
+ "_mode",
1107
+ *[](const Tensor &self, int64_t dim, bool keepdim) {
1108
+ return wrap(self.mode(dim, keepdim));
1109
+ })
1110
+ .define_method(
1111
+ "_mul__scalar",
1112
+ *[](Tensor &self, Scalar other) {
1113
+ return self.mul_(other);
1114
+ })
1115
+ .define_method(
1116
+ "_mul__tensor",
1117
+ *[](Tensor &self, const Tensor &other) {
1118
+ return self.mul_(other);
1119
+ })
1120
+ .define_method(
1121
+ "_mul_scalar",
1122
+ *[](const Tensor &self, Scalar other) {
1123
+ return self.mul(other);
1124
+ })
1125
+ .define_method(
1126
+ "_mul_tensor",
1127
+ *[](const Tensor &self, const Tensor &other) {
1128
+ return self.mul(other);
1129
+ })
1130
+ .define_method(
1131
+ "_multinomial",
1132
+ *[](const Tensor &self, int64_t num_samples, bool replacement) {
1133
+ return self.multinomial(num_samples, replacement);
1134
+ })
1135
+ .define_method(
1136
+ "_mv",
1137
+ *[](const Tensor &self, const Tensor &vec) {
1138
+ return self.mv(vec);
1139
+ })
1140
+ .define_method(
1141
+ "_mvlgamma",
1142
+ *[](const Tensor &self, int64_t p) {
1143
+ return self.mvlgamma(p);
1144
+ })
1145
+ .define_method(
1146
+ "_mvlgamma_",
1147
+ *[](Tensor &self, int64_t p) {
1148
+ return self.mvlgamma_(p);
1149
+ })
1150
+ .define_method(
1151
+ "_narrow",
1152
+ *[](Tensor &self, int64_t dim, int64_t start, int64_t length) {
1153
+ return self.narrow(dim, start, length);
1154
+ })
1155
+ .define_method(
1156
+ "_narrow_copy",
1157
+ *[](const Tensor &self, int64_t dim, int64_t start, int64_t length) {
1158
+ return self.narrow_copy(dim, start, length);
1159
+ })
1160
+ .define_method(
1161
+ "_ne__scalar",
1162
+ *[](Tensor &self, Scalar other) {
1163
+ return self.ne_(other);
1164
+ })
1165
+ .define_method(
1166
+ "_ne__tensor",
1167
+ *[](Tensor &self, const Tensor &other) {
1168
+ return self.ne_(other);
1169
+ })
1170
+ .define_method(
1171
+ "_ne_scalar",
1172
+ *[](const Tensor &self, Scalar other) {
1173
+ return self.ne(other);
1174
+ })
1175
+ .define_method(
1176
+ "_ne_tensor",
1177
+ *[](const Tensor &self, const Tensor &other) {
1178
+ return self.ne(other);
1179
+ })
1180
+ .define_method(
1181
+ "_neg",
1182
+ *[](const Tensor &self) {
1183
+ return self.neg();
1184
+ })
1185
+ .define_method(
1186
+ "_neg_",
1187
+ *[](Tensor &self) {
1188
+ return self.neg_();
1189
+ })
1190
+ .define_method(
1191
+ "_nonzero",
1192
+ *[](const Tensor &self) {
1193
+ return self.nonzero();
1194
+ })
1195
+ .define_method(
1196
+ "_nonzero_numpy",
1197
+ *[](const Tensor &self) {
1198
+ return self.nonzero_numpy();
1199
+ })
1200
+ .define_method(
1201
+ "_norm_scalar",
1202
+ *[](const Tensor &self, Scalar p) {
1203
+ return self.norm(p);
1204
+ })
1205
+ .define_method(
1206
+ "_normal_",
1207
+ *[](Tensor &self, double mean, double std) {
1208
+ return self.normal_(mean, std);
1209
+ })
1210
+ .define_method(
1211
+ "_numel",
1212
+ *[](const Tensor &self) {
1213
+ return self.numel();
1214
+ })
1215
+ .define_method(
1216
+ "_numpy_t",
1217
+ *[](Tensor &self) {
1218
+ return self.numpy_T();
1219
+ })
1220
+ .define_method(
1221
+ "_orgqr",
1222
+ *[](const Tensor &self, const Tensor &input2) {
1223
+ return self.orgqr(input2);
1224
+ })
1225
+ .define_method(
1226
+ "_ormqr",
1227
+ *[](const Tensor &self, const Tensor &input2, const Tensor &input3, bool left, bool transpose) {
1228
+ return self.ormqr(input2, input3, left, transpose);
1229
+ })
1230
+ .define_method(
1231
+ "_output_nr",
1232
+ *[](const Tensor &self) {
1233
+ return self.output_nr();
1234
+ })
1235
+ .define_method(
1236
+ "_permute",
1237
+ *[](Tensor &self, IntArrayRef dims) {
1238
+ return self.permute(dims);
1239
+ })
1240
+ .define_method(
1241
+ "_pin_memory",
1242
+ *[](const Tensor &self) {
1243
+ return self.pin_memory();
1244
+ })
1245
+ .define_method(
1246
+ "_pinverse",
1247
+ *[](const Tensor &self, double rcond) {
1248
+ return self.pinverse(rcond);
1249
+ })
1250
+ .define_method(
1251
+ "_polygamma",
1252
+ *[](int64_t n, const Tensor &self) {
1253
+ return self.polygamma(n);
1254
+ })
1255
+ .define_method(
1256
+ "_polygamma_",
1257
+ *[](Tensor &self, int64_t n) {
1258
+ return self.polygamma_(n);
1259
+ })
1260
+ .define_method(
1261
+ "_pow__scalar",
1262
+ *[](Tensor &self, Scalar exponent) {
1263
+ return self.pow_(exponent);
1264
+ })
1265
+ .define_method(
1266
+ "_pow__tensor",
1267
+ *[](Tensor &self, const Tensor &exponent) {
1268
+ return self.pow_(exponent);
1269
+ })
1270
+ .define_method(
1271
+ "_pow_tensor_scalar",
1272
+ *[](const Tensor &self, Scalar exponent) {
1273
+ return self.pow(exponent);
1274
+ })
1275
+ .define_method(
1276
+ "_pow_tensor_tensor",
1277
+ *[](const Tensor &self, const Tensor &exponent) {
1278
+ return self.pow(exponent);
1279
+ })
1280
+ .define_method(
1281
+ "_prelu",
1282
+ *[](const Tensor &self, const Tensor &weight) {
1283
+ return self.prelu(weight);
1284
+ })
1285
+ .define_method(
1286
+ "_prod",
1287
+ *[](const Tensor &self, OptionalScalarType dtype) {
1288
+ return self.prod(dtype);
1289
+ })
1290
+ .define_method(
1291
+ "_prod_dim_int",
1292
+ *[](const Tensor &self, int64_t dim, bool keepdim, OptionalScalarType dtype) {
1293
+ return self.prod(dim, keepdim, dtype);
1294
+ })
1295
+ .define_method(
1296
+ "_put_",
1297
+ *[](Tensor &self, const Tensor &index, const Tensor &source, bool accumulate) {
1298
+ return self.put_(index, source, accumulate);
1299
+ })
1300
+ .define_method(
1301
+ "_q_per_channel_axis",
1302
+ *[](const Tensor &self) {
1303
+ return self.q_per_channel_axis();
1304
+ })
1305
+ .define_method(
1306
+ "_q_per_channel_scales",
1307
+ *[](const Tensor &self) {
1308
+ return self.q_per_channel_scales();
1309
+ })
1310
+ .define_method(
1311
+ "_q_per_channel_zero_points",
1312
+ *[](const Tensor &self) {
1313
+ return self.q_per_channel_zero_points();
1314
+ })
1315
+ .define_method(
1316
+ "_q_scale",
1317
+ *[](const Tensor &self) {
1318
+ return self.q_scale();
1319
+ })
1320
+ .define_method(
1321
+ "_q_zero_point",
1322
+ *[](const Tensor &self) {
1323
+ return self.q_zero_point();
1324
+ })
1325
+ .define_method(
1326
+ "_qr",
1327
+ *[](const Tensor &self, bool some) {
1328
+ return wrap(self.qr(some));
1329
+ })
1330
+ .define_method(
1331
+ "_qscheme",
1332
+ *[](const Tensor &self) {
1333
+ return self.qscheme();
1334
+ })
1335
+ .define_method(
1336
+ "_random_",
1337
+ *[](Tensor &self) {
1338
+ return self.random_();
1339
+ })
1340
+ .define_method(
1341
+ "_random__from",
1342
+ *[](Tensor &self, int64_t from, int64_t to) {
1343
+ return self.random_(from, to);
1344
+ })
1345
+ .define_method(
1346
+ "_random__to",
1347
+ *[](Tensor &self, int64_t to) {
1348
+ return self.random_(to);
1349
+ })
1350
+ .define_method(
1351
+ "_reciprocal",
1352
+ *[](const Tensor &self) {
1353
+ return self.reciprocal();
1354
+ })
1355
+ .define_method(
1356
+ "_reciprocal_",
1357
+ *[](Tensor &self) {
1358
+ return self.reciprocal_();
1359
+ })
1360
+ .define_method(
1361
+ "_relu",
1362
+ *[](const Tensor &self) {
1363
+ return self.relu();
1364
+ })
1365
+ .define_method(
1366
+ "_relu_",
1367
+ *[](Tensor &self) {
1368
+ return self.relu_();
1369
+ })
1370
+ .define_method(
1371
+ "_remainder__scalar",
1372
+ *[](Tensor &self, Scalar other) {
1373
+ return self.remainder_(other);
1374
+ })
1375
+ .define_method(
1376
+ "_remainder__tensor",
1377
+ *[](Tensor &self, const Tensor &other) {
1378
+ return self.remainder_(other);
1379
+ })
1380
+ .define_method(
1381
+ "_remainder_scalar",
1382
+ *[](const Tensor &self, Scalar other) {
1383
+ return self.remainder(other);
1384
+ })
1385
+ .define_method(
1386
+ "_remainder_tensor",
1387
+ *[](const Tensor &self, const Tensor &other) {
1388
+ return self.remainder(other);
1389
+ })
1390
+    .define_method(
+      "_renorm",
+      *[](const Tensor &self, Scalar p, int64_t dim, Scalar maxnorm) {
+        return self.renorm(p, dim, maxnorm);
+      })
+    .define_method(
+      "_renorm_",
+      *[](Tensor &self, Scalar p, int64_t dim, Scalar maxnorm) {
+        return self.renorm_(p, dim, maxnorm);
+      })
+    .define_method(
+      "_repeat",
+      *[](const Tensor &self, IntArrayRef repeats) {
+        return self.repeat(repeats);
+      })
+    .define_method(
+      "_repeat_interleave_self_int",
+      *[](const Tensor &self, int64_t repeats) {
+        return self.repeat_interleave(repeats);
+      })
+    .define_method(
+      "_repeat_interleave_self_int_dim",
+      *[](const Tensor &self, int64_t repeats, int64_t dim) {
+        return self.repeat_interleave(repeats, dim);
+      })
+    .define_method(
+      "_repeat_interleave_self_tensor",
+      *[](const Tensor &self, const Tensor &repeats) {
+        return self.repeat_interleave(repeats);
+      })
+    .define_method(
+      "_repeat_interleave_self_tensor_dim",
+      *[](const Tensor &self, const Tensor &repeats, int64_t dim) {
+        return self.repeat_interleave(repeats, dim);
+      })
+    .define_method(
+      "_reshape",
+      *[](const Tensor &self, IntArrayRef shape) {
+        return self.reshape(shape);
+      })
+    .define_method(
+      "_reshape_as",
+      *[](const Tensor &self, const Tensor &other) {
+        return self.reshape_as(other);
+      })
+    .define_method(
+      "_resize_",
+      *[](Tensor &self, IntArrayRef size) {
+        return self.resize_(size);
+      })
+    .define_method(
+      "_resize_as_",
+      *[](Tensor &self, const Tensor &the_template) {
+        return self.resize_as_(the_template);
+      })
+    .define_method(
+      "_rfft",
+      *[](const Tensor &self, int64_t signal_ndim, bool normalized, bool onesided) {
+        return self.rfft(signal_ndim, normalized, onesided);
+      })
+    .define_method(
+      "_roll",
+      *[](const Tensor &self, IntArrayRef shifts, IntArrayRef dims) {
+        return self.roll(shifts, dims);
+      })
+    .define_method(
+      "_rot90",
+      *[](const Tensor &self, int64_t k, IntArrayRef dims) {
+        return self.rot90(k, dims);
+      })
+    .define_method(
+      "_round",
+      *[](const Tensor &self) {
+        return self.round();
+      })
+    .define_method(
+      "_round_",
+      *[](Tensor &self) {
+        return self.round_();
+      })
+    .define_method(
+      "_rsqrt",
+      *[](const Tensor &self) {
+        return self.rsqrt();
+      })
+    .define_method(
+      "_rsqrt_",
+      *[](Tensor &self) {
+        return self.rsqrt_();
+      })
+    .define_method(
+      "_scatter__src",
+      *[](Tensor &self, int64_t dim, const Tensor &index, const Tensor &src) {
+        return self.scatter_(dim, index, src);
+      })
+    .define_method(
+      "_scatter__value",
+      *[](Tensor &self, int64_t dim, const Tensor &index, Scalar value) {
+        return self.scatter_(dim, index, value);
+      })
+    .define_method(
+      "_scatter_add",
+      *[](const Tensor &self, int64_t dim, const Tensor &index, const Tensor &src) {
+        return self.scatter_add(dim, index, src);
+      })
+    .define_method(
+      "_scatter_add_",
+      *[](Tensor &self, int64_t dim, const Tensor &index, const Tensor &src) {
+        return self.scatter_add_(dim, index, src);
+      })
+    .define_method(
+      "_scatter_src",
+      *[](const Tensor &self, int64_t dim, const Tensor &index, const Tensor &src) {
+        return self.scatter(dim, index, src);
+      })
+    .define_method(
+      "_scatter_value",
+      *[](const Tensor &self, int64_t dim, const Tensor &index, Scalar value) {
+        return self.scatter(dim, index, value);
+      })
+    .define_method(
+      "_select_int",
+      *[](Tensor &self, int64_t dim, int64_t index) {
+        return self.select(dim, index);
+      })
+    .define_method(
+      "_set_",
+      *[](Tensor &self) {
+        return self.set_();
+      })
+    .define_method(
+      "_set__source_tensor",
+      *[](Tensor &self, const Tensor &source) {
+        return self.set_(source);
+      })
+    .define_method(
+      "_set_data",
+      *[](Tensor &self, const Tensor &new_data) {
+        return self.set_data(new_data);
+      })
+    .define_method(
+      "_sigmoid",
+      *[](const Tensor &self) {
+        return self.sigmoid();
+      })
+    .define_method(
+      "_sigmoid_",
+      *[](Tensor &self) {
+        return self.sigmoid_();
+      })
+    .define_method(
+      "_sign",
+      *[](const Tensor &self) {
+        return self.sign();
+      })
+    .define_method(
+      "_sign_",
+      *[](Tensor &self) {
+        return self.sign_();
+      })
+    .define_method(
+      "_sin",
+      *[](const Tensor &self) {
+        return self.sin();
+      })
+    .define_method(
+      "_sin_",
+      *[](Tensor &self) {
+        return self.sin_();
+      })
+    .define_method(
+      "_sinh",
+      *[](const Tensor &self) {
+        return self.sinh();
+      })
+    .define_method(
+      "_sinh_",
+      *[](Tensor &self) {
+        return self.sinh_();
+      })
+    .define_method(
+      "_size_int",
+      *[](const Tensor &self, int64_t dim) {
+        return self.size(dim);
+      })
+    .define_method(
+      "_slice_tensor",
+      *[](Tensor &self, int64_t dim, int64_t start, int64_t end, int64_t step) {
+        return self.slice(dim, start, end, step);
+      })
+    .define_method(
+      "_slogdet",
+      *[](const Tensor &self) {
+        return wrap(self.slogdet());
+      })
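`_slogdet` is the first binding in this run whose ATen call returns a `std::tuple` of tensors rather than a single `Tensor`, so the result goes through `wrap`. The real helper lives in ext/torch/templates.cpp; the sketch below is an assumption about its general shape, not the shipped code:

#include <rice/Array.hpp>
#include <rice/Object.hpp>
#include <torch/torch.h>

using namespace Rice;

// Assumed shape of one wrap() overload: convert a pair of tensors into a
// Ruby Array, so the Ruby caller of _slogdet gets [sign, logabsdet].
Object wrap(std::tuple<torch::Tensor, torch::Tensor> x) {
  Array a;
  a.push(to_ruby<torch::Tensor>(std::get<0>(x)));
  a.push(to_ruby<torch::Tensor>(std::get<1>(x)));
  return Object(a);
}

`_solve`, `_sort`, `_svd`, `_symeig`, `_topk`, and `_triangular_solve` below take the same path.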
+    .define_method(
+      "_smm",
+      *[](const Tensor &self, const Tensor &mat2) {
+        return self.smm(mat2);
+      })
+    .define_method(
+      "_softmax",
+      *[](const Tensor &self, int64_t dim, OptionalScalarType dtype) {
+        return self.softmax(dim, dtype);
+      })
+    .define_method(
+      "_solve",
+      *[](const Tensor &self, const Tensor &A) {
+        return wrap(self.solve(A));
+      })
+    .define_method(
+      "_sort",
+      *[](const Tensor &self, int64_t dim, bool descending) {
+        return wrap(self.sort(dim, descending));
+      })
+    .define_method(
+      "_sparse_dim",
+      *[](const Tensor &self) {
+        return self.sparse_dim();
+      })
+    .define_method(
+      "_sparse_mask",
+      *[](const Tensor &self, const Tensor &mask) {
+        return self.sparse_mask(mask);
+      })
+    .define_method(
+      "_sparse_resize_",
+      *[](Tensor &self, IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) {
+        return self.sparse_resize_(size, sparse_dim, dense_dim);
+      })
+    .define_method(
+      "_sparse_resize_and_clear_",
+      *[](Tensor &self, IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) {
+        return self.sparse_resize_and_clear_(size, sparse_dim, dense_dim);
+      })
+    .define_method(
+      "_split_tensor",
+      *[](Tensor &self, int64_t split_size, int64_t dim) {
+        return self.split(split_size, dim);
+      })
+    .define_method(
+      "_split_with_sizes",
+      *[](const Tensor &self, IntArrayRef split_sizes, int64_t dim) {
+        return self.split_with_sizes(split_sizes, dim);
+      })
+    .define_method(
+      "_sqrt",
+      *[](const Tensor &self) {
+        return self.sqrt();
+      })
+    .define_method(
+      "_sqrt_",
+      *[](Tensor &self) {
+        return self.sqrt_();
+      })
+    .define_method(
+      "_squeeze",
+      *[](Tensor &self) {
+        return self.squeeze();
+      })
+    .define_method(
+      "_squeeze_",
+      *[](Tensor &self) {
+        return self.squeeze_();
+      })
+    .define_method(
+      "_squeeze__dim",
+      *[](Tensor &self, int64_t dim) {
+        return self.squeeze_(dim);
+      })
+    .define_method(
+      "_squeeze_dim",
+      *[](Tensor &self, int64_t dim) {
+        return self.squeeze(dim);
+      })
+    .define_method(
+      "_sspaddmm",
+      *[](const Tensor &self, const Tensor &mat1, const Tensor &mat2, Scalar beta, Scalar alpha) {
+        return self.sspaddmm(mat1, mat2, beta, alpha);
+      })
+    .define_method(
+      "_std",
+      *[](const Tensor &self, bool unbiased) {
+        return self.std(unbiased);
+      })
+    .define_method(
+      "_std_dim",
+      *[](const Tensor &self, IntArrayRef dim, bool unbiased, bool keepdim) {
+        return self.std(dim, unbiased, keepdim);
+      })
+    .define_method(
+      "_stride_int",
+      *[](const Tensor &self, int64_t dim) {
+        return self.stride(dim);
+      })
+    .define_method(
+      "_sub__scalar",
+      *[](Tensor &self, Scalar other, Scalar alpha) {
+        return self.sub_(other, alpha);
+      })
+    .define_method(
+      "_sub__tensor",
+      *[](Tensor &self, const Tensor &other, Scalar alpha) {
+        return self.sub_(other, alpha);
+      })
+    .define_method(
+      "_sub_scalar",
+      *[](const Tensor &self, Scalar other, Scalar alpha) {
+        return self.sub(other, alpha);
+      })
+    .define_method(
+      "_sub_tensor",
+      *[](const Tensor &self, const Tensor &other, Scalar alpha) {
+        return self.sub(other, alpha);
+      })
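The four `_sub*` bindings cover the full in-place/out-of-place by Scalar/Tensor matrix; the double underscore in `_sub__scalar` is the in-place marker of `sub_` followed by the `_scalar` overload suffix. In both forms `alpha` scales the subtrahend, so the result is `self - alpha * other`. A small standalone libtorch check of that semantics:

#include <torch/torch.h>
#include <iostream>

int main() {
  torch::Tensor t = torch::ones({2, 2});
  // out-of-place: returns a new tensor, t is untouched
  torch::Tensor u = t.sub(1.0, /*alpha=*/2.0);  // 1 - 2 * 1 = -1
  // in-place: mutates t itself
  t.sub_(1.0, /*alpha=*/2.0);
  std::cout << u << std::endl << t << std::endl;  // both all -1
}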
+    .define_method(
+      "_sum",
+      *[](const Tensor &self, OptionalScalarType dtype) {
+        return self.sum(dtype);
+      })
+    .define_method(
+      "_sum_dim_intlist",
+      *[](const Tensor &self, IntArrayRef dim, bool keepdim, OptionalScalarType dtype) {
+        return self.sum(dim, keepdim, dtype);
+      })
+    .define_method(
+      "_sum_to_size",
+      *[](const Tensor &self, IntArrayRef size) {
+        return self.sum_to_size(size);
+      })
+    .define_method(
+      "_svd",
+      *[](const Tensor &self, bool some, bool compute_uv) {
+        return wrap(self.svd(some, compute_uv));
+      })
+    .define_method(
+      "_symeig",
+      *[](const Tensor &self, bool eigenvectors, bool upper) {
+        return wrap(self.symeig(eigenvectors, upper));
+      })
+    .define_method(
+      "_t",
+      *[](Tensor &self) {
+        return self.t();
+      })
+    .define_method(
+      "_t_",
+      *[](Tensor &self) {
+        return self.t_();
+      })
+    .define_method(
+      "_take",
+      *[](const Tensor &self, const Tensor &index) {
+        return self.take(index);
+      })
+    .define_method(
+      "_tan",
+      *[](const Tensor &self) {
+        return self.tan();
+      })
+    .define_method(
+      "_tan_",
+      *[](Tensor &self) {
+        return self.tan_();
+      })
+    .define_method(
+      "_tanh",
+      *[](const Tensor &self) {
+        return self.tanh();
+      })
+    .define_method(
+      "_tanh_",
+      *[](Tensor &self) {
+        return self.tanh_();
+      })
+    .define_method(
+      "_to_dense",
+      *[](const Tensor &self) {
+        return self.to_dense();
+      })
+    .define_method(
+      "_to_device",
+      *[](const Tensor &self, Device device, ScalarType dtype, bool non_blocking, bool copy) {
+        return self.to(device, dtype, non_blocking, copy);
+      })
+    .define_method(
+      "_to_dtype",
+      *[](const Tensor &self, ScalarType dtype, bool non_blocking, bool copy) {
+        return self.to(dtype, non_blocking, copy);
+      })
+    .define_method(
+      "_to_mkldnn",
+      *[](const Tensor &self) {
+        return self.to_mkldnn();
+      })
+    .define_method(
+      "_to_other",
+      *[](const Tensor &self, const Tensor &other, bool non_blocking, bool copy) {
+        return self.to(other, non_blocking, copy);
+      })
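`_to_device`, `_to_dtype`, and `_to_other` reach three distinct C++ overloads of `Tensor::to`; as with the other suffixed names, the Ruby side is expected to pick the binding that matches its arguments. Shown directly against the libtorch API:

#include <torch/torch.h>

int main() {
  torch::Tensor x = torch::randn({3});
  // _to_dtype: to(ScalarType, non_blocking, copy)
  torch::Tensor a = x.to(torch::kFloat64, /*non_blocking=*/false, /*copy=*/false);
  // _to_device: to(Device, ScalarType, non_blocking, copy)
  torch::Tensor b = x.to(torch::Device(torch::kCPU), torch::kFloat32, false, false);
  // _to_other: to(other, non_blocking, copy) -- adopt other's dtype and device
  torch::Tensor c = x.to(a, /*non_blocking=*/false, /*copy=*/false);
}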
+    .define_method(
+      "_to_sparse",
+      *[](const Tensor &self) {
+        return self.to_sparse();
+      })
+    .define_method(
+      "_to_sparse_sparse_dim",
+      *[](const Tensor &self, int64_t sparse_dim) {
+        return self.to_sparse(sparse_dim);
+      })
+    .define_method(
+      "_topk",
+      *[](const Tensor &self, int64_t k, int64_t dim, bool largest, bool sorted) {
+        return wrap(self.topk(k, dim, largest, sorted));
+      })
+    .define_method(
+      "_trace",
+      *[](const Tensor &self) {
+        return self.trace();
+      })
+    .define_method(
+      "_transpose_",
+      *[](Tensor &self, int64_t dim0, int64_t dim1) {
+        return self.transpose_(dim0, dim1);
+      })
+    .define_method(
+      "_transpose_int",
+      *[](Tensor &self, int64_t dim0, int64_t dim1) {
+        return self.transpose(dim0, dim1);
+      })
+    .define_method(
+      "_triangular_solve",
+      *[](const Tensor &self, const Tensor &A, bool upper, bool transpose, bool unitriangular) {
+        return wrap(self.triangular_solve(A, upper, transpose, unitriangular));
+      })
+    .define_method(
+      "_tril",
+      *[](const Tensor &self, int64_t diagonal) {
+        return self.tril(diagonal);
+      })
+    .define_method(
+      "_tril_",
+      *[](Tensor &self, int64_t diagonal) {
+        return self.tril_(diagonal);
+      })
+    .define_method(
+      "_triu",
+      *[](const Tensor &self, int64_t diagonal) {
+        return self.triu(diagonal);
+      })
+    .define_method(
+      "_triu_",
+      *[](Tensor &self, int64_t diagonal) {
+        return self.triu_(diagonal);
+      })
+    .define_method(
+      "_trunc",
+      *[](const Tensor &self) {
+        return self.trunc();
+      })
+    .define_method(
+      "_trunc_",
+      *[](Tensor &self) {
+        return self.trunc_();
+      })
+    .define_method(
+      "_type_as",
+      *[](const Tensor &self, const Tensor &other) {
+        return self.type_as(other);
+      })
+    .define_method(
+      "_unbind_int",
+      *[](Tensor &self, int64_t dim) {
+        return self.unbind(dim);
+      })
+    .define_method(
+      "_unfold",
+      *[](Tensor &self, int64_t dimension, int64_t size, int64_t step) {
+        return self.unfold(dimension, size, step);
+      })
+    .define_method(
+      "_uniform_",
+      *[](Tensor &self, double from, double to) {
+        return self.uniform_(from, to);
+      })
+    .define_method(
+      "_unsqueeze",
+      *[](Tensor &self, int64_t dim) {
+        return self.unsqueeze(dim);
+      })
+    .define_method(
+      "_unsqueeze_",
+      *[](Tensor &self, int64_t dim) {
+        return self.unsqueeze_(dim);
+      })
+    .define_method(
+      "_values",
+      *[](Tensor &self) {
+        return self.values();
+      })
+    .define_method(
+      "_var",
+      *[](const Tensor &self, bool unbiased) {
+        return self.var(unbiased);
+      })
+    .define_method(
+      "_var_dim",
+      *[](const Tensor &self, IntArrayRef dim, bool unbiased, bool keepdim) {
+        return self.var(dim, unbiased, keepdim);
+      })
+    .define_method(
+      "_view",
+      *[](Tensor &self, IntArrayRef size) {
+        return self.view(size);
+      })
+    .define_method(
+      "_view_as",
+      *[](const Tensor &self, const Tensor &other) {
+        return self.view_as(other);
+      })
+    .define_method(
+      "_where_self",
+      *[](const Tensor &condition, const Tensor &self, const Tensor &other) {
+        return self.where(condition, other);
+      })
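Note the parameter order in `_where_self`: `condition` comes first and the receiver `self` second, mirroring the `where.self(Tensor condition, Tensor self, Tensor other)` schema in lib/torch/native/native_functions.yaml, while the body still uses the method form `self.where(condition, other)`. A standalone illustration of the call being bound:

#include <torch/torch.h>

int main() {
  torch::Tensor cond = torch::tensor({1, 0, 1}).to(torch::kBool);
  torch::Tensor a = torch::ones({3});
  torch::Tensor b = torch::zeros({3});
  // picks from a where cond is true, from b elsewhere => [1, 0, 1]
  torch::Tensor out = a.where(cond, b);
}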
+    .define_method(
+      "_zero_",
+      *[](Tensor &self) {
+        return self.zero_();
+      });
+}
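The final brace closes the generated registration function; this hunk does not show how it is invoked. A sketch of plausible wiring from ext/torch/ext.cpp, assuming tensor_functions.hpp declares `void add_tensor_functions(Module m)`; the entry-point body and handle names here are assumptions, not the shipped code:

#include <rice/Module.hpp>
#include <rice/Data_Type.hpp>
#include <torch/torch.h>
#include "tensor_functions.hpp"

using namespace Rice;

extern "C" void Init_ext() {
  Module rb_mTorch = define_module("Torch");
  // Tensor is wrapped as a Ruby class; passing it as a Module installs
  // every _-prefixed method defined in tensor_functions.cpp onto it.
  Module rb_cTensor = define_class_under<torch::Tensor>(rb_mTorch, "Tensor");
  add_tensor_functions(rb_cTensor);
}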