torch-rb 0.1.8 → 0.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +11 -2
- data/README.md +35 -11
- data/ext/torch/ext.cpp +37 -28
- data/ext/torch/extconf.rb +33 -6
- data/ext/torch/nn_functions.cpp +560 -0
- data/ext/torch/nn_functions.hpp +6 -0
- data/ext/torch/templates.hpp +2 -0
- data/ext/torch/tensor_functions.cpp +2085 -0
- data/ext/torch/tensor_functions.hpp +6 -0
- data/ext/torch/torch_functions.cpp +3175 -0
- data/ext/torch/torch_functions.hpp +6 -0
- data/lib/torch/ext.bundle +0 -0
- data/lib/torch/hub.rb +9 -0
- data/lib/torch/native/generator.rb +6 -3
- data/lib/torch/native/native_functions.yaml +539 -397
- data/lib/torch/native/parser.rb +2 -0
- data/lib/torch/nn/adaptive_avg_pool1d.rb +9 -0
- data/lib/torch/nn/adaptive_avg_pool2d.rb +9 -0
- data/lib/torch/nn/adaptive_avg_pool3d.rb +9 -0
- data/lib/torch/nn/adaptive_avg_poolnd.rb +14 -0
- data/lib/torch/nn/adaptive_max_pool1d.rb +9 -0
- data/lib/torch/nn/adaptive_max_pool2d.rb +9 -0
- data/lib/torch/nn/adaptive_max_pool3d.rb +9 -0
- data/lib/torch/nn/adaptive_max_poolnd.rb +15 -0
- data/lib/torch/nn/functional.rb +40 -2
- data/lib/torch/nn/module.rb +22 -1
- data/lib/torch/optim/lr_scheduler/cosine_annealing_lr.rb +29 -0
- data/lib/torch/optim/lr_scheduler/exponential_lr.rb +22 -0
- data/lib/torch/optim/lr_scheduler/lambda_lr.rb +28 -0
- data/lib/torch/optim/lr_scheduler/multi_step_lr.rb +23 -0
- data/lib/torch/optim/lr_scheduler/multiplicative_lr.rb +32 -0
- data/lib/torch/tensor.rb +8 -0
- data/lib/torch/version.rb +1 -1
- data/lib/torch.rb +21 -0
- metadata +38 -3
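
The three new `*_functions.cpp` files are generated Rice bindings (note the "generated by rake generate:functions" header in the hunk below): each native libtorch function is exposed as a `_`-prefixed singleton method on a Ruby module, overloads are disambiguated with suffixes such as `_scalar`, `_tensor`, and `_out`, and functions returning multiple tensors are passed through `wrap(...)`. A minimal sketch of that pattern is below; the function name `add_example_functions` and the chosen ops are illustrative only, not part of the gem.

```cpp
// Sketch of the generated binding pattern (assumes the gem's templates.hpp
// for the Tensor alias); not the actual generated file.
#include <torch/torch.h>
#include <rice/Module.hpp>
#include "templates.hpp"

void add_example_functions(Module m) {  // hypothetical module initializer
  m
    // A captureless lambda is dereferenced (*[] ...) so Rice receives a plain
    // function pointer it can register as a singleton method.
    .define_singleton_method(
      "_neg",
      *[](const Tensor &self) {
        return torch::neg(self);
      })
    // Out-variants take the destination tensor last in Ruby and forward it
    // first to the libtorch *_out overload.
    .define_singleton_method(
      "_neg_out",
      *[](const Tensor &self, Tensor &out) {
        return torch::neg_out(out, self);
      });
}
```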
data/ext/torch/torch_functions.cpp
@@ -0,0 +1,3175 @@
+// generated by rake generate:functions
+// do not edit by hand
+
+#include <torch/torch.h>
+#include <rice/Module.hpp>
+#include "templates.hpp"
+
+void add_torch_functions(Module m) {
+  m
+    .define_singleton_method(
+      "_abs",
+      *[](const Tensor &self) {
+        return torch::abs(self);
+      })
+    .define_singleton_method(
+      "_abs_",
+      *[](Tensor &self) {
+        return torch::abs_(self);
+      })
+    .define_singleton_method(
+      "_abs_out",
+      *[](const Tensor &self, Tensor &out) {
+        return torch::abs_out(out, self);
+      })
+    .define_singleton_method(
+      "_acos",
+      *[](const Tensor &self) {
+        return torch::acos(self);
+      })
+    .define_singleton_method(
+      "_acos_",
+      *[](Tensor &self) {
+        return torch::acos_(self);
+      })
+    .define_singleton_method(
+      "_acos_out",
+      *[](const Tensor &self, Tensor &out) {
+        return torch::acos_out(out, self);
+      })
+    .define_singleton_method(
+      "_adaptive_avg_pool1d",
+      *[](const Tensor &self, IntArrayRef output_size) {
+        return torch::adaptive_avg_pool1d(self, output_size);
+      })
+    .define_singleton_method(
+      "_adaptive_max_pool1d",
+      *[](const Tensor &self, IntArrayRef output_size) {
+        return wrap(torch::adaptive_max_pool1d(self, output_size));
+      })
+    .define_singleton_method(
+      "_add_out",
+      *[](const Tensor &self, const Tensor &other, Scalar alpha, Tensor &out) {
+        return torch::add_out(out, self, other, alpha);
+      })
+    .define_singleton_method(
+      "_add_scalar",
+      *[](const Tensor &self, Scalar other, Scalar alpha) {
+        return torch::add(self, other, alpha);
+      })
+    .define_singleton_method(
+      "_add_tensor",
+      *[](const Tensor &self, const Tensor &other, Scalar alpha) {
+        return torch::add(self, other, alpha);
+      })
+    .define_singleton_method(
+      "_addbmm",
+      *[](const Tensor &self, const Tensor &batch1, const Tensor &batch2, Scalar beta, Scalar alpha) {
+        return torch::addbmm(self, batch1, batch2, beta, alpha);
+      })
+    .define_singleton_method(
+      "_addbmm_out",
+      *[](const Tensor &self, const Tensor &batch1, const Tensor &batch2, Scalar beta, Scalar alpha, Tensor &out) {
+        return torch::addbmm_out(out, self, batch1, batch2, beta, alpha);
+      })
+    .define_singleton_method(
+      "_addcdiv",
+      *[](const Tensor &self, const Tensor &tensor1, const Tensor &tensor2, Scalar value) {
+        return torch::addcdiv(self, tensor1, tensor2, value);
+      })
+    .define_singleton_method(
+      "_addcdiv_out",
+      *[](const Tensor &self, const Tensor &tensor1, const Tensor &tensor2, Scalar value, Tensor &out) {
+        return torch::addcdiv_out(out, self, tensor1, tensor2, value);
+      })
+    .define_singleton_method(
+      "_addcmul",
+      *[](const Tensor &self, const Tensor &tensor1, const Tensor &tensor2, Scalar value) {
+        return torch::addcmul(self, tensor1, tensor2, value);
+      })
+    .define_singleton_method(
+      "_addcmul_out",
+      *[](const Tensor &self, const Tensor &tensor1, const Tensor &tensor2, Scalar value, Tensor &out) {
+        return torch::addcmul_out(out, self, tensor1, tensor2, value);
+      })
+    .define_singleton_method(
+      "_addmm",
+      *[](const Tensor &self, const Tensor &mat1, const Tensor &mat2, Scalar beta, Scalar alpha) {
+        return torch::addmm(self, mat1, mat2, beta, alpha);
+      })
+    .define_singleton_method(
+      "_addmm_out",
+      *[](const Tensor &self, const Tensor &mat1, const Tensor &mat2, Scalar beta, Scalar alpha, Tensor &out) {
+        return torch::addmm_out(out, self, mat1, mat2, beta, alpha);
+      })
+    .define_singleton_method(
+      "_addmv",
+      *[](const Tensor &self, const Tensor &mat, const Tensor &vec, Scalar beta, Scalar alpha) {
+        return torch::addmv(self, mat, vec, beta, alpha);
+      })
+    .define_singleton_method(
+      "_addmv_",
+      *[](Tensor &self, const Tensor &mat, const Tensor &vec, Scalar beta, Scalar alpha) {
+        return torch::addmv_(self, mat, vec, beta, alpha);
+      })
+    .define_singleton_method(
+      "_addmv_out",
+      *[](const Tensor &self, const Tensor &mat, const Tensor &vec, Scalar beta, Scalar alpha, Tensor &out) {
+        return torch::addmv_out(out, self, mat, vec, beta, alpha);
+      })
+    .define_singleton_method(
+      "_addr",
+      *[](const Tensor &self, const Tensor &vec1, const Tensor &vec2, Scalar beta, Scalar alpha) {
+        return torch::addr(self, vec1, vec2, beta, alpha);
+      })
+    .define_singleton_method(
+      "_addr_out",
+      *[](const Tensor &self, const Tensor &vec1, const Tensor &vec2, Scalar beta, Scalar alpha, Tensor &out) {
+        return torch::addr_out(out, self, vec1, vec2, beta, alpha);
+      })
+    .define_singleton_method(
+      "_affine_grid_generator",
+      *[](const Tensor &theta, IntArrayRef size, bool align_corners) {
+        return torch::affine_grid_generator(theta, size, align_corners);
+      })
+    .define_singleton_method(
+      "_alias",
+      *[](Tensor &self) {
+        return torch::alias(self);
+      })
+    .define_singleton_method(
+      "_align_tensors",
+      *[](TensorList tensors) {
+        return torch::align_tensors(tensors);
+      })
+    .define_singleton_method(
+      "_all",
+      *[](const Tensor &self) {
+        return torch::all(self);
+      })
+    .define_singleton_method(
+      "_all_dim",
+      *[](const Tensor &self, int64_t dim, bool keepdim) {
+        return torch::all(self, dim, keepdim);
+      })
+    .define_singleton_method(
+      "_all_out",
+      *[](const Tensor &self, int64_t dim, bool keepdim, Tensor &out) {
+        return torch::all_out(out, self, dim, keepdim);
+      })
+    .define_singleton_method(
+      "_allclose",
+      *[](const Tensor &self, const Tensor &other, double rtol, double atol, bool equal_nan) {
+        return torch::allclose(self, other, rtol, atol, equal_nan);
+      })
+    .define_singleton_method(
+      "_alpha_dropout",
+      *[](const Tensor &input, double p, bool train) {
+        return torch::alpha_dropout(input, p, train);
+      })
+    .define_singleton_method(
+      "_alpha_dropout_",
+      *[](Tensor &self, double p, bool train) {
+        return torch::alpha_dropout_(self, p, train);
+      })
+    .define_singleton_method(
+      "_angle",
+      *[](const Tensor &self) {
+        return torch::angle(self);
+      })
+    .define_singleton_method(
+      "_angle_out",
+      *[](const Tensor &self, Tensor &out) {
+        return torch::angle_out(out, self);
+      })
+    .define_singleton_method(
+      "_any",
+      *[](const Tensor &self) {
+        return torch::any(self);
+      })
+    .define_singleton_method(
+      "_any_dim",
+      *[](const Tensor &self, int64_t dim, bool keepdim) {
+        return torch::any(self, dim, keepdim);
+      })
+    .define_singleton_method(
+      "_any_out",
+      *[](const Tensor &self, int64_t dim, bool keepdim, Tensor &out) {
+        return torch::any_out(out, self, dim, keepdim);
+      })
+    .define_singleton_method(
+      "_arange_out",
+      *[](Scalar end, Tensor &out) {
+        return torch::arange_out(out, end);
+      })
+    .define_singleton_method(
+      "_arange_start_out",
+      *[](Scalar start, Scalar end, Scalar step, Tensor &out) {
+        return torch::arange_out(out, start, end, step);
+      })
+    .define_singleton_method(
+      "_argmax",
+      *[](const Tensor &self) {
+        return torch::argmax(self);
+      })
+    .define_singleton_method(
+      "_argmax_dim",
+      *[](const Tensor &self, int64_t dim, bool keepdim) {
+        return torch::argmax(self, dim, keepdim);
+      })
+    .define_singleton_method(
+      "_argmin",
+      *[](const Tensor &self) {
+        return torch::argmin(self);
+      })
+    .define_singleton_method(
+      "_argmin_dim",
+      *[](const Tensor &self, int64_t dim, bool keepdim) {
+        return torch::argmin(self, dim, keepdim);
+      })
+    .define_singleton_method(
+      "_argsort",
+      *[](const Tensor &self, int64_t dim, bool descending) {
+        return torch::argsort(self, dim, descending);
+      })
+    .define_singleton_method(
+      "_as_strided",
+      *[](Tensor &self, IntArrayRef size, IntArrayRef stride) {
+        return torch::as_strided(self, size, stride);
+      })
+    .define_singleton_method(
+      "_as_strided_",
+      *[](Tensor &self, IntArrayRef size, IntArrayRef stride) {
+        return torch::as_strided_(self, size, stride);
+      })
+    .define_singleton_method(
+      "_as_strided__storage_offset",
+      *[](Tensor &self, IntArrayRef size, IntArrayRef stride, int64_t storage_offset) {
+        return torch::as_strided_(self, size, stride, storage_offset);
+      })
+    .define_singleton_method(
+      "_as_strided_storage_offset",
+      *[](Tensor &self, IntArrayRef size, IntArrayRef stride, int64_t storage_offset) {
+        return torch::as_strided(self, size, stride, storage_offset);
+      })
+    .define_singleton_method(
+      "_asin",
+      *[](const Tensor &self) {
+        return torch::asin(self);
+      })
+    .define_singleton_method(
+      "_asin_",
+      *[](Tensor &self) {
+        return torch::asin_(self);
+      })
+    .define_singleton_method(
+      "_asin_out",
+      *[](const Tensor &self, Tensor &out) {
+        return torch::asin_out(out, self);
+      })
+    .define_singleton_method(
+      "_atan",
+      *[](const Tensor &self) {
+        return torch::atan(self);
+      })
+    .define_singleton_method(
+      "_atan2",
+      *[](const Tensor &self, const Tensor &other) {
+        return torch::atan2(self, other);
+      })
+    .define_singleton_method(
+      "_atan2_out",
+      *[](const Tensor &self, const Tensor &other, Tensor &out) {
+        return torch::atan2_out(out, self, other);
+      })
+    .define_singleton_method(
+      "_atan_",
+      *[](Tensor &self) {
+        return torch::atan_(self);
+      })
+    .define_singleton_method(
+      "_atan_out",
+      *[](const Tensor &self, Tensor &out) {
+        return torch::atan_out(out, self);
+      })
+    .define_singleton_method(
+      "_avg_pool1d",
+      *[](const Tensor &self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) {
+        return torch::avg_pool1d(self, kernel_size, stride, padding, ceil_mode, count_include_pad);
+      })
+    .define_singleton_method(
+      "_baddbmm",
+      *[](const Tensor &self, const Tensor &batch1, const Tensor &batch2, Scalar beta, Scalar alpha) {
+        return torch::baddbmm(self, batch1, batch2, beta, alpha);
+      })
+    .define_singleton_method(
+      "_baddbmm_out",
+      *[](const Tensor &self, const Tensor &batch1, const Tensor &batch2, Scalar beta, Scalar alpha, Tensor &out) {
+        return torch::baddbmm_out(out, self, batch1, batch2, beta, alpha);
+      })
+    .define_singleton_method(
+      "_batch_norm",
+      *[](const Tensor &input, OptionalTensor weight, OptionalTensor bias, OptionalTensor running_mean, OptionalTensor running_var, bool training, double momentum, double eps, bool cudnn_enabled) {
+        return torch::batch_norm(input, weight, bias, running_mean, running_var, training, momentum, eps, cudnn_enabled);
+      })
+    .define_singleton_method(
+      "_batch_norm_backward_elemt",
+      *[](const Tensor &grad_out, const Tensor &input, const Tensor &mean, const Tensor &invstd, OptionalTensor weight, const Tensor &mean_dy, const Tensor &mean_dy_xmu) {
+        return torch::batch_norm_backward_elemt(grad_out, input, mean, invstd, weight, mean_dy, mean_dy_xmu);
+      })
+    .define_singleton_method(
+      "_batch_norm_backward_reduce",
+      *[](const Tensor &grad_out, const Tensor &input, const Tensor &mean, const Tensor &invstd, OptionalTensor weight, bool input_g, bool weight_g, bool bias_g) {
+        return wrap(torch::batch_norm_backward_reduce(grad_out, input, mean, invstd, weight, input_g, weight_g, bias_g));
+      })
+    .define_singleton_method(
+      "_batch_norm_elemt",
+      *[](const Tensor &input, OptionalTensor weight, OptionalTensor bias, const Tensor &mean, const Tensor &invstd, double eps) {
+        return torch::batch_norm_elemt(input, weight, bias, mean, invstd, eps);
+      })
+    .define_singleton_method(
+      "_batch_norm_elemt_out",
+      *[](const Tensor &input, OptionalTensor weight, OptionalTensor bias, const Tensor &mean, const Tensor &invstd, double eps, Tensor &out) {
+        return torch::batch_norm_elemt_out(out, input, weight, bias, mean, invstd, eps);
+      })
+    .define_singleton_method(
+      "_batch_norm_gather_stats",
+      *[](const Tensor &input, const Tensor &mean, const Tensor &invstd, OptionalTensor running_mean, OptionalTensor running_var, double momentum, double eps, int64_t count) {
+        return wrap(torch::batch_norm_gather_stats(input, mean, invstd, running_mean, running_var, momentum, eps, count));
+      })
+    .define_singleton_method(
+      "_batch_norm_gather_stats_with_counts",
+      *[](const Tensor &input, const Tensor &mean, const Tensor &invstd, OptionalTensor running_mean, OptionalTensor running_var, double momentum, double eps, IntArrayRef counts) {
+        return wrap(torch::batch_norm_gather_stats_with_counts(input, mean, invstd, running_mean, running_var, momentum, eps, counts));
+      })
+    .define_singleton_method(
+      "_batch_norm_stats",
+      *[](const Tensor &input, double eps) {
+        return wrap(torch::batch_norm_stats(input, eps));
+      })
+    .define_singleton_method(
+      "_batch_norm_update_stats",
+      *[](const Tensor &input, OptionalTensor running_mean, OptionalTensor running_var, double momentum) {
+        return wrap(torch::batch_norm_update_stats(input, running_mean, running_var, momentum));
+      })
+    .define_singleton_method(
+      "_bernoulli",
+      *[](const Tensor &self) {
+        return torch::bernoulli(self);
+      })
+    .define_singleton_method(
+      "_bernoulli_out",
+      *[](const Tensor &self, Tensor &out) {
+        return torch::bernoulli_out(out, self);
+      })
+    .define_singleton_method(
+      "_bernoulli_p",
+      *[](const Tensor &self, double p) {
+        return torch::bernoulli(self, p);
+      })
+    .define_singleton_method(
+      "_bilinear",
+      *[](const Tensor &input1, const Tensor &input2, const Tensor &weight, OptionalTensor bias) {
+        return torch::bilinear(input1, input2, weight, bias);
+      })
+    .define_singleton_method(
+      "_binary_cross_entropy_with_logits",
+      *[](const Tensor &self, const Tensor &target, OptionalTensor weight, OptionalTensor pos_weight, MyReduction reduction) {
+        return torch::binary_cross_entropy_with_logits(self, target, weight, pos_weight, reduction);
+      })
+    .define_singleton_method(
+      "_bincount",
+      *[](const Tensor &self, OptionalTensor weights, int64_t minlength) {
+        return torch::bincount(self, weights, minlength);
+      })
+    .define_singleton_method(
+      "_bitwise_and_scalar",
+      *[](const Tensor &self, Scalar other) {
+        return torch::bitwise_and(self, other);
+      })
+    .define_singleton_method(
+      "_bitwise_and_scalar_out",
+      *[](const Tensor &self, Scalar other, Tensor &out) {
+        return torch::bitwise_and_out(out, self, other);
+      })
+    .define_singleton_method(
+      "_bitwise_and_tensor",
+      *[](const Tensor &self, const Tensor &other) {
+        return torch::bitwise_and(self, other);
+      })
+    .define_singleton_method(
+      "_bitwise_and_tensor_out",
+      *[](const Tensor &self, const Tensor &other, Tensor &out) {
+        return torch::bitwise_and_out(out, self, other);
+      })
+    .define_singleton_method(
+      "_bitwise_not",
+      *[](const Tensor &self) {
+        return torch::bitwise_not(self);
+      })
+    .define_singleton_method(
+      "_bitwise_not_out",
+      *[](const Tensor &self, Tensor &out) {
+        return torch::bitwise_not_out(out, self);
+      })
+    .define_singleton_method(
+      "_bitwise_or_scalar",
+      *[](const Tensor &self, Scalar other) {
+        return torch::bitwise_or(self, other);
+      })
+    .define_singleton_method(
+      "_bitwise_or_scalar_out",
+      *[](const Tensor &self, Scalar other, Tensor &out) {
+        return torch::bitwise_or_out(out, self, other);
+      })
+    .define_singleton_method(
+      "_bitwise_or_tensor",
+      *[](const Tensor &self, const Tensor &other) {
+        return torch::bitwise_or(self, other);
+      })
+    .define_singleton_method(
+      "_bitwise_or_tensor_out",
+      *[](const Tensor &self, const Tensor &other, Tensor &out) {
+        return torch::bitwise_or_out(out, self, other);
+      })
+    .define_singleton_method(
+      "_bitwise_xor_scalar",
+      *[](const Tensor &self, Scalar other) {
+        return torch::bitwise_xor(self, other);
+      })
+    .define_singleton_method(
+      "_bitwise_xor_scalar_out",
+      *[](const Tensor &self, Scalar other, Tensor &out) {
+        return torch::bitwise_xor_out(out, self, other);
+      })
+    .define_singleton_method(
+      "_bitwise_xor_tensor",
+      *[](const Tensor &self, const Tensor &other) {
+        return torch::bitwise_xor(self, other);
+      })
+    .define_singleton_method(
+      "_bitwise_xor_tensor_out",
+      *[](const Tensor &self, const Tensor &other, Tensor &out) {
+        return torch::bitwise_xor_out(out, self, other);
+      })
+    .define_singleton_method(
+      "_bmm",
+      *[](const Tensor &self, const Tensor &mat2) {
+        return torch::bmm(self, mat2);
+      })
+    .define_singleton_method(
+      "_bmm_out",
+      *[](const Tensor &self, const Tensor &mat2, Tensor &out) {
+        return torch::bmm_out(out, self, mat2);
+      })
+    .define_singleton_method(
+      "_broadcast_tensors",
+      *[](TensorList tensors) {
+        return torch::broadcast_tensors(tensors);
+      })
+    .define_singleton_method(
+      "_can_cast",
+      *[](ScalarType from, ScalarType to) {
+        return torch::can_cast(from, to);
+      })
+    .define_singleton_method(
+      "_cartesian_prod",
+      *[](TensorList tensors) {
+        return torch::cartesian_prod(tensors);
+      })
+    .define_singleton_method(
+      "_cat",
+      *[](TensorList tensors, int64_t dim) {
+        return torch::cat(tensors, dim);
+      })
+    .define_singleton_method(
+      "_cat_out",
+      *[](TensorList tensors, int64_t dim, Tensor &out) {
+        return torch::cat_out(out, tensors, dim);
+      })
+    .define_singleton_method(
+      "_cdist",
+      *[](const Tensor &x1, const Tensor &x2, double p) {
+        return torch::cdist(x1, x2, p);
+      })
+    .define_singleton_method(
+      "_cdist_compute_mode",
+      *[](const Tensor &x1, const Tensor &x2, double p, int64_t compute_mode) {
+        return torch::cdist(x1, x2, p, compute_mode);
+      })
+    .define_singleton_method(
+      "_ceil",
+      *[](const Tensor &self) {
+        return torch::ceil(self);
+      })
+    .define_singleton_method(
+      "_ceil_",
+      *[](Tensor &self) {
+        return torch::ceil_(self);
+      })
+    .define_singleton_method(
+      "_ceil_out",
+      *[](const Tensor &self, Tensor &out) {
+        return torch::ceil_out(out, self);
+      })
+    .define_singleton_method(
+      "_celu",
+      *[](const Tensor &self, Scalar alpha) {
+        return torch::celu(self, alpha);
+      })
+    .define_singleton_method(
+      "_celu_",
+      *[](Tensor &self, Scalar alpha) {
+        return torch::celu_(self, alpha);
+      })
+    .define_singleton_method(
+      "_chain_matmul",
+      *[](TensorList matrices) {
+        return torch::chain_matmul(matrices);
+      })
+    .define_singleton_method(
+      "_cholesky",
+      *[](const Tensor &self, bool upper) {
+        return torch::cholesky(self, upper);
+      })
+    .define_singleton_method(
+      "_cholesky_inverse",
+      *[](const Tensor &self, bool upper) {
+        return torch::cholesky_inverse(self, upper);
+      })
+    .define_singleton_method(
+      "_cholesky_inverse_out",
+      *[](const Tensor &self, bool upper, Tensor &out) {
+        return torch::cholesky_inverse_out(out, self, upper);
+      })
+    .define_singleton_method(
+      "_cholesky_out",
+      *[](const Tensor &self, bool upper, Tensor &out) {
+        return torch::cholesky_out(out, self, upper);
+      })
+    .define_singleton_method(
+      "_cholesky_solve",
+      *[](const Tensor &self, const Tensor &input2, bool upper) {
+        return torch::cholesky_solve(self, input2, upper);
+      })
+    .define_singleton_method(
+      "_cholesky_solve_out",
+      *[](const Tensor &self, const Tensor &input2, bool upper, Tensor &out) {
+        return torch::cholesky_solve_out(out, self, input2, upper);
+      })
+    .define_singleton_method(
+      "_chunk",
+      *[](Tensor &self, int64_t chunks, int64_t dim) {
+        return torch::chunk(self, chunks, dim);
+      })
+    .define_singleton_method(
+      "_clamp_max",
+      *[](const Tensor &self, Scalar max) {
+        return torch::clamp_max(self, max);
+      })
+    .define_singleton_method(
+      "_clamp_max_",
+      *[](Tensor &self, Scalar max) {
+        return torch::clamp_max_(self, max);
+      })
+    .define_singleton_method(
+      "_clamp_max_out",
+      *[](const Tensor &self, Scalar max, Tensor &out) {
+        return torch::clamp_max_out(out, self, max);
+      })
+    .define_singleton_method(
+      "_clamp_min",
+      *[](const Tensor &self, Scalar min) {
+        return torch::clamp_min(self, min);
+      })
+    .define_singleton_method(
+      "_clamp_min_",
+      *[](Tensor &self, Scalar min) {
+        return torch::clamp_min_(self, min);
+      })
+    .define_singleton_method(
+      "_clamp_min_out",
+      *[](const Tensor &self, Scalar min, Tensor &out) {
+        return torch::clamp_min_out(out, self, min);
+      })
+    .define_singleton_method(
+      "_clone",
+      *[](const Tensor &self) {
+        return torch::clone(self);
+      })
+    .define_singleton_method(
+      "_combinations",
+      *[](const Tensor &self, int64_t r, bool with_replacement) {
+        return torch::combinations(self, r, with_replacement);
+      })
+    .define_singleton_method(
+      "_conj",
+      *[](const Tensor &self) {
+        return torch::conj(self);
+      })
+    .define_singleton_method(
+      "_conj_out",
+      *[](const Tensor &self, Tensor &out) {
+        return torch::conj_out(out, self);
+      })
+    .define_singleton_method(
+      "_constant_pad_nd",
+      *[](const Tensor &self, IntArrayRef pad, Scalar value) {
+        return torch::constant_pad_nd(self, pad, value);
+      })
+    .define_singleton_method(
+      "_conv1d",
+      *[](const Tensor &input, const Tensor &weight, OptionalTensor bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, int64_t groups) {
+        return torch::conv1d(input, weight, bias, stride, padding, dilation, groups);
+      })
+    .define_singleton_method(
+      "_conv2d",
+      *[](const Tensor &input, const Tensor &weight, OptionalTensor bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, int64_t groups) {
+        return torch::conv2d(input, weight, bias, stride, padding, dilation, groups);
+      })
+    .define_singleton_method(
+      "_conv3d",
+      *[](const Tensor &input, const Tensor &weight, OptionalTensor bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, int64_t groups) {
+        return torch::conv3d(input, weight, bias, stride, padding, dilation, groups);
+      })
+    .define_singleton_method(
+      "_conv_tbc",
+      *[](const Tensor &self, const Tensor &weight, const Tensor &bias, int64_t pad) {
+        return torch::conv_tbc(self, weight, bias, pad);
+      })
+    .define_singleton_method(
+      "_conv_transpose1d",
+      *[](const Tensor &input, const Tensor &weight, OptionalTensor bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, int64_t groups, IntArrayRef dilation) {
+        return torch::conv_transpose1d(input, weight, bias, stride, padding, output_padding, groups, dilation);
+      })
+    .define_singleton_method(
+      "_conv_transpose2d_input",
+      *[](const Tensor &input, const Tensor &weight, OptionalTensor bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, int64_t groups, IntArrayRef dilation) {
+        return torch::conv_transpose2d(input, weight, bias, stride, padding, output_padding, groups, dilation);
+      })
+    .define_singleton_method(
+      "_conv_transpose3d_input",
+      *[](const Tensor &input, const Tensor &weight, OptionalTensor bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, int64_t groups, IntArrayRef dilation) {
+        return torch::conv_transpose3d(input, weight, bias, stride, padding, output_padding, groups, dilation);
+      })
+    .define_singleton_method(
+      "_convolution",
+      *[](const Tensor &input, const Tensor &weight, OptionalTensor bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool transposed, IntArrayRef output_padding, int64_t groups) {
+        return torch::convolution(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups);
+      })
+    .define_singleton_method(
+      "_convolution_overrideable",
+      *[](const Tensor &input, const Tensor &weight, OptionalTensor bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool transposed, IntArrayRef output_padding, int64_t groups) {
+        return torch::convolution_overrideable(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups);
+      })
+    .define_singleton_method(
+      "_copy_sparse_to_sparse_",
+      *[](Tensor &self, const Tensor &src, bool non_blocking) {
+        return torch::copy_sparse_to_sparse_(self, src, non_blocking);
+      })
+    .define_singleton_method(
+      "_cos",
+      *[](const Tensor &self) {
+        return torch::cos(self);
+      })
+    .define_singleton_method(
+      "_cos_",
+      *[](Tensor &self) {
+        return torch::cos_(self);
+      })
+    .define_singleton_method(
+      "_cos_out",
+      *[](const Tensor &self, Tensor &out) {
+        return torch::cos_out(out, self);
+      })
+    .define_singleton_method(
+      "_cosh",
+      *[](const Tensor &self) {
+        return torch::cosh(self);
+      })
+    .define_singleton_method(
+      "_cosh_",
+      *[](Tensor &self) {
+        return torch::cosh_(self);
+      })
+    .define_singleton_method(
+      "_cosh_out",
+      *[](const Tensor &self, Tensor &out) {
+        return torch::cosh_out(out, self);
+      })
+    .define_singleton_method(
+      "_cosine_embedding_loss",
+      *[](const Tensor &input1, const Tensor &input2, const Tensor &target, double margin, MyReduction reduction) {
+        return torch::cosine_embedding_loss(input1, input2, target, margin, reduction);
+      })
+    .define_singleton_method(
+      "_cosine_similarity",
+      *[](const Tensor &x1, const Tensor &x2, int64_t dim, double eps) {
+        return torch::cosine_similarity(x1, x2, dim, eps);
+      })
+    .define_singleton_method(
+      "_ctc_loss_intlist",
+      *[](const Tensor &log_probs, const Tensor &targets, IntArrayRef input_lengths, IntArrayRef target_lengths, int64_t blank, MyReduction reduction, bool zero_infinity) {
+        return torch::ctc_loss(log_probs, targets, input_lengths, target_lengths, blank, reduction, zero_infinity);
+      })
+    .define_singleton_method(
+      "_ctc_loss_tensor",
+      *[](const Tensor &log_probs, const Tensor &targets, const Tensor &input_lengths, const Tensor &target_lengths, int64_t blank, MyReduction reduction, bool zero_infinity) {
+        return torch::ctc_loss(log_probs, targets, input_lengths, target_lengths, blank, reduction, zero_infinity);
+      })
+    .define_singleton_method(
+      "_cudnn_affine_grid_generator",
+      *[](const Tensor &theta, int64_t N, int64_t C, int64_t H, int64_t W) {
+        return torch::cudnn_affine_grid_generator(theta, N, C, H, W);
+      })
+    .define_singleton_method(
+      "_cudnn_batch_norm",
+      *[](const Tensor &input, const Tensor &weight, OptionalTensor bias, OptionalTensor running_mean, OptionalTensor running_var, bool training, double exponential_average_factor, double epsilon) {
+        return wrap(torch::cudnn_batch_norm(input, weight, bias, running_mean, running_var, training, exponential_average_factor, epsilon));
+      })
+    .define_singleton_method(
+      "_cudnn_convolution",
+      *[](const Tensor &self, const Tensor &weight, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) {
+        return torch::cudnn_convolution(self, weight, padding, stride, dilation, groups, benchmark, deterministic);
+      })
+    .define_singleton_method(
+      "_cudnn_convolution_backward_input",
+      *[](IntArrayRef self_size, const Tensor &grad_output, const Tensor &weight, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) {
+        return torch::cudnn_convolution_backward_input(self_size, grad_output, weight, padding, stride, dilation, groups, benchmark, deterministic);
+      })
+    .define_singleton_method(
+      "_cudnn_convolution_backward_weight",
+      *[](IntArrayRef weight_size, const Tensor &grad_output, const Tensor &self, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) {
+        return torch::cudnn_convolution_backward_weight(weight_size, grad_output, self, padding, stride, dilation, groups, benchmark, deterministic);
+      })
+    .define_singleton_method(
+      "_cudnn_convolution_deprecated",
+      *[](const Tensor &self, const Tensor &weight, OptionalTensor bias, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) {
+        return torch::cudnn_convolution(self, weight, bias, padding, stride, dilation, groups, benchmark, deterministic);
+      })
+    .define_singleton_method(
+      "_cudnn_convolution_transpose",
+      *[](const Tensor &self, const Tensor &weight, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) {
+        return torch::cudnn_convolution_transpose(self, weight, padding, output_padding, stride, dilation, groups, benchmark, deterministic);
+      })
+    .define_singleton_method(
+      "_cudnn_convolution_transpose_backward_input",
+      *[](const Tensor &grad_output, const Tensor &weight, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) {
+        return torch::cudnn_convolution_transpose_backward_input(grad_output, weight, padding, stride, dilation, groups, benchmark, deterministic);
+      })
+    .define_singleton_method(
+      "_cudnn_convolution_transpose_backward_weight",
+      *[](IntArrayRef weight_size, const Tensor &grad_output, const Tensor &self, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) {
+        return torch::cudnn_convolution_transpose_backward_weight(weight_size, grad_output, self, padding, stride, dilation, groups, benchmark, deterministic);
+      })
+    .define_singleton_method(
+      "_cudnn_convolution_transpose_deprecated",
+      *[](const Tensor &self, const Tensor &weight, OptionalTensor bias, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) {
+        return torch::cudnn_convolution_transpose(self, weight, bias, padding, output_padding, stride, dilation, groups, benchmark, deterministic);
+      })
+    .define_singleton_method(
+      "_cudnn_grid_sampler",
+      *[](const Tensor &self, const Tensor &grid) {
+        return torch::cudnn_grid_sampler(self, grid);
+      })
+    .define_singleton_method(
+      "_cudnn_is_acceptable",
+      *[](const Tensor &self) {
+        return torch::cudnn_is_acceptable(self);
+      })
+    .define_singleton_method(
+      "_cummax",
+      *[](const Tensor &self, int64_t dim) {
+        return wrap(torch::cummax(self, dim));
+      })
+    .define_singleton_method(
+      "_cummax_out",
+      *[](const Tensor &self, int64_t dim, Tensor &values, Tensor &indices) {
+        return wrap(torch::cummax_out(values, indices, self, dim));
+      })
+    .define_singleton_method(
+      "_cummin",
+      *[](const Tensor &self, int64_t dim) {
+        return wrap(torch::cummin(self, dim));
+      })
+    .define_singleton_method(
+      "_cummin_out",
+      *[](const Tensor &self, int64_t dim, Tensor &values, Tensor &indices) {
+        return wrap(torch::cummin_out(values, indices, self, dim));
+      })
+    .define_singleton_method(
+      "_cumprod",
+      *[](const Tensor &self, int64_t dim, OptionalScalarType dtype) {
+        return torch::cumprod(self, dim, dtype);
+      })
+    .define_singleton_method(
+      "_cumprod_out",
+      *[](const Tensor &self, int64_t dim, OptionalScalarType dtype, Tensor &out) {
+        return torch::cumprod_out(out, self, dim, dtype);
+      })
+    .define_singleton_method(
+      "_cumsum",
+      *[](const Tensor &self, int64_t dim, OptionalScalarType dtype) {
+        return torch::cumsum(self, dim, dtype);
+      })
+    .define_singleton_method(
+      "_cumsum_out",
+      *[](const Tensor &self, int64_t dim, OptionalScalarType dtype, Tensor &out) {
+        return torch::cumsum_out(out, self, dim, dtype);
+      })
+    .define_singleton_method(
+      "_dequantize",
+      *[](const Tensor &self) {
+        return torch::dequantize(self);
+      })
+    .define_singleton_method(
+      "_det",
+      *[](const Tensor &self) {
+        return torch::det(self);
+      })
+    .define_singleton_method(
+      "_detach",
+      *[](const Tensor &self) {
+        return torch::detach(self);
+      })
+    .define_singleton_method(
+      "_detach_",
+      *[](Tensor &self) {
+        return torch::detach_(self);
+      })
+    .define_singleton_method(
+      "_diag",
+      *[](const Tensor &self, int64_t diagonal) {
+        return torch::diag(self, diagonal);
+      })
+    .define_singleton_method(
+      "_diag_embed",
+      *[](const Tensor &self, int64_t offset, int64_t dim1, int64_t dim2) {
+        return torch::diag_embed(self, offset, dim1, dim2);
+      })
+    .define_singleton_method(
+      "_diag_out",
+      *[](const Tensor &self, int64_t diagonal, Tensor &out) {
+        return torch::diag_out(out, self, diagonal);
+      })
+    .define_singleton_method(
+      "_diagflat",
+      *[](const Tensor &self, int64_t offset) {
+        return torch::diagflat(self, offset);
+      })
+    .define_singleton_method(
+      "_diagonal",
+      *[](Tensor &self, int64_t offset, int64_t dim1, int64_t dim2) {
+        return torch::diagonal(self, offset, dim1, dim2);
+      })
+    .define_singleton_method(
+      "_digamma",
+      *[](const Tensor &self) {
+        return torch::digamma(self);
+      })
+    .define_singleton_method(
+      "_digamma_out",
+      *[](const Tensor &self, Tensor &out) {
+        return torch::digamma_out(out, self);
+      })
+    .define_singleton_method(
+      "_dist",
+      *[](const Tensor &self, const Tensor &other, Scalar p) {
+        return torch::dist(self, other, p);
+      })
+    .define_singleton_method(
+      "_div_out",
+      *[](const Tensor &self, const Tensor &other, Tensor &out) {
+        return torch::div_out(out, self, other);
+      })
+    .define_singleton_method(
+      "_div_scalar",
+      *[](const Tensor &self, Scalar other) {
+        return torch::div(self, other);
+      })
+    .define_singleton_method(
+      "_div_tensor",
+      *[](const Tensor &self, const Tensor &other) {
+        return torch::div(self, other);
+      })
+    .define_singleton_method(
+      "_dot",
+      *[](const Tensor &self, const Tensor &tensor) {
+        return torch::dot(self, tensor);
+      })
+    .define_singleton_method(
+      "_dot_out",
+      *[](const Tensor &self, const Tensor &tensor, Tensor &out) {
+        return torch::dot_out(out, self, tensor);
+      })
+    .define_singleton_method(
+      "_dropout",
+      *[](const Tensor &input, double p, bool train) {
+        return torch::dropout(input, p, train);
+      })
+    .define_singleton_method(
+      "_dropout_",
+      *[](Tensor &self, double p, bool train) {
+        return torch::dropout_(self, p, train);
+      })
+    .define_singleton_method(
+      "_eig",
+      *[](const Tensor &self, bool eigenvectors) {
+        return wrap(torch::eig(self, eigenvectors));
+      })
+    .define_singleton_method(
+      "_eig_e",
+      *[](const Tensor &self, bool eigenvectors, Tensor &e, Tensor &v) {
+        return wrap(torch::eig_out(e, v, self, eigenvectors));
+      })
+    .define_singleton_method(
+      "_einsum",
+      *[](std::string equation, TensorList tensors) {
+        return torch::einsum(equation, tensors);
+      })
+    .define_singleton_method(
+      "_embedding",
+      *[](const Tensor &weight, const Tensor &indices, int64_t padding_idx, bool scale_grad_by_freq, bool sparse) {
+        return torch::embedding(weight, indices, padding_idx, scale_grad_by_freq, sparse);
+      })
+    .define_singleton_method(
+      "_embedding_bag",
+      *[](const Tensor &weight, const Tensor &indices, const Tensor &offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, OptionalTensor per_sample_weights, bool include_last_offset) {
+        return wrap(torch::embedding_bag(weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset));
+      })
+    .define_singleton_method(
+      "_embedding_renorm_",
+      *[](Tensor &self, const Tensor &indices, double max_norm, double norm_type) {
+        return torch::embedding_renorm_(self, indices, max_norm, norm_type);
+      })
+    .define_singleton_method(
+      "_empty_out",
+      *[](IntArrayRef size, Tensor &out) {
+        return torch::empty_out(out, size);
+      })
+    .define_singleton_method(
+      "_eq_scalar",
+      *[](const Tensor &self, Scalar other) {
+        return torch::eq(self, other);
+      })
+    .define_singleton_method(
+      "_eq_scalar_out",
+      *[](const Tensor &self, Scalar other, Tensor &out) {
+        return torch::eq_out(out, self, other);
+      })
+    .define_singleton_method(
+      "_eq_tensor",
+      *[](const Tensor &self, const Tensor &other) {
+        return torch::eq(self, other);
+      })
+    .define_singleton_method(
+      "_eq_tensor_out",
+      *[](const Tensor &self, const Tensor &other, Tensor &out) {
+        return torch::eq_out(out, self, other);
+      })
+    .define_singleton_method(
+      "_equal",
+      *[](const Tensor &self, const Tensor &other) {
+        return torch::equal(self, other);
+      })
+    .define_singleton_method(
+      "_erf",
+      *[](const Tensor &self) {
+        return torch::erf(self);
+      })
+    .define_singleton_method(
+      "_erf_",
+      *[](Tensor &self) {
+        return torch::erf_(self);
+      })
+    .define_singleton_method(
+      "_erf_out",
+      *[](const Tensor &self, Tensor &out) {
+        return torch::erf_out(out, self);
+      })
+    .define_singleton_method(
+      "_erfc",
+      *[](const Tensor &self) {
+        return torch::erfc(self);
+      })
+    .define_singleton_method(
+      "_erfc_",
+      *[](Tensor &self) {
+        return torch::erfc_(self);
+      })
+    .define_singleton_method(
+      "_erfc_out",
+      *[](const Tensor &self, Tensor &out) {
+        return torch::erfc_out(out, self);
+      })
+    .define_singleton_method(
+      "_erfinv",
+      *[](const Tensor &self) {
+        return torch::erfinv(self);
+      })
+    .define_singleton_method(
+      "_erfinv_out",
+      *[](const Tensor &self, Tensor &out) {
+        return torch::erfinv_out(out, self);
+      })
+    .define_singleton_method(
+      "_exp",
+      *[](const Tensor &self) {
+        return torch::exp(self);
+      })
+    .define_singleton_method(
+      "_exp_",
+      *[](Tensor &self) {
+        return torch::exp_(self);
+      })
+    .define_singleton_method(
+      "_exp_out",
+      *[](const Tensor &self, Tensor &out) {
+        return torch::exp_out(out, self);
+      })
+    .define_singleton_method(
+      "_expm1",
+      *[](const Tensor &self) {
+        return torch::expm1(self);
+      })
+    .define_singleton_method(
+      "_expm1_",
+      *[](Tensor &self) {
+        return torch::expm1_(self);
+      })
+    .define_singleton_method(
+      "_expm1_out",
+      *[](const Tensor &self, Tensor &out) {
+        return torch::expm1_out(out, self);
+      })
+    .define_singleton_method(
+      "_eye_m_out",
+      *[](int64_t n, int64_t m, Tensor &out) {
+        return torch::eye_out(out, n, m);
+      })
+    .define_singleton_method(
+      "_eye_out",
+      *[](int64_t n, Tensor &out) {
+        return torch::eye_out(out, n);
+      })
+    .define_singleton_method(
+      "_fake_quantize_per_channel_affine",
+      *[](const Tensor &self, const Tensor &scale, const Tensor &zero_point, int64_t axis, int64_t quant_min, int64_t quant_max) {
+        return torch::fake_quantize_per_channel_affine(self, scale, zero_point, axis, quant_min, quant_max);
+      })
+    .define_singleton_method(
+      "_fake_quantize_per_tensor_affine",
+      *[](const Tensor &self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max) {
+        return torch::fake_quantize_per_tensor_affine(self, scale, zero_point, quant_min, quant_max);
+      })
+    .define_singleton_method(
+      "_fbgemm_linear_fp16_weight",
+      *[](const Tensor &input, const Tensor &packed_weight, const Tensor &bias) {
+        return torch::fbgemm_linear_fp16_weight(input, packed_weight, bias);
+      })
+    .define_singleton_method(
+      "_fbgemm_linear_fp16_weight_fp32_activation",
+      *[](const Tensor &input, const Tensor &packed_weight, const Tensor &bias) {
+        return torch::fbgemm_linear_fp16_weight_fp32_activation(input, packed_weight, bias);
+      })
+    .define_singleton_method(
+      "_fbgemm_linear_int8_weight",
+      *[](const Tensor &input, const Tensor &weight, const Tensor &packed, const Tensor &col_offsets, Scalar weight_scale, Scalar weight_zero_point, const Tensor &bias) {
+        return torch::fbgemm_linear_int8_weight(input, weight, packed, col_offsets, weight_scale, weight_zero_point, bias);
+      })
+    .define_singleton_method(
+      "_fbgemm_linear_int8_weight_fp32_activation",
+      *[](const Tensor &input, const Tensor &weight, const Tensor &packed, const Tensor &col_offsets, Scalar weight_scale, Scalar weight_zero_point, const Tensor &bias) {
+        return torch::fbgemm_linear_int8_weight_fp32_activation(input, weight, packed, col_offsets, weight_scale, weight_zero_point, bias);
+      })
+    .define_singleton_method(
+      "_fbgemm_linear_quantize_weight",
+      *[](const Tensor &input) {
+        return wrap(torch::fbgemm_linear_quantize_weight(input));
+      })
+    .define_singleton_method(
+      "_fbgemm_pack_gemm_matrix_fp16",
+      *[](const Tensor &input) {
+        return torch::fbgemm_pack_gemm_matrix_fp16(input);
+      })
+    .define_singleton_method(
+      "_fbgemm_pack_quantized_matrix",
+      *[](const Tensor &input) {
+        return torch::fbgemm_pack_quantized_matrix(input);
+      })
+    .define_singleton_method(
+      "_fbgemm_pack_quantized_matrix_kn",
+      *[](const Tensor &input, int64_t K, int64_t N) {
+        return torch::fbgemm_pack_quantized_matrix(input, K, N);
+      })
+    .define_singleton_method(
+      "_feature_alpha_dropout",
+      *[](const Tensor &input, double p, bool train) {
+        return torch::feature_alpha_dropout(input, p, train);
+      })
+    .define_singleton_method(
+      "_feature_alpha_dropout_",
+      *[](Tensor &self, double p, bool train) {
+        return torch::feature_alpha_dropout_(self, p, train);
+      })
+    .define_singleton_method(
+      "_feature_dropout",
+      *[](const Tensor &input, double p, bool train) {
+        return torch::feature_dropout(input, p, train);
+      })
+    .define_singleton_method(
+      "_feature_dropout_",
+      *[](Tensor &self, double p, bool train) {
+        return torch::feature_dropout_(self, p, train);
+      })
+    .define_singleton_method(
+      "_fft",
+      *[](const Tensor &self, int64_t signal_ndim, bool normalized) {
+        return torch::fft(self, signal_ndim, normalized);
+      })
+    .define_singleton_method(
+      "_fill__scalar",
+      *[](Tensor &self, Scalar value) {
+        return torch::fill_(self, value);
+      })
+    .define_singleton_method(
+      "_fill__tensor",
+      *[](Tensor &self, const Tensor &value) {
+        return torch::fill_(self, value);
+      })
+    .define_singleton_method(
+      "_flatten_using_ints",
+      *[](const Tensor &self, int64_t start_dim, int64_t end_dim) {
+        return torch::flatten(self, start_dim, end_dim);
+      })
+    .define_singleton_method(
+      "_flip",
+      *[](const Tensor &self, IntArrayRef dims) {
+        return torch::flip(self, dims);
+      })
+    .define_singleton_method(
+      "_floor",
+      *[](const Tensor &self) {
+        return torch::floor(self);
+      })
+    .define_singleton_method(
+      "_floor_",
+      *[](Tensor &self) {
+        return torch::floor_(self);
+      })
+    .define_singleton_method(
+      "_floor_divide",
+      *[](const Tensor &self, const Tensor &other) {
+        return torch::floor_divide(self, other);
+      })
+    .define_singleton_method(
+      "_floor_divide_out",
+      *[](const Tensor &self, const Tensor &other, Tensor &out) {
+        return torch::floor_divide_out(out, self, other);
+      })
+    .define_singleton_method(
+      "_floor_divide_scalar",
+      *[](const Tensor &self, Scalar other) {
+        return torch::floor_divide(self, other);
+      })
+    .define_singleton_method(
+      "_floor_out",
+      *[](const Tensor &self, Tensor &out) {
+        return torch::floor_out(out, self);
+      })
+    .define_singleton_method(
+      "_fmod_scalar",
+      *[](const Tensor &self, Scalar other) {
+        return torch::fmod(self, other);
+      })
+    .define_singleton_method(
+      "_fmod_scalar_out",
+      *[](const Tensor &self, Scalar other, Tensor &out) {
+        return torch::fmod_out(out, self, other);
+      })
+    .define_singleton_method(
+      "_fmod_tensor",
+      *[](const Tensor &self, const Tensor &other) {
+        return torch::fmod(self, other);
+      })
+    .define_singleton_method(
+      "_fmod_tensor_out",
+      *[](const Tensor &self, const Tensor &other, Tensor &out) {
+        return torch::fmod_out(out, self, other);
+      })
+    .define_singleton_method(
+      "_frac",
+      *[](const Tensor &self) {
+        return torch::frac(self);
+      })
+    .define_singleton_method(
+      "_frac_",
+      *[](Tensor &self) {
+        return torch::frac_(self);
+      })
+    .define_singleton_method(
+      "_frac_out",
+      *[](const Tensor &self, Tensor &out) {
+        return torch::frac_out(out, self);
+      })
+    .define_singleton_method(
+      "_frobenius_norm",
+      *[](const Tensor &self) {
+        return torch::frobenius_norm(self);
+      })
+    .define_singleton_method(
+      "_frobenius_norm_dim",
+      *[](const Tensor &self, IntArrayRef dim, bool keepdim) {
+        return torch::frobenius_norm(self, dim, keepdim);
+      })
+    .define_singleton_method(
+      "_frobenius_norm_out",
+      *[](const Tensor &self, IntArrayRef dim, bool keepdim, Tensor &out) {
+        return torch::frobenius_norm_out(out, self, dim, keepdim);
+      })
+    .define_singleton_method(
+      "_full_out",
+      *[](IntArrayRef size, Scalar fill_value, Tensor &out) {
+        return torch::full_out(out, size, fill_value);
+      })
+    .define_singleton_method(
+      "_gather",
+      *[](const Tensor &self, int64_t dim, const Tensor &index, bool sparse_grad) {
+        return torch::gather(self, dim, index, sparse_grad);
+      })
+    .define_singleton_method(
+      "_gather_out",
+      *[](const Tensor &self, int64_t dim, const Tensor &index, bool sparse_grad, Tensor &out) {
+        return torch::gather_out(out, self, dim, index, sparse_grad);
+      })
+    .define_singleton_method(
+      "_ge_scalar",
+      *[](const Tensor &self, Scalar other) {
+        return torch::ge(self, other);
+      })
+    .define_singleton_method(
+      "_ge_scalar_out",
+      *[](const Tensor &self, Scalar other, Tensor &out) {
+        return torch::ge_out(out, self, other);
+      })
+    .define_singleton_method(
+      "_ge_tensor",
+      *[](const Tensor &self, const Tensor &other) {
+        return torch::ge(self, other);
+      })
+    .define_singleton_method(
+      "_ge_tensor_out",
+      *[](const Tensor &self, const Tensor &other, Tensor &out) {
+        return torch::ge_out(out, self, other);
+      })
+    .define_singleton_method(
+      "_geqrf",
+      *[](const Tensor &self) {
+        return wrap(torch::geqrf(self));
+      })
+    .define_singleton_method(
+      "_geqrf_a",
+      *[](const Tensor &self, Tensor &a, Tensor &tau) {
+        return wrap(torch::geqrf_out(a, tau, self));
+      })
+    .define_singleton_method(
+      "_ger",
+      *[](const Tensor &self, const Tensor &vec2) {
+        return torch::ger(self, vec2);
+      })
+    .define_singleton_method(
+      "_ger_out",
+      *[](const Tensor &self, const Tensor &vec2, Tensor &out) {
+        return torch::ger_out(out, self, vec2);
+      })
+    .define_singleton_method(
+      "_grid_sampler",
+      *[](const Tensor &input, const Tensor &grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {
+        return torch::grid_sampler(input, grid, interpolation_mode, padding_mode, align_corners);
+      })
+    .define_singleton_method(
+      "_grid_sampler_2d",
+      *[](const Tensor &input, const Tensor &grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {
+        return torch::grid_sampler_2d(input, grid, interpolation_mode, padding_mode, align_corners);
+      })
+    .define_singleton_method(
+      "_grid_sampler_3d",
+      *[](const Tensor &input, const Tensor &grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {
+        return torch::grid_sampler_3d(input, grid, interpolation_mode, padding_mode, align_corners);
+      })
+    .define_singleton_method(
+      "_group_norm",
+      *[](const Tensor &input, int64_t num_groups, OptionalTensor weight, OptionalTensor bias, double eps, bool cudnn_enabled) {
+        return torch::group_norm(input, num_groups, weight, bias, eps, cudnn_enabled);
+      })
+    .define_singleton_method(
+      "_gru_cell",
+      *[](const Tensor &input, const Tensor &hx, const Tensor &w_ih, const Tensor &w_hh, OptionalTensor b_ih, OptionalTensor b_hh) {
+        return torch::gru_cell(input, hx, w_ih, w_hh, b_ih, b_hh);
+      })
+    .define_singleton_method(
+      "_gru_data",
+      *[](const Tensor &data, const Tensor &batch_sizes, const Tensor &hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) {
+        return wrap(torch::gru(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional));
+      })
+    .define_singleton_method(
+      "_gru_input",
+      *[](const Tensor &input, const Tensor &hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
+        return wrap(torch::gru(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first));
+      })
+    .define_singleton_method(
+      "_gt_scalar",
+      *[](const Tensor &self, Scalar other) {
+        return torch::gt(self, other);
+      })
+    .define_singleton_method(
+      "_gt_scalar_out",
+      *[](const Tensor &self, Scalar other, Tensor &out) {
+        return torch::gt_out(out, self, other);
+      })
+    .define_singleton_method(
+      "_gt_tensor",
+      *[](const Tensor &self, const Tensor &other) {
+        return torch::gt(self, other);
+      })
+    .define_singleton_method(
+      "_gt_tensor_out",
+      *[](const Tensor &self, const Tensor &other, Tensor &out) {
+        return torch::gt_out(out, self, other);
+      })
+    .define_singleton_method(
+      "_hardshrink",
+      *[](const Tensor &self, Scalar lambd) {
+        return torch::hardshrink(self, lambd);
+      })
+    .define_singleton_method(
+      "_hinge_embedding_loss",
+      *[](const Tensor &self, const Tensor &target, double margin, MyReduction reduction) {
+        return torch::hinge_embedding_loss(self, target, margin, reduction);
+      })
+    .define_singleton_method(
+      "_histc",
+      *[](const Tensor &self, int64_t bins, Scalar min, Scalar max) {
+        return torch::histc(self, bins, min, max);
+      })
+    .define_singleton_method(
+      "_histc_out",
+      *[](const Tensor &self, int64_t bins, Scalar min, Scalar max, Tensor &out) {
+        return torch::histc_out(out, self, bins, min, max);
+      })
+    .define_singleton_method(
+      "_hspmm",
+      *[](const Tensor &mat1, const Tensor &mat2) {
+        return torch::hspmm(mat1, mat2);
+      })
+    .define_singleton_method(
+      "_hspmm_out",
+      *[](const Tensor &mat1, const Tensor &mat2, Tensor &out) {
|
1368
|
+
return torch::hspmm_out(out, mat1, mat2);
|
1369
|
+
})
|
1370
|
+
.define_singleton_method(
|
1371
|
+
"_ifft",
|
1372
|
+
*[](const Tensor &self, int64_t signal_ndim, bool normalized) {
|
1373
|
+
return torch::ifft(self, signal_ndim, normalized);
|
1374
|
+
})
|
1375
|
+
.define_singleton_method(
|
1376
|
+
"_imag",
|
1377
|
+
*[](const Tensor &self) {
|
1378
|
+
return torch::imag(self);
|
1379
|
+
})
|
1380
|
+
.define_singleton_method(
|
1381
|
+
"_index_add",
|
1382
|
+
*[](const Tensor &self, int64_t dim, const Tensor &index, const Tensor &source) {
|
1383
|
+
return torch::index_add(self, dim, index, source);
|
1384
|
+
})
|
1385
|
+
.define_singleton_method(
|
1386
|
+
"_index_copy",
|
1387
|
+
*[](const Tensor &self, int64_t dim, const Tensor &index, const Tensor &source) {
|
1388
|
+
return torch::index_copy(self, dim, index, source);
|
1389
|
+
})
|
1390
|
+
.define_singleton_method(
|
1391
|
+
"_index_fill_int_scalar",
|
1392
|
+
*[](const Tensor &self, int64_t dim, const Tensor &index, Scalar value) {
|
1393
|
+
return torch::index_fill(self, dim, index, value);
|
1394
|
+
})
|
1395
|
+
.define_singleton_method(
|
1396
|
+
"_index_fill_int_tensor",
|
1397
|
+
*[](const Tensor &self, int64_t dim, const Tensor &index, const Tensor &value) {
|
1398
|
+
return torch::index_fill(self, dim, index, value);
|
1399
|
+
})
|
1400
|
+
.define_singleton_method(
|
1401
|
+
"_index_select",
|
1402
|
+
*[](const Tensor &self, int64_t dim, const Tensor &index) {
|
1403
|
+
return torch::index_select(self, dim, index);
|
1404
|
+
})
|
1405
|
+
.define_singleton_method(
|
1406
|
+
"_index_select_out",
|
1407
|
+
*[](const Tensor &self, int64_t dim, const Tensor &index, Tensor &out) {
|
1408
|
+
return torch::index_select_out(out, self, dim, index);
|
1409
|
+
})
|
1410
|
+
.define_singleton_method(
|
1411
|
+
"_instance_norm",
|
1412
|
+
*[](const Tensor &input, OptionalTensor weight, OptionalTensor bias, OptionalTensor running_mean, OptionalTensor running_var, bool use_input_stats, double momentum, double eps, bool cudnn_enabled) {
|
1413
|
+
return torch::instance_norm(input, weight, bias, running_mean, running_var, use_input_stats, momentum, eps, cudnn_enabled);
|
1414
|
+
})
|
1415
|
+
.define_singleton_method(
|
1416
|
+
"_int_repr",
|
1417
|
+
*[](const Tensor &self) {
|
1418
|
+
return torch::int_repr(self);
|
1419
|
+
})
|
1420
|
+
.define_singleton_method(
|
1421
|
+
"_inverse",
|
1422
|
+
*[](const Tensor &self) {
|
1423
|
+
return torch::inverse(self);
|
1424
|
+
})
|
1425
|
+
.define_singleton_method(
|
1426
|
+
"_inverse_out",
|
1427
|
+
*[](const Tensor &self, Tensor &out) {
|
1428
|
+
return torch::inverse_out(out, self);
|
1429
|
+
})
|
1430
|
+
.define_singleton_method(
|
1431
|
+
"_irfft",
|
1432
|
+
*[](const Tensor &self, int64_t signal_ndim, bool normalized, bool onesided, IntArrayRef signal_sizes) {
|
1433
|
+
return torch::irfft(self, signal_ndim, normalized, onesided, signal_sizes);
|
1434
|
+
})
|
1435
|
+
.define_singleton_method(
|
1436
|
+
"_is_complex",
|
1437
|
+
*[](const Tensor &self) {
|
1438
|
+
return torch::is_complex(self);
|
1439
|
+
})
|
1440
|
+
.define_singleton_method(
|
1441
|
+
"_is_distributed",
|
1442
|
+
*[](const Tensor &self) {
|
1443
|
+
return torch::is_distributed(self);
|
1444
|
+
})
|
1445
|
+
.define_singleton_method(
|
1446
|
+
"_is_floating_point",
|
1447
|
+
*[](const Tensor &self) {
|
1448
|
+
return torch::is_floating_point(self);
|
1449
|
+
})
|
1450
|
+
.define_singleton_method(
|
1451
|
+
"_is_nonzero",
|
1452
|
+
*[](const Tensor &self) {
|
1453
|
+
return torch::is_nonzero(self);
|
1454
|
+
})
|
1455
|
+
.define_singleton_method(
|
1456
|
+
"_is_same_size",
|
1457
|
+
*[](const Tensor &self, const Tensor &other) {
|
1458
|
+
return torch::is_same_size(self, other);
|
1459
|
+
})
|
1460
|
+
.define_singleton_method(
|
1461
|
+
"_is_signed",
|
1462
|
+
*[](const Tensor &self) {
|
1463
|
+
return torch::is_signed(self);
|
1464
|
+
})
|
1465
|
+
.define_singleton_method(
|
1466
|
+
"_isclose",
|
1467
|
+
*[](const Tensor &self, const Tensor &other, double rtol, double atol, bool equal_nan) {
|
1468
|
+
return torch::isclose(self, other, rtol, atol, equal_nan);
|
1469
|
+
})
|
1470
|
+
.define_singleton_method(
|
1471
|
+
"_isfinite",
|
1472
|
+
*[](const Tensor &self) {
|
1473
|
+
return torch::isfinite(self);
|
1474
|
+
})
|
1475
|
+
.define_singleton_method(
|
1476
|
+
"_isinf",
|
1477
|
+
*[](const Tensor &self) {
|
1478
|
+
return torch::isinf(self);
|
1479
|
+
})
|
1480
|
+
.define_singleton_method(
|
1481
|
+
"_isnan",
|
1482
|
+
*[](const Tensor &self) {
|
1483
|
+
return torch::isnan(self);
|
1484
|
+
})
|
1485
|
+
.define_singleton_method(
|
1486
|
+
"_kl_div",
|
1487
|
+
*[](const Tensor &self, const Tensor &target, MyReduction reduction) {
|
1488
|
+
return torch::kl_div(self, target, reduction);
|
1489
|
+
})
|
1490
|
+
.define_singleton_method(
|
1491
|
+
"_kthvalue",
|
1492
|
+
*[](const Tensor &self, int64_t k, int64_t dim, bool keepdim) {
|
1493
|
+
return wrap(torch::kthvalue(self, k, dim, keepdim));
|
1494
|
+
})
|
1495
|
+
.define_singleton_method(
|
1496
|
+
"_kthvalue_values",
|
1497
|
+
*[](const Tensor &self, int64_t k, int64_t dim, bool keepdim, Tensor &values, Tensor &indices) {
|
1498
|
+
return wrap(torch::kthvalue_out(values, indices, self, k, dim, keepdim));
|
1499
|
+
})
|
1500
|
+
.define_singleton_method(
|
1501
|
+
"_layer_norm",
|
1502
|
+
*[](const Tensor &input, IntArrayRef normalized_shape, OptionalTensor weight, OptionalTensor bias, double eps, bool cudnn_enable) {
|
1503
|
+
return torch::layer_norm(input, normalized_shape, weight, bias, eps, cudnn_enable);
|
1504
|
+
})
|
1505
|
+
.define_singleton_method(
|
1506
|
+
"_le_scalar",
|
1507
|
+
*[](const Tensor &self, Scalar other) {
|
1508
|
+
return torch::le(self, other);
|
1509
|
+
})
|
1510
|
+
.define_singleton_method(
|
1511
|
+
"_le_scalar_out",
|
1512
|
+
*[](const Tensor &self, Scalar other, Tensor &out) {
|
1513
|
+
return torch::le_out(out, self, other);
|
1514
|
+
})
|
1515
|
+
.define_singleton_method(
|
1516
|
+
"_le_tensor",
|
1517
|
+
*[](const Tensor &self, const Tensor &other) {
|
1518
|
+
return torch::le(self, other);
|
1519
|
+
})
|
1520
|
+
.define_singleton_method(
|
1521
|
+
"_le_tensor_out",
|
1522
|
+
*[](const Tensor &self, const Tensor &other, Tensor &out) {
|
1523
|
+
return torch::le_out(out, self, other);
|
1524
|
+
})
|
1525
|
+
.define_singleton_method(
|
1526
|
+
"_lerp_scalar",
|
1527
|
+
*[](const Tensor &self, const Tensor &end, Scalar weight) {
|
1528
|
+
return torch::lerp(self, end, weight);
|
1529
|
+
})
|
1530
|
+
.define_singleton_method(
|
1531
|
+
"_lerp_scalar_out",
|
1532
|
+
*[](const Tensor &self, const Tensor &end, Scalar weight, Tensor &out) {
|
1533
|
+
return torch::lerp_out(out, self, end, weight);
|
1534
|
+
})
|
1535
|
+
.define_singleton_method(
|
1536
|
+
"_lerp_tensor",
|
1537
|
+
*[](const Tensor &self, const Tensor &end, const Tensor &weight) {
|
1538
|
+
return torch::lerp(self, end, weight);
|
1539
|
+
})
|
1540
|
+
.define_singleton_method(
|
1541
|
+
"_lerp_tensor_out",
|
1542
|
+
*[](const Tensor &self, const Tensor &end, const Tensor &weight, Tensor &out) {
|
1543
|
+
return torch::lerp_out(out, self, end, weight);
|
1544
|
+
})
|
1545
|
+
.define_singleton_method(
|
1546
|
+
"_lgamma",
|
1547
|
+
*[](const Tensor &self) {
|
1548
|
+
return torch::lgamma(self);
|
1549
|
+
})
|
1550
|
+
.define_singleton_method(
|
1551
|
+
"_lgamma_out",
|
1552
|
+
*[](const Tensor &self, Tensor &out) {
|
1553
|
+
return torch::lgamma_out(out, self);
|
1554
|
+
})
|
1555
|
+
.define_singleton_method(
|
1556
|
+
"_linspace_out",
|
1557
|
+
*[](Scalar start, Scalar end, int64_t steps, Tensor &out) {
|
1558
|
+
return torch::linspace_out(out, start, end, steps);
|
1559
|
+
})
|
1560
|
+
.define_singleton_method(
|
1561
|
+
"_log",
|
1562
|
+
*[](const Tensor &self) {
|
1563
|
+
return torch::log(self);
|
1564
|
+
})
|
1565
|
+
.define_singleton_method(
|
1566
|
+
"_log10",
|
1567
|
+
*[](const Tensor &self) {
|
1568
|
+
return torch::log10(self);
|
1569
|
+
})
|
1570
|
+
.define_singleton_method(
|
1571
|
+
"_log10_",
|
1572
|
+
*[](Tensor &self) {
|
1573
|
+
return torch::log10_(self);
|
1574
|
+
})
|
1575
|
+
.define_singleton_method(
|
1576
|
+
"_log10_out",
|
1577
|
+
*[](const Tensor &self, Tensor &out) {
|
1578
|
+
return torch::log10_out(out, self);
|
1579
|
+
})
|
1580
|
+
.define_singleton_method(
|
1581
|
+
"_log1p",
|
1582
|
+
*[](const Tensor &self) {
|
1583
|
+
return torch::log1p(self);
|
1584
|
+
})
|
1585
|
+
.define_singleton_method(
|
1586
|
+
"_log1p_",
|
1587
|
+
*[](Tensor &self) {
|
1588
|
+
return torch::log1p_(self);
|
1589
|
+
})
|
1590
|
+
.define_singleton_method(
|
1591
|
+
"_log1p_out",
|
1592
|
+
*[](const Tensor &self, Tensor &out) {
|
1593
|
+
return torch::log1p_out(out, self);
|
1594
|
+
})
|
1595
|
+
.define_singleton_method(
|
1596
|
+
"_log2",
|
1597
|
+
*[](const Tensor &self) {
|
1598
|
+
return torch::log2(self);
|
1599
|
+
})
|
1600
|
+
.define_singleton_method(
|
1601
|
+
"_log2_",
|
1602
|
+
*[](Tensor &self) {
|
1603
|
+
return torch::log2_(self);
|
1604
|
+
})
|
1605
|
+
.define_singleton_method(
|
1606
|
+
"_log2_out",
|
1607
|
+
*[](const Tensor &self, Tensor &out) {
|
1608
|
+
return torch::log2_out(out, self);
|
1609
|
+
})
|
1610
|
+
.define_singleton_method(
|
1611
|
+
"_log_",
|
1612
|
+
*[](Tensor &self) {
|
1613
|
+
return torch::log_(self);
|
1614
|
+
})
|
1615
|
+
.define_singleton_method(
|
1616
|
+
"_log_out",
|
1617
|
+
*[](const Tensor &self, Tensor &out) {
|
1618
|
+
return torch::log_out(out, self);
|
1619
|
+
})
|
1620
|
+
.define_singleton_method(
|
1621
|
+
"_log_softmax_int",
|
1622
|
+
*[](const Tensor &self, int64_t dim, OptionalScalarType dtype) {
|
1623
|
+
return torch::log_softmax(self, dim, dtype);
|
1624
|
+
})
|
1625
|
+
.define_singleton_method(
|
1626
|
+
"_logdet",
|
1627
|
+
*[](const Tensor &self) {
|
1628
|
+
return torch::logdet(self);
|
1629
|
+
})
|
1630
|
+
.define_singleton_method(
|
1631
|
+
"_logical_and",
|
1632
|
+
*[](const Tensor &self, const Tensor &other) {
|
1633
|
+
return torch::logical_and(self, other);
|
1634
|
+
})
|
1635
|
+
.define_singleton_method(
|
1636
|
+
"_logical_and_out",
|
1637
|
+
*[](const Tensor &self, const Tensor &other, Tensor &out) {
|
1638
|
+
return torch::logical_and_out(out, self, other);
|
1639
|
+
})
|
1640
|
+
.define_singleton_method(
|
1641
|
+
"_logical_not",
|
1642
|
+
*[](const Tensor &self) {
|
1643
|
+
return torch::logical_not(self);
|
1644
|
+
})
|
1645
|
+
.define_singleton_method(
|
1646
|
+
"_logical_not_out",
|
1647
|
+
*[](const Tensor &self, Tensor &out) {
|
1648
|
+
return torch::logical_not_out(out, self);
|
1649
|
+
})
|
1650
|
+
.define_singleton_method(
|
1651
|
+
"_logical_or",
|
1652
|
+
*[](const Tensor &self, const Tensor &other) {
|
1653
|
+
return torch::logical_or(self, other);
|
1654
|
+
})
|
1655
|
+
.define_singleton_method(
|
1656
|
+
"_logical_or_out",
|
1657
|
+
*[](const Tensor &self, const Tensor &other, Tensor &out) {
|
1658
|
+
return torch::logical_or_out(out, self, other);
|
1659
|
+
})
|
1660
|
+
.define_singleton_method(
|
1661
|
+
"_logical_xor",
|
1662
|
+
*[](const Tensor &self, const Tensor &other) {
|
1663
|
+
return torch::logical_xor(self, other);
|
1664
|
+
})
|
1665
|
+
.define_singleton_method(
|
1666
|
+
"_logical_xor_out",
|
1667
|
+
*[](const Tensor &self, const Tensor &other, Tensor &out) {
|
1668
|
+
return torch::logical_xor_out(out, self, other);
|
1669
|
+
})
|
1670
|
+
.define_singleton_method(
|
1671
|
+
"_logspace_out",
|
1672
|
+
*[](Scalar start, Scalar end, int64_t steps, double base, Tensor &out) {
|
1673
|
+
return torch::logspace_out(out, start, end, steps, base);
|
1674
|
+
})
|
1675
|
+
.define_singleton_method(
|
1676
|
+
"_logsumexp",
|
1677
|
+
*[](const Tensor &self, IntArrayRef dim, bool keepdim) {
|
1678
|
+
return torch::logsumexp(self, dim, keepdim);
|
1679
|
+
})
|
1680
|
+
.define_singleton_method(
|
1681
|
+
"_logsumexp_out",
|
1682
|
+
*[](const Tensor &self, IntArrayRef dim, bool keepdim, Tensor &out) {
|
1683
|
+
return torch::logsumexp_out(out, self, dim, keepdim);
|
1684
|
+
})
|
1685
|
+
.define_singleton_method(
|
1686
|
+
"_lstm_cell",
|
1687
|
+
*[](const Tensor &input, TensorList hx, const Tensor &w_ih, const Tensor &w_hh, OptionalTensor b_ih, OptionalTensor b_hh) {
|
1688
|
+
return wrap(torch::lstm_cell(input, hx, w_ih, w_hh, b_ih, b_hh));
|
1689
|
+
})
|
1690
|
+
.define_singleton_method(
|
1691
|
+
"_lstm_data",
|
1692
|
+
*[](const Tensor &data, const Tensor &batch_sizes, TensorList hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) {
|
1693
|
+
return wrap(torch::lstm(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional));
|
1694
|
+
})
|
1695
|
+
.define_singleton_method(
|
1696
|
+
"_lstm_input",
|
1697
|
+
*[](const Tensor &input, TensorList hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
|
1698
|
+
return wrap(torch::lstm(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first));
|
1699
|
+
})
|
1700
|
+
.define_singleton_method(
|
1701
|
+
"_lstsq",
|
1702
|
+
*[](const Tensor &self, const Tensor &A) {
|
1703
|
+
return wrap(torch::lstsq(self, A));
|
1704
|
+
})
|
1705
|
+
.define_singleton_method(
|
1706
|
+
"_lstsq_x",
|
1707
|
+
*[](const Tensor &self, const Tensor &A, Tensor &X, Tensor &qr) {
|
1708
|
+
return wrap(torch::lstsq_out(X, qr, self, A));
|
1709
|
+
})
|
1710
|
+
.define_singleton_method(
|
1711
|
+
"_lt_scalar",
|
1712
|
+
*[](const Tensor &self, Scalar other) {
|
1713
|
+
return torch::lt(self, other);
|
1714
|
+
})
|
1715
|
+
.define_singleton_method(
|
1716
|
+
"_lt_scalar_out",
|
1717
|
+
*[](const Tensor &self, Scalar other, Tensor &out) {
|
1718
|
+
return torch::lt_out(out, self, other);
|
1719
|
+
})
|
1720
|
+
.define_singleton_method(
|
1721
|
+
"_lt_tensor",
|
1722
|
+
*[](const Tensor &self, const Tensor &other) {
|
1723
|
+
return torch::lt(self, other);
|
1724
|
+
})
|
1725
|
+
.define_singleton_method(
|
1726
|
+
"_lt_tensor_out",
|
1727
|
+
*[](const Tensor &self, const Tensor &other, Tensor &out) {
|
1728
|
+
return torch::lt_out(out, self, other);
|
1729
|
+
})
|
1730
|
+
.define_singleton_method(
|
1731
|
+
"_lu_solve",
|
1732
|
+
*[](const Tensor &self, const Tensor &LU_data, const Tensor &LU_pivots) {
|
1733
|
+
return torch::lu_solve(self, LU_data, LU_pivots);
|
1734
|
+
})
|
1735
|
+
.define_singleton_method(
|
1736
|
+
"_lu_solve_out",
|
1737
|
+
*[](const Tensor &self, const Tensor &LU_data, const Tensor &LU_pivots, Tensor &out) {
|
1738
|
+
return torch::lu_solve_out(out, self, LU_data, LU_pivots);
|
1739
|
+
})
|
1740
|
+
.define_singleton_method(
|
1741
|
+
"_margin_ranking_loss",
|
1742
|
+
*[](const Tensor &input1, const Tensor &input2, const Tensor &target, double margin, MyReduction reduction) {
|
1743
|
+
return torch::margin_ranking_loss(input1, input2, target, margin, reduction);
|
1744
|
+
})
|
1745
|
+
.define_singleton_method(
|
1746
|
+
"_masked_fill_scalar",
|
1747
|
+
*[](const Tensor &self, const Tensor &mask, Scalar value) {
|
1748
|
+
return torch::masked_fill(self, mask, value);
|
1749
|
+
})
|
1750
|
+
.define_singleton_method(
|
1751
|
+
"_masked_fill_tensor",
|
1752
|
+
*[](const Tensor &self, const Tensor &mask, const Tensor &value) {
|
1753
|
+
return torch::masked_fill(self, mask, value);
|
1754
|
+
})
|
1755
|
+
.define_singleton_method(
|
1756
|
+
"_masked_scatter",
|
1757
|
+
*[](const Tensor &self, const Tensor &mask, const Tensor &source) {
|
1758
|
+
return torch::masked_scatter(self, mask, source);
|
1759
|
+
})
|
1760
|
+
.define_singleton_method(
|
1761
|
+
"_masked_select",
|
1762
|
+
*[](const Tensor &self, const Tensor &mask) {
|
1763
|
+
return torch::masked_select(self, mask);
|
1764
|
+
})
|
1765
|
+
.define_singleton_method(
|
1766
|
+
"_masked_select_out",
|
1767
|
+
*[](const Tensor &self, const Tensor &mask, Tensor &out) {
|
1768
|
+
return torch::masked_select_out(out, self, mask);
|
1769
|
+
})
|
1770
|
+
.define_singleton_method(
|
1771
|
+
"_matmul",
|
1772
|
+
*[](const Tensor &self, const Tensor &other) {
|
1773
|
+
return torch::matmul(self, other);
|
1774
|
+
})
|
1775
|
+
.define_singleton_method(
|
1776
|
+
"_matmul_out",
|
1777
|
+
*[](const Tensor &self, const Tensor &other, Tensor &out) {
|
1778
|
+
return torch::matmul_out(out, self, other);
|
1779
|
+
})
|
1780
|
+
.define_singleton_method(
|
1781
|
+
"_matrix_power",
|
1782
|
+
*[](const Tensor &self, int64_t n) {
|
1783
|
+
return torch::matrix_power(self, n);
|
1784
|
+
})
|
1785
|
+
.define_singleton_method(
|
1786
|
+
"_matrix_rank",
|
1787
|
+
*[](const Tensor &self, bool symmetric) {
|
1788
|
+
return torch::matrix_rank(self, symmetric);
|
1789
|
+
})
|
1790
|
+
.define_singleton_method(
|
1791
|
+
"_matrix_rank_tol",
|
1792
|
+
*[](const Tensor &self, double tol, bool symmetric) {
|
1793
|
+
return torch::matrix_rank(self, tol, symmetric);
|
1794
|
+
})
|
1795
|
+
.define_singleton_method(
|
1796
|
+
"_max",
|
1797
|
+
*[](const Tensor &self) {
|
1798
|
+
return torch::max(self);
|
1799
|
+
})
|
1800
|
+
.define_singleton_method(
|
1801
|
+
"_max_dim",
|
1802
|
+
*[](const Tensor &self, int64_t dim, bool keepdim) {
|
1803
|
+
return wrap(torch::max(self, dim, keepdim));
|
1804
|
+
})
|
1805
|
+
.define_singleton_method(
|
1806
|
+
"_max_dim_max",
|
1807
|
+
*[](const Tensor &self, int64_t dim, bool keepdim, Tensor &max, Tensor &max_values) {
|
1808
|
+
return wrap(torch::max_out(max, max_values, self, dim, keepdim));
|
1809
|
+
})
|
1810
|
+
.define_singleton_method(
|
1811
|
+
"_max_other",
|
1812
|
+
*[](const Tensor &self, const Tensor &other) {
|
1813
|
+
return torch::max(self, other);
|
1814
|
+
})
|
1815
|
+
.define_singleton_method(
|
1816
|
+
"_max_out",
|
1817
|
+
*[](const Tensor &self, const Tensor &other, Tensor &out) {
|
1818
|
+
return torch::max_out(out, self, other);
|
1819
|
+
})
|
1820
|
+
.define_singleton_method(
|
1821
|
+
"_max_pool1d",
|
1822
|
+
*[](const Tensor &self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) {
|
1823
|
+
return torch::max_pool1d(self, kernel_size, stride, padding, dilation, ceil_mode);
|
1824
|
+
})
|
1825
|
+
.define_singleton_method(
|
1826
|
+
"_max_pool1d_with_indices",
|
1827
|
+
*[](const Tensor &self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) {
|
1828
|
+
return wrap(torch::max_pool1d_with_indices(self, kernel_size, stride, padding, dilation, ceil_mode));
|
1829
|
+
})
|
1830
|
+
.define_singleton_method(
|
1831
|
+
"_max_pool2d",
|
1832
|
+
*[](const Tensor &self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) {
|
1833
|
+
return torch::max_pool2d(self, kernel_size, stride, padding, dilation, ceil_mode);
|
1834
|
+
})
|
1835
|
+
.define_singleton_method(
|
1836
|
+
"_max_pool3d",
|
1837
|
+
*[](const Tensor &self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) {
|
1838
|
+
return torch::max_pool3d(self, kernel_size, stride, padding, dilation, ceil_mode);
|
1839
|
+
})
|
1840
|
+
.define_singleton_method(
|
1841
|
+
"_max_values",
|
1842
|
+
*[](const Tensor &self, IntArrayRef dim, bool keepdim) {
|
1843
|
+
return torch::max_values(self, dim, keepdim);
|
1844
|
+
})
|
1845
|
+
.define_singleton_method(
|
1846
|
+
"_mean",
|
1847
|
+
*[](const Tensor &self, OptionalScalarType dtype) {
|
1848
|
+
return torch::mean(self, dtype);
|
1849
|
+
})
|
1850
|
+
.define_singleton_method(
|
1851
|
+
"_mean_dim",
|
1852
|
+
*[](const Tensor &self, IntArrayRef dim, bool keepdim, OptionalScalarType dtype) {
|
1853
|
+
return torch::mean(self, dim, keepdim, dtype);
|
1854
|
+
})
|
1855
|
+
.define_singleton_method(
|
1856
|
+
"_mean_out",
|
1857
|
+
*[](const Tensor &self, IntArrayRef dim, bool keepdim, OptionalScalarType dtype, Tensor &out) {
|
1858
|
+
return torch::mean_out(out, self, dim, keepdim, dtype);
|
1859
|
+
})
|
1860
|
+
.define_singleton_method(
|
1861
|
+
"_median",
|
1862
|
+
*[](const Tensor &self) {
|
1863
|
+
return torch::median(self);
|
1864
|
+
})
|
1865
|
+
.define_singleton_method(
|
1866
|
+
"_median_dim",
|
1867
|
+
*[](const Tensor &self, int64_t dim, bool keepdim) {
|
1868
|
+
return wrap(torch::median(self, dim, keepdim));
|
1869
|
+
})
|
1870
|
+
.define_singleton_method(
|
1871
|
+
"_median_dim_values",
|
1872
|
+
*[](const Tensor &self, int64_t dim, bool keepdim, Tensor &values, Tensor &indices) {
|
1873
|
+
return wrap(torch::median_out(values, indices, self, dim, keepdim));
|
1874
|
+
})
|
1875
|
+
.define_singleton_method(
|
1876
|
+
"_meshgrid",
|
1877
|
+
*[](TensorList tensors) {
|
1878
|
+
return torch::meshgrid(tensors);
|
1879
|
+
})
|
1880
|
+
.define_singleton_method(
|
1881
|
+
"_min",
|
1882
|
+
*[](const Tensor &self) {
|
1883
|
+
return torch::min(self);
|
1884
|
+
})
|
1885
|
+
.define_singleton_method(
|
1886
|
+
"_min_dim",
|
1887
|
+
*[](const Tensor &self, int64_t dim, bool keepdim) {
|
1888
|
+
return wrap(torch::min(self, dim, keepdim));
|
1889
|
+
})
|
1890
|
+
.define_singleton_method(
|
1891
|
+
"_min_dim_min",
|
1892
|
+
*[](const Tensor &self, int64_t dim, bool keepdim, Tensor &min, Tensor &min_indices) {
|
1893
|
+
return wrap(torch::min_out(min, min_indices, self, dim, keepdim));
|
1894
|
+
})
|
1895
|
+
.define_singleton_method(
|
1896
|
+
"_min_other",
|
1897
|
+
*[](const Tensor &self, const Tensor &other) {
|
1898
|
+
return torch::min(self, other);
|
1899
|
+
})
|
1900
|
+
.define_singleton_method(
|
1901
|
+
"_min_out",
|
1902
|
+
*[](const Tensor &self, const Tensor &other, Tensor &out) {
|
1903
|
+
return torch::min_out(out, self, other);
|
1904
|
+
})
|
1905
|
+
.define_singleton_method(
|
1906
|
+
"_min_values",
|
1907
|
+
*[](const Tensor &self, IntArrayRef dim, bool keepdim) {
|
1908
|
+
return torch::min_values(self, dim, keepdim);
|
1909
|
+
})
|
1910
|
+
.define_singleton_method(
|
1911
|
+
"_miopen_batch_norm",
|
1912
|
+
*[](const Tensor &input, const Tensor &weight, OptionalTensor bias, OptionalTensor running_mean, OptionalTensor running_var, bool training, double exponential_average_factor, double epsilon) {
|
1913
|
+
return wrap(torch::miopen_batch_norm(input, weight, bias, running_mean, running_var, training, exponential_average_factor, epsilon));
|
1914
|
+
})
|
1915
|
+
.define_singleton_method(
|
1916
|
+
"_miopen_convolution",
|
1917
|
+
*[](const Tensor &self, const Tensor &weight, OptionalTensor bias, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) {
|
1918
|
+
return torch::miopen_convolution(self, weight, bias, padding, stride, dilation, groups, benchmark, deterministic);
|
1919
|
+
})
|
1920
|
+
.define_singleton_method(
|
1921
|
+
"_miopen_convolution_backward_bias",
|
1922
|
+
*[](const Tensor &grad_output) {
|
1923
|
+
return torch::miopen_convolution_backward_bias(grad_output);
|
1924
|
+
})
|
1925
|
+
.define_singleton_method(
|
1926
|
+
"_miopen_convolution_backward_input",
|
1927
|
+
*[](IntArrayRef self_size, const Tensor &grad_output, const Tensor &weight, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) {
|
1928
|
+
return torch::miopen_convolution_backward_input(self_size, grad_output, weight, padding, stride, dilation, groups, benchmark, deterministic);
|
1929
|
+
})
|
1930
|
+
.define_singleton_method(
|
1931
|
+
"_miopen_convolution_backward_weight",
|
1932
|
+
*[](IntArrayRef weight_size, const Tensor &grad_output, const Tensor &self, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) {
|
1933
|
+
return torch::miopen_convolution_backward_weight(weight_size, grad_output, self, padding, stride, dilation, groups, benchmark, deterministic);
|
1934
|
+
})
|
1935
|
+
.define_singleton_method(
|
1936
|
+
"_miopen_convolution_transpose",
|
1937
|
+
*[](const Tensor &self, const Tensor &weight, OptionalTensor bias, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) {
|
1938
|
+
return torch::miopen_convolution_transpose(self, weight, bias, padding, output_padding, stride, dilation, groups, benchmark, deterministic);
|
1939
|
+
})
|
1940
|
+
.define_singleton_method(
|
1941
|
+
"_miopen_convolution_transpose_backward_input",
|
1942
|
+
*[](const Tensor &grad_output, const Tensor &weight, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) {
|
1943
|
+
return torch::miopen_convolution_transpose_backward_input(grad_output, weight, padding, stride, dilation, groups, benchmark, deterministic);
|
1944
|
+
})
|
1945
|
+
.define_singleton_method(
|
1946
|
+
"_miopen_convolution_transpose_backward_weight",
|
1947
|
+
*[](IntArrayRef weight_size, const Tensor &grad_output, const Tensor &self, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) {
|
1948
|
+
return torch::miopen_convolution_transpose_backward_weight(weight_size, grad_output, self, padding, stride, dilation, groups, benchmark, deterministic);
|
1949
|
+
})
|
1950
|
+
.define_singleton_method(
|
1951
|
+
"_miopen_depthwise_convolution",
|
1952
|
+
*[](const Tensor &self, const Tensor &weight, OptionalTensor bias, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) {
|
1953
|
+
return torch::miopen_depthwise_convolution(self, weight, bias, padding, stride, dilation, groups, benchmark, deterministic);
|
1954
|
+
})
|
1955
|
+
.define_singleton_method(
|
1956
|
+
"_miopen_depthwise_convolution_backward_input",
|
1957
|
+
*[](IntArrayRef self_size, const Tensor &grad_output, const Tensor &weight, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) {
|
1958
|
+
return torch::miopen_depthwise_convolution_backward_input(self_size, grad_output, weight, padding, stride, dilation, groups, benchmark, deterministic);
|
1959
|
+
})
|
1960
|
+
.define_singleton_method(
|
1961
|
+
"_miopen_depthwise_convolution_backward_weight",
|
1962
|
+
*[](IntArrayRef weight_size, const Tensor &grad_output, const Tensor &self, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) {
|
1963
|
+
return torch::miopen_depthwise_convolution_backward_weight(weight_size, grad_output, self, padding, stride, dilation, groups, benchmark, deterministic);
|
1964
|
+
})
|
1965
|
+
.define_singleton_method(
|
1966
|
+
"_miopen_rnn",
|
1967
|
+
*[](const Tensor &input, TensorList weight, int64_t weight_stride0, const Tensor &hx, OptionalTensor cx, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, IntArrayRef batch_sizes, OptionalTensor dropout_state) {
|
1968
|
+
return wrap(torch::miopen_rnn(input, weight, weight_stride0, hx, cx, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state));
|
1969
|
+
})
|
1970
|
+
.define_singleton_method(
|
1971
|
+
"_mkldnn_adaptive_avg_pool2d",
|
1972
|
+
*[](const Tensor &self, IntArrayRef output_size) {
|
1973
|
+
return torch::mkldnn_adaptive_avg_pool2d(self, output_size);
|
1974
|
+
})
|
1975
|
+
.define_singleton_method(
|
1976
|
+
"_mkldnn_convolution",
|
1977
|
+
*[](const Tensor &self, const Tensor &weight, OptionalTensor bias, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups) {
|
1978
|
+
return torch::mkldnn_convolution(self, weight, bias, padding, stride, dilation, groups);
|
1979
|
+
})
|
1980
|
+
.define_singleton_method(
|
1981
|
+
"_mkldnn_convolution_backward_input",
|
1982
|
+
*[](IntArrayRef self_size, const Tensor &grad_output, const Tensor &weight, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool bias_defined) {
|
1983
|
+
return torch::mkldnn_convolution_backward_input(self_size, grad_output, weight, padding, stride, dilation, groups, bias_defined);
|
1984
|
+
})
|
1985
|
+
.define_singleton_method(
|
1986
|
+
"_mkldnn_convolution_backward_weights",
|
1987
|
+
*[](IntArrayRef weight_size, const Tensor &grad_output, const Tensor &self, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool bias_defined) {
|
1988
|
+
return wrap(torch::mkldnn_convolution_backward_weights(weight_size, grad_output, self, padding, stride, dilation, groups, bias_defined));
|
1989
|
+
})
|
1990
|
+
.define_singleton_method(
|
1991
|
+
"_mkldnn_max_pool2d",
|
1992
|
+
*[](const Tensor &self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) {
|
1993
|
+
return torch::mkldnn_max_pool2d(self, kernel_size, stride, padding, dilation, ceil_mode);
|
1994
|
+
})
|
1995
|
+
.define_singleton_method(
|
1996
|
+
"_mm",
|
1997
|
+
*[](const Tensor &self, const Tensor &mat2) {
|
1998
|
+
return torch::mm(self, mat2);
|
1999
|
+
})
|
2000
|
+
.define_singleton_method(
|
2001
|
+
"_mm_out",
|
2002
|
+
*[](const Tensor &self, const Tensor &mat2, Tensor &out) {
|
2003
|
+
return torch::mm_out(out, self, mat2);
|
2004
|
+
})
|
2005
|
+
.define_singleton_method(
|
2006
|
+
"_mode",
|
2007
|
+
*[](const Tensor &self, int64_t dim, bool keepdim) {
|
2008
|
+
return wrap(torch::mode(self, dim, keepdim));
|
2009
|
+
})
|
2010
|
+
.define_singleton_method(
|
2011
|
+
"_mode_values",
|
2012
|
+
*[](const Tensor &self, int64_t dim, bool keepdim, Tensor &values, Tensor &indices) {
|
2013
|
+
return wrap(torch::mode_out(values, indices, self, dim, keepdim));
|
2014
|
+
})
|
2015
|
+
.define_singleton_method(
|
2016
|
+
"_mul_out",
|
2017
|
+
*[](const Tensor &self, const Tensor &other, Tensor &out) {
|
2018
|
+
return torch::mul_out(out, self, other);
|
2019
|
+
})
|
2020
|
+
.define_singleton_method(
|
2021
|
+
"_mul_scalar",
|
2022
|
+
*[](const Tensor &self, Scalar other) {
|
2023
|
+
return torch::mul(self, other);
|
2024
|
+
})
|
2025
|
+
.define_singleton_method(
|
2026
|
+
"_mul_tensor",
|
2027
|
+
*[](const Tensor &self, const Tensor &other) {
|
2028
|
+
return torch::mul(self, other);
|
2029
|
+
})
|
2030
|
+
.define_singleton_method(
|
2031
|
+
"_multinomial",
|
2032
|
+
*[](const Tensor &self, int64_t num_samples, bool replacement) {
|
2033
|
+
return torch::multinomial(self, num_samples, replacement);
|
2034
|
+
})
|
2035
|
+
.define_singleton_method(
|
2036
|
+
"_multinomial_out",
|
2037
|
+
*[](const Tensor &self, int64_t num_samples, bool replacement, Tensor &out) {
|
2038
|
+
return torch::multinomial_out(out, self, num_samples, replacement);
|
2039
|
+
})
|
2040
|
+
.define_singleton_method(
|
2041
|
+
"_mv",
|
2042
|
+
*[](const Tensor &self, const Tensor &vec) {
|
2043
|
+
return torch::mv(self, vec);
|
2044
|
+
})
|
2045
|
+
.define_singleton_method(
|
2046
|
+
"_mv_out",
|
2047
|
+
*[](const Tensor &self, const Tensor &vec, Tensor &out) {
|
2048
|
+
return torch::mv_out(out, self, vec);
|
2049
|
+
})
|
2050
|
+
.define_singleton_method(
|
2051
|
+
"_mvlgamma",
|
2052
|
+
*[](const Tensor &self, int64_t p) {
|
2053
|
+
return torch::mvlgamma(self, p);
|
2054
|
+
})
|
2055
|
+
.define_singleton_method(
|
2056
|
+
"_narrow",
|
2057
|
+
*[](Tensor &self, int64_t dim, int64_t start, int64_t length) {
|
2058
|
+
return torch::narrow(self, dim, start, length);
|
2059
|
+
})
|
2060
|
+
.define_singleton_method(
|
2061
|
+
"_narrow_tensor",
|
2062
|
+
*[](Tensor &self, int64_t dim, const Tensor &start, int64_t length) {
|
2063
|
+
return torch::narrow(self, dim, start, length);
|
2064
|
+
})
|
2065
|
+
.define_singleton_method(
|
2066
|
+
"_native_batch_norm",
|
2067
|
+
*[](const Tensor &input, OptionalTensor weight, OptionalTensor bias, OptionalTensor running_mean, OptionalTensor running_var, bool training, double momentum, double eps) {
|
2068
|
+
return wrap(torch::native_batch_norm(input, weight, bias, running_mean, running_var, training, momentum, eps));
|
2069
|
+
})
|
2070
|
+
.define_singleton_method(
|
2071
|
+
"_native_batch_norm_out",
|
2072
|
+
*[](const Tensor &input, OptionalTensor weight, OptionalTensor bias, OptionalTensor running_mean, OptionalTensor running_var, bool training, double momentum, double eps, Tensor &out, Tensor &save_mean, Tensor &save_invstd) {
|
2073
|
+
return wrap(torch::native_batch_norm_out(out, save_mean, save_invstd, input, weight, bias, running_mean, running_var, training, momentum, eps));
|
2074
|
+
})
|
2075
|
+
.define_singleton_method(
|
2076
|
+
"_native_layer_norm",
|
2077
|
+
*[](const Tensor &input, OptionalTensor weight, OptionalTensor bias, int64_t M, int64_t N, double eps) {
|
2078
|
+
return wrap(torch::native_layer_norm(input, weight, bias, M, N, eps));
|
2079
|
+
})
|
2080
|
+
.define_singleton_method(
|
2081
|
+
"_native_norm",
|
2082
|
+
*[](const Tensor &self, Scalar p) {
|
2083
|
+
return torch::native_norm(self, p);
|
2084
|
+
})
|
2085
|
+
.define_singleton_method(
|
2086
|
+
"_ne_scalar",
|
2087
|
+
*[](const Tensor &self, Scalar other) {
|
2088
|
+
return torch::ne(self, other);
|
2089
|
+
})
|
2090
|
+
.define_singleton_method(
|
2091
|
+
"_ne_scalar_out",
|
2092
|
+
*[](const Tensor &self, Scalar other, Tensor &out) {
|
2093
|
+
return torch::ne_out(out, self, other);
|
2094
|
+
})
|
2095
|
+
.define_singleton_method(
|
2096
|
+
"_ne_tensor",
|
2097
|
+
*[](const Tensor &self, const Tensor &other) {
|
2098
|
+
return torch::ne(self, other);
|
2099
|
+
})
|
2100
|
+
.define_singleton_method(
|
2101
|
+
"_ne_tensor_out",
|
2102
|
+
*[](const Tensor &self, const Tensor &other, Tensor &out) {
|
2103
|
+
return torch::ne_out(out, self, other);
|
2104
|
+
})
|
2105
|
+
.define_singleton_method(
|
2106
|
+
"_neg",
|
2107
|
+
*[](const Tensor &self) {
|
2108
|
+
return torch::neg(self);
|
2109
|
+
})
|
2110
|
+
.define_singleton_method(
|
2111
|
+
"_neg_",
|
2112
|
+
*[](Tensor &self) {
|
2113
|
+
return torch::neg_(self);
|
2114
|
+
})
|
2115
|
+
.define_singleton_method(
|
2116
|
+
"_neg_out",
|
2117
|
+
*[](const Tensor &self, Tensor &out) {
|
2118
|
+
return torch::neg_out(out, self);
|
2119
|
+
})
|
2120
|
+
.define_singleton_method(
|
2121
|
+
"_nonzero",
|
2122
|
+
*[](const Tensor &self) {
|
2123
|
+
return torch::nonzero(self);
|
2124
|
+
})
|
2125
|
+
.define_singleton_method(
|
2126
|
+
"_nonzero_numpy",
|
2127
|
+
*[](const Tensor &self) {
|
2128
|
+
return torch::nonzero_numpy(self);
|
2129
|
+
})
|
2130
|
+
.define_singleton_method(
|
2131
|
+
"_nonzero_out",
|
2132
|
+
*[](const Tensor &self, Tensor &out) {
|
2133
|
+
return torch::nonzero_out(out, self);
|
2134
|
+
})
|
2135
|
+
.define_singleton_method(
|
2136
|
+
"_norm_except_dim",
|
2137
|
+
*[](const Tensor &v, int64_t pow, int64_t dim) {
|
2138
|
+
return torch::norm_except_dim(v, pow, dim);
|
2139
|
+
})
|
2140
|
+
.define_singleton_method(
|
2141
|
+
"_norm_scalar",
|
2142
|
+
*[](const Tensor &self, Scalar p) {
|
2143
|
+
return torch::norm(self, p);
|
2144
|
+
})
|
2145
|
+
.define_singleton_method(
|
2146
|
+
"_normal_float_float_out",
|
2147
|
+
*[](double mean, double std, IntArrayRef size, Tensor &out) {
|
2148
|
+
return torch::normal_out(out, mean, std, size);
|
2149
|
+
})
|
2150
|
+
.define_singleton_method(
|
2151
|
+
"_normal_float_tensor_out",
|
2152
|
+
*[](double mean, const Tensor &std, Tensor &out) {
|
2153
|
+
return torch::normal_out(out, mean, std);
|
2154
|
+
})
|
2155
|
+
.define_singleton_method(
|
2156
|
+
"_normal_tensor_float_out",
|
2157
|
+
*[](const Tensor &mean, double std, Tensor &out) {
|
2158
|
+
return torch::normal_out(out, mean, std);
|
2159
|
+
})
|
2160
|
+
.define_singleton_method(
|
2161
|
+
"_normal_tensor_tensor_out",
|
2162
|
+
*[](const Tensor &mean, const Tensor &std, Tensor &out) {
|
2163
|
+
return torch::normal_out(out, mean, std);
|
2164
|
+
})
|
2165
|
+
.define_singleton_method(
|
2166
|
+
"_nuclear_norm",
|
2167
|
+
*[](const Tensor &self, bool keepdim) {
|
2168
|
+
return torch::nuclear_norm(self, keepdim);
|
2169
|
+
})
|
2170
|
+
.define_singleton_method(
|
2171
|
+
"_nuclear_norm_dim",
|
2172
|
+
*[](const Tensor &self, IntArrayRef dim, bool keepdim) {
|
2173
|
+
return torch::nuclear_norm(self, dim, keepdim);
|
2174
|
+
})
|
2175
|
+
.define_singleton_method(
|
2176
|
+
"_nuclear_norm_dim_out",
|
2177
|
+
*[](const Tensor &self, IntArrayRef dim, bool keepdim, Tensor &out) {
|
2178
|
+
return torch::nuclear_norm_out(out, self, dim, keepdim);
|
2179
|
+
})
|
2180
|
+
.define_singleton_method(
|
2181
|
+
"_nuclear_norm_out",
|
2182
|
+
*[](const Tensor &self, bool keepdim, Tensor &out) {
|
2183
|
+
return torch::nuclear_norm_out(out, self, keepdim);
|
2184
|
+
})
|
2185
|
+
.define_singleton_method(
|
2186
|
+
"_ones_out",
|
2187
|
+
*[](IntArrayRef size, Tensor &out) {
|
2188
|
+
return torch::ones_out(out, size);
|
2189
|
+
})
|
2190
|
+
.define_singleton_method(
|
2191
|
+
"_orgqr",
|
2192
|
+
*[](const Tensor &self, const Tensor &input2) {
|
2193
|
+
return torch::orgqr(self, input2);
|
2194
|
+
})
|
2195
|
+
.define_singleton_method(
|
2196
|
+
"_orgqr_out",
|
2197
|
+
*[](const Tensor &self, const Tensor &input2, Tensor &out) {
|
2198
|
+
return torch::orgqr_out(out, self, input2);
|
2199
|
+
})
|
2200
|
+
.define_singleton_method(
|
2201
|
+
"_ormqr",
|
2202
|
+
*[](const Tensor &self, const Tensor &input2, const Tensor &input3, bool left, bool transpose) {
|
2203
|
+
return torch::ormqr(self, input2, input3, left, transpose);
|
2204
|
+
})
|
2205
|
+
.define_singleton_method(
|
2206
|
+
"_ormqr_out",
|
2207
|
+
*[](const Tensor &self, const Tensor &input2, const Tensor &input3, bool left, bool transpose, Tensor &out) {
|
2208
|
+
return torch::ormqr_out(out, self, input2, input3, left, transpose);
|
2209
|
+
})
|
2210
|
+
.define_singleton_method(
|
2211
|
+
"_pairwise_distance",
|
2212
|
+
*[](const Tensor &x1, const Tensor &x2, double p, double eps, bool keepdim) {
|
2213
|
+
return torch::pairwise_distance(x1, x2, p, eps, keepdim);
|
2214
|
+
})
|
2215
|
+
.define_singleton_method(
|
2216
|
+
"_pdist",
|
2217
|
+
*[](const Tensor &self, double p) {
|
2218
|
+
return torch::pdist(self, p);
|
2219
|
+
})
|
2220
|
+
.define_singleton_method(
|
2221
|
+
"_pinverse",
|
2222
|
+
*[](const Tensor &self, double rcond) {
|
2223
|
+
return torch::pinverse(self, rcond);
|
2224
|
+
})
|
2225
|
+
.define_singleton_method(
|
2226
|
+
"_pixel_shuffle",
|
2227
|
+
*[](const Tensor &self, int64_t upscale_factor) {
|
2228
|
+
return torch::pixel_shuffle(self, upscale_factor);
|
2229
|
+
})
|
2230
|
+
.define_singleton_method(
|
2231
|
+
"_poisson",
|
2232
|
+
*[](const Tensor &self) {
|
2233
|
+
return torch::poisson(self);
|
2234
|
+
})
|
2235
|
+
.define_singleton_method(
|
2236
|
+
"_poisson_nll_loss",
|
2237
|
+
*[](const Tensor &input, const Tensor &target, bool log_input, bool full, double eps, MyReduction reduction) {
|
2238
|
+
return torch::poisson_nll_loss(input, target, log_input, full, eps, reduction);
|
2239
|
+
})
|
2240
|
+
.define_singleton_method(
|
2241
|
+
"_polygamma",
|
2242
|
+
*[](int64_t n, const Tensor &self) {
|
2243
|
+
return torch::polygamma(n, self);
|
2244
|
+
})
|
2245
|
+
.define_singleton_method(
|
2246
|
+
"_polygamma_out",
|
2247
|
+
*[](int64_t n, const Tensor &self, Tensor &out) {
|
2248
|
+
return torch::polygamma_out(out, n, self);
|
2249
|
+
})
|
2250
|
+
.define_singleton_method(
|
2251
|
+
"_pow_scalar",
|
2252
|
+
*[](Scalar self, const Tensor &exponent) {
|
2253
|
+
return torch::pow(self, exponent);
|
2254
|
+
})
|
2255
|
+
.define_singleton_method(
|
2256
|
+
"_pow_scalar_out",
|
2257
|
+
*[](Scalar self, const Tensor &exponent, Tensor &out) {
|
2258
|
+
return torch::pow_out(out, self, exponent);
|
2259
|
+
})
|
2260
|
+
.define_singleton_method(
|
2261
|
+
"_pow_tensor_scalar",
|
2262
|
+
*[](const Tensor &self, Scalar exponent) {
|
2263
|
+
return torch::pow(self, exponent);
|
2264
|
+
})
|
2265
|
+
.define_singleton_method(
|
2266
|
+
"_pow_tensor_scalar_out",
|
2267
|
+
*[](const Tensor &self, Scalar exponent, Tensor &out) {
|
2268
|
+
return torch::pow_out(out, self, exponent);
|
2269
|
+
})
|
2270
|
+
.define_singleton_method(
|
2271
|
+
"_pow_tensor_tensor",
|
2272
|
+
*[](const Tensor &self, const Tensor &exponent) {
|
2273
|
+
return torch::pow(self, exponent);
|
2274
|
+
})
|
2275
|
+
.define_singleton_method(
|
2276
|
+
"_pow_tensor_tensor_out",
|
2277
|
+
*[](const Tensor &self, const Tensor &exponent, Tensor &out) {
|
2278
|
+
return torch::pow_out(out, self, exponent);
|
2279
|
+
})
|
2280
|
+
.define_singleton_method(
|
2281
|
+
"_prelu",
|
2282
|
+
*[](const Tensor &self, const Tensor &weight) {
|
2283
|
+
return torch::prelu(self, weight);
|
2284
|
+
})
|
2285
|
+
.define_singleton_method(
|
2286
|
+
"_prod",
|
2287
|
+
*[](const Tensor &self, OptionalScalarType dtype) {
|
2288
|
+
return torch::prod(self, dtype);
|
2289
|
+
})
|
2290
|
+
.define_singleton_method(
|
2291
|
+
"_prod_dim_int",
|
2292
|
+
*[](const Tensor &self, int64_t dim, bool keepdim, OptionalScalarType dtype) {
|
2293
|
+
return torch::prod(self, dim, keepdim, dtype);
|
2294
|
+
})
|
2295
|
+
.define_singleton_method(
|
2296
|
+
"_prod_int_out",
|
2297
|
+
*[](const Tensor &self, int64_t dim, bool keepdim, OptionalScalarType dtype, Tensor &out) {
|
2298
|
+
return torch::prod_out(out, self, dim, keepdim, dtype);
|
2299
|
+
})
|
2300
|
+
.define_singleton_method(
|
2301
|
+
"_promote_types",
|
2302
|
+
*[](ScalarType type1, ScalarType type2) {
|
2303
|
+
return torch::promote_types(type1, type2);
|
2304
|
+
})
|
2305
|
+
.define_singleton_method(
|
2306
|
+
"_q_per_channel_axis",
|
2307
|
+
*[](const Tensor &self) {
|
2308
|
+
return torch::q_per_channel_axis(self);
|
2309
|
+
})
|
2310
|
+
.define_singleton_method(
|
2311
|
+
"_q_per_channel_scales",
|
2312
|
+
*[](const Tensor &self) {
|
2313
|
+
return torch::q_per_channel_scales(self);
|
2314
|
+
})
|
2315
|
+
.define_singleton_method(
|
2316
|
+
"_q_per_channel_zero_points",
|
2317
|
+
*[](const Tensor &self) {
|
2318
|
+
return torch::q_per_channel_zero_points(self);
|
2319
|
+
})
|
2320
|
+
.define_singleton_method(
|
2321
|
+
"_q_scale",
|
2322
|
+
*[](const Tensor &self) {
|
2323
|
+
return torch::q_scale(self);
|
2324
|
+
})
|
2325
|
+
.define_singleton_method(
|
2326
|
+
"_q_zero_point",
|
2327
|
+
*[](const Tensor &self) {
|
2328
|
+
return torch::q_zero_point(self);
|
2329
|
+
})
|
2330
|
+
.define_singleton_method(
|
2331
|
+
"_qr",
|
2332
|
+
*[](const Tensor &self, bool some) {
|
2333
|
+
return wrap(torch::qr(self, some));
|
2334
|
+
})
|
2335
|
+
.define_singleton_method(
|
2336
|
+
"_qr_q",
|
2337
|
+
*[](const Tensor &self, bool some, Tensor &Q, Tensor &R) {
|
2338
|
+
return wrap(torch::qr_out(Q, R, self, some));
|
2339
|
+
})
|
2340
|
+
.define_singleton_method(
|
2341
|
+
"_quantize_per_channel",
|
2342
|
+
*[](const Tensor &self, const Tensor &scales, const Tensor &zero_points, int64_t axis, ScalarType dtype) {
|
2343
|
+
return torch::quantize_per_channel(self, scales, zero_points, axis, dtype);
|
2344
|
+
})
|
2345
|
+
.define_singleton_method(
|
2346
|
+
"_quantize_per_tensor",
|
2347
|
+
*[](const Tensor &self, double scale, int64_t zero_point, ScalarType dtype) {
|
2348
|
+
return torch::quantize_per_tensor(self, scale, zero_point, dtype);
|
2349
|
+
})
|
2350
|
+
.define_singleton_method(
|
2351
|
+
"_quantized_batch_norm",
|
2352
|
+
*[](const Tensor &input, OptionalTensor weight, OptionalTensor bias, const Tensor &mean, const Tensor &var, double eps, double output_scale, int64_t output_zero_point) {
|
2353
|
+
return torch::quantized_batch_norm(input, weight, bias, mean, var, eps, output_scale, output_zero_point);
|
2354
|
+
})
|
2355
|
+
.define_singleton_method(
|
2356
|
+
"_quantized_gru_cell",
|
2357
|
+
*[](const Tensor &input, const Tensor &hx, const Tensor &w_ih, const Tensor &w_hh, const Tensor &b_ih, const Tensor &b_hh, const Tensor &packed_ih, const Tensor &packed_hh, const Tensor &col_offsets_ih, const Tensor &col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) {
|
2358
|
+
return torch::quantized_gru_cell(input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
|
2359
|
+
})
|
2360
|
+
.define_singleton_method(
|
2361
|
+
"_quantized_gru_data",
|
2362
|
+
*[](const Tensor &data, const Tensor &batch_sizes, const Tensor &hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) {
|
2363
|
+
return wrap(torch::quantized_gru(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional));
|
2364
|
+
})
|
2365
|
+
.define_singleton_method(
|
2366
|
+
"_quantized_gru_input",
|
2367
|
+
*[](const Tensor &input, const Tensor &hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
|
2368
|
+
return wrap(torch::quantized_gru(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first));
|
2369
|
+
})
|
2370
|
+
.define_singleton_method(
|
2371
|
+
"_quantized_lstm",
|
2372
|
+
*[](const Tensor &input, TensorList hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first, OptionalScalarType dtype, bool use_dynamic) {
|
2373
|
+
return wrap(torch::quantized_lstm(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first, dtype, use_dynamic));
|
2374
|
+
})
|
2375
|
+
.define_singleton_method(
|
2376
|
+
"_quantized_lstm_cell",
|
2377
|
+
*[](const Tensor &input, TensorList hx, const Tensor &w_ih, const Tensor &w_hh, const Tensor &b_ih, const Tensor &b_hh, const Tensor &packed_ih, const Tensor &packed_hh, const Tensor &col_offsets_ih, const Tensor &col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) {
|
2378
|
+
return wrap(torch::quantized_lstm_cell(input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh));
|
2379
|
+
})
|
2380
|
+
.define_singleton_method(
|
2381
|
+
"_quantized_lstm_data",
|
2382
|
+
*[](const Tensor &data, const Tensor &batch_sizes, TensorList hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, OptionalScalarType dtype, bool use_dynamic) {
|
2383
|
+
return wrap(torch::quantized_lstm(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional, dtype, use_dynamic));
|
2384
|
+
})
|
2385
|
+
.define_singleton_method(
|
2386
|
+
"_quantized_max_pool2d",
|
2387
|
+
*[](const Tensor &self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) {
|
2388
|
+
return torch::quantized_max_pool2d(self, kernel_size, stride, padding, dilation, ceil_mode);
|
2389
|
+
})
|
2390
|
+
.define_singleton_method(
|
2391
|
+
"_quantized_rnn_relu_cell",
|
2392
|
+
*[](const Tensor &input, const Tensor &hx, const Tensor &w_ih, const Tensor &w_hh, const Tensor &b_ih, const Tensor &b_hh, const Tensor &packed_ih, const Tensor &packed_hh, const Tensor &col_offsets_ih, const Tensor &col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) {
|
2393
|
+
return torch::quantized_rnn_relu_cell(input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
|
2394
|
+
})
|
2395
|
+
.define_singleton_method(
|
2396
|
+
"_quantized_rnn_tanh_cell",
|
2397
|
+
*[](const Tensor &input, const Tensor &hx, const Tensor &w_ih, const Tensor &w_hh, const Tensor &b_ih, const Tensor &b_hh, const Tensor &packed_ih, const Tensor &packed_hh, const Tensor &col_offsets_ih, const Tensor &col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) {
|
2398
|
+
return torch::quantized_rnn_tanh_cell(input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
|
2399
|
+
})
|
2400
|
+
.define_singleton_method(
|
2401
|
+
"_rand_generator_out",
|
2402
|
+
*[](IntArrayRef size, Tensor &out) {
|
2403
|
+
return torch::rand_out(out, size);
|
2404
|
+
})
|
2405
|
+
.define_singleton_method(
|
2406
|
+
"_rand_out",
|
2407
|
+
*[](IntArrayRef size, Tensor &out) {
|
2408
|
+
return torch::rand_out(out, size);
|
2409
|
+
})
|
2410
|
+
.define_singleton_method(
|
2411
|
+
"_randint_generator_out",
|
2412
|
+
*[](int64_t high, IntArrayRef size, Tensor &out) {
|
2413
|
+
return torch::randint_out(out, high, size);
|
2414
|
+
})
|
2415
|
+
.define_singleton_method(
|
2416
|
+
"_randint_low_generator_out",
|
2417
|
+
*[](int64_t low, int64_t high, IntArrayRef size, Tensor &out) {
|
2418
|
+
return torch::randint_out(out, low, high, size);
|
2419
|
+
})
|
2420
|
+
.define_singleton_method(
|
2421
|
+
"_randint_low_out",
|
2422
|
+
*[](int64_t low, int64_t high, IntArrayRef size, Tensor &out) {
|
2423
|
+
return torch::randint_out(out, low, high, size);
|
2424
|
+
})
|
2425
|
+
.define_singleton_method(
|
2426
|
+
"_randint_out",
|
2427
|
+
*[](int64_t high, IntArrayRef size, Tensor &out) {
|
2428
|
+
return torch::randint_out(out, high, size);
|
2429
|
+
})
|
2430
|
+
+    .define_singleton_method(
+      "_randn_generator_out",
+      *[](IntArrayRef size, Tensor &out) {
+        return torch::randn_out(out, size);
+      })
+    .define_singleton_method(
+      "_randn_out",
+      *[](IntArrayRef size, Tensor &out) {
+        return torch::randn_out(out, size);
+      })
+    .define_singleton_method(
+      "_randperm_generator_out",
+      *[](int64_t n, Tensor &out) {
+        return torch::randperm_out(out, n);
+      })
+    .define_singleton_method(
+      "_randperm_out",
+      *[](int64_t n, Tensor &out) {
+        return torch::randperm_out(out, n);
+      })
+    .define_singleton_method(
+      "_range_out",
+      *[](Scalar start, Scalar end, Scalar step, Tensor &out) {
+        return torch::range_out(out, start, end, step);
+      })
+    .define_singleton_method(
+      "_real",
+      *[](const Tensor &self) {
+        return torch::real(self);
+      })
+    .define_singleton_method(
+      "_reciprocal",
+      *[](const Tensor &self) {
+        return torch::reciprocal(self);
+      })
+    .define_singleton_method(
+      "_reciprocal_",
+      *[](Tensor &self) {
+        return torch::reciprocal_(self);
+      })
+    .define_singleton_method(
+      "_reciprocal_out",
+      *[](const Tensor &self, Tensor &out) {
+        return torch::reciprocal_out(out, self);
+      })
+    .define_singleton_method(
+      "_relu",
+      *[](const Tensor &self) {
+        return torch::relu(self);
+      })
+    .define_singleton_method(
+      "_relu_",
+      *[](Tensor &self) {
+        return torch::relu_(self);
+      })
+    .define_singleton_method(
+      "_remainder_scalar",
+      *[](const Tensor &self, Scalar other) {
+        return torch::remainder(self, other);
+      })
+    .define_singleton_method(
+      "_remainder_scalar_out",
+      *[](const Tensor &self, Scalar other, Tensor &out) {
+        return torch::remainder_out(out, self, other);
+      })
+    .define_singleton_method(
+      "_remainder_tensor",
+      *[](const Tensor &self, const Tensor &other) {
+        return torch::remainder(self, other);
+      })
+    .define_singleton_method(
+      "_remainder_tensor_out",
+      *[](const Tensor &self, const Tensor &other, Tensor &out) {
+        return torch::remainder_out(out, self, other);
+      })
+    .define_singleton_method(
+      "_renorm",
+      *[](const Tensor &self, Scalar p, int64_t dim, Scalar maxnorm) {
+        return torch::renorm(self, p, dim, maxnorm);
+      })
+    .define_singleton_method(
+      "_renorm_out",
+      *[](const Tensor &self, Scalar p, int64_t dim, Scalar maxnorm, Tensor &out) {
+        return torch::renorm_out(out, self, p, dim, maxnorm);
+      })
+    .define_singleton_method(
+      "_repeat_interleave_self_int",
+      *[](const Tensor &self, int64_t repeats) {
+        return torch::repeat_interleave(self, repeats);
+      })
+    .define_singleton_method(
+      "_repeat_interleave_self_int_dim",
+      *[](const Tensor &self, int64_t repeats, int64_t dim) {
+        return torch::repeat_interleave(self, repeats, dim);
+      })
+    .define_singleton_method(
+      "_repeat_interleave_self_tensor",
+      *[](const Tensor &self, const Tensor &repeats) {
+        return torch::repeat_interleave(self, repeats);
+      })
+    .define_singleton_method(
+      "_repeat_interleave_self_tensor_dim",
+      *[](const Tensor &self, const Tensor &repeats, int64_t dim) {
+        return torch::repeat_interleave(self, repeats, dim);
+      })
+    .define_singleton_method(
+      "_repeat_interleave_tensor",
+      *[](const Tensor &repeats) {
+        return torch::repeat_interleave(repeats);
+      })
+    .define_singleton_method(
+      "_reshape",
+      *[](const Tensor &self, IntArrayRef shape) {
+        return torch::reshape(self, shape);
+      })
+    .define_singleton_method(
+      "_resize_as_",
+      *[](Tensor &self, const Tensor &the_template) {
+        return torch::resize_as_(self, the_template);
+      })
+    .define_singleton_method(
+      "_result_type_scalar",
+      *[](const Tensor &tensor, Scalar other) {
+        return torch::result_type(tensor, other);
+      })
+    .define_singleton_method(
+      "_result_type_scalar_scalar",
+      *[](Scalar scalar1, Scalar scalar2) {
+        return torch::result_type(scalar1, scalar2);
+      })
+    .define_singleton_method(
+      "_result_type_scalar_tensor",
+      *[](Scalar scalar, const Tensor &tensor) {
+        return torch::result_type(scalar, tensor);
+      })
+    .define_singleton_method(
+      "_result_type_tensor",
+      *[](const Tensor &tensor, const Tensor &other) {
+        return torch::result_type(tensor, other);
+      })
+    .define_singleton_method(
+      "_rfft",
+      *[](const Tensor &self, int64_t signal_ndim, bool normalized, bool onesided) {
+        return torch::rfft(self, signal_ndim, normalized, onesided);
+      })
+    .define_singleton_method(
+      "_rnn_relu_cell",
+      *[](const Tensor &input, const Tensor &hx, const Tensor &w_ih, const Tensor &w_hh, OptionalTensor b_ih, OptionalTensor b_hh) {
+        return torch::rnn_relu_cell(input, hx, w_ih, w_hh, b_ih, b_hh);
+      })
+    .define_singleton_method(
+      "_rnn_relu_data",
+      *[](const Tensor &data, const Tensor &batch_sizes, const Tensor &hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) {
+        return wrap(torch::rnn_relu(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional));
+      })
+    .define_singleton_method(
+      "_rnn_relu_input",
+      *[](const Tensor &input, const Tensor &hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
+        return wrap(torch::rnn_relu(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first));
+      })
+    .define_singleton_method(
+      "_rnn_tanh_cell",
+      *[](const Tensor &input, const Tensor &hx, const Tensor &w_ih, const Tensor &w_hh, OptionalTensor b_ih, OptionalTensor b_hh) {
+        return torch::rnn_tanh_cell(input, hx, w_ih, w_hh, b_ih, b_hh);
+      })
+    .define_singleton_method(
+      "_rnn_tanh_data",
+      *[](const Tensor &data, const Tensor &batch_sizes, const Tensor &hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) {
+        return wrap(torch::rnn_tanh(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional));
+      })
+    .define_singleton_method(
+      "_rnn_tanh_input",
+      *[](const Tensor &input, const Tensor &hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
+        return wrap(torch::rnn_tanh(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first));
+      })
+    .define_singleton_method(
+      "_roll",
+      *[](const Tensor &self, IntArrayRef shifts, IntArrayRef dims) {
+        return torch::roll(self, shifts, dims);
+      })
+    .define_singleton_method(
+      "_rot90",
+      *[](const Tensor &self, int64_t k, IntArrayRef dims) {
+        return torch::rot90(self, k, dims);
+      })
+    .define_singleton_method(
+      "_round",
+      *[](const Tensor &self) {
+        return torch::round(self);
+      })
+    .define_singleton_method(
+      "_round_",
+      *[](Tensor &self) {
+        return torch::round_(self);
+      })
+    .define_singleton_method(
+      "_round_out",
+      *[](const Tensor &self, Tensor &out) {
+        return torch::round_out(out, self);
+      })
+    .define_singleton_method(
+      "_rrelu",
+      *[](const Tensor &self, Scalar lower, Scalar upper, bool training) {
+        return torch::rrelu(self, lower, upper, training);
+      })
+    .define_singleton_method(
+      "_rrelu_",
+      *[](Tensor &self, Scalar lower, Scalar upper, bool training) {
+        return torch::rrelu_(self, lower, upper, training);
+      })
+    .define_singleton_method(
+      "_rsqrt",
+      *[](const Tensor &self) {
+        return torch::rsqrt(self);
+      })
+    .define_singleton_method(
+      "_rsqrt_",
+      *[](Tensor &self) {
+        return torch::rsqrt_(self);
+      })
+    .define_singleton_method(
+      "_rsqrt_out",
+      *[](const Tensor &self, Tensor &out) {
+        return torch::rsqrt_out(out, self);
+      })
+    .define_singleton_method(
+      "_rsub_scalar",
+      *[](const Tensor &self, Scalar other, Scalar alpha) {
+        return torch::rsub(self, other, alpha);
+      })
+    .define_singleton_method(
+      "_rsub_tensor",
+      *[](const Tensor &self, const Tensor &other, Scalar alpha) {
+        return torch::rsub(self, other, alpha);
+      })
+    .define_singleton_method(
+      "_scatter_add",
+      *[](const Tensor &self, int64_t dim, const Tensor &index, const Tensor &src) {
+        return torch::scatter_add(self, dim, index, src);
+      })
+    .define_singleton_method(
+      "_scatter_src",
+      *[](const Tensor &self, int64_t dim, const Tensor &index, const Tensor &src) {
+        return torch::scatter(self, dim, index, src);
+      })
+    .define_singleton_method(
+      "_scatter_value",
+      *[](const Tensor &self, int64_t dim, const Tensor &index, Scalar value) {
+        return torch::scatter(self, dim, index, value);
+      })
+    .define_singleton_method(
+      "_select_int",
+      *[](Tensor &self, int64_t dim, int64_t index) {
+        return torch::select(self, dim, index);
+      })
+    .define_singleton_method(
+      "_selu",
+      *[](const Tensor &self) {
+        return torch::selu(self);
+      })
+    .define_singleton_method(
+      "_selu_",
+      *[](Tensor &self) {
+        return torch::selu_(self);
+      })
+    .define_singleton_method(
+      "_sigmoid",
+      *[](const Tensor &self) {
+        return torch::sigmoid(self);
+      })
+    .define_singleton_method(
+      "_sigmoid_",
+      *[](Tensor &self) {
+        return torch::sigmoid_(self);
+      })
+    .define_singleton_method(
+      "_sigmoid_out",
+      *[](const Tensor &self, Tensor &out) {
+        return torch::sigmoid_out(out, self);
+      })
+    .define_singleton_method(
+      "_sign",
+      *[](const Tensor &self) {
+        return torch::sign(self);
+      })
+    .define_singleton_method(
+      "_sign_out",
+      *[](const Tensor &self, Tensor &out) {
+        return torch::sign_out(out, self);
+      })
+    .define_singleton_method(
+      "_sin",
+      *[](const Tensor &self) {
+        return torch::sin(self);
+      })
+    .define_singleton_method(
+      "_sin_",
+      *[](Tensor &self) {
+        return torch::sin_(self);
+      })
+    .define_singleton_method(
+      "_sin_out",
+      *[](const Tensor &self, Tensor &out) {
+        return torch::sin_out(out, self);
+      })
+    .define_singleton_method(
+      "_sinh",
+      *[](const Tensor &self) {
+        return torch::sinh(self);
+      })
+    .define_singleton_method(
+      "_sinh_",
+      *[](Tensor &self) {
+        return torch::sinh_(self);
+      })
+    .define_singleton_method(
+      "_sinh_out",
+      *[](const Tensor &self, Tensor &out) {
+        return torch::sinh_out(out, self);
+      })
+    .define_singleton_method(
+      "_size_int",
+      *[](const Tensor &self, int64_t dim) {
+        return torch::size(self, dim);
+      })
+    .define_singleton_method(
+      "_slice_tensor",
+      *[](Tensor &self, int64_t dim, int64_t start, int64_t end, int64_t step) {
+        return torch::slice(self, dim, start, end, step);
+      })
+    .define_singleton_method(
+      "_slogdet",
+      *[](const Tensor &self) {
+        return wrap(torch::slogdet(self));
+      })
+    .define_singleton_method(
+      "_smm",
+      *[](const Tensor &self, const Tensor &mat2) {
+        return torch::smm(self, mat2);
+      })
+    .define_singleton_method(
+      "_softmax_int",
+      *[](const Tensor &self, int64_t dim, OptionalScalarType dtype) {
+        return torch::softmax(self, dim, dtype);
+      })
+    .define_singleton_method(
+      "_solve",
+      *[](const Tensor &self, const Tensor &A) {
+        return wrap(torch::solve(self, A));
+      })
+    .define_singleton_method(
+      "_solve_solution",
+      *[](const Tensor &self, const Tensor &A, Tensor &solution, Tensor &lu) {
+        return wrap(torch::solve_out(solution, lu, self, A));
+      })
+    .define_singleton_method(
+      "_sort",
+      *[](const Tensor &self, int64_t dim, bool descending) {
+        return wrap(torch::sort(self, dim, descending));
+      })
+    .define_singleton_method(
+      "_sort_values",
+      *[](const Tensor &self, int64_t dim, bool descending, Tensor &values, Tensor &indices) {
+        return wrap(torch::sort_out(values, indices, self, dim, descending));
+      })
+    .define_singleton_method(
+      "_split_tensor",
+      *[](Tensor &self, int64_t split_size, int64_t dim) {
+        return torch::split(self, split_size, dim);
+      })
+    .define_singleton_method(
+      "_split_with_sizes",
+      *[](const Tensor &self, IntArrayRef split_sizes, int64_t dim) {
+        return torch::split_with_sizes(self, split_sizes, dim);
+      })
+    .define_singleton_method(
+      "_sqrt",
+      *[](const Tensor &self) {
+        return torch::sqrt(self);
+      })
+    .define_singleton_method(
+      "_sqrt_",
+      *[](Tensor &self) {
+        return torch::sqrt_(self);
+      })
+    .define_singleton_method(
+      "_sqrt_out",
+      *[](const Tensor &self, Tensor &out) {
+        return torch::sqrt_out(out, self);
+      })
+    .define_singleton_method(
+      "_square",
+      *[](const Tensor &self) {
+        return torch::square(self);
+      })
+    .define_singleton_method(
+      "_square_",
+      *[](Tensor &self) {
+        return torch::square_(self);
+      })
+    .define_singleton_method(
+      "_squeeze",
+      *[](Tensor &self) {
+        return torch::squeeze(self);
+      })
+    .define_singleton_method(
+      "_squeeze_dim",
+      *[](Tensor &self, int64_t dim) {
+        return torch::squeeze(self, dim);
+      })
+    .define_singleton_method(
+      "_sspaddmm",
+      *[](const Tensor &self, const Tensor &mat1, const Tensor &mat2, Scalar beta, Scalar alpha) {
+        return torch::sspaddmm(self, mat1, mat2, beta, alpha);
+      })
+    .define_singleton_method(
+      "_sspaddmm_out",
+      *[](const Tensor &self, const Tensor &mat1, const Tensor &mat2, Scalar beta, Scalar alpha, Tensor &out) {
+        return torch::sspaddmm_out(out, self, mat1, mat2, beta, alpha);
+      })
+    .define_singleton_method(
+      "_stack",
+      *[](TensorList tensors, int64_t dim) {
+        return torch::stack(tensors, dim);
+      })
+    .define_singleton_method(
+      "_stack_out",
+      *[](TensorList tensors, int64_t dim, Tensor &out) {
+        return torch::stack_out(out, tensors, dim);
+      })
+    .define_singleton_method(
+      "_std",
+      *[](const Tensor &self, bool unbiased) {
+        return torch::std(self, unbiased);
+      })
+    .define_singleton_method(
+      "_std_dim",
+      *[](const Tensor &self, IntArrayRef dim, bool unbiased, bool keepdim) {
+        return torch::std(self, dim, unbiased, keepdim);
+      })
+    .define_singleton_method(
+      "_std_mean",
+      *[](const Tensor &self, bool unbiased) {
+        return wrap(torch::std_mean(self, unbiased));
+      })
+    .define_singleton_method(
+      "_std_mean_dim",
+      *[](const Tensor &self, IntArrayRef dim, bool unbiased, bool keepdim) {
+        return wrap(torch::std_mean(self, dim, unbiased, keepdim));
+      })
+    .define_singleton_method(
+      "_std_out",
+      *[](const Tensor &self, IntArrayRef dim, bool unbiased, bool keepdim, Tensor &out) {
+        return torch::std_out(out, self, dim, unbiased, keepdim);
+      })
+    .define_singleton_method(
+      "_stride_int",
+      *[](const Tensor &self, int64_t dim) {
+        return torch::stride(self, dim);
+      })
+    .define_singleton_method(
+      "_sub_out",
+      *[](const Tensor &self, const Tensor &other, Scalar alpha, Tensor &out) {
+        return torch::sub_out(out, self, other, alpha);
+      })
+    .define_singleton_method(
+      "_sub_scalar",
+      *[](const Tensor &self, Scalar other, Scalar alpha) {
+        return torch::sub(self, other, alpha);
+      })
+    .define_singleton_method(
+      "_sub_tensor",
+      *[](const Tensor &self, const Tensor &other, Scalar alpha) {
+        return torch::sub(self, other, alpha);
+      })
+    .define_singleton_method(
+      "_sum",
+      *[](const Tensor &self, OptionalScalarType dtype) {
+        return torch::sum(self, dtype);
+      })
+    .define_singleton_method(
+      "_sum_dim_intlist",
+      *[](const Tensor &self, IntArrayRef dim, bool keepdim, OptionalScalarType dtype) {
+        return torch::sum(self, dim, keepdim, dtype);
+      })
+    .define_singleton_method(
+      "_sum_intlist_out",
+      *[](const Tensor &self, IntArrayRef dim, bool keepdim, OptionalScalarType dtype, Tensor &out) {
+        return torch::sum_out(out, self, dim, keepdim, dtype);
+      })
+    .define_singleton_method(
+      "_svd",
+      *[](const Tensor &self, bool some, bool compute_uv) {
+        return wrap(torch::svd(self, some, compute_uv));
+      })
+    .define_singleton_method(
+      "_svd_u",
+      *[](const Tensor &self, bool some, bool compute_uv, Tensor &U, Tensor &S, Tensor &V) {
+        return wrap(torch::svd_out(U, S, V, self, some, compute_uv));
+      })
+    .define_singleton_method(
+      "_symeig",
+      *[](const Tensor &self, bool eigenvectors, bool upper) {
+        return wrap(torch::symeig(self, eigenvectors, upper));
+      })
+    .define_singleton_method(
+      "_symeig_e",
+      *[](const Tensor &self, bool eigenvectors, bool upper, Tensor &e, Tensor &V) {
+        return wrap(torch::symeig_out(e, V, self, eigenvectors, upper));
+      })
+    .define_singleton_method(
+      "_t",
+      *[](Tensor &self) {
+        return torch::t(self);
+      })
+    .define_singleton_method(
+      "_take",
+      *[](const Tensor &self, const Tensor &index) {
+        return torch::take(self, index);
+      })
+    .define_singleton_method(
+      "_take_out",
+      *[](const Tensor &self, const Tensor &index, Tensor &out) {
+        return torch::take_out(out, self, index);
+      })
+    .define_singleton_method(
+      "_tan",
+      *[](const Tensor &self) {
+        return torch::tan(self);
+      })
+    .define_singleton_method(
+      "_tan_",
+      *[](Tensor &self) {
+        return torch::tan_(self);
+      })
+    .define_singleton_method(
+      "_tan_out",
+      *[](const Tensor &self, Tensor &out) {
+        return torch::tan_out(out, self);
+      })
+    .define_singleton_method(
+      "_tanh",
+      *[](const Tensor &self) {
+        return torch::tanh(self);
+      })
+    .define_singleton_method(
+      "_tanh_",
+      *[](Tensor &self) {
+        return torch::tanh_(self);
+      })
+    .define_singleton_method(
+      "_tanh_out",
+      *[](const Tensor &self, Tensor &out) {
+        return torch::tanh_out(out, self);
+      })
+    .define_singleton_method(
+      "_tensordot",
+      *[](const Tensor &self, const Tensor &other, IntArrayRef dims_self, IntArrayRef dims_other) {
+        return torch::tensordot(self, other, dims_self, dims_other);
+      })
+    .define_singleton_method(
+      "_threshold",
+      *[](const Tensor &self, Scalar threshold, Scalar value) {
+        return torch::threshold(self, threshold, value);
+      })
+    .define_singleton_method(
+      "_threshold_",
+      *[](Tensor &self, Scalar threshold, Scalar value) {
+        return torch::threshold_(self, threshold, value);
+      })
+    .define_singleton_method(
+      "_threshold_out",
+      *[](const Tensor &self, Scalar threshold, Scalar value, Tensor &out) {
+        return torch::threshold_out(out, self, threshold, value);
+      })
+    .define_singleton_method(
+      "_topk",
+      *[](const Tensor &self, int64_t k, int64_t dim, bool largest, bool sorted) {
+        return wrap(torch::topk(self, k, dim, largest, sorted));
+      })
+    .define_singleton_method(
+      "_topk_values",
+      *[](const Tensor &self, int64_t k, int64_t dim, bool largest, bool sorted, Tensor &values, Tensor &indices) {
+        return wrap(torch::topk_out(values, indices, self, k, dim, largest, sorted));
+      })
+    .define_singleton_method(
+      "_trace",
+      *[](const Tensor &self) {
+        return torch::trace(self);
+      })
+    .define_singleton_method(
+      "_transpose_int",
+      *[](Tensor &self, int64_t dim0, int64_t dim1) {
+        return torch::transpose(self, dim0, dim1);
+      })
+    .define_singleton_method(
+      "_trapz_dx",
+      *[](const Tensor &y, double dx, int64_t dim) {
+        return torch::trapz(y, dx, dim);
+      })
+    .define_singleton_method(
+      "_trapz_x",
+      *[](const Tensor &y, const Tensor &x, int64_t dim) {
+        return torch::trapz(y, x, dim);
+      })
+    .define_singleton_method(
+      "_triangular_solve",
+      *[](const Tensor &self, const Tensor &A, bool upper, bool transpose, bool unitriangular) {
+        return wrap(torch::triangular_solve(self, A, upper, transpose, unitriangular));
+      })
+    .define_singleton_method(
+      "_triangular_solve_x",
+      *[](const Tensor &self, const Tensor &A, bool upper, bool transpose, bool unitriangular, Tensor &X, Tensor &M) {
+        return wrap(torch::triangular_solve_out(X, M, self, A, upper, transpose, unitriangular));
+      })
+    .define_singleton_method(
+      "_tril",
+      *[](const Tensor &self, int64_t diagonal) {
+        return torch::tril(self, diagonal);
+      })
+    .define_singleton_method(
+      "_tril_out",
+      *[](const Tensor &self, int64_t diagonal, Tensor &out) {
+        return torch::tril_out(out, self, diagonal);
+      })
+    .define_singleton_method(
+      "_triplet_margin_loss",
+      *[](const Tensor &anchor, const Tensor &positive, const Tensor &negative, double margin, double p, double eps, bool swap, MyReduction reduction) {
+        return torch::triplet_margin_loss(anchor, positive, negative, margin, p, eps, swap, reduction);
+      })
+    .define_singleton_method(
+      "_triu",
+      *[](const Tensor &self, int64_t diagonal) {
+        return torch::triu(self, diagonal);
+      })
+    .define_singleton_method(
+      "_triu_out",
+      *[](const Tensor &self, int64_t diagonal, Tensor &out) {
+        return torch::triu_out(out, self, diagonal);
+      })
+    .define_singleton_method(
+      "_true_divide_out",
+      *[](const Tensor &self, const Tensor &other, Tensor &out) {
+        return torch::true_divide_out(out, self, other);
+      })
+    .define_singleton_method(
+      "_true_divide_scalar",
+      *[](const Tensor &self, Scalar other) {
+        return torch::true_divide(self, other);
+      })
+    .define_singleton_method(
+      "_true_divide_tensor",
+      *[](const Tensor &self, const Tensor &other) {
+        return torch::true_divide(self, other);
+      })
+    .define_singleton_method(
+      "_trunc",
+      *[](const Tensor &self) {
+        return torch::trunc(self);
+      })
+    .define_singleton_method(
+      "_trunc_",
+      *[](Tensor &self) {
+        return torch::trunc_(self);
+      })
+    .define_singleton_method(
+      "_trunc_out",
+      *[](const Tensor &self, Tensor &out) {
+        return torch::trunc_out(out, self);
+      })
+    .define_singleton_method(
+      "_unbind_int",
+      *[](Tensor &self, int64_t dim) {
+        return torch::unbind(self, dim);
+      })
+    .define_singleton_method(
+      "_unique_consecutive",
+      *[](const Tensor &self, bool return_inverse, bool return_counts) {
+        return wrap(torch::unique_consecutive(self, return_inverse, return_counts));
+      })
+    .define_singleton_method(
+      "_unique_consecutive_dim",
+      *[](const Tensor &self, bool return_inverse, bool return_counts, int64_t dim) {
+        return wrap(torch::unique_consecutive(self, return_inverse, return_counts, dim));
+      })
+    .define_singleton_method(
+      "_unique_dim",
+      *[](const Tensor &self, int64_t dim, bool sorted, bool return_inverse, bool return_counts) {
+        return wrap(torch::unique_dim(self, dim, sorted, return_inverse, return_counts));
+      })
+    .define_singleton_method(
+      "_unique_dim_consecutive",
+      *[](const Tensor &self, int64_t dim, bool return_inverse, bool return_counts) {
+        return wrap(torch::unique_dim_consecutive(self, dim, return_inverse, return_counts));
+      })
+    .define_singleton_method(
+      "_unsqueeze",
+      *[](Tensor &self, int64_t dim) {
+        return torch::unsqueeze(self, dim);
+      })
+    .define_singleton_method(
+      "_var",
+      *[](const Tensor &self, bool unbiased) {
+        return torch::var(self, unbiased);
+      })
+    .define_singleton_method(
+      "_var_dim",
+      *[](const Tensor &self, IntArrayRef dim, bool unbiased, bool keepdim) {
+        return torch::var(self, dim, unbiased, keepdim);
+      })
+    .define_singleton_method(
+      "_var_mean",
+      *[](const Tensor &self, bool unbiased) {
+        return wrap(torch::var_mean(self, unbiased));
+      })
+    .define_singleton_method(
+      "_var_mean_dim",
+      *[](const Tensor &self, IntArrayRef dim, bool unbiased, bool keepdim) {
+        return wrap(torch::var_mean(self, dim, unbiased, keepdim));
+      })
+    .define_singleton_method(
+      "_var_out",
+      *[](const Tensor &self, IntArrayRef dim, bool unbiased, bool keepdim, Tensor &out) {
+        return torch::var_out(out, self, dim, unbiased, keepdim);
+      })
+    .define_singleton_method(
+      "_where",
+      *[](const Tensor &condition) {
+        return torch::where(condition);
+      })
+    .define_singleton_method(
+      "_where_self",
+      *[](const Tensor &condition, const Tensor &self, const Tensor &other) {
+        return torch::where(condition, self, other);
+      })
+    .define_singleton_method(
+      "_zero_",
+      *[](Tensor &self) {
+        return torch::zero_(self);
+      })
+    .define_singleton_method(
+      "_zeros_out",
+      *[](IntArrayRef size, Tensor &out) {
+        return torch::zeros_out(out, size);
+      });
+}
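The underscore-prefixed singleton methods above are the low-level bindings; the gem's Ruby layer (lib/torch.rb and the native parser/generator) is what dispatches public calls down to them. A minimal usage sketch, assuming the public method names generated from native_functions.yaml (Torch.randn, Torch.relu are assumptions here, not shown in this diff):

    require "torch"

    x = Torch.randn(2, 3)   # eventually reaches a generated _randn* binding
    y = Torch.relu(x)       # routed through the native parser to _relu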