torch-rb 0.1.4 → 0.1.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +6 -0
- data/README.md +5 -3
- data/ext/torch/ext.cpp +22 -548
- data/ext/torch/extconf.rb +6 -0
- data/ext/torch/nn_functions.cpp +595 -0
- data/ext/torch/nn_functions.hpp +6 -0
- data/ext/torch/templates.hpp +250 -0
- data/ext/torch/tensor_functions.cpp +1860 -0
- data/ext/torch/tensor_functions.hpp +6 -0
- data/ext/torch/torch_functions.cpp +2875 -0
- data/ext/torch/torch_functions.hpp +6 -0
- data/lib/torch.rb +68 -129
- data/lib/torch/ext.bundle +0 -0
- data/lib/torch/native/dispatcher.rb +48 -0
- data/lib/torch/native/function.rb +78 -0
- data/lib/torch/native/generator.rb +149 -0
- data/lib/torch/native/native_functions.yaml +6837 -0
- data/lib/torch/native/parser.rb +97 -0
- data/lib/torch/nn/bce_with_logits_loss.rb +15 -0
- data/lib/torch/nn/conv2d.rb +0 -2
- data/lib/torch/nn/cosine_embedding_loss.rb +14 -0
- data/lib/torch/nn/functional.rb +55 -16
- data/lib/torch/nn/hinge_embedding_loss.rb +14 -0
- data/lib/torch/nn/identity.rb +1 -0
- data/lib/torch/nn/margin_ranking_loss.rb +14 -0
- data/lib/torch/nn/module.rb +59 -12
- data/lib/torch/nn/multi_label_margin_loss.rb +13 -0
- data/lib/torch/nn/multi_label_soft_margin_loss.rb +13 -0
- data/lib/torch/nn/multi_margin_loss.rb +17 -0
- data/lib/torch/nn/parameter.rb +4 -0
- data/lib/torch/nn/rnn.rb +22 -0
- data/lib/torch/nn/rnn_base.rb +154 -0
- data/lib/torch/nn/smooth_l1_loss.rb +13 -0
- data/lib/torch/nn/soft_margin_loss.rb +13 -0
- data/lib/torch/nn/triplet_margin_loss.rb +18 -0
- data/lib/torch/tensor.rb +19 -19
- data/lib/torch/version.rb +1 -1
- metadata +26 -2
@@ -0,0 +1,250 @@
|
|
1
|
+
#pragma once
|
2
|
+
|
3
|
+
#include <rice/Array.hpp>
|
4
|
+
#include <rice/Object.hpp>
|
5
|
+
|
6
|
+
using namespace Rice;
|
7
|
+
|
8
|
+
template<>
|
9
|
+
inline
|
10
|
+
long long from_ruby<long long>(Object x)
|
11
|
+
{
|
12
|
+
return NUM2LL(x);
|
13
|
+
}
|
14
|
+
|
15
|
+
template<>
|
16
|
+
inline
|
17
|
+
Object to_ruby<long long>(long long const & x)
|
18
|
+
{
|
19
|
+
return LL2NUM(x);
|
20
|
+
}
|
21
|
+
|
22
|
+
template<>
|
23
|
+
inline
|
24
|
+
unsigned long long from_ruby<unsigned long long>(Object x)
|
25
|
+
{
|
26
|
+
return NUM2ULL(x);
|
27
|
+
}
|
28
|
+
|
29
|
+
template<>
|
30
|
+
inline
|
31
|
+
Object to_ruby<unsigned long long>(unsigned long long const & x)
|
32
|
+
{
|
33
|
+
return ULL2NUM(x);
|
34
|
+
}
|
35
|
+
|
36
|
+
template<>
|
37
|
+
inline
|
38
|
+
short from_ruby<short>(Object x)
|
39
|
+
{
|
40
|
+
return NUM2SHORT(x);
|
41
|
+
}
|
42
|
+
|
43
|
+
template<>
|
44
|
+
inline
|
45
|
+
Object to_ruby<short>(short const & x)
|
46
|
+
{
|
47
|
+
return INT2NUM(x);
|
48
|
+
}
|
49
|
+
|
50
|
+
template<>
|
51
|
+
inline
|
52
|
+
unsigned short from_ruby<unsigned short>(Object x)
|
53
|
+
{
|
54
|
+
return NUM2USHORT(x);
|
55
|
+
}
|
56
|
+
|
57
|
+
template<>
|
58
|
+
inline
|
59
|
+
Object to_ruby<unsigned short>(unsigned short const & x)
|
60
|
+
{
|
61
|
+
return UINT2NUM(x);
|
62
|
+
}
|
63
|
+
|
64
|
+
// need to wrap torch::IntArrayRef() since
|
65
|
+
// it doesn't own underlying data
|
66
|
+
class IntArrayRef {
|
67
|
+
std::vector<int64_t> vec;
|
68
|
+
public:
|
69
|
+
IntArrayRef(Object o) {
|
70
|
+
Array a = Array(o);
|
71
|
+
for (size_t i = 0; i < a.size(); i++) {
|
72
|
+
vec.push_back(from_ruby<int64_t>(a[i]));
|
73
|
+
}
|
74
|
+
}
|
75
|
+
operator torch::IntArrayRef() {
|
76
|
+
return torch::IntArrayRef(vec);
|
77
|
+
}
|
78
|
+
};
|
79
|
+
|
80
|
+
template<>
|
81
|
+
inline
|
82
|
+
IntArrayRef from_ruby<IntArrayRef>(Object x)
|
83
|
+
{
|
84
|
+
return IntArrayRef(x);
|
85
|
+
}
|
86
|
+
|
87
|
+
// for now
|
88
|
+
class Scalar {
|
89
|
+
torch::Scalar value;
|
90
|
+
public:
|
91
|
+
Scalar(Object o) {
|
92
|
+
// TODO cast based on Ruby type
|
93
|
+
if (o.rb_type() == T_FIXNUM) {
|
94
|
+
value = torch::Scalar(from_ruby<int64_t>(o));
|
95
|
+
} else {
|
96
|
+
value = torch::Scalar(from_ruby<float>(o));
|
97
|
+
}
|
98
|
+
}
|
99
|
+
operator torch::Scalar() {
|
100
|
+
return value;
|
101
|
+
}
|
102
|
+
};
|
103
|
+
|
104
|
+
template<>
|
105
|
+
inline
|
106
|
+
Scalar from_ruby<Scalar>(Object x)
|
107
|
+
{
|
108
|
+
return Scalar(x);
|
109
|
+
}
|
110
|
+
|
111
|
+
class TensorList {
|
112
|
+
std::vector<torch::Tensor> vec;
|
113
|
+
public:
|
114
|
+
TensorList(Object o) {
|
115
|
+
Array a = Array(o);
|
116
|
+
for (size_t i = 0; i < a.size(); i++) {
|
117
|
+
vec.push_back(from_ruby<torch::Tensor>(a[i]));
|
118
|
+
}
|
119
|
+
}
|
120
|
+
operator torch::TensorList() {
|
121
|
+
return torch::TensorList(vec);
|
122
|
+
}
|
123
|
+
};
|
124
|
+
|
125
|
+
template<>
|
126
|
+
inline
|
127
|
+
TensorList from_ruby<TensorList>(Object x)
|
128
|
+
{
|
129
|
+
return TensorList(x);
|
130
|
+
}
|
131
|
+
|
132
|
+
class FanModeType {
|
133
|
+
std::string s;
|
134
|
+
public:
|
135
|
+
FanModeType(Object o) {
|
136
|
+
s = String(o).str();
|
137
|
+
}
|
138
|
+
// TODO switch NonlinearityType after LibTorch 1.4 release
|
139
|
+
operator torch::nn::init::FanMode() {
|
140
|
+
if (s == "fan_in") {
|
141
|
+
return torch::nn::init::FanMode::FanIn;
|
142
|
+
} else if (s == "fan_out") {
|
143
|
+
return torch::nn::init::FanMode::FanOut;
|
144
|
+
} else {
|
145
|
+
throw std::runtime_error("Unsupported nonlinearity type: " + s);
|
146
|
+
}
|
147
|
+
}
|
148
|
+
};
|
149
|
+
|
150
|
+
template<>
|
151
|
+
inline
|
152
|
+
FanModeType from_ruby<FanModeType>(Object x)
|
153
|
+
{
|
154
|
+
return FanModeType(x);
|
155
|
+
}
|
156
|
+
|
157
|
+
class NonlinearityType {
|
158
|
+
std::string s;
|
159
|
+
public:
|
160
|
+
NonlinearityType(Object o) {
|
161
|
+
s = String(o).str();
|
162
|
+
}
|
163
|
+
// TODO switch NonlinearityType after LibTorch 1.4 release
|
164
|
+
operator torch::nn::init::Nonlinearity() {
|
165
|
+
if (s == "linear") {
|
166
|
+
return torch::nn::init::Nonlinearity::Linear;
|
167
|
+
} else if (s == "conv1d") {
|
168
|
+
return torch::nn::init::Nonlinearity::Conv1D;
|
169
|
+
} else if (s == "conv2d") {
|
170
|
+
return torch::nn::init::Nonlinearity::Conv2D;
|
171
|
+
} else if (s == "conv3d") {
|
172
|
+
return torch::nn::init::Nonlinearity::Conv3D;
|
173
|
+
} else if (s == "conv_transpose1d") {
|
174
|
+
return torch::nn::init::Nonlinearity::ConvTranspose1D;
|
175
|
+
} else if (s == "conv_transpose2d") {
|
176
|
+
return torch::nn::init::Nonlinearity::ConvTranspose2D;
|
177
|
+
} else if (s == "conv_transpose3d") {
|
178
|
+
return torch::nn::init::Nonlinearity::ConvTranspose3D;
|
179
|
+
} else if (s == "sigmoid") {
|
180
|
+
return torch::nn::init::Nonlinearity::Sigmoid;
|
181
|
+
} else if (s == "tanh") {
|
182
|
+
return torch::nn::init::Nonlinearity::Tanh;
|
183
|
+
} else if (s == "relu") {
|
184
|
+
return torch::nn::init::Nonlinearity::ReLU;
|
185
|
+
} else if (s == "leaky_relu") {
|
186
|
+
return torch::nn::init::Nonlinearity::LeakyReLU;
|
187
|
+
} else {
|
188
|
+
throw std::runtime_error("Unsupported nonlinearity type: " + s);
|
189
|
+
}
|
190
|
+
}
|
191
|
+
};
|
192
|
+
|
193
|
+
template<>
|
194
|
+
inline
|
195
|
+
NonlinearityType from_ruby<NonlinearityType>(Object x)
|
196
|
+
{
|
197
|
+
return NonlinearityType(x);
|
198
|
+
}
|
199
|
+
|
200
|
+
class MyReduction {
|
201
|
+
Object value;
|
202
|
+
public:
|
203
|
+
MyReduction(Object o) {
|
204
|
+
value = o;
|
205
|
+
}
|
206
|
+
operator int64_t() {
|
207
|
+
if (value.is_nil()) {
|
208
|
+
return Reduction::None;
|
209
|
+
}
|
210
|
+
|
211
|
+
std::string s = String(value).str();
|
212
|
+
if (s == "mean") {
|
213
|
+
return Reduction::Mean;
|
214
|
+
} else if (s == "sum") {
|
215
|
+
return Reduction::Sum;
|
216
|
+
} else {
|
217
|
+
throw std::runtime_error("Unsupported reduction: " + s);
|
218
|
+
}
|
219
|
+
}
|
220
|
+
};
|
221
|
+
|
222
|
+
template<>
|
223
|
+
inline
|
224
|
+
MyReduction from_ruby<MyReduction>(Object x)
|
225
|
+
{
|
226
|
+
return MyReduction(x);
|
227
|
+
}
|
228
|
+
|
229
|
+
// Shorthand alias used by the generated binding functions.
using Tensor = torch::Tensor;
|
230
|
+
|
231
|
+
class OptionalTensor {
|
232
|
+
Object value;
|
233
|
+
public:
|
234
|
+
OptionalTensor(Object o) {
|
235
|
+
value = o;
|
236
|
+
}
|
237
|
+
operator torch::Tensor() {
|
238
|
+
if (value.is_nil()) {
|
239
|
+
return {};
|
240
|
+
}
|
241
|
+
return from_ruby<torch::Tensor>(value);
|
242
|
+
}
|
243
|
+
};
|
244
|
+
|
245
|
+
template<>
|
246
|
+
inline
|
247
|
+
OptionalTensor from_ruby<OptionalTensor>(Object x)
|
248
|
+
{
|
249
|
+
return OptionalTensor(x);
|
250
|
+
}
|
@@ -0,0 +1,1860 @@
|
|
1
|
+
// generated by rake generate:functions
|
2
|
+
// do not edit by hand
|
3
|
+
|
4
|
+
#include <torch/torch.h>
|
5
|
+
#include <rice/Module.hpp>
|
6
|
+
#include "templates.hpp"
|
7
|
+
|
8
|
+
void add_tensor_functions(Module m) {
|
9
|
+
m
|
10
|
+
.define_method(
|
11
|
+
"_abs",
|
12
|
+
*[](const Tensor &self) {
|
13
|
+
return self.abs();
|
14
|
+
})
|
15
|
+
.define_method(
|
16
|
+
"_abs_",
|
17
|
+
*[](Tensor &self) {
|
18
|
+
return self.abs_();
|
19
|
+
})
|
20
|
+
.define_method(
|
21
|
+
"_acos",
|
22
|
+
*[](const Tensor &self) {
|
23
|
+
return self.acos();
|
24
|
+
})
|
25
|
+
.define_method(
|
26
|
+
"_acos_",
|
27
|
+
*[](Tensor &self) {
|
28
|
+
return self.acos_();
|
29
|
+
})
|
30
|
+
.define_method(
|
31
|
+
"_add__scalar",
|
32
|
+
*[](Tensor &self, Scalar other, Scalar alpha) {
|
33
|
+
return self.add_(other, alpha);
|
34
|
+
})
|
35
|
+
.define_method(
|
36
|
+
"_add__tensor",
|
37
|
+
*[](Tensor &self, const Tensor &other, Scalar alpha) {
|
38
|
+
return self.add_(other, alpha);
|
39
|
+
})
|
40
|
+
.define_method(
|
41
|
+
"_add_scalar",
|
42
|
+
*[](const Tensor &self, Scalar other, Scalar alpha) {
|
43
|
+
return self.add(other, alpha);
|
44
|
+
})
|
45
|
+
.define_method(
|
46
|
+
"_add_tensor",
|
47
|
+
*[](const Tensor &self, const Tensor &other, Scalar alpha) {
|
48
|
+
return self.add(other, alpha);
|
49
|
+
})
|
50
|
+
.define_method(
|
51
|
+
"_addbmm",
|
52
|
+
*[](const Tensor &self, const Tensor &batch1, const Tensor &batch2, Scalar beta, Scalar alpha) {
|
53
|
+
return self.addbmm(batch1, batch2, beta, alpha);
|
54
|
+
})
|
55
|
+
.define_method(
|
56
|
+
"_addbmm_",
|
57
|
+
*[](Tensor &self, const Tensor &batch1, const Tensor &batch2, Scalar beta, Scalar alpha) {
|
58
|
+
return self.addbmm_(batch1, batch2, beta, alpha);
|
59
|
+
})
|
60
|
+
.define_method(
|
61
|
+
"_addcdiv",
|
62
|
+
*[](const Tensor &self, const Tensor &tensor1, const Tensor &tensor2, Scalar value) {
|
63
|
+
return self.addcdiv(tensor1, tensor2, value);
|
64
|
+
})
|
65
|
+
.define_method(
|
66
|
+
"_addcdiv_",
|
67
|
+
*[](Tensor &self, const Tensor &tensor1, const Tensor &tensor2, Scalar value) {
|
68
|
+
return self.addcdiv_(tensor1, tensor2, value);
|
69
|
+
})
|
70
|
+
.define_method(
|
71
|
+
"_addcmul",
|
72
|
+
*[](const Tensor &self, const Tensor &tensor1, const Tensor &tensor2, Scalar value) {
|
73
|
+
return self.addcmul(tensor1, tensor2, value);
|
74
|
+
})
|
75
|
+
.define_method(
|
76
|
+
"_addcmul_",
|
77
|
+
*[](Tensor &self, const Tensor &tensor1, const Tensor &tensor2, Scalar value) {
|
78
|
+
return self.addcmul_(tensor1, tensor2, value);
|
79
|
+
})
|
80
|
+
.define_method(
|
81
|
+
"_addmm",
|
82
|
+
*[](const Tensor &self, const Tensor &mat1, const Tensor &mat2, Scalar beta, Scalar alpha) {
|
83
|
+
return self.addmm(mat1, mat2, beta, alpha);
|
84
|
+
})
|
85
|
+
.define_method(
|
86
|
+
"_addmm_",
|
87
|
+
*[](Tensor &self, const Tensor &mat1, const Tensor &mat2, Scalar beta, Scalar alpha) {
|
88
|
+
return self.addmm_(mat1, mat2, beta, alpha);
|
89
|
+
})
|
90
|
+
.define_method(
|
91
|
+
"_addmv",
|
92
|
+
*[](const Tensor &self, const Tensor &mat, const Tensor &vec, Scalar beta, Scalar alpha) {
|
93
|
+
return self.addmv(mat, vec, beta, alpha);
|
94
|
+
})
|
95
|
+
.define_method(
|
96
|
+
"_addmv_",
|
97
|
+
*[](Tensor &self, const Tensor &mat, const Tensor &vec, Scalar beta, Scalar alpha) {
|
98
|
+
return self.addmv_(mat, vec, beta, alpha);
|
99
|
+
})
|
100
|
+
.define_method(
|
101
|
+
"_addr",
|
102
|
+
*[](const Tensor &self, const Tensor &vec1, const Tensor &vec2, Scalar beta, Scalar alpha) {
|
103
|
+
return self.addr(vec1, vec2, beta, alpha);
|
104
|
+
})
|
105
|
+
.define_method(
|
106
|
+
"_addr_",
|
107
|
+
*[](Tensor &self, const Tensor &vec1, const Tensor &vec2, Scalar beta, Scalar alpha) {
|
108
|
+
return self.addr_(vec1, vec2, beta, alpha);
|
109
|
+
})
|
110
|
+
.define_method(
|
111
|
+
"_alias",
|
112
|
+
*[](Tensor &self) {
|
113
|
+
return self.alias();
|
114
|
+
})
|
115
|
+
.define_method(
|
116
|
+
"_align_as",
|
117
|
+
*[](const Tensor &self, const Tensor &other) {
|
118
|
+
return self.align_as(other);
|
119
|
+
})
|
120
|
+
.define_method(
|
121
|
+
"_all",
|
122
|
+
*[](const Tensor &self) {
|
123
|
+
return self.all();
|
124
|
+
})
|
125
|
+
.define_method(
|
126
|
+
"_all_dim",
|
127
|
+
*[](const Tensor &self, int64_t dim, bool keepdim) {
|
128
|
+
return self.all(dim, keepdim);
|
129
|
+
})
|
130
|
+
.define_method(
|
131
|
+
"_allclose",
|
132
|
+
*[](const Tensor &self, const Tensor &other, double rtol, double atol, bool equal_nan) {
|
133
|
+
return self.allclose(other, rtol, atol, equal_nan);
|
134
|
+
})
|
135
|
+
.define_method(
|
136
|
+
"_any",
|
137
|
+
*[](const Tensor &self) {
|
138
|
+
return self.any();
|
139
|
+
})
|
140
|
+
.define_method(
|
141
|
+
"_any_dim",
|
142
|
+
*[](const Tensor &self, int64_t dim, bool keepdim) {
|
143
|
+
return self.any(dim, keepdim);
|
144
|
+
})
|
145
|
+
.define_method(
|
146
|
+
"_argmax",
|
147
|
+
*[](const Tensor &self) {
|
148
|
+
return self.argmax();
|
149
|
+
})
|
150
|
+
.define_method(
|
151
|
+
"_argmax_dim",
|
152
|
+
*[](const Tensor &self, int64_t dim, bool keepdim) {
|
153
|
+
return self.argmax(dim, keepdim);
|
154
|
+
})
|
155
|
+
.define_method(
|
156
|
+
"_argmin",
|
157
|
+
*[](const Tensor &self) {
|
158
|
+
return self.argmin();
|
159
|
+
})
|
160
|
+
.define_method(
|
161
|
+
"_argmin_dim",
|
162
|
+
*[](const Tensor &self, int64_t dim, bool keepdim) {
|
163
|
+
return self.argmin(dim, keepdim);
|
164
|
+
})
|
165
|
+
.define_method(
|
166
|
+
"_argsort",
|
167
|
+
*[](const Tensor &self, int64_t dim, bool descending) {
|
168
|
+
return self.argsort(dim, descending);
|
169
|
+
})
|
170
|
+
.define_method(
|
171
|
+
"_as_strided",
|
172
|
+
*[](Tensor &self, IntArrayRef size, IntArrayRef stride) {
|
173
|
+
return self.as_strided(size, stride);
|
174
|
+
})
|
175
|
+
.define_method(
|
176
|
+
"_as_strided_",
|
177
|
+
*[](Tensor &self, IntArrayRef size, IntArrayRef stride) {
|
178
|
+
return self.as_strided_(size, stride);
|
179
|
+
})
|
180
|
+
.define_method(
|
181
|
+
"_as_strided__storage_offset",
|
182
|
+
*[](Tensor &self, IntArrayRef size, IntArrayRef stride, int64_t storage_offset) {
|
183
|
+
return self.as_strided_(size, stride, storage_offset);
|
184
|
+
})
|
185
|
+
.define_method(
|
186
|
+
"_as_strided_storage_offset",
|
187
|
+
*[](Tensor &self, IntArrayRef size, IntArrayRef stride, int64_t storage_offset) {
|
188
|
+
return self.as_strided(size, stride, storage_offset);
|
189
|
+
})
|
190
|
+
.define_method(
|
191
|
+
"_asin",
|
192
|
+
*[](const Tensor &self) {
|
193
|
+
return self.asin();
|
194
|
+
})
|
195
|
+
.define_method(
|
196
|
+
"_asin_",
|
197
|
+
*[](Tensor &self) {
|
198
|
+
return self.asin_();
|
199
|
+
})
|
200
|
+
.define_method(
|
201
|
+
"_atan",
|
202
|
+
*[](const Tensor &self) {
|
203
|
+
return self.atan();
|
204
|
+
})
|
205
|
+
.define_method(
|
206
|
+
"_atan2",
|
207
|
+
*[](const Tensor &self, const Tensor &other) {
|
208
|
+
return self.atan2(other);
|
209
|
+
})
|
210
|
+
.define_method(
|
211
|
+
"_atan2_",
|
212
|
+
*[](Tensor &self, const Tensor &other) {
|
213
|
+
return self.atan2_(other);
|
214
|
+
})
|
215
|
+
.define_method(
|
216
|
+
"_atan_",
|
217
|
+
*[](Tensor &self) {
|
218
|
+
return self.atan_();
|
219
|
+
})
|
220
|
+
.define_method(
|
221
|
+
"_backward",
|
222
|
+
*[](const Tensor &self, OptionalTensor gradient, bool keep_graph, bool create_graph) {
|
223
|
+
return self.backward(gradient, keep_graph, create_graph);
|
224
|
+
})
|
225
|
+
.define_method(
|
226
|
+
"_baddbmm",
|
227
|
+
*[](const Tensor &self, const Tensor &batch1, const Tensor &batch2, Scalar beta, Scalar alpha) {
|
228
|
+
return self.baddbmm(batch1, batch2, beta, alpha);
|
229
|
+
})
|
230
|
+
.define_method(
|
231
|
+
"_baddbmm_",
|
232
|
+
*[](Tensor &self, const Tensor &batch1, const Tensor &batch2, Scalar beta, Scalar alpha) {
|
233
|
+
return self.baddbmm_(batch1, batch2, beta, alpha);
|
234
|
+
})
|
235
|
+
.define_method(
|
236
|
+
"_bernoulli",
|
237
|
+
*[](const Tensor &self) {
|
238
|
+
return self.bernoulli();
|
239
|
+
})
|
240
|
+
.define_method(
|
241
|
+
"_bernoulli__float",
|
242
|
+
*[](Tensor &self, double p) {
|
243
|
+
return self.bernoulli_(p);
|
244
|
+
})
|
245
|
+
.define_method(
|
246
|
+
"_bernoulli__tensor",
|
247
|
+
*[](Tensor &self, const Tensor &p) {
|
248
|
+
return self.bernoulli_(p);
|
249
|
+
})
|
250
|
+
.define_method(
|
251
|
+
"_bernoulli_p",
|
252
|
+
*[](const Tensor &self, double p) {
|
253
|
+
return self.bernoulli(p);
|
254
|
+
})
|
255
|
+
.define_method(
|
256
|
+
"_bincount",
|
257
|
+
*[](const Tensor &self, OptionalTensor weights, int64_t minlength) {
|
258
|
+
return self.bincount(weights, minlength);
|
259
|
+
})
|
260
|
+
.define_method(
|
261
|
+
"_bitwise_not",
|
262
|
+
*[](const Tensor &self) {
|
263
|
+
return self.bitwise_not();
|
264
|
+
})
|
265
|
+
.define_method(
|
266
|
+
"_bitwise_not_",
|
267
|
+
*[](Tensor &self) {
|
268
|
+
return self.bitwise_not_();
|
269
|
+
})
|
270
|
+
.define_method(
|
271
|
+
"_bmm",
|
272
|
+
*[](const Tensor &self, const Tensor &mat2) {
|
273
|
+
return self.bmm(mat2);
|
274
|
+
})
|
275
|
+
.define_method(
|
276
|
+
"_cauchy_",
|
277
|
+
*[](Tensor &self, double median, double sigma) {
|
278
|
+
return self.cauchy_(median, sigma);
|
279
|
+
})
|
280
|
+
.define_method(
|
281
|
+
"_ceil",
|
282
|
+
*[](const Tensor &self) {
|
283
|
+
return self.ceil();
|
284
|
+
})
|
285
|
+
.define_method(
|
286
|
+
"_ceil_",
|
287
|
+
*[](Tensor &self) {
|
288
|
+
return self.ceil_();
|
289
|
+
})
|
290
|
+
.define_method(
|
291
|
+
"_cholesky",
|
292
|
+
*[](const Tensor &self, bool upper) {
|
293
|
+
return self.cholesky(upper);
|
294
|
+
})
|
295
|
+
.define_method(
|
296
|
+
"_cholesky_inverse",
|
297
|
+
*[](const Tensor &self, bool upper) {
|
298
|
+
return self.cholesky_inverse(upper);
|
299
|
+
})
|
300
|
+
.define_method(
|
301
|
+
"_cholesky_solve",
|
302
|
+
*[](const Tensor &self, const Tensor &input2, bool upper) {
|
303
|
+
return self.cholesky_solve(input2, upper);
|
304
|
+
})
|
305
|
+
.define_method(
|
306
|
+
"_chunk",
|
307
|
+
*[](Tensor &self, int64_t chunks, int64_t dim) {
|
308
|
+
return self.chunk(chunks, dim);
|
309
|
+
})
|
310
|
+
.define_method(
|
311
|
+
"_clamp_max",
|
312
|
+
*[](const Tensor &self, Scalar max) {
|
313
|
+
return self.clamp_max(max);
|
314
|
+
})
|
315
|
+
.define_method(
|
316
|
+
"_clamp_max_",
|
317
|
+
*[](Tensor &self, Scalar max) {
|
318
|
+
return self.clamp_max_(max);
|
319
|
+
})
|
320
|
+
.define_method(
|
321
|
+
"_clamp_min",
|
322
|
+
*[](const Tensor &self, Scalar min) {
|
323
|
+
return self.clamp_min(min);
|
324
|
+
})
|
325
|
+
.define_method(
|
326
|
+
"_clamp_min_",
|
327
|
+
*[](Tensor &self, Scalar min) {
|
328
|
+
return self.clamp_min_(min);
|
329
|
+
})
|
330
|
+
.define_method(
|
331
|
+
"_clone",
|
332
|
+
*[](const Tensor &self) {
|
333
|
+
return self.clone();
|
334
|
+
})
|
335
|
+
.define_method(
|
336
|
+
"_coalesce",
|
337
|
+
*[](const Tensor &self) {
|
338
|
+
return self.coalesce();
|
339
|
+
})
|
340
|
+
.define_method(
|
341
|
+
"_copy_",
|
342
|
+
*[](Tensor &self, const Tensor &src, bool non_blocking) {
|
343
|
+
return self.copy_(src, non_blocking);
|
344
|
+
})
|
345
|
+
.define_method(
|
346
|
+
"_cos",
|
347
|
+
*[](const Tensor &self) {
|
348
|
+
return self.cos();
|
349
|
+
})
|
350
|
+
.define_method(
|
351
|
+
"_cos_",
|
352
|
+
*[](Tensor &self) {
|
353
|
+
return self.cos_();
|
354
|
+
})
|
355
|
+
.define_method(
|
356
|
+
"_cosh",
|
357
|
+
*[](const Tensor &self) {
|
358
|
+
return self.cosh();
|
359
|
+
})
|
360
|
+
.define_method(
|
361
|
+
"_cosh_",
|
362
|
+
*[](Tensor &self) {
|
363
|
+
return self.cosh_();
|
364
|
+
})
|
365
|
+
.define_method(
|
366
|
+
"_data",
|
367
|
+
*[](const Tensor &self) {
|
368
|
+
return self.data();
|
369
|
+
})
|
370
|
+
.define_method(
|
371
|
+
"_dense_dim",
|
372
|
+
*[](const Tensor &self) {
|
373
|
+
return self.dense_dim();
|
374
|
+
})
|
375
|
+
.define_method(
|
376
|
+
"_dequantize",
|
377
|
+
*[](const Tensor &self) {
|
378
|
+
return self.dequantize();
|
379
|
+
})
|
380
|
+
.define_method(
|
381
|
+
"_det",
|
382
|
+
*[](const Tensor &self) {
|
383
|
+
return self.det();
|
384
|
+
})
|
385
|
+
.define_method(
|
386
|
+
"_detach",
|
387
|
+
*[](const Tensor &self) {
|
388
|
+
return self.detach();
|
389
|
+
})
|
390
|
+
.define_method(
|
391
|
+
"_detach_",
|
392
|
+
*[](Tensor &self) {
|
393
|
+
return self.detach_();
|
394
|
+
})
|
395
|
+
.define_method(
|
396
|
+
"_diag",
|
397
|
+
*[](const Tensor &self, int64_t diagonal) {
|
398
|
+
return self.diag(diagonal);
|
399
|
+
})
|
400
|
+
.define_method(
|
401
|
+
"_diag_embed",
|
402
|
+
*[](const Tensor &self, int64_t offset, int64_t dim1, int64_t dim2) {
|
403
|
+
return self.diag_embed(offset, dim1, dim2);
|
404
|
+
})
|
405
|
+
.define_method(
|
406
|
+
"_diagflat",
|
407
|
+
*[](const Tensor &self, int64_t offset) {
|
408
|
+
return self.diagflat(offset);
|
409
|
+
})
|
410
|
+
.define_method(
|
411
|
+
"_diagonal",
|
412
|
+
*[](Tensor &self, int64_t offset, int64_t dim1, int64_t dim2) {
|
413
|
+
return self.diagonal(offset, dim1, dim2);
|
414
|
+
})
|
415
|
+
.define_method(
|
416
|
+
"_digamma",
|
417
|
+
*[](const Tensor &self) {
|
418
|
+
return self.digamma();
|
419
|
+
})
|
420
|
+
.define_method(
|
421
|
+
"_digamma_",
|
422
|
+
*[](Tensor &self) {
|
423
|
+
return self.digamma_();
|
424
|
+
})
|
425
|
+
.define_method(
|
426
|
+
"_dist",
|
427
|
+
*[](const Tensor &self, const Tensor &other, Scalar p) {
|
428
|
+
return self.dist(other, p);
|
429
|
+
})
|
430
|
+
.define_method(
|
431
|
+
"_div__scalar",
|
432
|
+
*[](Tensor &self, Scalar other) {
|
433
|
+
return self.div_(other);
|
434
|
+
})
|
435
|
+
.define_method(
|
436
|
+
"_div__tensor",
|
437
|
+
*[](Tensor &self, const Tensor &other) {
|
438
|
+
return self.div_(other);
|
439
|
+
})
|
440
|
+
.define_method(
|
441
|
+
"_div_scalar",
|
442
|
+
*[](const Tensor &self, Scalar other) {
|
443
|
+
return self.div(other);
|
444
|
+
})
|
445
|
+
.define_method(
|
446
|
+
"_div_tensor",
|
447
|
+
*[](const Tensor &self, const Tensor &other) {
|
448
|
+
return self.div(other);
|
449
|
+
})
|
450
|
+
.define_method(
|
451
|
+
"_dot",
|
452
|
+
*[](const Tensor &self, const Tensor &tensor) {
|
453
|
+
return self.dot(tensor);
|
454
|
+
})
|
455
|
+
.define_method(
|
456
|
+
"_eig",
|
457
|
+
*[](const Tensor &self, bool eigenvectors) {
|
458
|
+
return self.eig(eigenvectors);
|
459
|
+
})
|
460
|
+
.define_method(
|
461
|
+
"_eq__scalar",
|
462
|
+
*[](Tensor &self, Scalar other) {
|
463
|
+
return self.eq_(other);
|
464
|
+
})
|
465
|
+
.define_method(
|
466
|
+
"_eq__tensor",
|
467
|
+
*[](Tensor &self, const Tensor &other) {
|
468
|
+
return self.eq_(other);
|
469
|
+
})
|
470
|
+
.define_method(
|
471
|
+
"_eq_scalar",
|
472
|
+
*[](const Tensor &self, Scalar other) {
|
473
|
+
return self.eq(other);
|
474
|
+
})
|
475
|
+
.define_method(
|
476
|
+
"_eq_tensor",
|
477
|
+
*[](const Tensor &self, const Tensor &other) {
|
478
|
+
return self.eq(other);
|
479
|
+
})
|
480
|
+
.define_method(
|
481
|
+
"_equal",
|
482
|
+
*[](const Tensor &self, const Tensor &other) {
|
483
|
+
return self.equal(other);
|
484
|
+
})
|
485
|
+
.define_method(
|
486
|
+
"_erf",
|
487
|
+
*[](const Tensor &self) {
|
488
|
+
return self.erf();
|
489
|
+
})
|
490
|
+
.define_method(
|
491
|
+
"_erf_",
|
492
|
+
*[](Tensor &self) {
|
493
|
+
return self.erf_();
|
494
|
+
})
|
495
|
+
.define_method(
|
496
|
+
"_erfc",
|
497
|
+
*[](const Tensor &self) {
|
498
|
+
return self.erfc();
|
499
|
+
})
|
500
|
+
.define_method(
|
501
|
+
"_erfc_",
|
502
|
+
*[](Tensor &self) {
|
503
|
+
return self.erfc_();
|
504
|
+
})
|
505
|
+
.define_method(
|
506
|
+
"_erfinv",
|
507
|
+
*[](const Tensor &self) {
|
508
|
+
return self.erfinv();
|
509
|
+
})
|
510
|
+
.define_method(
|
511
|
+
"_erfinv_",
|
512
|
+
*[](Tensor &self) {
|
513
|
+
return self.erfinv_();
|
514
|
+
})
|
515
|
+
.define_method(
|
516
|
+
"_exp",
|
517
|
+
*[](const Tensor &self) {
|
518
|
+
return self.exp();
|
519
|
+
})
|
520
|
+
.define_method(
|
521
|
+
"_exp_",
|
522
|
+
*[](Tensor &self) {
|
523
|
+
return self.exp_();
|
524
|
+
})
|
525
|
+
.define_method(
|
526
|
+
"_expand",
|
527
|
+
*[](Tensor &self, IntArrayRef size, bool implicit) {
|
528
|
+
return self.expand(size, implicit);
|
529
|
+
})
|
530
|
+
.define_method(
|
531
|
+
"_expand_as",
|
532
|
+
*[](const Tensor &self, const Tensor &other) {
|
533
|
+
return self.expand_as(other);
|
534
|
+
})
|
535
|
+
.define_method(
|
536
|
+
"_expm1",
|
537
|
+
*[](const Tensor &self) {
|
538
|
+
return self.expm1();
|
539
|
+
})
|
540
|
+
.define_method(
|
541
|
+
"_expm1_",
|
542
|
+
*[](Tensor &self) {
|
543
|
+
return self.expm1_();
|
544
|
+
})
|
545
|
+
.define_method(
|
546
|
+
"_exponential_",
|
547
|
+
*[](Tensor &self, double lambd) {
|
548
|
+
return self.exponential_(lambd);
|
549
|
+
})
|
550
|
+
.define_method(
|
551
|
+
"_fft",
|
552
|
+
*[](const Tensor &self, int64_t signal_ndim, bool normalized) {
|
553
|
+
return self.fft(signal_ndim, normalized);
|
554
|
+
})
|
555
|
+
.define_method(
|
556
|
+
"_fill__scalar",
|
557
|
+
*[](Tensor &self, Scalar value) {
|
558
|
+
return self.fill_(value);
|
559
|
+
})
|
560
|
+
.define_method(
|
561
|
+
"_fill__tensor",
|
562
|
+
*[](Tensor &self, const Tensor &value) {
|
563
|
+
return self.fill_(value);
|
564
|
+
})
|
565
|
+
.define_method(
|
566
|
+
"_fill_diagonal_",
|
567
|
+
*[](Tensor &self, Scalar fill_value, bool wrap) {
|
568
|
+
return self.fill_diagonal_(fill_value, wrap);
|
569
|
+
})
|
570
|
+
.define_method(
|
571
|
+
"_flatten_using_ints",
|
572
|
+
*[](const Tensor &self, int64_t start_dim, int64_t end_dim) {
|
573
|
+
return self.flatten(start_dim, end_dim);
|
574
|
+
})
|
575
|
+
.define_method(
|
576
|
+
"_flip",
|
577
|
+
*[](const Tensor &self, IntArrayRef dims) {
|
578
|
+
return self.flip(dims);
|
579
|
+
})
|
580
|
+
.define_method(
|
581
|
+
"_floor",
|
582
|
+
*[](const Tensor &self) {
|
583
|
+
return self.floor();
|
584
|
+
})
|
585
|
+
.define_method(
|
586
|
+
"_floor_",
|
587
|
+
*[](Tensor &self) {
|
588
|
+
return self.floor_();
|
589
|
+
})
|
590
|
+
.define_method(
|
591
|
+
"_fmod__scalar",
|
592
|
+
*[](Tensor &self, Scalar other) {
|
593
|
+
return self.fmod_(other);
|
594
|
+
})
|
595
|
+
.define_method(
|
596
|
+
"_fmod__tensor",
|
597
|
+
*[](Tensor &self, const Tensor &other) {
|
598
|
+
return self.fmod_(other);
|
599
|
+
})
|
600
|
+
.define_method(
|
601
|
+
"_fmod_scalar",
|
602
|
+
*[](const Tensor &self, Scalar other) {
|
603
|
+
return self.fmod(other);
|
604
|
+
})
|
605
|
+
.define_method(
|
606
|
+
"_fmod_tensor",
|
607
|
+
*[](const Tensor &self, const Tensor &other) {
|
608
|
+
return self.fmod(other);
|
609
|
+
})
|
610
|
+
.define_method(
|
611
|
+
"_frac",
|
612
|
+
*[](const Tensor &self) {
|
613
|
+
return self.frac();
|
614
|
+
})
|
615
|
+
.define_method(
|
616
|
+
"_frac_",
|
617
|
+
*[](Tensor &self) {
|
618
|
+
return self.frac_();
|
619
|
+
})
|
620
|
+
.define_method(
|
621
|
+
"_gather",
|
622
|
+
*[](const Tensor &self, int64_t dim, const Tensor &index, bool sparse_grad) {
|
623
|
+
return self.gather(dim, index, sparse_grad);
|
624
|
+
})
|
625
|
+
.define_method(
|
626
|
+
"_ge__scalar",
|
627
|
+
*[](Tensor &self, Scalar other) {
|
628
|
+
return self.ge_(other);
|
629
|
+
})
|
630
|
+
.define_method(
|
631
|
+
"_ge__tensor",
|
632
|
+
*[](Tensor &self, const Tensor &other) {
|
633
|
+
return self.ge_(other);
|
634
|
+
})
|
635
|
+
.define_method(
|
636
|
+
"_ge_scalar",
|
637
|
+
*[](const Tensor &self, Scalar other) {
|
638
|
+
return self.ge(other);
|
639
|
+
})
|
640
|
+
.define_method(
|
641
|
+
"_ge_tensor",
|
642
|
+
*[](const Tensor &self, const Tensor &other) {
|
643
|
+
return self.ge(other);
|
644
|
+
})
|
645
|
+
.define_method(
|
646
|
+
"_geometric_",
|
647
|
+
*[](Tensor &self, double p) {
|
648
|
+
return self.geometric_(p);
|
649
|
+
})
|
650
|
+
.define_method(
|
651
|
+
"_geqrf",
|
652
|
+
*[](const Tensor &self) {
|
653
|
+
return self.geqrf();
|
654
|
+
})
|
655
|
+
.define_method(
|
656
|
+
"_ger",
|
657
|
+
*[](const Tensor &self, const Tensor &vec2) {
|
658
|
+
return self.ger(vec2);
|
659
|
+
})
|
660
|
+
.define_method(
|
661
|
+
"_gt__scalar",
|
662
|
+
*[](Tensor &self, Scalar other) {
|
663
|
+
return self.gt_(other);
|
664
|
+
})
|
665
|
+
.define_method(
|
666
|
+
"_gt__tensor",
|
667
|
+
*[](Tensor &self, const Tensor &other) {
|
668
|
+
return self.gt_(other);
|
669
|
+
})
|
670
|
+
.define_method(
|
671
|
+
"_gt_scalar",
|
672
|
+
*[](const Tensor &self, Scalar other) {
|
673
|
+
return self.gt(other);
|
674
|
+
})
|
675
|
+
.define_method(
|
676
|
+
"_gt_tensor",
|
677
|
+
*[](const Tensor &self, const Tensor &other) {
|
678
|
+
return self.gt(other);
|
679
|
+
})
|
680
|
+
.define_method(
|
681
|
+
"_hardshrink",
|
682
|
+
*[](const Tensor &self, Scalar lambd) {
|
683
|
+
return self.hardshrink(lambd);
|
684
|
+
})
|
685
|
+
.define_method(
|
686
|
+
"_histc",
|
687
|
+
*[](const Tensor &self, int64_t bins, Scalar min, Scalar max) {
|
688
|
+
return self.histc(bins, min, max);
|
689
|
+
})
|
690
|
+
.define_method(
|
691
|
+
"_ifft",
|
692
|
+
*[](const Tensor &self, int64_t signal_ndim, bool normalized) {
|
693
|
+
return self.ifft(signal_ndim, normalized);
|
694
|
+
})
|
695
|
+
.define_method(
|
696
|
+
"_index_add",
|
697
|
+
*[](const Tensor &self, int64_t dim, const Tensor &index, const Tensor &source) {
|
698
|
+
return self.index_add(dim, index, source);
|
699
|
+
})
|
700
|
+
.define_method(
|
701
|
+
"_index_add_",
|
702
|
+
*[](Tensor &self, int64_t dim, const Tensor &index, const Tensor &source) {
|
703
|
+
return self.index_add_(dim, index, source);
|
704
|
+
})
|
705
|
+
.define_method(
|
706
|
+
"_index_copy",
|
707
|
+
*[](const Tensor &self, int64_t dim, const Tensor &index, const Tensor &source) {
|
708
|
+
return self.index_copy(dim, index, source);
|
709
|
+
})
|
710
|
+
.define_method(
|
711
|
+
"_index_copy_",
|
712
|
+
*[](Tensor &self, int64_t dim, const Tensor &index, const Tensor &source) {
|
713
|
+
return self.index_copy_(dim, index, source);
|
714
|
+
})
|
715
|
+
.define_method(
|
716
|
+
"_index_fill__scalar",
|
717
|
+
*[](Tensor &self, int64_t dim, const Tensor &index, Scalar value) {
|
718
|
+
return self.index_fill_(dim, index, value);
|
719
|
+
})
|
720
|
+
.define_method(
|
721
|
+
"_index_fill__tensor",
|
722
|
+
*[](Tensor &self, int64_t dim, const Tensor &index, const Tensor &value) {
|
723
|
+
return self.index_fill_(dim, index, value);
|
724
|
+
})
|
725
|
+
.define_method(
|
726
|
+
"_index_fill_scalar",
|
727
|
+
*[](const Tensor &self, int64_t dim, const Tensor &index, Scalar value) {
|
728
|
+
return self.index_fill(dim, index, value);
|
729
|
+
})
|
730
|
+
.define_method(
|
731
|
+
"_index_fill_tensor",
|
732
|
+
*[](const Tensor &self, int64_t dim, const Tensor &index, const Tensor &value) {
|
733
|
+
return self.index_fill(dim, index, value);
|
734
|
+
})
|
735
|
+
.define_method(
|
736
|
+
"_index_select",
|
737
|
+
*[](const Tensor &self, int64_t dim, const Tensor &index) {
|
738
|
+
return self.index_select(dim, index);
|
739
|
+
})
|
740
|
+
.define_method(
|
741
|
+
"_indices",
|
742
|
+
*[](Tensor &self) {
|
743
|
+
return self.indices();
|
744
|
+
})
|
745
|
+
.define_method(
|
746
|
+
"_int_repr",
|
747
|
+
*[](const Tensor &self) {
|
748
|
+
return self.int_repr();
|
749
|
+
})
|
750
|
+
.define_method(
|
751
|
+
"_inverse",
|
752
|
+
*[](const Tensor &self) {
|
753
|
+
return self.inverse();
|
754
|
+
})
|
755
|
+
.define_method(
|
756
|
+
"_irfft",
|
757
|
+
*[](const Tensor &self, int64_t signal_ndim, bool normalized, bool onesided, IntArrayRef signal_sizes) {
|
758
|
+
return self.irfft(signal_ndim, normalized, onesided, signal_sizes);
|
759
|
+
})
|
760
|
+
.define_method(
|
761
|
+
"_is_coalesced",
|
762
|
+
*[](const Tensor &self) {
|
763
|
+
return self.is_coalesced();
|
764
|
+
})
|
765
|
+
.define_method(
|
766
|
+
"_is_complex",
|
767
|
+
*[](const Tensor &self) {
|
768
|
+
return self.is_complex();
|
769
|
+
})
|
770
|
+
.define_method(
|
771
|
+
"_is_distributed",
|
772
|
+
*[](const Tensor &self) {
|
773
|
+
return self.is_distributed();
|
774
|
+
})
|
775
|
+
.define_method(
|
776
|
+
"_is_floating_point",
|
777
|
+
*[](const Tensor &self) {
|
778
|
+
return self.is_floating_point();
|
779
|
+
})
|
780
|
+
.define_method(
|
781
|
+
"_is_leaf",
|
782
|
+
*[](const Tensor &self) {
|
783
|
+
return self.is_leaf();
|
784
|
+
})
|
785
|
+
.define_method(
|
786
|
+
"_is_nonzero",
|
787
|
+
*[](const Tensor &self) {
|
788
|
+
return self.is_nonzero();
|
789
|
+
})
|
790
|
+
.define_method(
|
791
|
+
"_is_pinned",
|
792
|
+
*[](const Tensor &self) {
|
793
|
+
return self.is_pinned();
|
794
|
+
})
|
795
|
+
.define_method(
|
796
|
+
"_is_same_size",
|
797
|
+
*[](const Tensor &self, const Tensor &other) {
|
798
|
+
return self.is_same_size(other);
|
799
|
+
})
|
800
|
+
.define_method(
|
801
|
+
"_is_set_to",
|
802
|
+
*[](const Tensor &self, const Tensor &tensor) {
|
803
|
+
return self.is_set_to(tensor);
|
804
|
+
})
|
805
|
+
.define_method(
|
806
|
+
"_is_signed",
|
807
|
+
*[](const Tensor &self) {
|
808
|
+
return self.is_signed();
|
809
|
+
})
|
810
|
+
.define_method(
|
811
|
+
"_isclose",
|
812
|
+
*[](const Tensor &self, const Tensor &other, double rtol, double atol, bool equal_nan) {
|
813
|
+
return self.isclose(other, rtol, atol, equal_nan);
|
814
|
+
})
|
815
|
+
.define_method(
|
816
|
+
"_item",
|
817
|
+
*[](const Tensor &self) {
|
818
|
+
return self.item();
|
819
|
+
})
|
820
|
+
.define_method(
|
821
|
+
"_kthvalue",
|
822
|
+
*[](const Tensor &self, int64_t k, int64_t dim, bool keepdim) {
|
823
|
+
return self.kthvalue(k, dim, keepdim);
|
824
|
+
})
|
825
|
+
.define_method(
|
826
|
+
"_le__scalar",
|
827
|
+
*[](Tensor &self, Scalar other) {
|
828
|
+
return self.le_(other);
|
829
|
+
})
|
830
|
+
.define_method(
|
831
|
+
"_le__tensor",
|
832
|
+
*[](Tensor &self, const Tensor &other) {
|
833
|
+
return self.le_(other);
|
834
|
+
})
|
835
|
+
.define_method(
|
836
|
+
"_le_scalar",
|
837
|
+
*[](const Tensor &self, Scalar other) {
|
838
|
+
return self.le(other);
|
839
|
+
})
|
840
|
+
.define_method(
|
841
|
+
"_le_tensor",
|
842
|
+
*[](const Tensor &self, const Tensor &other) {
|
843
|
+
return self.le(other);
|
844
|
+
})
|
845
|
+
.define_method(
|
846
|
+
"_lerp__scalar",
|
847
|
+
*[](Tensor &self, const Tensor &end, Scalar weight) {
|
848
|
+
return self.lerp_(end, weight);
|
849
|
+
})
|
850
|
+
.define_method(
|
851
|
+
"_lerp__tensor",
|
852
|
+
*[](Tensor &self, const Tensor &end, const Tensor &weight) {
|
853
|
+
return self.lerp_(end, weight);
|
854
|
+
})
|
855
|
+
.define_method(
|
856
|
+
"_lerp_scalar",
|
857
|
+
*[](const Tensor &self, const Tensor &end, Scalar weight) {
|
858
|
+
return self.lerp(end, weight);
|
859
|
+
})
|
860
|
+
.define_method(
|
861
|
+
"_lerp_tensor",
|
862
|
+
*[](const Tensor &self, const Tensor &end, const Tensor &weight) {
|
863
|
+
return self.lerp(end, weight);
|
864
|
+
})
|
865
|
+
.define_method(
|
866
|
+
"_lgamma",
|
867
|
+
*[](const Tensor &self) {
|
868
|
+
return self.lgamma();
|
869
|
+
})
|
870
|
+
.define_method(
|
871
|
+
"_lgamma_",
|
872
|
+
*[](Tensor &self) {
|
873
|
+
return self.lgamma_();
|
874
|
+
})
|
875
|
+
.define_method(
|
876
|
+
"_log",
|
877
|
+
*[](const Tensor &self) {
|
878
|
+
return self.log();
|
879
|
+
})
|
880
|
+
.define_method(
|
881
|
+
"_log10",
|
882
|
+
*[](const Tensor &self) {
|
883
|
+
return self.log10();
|
884
|
+
})
|
885
|
+
.define_method(
|
886
|
+
"_log10_",
|
887
|
+
*[](Tensor &self) {
|
888
|
+
return self.log10_();
|
889
|
+
})
|
890
|
+
.define_method(
|
891
|
+
"_log1p",
|
892
|
+
*[](const Tensor &self) {
|
893
|
+
return self.log1p();
|
894
|
+
})
|
895
|
+
.define_method(
|
896
|
+
"_log1p_",
|
897
|
+
*[](Tensor &self) {
|
898
|
+
return self.log1p_();
|
899
|
+
})
|
900
|
+
.define_method(
|
901
|
+
"_log2",
|
902
|
+
*[](const Tensor &self) {
|
903
|
+
return self.log2();
|
904
|
+
})
|
905
|
+
.define_method(
|
906
|
+
"_log2_",
|
907
|
+
*[](Tensor &self) {
|
908
|
+
return self.log2_();
|
909
|
+
})
|
910
|
+
.define_method(
|
911
|
+
"_log_",
|
912
|
+
*[](Tensor &self) {
|
913
|
+
return self.log_();
|
914
|
+
})
|
915
|
+
.define_method(
|
916
|
+
"_log_normal_",
|
917
|
+
*[](Tensor &self, double mean, double std) {
|
918
|
+
return self.log_normal_(mean, std);
|
919
|
+
})
|
920
|
+
.define_method(
|
921
|
+
"_logdet",
|
922
|
+
*[](const Tensor &self) {
|
923
|
+
return self.logdet();
|
924
|
+
})
|
925
|
+
.define_method(
|
926
|
+
"_logical_not",
|
927
|
+
*[](const Tensor &self) {
|
928
|
+
return self.logical_not();
|
929
|
+
})
|
930
|
+
.define_method(
|
931
|
+
"_logical_not_",
|
932
|
+
*[](Tensor &self) {
|
933
|
+
return self.logical_not_();
|
934
|
+
})
|
935
|
+
.define_method(
|
936
|
+
"_logical_xor",
|
937
|
+
*[](const Tensor &self, const Tensor &other) {
|
938
|
+
return self.logical_xor(other);
|
939
|
+
})
|
940
|
+
.define_method(
|
941
|
+
"_logical_xor_",
|
942
|
+
*[](Tensor &self, const Tensor &other) {
|
943
|
+
return self.logical_xor_(other);
|
944
|
+
})
|
945
|
+
.define_method(
|
946
|
+
"_logsumexp",
|
947
|
+
*[](const Tensor &self, IntArrayRef dim, bool keepdim) {
|
948
|
+
return self.logsumexp(dim, keepdim);
|
949
|
+
})
|
950
|
+
.define_method(
|
951
|
+
"_lstsq",
|
952
|
+
*[](const Tensor &self, const Tensor &A) {
|
953
|
+
return self.lstsq(A);
|
954
|
+
})
|
955
|
+
.define_method(
|
956
|
+
"_lt__scalar",
|
957
|
+
*[](Tensor &self, Scalar other) {
|
958
|
+
return self.lt_(other);
|
959
|
+
})
|
960
|
+
.define_method(
|
961
|
+
"_lt__tensor",
|
962
|
+
*[](Tensor &self, const Tensor &other) {
|
963
|
+
return self.lt_(other);
|
964
|
+
})
|
965
|
+
.define_method(
|
966
|
+
"_lt_scalar",
|
967
|
+
*[](const Tensor &self, Scalar other) {
|
968
|
+
return self.lt(other);
|
969
|
+
})
|
970
|
+
.define_method(
|
971
|
+
"_lt_tensor",
|
972
|
+
*[](const Tensor &self, const Tensor &other) {
|
973
|
+
return self.lt(other);
|
974
|
+
})
|
975
|
+
.define_method(
|
976
|
+
"_lu_solve",
|
977
|
+
*[](const Tensor &self, const Tensor &LU_data, const Tensor &LU_pivots) {
|
978
|
+
return self.lu_solve(LU_data, LU_pivots);
|
979
|
+
})
|
980
|
+
.define_method(
|
981
|
+
"_masked_fill__scalar",
|
982
|
+
*[](Tensor &self, const Tensor &mask, Scalar value) {
|
983
|
+
return self.masked_fill_(mask, value);
|
984
|
+
})
|
985
|
+
.define_method(
|
986
|
+
"_masked_fill__tensor",
|
987
|
+
*[](Tensor &self, const Tensor &mask, const Tensor &value) {
|
988
|
+
return self.masked_fill_(mask, value);
|
989
|
+
})
|
990
|
+
.define_method(
|
991
|
+
"_masked_fill_scalar",
|
992
|
+
*[](const Tensor &self, const Tensor &mask, Scalar value) {
|
993
|
+
return self.masked_fill(mask, value);
|
994
|
+
})
|
995
|
+
.define_method(
|
996
|
+
"_masked_fill_tensor",
|
997
|
+
*[](const Tensor &self, const Tensor &mask, const Tensor &value) {
|
998
|
+
return self.masked_fill(mask, value);
|
999
|
+
})
|
1000
|
+
.define_method(
|
1001
|
+
"_masked_scatter",
|
1002
|
+
*[](const Tensor &self, const Tensor &mask, const Tensor &source) {
|
1003
|
+
return self.masked_scatter(mask, source);
|
1004
|
+
})
|
1005
|
+
.define_method(
|
1006
|
+
"_masked_scatter_",
|
1007
|
+
*[](Tensor &self, const Tensor &mask, const Tensor &source) {
|
1008
|
+
return self.masked_scatter_(mask, source);
|
1009
|
+
})
|
1010
|
+
.define_method(
|
1011
|
+
"_masked_select",
|
1012
|
+
*[](const Tensor &self, const Tensor &mask) {
|
1013
|
+
return self.masked_select(mask);
|
1014
|
+
})
|
1015
|
+
.define_method(
|
1016
|
+
"_matmul",
|
1017
|
+
*[](const Tensor &self, const Tensor &other) {
|
1018
|
+
return self.matmul(other);
|
1019
|
+
})
|
1020
|
+
.define_method(
|
1021
|
+
"_matrix_power",
|
1022
|
+
*[](const Tensor &self, int64_t n) {
|
1023
|
+
return self.matrix_power(n);
|
1024
|
+
})
|
1025
|
+
.define_method(
|
1026
|
+
"_max",
|
1027
|
+
*[](const Tensor &self) {
|
1028
|
+
return self.max();
|
1029
|
+
})
|
1030
|
+
.define_method(
|
1031
|
+
"_max_dim",
|
1032
|
+
*[](const Tensor &self, int64_t dim, bool keepdim) {
|
1033
|
+
return self.max(dim, keepdim);
|
1034
|
+
})
|
1035
|
+
.define_method(
|
1036
|
+
"_max_other",
|
1037
|
+
*[](const Tensor &self, const Tensor &other) {
|
1038
|
+
return self.max(other);
|
1039
|
+
})
|
1040
|
+
.define_method(
|
1041
|
+
"_max_values",
|
1042
|
+
*[](const Tensor &self, IntArrayRef dim, bool keepdim) {
|
1043
|
+
return self.max_values(dim, keepdim);
|
1044
|
+
})
|
1045
|
+
.define_method(
|
1046
|
+
"_median",
|
1047
|
+
*[](const Tensor &self) {
|
1048
|
+
return self.median();
|
1049
|
+
})
|
1050
|
+
.define_method(
|
1051
|
+
"_median_dim",
|
1052
|
+
*[](const Tensor &self, int64_t dim, bool keepdim) {
|
1053
|
+
return self.median(dim, keepdim);
|
1054
|
+
})
|
1055
|
+
.define_method(
|
1056
|
+
"_min",
|
1057
|
+
*[](const Tensor &self) {
|
1058
|
+
return self.min();
|
1059
|
+
})
|
1060
|
+
.define_method(
|
1061
|
+
"_min_dim",
|
1062
|
+
*[](const Tensor &self, int64_t dim, bool keepdim) {
|
1063
|
+
return self.min(dim, keepdim);
|
1064
|
+
})
|
1065
|
+
.define_method(
|
1066
|
+
"_min_other",
|
1067
|
+
*[](const Tensor &self, const Tensor &other) {
|
1068
|
+
return self.min(other);
|
1069
|
+
})
|
1070
|
+
.define_method(
|
1071
|
+
"_min_values",
|
1072
|
+
*[](const Tensor &self, IntArrayRef dim, bool keepdim) {
|
1073
|
+
return self.min_values(dim, keepdim);
|
1074
|
+
})
|
1075
|
+
.define_method(
|
1076
|
+
"_mm",
|
1077
|
+
*[](const Tensor &self, const Tensor &mat2) {
|
1078
|
+
return self.mm(mat2);
|
1079
|
+
})
|
1080
|
+
.define_method(
|
1081
|
+
"_mode",
|
1082
|
+
*[](const Tensor &self, int64_t dim, bool keepdim) {
|
1083
|
+
return self.mode(dim, keepdim);
|
1084
|
+
})
|
1085
|
+
.define_method(
|
1086
|
+
"_mul__scalar",
|
1087
|
+
*[](Tensor &self, Scalar other) {
|
1088
|
+
return self.mul_(other);
|
1089
|
+
})
|
1090
|
+
.define_method(
|
1091
|
+
"_mul__tensor",
|
1092
|
+
*[](Tensor &self, const Tensor &other) {
|
1093
|
+
return self.mul_(other);
|
1094
|
+
})
|
1095
|
+
.define_method(
|
1096
|
+
"_mul_scalar",
|
1097
|
+
*[](const Tensor &self, Scalar other) {
|
1098
|
+
return self.mul(other);
|
1099
|
+
})
|
1100
|
+
.define_method(
|
1101
|
+
"_mul_tensor",
|
1102
|
+
*[](const Tensor &self, const Tensor &other) {
|
1103
|
+
return self.mul(other);
|
1104
|
+
})
|
1105
|
+
.define_method(
|
1106
|
+
"_multinomial",
|
1107
|
+
*[](const Tensor &self, int64_t num_samples, bool replacement) {
|
1108
|
+
return self.multinomial(num_samples, replacement);
|
1109
|
+
})
|
1110
|
+
.define_method(
|
1111
|
+
"_mv",
|
1112
|
+
*[](const Tensor &self, const Tensor &vec) {
|
1113
|
+
return self.mv(vec);
|
1114
|
+
})
|
1115
|
+
.define_method(
|
1116
|
+
"_mvlgamma",
|
1117
|
+
*[](const Tensor &self, int64_t p) {
|
1118
|
+
return self.mvlgamma(p);
|
1119
|
+
})
|
1120
|
+
.define_method(
|
1121
|
+
"_mvlgamma_",
|
1122
|
+
*[](Tensor &self, int64_t p) {
|
1123
|
+
return self.mvlgamma_(p);
|
1124
|
+
})
|
1125
|
+
.define_method(
|
1126
|
+
"_narrow",
|
1127
|
+
*[](Tensor &self, int64_t dim, int64_t start, int64_t length) {
|
1128
|
+
return self.narrow(dim, start, length);
|
1129
|
+
})
|
1130
|
+
.define_method(
|
1131
|
+
"_narrow_copy",
|
1132
|
+
*[](const Tensor &self, int64_t dim, int64_t start, int64_t length) {
|
1133
|
+
return self.narrow_copy(dim, start, length);
|
1134
|
+
})
|
1135
|
+
.define_method(
|
1136
|
+
"_ne__scalar",
|
1137
|
+
*[](Tensor &self, Scalar other) {
|
1138
|
+
return self.ne_(other);
|
1139
|
+
})
|
1140
|
+
.define_method(
|
1141
|
+
"_ne__tensor",
|
1142
|
+
*[](Tensor &self, const Tensor &other) {
|
1143
|
+
return self.ne_(other);
|
1144
|
+
})
|
1145
|
+
.define_method(
|
1146
|
+
"_ne_scalar",
|
1147
|
+
*[](const Tensor &self, Scalar other) {
|
1148
|
+
return self.ne(other);
|
1149
|
+
})
|
1150
|
+
.define_method(
|
1151
|
+
"_ne_tensor",
|
1152
|
+
*[](const Tensor &self, const Tensor &other) {
|
1153
|
+
return self.ne(other);
|
1154
|
+
})
|
1155
|
+
.define_method(
|
1156
|
+
"_neg",
|
1157
|
+
*[](const Tensor &self) {
|
1158
|
+
return self.neg();
|
1159
|
+
})
|
1160
|
+
.define_method(
|
1161
|
+
"_neg_",
|
1162
|
+
*[](Tensor &self) {
|
1163
|
+
return self.neg_();
|
1164
|
+
})
|
1165
|
+
.define_method(
|
1166
|
+
"_nonzero",
|
1167
|
+
*[](const Tensor &self) {
|
1168
|
+
return self.nonzero();
|
1169
|
+
})
|
1170
|
+
.define_method(
|
1171
|
+
"_nonzero_numpy",
|
1172
|
+
*[](const Tensor &self) {
|
1173
|
+
return self.nonzero_numpy();
|
1174
|
+
})
|
1175
|
+
.define_method(
|
1176
|
+
"_norm_scalar",
|
1177
|
+
*[](const Tensor &self, Scalar p) {
|
1178
|
+
return self.norm(p);
|
1179
|
+
})
|
1180
|
+
.define_method(
|
1181
|
+
"_normal_",
|
1182
|
+
*[](Tensor &self, double mean, double std) {
|
1183
|
+
return self.normal_(mean, std);
|
1184
|
+
})
|
1185
|
+
.define_method(
|
1186
|
+
"_numel",
|
1187
|
+
*[](const Tensor &self) {
|
1188
|
+
return self.numel();
|
1189
|
+
})
|
1190
|
+
.define_method(
|
1191
|
+
"_numpy_t",
|
1192
|
+
*[](Tensor &self) {
|
1193
|
+
return self.numpy_T();
|
1194
|
+
})
|
1195
|
+
.define_method(
|
1196
|
+
"_orgqr",
|
1197
|
+
*[](const Tensor &self, const Tensor &input2) {
|
1198
|
+
return self.orgqr(input2);
|
1199
|
+
})
|
1200
|
+
.define_method(
|
1201
|
+
"_ormqr",
|
1202
|
+
*[](const Tensor &self, const Tensor &input2, const Tensor &input3, bool left, bool transpose) {
|
1203
|
+
return self.ormqr(input2, input3, left, transpose);
|
1204
|
+
})
|
1205
|
+
.define_method(
|
1206
|
+
"_output_nr",
|
1207
|
+
*[](const Tensor &self) {
|
1208
|
+
return self.output_nr();
|
1209
|
+
})
|
1210
|
+
.define_method(
|
1211
|
+
"_permute",
|
1212
|
+
*[](Tensor &self, IntArrayRef dims) {
|
1213
|
+
return self.permute(dims);
|
1214
|
+
})
|
1215
|
+
.define_method(
|
1216
|
+
"_pin_memory",
|
1217
|
+
*[](const Tensor &self) {
|
1218
|
+
return self.pin_memory();
|
1219
|
+
})
|
1220
|
+
.define_method(
|
1221
|
+
"_pinverse",
|
1222
|
+
*[](const Tensor &self, double rcond) {
|
1223
|
+
return self.pinverse(rcond);
|
1224
|
+
})
|
1225
|
+
.define_method(
|
1226
|
+
"_polygamma",
|
1227
|
+
*[](int64_t n, const Tensor &self) {
|
1228
|
+
return self.polygamma(n);
|
1229
|
+
})
|
1230
|
+
.define_method(
|
1231
|
+
"_polygamma_",
|
1232
|
+
*[](Tensor &self, int64_t n) {
|
1233
|
+
return self.polygamma_(n);
|
1234
|
+
})
|
1235
|
+
.define_method(
|
1236
|
+
"_pow__scalar",
|
1237
|
+
*[](Tensor &self, Scalar exponent) {
|
1238
|
+
return self.pow_(exponent);
|
1239
|
+
})
|
1240
|
+
.define_method(
|
1241
|
+
"_pow__tensor",
|
1242
|
+
*[](Tensor &self, const Tensor &exponent) {
|
1243
|
+
return self.pow_(exponent);
|
1244
|
+
})
|
1245
|
+
.define_method(
|
1246
|
+
"_pow_tensor_scalar",
|
1247
|
+
*[](const Tensor &self, Scalar exponent) {
|
1248
|
+
return self.pow(exponent);
|
1249
|
+
})
|
1250
|
+
.define_method(
|
1251
|
+
"_pow_tensor_tensor",
|
1252
|
+
*[](const Tensor &self, const Tensor &exponent) {
|
1253
|
+
return self.pow(exponent);
|
1254
|
+
})
|
1255
|
+
.define_method(
|
1256
|
+
"_prelu",
|
1257
|
+
*[](const Tensor &self, const Tensor &weight) {
|
1258
|
+
return self.prelu(weight);
|
1259
|
+
})
|
1260
|
+
.define_method(
|
1261
|
+
"_put_",
|
1262
|
+
*[](Tensor &self, const Tensor &index, const Tensor &source, bool accumulate) {
|
1263
|
+
return self.put_(index, source, accumulate);
|
1264
|
+
})
|
1265
|
+
.define_method(
|
1266
|
+
"_q_per_channel_axis",
|
1267
|
+
*[](const Tensor &self) {
|
1268
|
+
return self.q_per_channel_axis();
|
1269
|
+
})
|
1270
|
+
.define_method(
|
1271
|
+
"_q_per_channel_scales",
|
1272
|
+
*[](const Tensor &self) {
|
1273
|
+
return self.q_per_channel_scales();
|
1274
|
+
})
|
1275
|
+
.define_method(
|
1276
|
+
"_q_per_channel_zero_points",
|
1277
|
+
*[](const Tensor &self) {
|
1278
|
+
return self.q_per_channel_zero_points();
|
1279
|
+
})
|
1280
|
+
.define_method(
|
1281
|
+
"_q_scale",
|
1282
|
+
*[](const Tensor &self) {
|
1283
|
+
return self.q_scale();
|
1284
|
+
})
|
1285
|
+
.define_method(
|
1286
|
+
"_q_zero_point",
|
1287
|
+
*[](const Tensor &self) {
|
1288
|
+
return self.q_zero_point();
|
1289
|
+
})
|
1290
|
+
.define_method(
|
1291
|
+
"_qr",
|
1292
|
+
*[](const Tensor &self, bool some) {
|
1293
|
+
return self.qr(some);
|
1294
|
+
})
|
1295
|
+
.define_method(
|
1296
|
+
"_qscheme",
|
1297
|
+
*[](const Tensor &self) {
|
1298
|
+
return self.qscheme();
|
1299
|
+
})
|
1300
|
+
.define_method(
|
1301
|
+
"_random_",
|
1302
|
+
*[](Tensor &self) {
|
1303
|
+
return self.random_();
|
1304
|
+
})
|
1305
|
+
.define_method(
|
1306
|
+
"_random__from",
|
1307
|
+
*[](Tensor &self, int64_t from, int64_t to) {
|
1308
|
+
return self.random_(from, to);
|
1309
|
+
})
|
1310
|
+
.define_method(
|
1311
|
+
"_random__to",
|
1312
|
+
*[](Tensor &self, int64_t to) {
|
1313
|
+
return self.random_(to);
|
1314
|
+
})
|
1315
|
+
.define_method(
|
1316
|
+
"_reciprocal",
|
1317
|
+
*[](const Tensor &self) {
|
1318
|
+
return self.reciprocal();
|
1319
|
+
})
|
1320
|
+
.define_method(
|
1321
|
+
"_reciprocal_",
|
1322
|
+
*[](Tensor &self) {
|
1323
|
+
return self.reciprocal_();
|
1324
|
+
})
|
1325
|
+
.define_method(
|
1326
|
+
"_relu",
|
1327
|
+
*[](const Tensor &self) {
|
1328
|
+
return self.relu();
|
1329
|
+
})
|
1330
|
+
.define_method(
|
1331
|
+
"_relu_",
|
1332
|
+
*[](Tensor &self) {
|
1333
|
+
return self.relu_();
|
1334
|
+
})
|
1335
|
+
.define_method(
|
1336
|
+
"_remainder__scalar",
|
1337
|
+
*[](Tensor &self, Scalar other) {
|
1338
|
+
return self.remainder_(other);
|
1339
|
+
})
|
1340
|
+
.define_method(
|
1341
|
+
"_remainder__tensor",
|
1342
|
+
*[](Tensor &self, const Tensor &other) {
|
1343
|
+
return self.remainder_(other);
|
1344
|
+
})
|
1345
|
+
.define_method(
|
1346
|
+
"_remainder_scalar",
|
1347
|
+
*[](const Tensor &self, Scalar other) {
|
1348
|
+
return self.remainder(other);
|
1349
|
+
})
|
1350
|
+
.define_method(
|
1351
|
+
"_remainder_tensor",
|
1352
|
+
*[](const Tensor &self, const Tensor &other) {
|
1353
|
+
return self.remainder(other);
|
1354
|
+
})
|
1355
|
+
.define_method(
|
1356
|
+
"_renorm",
|
1357
|
+
*[](const Tensor &self, Scalar p, int64_t dim, Scalar maxnorm) {
|
1358
|
+
return self.renorm(p, dim, maxnorm);
|
1359
|
+
})
|
1360
|
+
.define_method(
|
1361
|
+
"_renorm_",
|
1362
|
+
*[](Tensor &self, Scalar p, int64_t dim, Scalar maxnorm) {
|
1363
|
+
return self.renorm_(p, dim, maxnorm);
|
1364
|
+
})
|
1365
|
+
.define_method(
|
1366
|
+
"_repeat",
|
1367
|
+
*[](const Tensor &self, IntArrayRef repeats) {
|
1368
|
+
return self.repeat(repeats);
|
1369
|
+
})
|
1370
|
+
.define_method(
|
1371
|
+
"_repeat_interleave_self_int",
|
1372
|
+
*[](const Tensor &self, int64_t repeats) {
|
1373
|
+
return self.repeat_interleave(repeats);
|
1374
|
+
})
|
1375
|
+
.define_method(
|
1376
|
+
"_repeat_interleave_self_int_dim",
|
1377
|
+
*[](const Tensor &self, int64_t repeats, int64_t dim) {
|
1378
|
+
return self.repeat_interleave(repeats, dim);
|
1379
|
+
})
|
1380
|
+
.define_method(
|
1381
|
+
"_repeat_interleave_self_tensor",
|
1382
|
+
*[](const Tensor &self, const Tensor &repeats) {
|
1383
|
+
return self.repeat_interleave(repeats);
|
1384
|
+
})
|
1385
|
+
.define_method(
|
1386
|
+
"_repeat_interleave_self_tensor_dim",
|
1387
|
+
*[](const Tensor &self, const Tensor &repeats, int64_t dim) {
|
1388
|
+
return self.repeat_interleave(repeats, dim);
|
1389
|
+
})
|
1390
|
+
.define_method(
|
1391
|
+
"_reshape",
|
1392
|
+
*[](const Tensor &self, IntArrayRef shape) {
|
1393
|
+
return self.reshape(shape);
|
1394
|
+
})
|
1395
|
+
.define_method(
|
1396
|
+
"_reshape_as",
|
1397
|
+
*[](const Tensor &self, const Tensor &other) {
|
1398
|
+
return self.reshape_as(other);
|
1399
|
+
})
|
1400
|
+
.define_method(
|
1401
|
+
"_resize_",
|
1402
|
+
*[](Tensor &self, IntArrayRef size) {
|
1403
|
+
return self.resize_(size);
|
1404
|
+
})
|
1405
|
+
.define_method(
|
1406
|
+
"_resize_as_",
|
1407
|
+
*[](Tensor &self, const Tensor &the_template) {
|
1408
|
+
return self.resize_as_(the_template);
|
1409
|
+
})
|
1410
|
+
.define_method(
|
1411
|
+
"_rfft",
|
1412
|
+
*[](const Tensor &self, int64_t signal_ndim, bool normalized, bool onesided) {
|
1413
|
+
return self.rfft(signal_ndim, normalized, onesided);
|
1414
|
+
})
|
1415
|
+
.define_method(
|
1416
|
+
"_roll",
|
1417
|
+
*[](const Tensor &self, IntArrayRef shifts, IntArrayRef dims) {
|
1418
|
+
return self.roll(shifts, dims);
|
1419
|
+
})
|
1420
|
+
.define_method(
|
1421
|
+
"_rot90",
|
1422
|
+
*[](const Tensor &self, int64_t k, IntArrayRef dims) {
|
1423
|
+
return self.rot90(k, dims);
|
1424
|
+
})
|
1425
|
+
.define_method(
|
1426
|
+
"_round",
|
1427
|
+
*[](const Tensor &self) {
|
1428
|
+
return self.round();
|
1429
|
+
})
|
1430
|
+
.define_method(
|
1431
|
+
"_round_",
|
1432
|
+
*[](Tensor &self) {
|
1433
|
+
return self.round_();
|
1434
|
+
})
|
1435
|
+
.define_method(
|
1436
|
+
"_rsqrt",
|
1437
|
+
*[](const Tensor &self) {
|
1438
|
+
return self.rsqrt();
|
1439
|
+
})
|
1440
|
+
.define_method(
|
1441
|
+
"_rsqrt_",
|
1442
|
+
*[](Tensor &self) {
|
1443
|
+
return self.rsqrt_();
|
1444
|
+
})
|
1445
|
+
.define_method(
|
1446
|
+
"_scatter__src",
|
1447
|
+
*[](Tensor &self, int64_t dim, const Tensor &index, const Tensor &src) {
|
1448
|
+
return self.scatter_(dim, index, src);
|
1449
|
+
})
|
1450
|
+
.define_method(
|
1451
|
+
"_scatter__value",
|
1452
|
+
*[](Tensor &self, int64_t dim, const Tensor &index, Scalar value) {
|
1453
|
+
return self.scatter_(dim, index, value);
|
1454
|
+
})
|
1455
|
+
.define_method(
|
1456
|
+
"_scatter_add",
|
1457
|
+
*[](const Tensor &self, int64_t dim, const Tensor &index, const Tensor &src) {
|
1458
|
+
return self.scatter_add(dim, index, src);
|
1459
|
+
})
|
1460
|
+
.define_method(
|
1461
|
+
"_scatter_add_",
|
1462
|
+
*[](Tensor &self, int64_t dim, const Tensor &index, const Tensor &src) {
|
1463
|
+
return self.scatter_add_(dim, index, src);
|
1464
|
+
})
|
1465
|
+
.define_method(
|
1466
|
+
"_scatter_src",
|
1467
|
+
*[](const Tensor &self, int64_t dim, const Tensor &index, const Tensor &src) {
|
1468
|
+
return self.scatter(dim, index, src);
|
1469
|
+
})
|
1470
|
+
.define_method(
|
1471
|
+
"_scatter_value",
|
1472
|
+
*[](const Tensor &self, int64_t dim, const Tensor &index, Scalar value) {
|
1473
|
+
return self.scatter(dim, index, value);
|
1474
|
+
})
|
1475
|
+
.define_method(
|
1476
|
+
"_select_int",
|
1477
|
+
*[](Tensor &self, int64_t dim, int64_t index) {
|
1478
|
+
return self.select(dim, index);
|
1479
|
+
})
|
1480
|
+
.define_method(
|
1481
|
+
"_set_",
|
1482
|
+
*[](Tensor &self) {
|
1483
|
+
return self.set_();
|
1484
|
+
})
|
1485
|
+
.define_method(
|
1486
|
+
"_set__source_tensor",
|
1487
|
+
*[](Tensor &self, const Tensor &source) {
|
1488
|
+
return self.set_(source);
|
1489
|
+
})
|
1490
|
+
.define_method(
|
1491
|
+
"_set_data",
|
1492
|
+
*[](Tensor &self, const Tensor &new_data) {
|
1493
|
+
return self.set_data(new_data);
|
1494
|
+
})
|
1495
|
+
.define_method(
|
1496
|
+
"_sigmoid",
|
1497
|
+
*[](const Tensor &self) {
|
1498
|
+
return self.sigmoid();
|
1499
|
+
})
|
1500
|
+
.define_method(
|
1501
|
+
"_sigmoid_",
|
1502
|
+
*[](Tensor &self) {
|
1503
|
+
return self.sigmoid_();
|
1504
|
+
})
|
1505
|
+
.define_method(
|
1506
|
+
"_sign",
|
1507
|
+
*[](const Tensor &self) {
|
1508
|
+
return self.sign();
|
1509
|
+
})
|
1510
|
+
.define_method(
|
1511
|
+
"_sign_",
|
1512
|
+
*[](Tensor &self) {
|
1513
|
+
return self.sign_();
|
1514
|
+
})
|
1515
|
+
.define_method(
|
1516
|
+
"_sin",
|
1517
|
+
*[](const Tensor &self) {
|
1518
|
+
return self.sin();
|
1519
|
+
})
|
1520
|
+
.define_method(
|
1521
|
+
"_sin_",
|
1522
|
+
*[](Tensor &self) {
|
1523
|
+
return self.sin_();
|
1524
|
+
})
|
1525
|
+
.define_method(
|
1526
|
+
"_sinh",
|
1527
|
+
*[](const Tensor &self) {
|
1528
|
+
return self.sinh();
|
1529
|
+
})
|
1530
|
+
.define_method(
|
1531
|
+
"_sinh_",
|
1532
|
+
*[](Tensor &self) {
|
1533
|
+
return self.sinh_();
|
1534
|
+
})
|
1535
|
+
.define_method(
|
1536
|
+
"_size_int",
|
1537
|
+
*[](const Tensor &self, int64_t dim) {
|
1538
|
+
return self.size(dim);
|
1539
|
+
})
|
1540
|
+
.define_method(
|
1541
|
+
"_slice_tensor",
|
1542
|
+
*[](Tensor &self, int64_t dim, int64_t start, int64_t end, int64_t step) {
|
1543
|
+
return self.slice(dim, start, end, step);
|
1544
|
+
})
|
1545
|
+
.define_method(
|
1546
|
+
"_slogdet",
|
1547
|
+
*[](const Tensor &self) {
|
1548
|
+
return self.slogdet();
|
1549
|
+
})
|
1550
|
+
.define_method(
|
1551
|
+
"_smm",
|
1552
|
+
*[](const Tensor &self, const Tensor &mat2) {
|
1553
|
+
return self.smm(mat2);
|
1554
|
+
})
|
1555
|
+
.define_method(
|
1556
|
+
"_solve",
|
1557
|
+
*[](const Tensor &self, const Tensor &A) {
|
1558
|
+
return self.solve(A);
|
1559
|
+
})
|
1560
|
+
.define_method(
|
1561
|
+
"_sort",
|
1562
|
+
*[](const Tensor &self, int64_t dim, bool descending) {
|
1563
|
+
return self.sort(dim, descending);
|
1564
|
+
})
|
1565
|
+
.define_method(
|
1566
|
+
"_sparse_dim",
|
1567
|
+
*[](const Tensor &self) {
|
1568
|
+
return self.sparse_dim();
|
1569
|
+
})
|
1570
|
+
.define_method(
|
1571
|
+
"_sparse_mask",
|
1572
|
+
*[](const Tensor &self, const Tensor &mask) {
|
1573
|
+
return self.sparse_mask(mask);
|
1574
|
+
})
|
1575
|
+
.define_method(
|
1576
|
+
"_sparse_resize_",
|
1577
|
+
*[](Tensor &self, IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) {
|
1578
|
+
return self.sparse_resize_(size, sparse_dim, dense_dim);
|
1579
|
+
})
|
1580
|
+
.define_method(
|
1581
|
+
"_sparse_resize_and_clear_",
|
1582
|
+
*[](Tensor &self, IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) {
|
1583
|
+
return self.sparse_resize_and_clear_(size, sparse_dim, dense_dim);
|
1584
|
+
})
|
1585
|
+
.define_method(
|
1586
|
+
"_split_tensor",
|
1587
|
+
*[](Tensor &self, int64_t split_size, int64_t dim) {
|
1588
|
+
return self.split(split_size, dim);
|
1589
|
+
})
|
1590
|
+
.define_method(
|
1591
|
+
"_split_with_sizes",
|
1592
|
+
*[](const Tensor &self, IntArrayRef split_sizes, int64_t dim) {
|
1593
|
+
return self.split_with_sizes(split_sizes, dim);
|
1594
|
+
})
|
1595
|
+
.define_method(
|
1596
|
+
"_sqrt",
|
1597
|
+
*[](const Tensor &self) {
|
1598
|
+
return self.sqrt();
|
1599
|
+
})
|
1600
|
+
.define_method(
|
1601
|
+
"_sqrt_",
|
1602
|
+
*[](Tensor &self) {
|
1603
|
+
return self.sqrt_();
|
1604
|
+
})
|
1605
|
+
.define_method(
|
1606
|
+
"_squeeze",
|
1607
|
+
*[](Tensor &self) {
|
1608
|
+
return self.squeeze();
|
1609
|
+
})
|
1610
|
+
.define_method(
|
1611
|
+
"_squeeze_",
|
1612
|
+
*[](Tensor &self) {
|
1613
|
+
return self.squeeze_();
|
1614
|
+
})
|
1615
|
+
.define_method(
|
1616
|
+
"_squeeze__dim",
|
1617
|
+
*[](Tensor &self, int64_t dim) {
|
1618
|
+
return self.squeeze_(dim);
|
1619
|
+
})
|
1620
|
+
.define_method(
|
1621
|
+
"_squeeze_dim",
|
1622
|
+
*[](Tensor &self, int64_t dim) {
|
1623
|
+
return self.squeeze(dim);
|
1624
|
+
})
|
1625
|
+
.define_method(
|
1626
|
+
"_sspaddmm",
|
1627
|
+
*[](const Tensor &self, const Tensor &mat1, const Tensor &mat2, Scalar beta, Scalar alpha) {
|
1628
|
+
return self.sspaddmm(mat1, mat2, beta, alpha);
|
1629
|
+
})
|
1630
|
+
.define_method(
|
1631
|
+
"_std",
|
1632
|
+
*[](const Tensor &self, bool unbiased) {
|
1633
|
+
return self.std(unbiased);
|
1634
|
+
})
|
1635
|
+
.define_method(
|
1636
|
+
"_std_dim",
|
1637
|
+
*[](const Tensor &self, IntArrayRef dim, bool unbiased, bool keepdim) {
|
1638
|
+
return self.std(dim, unbiased, keepdim);
|
1639
|
+
})
|
1640
|
+
.define_method(
|
1641
|
+
"_stride_int",
|
1642
|
+
*[](const Tensor &self, int64_t dim) {
|
1643
|
+
return self.stride(dim);
|
1644
|
+
})
|
1645
|
+
.define_method(
|
1646
|
+
"_sub__scalar",
|
1647
|
+
*[](Tensor &self, Scalar other, Scalar alpha) {
|
1648
|
+
return self.sub_(other, alpha);
|
1649
|
+
})
|
1650
|
+
.define_method(
|
1651
|
+
"_sub__tensor",
|
1652
|
+
*[](Tensor &self, const Tensor &other, Scalar alpha) {
|
1653
|
+
return self.sub_(other, alpha);
|
1654
|
+
})
|
1655
|
+
.define_method(
|
1656
|
+
"_sub_scalar",
|
1657
|
+
*[](const Tensor &self, Scalar other, Scalar alpha) {
|
1658
|
+
return self.sub(other, alpha);
|
1659
|
+
})
|
1660
|
+
.define_method(
|
1661
|
+
"_sub_tensor",
|
1662
|
+
*[](const Tensor &self, const Tensor &other, Scalar alpha) {
|
1663
|
+
return self.sub(other, alpha);
|
1664
|
+
})
|
1665
|
+
.define_method(
|
1666
|
+
"_sum_to_size",
|
1667
|
+
*[](const Tensor &self, IntArrayRef size) {
|
1668
|
+
return self.sum_to_size(size);
|
1669
|
+
})
|
1670
|
+
.define_method(
|
1671
|
+
"_svd",
|
1672
|
+
*[](const Tensor &self, bool some, bool compute_uv) {
|
1673
|
+
return self.svd(some, compute_uv);
|
1674
|
+
})
|
1675
|
+
.define_method(
|
1676
|
+
"_symeig",
|
1677
|
+
*[](const Tensor &self, bool eigenvectors, bool upper) {
|
1678
|
+
return self.symeig(eigenvectors, upper);
|
1679
|
+
})
|
1680
|
+
.define_method(
|
1681
|
+
"_t",
|
1682
|
+
*[](Tensor &self) {
|
1683
|
+
return self.t();
|
1684
|
+
})
|
1685
|
+
.define_method(
|
1686
|
+
"_t_",
|
1687
|
+
*[](Tensor &self) {
|
1688
|
+
return self.t_();
|
1689
|
+
})
|
1690
|
+
.define_method(
|
1691
|
+
"_take",
|
1692
|
+
*[](const Tensor &self, const Tensor &index) {
|
1693
|
+
return self.take(index);
|
1694
|
+
})
|
1695
|
+
.define_method(
|
1696
|
+
"_tan",
|
1697
|
+
*[](const Tensor &self) {
|
1698
|
+
return self.tan();
|
1699
|
+
})
|
1700
|
+
.define_method(
|
1701
|
+
"_tan_",
|
1702
|
+
*[](Tensor &self) {
|
1703
|
+
return self.tan_();
|
1704
|
+
})
|
1705
|
+
.define_method(
|
1706
|
+
"_tanh",
|
1707
|
+
*[](const Tensor &self) {
|
1708
|
+
return self.tanh();
|
1709
|
+
})
|
1710
|
+
.define_method(
|
1711
|
+
"_tanh_",
|
1712
|
+
*[](Tensor &self) {
|
1713
|
+
return self.tanh_();
|
1714
|
+
})
|
1715
|
+
.define_method(
|
1716
|
+
"_to_dense",
|
1717
|
+
*[](const Tensor &self) {
|
1718
|
+
return self.to_dense();
|
1719
|
+
})
|
1720
|
+
.define_method(
|
1721
|
+
"_to_mkldnn",
|
1722
|
+
*[](const Tensor &self) {
|
1723
|
+
return self.to_mkldnn();
|
1724
|
+
})
|
1725
|
+
.define_method(
|
1726
|
+
"_to_other",
|
1727
|
+
*[](const Tensor &self, const Tensor &other, bool non_blocking, bool copy) {
|
1728
|
+
return self.to(other, non_blocking, copy);
|
1729
|
+
})
|
1730
|
+
.define_method(
|
1731
|
+
"_to_sparse",
|
1732
|
+
*[](const Tensor &self) {
|
1733
|
+
return self.to_sparse();
|
1734
|
+
})
|
1735
|
+
.define_method(
|
1736
|
+
"_to_sparse_sparse_dim",
|
1737
|
+
*[](const Tensor &self, int64_t sparse_dim) {
|
1738
|
+
return self.to_sparse(sparse_dim);
|
1739
|
+
})
|
1740
|
+
// Selection / linear-algebra bindings. _topk and _triangular_solve return
// tuples on the C++ side; the template layer unwraps them for Ruby.
.define_method(
    "_topk",
    *[](const Tensor &self, int64_t k, int64_t dim, bool largest, bool sorted) {
      return self.topk(k, dim, largest, sorted);
    })
.define_method(
    "_trace",
    *[](const Tensor &self) {
      return self.trace();
    })
.define_method(
    "_transpose_",
    // In-place transpose of dims dim0/dim1 (mutates receiver).
    *[](Tensor &self, int64_t dim0, int64_t dim1) {
      return self.transpose_(dim0, dim1);
    })
.define_method(
    "_transpose_int",
    *[](Tensor &self, int64_t dim0, int64_t dim1) {
      return self.transpose(dim0, dim1);
    })
.define_method(
    "_triangular_solve",
    *[](const Tensor &self, const Tensor &A, bool upper, bool transpose, bool unitriangular) {
      return self.triangular_solve(A, upper, transpose, unitriangular);
    })
|
1765
|
+
// Triangle-extraction and truncation bindings; `diagonal` offsets the
// main diagonal. Trailing-underscore variants mutate the receiver in place.
.define_method(
    "_tril",
    *[](const Tensor &self, int64_t diagonal) {
      return self.tril(diagonal);
    })
.define_method(
    "_tril_",
    *[](Tensor &self, int64_t diagonal) {
      return self.tril_(diagonal);
    })
.define_method(
    "_triu",
    *[](const Tensor &self, int64_t diagonal) {
      return self.triu(diagonal);
    })
.define_method(
    "_triu_",
    *[](Tensor &self, int64_t diagonal) {
      return self.triu_(diagonal);
    })
.define_method(
    "_trunc",
    *[](const Tensor &self) {
      return self.trunc();
    })
.define_method(
    "_trunc_",
    *[](Tensor &self) {
      return self.trunc_();
    })
|
1795
|
+
// Type conversion, splitting, sliding-window, and random-fill bindings.
.define_method(
    "_type_as",
    *[](const Tensor &self, const Tensor &other) {
      return self.type_as(other);
    })
.define_method(
    "_unbind_int",
    // Splits self into a vector of views along `dim`.
    *[](Tensor &self, int64_t dim) {
      return self.unbind(dim);
    })
.define_method(
    "_unfold",
    *[](Tensor &self, int64_t dimension, int64_t size, int64_t step) {
      return self.unfold(dimension, size, step);
    })
.define_method(
    "_uniform_",
    // Fills self in place with samples from U(from, to).
    *[](Tensor &self, double from, double to) {
      return self.uniform_(from, to);
    })
|
1815
|
+
// Shape and statistics bindings. `_var` takes Bessel's correction via
// `unbiased`; `_var_dim` reduces over the given dims, optionally keeping them.
.define_method(
    "_unsqueeze",
    *[](Tensor &self, int64_t dim) {
      return self.unsqueeze(dim);
    })
.define_method(
    "_unsqueeze_",
    *[](Tensor &self, int64_t dim) {
      return self.unsqueeze_(dim);
    })
.define_method(
    "_values",
    // Values of a sparse tensor (see _to_sparse above).
    *[](Tensor &self) {
      return self.values();
    })
.define_method(
    "_var",
    *[](const Tensor &self, bool unbiased) {
      return self.var(unbiased);
    })
.define_method(
    "_var_dim",
    *[](const Tensor &self, IntArrayRef dim, bool unbiased, bool keepdim) {
      return self.var(dim, unbiased, keepdim);
    })
|
1840
|
+
// View and element-selection bindings; `});` closes the define_method chain.
.define_method(
    "_view",
    *[](Tensor &self, IntArrayRef size) {
      return self.view(size);
    })
.define_method(
    "_view_as",
    *[](const Tensor &self, const Tensor &other) {
      return self.view_as(other);
    })
.define_method(
    "_where_self",
    // NOTE(review): the Ruby receiver binds to the FIRST lambda parameter,
    // here named `condition`, matching the where.self schema in
    // native_functions.yaml (condition, self, other) — presumably the Ruby
    // wrapper passes the condition tensor as receiver; verify against callers.
    *[](const Tensor &condition, const Tensor &self, const Tensor &other) {
      return self.where(condition, other);
    })
.define_method(
    "_zero_",
    // Zeroes self in place.
    *[](Tensor &self) {
      return self.zero_();
    });
|
1860
|
+
}
|