torch-rb 0.1.7 → 0.1.8

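A minimal Gemfile sketch for picking up this release (illustrative only; the version constraint shown is an assumption, not part of this diff):

# Gemfile
gem "torch-rb", "~> 0.1.8"

Then `bundle install` (or `gem install torch-rb -v 0.1.8`) rebuilds the native extension locally.
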
data/lib/torch/version.rb CHANGED
@@ -1,3 +1,3 @@
  module Torch
- VERSION = "0.1.7"
+ VERSION = "0.1.8"
  end
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: torch-rb
  version: !ruby/object:Gem::Version
- version: 0.1.7
+ version: 0.1.8
  platform: ruby
  authors:
  - Andrew Kane
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2020-01-11 00:00:00.000000000 Z
+ date: 2020-01-18 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: rice
@@ -106,17 +106,10 @@ files:
  - README.md
  - ext/torch/ext.cpp
  - ext/torch/extconf.rb
- - ext/torch/nn_functions.cpp
- - ext/torch/nn_functions.hpp
  - ext/torch/templates.cpp
  - ext/torch/templates.hpp
- - ext/torch/tensor_functions.cpp
- - ext/torch/tensor_functions.hpp
- - ext/torch/torch_functions.cpp
- - ext/torch/torch_functions.hpp
  - lib/torch-rb.rb
  - lib/torch.rb
- - lib/torch/ext.bundle
  - lib/torch/inspector.rb
  - lib/torch/native/dispatcher.rb
  - lib/torch/native/function.rb
ext/torch/nn_functions.cpp DELETED
@@ -1,615 +0,0 @@
- // generated by rake generate:functions
- // do not edit by hand
-
- #include <torch/torch.h>
- #include <rice/Module.hpp>
- #include "templates.hpp"
-
- void add_nn_functions(Module m) {
- m
- .define_singleton_method(
- "_adaptive_avg_pool2d",
- *[](const Tensor &self, IntArrayRef output_size) {
- return torch::adaptive_avg_pool2d(self, output_size);
- })
- .define_singleton_method(
- "_adaptive_avg_pool2d_out",
- *[](const Tensor &self, IntArrayRef output_size, Tensor &out) {
- return torch::adaptive_avg_pool2d_out(out, self, output_size);
- })
- .define_singleton_method(
- "_adaptive_avg_pool3d",
- *[](const Tensor &self, IntArrayRef output_size) {
- return torch::adaptive_avg_pool3d(self, output_size);
- })
- .define_singleton_method(
- "_adaptive_avg_pool3d_out",
- *[](const Tensor &self, IntArrayRef output_size, Tensor &out) {
- return torch::adaptive_avg_pool3d_out(out, self, output_size);
- })
- .define_singleton_method(
- "_adaptive_max_pool2d",
- *[](const Tensor &self, IntArrayRef output_size) {
- return wrap(torch::adaptive_max_pool2d(self, output_size));
- })
- .define_singleton_method(
- "_adaptive_max_pool2d_out",
- *[](const Tensor &self, IntArrayRef output_size, Tensor &out, Tensor &indices) {
- return wrap(torch::adaptive_max_pool2d_out(out, indices, self, output_size));
- })
- .define_singleton_method(
- "_adaptive_max_pool3d",
- *[](const Tensor &self, IntArrayRef output_size) {
- return wrap(torch::adaptive_max_pool3d(self, output_size));
- })
- .define_singleton_method(
- "_adaptive_max_pool3d_out",
- *[](const Tensor &self, IntArrayRef output_size, Tensor &out, Tensor &indices) {
- return wrap(torch::adaptive_max_pool3d_out(out, indices, self, output_size));
- })
- .define_singleton_method(
- "_avg_pool2d",
- *[](const Tensor &self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) {
- return torch::avg_pool2d(self, kernel_size, stride, padding, ceil_mode, count_include_pad);
- })
- .define_singleton_method(
- "_avg_pool2d_divisor_override",
- *[](const Tensor &self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, int64_t divisor_override) {
- return torch::avg_pool2d(self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
- })
- .define_singleton_method(
- "_avg_pool3d",
- *[](const Tensor &self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) {
- return torch::avg_pool3d(self, kernel_size, stride, padding, ceil_mode, count_include_pad);
- })
- .define_singleton_method(
- "_avg_pool3d_divisor_override",
- *[](const Tensor &self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, int64_t divisor_override) {
- return torch::avg_pool3d(self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
- })
- .define_singleton_method(
- "_binary_cross_entropy",
- *[](const Tensor &self, const Tensor &target, OptionalTensor weight, MyReduction reduction) {
- return torch::binary_cross_entropy(self, target, weight, reduction);
- })
- .define_singleton_method(
- "_binary_cross_entropy_out",
- *[](const Tensor &self, const Tensor &target, OptionalTensor weight, MyReduction reduction, Tensor &out) {
- return torch::binary_cross_entropy_out(out, self, target, weight, reduction);
- })
- .define_singleton_method(
- "_col2im",
- *[](const Tensor &self, IntArrayRef output_size, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride) {
- return torch::col2im(self, output_size, kernel_size, dilation, padding, stride);
- })
- .define_singleton_method(
- "_col2im_out",
- *[](const Tensor &self, IntArrayRef output_size, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride, Tensor &out) {
- return torch::col2im_out(out, self, output_size, kernel_size, dilation, padding, stride);
- })
- .define_singleton_method(
- "_elu",
- *[](const Tensor &self, Scalar alpha, Scalar scale, Scalar input_scale) {
- return torch::elu(self, alpha, scale, input_scale);
- })
- .define_singleton_method(
- "_elu_",
- *[](Tensor &self, Scalar alpha, Scalar scale, Scalar input_scale) {
- return torch::elu_(self, alpha, scale, input_scale);
- })
- .define_singleton_method(
- "_elu_out",
- *[](const Tensor &self, Scalar alpha, Scalar scale, Scalar input_scale, Tensor &out) {
- return torch::elu_out(out, self, alpha, scale, input_scale);
- })
- .define_singleton_method(
- "_fractional_max_pool2d",
- *[](const Tensor &self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor &random_samples) {
- return wrap(torch::fractional_max_pool2d(self, kernel_size, output_size, random_samples));
- })
- .define_singleton_method(
- "_fractional_max_pool2d_output",
- *[](const Tensor &self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor &random_samples, Tensor &output, Tensor &indices) {
- return wrap(torch::fractional_max_pool2d_out(output, indices, self, kernel_size, output_size, random_samples));
- })
- .define_singleton_method(
- "_fractional_max_pool3d",
- *[](const Tensor &self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor &random_samples) {
- return wrap(torch::fractional_max_pool3d(self, kernel_size, output_size, random_samples));
- })
- .define_singleton_method(
- "_fractional_max_pool3d_output",
- *[](const Tensor &self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor &random_samples, Tensor &output, Tensor &indices) {
- return wrap(torch::fractional_max_pool3d_out(output, indices, self, kernel_size, output_size, random_samples));
- })
- .define_singleton_method(
- "_gelu",
- *[](const Tensor &self) {
- return torch::gelu(self);
- })
- .define_singleton_method(
- "_glu",
- *[](const Tensor &self, int64_t dim) {
- return torch::glu(self, dim);
- })
- .define_singleton_method(
- "_glu_out",
- *[](const Tensor &self, int64_t dim, Tensor &out) {
- return torch::glu_out(out, self, dim);
- })
- .define_singleton_method(
- "_hardtanh",
- *[](const Tensor &self, Scalar min_val, Scalar max_val) {
- return torch::hardtanh(self, min_val, max_val);
- })
- .define_singleton_method(
- "_hardtanh_",
- *[](Tensor &self, Scalar min_val, Scalar max_val) {
- return torch::hardtanh_(self, min_val, max_val);
- })
- .define_singleton_method(
- "_hardtanh_out",
- *[](const Tensor &self, Scalar min_val, Scalar max_val, Tensor &out) {
- return torch::hardtanh_out(out, self, min_val, max_val);
- })
- .define_singleton_method(
- "_im2col",
- *[](const Tensor &self, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride) {
- return torch::im2col(self, kernel_size, dilation, padding, stride);
- })
- .define_singleton_method(
- "_im2col_out",
- *[](const Tensor &self, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride, Tensor &out) {
- return torch::im2col_out(out, self, kernel_size, dilation, padding, stride);
- })
- .define_singleton_method(
- "_l1_loss",
- *[](const Tensor &self, const Tensor &target, MyReduction reduction) {
- return torch::l1_loss(self, target, reduction);
- })
- .define_singleton_method(
- "_l1_loss_out",
- *[](const Tensor &self, const Tensor &target, MyReduction reduction, Tensor &out) {
- return torch::l1_loss_out(out, self, target, reduction);
- })
- .define_singleton_method(
- "_leaky_relu",
- *[](const Tensor &self, Scalar negative_slope) {
- return torch::leaky_relu(self, negative_slope);
- })
- .define_singleton_method(
- "_leaky_relu_",
- *[](Tensor &self, Scalar negative_slope) {
- return torch::leaky_relu_(self, negative_slope);
- })
- .define_singleton_method(
- "_leaky_relu_out",
- *[](const Tensor &self, Scalar negative_slope, Tensor &out) {
- return torch::leaky_relu_out(out, self, negative_slope);
- })
- .define_singleton_method(
- "_linear",
- *[](const Tensor &input, const Tensor &weight, OptionalTensor bias) {
- return torch::linear(input, weight, bias);
- })
- .define_singleton_method(
- "_log_sigmoid",
- *[](const Tensor &self) {
- return torch::log_sigmoid(self);
- })
- .define_singleton_method(
- "_log_sigmoid_forward",
- *[](const Tensor &self) {
- return wrap(torch::log_sigmoid_forward(self));
- })
- .define_singleton_method(
- "_log_sigmoid_forward_output",
- *[](const Tensor &self, Tensor &output, Tensor &buffer) {
- return wrap(torch::log_sigmoid_forward_out(output, buffer, self));
- })
- .define_singleton_method(
- "_log_sigmoid_out",
- *[](const Tensor &self, Tensor &out) {
- return torch::log_sigmoid_out(out, self);
- })
- .define_singleton_method(
- "_max_pool2d_with_indices",
- *[](const Tensor &self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) {
- return wrap(torch::max_pool2d_with_indices(self, kernel_size, stride, padding, dilation, ceil_mode));
- })
- .define_singleton_method(
- "_max_pool2d_with_indices_out",
- *[](const Tensor &self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, Tensor &out, Tensor &indices) {
- return wrap(torch::max_pool2d_with_indices_out(out, indices, self, kernel_size, stride, padding, dilation, ceil_mode));
- })
- .define_singleton_method(
- "_max_pool3d_with_indices",
- *[](const Tensor &self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) {
- return wrap(torch::max_pool3d_with_indices(self, kernel_size, stride, padding, dilation, ceil_mode));
- })
- .define_singleton_method(
- "_max_pool3d_with_indices_out",
- *[](const Tensor &self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, Tensor &out, Tensor &indices) {
- return wrap(torch::max_pool3d_with_indices_out(out, indices, self, kernel_size, stride, padding, dilation, ceil_mode));
- })
- .define_singleton_method(
- "_max_unpool2d",
- *[](const Tensor &self, const Tensor &indices, IntArrayRef output_size) {
- return torch::max_unpool2d(self, indices, output_size);
- })
- .define_singleton_method(
- "_max_unpool2d_out",
- *[](const Tensor &self, const Tensor &indices, IntArrayRef output_size, Tensor &out) {
- return torch::max_unpool2d_out(out, self, indices, output_size);
- })
- .define_singleton_method(
- "_max_unpool3d",
- *[](const Tensor &self, const Tensor &indices, IntArrayRef output_size, IntArrayRef stride, IntArrayRef padding) {
- return torch::max_unpool3d(self, indices, output_size, stride, padding);
- })
- .define_singleton_method(
- "_max_unpool3d_out",
- *[](const Tensor &self, const Tensor &indices, IntArrayRef output_size, IntArrayRef stride, IntArrayRef padding, Tensor &out) {
- return torch::max_unpool3d_out(out, self, indices, output_size, stride, padding);
- })
- .define_singleton_method(
- "_mkldnn_linear",
- *[](const Tensor &input, const Tensor &weight, OptionalTensor bias) {
- return torch::mkldnn_linear(input, weight, bias);
- })
- .define_singleton_method(
- "_mkldnn_reorder_conv2d_weight",
- *[](const Tensor &self, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups) {
- return torch::mkldnn_reorder_conv2d_weight(self, padding, stride, dilation, groups);
- })
- .define_singleton_method(
- "_mse_loss",
- *[](const Tensor &self, const Tensor &target, MyReduction reduction) {
- return torch::mse_loss(self, target, reduction);
- })
- .define_singleton_method(
- "_mse_loss_out",
- *[](const Tensor &self, const Tensor &target, MyReduction reduction, Tensor &out) {
- return torch::mse_loss_out(out, self, target, reduction);
- })
- .define_singleton_method(
- "_multi_margin_loss",
- *[](const Tensor &self, const Tensor &target, Scalar p, Scalar margin, OptionalTensor weight, MyReduction reduction) {
- return torch::multi_margin_loss(self, target, p, margin, weight, reduction);
- })
- .define_singleton_method(
- "_multi_margin_loss_out",
- *[](const Tensor &self, const Tensor &target, Scalar p, Scalar margin, OptionalTensor weight, MyReduction reduction, Tensor &out) {
- return torch::multi_margin_loss_out(out, self, target, p, margin, weight, reduction);
- })
- .define_singleton_method(
- "_multilabel_margin_loss",
- *[](const Tensor &self, const Tensor &target, MyReduction reduction) {
- return torch::multilabel_margin_loss(self, target, reduction);
- })
- .define_singleton_method(
- "_multilabel_margin_loss_forward",
- *[](const Tensor &self, const Tensor &target, MyReduction reduction) {
- return wrap(torch::multilabel_margin_loss_forward(self, target, reduction));
- })
- .define_singleton_method(
- "_multilabel_margin_loss_forward_output",
- *[](const Tensor &self, const Tensor &target, MyReduction reduction, Tensor &output, Tensor &is_target) {
- return wrap(torch::multilabel_margin_loss_forward_out(output, is_target, self, target, reduction));
- })
- .define_singleton_method(
- "_multilabel_margin_loss_out",
- *[](const Tensor &self, const Tensor &target, MyReduction reduction, Tensor &out) {
- return torch::multilabel_margin_loss_out(out, self, target, reduction);
- })
- .define_singleton_method(
- "_nll_loss",
- *[](const Tensor &self, const Tensor &target, OptionalTensor weight, MyReduction reduction, int64_t ignore_index) {
- return torch::nll_loss(self, target, weight, reduction, ignore_index);
- })
- .define_singleton_method(
- "_nll_loss2d",
- *[](const Tensor &self, const Tensor &target, OptionalTensor weight, MyReduction reduction, int64_t ignore_index) {
- return torch::nll_loss2d(self, target, weight, reduction, ignore_index);
- })
- .define_singleton_method(
- "_nll_loss2d_forward",
- *[](const Tensor &self, const Tensor &target, OptionalTensor weight, MyReduction reduction, int64_t ignore_index) {
- return wrap(torch::nll_loss2d_forward(self, target, weight, reduction, ignore_index));
- })
- .define_singleton_method(
- "_nll_loss2d_forward_output",
- *[](const Tensor &self, const Tensor &target, OptionalTensor weight, MyReduction reduction, int64_t ignore_index, Tensor &output, Tensor &total_weight) {
- return wrap(torch::nll_loss2d_forward_out(output, total_weight, self, target, weight, reduction, ignore_index));
- })
- .define_singleton_method(
- "_nll_loss2d_out",
- *[](const Tensor &self, const Tensor &target, OptionalTensor weight, MyReduction reduction, int64_t ignore_index, Tensor &out) {
- return torch::nll_loss2d_out(out, self, target, weight, reduction, ignore_index);
- })
- .define_singleton_method(
- "_nll_loss_forward",
- *[](const Tensor &self, const Tensor &target, OptionalTensor weight, MyReduction reduction, int64_t ignore_index) {
- return wrap(torch::nll_loss_forward(self, target, weight, reduction, ignore_index));
- })
- .define_singleton_method(
- "_nll_loss_forward_output",
- *[](const Tensor &self, const Tensor &target, OptionalTensor weight, MyReduction reduction, int64_t ignore_index, Tensor &output, Tensor &total_weight) {
- return wrap(torch::nll_loss_forward_out(output, total_weight, self, target, weight, reduction, ignore_index));
- })
- .define_singleton_method(
- "_nll_loss_out",
- *[](const Tensor &self, const Tensor &target, OptionalTensor weight, MyReduction reduction, int64_t ignore_index, Tensor &out) {
- return torch::nll_loss_out(out, self, target, weight, reduction, ignore_index);
- })
- .define_singleton_method(
- "_one_hot",
- *[](const Tensor &self, int64_t num_classes) {
- return torch::one_hot(self, num_classes);
- })
- .define_singleton_method(
- "_reflection_pad1d",
- *[](const Tensor &self, IntArrayRef padding) {
- return torch::reflection_pad1d(self, padding);
- })
- .define_singleton_method(
- "_reflection_pad1d_out",
- *[](const Tensor &self, IntArrayRef padding, Tensor &out) {
- return torch::reflection_pad1d_out(out, self, padding);
- })
- .define_singleton_method(
- "_reflection_pad2d",
- *[](const Tensor &self, IntArrayRef padding) {
- return torch::reflection_pad2d(self, padding);
- })
- .define_singleton_method(
- "_reflection_pad2d_out",
- *[](const Tensor &self, IntArrayRef padding, Tensor &out) {
- return torch::reflection_pad2d_out(out, self, padding);
- })
- .define_singleton_method(
- "_replication_pad1d",
- *[](const Tensor &self, IntArrayRef padding) {
- return torch::replication_pad1d(self, padding);
- })
- .define_singleton_method(
- "_replication_pad1d_out",
- *[](const Tensor &self, IntArrayRef padding, Tensor &out) {
- return torch::replication_pad1d_out(out, self, padding);
- })
- .define_singleton_method(
- "_replication_pad2d",
- *[](const Tensor &self, IntArrayRef padding) {
- return torch::replication_pad2d(self, padding);
- })
- .define_singleton_method(
- "_replication_pad2d_out",
- *[](const Tensor &self, IntArrayRef padding, Tensor &out) {
- return torch::replication_pad2d_out(out, self, padding);
- })
- .define_singleton_method(
- "_replication_pad3d",
- *[](const Tensor &self, IntArrayRef padding) {
- return torch::replication_pad3d(self, padding);
- })
- .define_singleton_method(
- "_replication_pad3d_out",
- *[](const Tensor &self, IntArrayRef padding, Tensor &out) {
- return torch::replication_pad3d_out(out, self, padding);
- })
- .define_singleton_method(
- "_rrelu_with_noise",
- *[](const Tensor &self, const Tensor &noise, Scalar lower, Scalar upper, bool training) {
- return torch::rrelu_with_noise(self, noise, lower, upper, training);
- })
- .define_singleton_method(
- "_rrelu_with_noise_",
- *[](Tensor &self, const Tensor &noise, Scalar lower, Scalar upper, bool training) {
- return torch::rrelu_with_noise_(self, noise, lower, upper, training);
- })
- .define_singleton_method(
- "_rrelu_with_noise_out",
- *[](const Tensor &self, const Tensor &noise, Scalar lower, Scalar upper, bool training, Tensor &out) {
- return torch::rrelu_with_noise_out(out, self, noise, lower, upper, training);
- })
- .define_singleton_method(
- "_slow_conv_dilated2d",
- *[](const Tensor &self, const Tensor &weight, IntArrayRef kernel_size, OptionalTensor bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) {
- return torch::slow_conv_dilated2d(self, weight, kernel_size, bias, stride, padding, dilation);
- })
- .define_singleton_method(
- "_slow_conv_dilated3d",
- *[](const Tensor &self, const Tensor &weight, IntArrayRef kernel_size, OptionalTensor bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) {
- return torch::slow_conv_dilated3d(self, weight, kernel_size, bias, stride, padding, dilation);
- })
- .define_singleton_method(
- "_slow_conv_transpose2d",
- *[](const Tensor &self, const Tensor &weight, IntArrayRef kernel_size, OptionalTensor bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) {
- return torch::slow_conv_transpose2d(self, weight, kernel_size, bias, stride, padding, output_padding, dilation);
- })
- .define_singleton_method(
- "_slow_conv_transpose2d_out",
- *[](const Tensor &self, const Tensor &weight, IntArrayRef kernel_size, OptionalTensor bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, Tensor &out) {
- return torch::slow_conv_transpose2d_out(out, self, weight, kernel_size, bias, stride, padding, output_padding, dilation);
- })
- .define_singleton_method(
- "_slow_conv_transpose3d",
- *[](const Tensor &self, const Tensor &weight, IntArrayRef kernel_size, OptionalTensor bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) {
- return torch::slow_conv_transpose3d(self, weight, kernel_size, bias, stride, padding, output_padding, dilation);
- })
- .define_singleton_method(
- "_slow_conv_transpose3d_out",
- *[](const Tensor &self, const Tensor &weight, IntArrayRef kernel_size, OptionalTensor bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, Tensor &out) {
- return torch::slow_conv_transpose3d_out(out, self, weight, kernel_size, bias, stride, padding, output_padding, dilation);
- })
- .define_singleton_method(
- "_smooth_l1_loss",
- *[](const Tensor &self, const Tensor &target, MyReduction reduction) {
- return torch::smooth_l1_loss(self, target, reduction);
- })
- .define_singleton_method(
- "_smooth_l1_loss_out",
- *[](const Tensor &self, const Tensor &target, MyReduction reduction, Tensor &out) {
- return torch::smooth_l1_loss_out(out, self, target, reduction);
- })
- .define_singleton_method(
- "_soft_margin_loss",
- *[](const Tensor &self, const Tensor &target, MyReduction reduction) {
- return torch::soft_margin_loss(self, target, reduction);
- })
- .define_singleton_method(
- "_soft_margin_loss_out",
- *[](const Tensor &self, const Tensor &target, MyReduction reduction, Tensor &out) {
- return torch::soft_margin_loss_out(out, self, target, reduction);
- })
- .define_singleton_method(
- "_softplus",
- *[](const Tensor &self, Scalar beta, Scalar threshold) {
- return torch::softplus(self, beta, threshold);
- })
- .define_singleton_method(
- "_softplus_out",
- *[](const Tensor &self, Scalar beta, Scalar threshold, Tensor &out) {
- return torch::softplus_out(out, self, beta, threshold);
- })
- .define_singleton_method(
- "_softshrink",
- *[](const Tensor &self, Scalar lambd) {
- return torch::softshrink(self, lambd);
- })
- .define_singleton_method(
- "_softshrink_out",
- *[](const Tensor &self, Scalar lambd, Tensor &out) {
- return torch::softshrink_out(out, self, lambd);
- })
- .define_singleton_method(
- "_thnn_conv2d",
- *[](const Tensor &self, const Tensor &weight, IntArrayRef kernel_size, OptionalTensor bias, IntArrayRef stride, IntArrayRef padding) {
- return torch::thnn_conv2d(self, weight, kernel_size, bias, stride, padding);
- })
- .define_singleton_method(
- "_thnn_conv2d_forward",
- *[](const Tensor &self, const Tensor &weight, IntArrayRef kernel_size, OptionalTensor bias, IntArrayRef stride, IntArrayRef padding) {
- return wrap(torch::thnn_conv2d_forward(self, weight, kernel_size, bias, stride, padding));
- })
- .define_singleton_method(
- "_thnn_conv2d_forward_output",
- *[](const Tensor &self, const Tensor &weight, IntArrayRef kernel_size, OptionalTensor bias, IntArrayRef stride, IntArrayRef padding, Tensor &output, Tensor &finput, Tensor &fgrad_input) {
- return wrap(torch::thnn_conv2d_forward_out(output, finput, fgrad_input, self, weight, kernel_size, bias, stride, padding));
- })
- .define_singleton_method(
- "_thnn_conv2d_out",
- *[](const Tensor &self, const Tensor &weight, IntArrayRef kernel_size, OptionalTensor bias, IntArrayRef stride, IntArrayRef padding, Tensor &out) {
- return torch::thnn_conv2d_out(out, self, weight, kernel_size, bias, stride, padding);
- })
- .define_singleton_method(
- "_thnn_conv3d",
- *[](const Tensor &self, const Tensor &weight, IntArrayRef kernel_size, OptionalTensor bias, IntArrayRef stride, IntArrayRef padding) {
- return torch::thnn_conv3d(self, weight, kernel_size, bias, stride, padding);
- })
- .define_singleton_method(
- "_thnn_conv3d_forward",
- *[](const Tensor &self, const Tensor &weight, IntArrayRef kernel_size, OptionalTensor bias, IntArrayRef stride, IntArrayRef padding) {
- return wrap(torch::thnn_conv3d_forward(self, weight, kernel_size, bias, stride, padding));
- })
- .define_singleton_method(
- "_thnn_conv3d_forward_output",
- *[](const Tensor &self, const Tensor &weight, IntArrayRef kernel_size, OptionalTensor bias, IntArrayRef stride, IntArrayRef padding, Tensor &output, Tensor &finput, Tensor &fgrad_input) {
- return wrap(torch::thnn_conv3d_forward_out(output, finput, fgrad_input, self, weight, kernel_size, bias, stride, padding));
- })
- .define_singleton_method(
- "_thnn_conv3d_out",
- *[](const Tensor &self, const Tensor &weight, IntArrayRef kernel_size, OptionalTensor bias, IntArrayRef stride, IntArrayRef padding, Tensor &out) {
- return torch::thnn_conv3d_out(out, self, weight, kernel_size, bias, stride, padding);
- })
- .define_singleton_method(
- "_thnn_conv_depthwise2d",
- *[](const Tensor &self, const Tensor &weight, IntArrayRef kernel_size, OptionalTensor bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) {
- return torch::thnn_conv_depthwise2d(self, weight, kernel_size, bias, stride, padding, dilation);
- })
- .define_singleton_method(
- "_thnn_conv_depthwise2d_forward",
- *[](const Tensor &self, const Tensor &weight, IntArrayRef kernel_size, OptionalTensor bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) {
- return torch::thnn_conv_depthwise2d_forward(self, weight, kernel_size, bias, stride, padding, dilation);
- })
- .define_singleton_method(
- "_thnn_conv_depthwise2d_forward_out",
- *[](const Tensor &self, const Tensor &weight, IntArrayRef kernel_size, OptionalTensor bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, Tensor &out) {
- return torch::thnn_conv_depthwise2d_forward_out(out, self, weight, kernel_size, bias, stride, padding, dilation);
- })
- .define_singleton_method(
- "_thnn_conv_depthwise2d_out",
- *[](const Tensor &self, const Tensor &weight, IntArrayRef kernel_size, OptionalTensor bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, Tensor &out) {
- return torch::thnn_conv_depthwise2d_out(out, self, weight, kernel_size, bias, stride, padding, dilation);
- })
- .define_singleton_method(
- "_upsample_bicubic2d",
- *[](const Tensor &self, IntArrayRef output_size, bool align_corners) {
- return torch::upsample_bicubic2d(self, output_size, align_corners);
- })
- .define_singleton_method(
- "_upsample_bicubic2d_out",
- *[](const Tensor &self, IntArrayRef output_size, bool align_corners, Tensor &out) {
- return torch::upsample_bicubic2d_out(out, self, output_size, align_corners);
- })
- .define_singleton_method(
- "_upsample_bilinear2d",
- *[](const Tensor &self, IntArrayRef output_size, bool align_corners) {
- return torch::upsample_bilinear2d(self, output_size, align_corners);
- })
- .define_singleton_method(
- "_upsample_bilinear2d_out",
- *[](const Tensor &self, IntArrayRef output_size, bool align_corners, Tensor &out) {
- return torch::upsample_bilinear2d_out(out, self, output_size, align_corners);
- })
- .define_singleton_method(
- "_upsample_linear1d",
- *[](const Tensor &self, IntArrayRef output_size, bool align_corners) {
- return torch::upsample_linear1d(self, output_size, align_corners);
- })
- .define_singleton_method(
- "_upsample_linear1d_out",
- *[](const Tensor &self, IntArrayRef output_size, bool align_corners, Tensor &out) {
- return torch::upsample_linear1d_out(out, self, output_size, align_corners);
- })
- .define_singleton_method(
- "_upsample_nearest1d",
- *[](const Tensor &self, IntArrayRef output_size) {
- return torch::upsample_nearest1d(self, output_size);
- })
- .define_singleton_method(
- "_upsample_nearest1d_out",
- *[](const Tensor &self, IntArrayRef output_size, Tensor &out) {
- return torch::upsample_nearest1d_out(out, self, output_size);
- })
- .define_singleton_method(
- "_upsample_nearest2d",
- *[](const Tensor &self, IntArrayRef output_size) {
- return torch::upsample_nearest2d(self, output_size);
- })
- .define_singleton_method(
- "_upsample_nearest2d_out",
- *[](const Tensor &self, IntArrayRef output_size, Tensor &out) {
- return torch::upsample_nearest2d_out(out, self, output_size);
- })
- .define_singleton_method(
- "_upsample_nearest3d",
- *[](const Tensor &self, IntArrayRef output_size) {
- return torch::upsample_nearest3d(self, output_size);
- })
- .define_singleton_method(
- "_upsample_nearest3d_out",
- *[](const Tensor &self, IntArrayRef output_size, Tensor &out) {
- return torch::upsample_nearest3d_out(out, self, output_size);
- })
- .define_singleton_method(
- "_upsample_trilinear3d",
- *[](const Tensor &self, IntArrayRef output_size, bool align_corners) {
- return torch::upsample_trilinear3d(self, output_size, align_corners);
- })
- .define_singleton_method(
- "_upsample_trilinear3d_out",
- *[](const Tensor &self, IntArrayRef output_size, bool align_corners, Tensor &out) {
- return torch::upsample_trilinear3d_out(out, self, output_size, align_corners);
- });
- }