torch-rb 0.2.0 → 0.2.1

@@ -1,560 +0,0 @@
- // generated by rake generate:functions
- // do not edit by hand
-
- #include <torch/torch.h>
- #include <rice/Module.hpp>
- #include "templates.hpp"
-
- void add_nn_functions(Module m) {
-   m
-     .define_singleton_method(
-       "_adaptive_avg_pool2d",
-       *[](const Tensor &self, IntArrayRef output_size) {
-         return torch::adaptive_avg_pool2d(self, output_size);
-       })
-     .define_singleton_method(
-       "_adaptive_avg_pool2d_out",
-       *[](const Tensor &self, IntArrayRef output_size, Tensor &out) {
-         return torch::adaptive_avg_pool2d_out(out, self, output_size);
-       })
-     .define_singleton_method(
-       "_adaptive_avg_pool3d",
-       *[](const Tensor &self, IntArrayRef output_size) {
-         return torch::adaptive_avg_pool3d(self, output_size);
-       })
-     .define_singleton_method(
-       "_adaptive_avg_pool3d_out",
-       *[](const Tensor &self, IntArrayRef output_size, Tensor &out) {
-         return torch::adaptive_avg_pool3d_out(out, self, output_size);
-       })
-     .define_singleton_method(
-       "_adaptive_max_pool2d",
-       *[](const Tensor &self, IntArrayRef output_size) {
-         return wrap(torch::adaptive_max_pool2d(self, output_size));
-       })
-     .define_singleton_method(
-       "_adaptive_max_pool2d_out",
-       *[](const Tensor &self, IntArrayRef output_size, Tensor &out, Tensor &indices) {
-         return wrap(torch::adaptive_max_pool2d_out(out, indices, self, output_size));
-       })
-     .define_singleton_method(
-       "_adaptive_max_pool3d",
-       *[](const Tensor &self, IntArrayRef output_size) {
-         return wrap(torch::adaptive_max_pool3d(self, output_size));
-       })
-     .define_singleton_method(
-       "_adaptive_max_pool3d_out",
-       *[](const Tensor &self, IntArrayRef output_size, Tensor &out, Tensor &indices) {
-         return wrap(torch::adaptive_max_pool3d_out(out, indices, self, output_size));
-       })
-     .define_singleton_method(
-       "_avg_pool2d",
-       *[](const Tensor &self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) {
-         return torch::avg_pool2d(self, kernel_size, stride, padding, ceil_mode, count_include_pad);
-       })
-     .define_singleton_method(
-       "_avg_pool2d_divisor_override",
-       *[](const Tensor &self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, int64_t divisor_override) {
-         return torch::avg_pool2d(self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
-       })
-     .define_singleton_method(
-       "_avg_pool3d",
-       *[](const Tensor &self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) {
-         return torch::avg_pool3d(self, kernel_size, stride, padding, ceil_mode, count_include_pad);
-       })
-     .define_singleton_method(
-       "_avg_pool3d_divisor_override",
-       *[](const Tensor &self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, int64_t divisor_override) {
-         return torch::avg_pool3d(self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
-       })
-     .define_singleton_method(
-       "_binary_cross_entropy",
-       *[](const Tensor &self, const Tensor &target, OptionalTensor weight, MyReduction reduction) {
-         return torch::binary_cross_entropy(self, target, weight, reduction);
-       })
-     .define_singleton_method(
-       "_binary_cross_entropy_out",
-       *[](const Tensor &self, const Tensor &target, OptionalTensor weight, MyReduction reduction, Tensor &out) {
-         return torch::binary_cross_entropy_out(out, self, target, weight, reduction);
-       })
-     .define_singleton_method(
-       "_col2im",
-       *[](const Tensor &self, IntArrayRef output_size, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride) {
-         return torch::col2im(self, output_size, kernel_size, dilation, padding, stride);
-       })
-     .define_singleton_method(
-       "_col2im_out",
-       *[](const Tensor &self, IntArrayRef output_size, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride, Tensor &out) {
-         return torch::col2im_out(out, self, output_size, kernel_size, dilation, padding, stride);
-       })
-     .define_singleton_method(
-       "_elu",
-       *[](const Tensor &self, Scalar alpha, Scalar scale, Scalar input_scale) {
-         return torch::elu(self, alpha, scale, input_scale);
-       })
-     .define_singleton_method(
-       "_elu_",
-       *[](Tensor &self, Scalar alpha, Scalar scale, Scalar input_scale) {
-         return torch::elu_(self, alpha, scale, input_scale);
-       })
-     .define_singleton_method(
-       "_elu_out",
-       *[](const Tensor &self, Scalar alpha, Scalar scale, Scalar input_scale, Tensor &out) {
-         return torch::elu_out(out, self, alpha, scale, input_scale);
-       })
-     .define_singleton_method(
-       "_fractional_max_pool2d",
-       *[](const Tensor &self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor &random_samples) {
-         return wrap(torch::fractional_max_pool2d(self, kernel_size, output_size, random_samples));
-       })
-     .define_singleton_method(
-       "_fractional_max_pool2d_output",
-       *[](const Tensor &self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor &random_samples, Tensor &output, Tensor &indices) {
-         return wrap(torch::fractional_max_pool2d_out(output, indices, self, kernel_size, output_size, random_samples));
-       })
-     .define_singleton_method(
-       "_fractional_max_pool3d",
-       *[](const Tensor &self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor &random_samples) {
-         return wrap(torch::fractional_max_pool3d(self, kernel_size, output_size, random_samples));
-       })
-     .define_singleton_method(
-       "_fractional_max_pool3d_output",
-       *[](const Tensor &self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor &random_samples, Tensor &output, Tensor &indices) {
-         return wrap(torch::fractional_max_pool3d_out(output, indices, self, kernel_size, output_size, random_samples));
-       })
-     .define_singleton_method(
-       "_gelu",
-       *[](const Tensor &self) {
-         return torch::gelu(self);
-       })
-     .define_singleton_method(
-       "_glu",
-       *[](const Tensor &self, int64_t dim) {
-         return torch::glu(self, dim);
-       })
-     .define_singleton_method(
-       "_glu_out",
-       *[](const Tensor &self, int64_t dim, Tensor &out) {
-         return torch::glu_out(out, self, dim);
-       })
-     .define_singleton_method(
-       "_hardsigmoid",
-       *[](const Tensor &self) {
-         return torch::hardsigmoid(self);
-       })
-     .define_singleton_method(
-       "_hardsigmoid_",
-       *[](Tensor &self) {
-         return torch::hardsigmoid_(self);
-       })
-     .define_singleton_method(
-       "_hardsigmoid_out",
-       *[](const Tensor &self, Tensor &out) {
-         return torch::hardsigmoid_out(out, self);
-       })
-     .define_singleton_method(
-       "_hardtanh",
-       *[](const Tensor &self, Scalar min_val, Scalar max_val) {
-         return torch::hardtanh(self, min_val, max_val);
-       })
-     .define_singleton_method(
-       "_hardtanh_",
-       *[](Tensor &self, Scalar min_val, Scalar max_val) {
-         return torch::hardtanh_(self, min_val, max_val);
-       })
-     .define_singleton_method(
-       "_hardtanh_out",
-       *[](const Tensor &self, Scalar min_val, Scalar max_val, Tensor &out) {
-         return torch::hardtanh_out(out, self, min_val, max_val);
-       })
-     .define_singleton_method(
-       "_im2col",
-       *[](const Tensor &self, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride) {
-         return torch::im2col(self, kernel_size, dilation, padding, stride);
-       })
-     .define_singleton_method(
-       "_im2col_out",
-       *[](const Tensor &self, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride, Tensor &out) {
-         return torch::im2col_out(out, self, kernel_size, dilation, padding, stride);
-       })
-     .define_singleton_method(
-       "_l1_loss",
-       *[](const Tensor &self, const Tensor &target, MyReduction reduction) {
-         return torch::l1_loss(self, target, reduction);
-       })
-     .define_singleton_method(
-       "_l1_loss_out",
-       *[](const Tensor &self, const Tensor &target, MyReduction reduction, Tensor &out) {
-         return torch::l1_loss_out(out, self, target, reduction);
-       })
-     .define_singleton_method(
-       "_leaky_relu",
-       *[](const Tensor &self, Scalar negative_slope) {
-         return torch::leaky_relu(self, negative_slope);
-       })
-     .define_singleton_method(
-       "_leaky_relu_",
-       *[](Tensor &self, Scalar negative_slope) {
-         return torch::leaky_relu_(self, negative_slope);
-       })
-     .define_singleton_method(
-       "_leaky_relu_out",
-       *[](const Tensor &self, Scalar negative_slope, Tensor &out) {
-         return torch::leaky_relu_out(out, self, negative_slope);
-       })
-     .define_singleton_method(
-       "_linear",
-       *[](const Tensor &input, const Tensor &weight, OptionalTensor bias) {
-         return torch::linear(input, weight, bias);
-       })
-     .define_singleton_method(
-       "_log_sigmoid",
-       *[](const Tensor &self) {
-         return torch::log_sigmoid(self);
-       })
-     .define_singleton_method(
-       "_log_sigmoid_forward",
-       *[](const Tensor &self) {
-         return wrap(torch::log_sigmoid_forward(self));
-       })
-     .define_singleton_method(
-       "_log_sigmoid_forward_output",
-       *[](const Tensor &self, Tensor &output, Tensor &buffer) {
-         return wrap(torch::log_sigmoid_forward_out(output, buffer, self));
-       })
-     .define_singleton_method(
-       "_log_sigmoid_out",
-       *[](const Tensor &self, Tensor &out) {
-         return torch::log_sigmoid_out(out, self);
-       })
-     .define_singleton_method(
-       "_max_pool2d_with_indices",
-       *[](const Tensor &self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) {
-         return wrap(torch::max_pool2d_with_indices(self, kernel_size, stride, padding, dilation, ceil_mode));
-       })
-     .define_singleton_method(
-       "_max_pool2d_with_indices_out",
-       *[](const Tensor &self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, Tensor &out, Tensor &indices) {
-         return wrap(torch::max_pool2d_with_indices_out(out, indices, self, kernel_size, stride, padding, dilation, ceil_mode));
-       })
-     .define_singleton_method(
-       "_max_pool3d_with_indices",
-       *[](const Tensor &self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) {
-         return wrap(torch::max_pool3d_with_indices(self, kernel_size, stride, padding, dilation, ceil_mode));
-       })
-     .define_singleton_method(
-       "_max_pool3d_with_indices_out",
-       *[](const Tensor &self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, Tensor &out, Tensor &indices) {
-         return wrap(torch::max_pool3d_with_indices_out(out, indices, self, kernel_size, stride, padding, dilation, ceil_mode));
-       })
-     .define_singleton_method(
-       "_max_unpool2d",
-       *[](const Tensor &self, const Tensor &indices, IntArrayRef output_size) {
-         return torch::max_unpool2d(self, indices, output_size);
-       })
-     .define_singleton_method(
-       "_max_unpool2d_out",
-       *[](const Tensor &self, const Tensor &indices, IntArrayRef output_size, Tensor &out) {
-         return torch::max_unpool2d_out(out, self, indices, output_size);
-       })
-     .define_singleton_method(
-       "_max_unpool3d",
-       *[](const Tensor &self, const Tensor &indices, IntArrayRef output_size, IntArrayRef stride, IntArrayRef padding) {
-         return torch::max_unpool3d(self, indices, output_size, stride, padding);
-       })
-     .define_singleton_method(
-       "_max_unpool3d_out",
-       *[](const Tensor &self, const Tensor &indices, IntArrayRef output_size, IntArrayRef stride, IntArrayRef padding, Tensor &out) {
-         return torch::max_unpool3d_out(out, self, indices, output_size, stride, padding);
-       })
-     .define_singleton_method(
-       "_mkldnn_linear",
-       *[](const Tensor &input, const Tensor &weight, OptionalTensor bias) {
-         return torch::mkldnn_linear(input, weight, bias);
-       })
-     .define_singleton_method(
-       "_mkldnn_reorder_conv2d_weight",
-       *[](const Tensor &self, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups) {
-         return torch::mkldnn_reorder_conv2d_weight(self, padding, stride, dilation, groups);
-       })
-     .define_singleton_method(
-       "_mse_loss",
-       *[](const Tensor &self, const Tensor &target, MyReduction reduction) {
-         return torch::mse_loss(self, target, reduction);
-       })
-     .define_singleton_method(
-       "_mse_loss_out",
-       *[](const Tensor &self, const Tensor &target, MyReduction reduction, Tensor &out) {
-         return torch::mse_loss_out(out, self, target, reduction);
-       })
-     .define_singleton_method(
-       "_multi_margin_loss",
-       *[](const Tensor &self, const Tensor &target, Scalar p, Scalar margin, OptionalTensor weight, MyReduction reduction) {
-         return torch::multi_margin_loss(self, target, p, margin, weight, reduction);
-       })
-     .define_singleton_method(
-       "_multi_margin_loss_out",
-       *[](const Tensor &self, const Tensor &target, Scalar p, Scalar margin, OptionalTensor weight, MyReduction reduction, Tensor &out) {
-         return torch::multi_margin_loss_out(out, self, target, p, margin, weight, reduction);
-       })
-     .define_singleton_method(
-       "_multilabel_margin_loss",
-       *[](const Tensor &self, const Tensor &target, MyReduction reduction) {
-         return torch::multilabel_margin_loss(self, target, reduction);
-       })
-     .define_singleton_method(
-       "_multilabel_margin_loss_forward",
-       *[](const Tensor &self, const Tensor &target, MyReduction reduction) {
-         return wrap(torch::multilabel_margin_loss_forward(self, target, reduction));
-       })
-     .define_singleton_method(
-       "_multilabel_margin_loss_forward_output",
-       *[](const Tensor &self, const Tensor &target, MyReduction reduction, Tensor &output, Tensor &is_target) {
-         return wrap(torch::multilabel_margin_loss_forward_out(output, is_target, self, target, reduction));
-       })
-     .define_singleton_method(
-       "_multilabel_margin_loss_out",
-       *[](const Tensor &self, const Tensor &target, MyReduction reduction, Tensor &out) {
-         return torch::multilabel_margin_loss_out(out, self, target, reduction);
-       })
-     .define_singleton_method(
-       "_nll_loss",
-       *[](const Tensor &self, const Tensor &target, OptionalTensor weight, MyReduction reduction, int64_t ignore_index) {
-         return torch::nll_loss(self, target, weight, reduction, ignore_index);
-       })
-     .define_singleton_method(
-       "_nll_loss2d",
-       *[](const Tensor &self, const Tensor &target, OptionalTensor weight, MyReduction reduction, int64_t ignore_index) {
-         return torch::nll_loss2d(self, target, weight, reduction, ignore_index);
-       })
-     .define_singleton_method(
-       "_nll_loss2d_forward",
-       *[](const Tensor &self, const Tensor &target, OptionalTensor weight, MyReduction reduction, int64_t ignore_index) {
-         return wrap(torch::nll_loss2d_forward(self, target, weight, reduction, ignore_index));
-       })
-     .define_singleton_method(
-       "_nll_loss2d_forward_output",
-       *[](const Tensor &self, const Tensor &target, OptionalTensor weight, MyReduction reduction, int64_t ignore_index, Tensor &output, Tensor &total_weight) {
-         return wrap(torch::nll_loss2d_forward_out(output, total_weight, self, target, weight, reduction, ignore_index));
-       })
-     .define_singleton_method(
-       "_nll_loss2d_out",
-       *[](const Tensor &self, const Tensor &target, OptionalTensor weight, MyReduction reduction, int64_t ignore_index, Tensor &out) {
-         return torch::nll_loss2d_out(out, self, target, weight, reduction, ignore_index);
-       })
-     .define_singleton_method(
-       "_nll_loss_forward",
-       *[](const Tensor &self, const Tensor &target, OptionalTensor weight, MyReduction reduction, int64_t ignore_index) {
-         return wrap(torch::nll_loss_forward(self, target, weight, reduction, ignore_index));
-       })
-     .define_singleton_method(
-       "_nll_loss_forward_output",
-       *[](const Tensor &self, const Tensor &target, OptionalTensor weight, MyReduction reduction, int64_t ignore_index, Tensor &output, Tensor &total_weight) {
-         return wrap(torch::nll_loss_forward_out(output, total_weight, self, target, weight, reduction, ignore_index));
-       })
-     .define_singleton_method(
-       "_nll_loss_out",
-       *[](const Tensor &self, const Tensor &target, OptionalTensor weight, MyReduction reduction, int64_t ignore_index, Tensor &out) {
-         return torch::nll_loss_out(out, self, target, weight, reduction, ignore_index);
-       })
-     .define_singleton_method(
-       "_one_hot",
-       *[](const Tensor &self, int64_t num_classes) {
-         return torch::one_hot(self, num_classes);
-       })
-     .define_singleton_method(
-       "_reflection_pad1d",
-       *[](const Tensor &self, IntArrayRef padding) {
-         return torch::reflection_pad1d(self, padding);
-       })
-     .define_singleton_method(
-       "_reflection_pad1d_out",
-       *[](const Tensor &self, IntArrayRef padding, Tensor &out) {
-         return torch::reflection_pad1d_out(out, self, padding);
-       })
-     .define_singleton_method(
-       "_reflection_pad2d",
-       *[](const Tensor &self, IntArrayRef padding) {
-         return torch::reflection_pad2d(self, padding);
-       })
-     .define_singleton_method(
-       "_reflection_pad2d_out",
-       *[](const Tensor &self, IntArrayRef padding, Tensor &out) {
-         return torch::reflection_pad2d_out(out, self, padding);
-       })
-     .define_singleton_method(
-       "_replication_pad1d",
-       *[](const Tensor &self, IntArrayRef padding) {
-         return torch::replication_pad1d(self, padding);
-       })
-     .define_singleton_method(
-       "_replication_pad1d_out",
-       *[](const Tensor &self, IntArrayRef padding, Tensor &out) {
-         return torch::replication_pad1d_out(out, self, padding);
-       })
-     .define_singleton_method(
-       "_replication_pad2d",
-       *[](const Tensor &self, IntArrayRef padding) {
-         return torch::replication_pad2d(self, padding);
-       })
-     .define_singleton_method(
-       "_replication_pad2d_out",
-       *[](const Tensor &self, IntArrayRef padding, Tensor &out) {
-         return torch::replication_pad2d_out(out, self, padding);
-       })
-     .define_singleton_method(
-       "_replication_pad3d",
-       *[](const Tensor &self, IntArrayRef padding) {
-         return torch::replication_pad3d(self, padding);
-       })
-     .define_singleton_method(
-       "_replication_pad3d_out",
-       *[](const Tensor &self, IntArrayRef padding, Tensor &out) {
-         return torch::replication_pad3d_out(out, self, padding);
-       })
-     .define_singleton_method(
-       "_rrelu_with_noise",
-       *[](const Tensor &self, const Tensor &noise, Scalar lower, Scalar upper, bool training) {
-         return torch::rrelu_with_noise(self, noise, lower, upper, training);
-       })
-     .define_singleton_method(
-       "_rrelu_with_noise_",
-       *[](Tensor &self, const Tensor &noise, Scalar lower, Scalar upper, bool training) {
-         return torch::rrelu_with_noise_(self, noise, lower, upper, training);
-       })
-     .define_singleton_method(
-       "_rrelu_with_noise_out",
-       *[](const Tensor &self, const Tensor &noise, Scalar lower, Scalar upper, bool training, Tensor &out) {
-         return torch::rrelu_with_noise_out(out, self, noise, lower, upper, training);
-       })
-     .define_singleton_method(
-       "_slow_conv3d",
-       *[](const Tensor &self, const Tensor &weight, IntArrayRef kernel_size, OptionalTensor bias, IntArrayRef stride, IntArrayRef padding) {
-         return torch::slow_conv3d(self, weight, kernel_size, bias, stride, padding);
-       })
-     .define_singleton_method(
-       "_slow_conv3d_forward",
-       *[](const Tensor &self, const Tensor &weight, IntArrayRef kernel_size, OptionalTensor bias, IntArrayRef stride, IntArrayRef padding) {
-         return wrap(torch::slow_conv3d_forward(self, weight, kernel_size, bias, stride, padding));
-       })
-     .define_singleton_method(
-       "_slow_conv3d_forward_output",
-       *[](const Tensor &self, const Tensor &weight, IntArrayRef kernel_size, OptionalTensor bias, IntArrayRef stride, IntArrayRef padding, Tensor &output, Tensor &finput, Tensor &fgrad_input) {
-         return wrap(torch::slow_conv3d_forward_out(output, finput, fgrad_input, self, weight, kernel_size, bias, stride, padding));
-       })
-     .define_singleton_method(
-       "_slow_conv3d_out",
-       *[](const Tensor &self, const Tensor &weight, IntArrayRef kernel_size, OptionalTensor bias, IntArrayRef stride, IntArrayRef padding, Tensor &out) {
-         return torch::slow_conv3d_out(out, self, weight, kernel_size, bias, stride, padding);
-       })
-     .define_singleton_method(
-       "_slow_conv_dilated2d",
-       *[](const Tensor &self, const Tensor &weight, IntArrayRef kernel_size, OptionalTensor bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) {
-         return torch::slow_conv_dilated2d(self, weight, kernel_size, bias, stride, padding, dilation);
-       })
-     .define_singleton_method(
-       "_slow_conv_dilated3d",
-       *[](const Tensor &self, const Tensor &weight, IntArrayRef kernel_size, OptionalTensor bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) {
-         return torch::slow_conv_dilated3d(self, weight, kernel_size, bias, stride, padding, dilation);
-       })
-     .define_singleton_method(
-       "_slow_conv_transpose2d",
-       *[](const Tensor &self, const Tensor &weight, IntArrayRef kernel_size, OptionalTensor bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) {
-         return torch::slow_conv_transpose2d(self, weight, kernel_size, bias, stride, padding, output_padding, dilation);
-       })
-     .define_singleton_method(
-       "_slow_conv_transpose2d_out",
-       *[](const Tensor &self, const Tensor &weight, IntArrayRef kernel_size, OptionalTensor bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, Tensor &out) {
-         return torch::slow_conv_transpose2d_out(out, self, weight, kernel_size, bias, stride, padding, output_padding, dilation);
-       })
-     .define_singleton_method(
-       "_slow_conv_transpose3d",
-       *[](const Tensor &self, const Tensor &weight, IntArrayRef kernel_size, OptionalTensor bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) {
-         return torch::slow_conv_transpose3d(self, weight, kernel_size, bias, stride, padding, output_padding, dilation);
-       })
-     .define_singleton_method(
-       "_slow_conv_transpose3d_out",
-       *[](const Tensor &self, const Tensor &weight, IntArrayRef kernel_size, OptionalTensor bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, Tensor &out) {
-         return torch::slow_conv_transpose3d_out(out, self, weight, kernel_size, bias, stride, padding, output_padding, dilation);
-       })
-     .define_singleton_method(
-       "_smooth_l1_loss",
-       *[](const Tensor &self, const Tensor &target, MyReduction reduction) {
-         return torch::smooth_l1_loss(self, target, reduction);
-       })
-     .define_singleton_method(
-       "_smooth_l1_loss_out",
-       *[](const Tensor &self, const Tensor &target, MyReduction reduction, Tensor &out) {
-         return torch::smooth_l1_loss_out(out, self, target, reduction);
-       })
-     .define_singleton_method(
-       "_soft_margin_loss",
-       *[](const Tensor &self, const Tensor &target, MyReduction reduction) {
-         return torch::soft_margin_loss(self, target, reduction);
-       })
-     .define_singleton_method(
-       "_soft_margin_loss_out",
-       *[](const Tensor &self, const Tensor &target, MyReduction reduction, Tensor &out) {
-         return torch::soft_margin_loss_out(out, self, target, reduction);
-       })
-     .define_singleton_method(
-       "_softplus",
-       *[](const Tensor &self, Scalar beta, Scalar threshold) {
-         return torch::softplus(self, beta, threshold);
-       })
-     .define_singleton_method(
-       "_softplus_out",
-       *[](const Tensor &self, Scalar beta, Scalar threshold, Tensor &out) {
-         return torch::softplus_out(out, self, beta, threshold);
-       })
-     .define_singleton_method(
-       "_softshrink",
-       *[](const Tensor &self, Scalar lambd) {
-         return torch::softshrink(self, lambd);
-       })
-     .define_singleton_method(
-       "_softshrink_out",
-       *[](const Tensor &self, Scalar lambd, Tensor &out) {
-         return torch::softshrink_out(out, self, lambd);
-       })
-     .define_singleton_method(
-       "_thnn_conv2d",
-       *[](const Tensor &self, const Tensor &weight, IntArrayRef kernel_size, OptionalTensor bias, IntArrayRef stride, IntArrayRef padding) {
-         return torch::thnn_conv2d(self, weight, kernel_size, bias, stride, padding);
-       })
-     .define_singleton_method(
-       "_thnn_conv2d_forward",
-       *[](const Tensor &self, const Tensor &weight, IntArrayRef kernel_size, OptionalTensor bias, IntArrayRef stride, IntArrayRef padding) {
-         return wrap(torch::thnn_conv2d_forward(self, weight, kernel_size, bias, stride, padding));
-       })
-     .define_singleton_method(
-       "_thnn_conv2d_forward_output",
-       *[](const Tensor &self, const Tensor &weight, IntArrayRef kernel_size, OptionalTensor bias, IntArrayRef stride, IntArrayRef padding, Tensor &output, Tensor &finput, Tensor &fgrad_input) {
-         return wrap(torch::thnn_conv2d_forward_out(output, finput, fgrad_input, self, weight, kernel_size, bias, stride, padding));
-       })
-     .define_singleton_method(
-       "_thnn_conv2d_out",
-       *[](const Tensor &self, const Tensor &weight, IntArrayRef kernel_size, OptionalTensor bias, IntArrayRef stride, IntArrayRef padding, Tensor &out) {
-         return torch::thnn_conv2d_out(out, self, weight, kernel_size, bias, stride, padding);
-       })
-     .define_singleton_method(
-       "_thnn_conv_depthwise2d",
-       *[](const Tensor &self, const Tensor &weight, IntArrayRef kernel_size, OptionalTensor bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) {
-         return torch::thnn_conv_depthwise2d(self, weight, kernel_size, bias, stride, padding, dilation);
-       })
-     .define_singleton_method(
-       "_thnn_conv_depthwise2d_forward",
-       *[](const Tensor &self, const Tensor &weight, IntArrayRef kernel_size, OptionalTensor bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) {
-         return torch::thnn_conv_depthwise2d_forward(self, weight, kernel_size, bias, stride, padding, dilation);
-       })
-     .define_singleton_method(
-       "_thnn_conv_depthwise2d_forward_out",
-       *[](const Tensor &self, const Tensor &weight, IntArrayRef kernel_size, OptionalTensor bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, Tensor &out) {
-         return torch::thnn_conv_depthwise2d_forward_out(out, self, weight, kernel_size, bias, stride, padding, dilation);
-       })
-     .define_singleton_method(
-       "_thnn_conv_depthwise2d_out",
-       *[](const Tensor &self, const Tensor &weight, IntArrayRef kernel_size, OptionalTensor bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, Tensor &out) {
-         return torch::thnn_conv_depthwise2d_out(out, self, weight, kernel_size, bias, stride, padding, dilation);
-       });
- }
@@ -1,6 +0,0 @@
- // generated by rake generate:functions
- // do not edit by hand
-
- #pragma once
-
- void add_nn_functions(Module m);
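For context, the sketch below shows how a generated registration function like add_nn_functions is typically wired into a Rice extension entry point, and how an individual binding follows the same pattern used throughout the deleted file. This is a hedged illustration, not the gem's actual ext.cpp: the entry-point name Init_ext, the "Torch" module name, the Tensor class registration, and the _relu_example method are assumptions added here for clarity. The wrap(...) calls in the deleted file rely on helpers from templates.hpp, which presumably convert multi-tensor results (for example the *_with_indices pooling variants) into Ruby arrays.

// Hedged sketch only: names below (Init_ext, "Torch", "_relu_example") are
// illustrative assumptions and do not reproduce the gem's actual ext.cpp.
#include <torch/torch.h>
#include <rice/Module.hpp>
#include <rice/Class.hpp>

using namespace Rice;

// Declared in the generated header shown in the second hunk above.
void add_nn_functions(Module m);

extern "C" void Init_ext() {
  // Create the top-level module that the Ruby layer calls into.
  Module m = define_module("Torch");

  // Tensors must be registered with Rice before they can cross the
  // C++/Ruby boundary as arguments or return values.
  define_class_under<torch::Tensor>(m, "Tensor");

  // Register the generated, underscore-prefixed NN bindings on the module.
  add_nn_functions(m);

  // The same pattern can define a one-off binding by hand: a captureless
  // lambda is dereferenced to a plain function and exposed as a singleton
  // method on the module.
  m.define_singleton_method(
      "_relu_example",
      *[](const torch::Tensor &self) {
        return torch::relu(self);
      });
}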