torch-rb 0.8.0 → 0.9.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +22 -0
- data/README.md +23 -41
- data/codegen/generate_functions.rb +46 -8
- data/codegen/native_functions.yaml +1103 -373
- data/ext/torch/backends.cpp +17 -0
- data/ext/torch/ext.cpp +8 -0
- data/ext/torch/fft.cpp +13 -0
- data/ext/torch/fft_functions.h +6 -0
- data/ext/torch/linalg.cpp +13 -0
- data/ext/torch/linalg_functions.h +6 -0
- data/ext/torch/ruby_arg_parser.h +17 -3
- data/ext/torch/special.cpp +13 -0
- data/ext/torch/special_functions.h +6 -0
- data/ext/torch/templates.h +0 -37
- data/ext/torch/tensor.cpp +8 -8
- data/lib/torch/nn/convnd.rb +2 -0
- data/lib/torch/nn/functional_attention.rb +241 -0
- data/lib/torch/nn/module.rb +30 -0
- data/lib/torch/nn/module_list.rb +49 -0
- data/lib/torch/nn/multihead_attention.rb +123 -0
- data/lib/torch/nn/parameter.rb +6 -0
- data/lib/torch/nn/transformer.rb +92 -0
- data/lib/torch/nn/transformer_decoder.rb +25 -0
- data/lib/torch/nn/transformer_decoder_layer.rb +43 -0
- data/lib/torch/nn/transformer_encoder.rb +25 -0
- data/lib/torch/nn/transformer_encoder_layer.rb +36 -0
- data/lib/torch/nn/utils.rb +12 -0
- data/lib/torch/tensor.rb +20 -0
- data/lib/torch/utils/data/data_loader.rb +2 -0
- data/lib/torch/version.rb +1 -1
- data/lib/torch.rb +6 -0
- metadata +18 -3
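
Version 0.9.0 also ships new attention and transformer modules under `Torch::NN` (see `multihead_attention.rb`, `transformer.rb`, and the encoder/decoder files listed above). Below is a minimal usage sketch; it assumes torch.rb mirrors PyTorch's constructor arguments (`embed_dim`/`num_heads`, `d_model:`/`nhead:`), so check the gem's documentation for the exact signatures.

```ruby
require "torch"

# Sketch of the attention/transformer modules added in 0.9.0.
# Constructor and keyword names are assumed to follow PyTorch's API.
attn = Torch::NN::MultiheadAttention.new(64, 8)   # embed_dim: 64, num_heads: 8
x = Torch.randn(10, 2, 64)                        # (seq_len, batch, embed_dim)
attn_out, attn_weights = attn.call(x, x, x)

model = Torch::NN::Transformer.new(d_model: 64, nhead: 8)
src = Torch.randn(10, 2, 64)
tgt = Torch.randn(20, 2, 64)
out = model.call(src, tgt)
```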
data/codegen/native_functions.yaml

@@ -89,6 +89,10 @@
   manual_cpp_binding: True
   variants: method

+- func: retains_grad(Tensor self) -> bool
+  manual_cpp_binding: True
+  variants: method
+
 - func: _fw_primal(Tensor(a) self, int level) -> Tensor(a)
   variants: method
   dispatch:
@@ -278,15 +282,15 @@

 - func: sgn(Tensor self) -> Tensor
   variants: function, method
-  dispatch:
-    CompositeExplicitAutograd: sgn
+  structured_delegate: sgn.out

 - func: sgn_(Tensor(a!) self) -> Tensor(a!)
   variants: method
-  dispatch:
-    CompositeExplicitAutograd: sgn_
+  structured_delegate: sgn.out

 - func: sgn.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+  structured: True
+  structured_inherits: TensorIteratorBase
   dispatch:
     CPU, CUDA: sgn_out

@@ -298,20 +302,43 @@
   device_check: NoCheck # TensorIterator
   variants: function

+- func: _conj(Tensor(a) self) -> Tensor(a)
+  variants: function, method
+  dispatch:
+    CompositeExplicitAutograd: _conj
+
 - func: conj(Tensor(a) self) -> Tensor(a)
-  device_check: NoCheck # TensorIterator
   variants: function, method
+  manual_cpp_binding: True

-- func: conj.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
-  device_check: NoCheck # TensorIterator
+- func: _conj_physical(Tensor self) -> Tensor
+  variants: function, method
   dispatch:
-    CPU, CUDA: conj_out
-    SparseCPU, SparseCUDA: conj_out_sparse
+    CompositeExplicitAutograd: _conj_physical

-- func: _conj(Tensor self) -> Tensor
-  variants: function
+- func: conj_physical(Tensor self) -> Tensor
+  variants: function, method
+
+- func: conj_physical.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
   dispatch:
-    CompositeExplicitAutograd: _conj
+    CPU, CUDA: conj_physical_out
+    SparseCPU, SparseCUDA: conj_physical_out_sparse
+
+- func: conj_physical_(Tensor(a!) self) -> Tensor(a!)
+  variants: function, method
+  dispatch:
+    CompositeExplicitAutograd: conj_physical_
+
+- func: resolve_conj(Tensor(a) self) -> Tensor(a)
+  variants: function, method
+
+- func: resolve_neg(Tensor(a) self) -> Tensor(a)
+  variants: function, method
+
+- func: _neg_view(Tensor(a) self) -> Tensor(a)
+  variants: function, method
+  dispatch:
+    CompositeExplicitAutograd: _neg_view

 - func: acos(Tensor self) -> Tensor
   device_check: NoCheck # TensorIterator
@@ -352,7 +379,7 @@
   variants: function, method
   dispatch:
     SparseCPU, SparseCUDA: add_sparse
-    SparseCsrCPU: add_sparse_csr
+    SparseCsrCPU, SparseCsrCUDA: add_sparse_csr
     MkldnnCPU: mkldnn_add

 - func: add_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)
@@ -361,7 +388,7 @@
   structured_delegate: add.out
   dispatch:
     SparseCPU, SparseCUDA: add_sparse_
-    SparseCsrCPU: add_sparse_csr_
+    SparseCsrCPU, SparseCsrCUDA: add_sparse_csr_
     MkldnnCPU: mkldnn_add_

 - func: add.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
@@ -373,6 +400,7 @@
     SparseCPU: add_out_sparse_cpu
     SparseCUDA: add_out_sparse_cuda
     SparseCsrCPU: add_out_sparse_csr_cpu
+    SparseCsrCUDA: add_out_sparse_csr_cuda
     MkldnnCPU: mkldnn_add_out

 - func: _add_relu.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
@@ -390,6 +418,16 @@
   dispatch:
     CPU: add_relu_out

+- func: _add_relu.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor
+  variants: function
+  dispatch:
+    CPU: add_relu
+
+- func: _add_relu_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)
+  variants: function
+  dispatch:
+    CPU: add_relu_
+
 # For C++ only, until we have conversion from C++ numbers to Tensor
 - func: add.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor
   device_check: NoCheck # TensorIterator
@@ -443,12 +481,14 @@

 - func: all.dim(Tensor self, int dim, bool keepdim=False) -> Tensor
   device_check: NoCheck # TensorIterator
+  structured_delegate: all.out
   variants: function, method
-  dispatch:
-    CPU, CUDA: all

 - func: all.out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
   device_check: NoCheck # TensorIterator
+  structured: True
+  precomputed:
+  - dim -> int dim
   dispatch:
     CPU, CUDA: all_out

@@ -464,12 +504,14 @@

 - func: any.dim(Tensor self, int dim, bool keepdim=False) -> Tensor
   device_check: NoCheck # TensorIterator
+  structured_delegate: any.out
   variants: function, method
-  dispatch:
-    CPU, CUDA: any

 - func: any.out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
   device_check: NoCheck # TensorIterator
+  structured: True
+  precomputed:
+  - dim -> int dim
   dispatch:
     CPU, CUDA: any_out

@@ -501,22 +543,22 @@
 - func: _dim_arange(Tensor like, int dim) -> Tensor

 - func: argmax(Tensor self, int? dim=None, bool keepdim=False) -> Tensor
+  structured_delegate: argmax.out
   device_check: NoCheck # TensorIterator
   variants: function, method
-  dispatch:
-    CPU, CUDA: argmax

 - func: argmax.out(Tensor self, int? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
+  structured: True
   dispatch:
     CPU, CUDA: argmax_out

 - func: argmin(Tensor self, int? dim=None, bool keepdim=False) -> Tensor
+  structured_delegate: argmin.out
   device_check: NoCheck # TensorIterator
   variants: function, method
-  dispatch:
-    CPU, CUDA: argmin

 - func: argmin.out(Tensor self, int? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
+  structured: True
   dispatch:
     CPU, CUDA: argmin_out

@@ -905,11 +947,6 @@
     SparseCPU: bmm_sparse_cpu
     SparseCUDA: bmm_sparse_cuda

-- func: _bmm(Tensor self, Tensor mat2, *, bool deterministic=False) -> Tensor
-  variants: function
-  dispatch:
-    SparseCUDA: _bmm_sparse_cuda
-
 - func: bmm.out(Tensor self, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!)
   variants: function
   dispatch:
@@ -918,11 +955,6 @@
     SparseCPU: bmm_out_sparse_cpu
     SparseCUDA: bmm_out_sparse_cuda

-- func: _bmm.out(Tensor self, Tensor mat2, *, bool deterministic=False, Tensor(a!) out) -> Tensor(a!)
-  variants: function
-  dispatch:
-    SparseCUDA: _bmm_out_sparse_cuda
-
 - func: broadcast_tensors(Tensor[] tensors) -> Tensor[]
   device_check: NoCheck
   device_guard: False
@@ -942,6 +974,15 @@

 - func: cat.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!)

+# alias for torch.cat
+- func: concat(Tensor[] tensors, int dim=0) -> Tensor
+
+- func: concat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)
+
+- func: concat.names(Tensor[] tensors, Dimname dim) -> Tensor
+
+- func: concat.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!)
+
 - func: block_diag(Tensor[] tensors) -> Tensor
   variants: function

@@ -996,8 +1037,8 @@
   device_check: NoCheck # TensorIterator
   variants: function, method
   cpp_no_default_args: ['min']
+  structured_delegate: clamp.out
   dispatch:
-    CPU, CUDA: clamp
     QuantizedCPU: clamp_quantized_cpu

 - func: clamp.Tensor(Tensor self, Tensor? min=None, Tensor? max=None) -> Tensor
@@ -1009,6 +1050,7 @@
   device_check: NoCheck # TensorIterator
   variants: function, method
   cpp_no_default_args: ['min']
+  structured_delegate: clamp.out
   dispatch:
     CompositeExplicitAutograd: clamp_

@@ -1020,6 +1062,8 @@
 - func: clamp.out(Tensor self, Scalar? min=None, Scalar? max=None, *, Tensor(a!) out) -> Tensor(a!)
   device_check: NoCheck # TensorIterator
   cpp_no_default_args: ['min']
+  structured: True
+  structured_inherits: TensorIteratorBase
   dispatch:
     CPU, CUDA: clamp_out

@@ -1200,6 +1244,11 @@
 - func: _copy_from(Tensor self, Tensor dst, bool non_blocking=False) -> Tensor
   dispatch: {}

+# We need this to be able to properly copy from a CPU to an XLA tensor with different sizes.
+# See https://github.com/pytorch/xla/issues/2881
+- func: _copy_from_and_resize(Tensor self, Tensor dst) -> Tensor
+  dispatch: {}
+
 - func: cos(Tensor self) -> Tensor
   device_check: NoCheck # TensorIterator
   variants: function, method
@@ -1239,13 +1288,20 @@
 - func: count_nonzero.dim_IntList(Tensor self, int[] dim) -> Tensor
   variants: function, method
   dispatch:
-    CPU
+    CPU: count_nonzero_cpu
+    CUDA: count_nonzero_cuda

 - func: count_nonzero(Tensor self, int? dim=None) -> Tensor
   variants: function, method
   dispatch:
     CompositeExplicitAutograd: count_nonzero

+- func: cov(Tensor self, *, int correction=1, Tensor? fweights=None, Tensor? aweights=None) -> Tensor
+  variants: function, method
+
+- func: corrcoef(Tensor self) -> Tensor
+  variants: function, method
+
 - func: cudnn_affine_grid_generator(Tensor theta, int N, int C, int H, int W) -> Tensor grid
   dispatch:
     CUDA: cudnn_affine_grid_generator_forward
@@ -1385,20 +1441,19 @@
   device_guard: False

 - func: cumprod(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor
+  structured_delegate: cumprod.out
   device_check: NoCheck # TensorIterator
   variants: function, method
-  dispatch:
-    CompositeExplicitAutograd: cumprod

 - func: cumprod_(Tensor(a!) self, int dim, *, ScalarType? dtype=None) -> Tensor(a!)
+  structured_delegate: cumprod.out
   variants: method
-  dispatch:
-    CompositeExplicitAutograd: cumprod_

 - func: cumprod.out(Tensor self, int dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
+  structured: True
   device_check: NoCheck # TensorIterator
   dispatch:
-    CompositeExplicitAutograd: cumprod_out
+    CPU, CUDA: cumprod_out

 - func: cumprod.dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
   device_check: NoCheck # TensorIterator
@@ -1416,20 +1471,19 @@
   device_guard: False

 - func: cumsum(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor
+  structured_delegate: cumsum.out
   device_check: NoCheck # TensorIterator
   variants: function, method
-  dispatch:
-    CompositeExplicitAutograd: cumsum

 - func: cumsum_(Tensor(a!) self, int dim, *, ScalarType? dtype=None) -> Tensor(a!)
+  structured_delegate: cumsum.out
   variants: method
-  dispatch:
-    CompositeExplicitAutograd: cumsum_

 - func: cumsum.out(Tensor self, int dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
+  structured: True
   device_check: NoCheck # TensorIterator
   dispatch:
-    CompositeExplicitAutograd: cumsum_out
+    CPU, CUDA: cumsum_out

 - func: cumsum.dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
   device_check: NoCheck # TensorIterator
@@ -1441,6 +1495,10 @@
 - func: cumsum.dimname_out(Tensor self, Dimname dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
   device_check: NoCheck # TensorIterator

+- func: cumulative_trapezoid.x(Tensor y, Tensor x, *, int dim=-1) -> Tensor
+
+- func: cumulative_trapezoid.dx(Tensor y, *, Scalar dx=1, int dim=-1) -> Tensor
+
 - func: ctc_loss.IntList(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, int reduction=Mean, bool zero_infinity=False) -> Tensor

 # convenience function that converts to intlists for you
@@ -1470,10 +1528,12 @@
 - func: diagonal.Dimname(Tensor(a) self, *, Dimname outdim, Dimname dim1, Dimname dim2, int offset=0) -> Tensor(a)
   variants: function, method

-- func: diagonal_backward(Tensor grad, int[] input_sizes, int offset, int dim1, int dim2) -> Tensor
+- func: diagonal_backward(Tensor grad_output, int[] input_sizes, int offset, int dim1, int dim2) -> Tensor
   variants: function
   device_check: NoCheck
   device_guard: False
+  dispatch:
+    CompositeExplicitAutograd: diagonal_backward

 - func: fill_diagonal_(Tensor(a!) self, Scalar fill_value, bool wrap=False) -> Tensor(a!)
   variants: method
@@ -1734,6 +1794,9 @@
 - func: new_zeros(Tensor self, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
   variants: method

+- func: new_ones(Tensor self, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+  variants: method
+
 # other overrides are to provide a more helpful error message that dtype is required
 - func: _empty_affine_quantized(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, float scale=1, int zero_point=0, MemoryFormat? memory_format=contiguous_format) -> Tensor
   dispatch:
@@ -1758,7 +1821,8 @@
     CUDA: resize_cuda_
     QuantizedCPU: quantized_resize_cpu_

-- func: empty_quantized(int[] size, Tensor qtensor) -> Tensor
+- func: empty_quantized(int[] size, Tensor qtensor, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
+  category_override: factory
   variants: function
   dispatch:
     QuantizedCPU, QuantizedCUDA: empty_quantized
@@ -2214,6 +2278,36 @@
 - func: isclose(Tensor self, Tensor other, float rtol=1e-05, float atol=1e-08, bool equal_nan=False) -> Tensor
   variants: function, method

+- func: isin.Tensor_Tensor_out(Tensor elements, Tensor test_elements, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!)
+  variants: function
+  structured: True
+  dispatch:
+    CPU, CUDA: isin_Tensor_Tensor_out
+
+- func: isin.Tensor_Tensor(Tensor elements, Tensor test_elements, *, bool assume_unique=False, bool invert=False) -> Tensor
+  variants: function
+  structured_delegate: isin.Tensor_Tensor_out
+
+- func: isin.Tensor_Scalar_out(Tensor elements, Scalar test_element, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!)
+  variants: function
+  structured: True
+  dispatch:
+    CPU, CUDA: isin_Tensor_Scalar_out
+
+- func: isin.Tensor_Scalar(Tensor elements, Scalar test_element, *, bool assume_unique=False, bool invert=False) -> Tensor
+  variants: function
+  structured_delegate: isin.Tensor_Scalar_out
+
+- func: isin.Scalar_Tensor_out(Scalar element, Tensor test_elements, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!)
+  variants: function
+  structured: True
+  dispatch:
+    CPU, CUDA: isin_Scalar_Tensor_out
+
+- func: isin.Scalar_Tensor(Scalar element, Tensor test_elements, *, bool assume_unique=False, bool invert=False) -> Tensor
+  variants: function
+  structured_delegate: isin.Scalar_Tensor_out
+
 - func: isnan(Tensor self) -> Tensor
   variants: function, method
   device_check: NoCheck
@@ -2239,6 +2333,16 @@
   device_guard: False
   manual_cpp_binding: True

+- func: is_conj(Tensor self) -> bool
+  variants: function, method
+  device_guard: False
+  manual_cpp_binding: True
+
+- func: is_neg(Tensor self) -> bool
+  variants: function, method
+  device_guard: False
+  manual_cpp_binding: True
+
 - func: isreal(Tensor self) -> Tensor
   variants: function, method

@@ -2258,6 +2362,12 @@
   device_guard: False
   manual_cpp_binding: True

+- func: is_inference(Tensor self) -> bool
+  variants: function, method
+  device_check: NoCheck
+  device_guard: False
+  manual_cpp_binding: True
+
 - func: kl_div(Tensor self, Tensor target, int reduction=Mean, *, bool log_target=False) -> Tensor
   dispatch:
     CompositeExplicitAutograd: kl_div
@@ -2317,6 +2427,9 @@
 - func: linear(Tensor input, Tensor weight, Tensor? bias=None) -> Tensor
   python_module: nn

+- func: linear.out(Tensor input, Tensor weight, Tensor? bias=None, *, Tensor(a!) out) -> Tensor(a!)
+  python_module: nn
+
 - func: mkldnn_linear(Tensor self, Tensor weight, Tensor? bias=None) -> Tensor
   python_module: nn
   dispatch:
@@ -2464,38 +2577,38 @@

 - func: xlogy.Tensor(Tensor self, Tensor other) -> Tensor
   device_check: NoCheck # TensorIterator
+  structured_delegate: xlogy.OutTensor
   variants: function, method
-  dispatch:
-    CPU, CUDA: xlogy

 - func: xlogy.Scalar_Self(Scalar self, Tensor other) -> Tensor
   device_check: NoCheck # TensorIterator
   variants: function
   dispatch:
-    CPU, CUDA: xlogy
+    CompositeExplicitAutograd: xlogy

 - func: xlogy.Scalar_Other(Tensor self, Scalar other) -> Tensor
   device_check: NoCheck # TensorIterator
   variants: function, method
   dispatch:
-    CPU, CUDA: xlogy
+    CompositeExplicitAutograd: xlogy

 # xlogy: inplace variant
 - func: xlogy_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
   device_check: NoCheck # TensorIterator
   variants: function, method
-  dispatch:
-    CPU, CUDA: xlogy_
+  structured_delegate: xlogy.OutTensor

 - func: xlogy_.Scalar_Other(Tensor(a!) self, Scalar other) -> Tensor(a!)
   device_check: NoCheck # TensorIterator
   variants: function, method
   dispatch:
-    CPU, CUDA: xlogy_
+    CompositeExplicitAutograd: xlogy_

 # xlogy: out variant
 - func: xlogy.OutTensor(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
   device_check: NoCheck # TensorIterator
+  structured: True
+  structured_inherits: TensorIteratorBase
   variants: function
   dispatch:
     CPU, CUDA: xlogy_out
@@ -2504,13 +2617,13 @@
   device_check: NoCheck # TensorIterator
   variants: function
   dispatch:
-    CPU, CUDA: xlogy_out
+    CompositeExplicitAutograd: xlogy_out

 - func: xlogy.OutScalar_Other(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
   device_check: NoCheck # TensorIterator
   variants: function
   dispatch:
-    CPU, CUDA: xlogy_out
+    CompositeExplicitAutograd: xlogy_out

 - func: logdet(Tensor self) -> Tensor
   variants: function, method
@@ -2532,14 +2645,22 @@
   variants: function, method

 - func: _log_softmax(Tensor self, int dim, bool half_to_float) -> Tensor
+  structured_delegate: _log_softmax.out
+
+- func: _log_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!)
+  structured: True
   dispatch:
-    CPU: log_softmax_cpu
-    CUDA: log_softmax_cuda
+    CPU: log_softmax_cpu_out
+    CUDA: log_softmax_cuda_out

 - func: _log_softmax_backward_data(Tensor grad_output, Tensor output, int dim, Tensor self) -> Tensor
+  structured_delegate: _log_softmax_backward_data.out
+
+- func: _log_softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+  structured: True
   dispatch:
-    CPU: log_softmax_backward_cpu
-    CUDA: log_softmax_backward_cuda
+    CPU: log_softmax_backward_cpu_out
+    CUDA: log_softmax_backward_cuda_out

 - func: _logcumsumexp(Tensor self, int dim) -> Tensor
   dispatch:
@@ -2608,16 +2729,27 @@

 - func: matrix_exp_backward(Tensor self, Tensor grad) -> Tensor

+# DEPRECATED: Use torch.aminmax instead
 - func: _aminmax(Tensor self) -> (Tensor, Tensor)
-  variants: function
   dispatch:
     CPU, CUDA: _aminmax_all

+# DEPRECATED: Use torch.aminmax instead
 - func: _aminmax.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor, Tensor)
-  variants: function
   dispatch:
     CPU, CUDA: _aminmax

+- func: aminmax(Tensor self, *, int? dim=None, bool keepdim=False) -> (Tensor min, Tensor max)
+  device_check: NoCheck # TensorIterator
+  structured_delegate: aminmax.out
+  variants: function, method
+
+- func: aminmax.out(Tensor self, *, int? dim=None, bool keepdim=False, Tensor(a!) min, Tensor(b!) max) -> (Tensor(a!) min, Tensor(b!) max)
+  device_check: NoCheck # TensorIterator
+  structured: True
+  dispatch:
+    CPU, CUDA: aminmax_out
+
 - func: _compute_linear_combination(Tensor input, Tensor coefficients) -> Tensor
   dispatch:
     CPU, CUDA: _compute_linear_combination
@@ -2697,20 +2829,20 @@
   device_check: NoCheck # TensorIterator
   variants: function, method
   dispatch:
-    CPU, CUDA: mean_cpu_gpu
-    QuantizedCPU: mean_quantized_cpu
+    CompositeExplicitAutograd: mean

 - func: mean.dim(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
+  structured_delegate: mean.out
   device_check: NoCheck # TensorIterator
   variants: function, method
   dispatch:
-    CPU, CUDA: mean_cpu_gpu
     QuantizedCPU: mean_quantized_cpu

 - func: mean.out(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
+  structured: True
   device_check: NoCheck # TensorIterator
   dispatch:
-    CPU, CUDA: mean_out_cpu_gpu
+    CPU, CUDA: mean_out
     QuantizedCPU: mean_out_quantized_cpu

 - func: mean.names_dim(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
@@ -2720,6 +2852,13 @@
 - func: mean.names_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
   device_check: NoCheck # TensorIterator

+- func: nanmean(Tensor self, int[1] dim=[], bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
+  device_check: NoCheck # Composite
+  variants: function, method
+
+- func: nanmean.out(Tensor self, int[1] dim=[], bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
+  device_check: NoCheck # Composite
+
 - func: median(Tensor self) -> Tensor
   variants: function, method
   dispatch:
@@ -2872,18 +3011,18 @@
     CUDA: miopen_rnn_backward

 - func: mm(Tensor self, Tensor mat2) -> Tensor
+  structured_delegate: mm.out
   variants: function, method
   dispatch:
-    CPU: mm_cpu
-    CUDA: mm_cuda
-    SparseCPU, SparseCUDA, SparseCsrCPU: _sparse_mm
+    SparseCPU, SparseCUDA, SparseCsrCPU, SparseCsrCUDA: _sparse_mm

 - func: mm.out(Tensor self, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!)
+  structured: True
   dispatch:
-    CPU: mm_cpu_out
+    CPU: mm_out_cpu
     CUDA: mm_out_cuda
     SparseCPU, SparseCUDA: _sparse_mm_out
-    SparseCsrCPU: _sparse_csr_mm_out
+    SparseCsrCPU, SparseCsrCUDA: _sparse_csr_mm_out

 - func: _sparse_mm(Tensor sparse, Tensor dense) -> Tensor

@@ -2969,12 +3108,16 @@
   variants: function, method
   dispatch:
     CPU, CUDA: mv
-    SparseCPU, SparseCUDA, SparseCsrCPU: mv_sparse
+    SparseCPU, SparseCUDA, SparseCsrCPU, SparseCsrCUDA: mv_sparse

 - func: mv.out(Tensor self, Tensor vec, *, Tensor(a!) out) -> Tensor(a!)
   dispatch:
     CompositeExplicitAutograd: mv_out

+- func: mvlgamma.out(Tensor self, int p, *, Tensor(a!) out) -> Tensor(a!)
+  dispatch:
+    CPU, CUDA: mvlgamma_out
+
 - func: mvlgamma(Tensor self, int p) -> Tensor
   device_check: NoCheck # TensorIterator
   variants: function, method
@@ -3152,12 +3295,22 @@
     CPU: channel_shuffle
     QuantizedCPU: channel_shuffle_quantized_cpu

-- func: is_pinned(Tensor self) -> bool
+- func: is_pinned(Tensor self, Device? device=None) -> bool
   variants: method
+  dispatch:
+    CUDA: is_pinned_cuda
+    CompositeExplicitAutograd: is_pinned_default

-- func: pin_memory(Tensor(a) self) -> Tensor(a)
+# TODO: add a copy kwarg that guarantees that the tensor is put into fresh
+# pinned memory
+- func: pin_memory(Tensor(a) self, Device? device=None) -> Tensor(a)
   variants: method

+# Unlike pin_memory, this is guaranteed to give a new non-aliasing tensor
+- func: _pin_memory(Tensor self, Device? device=None) -> Tensor
+  dispatch:
+    CUDA: _pin_memory_cuda
+
 - func: pinverse(Tensor self, float rcond=1e-15) -> Tensor
   variants: function, method

@@ -3326,16 +3479,16 @@
   dispatch:
     CompositeExplicitAutograd: repeat

-- func: repeat_interleave.Tensor(Tensor repeats) -> Tensor
+- func: repeat_interleave.Tensor(Tensor repeats, *, int? output_size=None) -> Tensor
   variants: function
   dispatch:
     CPU: repeat_interleave_cpu
     CUDA: repeat_interleave_cuda

-- func: repeat_interleave.self_Tensor(Tensor self, Tensor repeats, int? dim=None) -> Tensor
+- func: repeat_interleave.self_Tensor(Tensor self, Tensor repeats, int? dim=None, *, int? output_size=None) -> Tensor
   variants: function, method

-- func: repeat_interleave.self_int(Tensor self, int repeats, int? dim=None) -> Tensor
+- func: repeat_interleave.self_int(Tensor self, int repeats, int? dim=None, *, int? output_size=None) -> Tensor
   variants: function, method

 - func: reshape(Tensor(a) self, int[] shape) -> Tensor(a)
@@ -3343,6 +3496,17 @@
   device_check: NoCheck
   device_guard: False

+# NOTE [ _reshape_alias ] is meant to be used in the implementation of reshape.
+# They are not user-facing, hence the leading underscore. Please don't use it
+# anywhere else.
+- func: _reshape_alias(Tensor(a) self, int[] size, int[] stride) -> Tensor(a)
+  variants: function, method
+  device_check: NoCheck
+  device_guard: False
+  dispatch:
+    CPU, CUDA, Meta, QuantizedCPU, QuantizedCUDA: _reshape_alias
+    # We don't need to support mkldnn since this is handled explicitly by the reshape operator.
+
 - func: _mkldnn_reshape(Tensor self, int[] shape) -> Tensor
   device_check: NoCheck
   device_guard: False
@@ -3412,19 +3576,35 @@
     CPU: prelu_backward_cpu
     CUDA: prelu_backward_cuda

+- func: gelu.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+  structured: True
+  structured_inherits: TensorIteratorBase
+  device_check: NoCheck # TensorIterator
+  python_module: nn
+  dispatch:
+    CPU: gelu_out_cpu
+    CUDA: gelu_out_cuda
+
 - func: gelu(Tensor self) -> Tensor
+  structured_delegate: gelu.out
   device_check: NoCheck # TensorIterator
   python_module: nn
   dispatch:
     MkldnnCPU: mkldnn_gelu
-    CPU: gelu_cpu
-    CUDA: gelu_cuda
+
+- func: gelu_backward.grad_input(Tensor grad, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!)
+  structured: True
+  structured_inherits: TensorIteratorBase
+  python_module: nn
+  dispatch:
+    CPU: gelu_backward_out_cpu
+    CUDA: gelu_backward_out_cuda

 - func: gelu_backward(Tensor grad, Tensor self) -> Tensor
+  structured_delegate: gelu_backward.grad_input
   python_module: nn
   dispatch:
-    CPU: gelu_backward_cpu
-    CUDA: gelu_backward_cuda
+    MkldnnCPU: mkldnn_gelu_backward

 - func: infinitely_differentiable_gelu_backward(Tensor grad, Tensor self) -> Tensor
   variants: function
@@ -3432,16 +3612,27 @@
   device_check: NoCheck
   device_guard: False

+- func: hardshrink.out(Tensor self, Scalar lambd=0.5, *, Tensor(a!) out) -> Tensor(a!)
+  structured: True
+  structured_inherits: TensorIteratorBase
+  device_check: NoCheck # TensorIterator
+  dispatch:
+    CPU, CUDA: hardshrink_out
+
 - func: hardshrink(Tensor self, Scalar lambd=0.5) -> Tensor
+  structured_delegate: hardshrink.out
   device_check: NoCheck # TensorIterator
   variants: function, method
+
+- func: hardshrink_backward.grad_input(Tensor grad_out, Tensor self, Scalar lambd, *, Tensor(a!) grad_input) -> Tensor(a!)
+  structured: True
+  structured_inherits: TensorIteratorBase
   dispatch:
-    CPU, CUDA: hardshrink
+    CPU, CUDA: hardshrink_backward_out

 - func: hardshrink_backward(Tensor grad_out, Tensor self, Scalar lambd) -> Tensor
+  structured_delegate: hardshrink_backward.grad_input
   variants: function, method
-  dispatch:
-    CPU, CUDA: hardshrink_backward

 - func: rsqrt(Tensor self) -> Tensor
   device_check: NoCheck # TensorIterator
@@ -3472,10 +3663,12 @@
   dispatch:
     CompositeExplicitAutograd: select

-- func: select_backward(Tensor grad, int[] input_sizes, int dim, int index) -> Tensor
+- func: select_backward(Tensor grad_output, int[] input_sizes, int dim, int index) -> Tensor
   variants: function
   device_check: NoCheck
   device_guard: False
+  dispatch:
+    CompositeExplicitAutograd: select_backward

 - func: selu(Tensor self) -> Tensor
   device_check: NoCheck # TensorIterator
@@ -3512,10 +3705,17 @@
   dispatch:
     CPU, CUDA: silu_out

+- func: silu_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!)
+  structured: True
+  structured_inherits: TensorIteratorBase
+  python_module: nn
+  dispatch:
+    CPU, CUDA: silu_backward_out
+
 - func: silu_backward(Tensor grad_output, Tensor self) -> Tensor
+  structured_delegate: silu_backward.grad_input
   python_module: nn
   dispatch:
-    CPU, CUDA: silu_backward
     CompositeImplicitAutograd: math_silu_backward

 - func: mish(Tensor self) -> Tensor
@@ -3669,10 +3869,12 @@
   dispatch:
     CompositeExplicitAutograd: slice

-- func: slice_backward(Tensor grad, int[] input_sizes, int dim, int start, int end, int step) -> Tensor
+- func: slice_backward(Tensor grad_output, int[] input_sizes, int dim, int start, int end, int step) -> Tensor
   variants: function
   device_check: NoCheck
   device_guard: False
+  dispatch:
+    CompositeExplicitAutograd: slice_backward

 - func: slogdet(Tensor self) -> (Tensor sign, Tensor logabsdet)
   variants: function, method
@@ -3690,15 +3892,24 @@
   variants: function, method

 - func: _softmax(Tensor self, int dim, bool half_to_float) -> Tensor
+  structured_delegate: _softmax.out
   dispatch:
-    CPU: softmax_cpu
-    CUDA: softmax_cuda
     MkldnnCPU: mkldnn_softmax

+- func: _softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!)
+  structured: True
+  dispatch:
+    CPU: softmax_cpu_out
+    CUDA: softmax_cuda_out
+
 - func: _softmax_backward_data(Tensor grad_output, Tensor output, int dim, Tensor self) -> Tensor
+  structured_delegate: _softmax_backward_data.out
+
+- func: _softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!)
+  structured: True
   dispatch:
-    CPU: softmax_backward_cpu
-    CUDA: softmax_backward_cuda
+    CPU: softmax_backward_cpu_out
+    CUDA: softmax_backward_cuda_out

 - func: unsafe_split.Tensor(Tensor self, int split_size, int dim=0) -> Tensor[]
   variants: function, method
@@ -3849,19 +4060,19 @@
   device_check: NoCheck # TensorIterator
   variants: function, method
   dispatch:
-    CPU, CUDA: sum
+    CompositeExplicitAutograd: sum

 - func: sum.dim_IntList(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
+  structured_delegate: sum.IntList_out
   device_check: NoCheck # TensorIterator
   variants: function, method
-  dispatch:
-    CPU, CUDA: sum

 - func: sum.dim_DimnameList(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
   device_check: NoCheck # TensorIterator
   variants: function, method

 - func: sum.IntList_out(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
+  structured: True
   device_check: NoCheck # TensorIterator
   dispatch:
     CPU, CUDA: sum_out
@@ -3986,12 +4197,12 @@
     CPU, CUDA: prod

 - func: prod.dim_int(Tensor self, int dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
+  structured_delegate: prod.int_out
   device_check: NoCheck # TensorIterator
   variants: function, method
-  dispatch:
-    CPU, CUDA: prod

 - func: prod.int_out(Tensor self, int dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
+  structured: True
   device_check: NoCheck # TensorIterator
   dispatch:
     CPU, CUDA: prod_out
@@ -4136,8 +4347,7 @@
 - func: flip(Tensor self, int[] dims) -> Tensor
   variants: function, method
   dispatch:
-    CPU, QuantizedCPU: flip_cpu
-    CUDA: flip_cuda
+    CPU, QuantizedCPU, CUDA, QuantizedCUDA: flip

 - func: fliplr(Tensor self) -> Tensor
   variants: function, method
@@ -4158,6 +4368,10 @@
   dispatch:
     CompositeExplicitAutograd: rot90

+- func: trapezoid.x(Tensor y, Tensor x, *, int dim=-1) -> Tensor
+
+- func: trapezoid.dx(Tensor y, *, Scalar dx=1, int dim=-1) -> Tensor
+
 - func: trapz.x(Tensor y, Tensor x, *, int dim=-1) -> Tensor

 - func: trapz.dx(Tensor y, *, float dx=1, int dim=-1) -> Tensor
@@ -4476,32 +4690,36 @@
   device_check: NoCheck # TensorIterator
   variants: function, method
   dispatch:
-    CPU, CUDA: norm
+    CompositeExplicitAutograd: norm

 - func: norm.Scalar(Tensor self, Scalar p=2) -> Tensor
   device_check: NoCheck # TensorIterator
   variants: function, method
   dispatch:
-    CPU, CUDA: norm
+    CompositeExplicitAutograd: norm

 - func: norm.ScalarOpt_dim_dtype(Tensor self, Scalar? p, int[1] dim, bool keepdim, *, ScalarType dtype) -> Tensor
+  structured_delegate: norm.dtype_out
   device_check: NoCheck # TensorIterator
   variants: function, method
   dispatch:
-    CPU, CUDA: norm
+    SparseCPU, SparseCUDA: sparse_dtype_norm

 - func: norm.ScalarOpt_dim(Tensor self, Scalar? p, int[1] dim, bool keepdim=False) -> Tensor
+  structured_delegate: norm.out
   device_check: NoCheck # TensorIterator
   variants: function, method
   dispatch:
-    CPU, CUDA: norm
+    SparseCPU, SparseCUDA: sparse_norm

 - func: norm.dtype_out(Tensor self, Scalar? p, int[1] dim, bool keepdim, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!)
+  structured: True
   device_check: NoCheck # TensorIterator
   dispatch:
-    CPU, CUDA: norm_out
+    CPU, CUDA: norm_dtype_out

 - func: norm.out(Tensor self, Scalar? p, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
+  structured: True
   device_check: NoCheck # TensorIterator
   dispatch:
     CPU, CUDA: norm_out
@@ -4573,7 +4791,7 @@
   variants: function
   dispatch:
     SparseCPU, SparseCUDA: resize_as_sparse_
-    SparseCsrCPU: resize_as_sparse_csr_
+    SparseCsrCPU, SparseCsrCUDA: resize_as_sparse_csr_

 - func: zero_(Tensor(a!) self) -> Tensor(a!)
   device_check: NoCheck # TensorIterator
@@ -4679,6 +4897,7 @@
     SparseCPU: addmm_out_sparse_dense_cpu
     SparseCUDA: addmm_out_sparse_dense_cuda
     SparseCsrCPU: addmm_out_sparse_csr_dense_cpu
+    SparseCsrCUDA: addmm_out_sparse_csr_dense_cuda

 - func: addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
   structured_delegate: addmm.out
@@ -4686,7 +4905,7 @@
   dispatch:
     SparseCPU: addmm_sparse_dense_cpu
     SparseCUDA: addmm_sparse_dense_cuda
-    SparseCsrCPU: addmm_sparse_csr_dense
+    SparseCsrCPU, SparseCsrCUDA: addmm_sparse_csr_dense

 - func: addmm_(Tensor(a!) self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)
   structured_delegate: addmm.out
@@ -4808,9 +5027,11 @@
 # FIXME: would be nicer if TensorOptions was optional based; not adding default arguments for options given
 # the default would never make sense.

-- func:
+- func: sparse_csr_tensor.crow_col_value_size(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor

-- func:
+- func: sparse_csr_tensor.crow_col_value(Tensor crow_indices, Tensor col_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
+
+- func: _sparse_csr_tensor_unsafe(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor

 - func: sparse_coo_tensor.size(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor

@@ -4822,6 +5043,8 @@

 - func: _validate_sparse_coo_tensor_args(Tensor indices, Tensor values, int[] size) -> ()

+- func: _validate_sparse_csr_tensor_args(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size) -> ()
+
 - func: _sparse_coo_tensor_with_dims(int sparse_dim, int dense_dim, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
   dispatch:
     SparseCPU, SparseCUDA: new_with_dims_sparse
@@ -4848,10 +5071,13 @@
     SparseCPU: sparse_mask_cpu
     SparseCUDA: sparse_mask_cuda

+- func: _to_cpu(Tensor[] tensors) -> Tensor[]
+  variants: function
+
 - func: to_dense(Tensor self, ScalarType? dtype=None) -> Tensor
   variants: method
   dispatch:
-    SparseCPU, SparseCUDA, SparseCsrCPU: sparse_to_dense
+    SparseCPU, SparseCUDA, SparseCsrCPU, SparseCsrCUDA: sparse_to_dense
     MkldnnCPU: mkldnn_to_dense

 - func: to_dense_backward(Tensor grad, Tensor input) -> Tensor
@@ -4890,7 +5116,7 @@
   variants: method
   dispatch:
     SparseCPU, SparseCUDA: _nnz_sparse
-    SparseCsrCPU: _nnz_sparse_csr
+    SparseCsrCPU, SparseCsrCUDA: _nnz_sparse_csr
   device_check: NoCheck
   device_guard: False

@@ -4949,21 +5175,21 @@
   variants: method
   dispatch:
     SparseCPU, SparseCUDA: values_sparse
-    SparseCsrCPU: values_sparse_csr
+    SparseCsrCPU, SparseCsrCUDA: values_sparse_csr
   device_check: NoCheck
   device_guard: False

 - func: crow_indices(Tensor(a) self) -> Tensor(a)
   variants: method
   dispatch:
-    SparseCsrCPU: crow_indices_sparse_csr
+    SparseCsrCPU, SparseCsrCUDA: crow_indices_sparse_csr
   device_check: NoCheck
   device_guard: False

 - func: col_indices(Tensor(a) self) -> Tensor(a)
   variants: method
   dispatch:
-    SparseCsrCPU: col_indices_sparse_csr
+    SparseCsrCPU, SparseCsrCUDA: col_indices_sparse_csr
   device_check: NoCheck
   device_guard: False

@@ -5025,6 +5251,11 @@
   dispatch:
     CPU, CUDA: quantize_per_tensor

+- func: quantize_per_tensor.tensor_qparams(Tensor self, Tensor scale, Tensor zero_point, ScalarType dtype) -> Tensor
+  variants: function
+  dispatch:
+    CPU, CUDA: quantize_per_tensor_tensor_qparams
+
 - func: quantize_per_tensor.tensors(Tensor[] tensors, Tensor scales, Tensor zero_points, ScalarType dtype) -> Tensor[]
   variants: function
   dispatch:
@@ -5033,13 +5264,13 @@
 - func: quantize_per_channel(Tensor self, Tensor scales, Tensor zero_points, int axis, ScalarType dtype) -> Tensor
   variants: function
   dispatch:
-    CPU: quantize_per_channel_cpu
+    CPU, CUDA: quantize_per_channel

 - func: dequantize.self(Tensor self) -> Tensor
   variants: function, method
   dispatch:
     CPU: dequantize_cpu
-    QuantizedCPU, QuantizedCUDA: dequantize_quantized_cpu
+    QuantizedCPU, QuantizedCUDA: dequantize_quantized

 - func: dequantize.tensors(Tensor[] tensors) -> Tensor[]
   variants: function
@@ -5086,6 +5317,7 @@
 - func: _make_per_channel_quantized_tensor(Tensor self, Tensor scale, Tensor zero_point, int axis) -> Tensor
   dispatch:
     CPU: make_per_channel_quantized_tensor_cpu
+    CUDA: make_per_channel_quantized_tensor_cuda

 - func: qscheme(Tensor self) -> QScheme
   variants: method
@@ -5096,11 +5328,20 @@
   device_check: NoCheck # TensorIterator
   variants: function

+- func: fake_quantize_per_tensor_affine.tensor_qparams(Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max) -> Tensor
+  device_check: NoCheck # TensorIterator
+  variants: function
+
 - func: fake_quantize_per_tensor_affine_cachemask(Tensor self, float scale, int zero_point, int quant_min, int quant_max) -> (Tensor output, Tensor mask)
   variants: function
   dispatch:
     CPU, CUDA: fake_quantize_per_tensor_affine_cachemask

+- func: _fake_quantize_per_tensor_affine_cachemask_tensor_qparams(Tensor self, Tensor scale, Tensor zero_point, Tensor fake_quant_enabled, int quant_min, int quant_max) -> (Tensor output, Tensor mask)
+  variants: function
+  dispatch:
+    CPU, CUDA: _fake_quantize_per_tensor_affine_cachemask_tensor_qparams
+
 - func: fake_quantize_per_tensor_affine_cachemask_backward(Tensor grad, Tensor mask) -> Tensor
   variants: function

@@ -5132,6 +5373,15 @@
 - func: _fake_quantize_learnable_per_channel_affine_backward(Tensor grad, Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max, float grad_factor=1.0) -> (Tensor, Tensor, Tensor)
   variants: function

+- func: fused_moving_avg_obs_fake_quant(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor(a!) running_min, Tensor(b!) running_max, Tensor(c!) scale, Tensor(d!) zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False) -> Tensor
+  variants: function
+
+- func: _fused_moving_avg_obs_fq_helper(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor(a!) running_min, Tensor(b!) running_max, Tensor(c!) scale, Tensor(d!) zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False) -> (Tensor output, Tensor mask)
+  dispatch:
+    CPU: fused_moving_avg_obs_fake_quant_cpu
+    CUDA: fused_moving_avg_obs_fake_quant_cuda
+
+
 - func: _choose_qparams_per_tensor(Tensor self, bool reduce_range=False) -> (float, int)
   variants: function

@@ -5141,31 +5391,42 @@
 - func: choose_qparams_optimized(Tensor input, int numel, int n_bins, float ratio, int bit_width) -> (Tensor, Tensor)
   variants: function

+- func: _to_copy(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool non_blocking=False, MemoryFormat? memory_format=None) -> Tensor
+  device_check: NoCheck
+  device_guard: False
+  dispatch:
+    CompositeExplicitAutograd: _to_copy
+
 # to(Device) must not exist because all constructors of Device also works for
 # TensorOptions. Otherwise, an ambiguity error is thrown.
 # See NOTE [ TensorOptions Constructors ].
-- func: to.dtype_layout(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor
+- func: to.dtype_layout(Tensor(a) self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a)
   variants: method
   device_check: NoCheck
   device_guard: False

-- func: to.device(Tensor self, Device device, ScalarType dtype, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor
+- func: to.device(Tensor(a) self, Device device, ScalarType dtype, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a)
   variants: method
   device_check: NoCheck
   device_guard: False

-- func: to.dtype(Tensor self, ScalarType dtype, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor
+- func: to.dtype(Tensor(a) self, ScalarType dtype, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a)
   variants: method
   device_check: NoCheck
   device_guard: False

-- func: to.other(Tensor self, Tensor other, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor
+- func: to.other(Tensor(a) self, Tensor other, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a)
   variants: method
   device_check: NoCheck
   device_guard: False

 - func: meshgrid(Tensor[] tensors) -> Tensor[]

+# TODO: Two weeks after this lands, combine these two overloads,
+# making "indexing" optional. These are temporarily distinct for
+# forward-compatibility reasons.
+- func: meshgrid.indexing(Tensor[] tensors, *, str indexing) -> Tensor[]
+
 - func: cartesian_prod(Tensor[] tensors) -> Tensor
   variants: function

@@ -5433,56 +5694,94 @@
   device_check: NoCheck # TensorIterator
   variants: function, method
 
-- func: scatter_.src(Tensor(a!) self, int dim, Tensor index, Tensor src) -> Tensor(a!)
-  variants: method
-  dispatch:
-    CPU, CUDA: scatter_
-
 - func: scatter.src(Tensor self, int dim, Tensor index, Tensor src) -> Tensor
+  structured_delegate: scatter.src_out
   variants: function, method
 
-- func: scatter_.
+- func: scatter_.src(Tensor(a!) self, int dim, Tensor index, Tensor src) -> Tensor(a!)
+  structured_delegate: scatter.src_out
   variants: method
+
+- func: scatter.src_out(Tensor self, int dim, Tensor index, Tensor src, *, Tensor(a!) out) -> Tensor(a!)
+  structured: True
+  variants: function
   dispatch:
-    CPU, CUDA:
+    CPU, CUDA: scatter_src_out
 
 - func: scatter.value(Tensor self, int dim, Tensor index, Scalar value) -> Tensor
+  structured_delegate: scatter.value_out
   variants: function, method
 
-- func:
-
+- func: scatter_.value(Tensor(a!) self, int dim, Tensor index, Scalar value) -> Tensor(a!)
+  structured_delegate: scatter.value_out
+  variants: method
 
-- func: scatter.
+- func: scatter.value_out(Tensor self, int dim, Tensor index, Scalar value, *, Tensor(a!) out) -> Tensor(a!)
+  structured: True
+  variants: function
+  dispatch:
+    CPU, CUDA: scatter_value_out
+
+- func: scatter.reduce(Tensor self, int dim, Tensor index, Tensor src, *, str reduce) -> Tensor
+  structured_delegate: scatter.reduce_out
   variants: function, method
 
 - func: scatter_.reduce(Tensor(a!) self, int dim, Tensor index, Tensor src, *, str reduce) -> Tensor(a!)
+  structured_delegate: scatter.reduce_out
   variants: method
+
+- func: scatter.reduce_out(Tensor self, int dim, Tensor index, Tensor src, *, str reduce, Tensor(a!) out) -> Tensor(a!)
+  structured: True
+  variants: function
   dispatch:
-    CPU, CUDA:
+    CPU, CUDA: scatter_reduce_out
+
+- func: scatter.value_reduce(Tensor self, int dim, Tensor index, Scalar value, *, str reduce) -> Tensor
+  structured_delegate: scatter.value_reduce_out
+  variants: function, method
 
 - func: scatter_.value_reduce(Tensor(a!) self, int dim, Tensor index, Scalar value, *, str reduce) -> Tensor(a!)
+  structured_delegate: scatter.value_reduce_out
   variants: method
-  dispatch:
-    CPU, CUDA: scatter_scalar_reduce_
 
-- func:
-
+- func: scatter.value_reduce_out(Tensor self, int dim, Tensor index, Scalar value, *, str reduce, Tensor(a!) out) -> Tensor(a!)
+  structured: True
+  variants: function
   dispatch:
-    CPU, CUDA:
+    CPU, CUDA: scatter_value_reduce_out
+
+- func: scatter.dimname_src(Tensor self, Dimname dim, Tensor index, Tensor src) -> Tensor
+  variants: function, method
+
+- func: scatter.dimname_value(Tensor self, Dimname dim, Tensor index, Scalar value) -> Tensor
+  variants: function, method
 
 - func: scatter_add(Tensor self, int dim, Tensor index, Tensor src) -> Tensor
+  structured_delegate: scatter_add.out
   variants: function, method
 
+- func: scatter_add_(Tensor(a!) self, int dim, Tensor index, Tensor src) -> Tensor(a!)
+  structured_delegate: scatter_add.out
+  variants: method
+
+- func: scatter_add.out(Tensor self, int dim, Tensor index, Tensor src, *, Tensor(a!) out) -> Tensor(a!)
+  structured: True
+  variants: function
+  dispatch:
+    CPU, CUDA: scatter_add
+
 - func: scatter_add.dimname(Tensor self, Dimname dim, Tensor index, Tensor src) -> Tensor
   variants: function, method
 
 - func: eq_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
+  structured_delegate: eq.Scalar_out
   device_check: NoCheck # TensorIterator
   variants: method
   dispatch:
     CompositeExplicitAutograd: eq_
 
 - func: eq_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
+  structured_delegate: eq.Tensor_out
   device_check: NoCheck # TensorIterator
   variants: method
   dispatch:
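Illustrative note (not part of the diff): the scatter family above now delegates to structured out-kernels and gains reduce overloads. A minimal torch-rb sketch, assuming the generated bindings expose scatter as a tensor method with the keyword-only reduce argument from the schema:

  # hypothetical usage based on the schema above; the reduce: keyword is an assumption
  src = Torch.ones(2, 5)
  index = Torch.tensor([[0, 1, 2, 0, 0]])
  plain  = Torch.zeros(3, 5).scatter(0, index, src)                 # scatter.src
  summed = Torch.zeros(3, 5).scatter(0, index, src, reduce: "add")  # scatter.reduce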
@@ -5490,6 +5789,8 @@
 
 - func: bitwise_and.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
   device_check: NoCheck # TensorIterator
+  structured: True
+  structured_inherits: TensorIteratorBase
   variants: function
   dispatch:
     CPU, CUDA: bitwise_and_out
@@ -5498,15 +5799,18 @@
   device_check: NoCheck # TensorIterator
   variants: function
   dispatch:
-
+    CompositeExplicitAutograd: bitwise_and_out
 
 - func: bitwise_and.Scalar(Tensor self, Scalar other) -> Tensor
   device_check: NoCheck # TensorIterator
   variants: method, function
+  dispatch:
+    CompositeExplicitAutograd: bitwise_and
 
 - func: bitwise_and.Tensor(Tensor self, Tensor other) -> Tensor
   device_check: NoCheck # TensorIterator
   variants: method, function
+  structured_delegate: bitwise_and.Tensor_out
 
 - func: bitwise_and_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
   device_check: NoCheck # TensorIterator
@@ -5515,6 +5819,7 @@
 - func: bitwise_and_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
   device_check: NoCheck # TensorIterator
   variants: method
+  structured_delegate: bitwise_and.Tensor_out
 
 - func: __and__.Scalar(Tensor self, Scalar other) -> Tensor
   device_check: NoCheck # TensorIterator
@@ -5534,6 +5839,8 @@
 
 - func: bitwise_or.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
   device_check: NoCheck # TensorIterator
+  structured: True
+  structured_inherits: TensorIteratorBase
   variants: function
   dispatch:
     CPU, CUDA: bitwise_or_out
@@ -5542,7 +5849,7 @@
   device_check: NoCheck # TensorIterator
   variants: function
   dispatch:
-
+    CompositeExplicitAutograd: bitwise_or_out
 
 - func: bitwise_or.Scalar(Tensor self, Scalar other) -> Tensor
   device_check: NoCheck # TensorIterator
@@ -5551,6 +5858,7 @@
 - func: bitwise_or.Tensor(Tensor self, Tensor other) -> Tensor
   device_check: NoCheck # TensorIterator
   variants: method, function
+  structured_delegate: bitwise_or.Tensor_out
 
 - func: bitwise_or_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
   device_check: NoCheck # TensorIterator
@@ -5559,6 +5867,7 @@
 - func: bitwise_or_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
   device_check: NoCheck # TensorIterator
   variants: method
+  structured_delegate: bitwise_or.Tensor_out
 
 - func: __or__.Scalar(Tensor self, Scalar other) -> Tensor
   device_check: NoCheck # TensorIterator
@@ -5578,6 +5887,8 @@
 
 - func: bitwise_xor.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
   device_check: NoCheck # TensorIterator
+  structured: True
+  structured_inherits: TensorIteratorBase
   variants: function
   dispatch:
     CPU, CUDA: bitwise_xor_out
@@ -5586,7 +5897,7 @@
   device_check: NoCheck # TensorIterator
   variants: function
   dispatch:
-
+    CompositeExplicitAutograd: bitwise_xor_out
 
 - func: bitwise_xor.Scalar(Tensor self, Scalar other) -> Tensor
   device_check: NoCheck # TensorIterator
@@ -5595,6 +5906,7 @@
 - func: bitwise_xor.Tensor(Tensor self, Tensor other) -> Tensor
   device_check: NoCheck # TensorIterator
   variants: method, function
+  structured_delegate: bitwise_xor.Tensor_out
 
 - func: bitwise_xor_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
   device_check: NoCheck # TensorIterator
@@ -5603,6 +5915,7 @@
 - func: bitwise_xor_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
   device_check: NoCheck # TensorIterator
   variants: method
+  structured_delegate: bitwise_xor.Tensor_out
 
 - func: __xor__.Scalar(Tensor self, Scalar other) -> Tensor
   device_check: NoCheck # TensorIterator
@@ -5644,6 +5957,47 @@
   dispatch:
     CPU, CUDA: __ilshift__
 
+- func: bitwise_left_shift.Tensor(Tensor self, Tensor other) -> Tensor
+  device_check: NoCheck # TensorIterator
+  variants: function, method
+  structured_delegate: bitwise_left_shift.Tensor_out
+
+- func: bitwise_left_shift_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
+  device_check: NoCheck # TensorIterator
+  variants: method
+  structured_delegate: bitwise_left_shift.Tensor_out
+
+- func: bitwise_left_shift.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+  device_check: NoCheck # TensorIterator
+  structured: True
+  structured_inherits: TensorIteratorBase
+  dispatch:
+    CPU, CUDA: bitwise_left_shift_out
+
+- func: bitwise_left_shift.Tensor_Scalar(Tensor self, Scalar other) -> Tensor
+  device_check: NoCheck # TensorIterator
+  variants: method, function
+  dispatch:
+    CPU, CUDA: bitwise_left_shift
+
+- func: bitwise_left_shift_.Tensor_Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
+  device_check: NoCheck # TensorIterator
+  variants: method
+  dispatch:
+    CPU, CUDA: bitwise_left_shift_
+
+- func: bitwise_left_shift.Tensor_Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
+  device_check: NoCheck # TensorIterator
+  variants: function
+  dispatch:
+    CPU, CUDA: bitwise_left_shift_out
+
+- func: bitwise_left_shift.Scalar_Tensor(Scalar self, Tensor other) -> Tensor
+  device_check: NoCheck # TensorIterator
+  variants: function
+  dispatch:
+    CPU, CUDA: bitwise_left_shift
+
 - func: __rshift__.Scalar(Tensor self, Scalar other) -> Tensor
   device_check: NoCheck # TensorIterator
   variants: method, function
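Illustrative note (not part of the diff): bitwise_left_shift and bitwise_right_shift are newly named counterparts to the existing __lshift__/__rshift__ operators. A minimal torch-rb sketch, assuming the generated bindings expose them as tensor methods:

  # hypothetical usage based on the schema above
  a = Torch.tensor([1, 2, 4])
  a.bitwise_left_shift(1)   # expected values: 2, 4, 8
  a.bitwise_right_shift(1)  # expected values: 0, 1, 2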
@@ -5668,67 +6022,77 @@
   dispatch:
     CPU, CUDA: __irshift__
 
-- func:
-
-
-
-    CUDA: tril_cuda_
+- func: bitwise_right_shift.Tensor(Tensor self, Tensor other) -> Tensor
+  device_check: NoCheck # TensorIterator
+  variants: function, method
+  structured_delegate: bitwise_right_shift.Tensor_out
 
-- func:
+- func: bitwise_right_shift_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
+  device_check: NoCheck # TensorIterator
   variants: method
+  structured_delegate: bitwise_right_shift.Tensor_out
+
+- func: bitwise_right_shift.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+  device_check: NoCheck # TensorIterator
+  structured: True
+  structured_inherits: TensorIteratorBase
   dispatch:
-    CPU:
-    CUDA: triu_cuda_
+    CPU, CUDA: bitwise_right_shift_out
 
-- func:
+- func: bitwise_right_shift.Tensor_Scalar(Tensor self, Scalar other) -> Tensor
   device_check: NoCheck # TensorIterator
-
-
+  variants: method, function
+  dispatch:
+    CPU, CUDA: bitwise_right_shift
 
-- func:
+- func: bitwise_right_shift_.Tensor_Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
   device_check: NoCheck # TensorIterator
   variants: method
   dispatch:
-    CPU:
-    CUDA: legacy::cuda::_th_renorm_
+    CPU, CUDA: bitwise_right_shift_
 
-- func:
+- func: bitwise_right_shift.Tensor_Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
   device_check: NoCheck # TensorIterator
-  variants:
+  variants: function
   dispatch:
-    CPU:
-    CUDA: lerp_cuda_scalar_
+    CPU, CUDA: bitwise_right_shift_out
 
-- func:
+- func: bitwise_right_shift.Scalar_Tensor(Scalar self, Tensor other) -> Tensor
   device_check: NoCheck # TensorIterator
+  variants: function
+  dispatch:
+    CPU, CUDA: bitwise_right_shift
+
+- func: tril_(Tensor(a!) self, int diagonal=0) -> Tensor(a!)
   variants: method
   dispatch:
-    CPU:
-    CUDA:
+    CPU: tril_cpu_
+    CUDA: tril_cuda_
 
-- func:
-  device_check: NoCheck # TensorIterator
+- func: triu_(Tensor(a!) self, int diagonal=0) -> Tensor(a!)
   variants: method
   dispatch:
-    CPU
+    CPU: triu_cpu_
+    CUDA: triu_cuda_
 
-- func:
+- func: digamma_(Tensor(a!) self) -> Tensor(a!)
   device_check: NoCheck # TensorIterator
+  structured_delegate: digamma.out
   variants: method
-  dispatch:
-    CPU, CUDA: fmod_
 
-- func:
+- func: lerp_.Scalar(Tensor(a!) self, Tensor end, Scalar weight) -> Tensor(a!)
   device_check: NoCheck # TensorIterator
   variants: method
   dispatch:
-    CPU
+    CPU: lerp_cpu_scalar_
+    CUDA: lerp_cuda_scalar_
 
-- func:
+- func: lerp_.Tensor(Tensor(a!) self, Tensor end, Tensor weight) -> Tensor(a!)
   device_check: NoCheck # TensorIterator
   variants: method
   dispatch:
-    CPU
+    CPU: lerp_cpu_tensor_
+    CUDA: lerp_cuda_tensor_
 
 - func: addbmm_(Tensor(a!) self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)
   variants: method
@@ -5744,12 +6108,6 @@
   dispatch:
     CPU, CUDA: addbmm
 
-- func: addcdiv_(Tensor(a!) self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor(a!)
-  device_check: NoCheck # TensorIterator
-  variants: method
-  dispatch:
-    CompositeExplicitAutograd: addcdiv_
-
 - func: random_.from(Tensor(a!) self, int from, int? to, *, Generator? generator=None) -> Tensor(a!)
   device_check: NoCheck # TensorIterator
   variants: method
@@ -5870,38 +6228,44 @@
|
|
5870
6228
|
device_guard: False
|
5871
6229
|
|
5872
6230
|
- func: ne.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
|
6231
|
+
structured: True
|
6232
|
+
structured_inherits: TensorIteratorBase
|
5873
6233
|
device_check: NoCheck # TensorIterator
|
5874
6234
|
dispatch:
|
5875
|
-
CPU, CUDA:
|
6235
|
+
CPU, CUDA: ne_Scalar_out
|
5876
6236
|
QuantizedCPU: ne_out_quantized_cpu
|
5877
6237
|
|
5878
6238
|
- func: ne.Scalar(Tensor self, Scalar other) -> Tensor
|
6239
|
+
structured_delegate: ne.Scalar_out
|
5879
6240
|
device_check: NoCheck # TensorIterator
|
5880
6241
|
variants: method, function
|
5881
6242
|
dispatch:
|
5882
|
-
CPU, CUDA: ne
|
5883
6243
|
QuantizedCPU: ne_quantized_cpu
|
5884
6244
|
|
5885
6245
|
- func: ne.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
|
6246
|
+
structured: True
|
6247
|
+
structured_inherits: TensorIteratorBase
|
5886
6248
|
device_check: NoCheck # TensorIterator
|
5887
6249
|
dispatch:
|
5888
|
-
CPU, CUDA:
|
6250
|
+
CPU, CUDA: ne_Tensor_out
|
5889
6251
|
QuantizedCPU: ne_out_quantized_cpu
|
5890
6252
|
|
5891
6253
|
- func: ne.Tensor(Tensor self, Tensor other) -> Tensor
|
6254
|
+
structured_delegate: ne.Tensor_out
|
5892
6255
|
device_check: NoCheck # TensorIterator
|
5893
6256
|
variants: method, function
|
5894
6257
|
dispatch:
|
5895
|
-
CPU, CUDA: ne
|
5896
6258
|
QuantizedCPU: ne_quantized_cpu
|
5897
6259
|
|
5898
6260
|
- func: ne_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
|
6261
|
+
structured_delegate: ne.Scalar_out
|
5899
6262
|
device_check: NoCheck # TensorIterator
|
5900
6263
|
variants: method
|
5901
6264
|
dispatch:
|
5902
6265
|
CompositeExplicitAutograd: ne_
|
5903
6266
|
|
5904
6267
|
- func: ne_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
|
6268
|
+
structured_delegate: ne.Tensor_out
|
5905
6269
|
device_check: NoCheck # TensorIterator
|
5906
6270
|
variants: method
|
5907
6271
|
dispatch:
|
@@ -5925,64 +6289,74 @@
|
|
5925
6289
|
variants: method
|
5926
6290
|
|
5927
6291
|
- func: eq.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
|
6292
|
+
structured: True
|
6293
|
+
structured_inherits: TensorIteratorBase
|
5928
6294
|
device_check: NoCheck # TensorIterator
|
5929
6295
|
dispatch:
|
5930
|
-
CPU, CUDA:
|
6296
|
+
CPU, CUDA: eq_Scalar_out
|
5931
6297
|
QuantizedCPU: eq_out_quantized_cpu
|
5932
6298
|
|
5933
6299
|
- func: eq.Scalar(Tensor self, Scalar other) -> Tensor
|
6300
|
+
structured_delegate: eq.Scalar_out
|
5934
6301
|
device_check: NoCheck # TensorIterator
|
5935
6302
|
variants: method, function
|
5936
6303
|
dispatch:
|
5937
|
-
CPU, CUDA: eq
|
5938
6304
|
QuantizedCPU: eq_quantized_cpu
|
5939
6305
|
|
5940
6306
|
- func: eq.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
|
6307
|
+
structured: True
|
6308
|
+
structured_inherits: TensorIteratorBase
|
5941
6309
|
device_check: NoCheck # TensorIterator
|
5942
6310
|
dispatch:
|
5943
|
-
CPU, CUDA:
|
6311
|
+
CPU, CUDA: eq_Tensor_out
|
5944
6312
|
QuantizedCPU: eq_out_quantized_cpu
|
5945
6313
|
|
5946
6314
|
- func: eq.Tensor(Tensor self, Tensor other) -> Tensor
|
6315
|
+
structured_delegate: eq.Tensor_out
|
5947
6316
|
device_check: NoCheck # TensorIterator
|
5948
6317
|
variants: method, function
|
5949
6318
|
dispatch:
|
5950
|
-
CPU, CUDA: eq
|
5951
6319
|
QuantizedCPU: eq_quantized_cpu
|
5952
6320
|
|
5953
6321
|
- func: ge.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
|
6322
|
+
structured: True
|
6323
|
+
structured_inherits: TensorIteratorBase
|
5954
6324
|
device_check: NoCheck # TensorIterator
|
5955
6325
|
dispatch:
|
5956
|
-
CPU, CUDA:
|
6326
|
+
CPU, CUDA: ge_Scalar_out
|
5957
6327
|
QuantizedCPU: ge_out_quantized_cpu
|
5958
6328
|
|
5959
6329
|
- func: ge.Scalar(Tensor self, Scalar other) -> Tensor
|
6330
|
+
structured_delegate: ge.Scalar_out
|
5960
6331
|
device_check: NoCheck # TensorIterator
|
5961
6332
|
variants: method, function
|
5962
6333
|
dispatch:
|
5963
|
-
CPU, CUDA: ge
|
5964
6334
|
QuantizedCPU: ge_quantized_cpu
|
5965
6335
|
|
5966
6336
|
- func: ge.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
|
6337
|
+
structured: True
|
6338
|
+
structured_inherits: TensorIteratorBase
|
5967
6339
|
device_check: NoCheck # TensorIterator
|
5968
6340
|
dispatch:
|
5969
|
-
CPU, CUDA:
|
6341
|
+
CPU, CUDA: ge_Tensor_out
|
5970
6342
|
QuantizedCPU: ge_out_quantized_cpu
|
5971
6343
|
|
5972
6344
|
- func: ge.Tensor(Tensor self, Tensor other) -> Tensor
|
6345
|
+
structured_delegate: ge.Tensor_out
|
5973
6346
|
device_check: NoCheck # TensorIterator
|
5974
6347
|
variants: method, function
|
5975
6348
|
dispatch:
|
5976
|
-
CPU, CUDA: ge
|
5977
6349
|
QuantizedCPU: ge_quantized_cpu
|
5978
6350
|
|
5979
6351
|
- func: ge_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
|
6352
|
+
structured_delegate: ge.Scalar_out
|
5980
6353
|
device_check: NoCheck # TensorIterator
|
5981
6354
|
variants: method
|
5982
6355
|
dispatch:
|
5983
6356
|
CompositeExplicitAutograd: ge_
|
5984
6357
|
|
5985
6358
|
- func: ge_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
|
6359
|
+
structured_delegate: ge.Tensor_out
|
5986
6360
|
device_check: NoCheck # TensorIterator
|
5987
6361
|
variants: method
|
5988
6362
|
dispatch:
|
@@ -6006,38 +6380,44 @@
|
|
6006
6380
|
variants: method
|
6007
6381
|
|
6008
6382
|
- func: le.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
|
6383
|
+
structured: True
|
6384
|
+
structured_inherits: TensorIteratorBase
|
6009
6385
|
device_check: NoCheck # TensorIterator
|
6010
6386
|
dispatch:
|
6011
|
-
CPU, CUDA:
|
6387
|
+
CPU, CUDA: le_Scalar_out
|
6012
6388
|
QuantizedCPU: le_out_quantized_cpu
|
6013
6389
|
|
6014
6390
|
- func: le.Scalar(Tensor self, Scalar other) -> Tensor
|
6391
|
+
structured_delegate: le.Scalar_out
|
6015
6392
|
device_check: NoCheck # TensorIterator
|
6016
6393
|
variants: method, function
|
6017
6394
|
dispatch:
|
6018
|
-
CPU, CUDA: le
|
6019
6395
|
QuantizedCPU: le_quantized_cpu
|
6020
6396
|
|
6021
6397
|
- func: le.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
|
6398
|
+
structured: True
|
6399
|
+
structured_inherits: TensorIteratorBase
|
6022
6400
|
device_check: NoCheck # TensorIterator
|
6023
6401
|
dispatch:
|
6024
|
-
CPU, CUDA:
|
6402
|
+
CPU, CUDA: le_Tensor_out
|
6025
6403
|
QuantizedCPU: le_out_quantized_cpu
|
6026
6404
|
|
6027
6405
|
- func: le.Tensor(Tensor self, Tensor other) -> Tensor
|
6406
|
+
structured_delegate: le.Tensor_out
|
6028
6407
|
device_check: NoCheck # TensorIterator
|
6029
6408
|
variants: method, function
|
6030
6409
|
dispatch:
|
6031
|
-
CPU, CUDA: le
|
6032
6410
|
QuantizedCPU: le_quantized_cpu
|
6033
6411
|
|
6034
6412
|
- func: le_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
|
6413
|
+
structured_delegate: le.Scalar_out
|
6035
6414
|
device_check: NoCheck # TensorIterator
|
6036
6415
|
variants: method
|
6037
6416
|
dispatch:
|
6038
6417
|
CompositeExplicitAutograd: le_
|
6039
6418
|
|
6040
6419
|
- func: le_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
|
6420
|
+
structured_delegate: le.Tensor_out
|
6041
6421
|
device_check: NoCheck # TensorIterator
|
6042
6422
|
variants: method
|
6043
6423
|
dispatch:
|
@@ -6061,38 +6441,44 @@
|
|
6061
6441
|
variants: method
|
6062
6442
|
|
6063
6443
|
- func: gt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
|
6444
|
+
structured: True
|
6445
|
+
structured_inherits: TensorIteratorBase
|
6064
6446
|
device_check: NoCheck # TensorIterator
|
6065
6447
|
dispatch:
|
6066
|
-
CPU, CUDA:
|
6448
|
+
CPU, CUDA: gt_Scalar_out
|
6067
6449
|
QuantizedCPU: gt_out_quantized_cpu
|
6068
6450
|
|
6069
6451
|
- func: gt.Scalar(Tensor self, Scalar other) -> Tensor
|
6452
|
+
structured_delegate: gt.Scalar_out
|
6070
6453
|
device_check: NoCheck # TensorIterator
|
6071
6454
|
variants: method, function
|
6072
6455
|
dispatch:
|
6073
|
-
CPU, CUDA: gt
|
6074
6456
|
QuantizedCPU: gt_quantized_cpu
|
6075
6457
|
|
6076
6458
|
- func: gt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
|
6459
|
+
structured: True
|
6460
|
+
structured_inherits: TensorIteratorBase
|
6077
6461
|
device_check: NoCheck # TensorIterator
|
6078
6462
|
dispatch:
|
6079
|
-
CPU, CUDA:
|
6463
|
+
CPU, CUDA: gt_Tensor_out
|
6080
6464
|
QuantizedCPU: gt_out_quantized_cpu
|
6081
6465
|
|
6082
6466
|
- func: gt.Tensor(Tensor self, Tensor other) -> Tensor
|
6467
|
+
structured_delegate: gt.Tensor_out
|
6083
6468
|
device_check: NoCheck # TensorIterator
|
6084
6469
|
variants: method, function
|
6085
6470
|
dispatch:
|
6086
|
-
CPU, CUDA: gt
|
6087
6471
|
QuantizedCPU: gt_quantized_cpu
|
6088
6472
|
|
6089
6473
|
- func: gt_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
|
6474
|
+
structured_delegate: gt.Scalar_out
|
6090
6475
|
device_check: NoCheck # TensorIterator
|
6091
6476
|
variants: method
|
6092
6477
|
dispatch:
|
6093
6478
|
CompositeExplicitAutograd: gt_
|
6094
6479
|
|
6095
6480
|
- func: gt_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
|
6481
|
+
structured_delegate: gt.Tensor_out
|
6096
6482
|
device_check: NoCheck # TensorIterator
|
6097
6483
|
variants: method
|
6098
6484
|
dispatch:
|
@@ -6116,38 +6502,44 @@
|
|
6116
6502
|
variants: method
|
6117
6503
|
|
6118
6504
|
- func: lt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
|
6505
|
+
structured: True
|
6506
|
+
structured_inherits: TensorIteratorBase
|
6119
6507
|
device_check: NoCheck # TensorIterator
|
6120
6508
|
dispatch:
|
6121
|
-
CPU, CUDA:
|
6509
|
+
CPU, CUDA: lt_Scalar_out
|
6122
6510
|
QuantizedCPU: lt_out_quantized_cpu
|
6123
6511
|
|
6124
6512
|
- func: lt.Scalar(Tensor self, Scalar other) -> Tensor
|
6513
|
+
structured_delegate: lt.Scalar_out
|
6125
6514
|
device_check: NoCheck # TensorIterator
|
6126
6515
|
variants: method, function
|
6127
6516
|
dispatch:
|
6128
|
-
CPU, CUDA: lt
|
6129
6517
|
QuantizedCPU: lt_quantized_cpu
|
6130
6518
|
|
6131
6519
|
- func: lt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
|
6520
|
+
structured: True
|
6521
|
+
structured_inherits: TensorIteratorBase
|
6132
6522
|
device_check: NoCheck # TensorIterator
|
6133
6523
|
dispatch:
|
6134
|
-
CPU, CUDA:
|
6524
|
+
CPU, CUDA: lt_Tensor_out
|
6135
6525
|
QuantizedCPU: lt_out_quantized_cpu
|
6136
6526
|
|
6137
6527
|
- func: lt.Tensor(Tensor self, Tensor other) -> Tensor
|
6528
|
+
structured_delegate: lt.Tensor_out
|
6138
6529
|
device_check: NoCheck # TensorIterator
|
6139
6530
|
variants: method, function
|
6140
6531
|
dispatch:
|
6141
|
-
CPU, CUDA: lt
|
6142
6532
|
QuantizedCPU: lt_quantized_cpu
|
6143
6533
|
|
6144
6534
|
- func: lt_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
|
6535
|
+
structured_delegate: lt.Scalar_out
|
6145
6536
|
device_check: NoCheck # TensorIterator
|
6146
6537
|
variants: method
|
6147
6538
|
dispatch:
|
6148
6539
|
CompositeExplicitAutograd: lt_
|
6149
6540
|
|
6150
6541
|
- func: lt_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
|
6542
|
+
structured_delegate: lt.Tensor_out
|
6151
6543
|
device_check: NoCheck # TensorIterator
|
6152
6544
|
variants: method
|
6153
6545
|
dispatch:
|
@@ -6186,14 +6578,14 @@
 
 - func: index_select.out(Tensor self, int dim, Tensor index, *, Tensor(a!) out) -> Tensor(a!)
   dispatch:
-    CPU: index_select_out_cpu_
-    CUDA: index_select_out_cuda
+    CPU, QuantizedCPU: index_select_out_cpu_
+    CUDA, QuantizedCUDA: index_select_out_cuda
 
 - func: index_select(Tensor self, int dim, Tensor index) -> Tensor
   variants: method, function
   dispatch:
-    CPU: index_select_cpu_
-    CUDA: index_select_cuda
+    CPU, QuantizedCPU: index_select_cpu_
+    CUDA, QuantizedCUDA: index_select_cuda
     SparseCPU: index_select_sparse
     SparseCUDA: index_select_sparse
 
@@ -6225,27 +6617,26 @@
 
 - func: nonzero.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
   dispatch:
-    CPU:
+    CPU: nonzero_out_cpu
     CUDA: nonzero_out_cuda
 
 - func: nonzero(Tensor self) -> Tensor
   variants: method, function
   dispatch:
-    CPU:
+    CPU: nonzero_cpu
     CUDA: nonzero_cuda
 
 - func: nonzero_numpy(Tensor self) -> Tensor[]
   variants: method, function
 
 - func: gather.out(Tensor self, int dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) out) -> Tensor(a!)
+  structured: True
   dispatch:
-    CPU:
-    CUDA: gather_out_cpu_cuda
+    CPU, CUDA: gather_out
 
 - func: gather(Tensor self, int dim, Tensor index, *, bool sparse_grad=False) -> Tensor
   variants: method, function
-
-    CPU, CUDA: gather
+  structured_delegate: gather.out
 
 - func: gather_backward(Tensor grad, Tensor self, int dim, Tensor index, bool sparse_grad) -> Tensor
   variants: function
@@ -6260,46 +6651,52 @@
|
|
6260
6651
|
- func: _gather_sparse_backward(Tensor self, int dim, Tensor index, Tensor grad) -> Tensor
|
6261
6652
|
|
6262
6653
|
- func: addcmul.out(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1, Tensor(a!) out) -> Tensor(a!)
|
6654
|
+
structured: True
|
6655
|
+
structured_inherits: TensorIteratorBase
|
6263
6656
|
device_check: NoCheck # TensorIterator
|
6264
6657
|
dispatch:
|
6265
6658
|
CPU, CUDA: addcmul_out
|
6266
6659
|
|
6267
6660
|
- func: addcmul(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor
|
6661
|
+
structured_delegate: addcmul.out
|
6268
6662
|
device_check: NoCheck # TensorIterator
|
6269
6663
|
variants: method, function
|
6270
|
-
dispatch:
|
6271
|
-
CompositeExplicitAutograd: addcmul
|
6272
6664
|
|
6273
6665
|
- func: addcmul_(Tensor(a!) self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor(a!)
|
6666
|
+
structured_delegate: addcmul.out
|
6274
6667
|
device_check: NoCheck # TensorIterator
|
6275
6668
|
variants: method
|
6276
|
-
dispatch:
|
6277
|
-
CompositeExplicitAutograd: addcmul_
|
6278
6669
|
|
6279
6670
|
- func: addcdiv.out(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1, Tensor(a!) out) -> Tensor(a!)
|
6671
|
+
structured: True
|
6672
|
+
structured_inherits: TensorIteratorBase
|
6280
6673
|
device_check: NoCheck # TensorIterator
|
6281
6674
|
dispatch:
|
6282
6675
|
CPU, CUDA: addcdiv_out
|
6283
6676
|
|
6284
6677
|
- func: addcdiv(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor
|
6678
|
+
structured_delegate: addcdiv.out
|
6285
6679
|
device_check: NoCheck # TensorIterator
|
6286
6680
|
variants: method, function
|
6287
|
-
dispatch:
|
6288
|
-
CompositeExplicitAutograd: addcdiv
|
6289
6681
|
|
6290
|
-
- func:
|
6682
|
+
- func: addcdiv_(Tensor(a!) self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor(a!)
|
6683
|
+
structured_delegate: addcdiv.out
|
6684
|
+
device_check: NoCheck # TensorIterator
|
6685
|
+
variants: method
|
6686
|
+
|
6687
|
+
- func: cross_entropy_loss(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, int ignore_index=-100, float label_smoothing=0.0) -> Tensor
|
6291
6688
|
python_module: nn
|
6292
6689
|
|
6293
6690
|
- func: lstsq.X(Tensor self, Tensor A, *, Tensor(a!) X, Tensor(b!) qr) -> (Tensor(a!) solution, Tensor(b!) QR)
|
6294
6691
|
dispatch:
|
6295
|
-
CPU:
|
6296
|
-
CUDA:
|
6692
|
+
CPU: legacy_lstsq_out
|
6693
|
+
CUDA: legacy_lstsq_out_cuda
|
6297
6694
|
|
6298
6695
|
- func: lstsq(Tensor self, Tensor A) -> (Tensor solution, Tensor QR)
|
6299
6696
|
variants: method, function
|
6300
6697
|
dispatch:
|
6301
|
-
CPU:
|
6302
|
-
CUDA:
|
6698
|
+
CPU: legacy_lstsq
|
6699
|
+
CUDA: legacy_lstsq_cuda
|
6303
6700
|
|
6304
6701
|
- func: triangular_solve.X(Tensor self, Tensor A, bool upper=True, bool transpose=False, bool unitriangular=False, *, Tensor(a!) X, Tensor(b!) M) -> (Tensor(a!) solution, Tensor(b!) cloned_coefficient)
|
6305
6702
|
dispatch:
|
@@ -6444,19 +6841,19 @@
   dispatch:
     CPU, CUDA: ormqr
 
-- func: _lu_with_info(Tensor self, bool pivot=True, bool check_errors=True) -> (Tensor, Tensor, Tensor)
+- func: _lu_with_info(Tensor self, bool pivot=True, bool check_errors=True) -> (Tensor LU, Tensor pivots, Tensor info)
   variants: function
   dispatch:
     CPU, CUDA: _lu_with_info
 
 - func: lu_solve.out(Tensor self, Tensor LU_data, Tensor LU_pivots, *, Tensor(a!) out) -> Tensor(a!)
   dispatch:
-
+    CPU, CUDA: lu_solve_out
 
 - func: lu_solve(Tensor self, Tensor LU_data, Tensor LU_pivots) -> Tensor
   variants: method, function
   dispatch:
-
+    CPU, CUDA: lu_solve
 
 - func: lu_unpack(Tensor LU_data, Tensor LU_pivots, bool unpack_data=True, bool unpack_pivots=True) -> (Tensor P, Tensor L, Tensor U)
   variants: function
@@ -6579,8 +6976,11 @@
 
 - func: signbit(Tensor self) -> Tensor
   variants: function, method
+  structured_delegate: signbit.out
 
 - func: signbit.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+  structured: True
+  structured_inherits: TensorIteratorBase
   dispatch:
     CPU: signbit_out
     CUDA: signbit_out
@@ -6636,36 +7036,67 @@
|
|
6636
7036
|
|
6637
7037
|
- func: histc.out(Tensor self, int bins=100, Scalar min=0, Scalar max=0, *, Tensor(a!) out) -> Tensor(a!)
|
6638
7038
|
dispatch:
|
6639
|
-
CPU:
|
7039
|
+
CPU: histogram_histc_cpu_out
|
6640
7040
|
CUDA: _histc_out_cuda
|
6641
7041
|
|
6642
7042
|
- func: histc(Tensor self, int bins=100, Scalar min=0, Scalar max=0) -> Tensor
|
6643
7043
|
variants: method, function
|
6644
7044
|
dispatch:
|
6645
|
-
CPU:
|
7045
|
+
CPU: histogram_histc_cpu
|
6646
7046
|
CUDA: _histc_cuda
|
6647
7047
|
|
7048
|
+
- func: histogram.bins_tensor_out(Tensor self, Tensor bins, *, Tensor? weight=None, bool density=False, Tensor(a!) hist, Tensor(b!) bin_edges) -> (Tensor(a!) hist, Tensor(b!) bin_edges)
|
7049
|
+
dispatch:
|
7050
|
+
CPU: histogram_out_cpu
|
7051
|
+
|
7052
|
+
- func: histogram.bins_tensor(Tensor self, Tensor bins, *, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor bin_edges)
|
7053
|
+
variants: method, function
|
7054
|
+
dispatch:
|
7055
|
+
CPU: histogram_cpu
|
7056
|
+
|
7057
|
+
- func: histogram.bin_ct_out(Tensor self, int bins=100, *, float[]? range=None, Tensor? weight=None, bool density=False, Tensor(a!) hist, Tensor(b!) bin_edges) -> (Tensor(a!) hist, Tensor(b!) bin_edges)
|
7058
|
+
dispatch:
|
7059
|
+
CPU: histogram_out_cpu
|
7060
|
+
|
7061
|
+
- func: histogram.bin_ct(Tensor self, int bins=100, *, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor bin_edges)
|
7062
|
+
variants: method, function
|
7063
|
+
dispatch:
|
7064
|
+
CPU: histogram_cpu
|
7065
|
+
|
6648
7066
|
- func: fmod.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
|
6649
7067
|
device_check: NoCheck # TensorIterator
|
6650
7068
|
dispatch:
|
6651
|
-
|
7069
|
+
CompositeExplicitAutograd: fmod_out
|
6652
7070
|
|
6653
7071
|
- func: fmod.Scalar(Tensor self, Scalar other) -> Tensor
|
6654
7072
|
device_check: NoCheck # TensorIterator
|
6655
7073
|
variants: method, function
|
6656
7074
|
dispatch:
|
6657
|
-
|
7075
|
+
CompositeExplicitAutograd: fmod
|
7076
|
+
|
7077
|
+
- func: fmod_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
|
7078
|
+
device_check: NoCheck # TensorIterator
|
7079
|
+
variants: method
|
7080
|
+
dispatch:
|
7081
|
+
CompositeExplicitAutograd: fmod_
|
6658
7082
|
|
6659
7083
|
- func: fmod.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
|
6660
7084
|
device_check: NoCheck # TensorIterator
|
7085
|
+
structured: True
|
7086
|
+
structured_inherits: TensorIteratorBase
|
6661
7087
|
dispatch:
|
6662
7088
|
CPU, CUDA: fmod_out
|
6663
7089
|
|
6664
7090
|
- func: fmod.Tensor(Tensor self, Tensor other) -> Tensor
|
6665
7091
|
device_check: NoCheck # TensorIterator
|
7092
|
+
structured_delegate: fmod.Tensor_out
|
6666
7093
|
variants: method, function
|
6667
|
-
|
6668
|
-
|
7094
|
+
|
7095
|
+
|
7096
|
+
- func: fmod_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
|
7097
|
+
device_check: NoCheck # TensorIterator
|
7098
|
+
variants: method
|
7099
|
+
structured_delegate: fmod.Tensor_out
|
6669
7100
|
|
6670
7101
|
- func: hypot.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
|
6671
7102
|
structured: True
|
@@ -6728,24 +7159,39 @@
|
|
6728
7159
|
CompositeExplicitAutograd: nextafter_
|
6729
7160
|
|
6730
7161
|
- func: remainder.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
|
6731
|
-
device_check: NoCheck # TensorIterator
|
6732
7162
|
dispatch:
|
6733
|
-
|
7163
|
+
CompositeExplicitAutograd: remainder_out
|
6734
7164
|
|
6735
7165
|
- func: remainder.Scalar(Tensor self, Scalar other) -> Tensor
|
6736
|
-
device_check: NoCheck # TensorIterator
|
6737
7166
|
variants: method, function
|
6738
7167
|
dispatch:
|
6739
|
-
|
7168
|
+
CompositeExplicitAutograd: remainder
|
7169
|
+
|
7170
|
+
- func: remainder_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
|
7171
|
+
variants: method
|
7172
|
+
dispatch:
|
7173
|
+
CompositeExplicitAutograd: remainder_
|
6740
7174
|
|
6741
7175
|
- func: remainder.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
|
6742
7176
|
device_check: NoCheck # TensorIterator
|
7177
|
+
structured: True
|
7178
|
+
structured_inherits: TensorIteratorBase
|
6743
7179
|
dispatch:
|
6744
7180
|
CPU, CUDA: remainder_out
|
6745
7181
|
|
6746
7182
|
- func: remainder.Tensor(Tensor self, Tensor other) -> Tensor
|
6747
7183
|
device_check: NoCheck # TensorIterator
|
7184
|
+
structured_delegate: remainder.Tensor_out
|
6748
7185
|
variants: method, function
|
7186
|
+
|
7187
|
+
- func: remainder_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
|
7188
|
+
device_check: NoCheck # TensorIterator
|
7189
|
+
structured_delegate: remainder.Tensor_out
|
7190
|
+
variants: method
|
7191
|
+
|
7192
|
+
- func: remainder.Scalar_Tensor(Scalar self, Tensor other) -> Tensor
|
7193
|
+
device_check: NoCheck # TensorIterator
|
7194
|
+
variants: function
|
6749
7195
|
dispatch:
|
6750
7196
|
CPU, CUDA: remainder
|
6751
7197
|
|
@@ -6757,11 +7203,14 @@
|
|
6757
7203
|
QuantizedCPU: min_quantized_cpu
|
6758
7204
|
|
6759
7205
|
- func: fmin(Tensor self, Tensor other) -> Tensor
|
7206
|
+
structured_delegate: fmin.out
|
7207
|
+
device_check: NoCheck # TensorIterator
|
6760
7208
|
variants: method, function
|
6761
|
-
dispatch:
|
6762
|
-
CPU, CUDA: fmin
|
6763
7209
|
|
6764
7210
|
- func: fmin.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
|
7211
|
+
structured: True
|
7212
|
+
structured_inherits: TensorIteratorBase
|
7213
|
+
device_check: NoCheck # TensorIterator
|
6765
7214
|
dispatch:
|
6766
7215
|
CPU, CUDA: fmin_out
|
6767
7216
|
|
@@ -6773,11 +7222,14 @@
|
|
6773
7222
|
QuantizedCPU: max_quantized_cpu
|
6774
7223
|
|
6775
7224
|
- func: fmax(Tensor self, Tensor other) -> Tensor
|
7225
|
+
structured_delegate: fmax.out
|
7226
|
+
device_check: NoCheck # TensorIterator
|
6776
7227
|
variants: method, function
|
6777
|
-
dispatch:
|
6778
|
-
CPU, CUDA: fmax
|
6779
7228
|
|
6780
7229
|
- func: fmax.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
|
7230
|
+
structured: True
|
7231
|
+
structured_inherits: TensorIteratorBase
|
7232
|
+
device_check: NoCheck # TensorIterator
|
6781
7233
|
dispatch:
|
6782
7234
|
CPU, CUDA: fmax_out
|
6783
7235
|
|
@@ -6928,29 +7380,43 @@
|
|
6928
7380
|
|
6929
7381
|
- func: all(Tensor self) -> Tensor
|
6930
7382
|
device_check: NoCheck # TensorIterator
|
7383
|
+
structured_delegate: all.all_out
|
6931
7384
|
variants: method, function
|
7385
|
+
|
7386
|
+
- func: all.all_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
|
7387
|
+
device_check: NoCheck
|
7388
|
+
structured: True
|
6932
7389
|
dispatch:
|
6933
|
-
CPU, CUDA:
|
7390
|
+
CPU, CUDA: all_all_out
|
6934
7391
|
|
6935
7392
|
- func: any(Tensor self) -> Tensor
|
6936
7393
|
device_check: NoCheck # TensorIterator
|
7394
|
+
structured_delegate: any.all_out
|
6937
7395
|
variants: method, function
|
6938
7396
|
dispatch:
|
6939
|
-
CPU, CUDA: any
|
6940
7397
|
SparseCPU, SparseCUDA: any_sparse
|
6941
7398
|
|
7399
|
+
- func: any.all_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
|
7400
|
+
device_check: NoCheck
|
7401
|
+
structured: True
|
7402
|
+
dispatch:
|
7403
|
+
CPU, CUDA: any_all_out
|
7404
|
+
|
6942
7405
|
- func: renorm.out(Tensor self, Scalar p, int dim, Scalar maxnorm, *, Tensor(a!) out) -> Tensor(a!)
|
6943
7406
|
device_check: NoCheck # TensorIterator
|
7407
|
+
structured: True
|
6944
7408
|
dispatch:
|
6945
|
-
CPU:
|
6946
|
-
CUDA: legacy::cuda::_th_renorm_out
|
7409
|
+
CPU, CUDA: renorm_out
|
6947
7410
|
|
6948
7411
|
- func: renorm(Tensor self, Scalar p, int dim, Scalar maxnorm) -> Tensor
|
6949
7412
|
device_check: NoCheck # TensorIterator
|
6950
7413
|
variants: method, function
|
6951
|
-
|
6952
|
-
|
6953
|
-
|
7414
|
+
structured_delegate: renorm.out
|
7415
|
+
|
7416
|
+
- func: renorm_(Tensor(a!) self, Scalar p, int dim, Scalar maxnorm) -> Tensor(a!)
|
7417
|
+
device_check: NoCheck # TensorIterator
|
7418
|
+
variants: method
|
7419
|
+
structured_delegate: renorm.out
|
6954
7420
|
|
6955
7421
|
- func: unfold(Tensor(a) self, int dimension, int size, int step) -> Tensor(a)
|
6956
7422
|
variants: method
|
@@ -7084,26 +7550,6 @@
     CPU: _index_copy_impl_
     CUDA: _index_copy_impl_
 
-- func: _cumsum(Tensor self, int dim) -> Tensor
-  dispatch:
-    CPU: _cumsum_cpu
-    CUDA: _cumsum_cuda
-
-- func: _cumsum.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)
-  dispatch:
-    CPU: _cumsum_out_cpu
-    CUDA: _cumsum_out_cuda
-
-- func: _cumprod(Tensor self, int dim) -> Tensor
-  dispatch:
-    CPU: _cumprod_cpu
-    CUDA: _cumprod_cuda
-
-- func: _cumprod.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)
-  dispatch:
-    CPU: _cumprod_out_cpu
-    CUDA: _cumprod_out_cuda
-
 - func: _amp_foreach_non_finite_check_and_unscale_(Tensor(a!)[] self, Tensor(b!) found_inf, Tensor inv_scale) -> ()
   variants: function
   dispatch:
@@ -7793,6 +8239,15 @@
     CPU: searchsorted_cpu
     CUDA: searchsorted_cuda
 
+- func: _convert_indices_from_coo_to_csr(Tensor self, int size, *, bool out_int32=False) -> Tensor
+  structured_delegate: _convert_indices_from_coo_to_csr.out
+
+- func: _convert_indices_from_coo_to_csr.out(Tensor self, int size, *, bool out_int32=False, Tensor(a!) out) -> Tensor(a!)
+  structured: True
+  dispatch:
+    CPU: _convert_indices_from_coo_to_csr_structured_cpu
+    CUDA: _convert_indices_from_coo_to_csr_structured_cuda
+
 ## NN wrappers
 
 - func: mse_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
@@ -7841,25 +8296,25 @@
|
|
7841
8296
|
python_module: nn
|
7842
8297
|
dispatch:
|
7843
8298
|
CPU: multi_margin_loss_cpu_out
|
7844
|
-
CUDA:
|
8299
|
+
CUDA: multi_margin_loss_cuda_out
|
7845
8300
|
|
7846
8301
|
- func: multi_margin_loss(Tensor self, Tensor target, Scalar p=1, Scalar margin=1, Tensor? weight=None, int reduction=Mean) -> Tensor
|
7847
8302
|
python_module: nn
|
7848
8303
|
dispatch:
|
7849
8304
|
CPU: multi_margin_loss_cpu
|
7850
|
-
CUDA:
|
8305
|
+
CUDA: multi_margin_loss_cuda
|
7851
8306
|
|
7852
8307
|
- func: multi_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Scalar p, Scalar margin, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) grad_input) -> Tensor(a!)
|
7853
8308
|
python_module: nn
|
7854
8309
|
dispatch:
|
7855
8310
|
CPU: multi_margin_loss_cpu_backward_out
|
7856
|
-
CUDA:
|
8311
|
+
CUDA: multi_margin_loss_cuda_backward_out
|
7857
8312
|
|
7858
8313
|
- func: multi_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, Scalar p, Scalar margin, Tensor? weight=None, int reduction=Mean) -> Tensor
|
7859
8314
|
python_module: nn
|
7860
8315
|
dispatch:
|
7861
8316
|
CPU: multi_margin_loss_cpu_backward
|
7862
|
-
CUDA:
|
8317
|
+
CUDA: multi_margin_loss_cuda_backward
|
7863
8318
|
|
7864
8319
|
- func: multilabel_margin_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
|
7865
8320
|
python_module: nn
|
@@ -7871,25 +8326,25 @@
|
|
7871
8326
|
python_module: nn
|
7872
8327
|
dispatch:
|
7873
8328
|
CPU: multilabel_margin_loss_forward_out_cpu
|
7874
|
-
CUDA:
|
8329
|
+
CUDA: multilabel_margin_loss_forward_out_cuda
|
7875
8330
|
|
7876
8331
|
- func: multilabel_margin_loss_forward(Tensor self, Tensor target, int reduction) -> (Tensor output, Tensor is_target)
|
7877
8332
|
python_module: nn
|
7878
8333
|
dispatch:
|
7879
8334
|
CPU: multilabel_margin_loss_forward_cpu
|
7880
|
-
CUDA:
|
8335
|
+
CUDA: multilabel_margin_loss_forward_cuda
|
7881
8336
|
|
7882
8337
|
- func: multilabel_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, Tensor is_target, *, Tensor(a!) grad_input) -> Tensor(a!)
|
7883
8338
|
python_module: nn
|
7884
8339
|
dispatch:
|
7885
8340
|
CPU: multilabel_margin_loss_backward_cpu_out
|
7886
|
-
CUDA:
|
8341
|
+
CUDA: multilabel_margin_loss_backward_cuda_out
|
7887
8342
|
|
7888
8343
|
- func: multilabel_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, Tensor is_target) -> Tensor
|
7889
8344
|
python_module: nn
|
7890
8345
|
dispatch:
|
7891
8346
|
CPU: multilabel_margin_loss_backward_cpu
|
7892
|
-
CUDA:
|
8347
|
+
CUDA: multilabel_margin_loss_backward_cuda
|
7893
8348
|
|
7894
8349
|
- func: nll_loss.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, int ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!)
|
7895
8350
|
python_module: nn
|
@@ -7902,27 +8357,25 @@
|
|
7902
8357
|
|
7903
8358
|
- func: nll_loss_forward.output(Tensor self, Tensor target, Tensor? weight, int reduction, int ignore_index, *, Tensor(a!) output, Tensor(b!) total_weight) -> (Tensor(a!), Tensor(b!))
|
7904
8359
|
python_module: nn
|
8360
|
+
structured: True
|
7905
8361
|
dispatch:
|
7906
8362
|
CPU: nll_loss_forward_out_cpu
|
7907
|
-
CUDA:
|
8363
|
+
CUDA: nll_loss_forward_out_cuda
|
7908
8364
|
|
7909
8365
|
- func: nll_loss_forward(Tensor self, Tensor target, Tensor? weight, int reduction, int ignore_index) -> (Tensor output, Tensor total_weight)
|
7910
8366
|
python_module: nn
|
7911
|
-
|
7912
|
-
CPU: nll_loss_forward_cpu
|
7913
|
-
CUDA: legacy::cuda::_thnn_nll_loss_forward
|
8367
|
+
structured_delegate: nll_loss_forward.output
|
7914
8368
|
|
7915
8369
|
- func: nll_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, int ignore_index, Tensor total_weight, *, Tensor(a!) grad_input) -> Tensor(a!)
|
7916
8370
|
python_module: nn
|
8371
|
+
structured: True
|
7917
8372
|
dispatch:
|
7918
8373
|
CPU: nll_loss_backward_out_cpu
|
7919
|
-
CUDA:
|
8374
|
+
CUDA: nll_loss_backward_out_cuda
|
7920
8375
|
|
7921
8376
|
- func: nll_loss_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, int ignore_index, Tensor total_weight) -> Tensor
|
7922
8377
|
python_module: nn
|
7923
|
-
|
7924
|
-
CPU: nll_loss_backward_cpu
|
7925
|
-
CUDA: legacy::cuda::_thnn_nll_loss_backward
|
8378
|
+
structured_delegate: nll_loss_backward.grad_input
|
7926
8379
|
|
7927
8380
|
- func: nll_loss2d.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, int ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!)
|
7928
8381
|
python_module: nn
|
@@ -7934,25 +8387,25 @@
|
|
7934
8387
|
python_module: nn
|
7935
8388
|
dispatch:
|
7936
8389
|
CPU: nll_loss2d_forward_out_cpu
|
7937
|
-
CUDA:
|
8390
|
+
CUDA: nll_loss2d_forward_out_cuda
|
7938
8391
|
|
7939
8392
|
- func: nll_loss2d_forward(Tensor self, Tensor target, Tensor? weight, int reduction, int ignore_index) -> (Tensor output, Tensor total_weight)
|
7940
8393
|
python_module: nn
|
7941
8394
|
dispatch:
|
7942
8395
|
CPU: nll_loss2d_forward_cpu
|
7943
|
-
CUDA:
|
8396
|
+
CUDA: nll_loss2d_forward_cuda
|
7944
8397
|
|
7945
8398
|
- func: nll_loss2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, int ignore_index, Tensor total_weight, *, Tensor(a!) grad_input) -> Tensor(a!)
|
7946
8399
|
python_module: nn
|
7947
8400
|
dispatch:
|
7948
8401
|
CPU: nll_loss2d_backward_out_cpu
|
7949
|
-
CUDA:
|
8402
|
+
CUDA: nll_loss2d_backward_out_cuda
|
7950
8403
|
|
7951
8404
|
- func: nll_loss2d_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, int ignore_index, Tensor total_weight) -> Tensor
|
7952
8405
|
python_module: nn
|
7953
8406
|
dispatch:
|
7954
8407
|
CPU: nll_loss2d_backward_cpu
|
7955
|
-
CUDA:
|
8408
|
+
CUDA: nll_loss2d_backward_cuda
|
7956
8409
|
|
7957
8410
|
- func: smooth_l1_loss.out(Tensor self, Tensor target, int reduction=Mean, float beta=1.0, *, Tensor(a!) out) -> Tensor(a!)
|
7958
8411
|
device_check: NoCheck # TensorIterator
|
@@ -8031,10 +8484,16 @@
   device_check: NoCheck # TensorIterator
   python_module: nn
 
-- func: elu_backward(Tensor grad_output, Scalar alpha, Scalar scale, Scalar input_scale, bool is_result, Tensor self_or_result) -> Tensor
+- func: elu_backward.grad_input(Tensor grad_output, Scalar alpha, Scalar scale, Scalar input_scale, bool is_result, Tensor self_or_result, *, Tensor(a!) grad_input) -> Tensor(a!)
+  structured: True
+  structured_inherits: TensorIteratorBase
   python_module: nn
   dispatch:
-    CPU, CUDA:
+    CPU, CUDA: elu_backward_out
+
+- func: elu_backward(Tensor grad_output, Scalar alpha, Scalar scale, Scalar input_scale, bool is_result, Tensor self_or_result) -> Tensor
+  structured_delegate: elu_backward.grad_input
+  python_module: nn
 
 - func: elu_(Tensor(a!) self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1) -> Tensor(a!)
   structured_delegate: elu.out
@@ -8044,28 +8503,28 @@
|
|
8044
8503
|
CompositeExplicitAutograd: elu_
|
8045
8504
|
|
8046
8505
|
- func: glu.out(Tensor self, int dim=-1, *, Tensor(a!) out) -> Tensor(a!)
|
8506
|
+
structured: True
|
8507
|
+
structured_inherits: TensorIteratorBase
|
8047
8508
|
python_module: nn
|
8048
8509
|
dispatch:
|
8049
|
-
CPU: glu_out
|
8050
|
-
CUDA: legacy::cuda::_thnn_glu_forward_out
|
8510
|
+
CPU, CUDA: glu_out
|
8051
8511
|
|
8052
8512
|
- func: glu(Tensor self, int dim=-1) -> Tensor
|
8513
|
+
structured_delegate: glu.out
|
8514
|
+
device_check: NoCheck # TensorIterator
|
8053
8515
|
python_module: nn
|
8054
|
-
dispatch:
|
8055
|
-
CPU: glu
|
8056
|
-
CUDA: legacy::cuda::_thnn_glu_forward
|
8057
8516
|
|
8058
8517
|
- func: glu_backward.grad_input(Tensor grad_output, Tensor self, int dim, *, Tensor(a!) grad_input) -> Tensor(a!)
|
8059
8518
|
python_module: nn
|
8060
8519
|
dispatch:
|
8061
|
-
CPU:
|
8062
|
-
CUDA:
|
8520
|
+
CPU: glu_backward_cpu_out
|
8521
|
+
CUDA: glu_backward_cuda_out
|
8063
8522
|
|
8064
8523
|
- func: glu_backward(Tensor grad_output, Tensor self, int dim) -> Tensor
|
8065
8524
|
python_module: nn
|
8066
8525
|
dispatch:
|
8067
|
-
CPU:
|
8068
|
-
CUDA:
|
8526
|
+
CPU: glu_backward_cpu
|
8527
|
+
CUDA: glu_backward_cuda
|
8069
8528
|
|
8070
8529
|
- func: hardsigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
|
8071
8530
|
structured: True
|
@@ -8087,10 +8546,16 @@
|
|
8087
8546
|
device_check: NoCheck # TensorIterator
|
8088
8547
|
python_module: nn
|
8089
8548
|
|
8090
|
-
- func: hardsigmoid_backward(Tensor grad_output, Tensor self) -> Tensor
|
8549
|
+
- func: hardsigmoid_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!)
|
8550
|
+
structured: True
|
8551
|
+
structured_inherits: TensorIteratorBase
|
8091
8552
|
python_module: nn
|
8092
8553
|
dispatch:
|
8093
|
-
CPU, CUDA:
|
8554
|
+
CPU, CUDA: hardsigmoid_backward_out
|
8555
|
+
|
8556
|
+
- func: hardsigmoid_backward(Tensor grad_output, Tensor self) -> Tensor
|
8557
|
+
structured_delegate: hardsigmoid_backward.grad_input
|
8558
|
+
python_module: nn
|
8094
8559
|
|
8095
8560
|
- func: hardtanh.out(Tensor self, Scalar min_val=-1, Scalar max_val=1, *, Tensor(a!) out) -> Tensor(a!)
|
8096
8561
|
device_check: NoCheck # TensorIterator
|
@@ -8162,10 +8627,16 @@
|
|
8162
8627
|
dispatch:
|
8163
8628
|
QuantizedCPU: leaky_relu_quantized_cpu
|
8164
8629
|
|
8165
|
-
- func: leaky_relu_backward(Tensor grad_output, Tensor self, Scalar negative_slope, bool self_is_result) -> Tensor
|
8630
|
+
- func: leaky_relu_backward.grad_input(Tensor grad_output, Tensor self, Scalar negative_slope, bool self_is_result, *, Tensor(a!) grad_input) -> Tensor(a!)
|
8631
|
+
structured: True
|
8632
|
+
structured_inherits: TensorIteratorBase
|
8166
8633
|
python_module: nn
|
8167
8634
|
dispatch:
|
8168
|
-
CPU, CUDA:
|
8635
|
+
CPU, CUDA: leaky_relu_backward_out
|
8636
|
+
|
8637
|
+
- func: leaky_relu_backward(Tensor grad_output, Tensor self, Scalar negative_slope, bool self_is_result) -> Tensor
|
8638
|
+
structured_delegate: leaky_relu_backward.grad_input
|
8639
|
+
python_module: nn
|
8169
8640
|
|
8170
8641
|
- func: leaky_relu_(Tensor(a!) self, Scalar negative_slope=0.01) -> Tensor(a!)
|
8171
8642
|
structured_delegate: leaky_relu.out
|
@@ -8187,38 +8658,38 @@
|
|
8187
8658
|
python_module: nn
|
8188
8659
|
dispatch:
|
8189
8660
|
CPU: log_sigmoid_forward_out_cpu
|
8190
|
-
CUDA:
|
8661
|
+
CUDA: log_sigmoid_forward_out_cuda
|
8191
8662
|
|
8192
8663
|
- func: log_sigmoid_forward(Tensor self) -> (Tensor output, Tensor buffer)
|
8193
8664
|
device_check: NoCheck # TensorIterator
|
8194
8665
|
python_module: nn
|
8195
8666
|
dispatch:
|
8196
8667
|
CPU: log_sigmoid_forward_cpu
|
8197
|
-
CUDA:
|
8668
|
+
CUDA: log_sigmoid_forward_cuda
|
8198
8669
|
|
8199
8670
|
- func: log_sigmoid_backward.grad_input(Tensor grad_output, Tensor self, Tensor buffer, *, Tensor(a!) grad_input) -> Tensor(a!)
|
8200
8671
|
python_module: nn
|
8201
8672
|
dispatch:
|
8202
|
-
CPU:
|
8203
|
-
CUDA:
|
8673
|
+
CPU: log_sigmoid_backward_cpu_out
|
8674
|
+
CUDA: log_sigmoid_backward_cuda_out
|
8204
8675
|
|
8205
8676
|
- func: log_sigmoid_backward(Tensor grad_output, Tensor self, Tensor buffer) -> Tensor
|
8206
8677
|
python_module: nn
|
8207
8678
|
dispatch:
|
8208
8679
|
CPU: log_sigmoid_backward_cpu
|
8209
|
-
CUDA:
|
8680
|
+
CUDA: log_sigmoid_backward_cuda
|
8210
8681
|
|
8211
8682
|
- func: rrelu_with_noise.out(Tensor self, Tensor noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)
|
8212
8683
|
python_module: nn
|
8213
8684
|
dispatch:
|
8214
8685
|
CPU: rrelu_with_noise_out_cpu
|
8215
|
-
CUDA:
|
8686
|
+
CUDA: rrelu_with_noise_out_cuda
|
8216
8687
|
|
8217
8688
|
- func: rrelu_with_noise(Tensor self, Tensor noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor
|
8218
8689
|
python_module: nn
|
8219
8690
|
dispatch:
|
8220
8691
|
CPU: rrelu_with_noise_cpu
|
8221
|
-
CUDA:
|
8692
|
+
CUDA: rrelu_with_noise_cuda
|
8222
8693
|
|
8223
8694
|
- func: rrelu_with_noise_backward(Tensor grad_output, Tensor self, Tensor noise, Scalar lower, Scalar upper, bool training, bool self_is_result) -> Tensor
|
8224
8695
|
python_module: nn
|
@@ -8229,7 +8700,7 @@
|
|
8229
8700
|
python_module: nn
|
8230
8701
|
dispatch:
|
8231
8702
|
CPU: rrelu_with_noise_cpu_
|
8232
|
-
CUDA:
|
8703
|
+
CUDA: rrelu_with_noise_cuda_
|
8233
8704
|
|
8234
8705
|
- func: softplus.out(Tensor self, Scalar beta=1, Scalar threshold=20, *, Tensor(a!) out) -> Tensor(a!)
|
8235
8706
|
structured: True
|
@@ -8245,14 +8716,15 @@
|
|
8245
8716
|
python_module: nn
|
8246
8717
|
|
8247
8718
|
- func: softplus_backward.grad_input(Tensor grad_output, Tensor self, Scalar beta, Scalar threshold, Tensor output, *, Tensor(a!) grad_input) -> Tensor(a!)
|
8719
|
+
structured: True
|
8720
|
+
structured_inherits: TensorIteratorBase
|
8248
8721
|
python_module: nn
|
8249
8722
|
dispatch:
|
8250
8723
|
CPU, CUDA: softplus_backward_out
|
8251
8724
|
|
8252
8725
|
- func: softplus_backward(Tensor grad_output, Tensor self, Scalar beta, Scalar threshold, Tensor output) -> Tensor
|
8726
|
+
structured_delegate: softplus_backward.grad_input
|
8253
8727
|
python_module: nn
|
8254
|
-
dispatch:
|
8255
|
-
CPU, CUDA: softplus_backward
|
8256
8728
|
|
8257
8729
|
- func: softshrink.out(Tensor self, Scalar lambd=0.5, *, Tensor(a!) out) -> Tensor(a!)
|
8258
8730
|
structured: True
|
@@ -8268,19 +8740,21 @@
|
|
8268
8740
|
python_module: nn
|
8269
8741
|
|
8270
8742
|
- func: softshrink_backward.grad_input(Tensor grad_output, Tensor self, Scalar lambd, *, Tensor(a!) grad_input) -> Tensor(a!)
|
8743
|
+
structured: True
|
8744
|
+
structured_inherits: TensorIteratorBase
|
8271
8745
|
python_module: nn
|
8272
8746
|
dispatch:
|
8273
8747
|
CPU, CUDA: softshrink_backward_out
|
8274
8748
|
|
8275
8749
|
- func: softshrink_backward(Tensor grad_output, Tensor self, Scalar lambd) -> Tensor
|
8750
|
+
structured_delegate: softshrink_backward.grad_input
|
8276
8751
|
python_module: nn
|
8277
|
-
dispatch:
|
8278
|
-
CPU, CUDA: softshrink_backward
|
8279
8752
|
|
8280
8753
|
- func: adaptive_avg_pool2d.out(Tensor self, int[2] output_size, *, Tensor(a!) out) -> Tensor(a!)
|
8281
8754
|
python_module: nn
|
8282
8755
|
dispatch:
|
8283
|
-
CPU
|
8756
|
+
CPU: adaptive_avg_pool2d_out_cpu
|
8757
|
+
CUDA: adaptive_avg_pool2d_out_cuda
|
8284
8758
|
MkldnnCPU: mkldnn_adaptive_avg_pool2d_out
|
8285
8759
|
|
8286
8760
|
- func: adaptive_avg_pool2d(Tensor self, int[2] output_size) -> Tensor
|
@@ -8384,6 +8858,11 @@

 - func: avg_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None, *, Tensor(a!) out) -> Tensor(a!)
   python_module: nn
+  structured: True
+  precomputed:
+  - kernel_size -> int kH, int kW
+  - stride -> int dH, int dW
+  - padding -> int padH, int padW
   dispatch:
     CPU: avg_pool2d_out_cpu
     CUDA: avg_pool2d_out_cuda
@@ -8391,14 +8870,14 @@

 - func: avg_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None) -> Tensor
   python_module: nn
+  structured_delegate: avg_pool2d.out
   dispatch:
-    CPU: avg_pool2d_cpu
-    CUDA: avg_pool2d_cuda
     MkldnnCPU: mkldnn_avg_pool2d
     QuantizedCPU: avg_pool2d_quantized_cpu

 - func: avg_pool2d_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, bool ceil_mode, bool count_include_pad, int? divisor_override, *, Tensor(a!) grad_input) -> Tensor(a!)
   python_module: nn
+  structured: True
   dispatch:
     CPU: avg_pool2d_backward_out_cpu
     CUDA: avg_pool2d_backward_out_cuda
@@ -8406,13 +8885,13 @@

 - func: avg_pool2d_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, bool ceil_mode, bool count_include_pad, int? divisor_override) -> Tensor
   python_module: nn
+  structured_delegate: avg_pool2d_backward.grad_input
   dispatch:
-    CPU: avg_pool2d_backward_cpu
-    CUDA: avg_pool2d_backward_cuda
     MkldnnCPU: mkldnn_avg_pool2d_backward

 - func: avg_pool3d.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None, *, Tensor(a!) out) -> Tensor(a!)
   python_module: nn
+  structured: True
   dispatch:
     CPU: avg_pool3d_out_cpu
     CUDA: avg_pool3d_out_cuda
@@ -8420,14 +8899,14 @@

 - func: avg_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None) -> Tensor
   python_module: nn
+  structured_delegate: avg_pool3d.out
   dispatch:
-    CPU: avg_pool3d_cpu
-    CUDA: avg_pool3d_cuda
     MkldnnCPU: mkldnn_avg_pool3d
     QuantizedCPU: avg_pool3d_quantized_cpu

 - func: avg_pool3d_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, bool ceil_mode, bool count_include_pad, int? divisor_override, *, Tensor(a!) grad_input) -> Tensor(a!)
   python_module: nn
+  structured: True
   dispatch:
     CPU: avg_pool3d_backward_out_cpu
     CUDA: avg_pool3d_backward_out_cuda
@@ -8435,9 +8914,8 @@

 - func: avg_pool3d_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, bool ceil_mode, bool count_include_pad, int? divisor_override) -> Tensor
   python_module: nn
+  structured_delegate: avg_pool3d_backward.grad_input
   dispatch:
-    CPU: avg_pool3d_backward_cpu
-    CUDA: avg_pool3d_backward_cuda
     MkldnnCPU: mkldnn_avg_pool3d_backward

 # Return: (Tensor output, Tensor indices)
@@ -8604,15 +9082,14 @@

 - func: reflection_pad1d_backward.grad_input(Tensor grad_output, Tensor self, int[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
   python_module: nn
+  structured: True
   dispatch:
     CPU: reflection_pad1d_backward_out_cpu
     CUDA: reflection_pad1d_backward_out_cuda

 - func: reflection_pad1d_backward(Tensor grad_output, Tensor self, int[2] padding) -> Tensor
   python_module: nn
-  dispatch:
-    CPU: reflection_pad1d_backward_cpu
-    CUDA: reflection_pad1d_backward_cuda
+  structured_delegate: reflection_pad1d_backward.grad_input

 - func: reflection_pad2d.out(Tensor self, int[4] padding, *, Tensor(a!) out) -> Tensor(a!)
   python_module: nn
@@ -8638,6 +9115,28 @@
     CPU: reflection_pad2d_backward_cpu
     CUDA: reflection_pad2d_backward_cuda

+- func: reflection_pad3d.out(Tensor self, int[6] padding, *, Tensor(a!) out) -> Tensor(a!)
+  python_module: nn
+  structured: True
+  dispatch:
+    CPU: reflection_pad3d_out_cpu
+    CUDA: reflection_pad3d_out_cuda
+
+- func: reflection_pad3d(Tensor self, int[6] padding) -> Tensor
+  python_module: nn
+  structured_delegate: reflection_pad3d.out
+
+- func: reflection_pad3d_backward.grad_input(Tensor grad_output, Tensor self, int[6] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
+  python_module: nn
+  structured: True
+  dispatch:
+    CPU: reflection_pad3d_backward_out_cpu
+    CUDA: reflection_pad3d_backward_out_cuda
+
+- func: reflection_pad3d_backward(Tensor grad_output, Tensor self, int[6] padding) -> Tensor
+  python_module: nn
+  structured_delegate: reflection_pad3d_backward.grad_input
+
 - func: replication_pad1d.out(Tensor self, int[2] padding, *, Tensor(a!) out) -> Tensor(a!)
   python_module: nn
   structured: True
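The block above adds a new operator family, reflection_pad3d, rather than restructuring an existing one. In upstream PyTorch (Python) terms it backs reflection padding of 5-D volumetric inputs; torch-rb's Ruby bindings are generated from these entries by codegen/generate_functions.rb. A minimal sketch, assuming a PyTorch build that already ships these kernels (roughly 1.10, the release this vendored native_functions.yaml tracks):

```python
import torch
import torch.nn.functional as F

# 5-D input: (batch, channels, depth, height, width)
x = torch.arange(8.0).reshape(1, 1, 2, 2, 2)

# Pad the last three dims by 1 on each side, reflecting across the border;
# this is the behaviour the new reflection_pad3d kernels implement.
y = F.pad(x, (1, 1, 1, 1, 1, 1), mode="reflect")
print(y.shape)  # torch.Size([1, 1, 4, 4, 4])
```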
@@ -8942,33 +9441,36 @@

 - func: sigmoid_backward.grad_input(Tensor grad_output, Tensor output, *, Tensor(a!) grad_input) -> Tensor(a!)
   python_module: nn
+  structured: True
+  structured_inherits: TensorIteratorBase
   dispatch:
     CPU, CUDA: sigmoid_backward_out

 - func: sigmoid_backward(Tensor grad_output, Tensor output) -> Tensor
   python_module: nn
-  dispatch:
-    CPU, CUDA: sigmoid_backward
+  structured_delegate: sigmoid_backward.grad_input

 - func: logit_backward.grad_input(Tensor grad_output, Tensor self, float? eps=None, *, Tensor(a!) grad_input) -> Tensor(a!)
   python_module: nn
+  structured: True
+  structured_inherits: TensorIteratorBase
   dispatch:
     CPU, CUDA: logit_backward_out

 - func: logit_backward(Tensor grad_output, Tensor self, float? eps=None) -> Tensor
   python_module: nn
-  dispatch:
-    CPU, CUDA: logit_backward
+  structured_delegate: logit_backward.grad_input

 - func: tanh_backward.grad_input(Tensor grad_output, Tensor output, *, Tensor(a!) grad_input) -> Tensor(a!)
   python_module: nn
+  structured: True
+  structured_inherits: TensorIteratorBase
   dispatch:
     CPU, CUDA: tanh_backward_out

 - func: tanh_backward(Tensor grad_output, Tensor output) -> Tensor
   python_module: nn
-  dispatch:
-    CPU, CUDA: tanh_backward
+  structured_delegate: tanh_backward.grad_input

 # What's a thnn_conv_ versus a slow_conv_?
 #
@@ -8990,15 +9492,14 @@

 - func: slow_conv_transpose2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] output_padding=0, int[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!)
   python_module: nn
+  structured: True
   dispatch:
-    CPU:
-    CUDA:
+    CPU: slow_conv_transpose2d_structured_cpu
+    CUDA: slow_conv_transpose2d_structured_cuda

 - func: slow_conv_transpose2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] output_padding=0, int[2] dilation=1) -> Tensor
   python_module: nn
-  dispatch:
-    CPU: slow_conv_transpose2d_cpu
-    CUDA: slow_conv_transpose2d_cuda
+  structured_delegate: slow_conv_transpose2d.out

 - func: slow_conv_transpose2d_backward.grad_output(Tensor grad_output, Tensor self, Tensor weight, int[2] kernel_size, int[2] stride, int[2] padding, int[2] output_padding, int[2] dilation, Tensor columns, Tensor ones, *, Tensor(a!) grad_input, Tensor(b!) grad_weight, Tensor(c!) grad_bias) -> (Tensor(a!), Tensor(b!), Tensor(c!))
   python_module: nn
@@ -9046,13 +9547,13 @@
   python_module: nn
   dispatch:
     CPU: slow_conv2d_forward_out_cpu
-    CUDA:
+    CUDA: slow_conv2d_forward_out_cuda

 - func: thnn_conv2d_forward(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, int[2] padding) -> (Tensor output, Tensor finput, Tensor fgrad_input)
   python_module: nn
   dispatch:
     CPU: slow_conv2d_forward_cpu
-    CUDA:
+    CUDA: slow_conv2d_forward_cuda

 - func: thnn_conv2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor weight, int[2] kernel_size, int[2] stride, int[2] padding, Tensor finput, Tensor fgrad_input, *, Tensor(a!) grad_input, Tensor(b!) grad_weight, Tensor(c!) grad_bias) -> (Tensor(a!), Tensor(b!), Tensor(c!))
   python_module: nn
@@ -9066,31 +9567,26 @@
     CPU: slow_conv2d_backward_cpu
     CUDA: slow_conv2d_backward_cuda

-- func:
-
-
-- func: thnn_conv_depthwise2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] dilation=1) -> Tensor
-  python_module: nn
-
-- func: thnn_conv_depthwise2d_forward.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, int[2] padding, int[2] dilation, *, Tensor(a!) out) -> Tensor(a!)
+- func: _conv_depthwise2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, int[2] padding, int[2] dilation, *, Tensor(a!) out) -> Tensor(a!)
+  use_const_ref_for_mutable_tensors: True
   python_module: nn
   dispatch:
-    CUDA:
+    CUDA: conv_depthwise2d_cuda_out

-- func:
+- func: _conv_depthwise2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, int[2] padding, int[2] dilation) -> Tensor
   python_module: nn
   dispatch:
-    CUDA:
+    CUDA: conv_depthwise2d_cuda

-- func:
+- func: _conv_depthwise2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor weight, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, *, Tensor(a!) grad_input, Tensor(b!) grad_weight) -> (Tensor(a!), Tensor(b!))
   python_module: nn
   dispatch:
-    CUDA:
+    CUDA: conv_depthwise2d_backward_cuda_out

-- func:
+- func: _conv_depthwise2d_backward.output_mask(Tensor grad_output, Tensor self, Tensor weight, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, bool[2] output_mask) -> (Tensor grad_input, Tensor grad_weight)
   python_module: nn
   dispatch:
-    CUDA:
+    CUDA: conv_depthwise2d_backward_cuda

 - func: conv_depthwise3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, int[3] padding, int[3] dilation) -> Tensor
   python_module: nn
|
|
9226
9722
|
|
9227
9723
|
- func: isposinf(Tensor self) -> Tensor
|
9228
9724
|
variants: function, method
|
9725
|
+
structured_delegate: isposinf.out
|
9229
9726
|
|
9230
9727
|
- func: isposinf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
|
9728
|
+
structured: True
|
9729
|
+
structured_inherits: TensorIteratorBase
|
9231
9730
|
dispatch:
|
9232
9731
|
CPU, CUDA: isposinf_out
|
9233
9732
|
|
9234
9733
|
- func: isneginf(Tensor self) -> Tensor
|
9235
9734
|
variants: function, method
|
9735
|
+
structured_delegate: isneginf.out
|
9236
9736
|
|
9237
9737
|
- func: isneginf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
|
9738
|
+
structured: True
|
9739
|
+
structured_inherits: TensorIteratorBase
|
9238
9740
|
dispatch:
|
9239
9741
|
CPU, CUDA: isneginf_out
|
9240
9742
|
|
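A pattern that repeats throughout this diff: the out= overload becomes the structured kernel (structured: True, structured_inherits: TensorIteratorBase) and the functional overload only declares structured_delegate, so both route through a single implementation. The observable behaviour is unchanged; a small illustration against upstream PyTorch (Python, not the Ruby API):

```python
import torch

x = torch.tensor([1.0, float("inf"), -float("inf"), float("nan")])
buf = torch.empty(4, dtype=torch.bool)

# The functional overload delegates to the structured .out kernel,
# so both calls run the same code path and agree exactly.
torch.isposinf(x, out=buf)
assert torch.equal(buf, torch.isposinf(x))  # tensor([False, True, False, False])
```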
@@ -9269,6 +9771,19 @@
   dispatch:
     CPU, CUDA: special_entr_out

+- func: special_ndtri(Tensor self) -> Tensor
+  structured_delegate: special_ndtri.out
+  python_module: special
+  variants: function
+
+- func: special_ndtri.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+  structured: True
+  structured_inherits: TensorIteratorBase
+  python_module: special
+  variants: function
+  dispatch:
+    CPU, CUDA: special_ndtri_out
+
 - func: special_expm1(Tensor self) -> Tensor
   python_module: special
   variants: function
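special_ndtri is the inverse of the standard normal CDF (the probit/quantile function). An illustrative call against upstream PyTorch, assuming a build new enough to expose torch.special.ndtri (1.10 or later):

```python
import torch

p = torch.tensor([0.025, 0.5, 0.975])
# Quantiles of the standard normal distribution: roughly [-1.96, 0.0, 1.96].
print(torch.special.ndtri(p))
```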
@@ -9285,6 +9800,22 @@
   python_module: special
   variants: function

+- func: special_psi(Tensor self) -> Tensor
+  python_module: special
+  variants: function
+
+- func: special_psi.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+  python_module: special
+  variants: function
+
+- func: special_digamma(Tensor self) -> Tensor
+  python_module: special
+  variants: function
+
+- func: special_digamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+  python_module: special
+  variants: function
+
 - func: special_gammaln(Tensor self) -> Tensor
   python_module: special
   variants: function
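special_psi and special_digamma are thin special-namespace aliases for the existing digamma kernel. For orientation (upstream PyTorch, 1.10 or later):

```python
import torch

x = torch.tensor([0.5, 1.0, 2.0])
# psi and digamma name the same function: the derivative of log(gamma(x)).
assert torch.allclose(torch.special.psi(x), torch.digamma(x))
assert torch.allclose(torch.special.digamma(x), torch.digamma(x))
```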
@@ -9308,6 +9839,18 @@
 - func: special_erfc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
   python_module: special

+- func: special_erfcx(Tensor self) -> Tensor
+  python_module: special
+  variants: function
+  structured_delegate: special_erfcx.out
+
+- func: special_erfcx.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+  python_module: special
+  structured: True
+  structured_inherits: TensorIteratorBase
+  dispatch:
+    CPU, CUDA: special_erfcx_out
+
 - func: special_erfinv(Tensor self) -> Tensor
   python_module: special
   variants: function
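special_erfcx is the scaled complementary error function, erfcx(x) = exp(x^2) * erfc(x), which stays finite where the naive product overflows. A hedged sketch against upstream PyTorch (1.10 or later):

```python
import torch

x = torch.tensor([30.0])
# Naive formula: exp(900) overflows to inf while erfc(30) underflows to 0,
# so the product is inf * 0 = nan. The fused kernel stays finite.
naive = torch.exp(x ** 2) * torch.erfc(x)
print(naive)                   # tensor([nan])
print(torch.special.erfcx(x))  # tensor([0.0188])
```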
@@ -9315,6 +9858,14 @@
 - func: special_erfinv.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
   python_module: special

+- func: special_ndtr(Tensor self) -> Tensor
+  python_module: special
+  variants: function
+
+- func: special_ndtr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+  python_module: special
+  variants: function
+
 - func: special_xlog1py(Tensor self, Tensor other) -> Tensor
   device_check: NoCheck # TensorIterator
   python_module: special
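special_ndtr is the standard normal CDF, the companion to special_ndtri added earlier. Illustrative Python (upstream PyTorch 1.10 or later):

```python
import torch

x = torch.tensor([-1.96, 0.0, 1.96])
p = torch.special.ndtr(x)  # roughly [0.025, 0.5, 0.975]
# ndtri inverts ndtr up to floating-point error.
assert torch.allclose(torch.special.ndtri(p), x, atol=1e-4)
```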
@@ -9358,6 +9909,89 @@
   dispatch:
     CompositeExplicitAutograd: special_xlog1py_out

+- func: special_xlogy(Tensor self, Tensor other) -> Tensor
+  device_check: NoCheck # TensorIterator
+  python_module: special
+  variants: function
+
+- func: special_xlogy.self_scalar(Scalar self, Tensor other) -> Tensor
+  device_check: NoCheck # TensorIterator
+  python_module: special
+  variants: function
+
+- func: special_xlogy.other_scalar(Tensor self, Scalar other) -> Tensor
+  device_check: NoCheck # TensorIterator
+  python_module: special
+  variants: function
+
+- func: special_xlogy.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+  device_check: NoCheck # TensorIterator
+  python_module: special
+  variants: function
+
+- func: special_xlogy.self_scalar_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+  device_check: NoCheck # TensorIterator
+  python_module: special
+  variants: function
+
+- func: special_xlogy.other_scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
+  device_check: NoCheck # TensorIterator
+  python_module: special
+  variants: function
+
+- func: special_zeta(Tensor self, Tensor other) -> Tensor
+  device_check: NoCheck # TensorIterator
+  python_module: special
+  variants: function
+  structured_delegate: special_zeta.out
+  dispatch:
+    CompositeExplicitAutograd: special_zeta
+
+- func: special_zeta.self_scalar(Scalar self, Tensor other) -> Tensor
+  device_check: NoCheck # TensorIterator
+  python_module: special
+  variants: function
+  dispatch:
+    CompositeExplicitAutograd: special_zeta
+
+- func: special_zeta.other_scalar(Tensor self, Scalar other) -> Tensor
+  device_check: NoCheck # TensorIterator
+  python_module: special
+  variants: function
+  dispatch:
+    CompositeExplicitAutograd: special_zeta
+
+- func: special_zeta.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+  device_check: NoCheck # TensorIterator
+  structured: True
+  structured_inherits: TensorIteratorBase
+  python_module: special
+  variants: function
+  dispatch:
+    CPU, CUDA: special_zeta_out
+
+- func: special_zeta.self_scalar_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+  device_check: NoCheck # TensorIterator
+  python_module: special
+  variants: function
+  dispatch:
+    CompositeExplicitAutograd: special_zeta_out
+
+- func: special_zeta.other_scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
+  device_check: NoCheck # TensorIterator
+  python_module: special
+  variants: function
+  dispatch:
+    CompositeExplicitAutograd: special_zeta_out
+
+- func: special_i0(Tensor self) -> Tensor
+  python_module: special
+  variants: function
+
+- func: special_i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+  python_module: special
+  variants: function
+
 - func: special_i0e(Tensor self) -> Tensor
   python_module: special
   variants: function
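special_xlogy computes x * log(y) with the convention that the result is 0 wherever x is 0, and special_zeta is the Hurwitz zeta function zeta(x, q). For example (upstream PyTorch 1.10 or later):

```python
import torch

x = torch.tensor([0.0, 2.0])
y = torch.tensor([0.0, 3.0])
# 0 * log(0) is defined as 0 here instead of nan.
print(torch.special.xlogy(x, y))  # tensor([0.0000, 2.1972])

# Hurwitz zeta; zeta(2, 1) is the Riemann zeta value pi**2 / 6.
print(torch.special.zeta(torch.tensor(2.0), torch.tensor(1.0)))  # ~1.6449
```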
@@ -9370,6 +10004,30 @@
   dispatch:
     CPU, CUDA: special_i0e_out

+- func: special_i1(Tensor self) -> Tensor
+  python_module: special
+  variants: function
+  structured_delegate: special_i1.out
+
+- func: special_i1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+  python_module: special
+  structured: True
+  structured_inherits: TensorIteratorBase
+  dispatch:
+    CPU, CUDA: special_i1_out
+
+- func: special_i1e(Tensor self) -> Tensor
+  python_module: special
+  variants: function
+  structured_delegate: special_i1e.out
+
+- func: special_i1e.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+  python_module: special
+  structured: True
+  structured_inherits: TensorIteratorBase
+  dispatch:
+    CPU, CUDA: special_i1e_out
+
 - func: special_logit(Tensor self, float? eps=None) -> Tensor
   python_module: special
   variants: function
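special_i1 is the order-1 modified Bessel function of the first kind and special_i1e its exponentially scaled variant, i1e(x) = exp(-|x|) * i1(x), which avoids overflow for large arguments. A quick check (upstream PyTorch 1.10 or later):

```python
import torch

x = torch.tensor([0.5, 1.0, 5.0])
i1 = torch.special.i1(x)
i1e = torch.special.i1e(x)
# The scaled variant is the unscaled one damped by exp(-|x|).
assert torch.allclose(i1e, torch.exp(-x.abs()) * i1, rtol=1e-4)
```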
@@ -9377,6 +10035,20 @@
 - func: special_logit.out(Tensor self, float? eps=None, *, Tensor(a!) out) -> Tensor(a!)
   python_module: special

+- func: special_polygamma(int n, Tensor self) -> Tensor
+  python_module: special
+  variants: function, method
+
+- func: special_polygamma.out(int n, Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+  python_module: special
+
+- func: special_logsumexp(Tensor self, int[1] dim, bool keepdim=False) -> Tensor
+  python_module: special
+  variants: function
+
+- func: special_logsumexp.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
+  python_module: special
+
 - func: special_expit(Tensor self) -> Tensor
   python_module: special
   variants: function
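Both entries are special-namespace wrappers over existing operators: special_polygamma mirrors polygamma(n, x) and special_logsumexp mirrors logsumexp. Illustrative equivalences (upstream PyTorch 1.10 or later):

```python
import torch

x = torch.rand(3) + 0.5  # keep arguments positive for polygamma
assert torch.allclose(torch.special.polygamma(1, x), torch.polygamma(1, x))

m = torch.randn(2, 4)
assert torch.allclose(torch.special.logsumexp(m, dim=1), torch.logsumexp(m, dim=1))
```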
@@ -9385,6 +10057,58 @@
   python_module: special
   variants: function

+- func: special_sinc(Tensor self) -> Tensor
+  python_module: special
+  variants: function
+
+- func: special_sinc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+  python_module: special
+  variants: function
+
+- func: special_round(Tensor self) -> Tensor
+  python_module: special
+  variants: function
+
+- func: special_round.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+  python_module: special
+  variants: function
+
+- func: special_log1p(Tensor self) -> Tensor
+  python_module: special
+  variants: function
+
+- func: special_log1p.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+  python_module: special
+  variants: function
+
+- func: special_log_softmax(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor
+  python_module: special
+  variants: function
+
+- func: special_gammainc.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+  python_module: special
+  variants: function
+
+- func: special_gammainc(Tensor self, Tensor other) -> Tensor
+  python_module: special
+  variants: function
+
+- func: special_gammaincc.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+  python_module: special
+  variants: function
+
+- func: special_gammaincc(Tensor self, Tensor other) -> Tensor
+  python_module: special
+  variants: function
+
+- func: special_multigammaln(Tensor self, int p) -> Tensor
+  python_module: special
+  variants: function
+
+- func: special_multigammaln.out(Tensor self, int p, *, Tensor(a!) out) -> Tensor(a!)
+  python_module: special
+  variants: function
+
 ## Functions related to the fast Fourier transform and the torch.fft namespace
 # Note [FFT namespace binding]
 # Functions in the fft python module should have their names start with
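Most of these are aliases (sinc, round, log1p, log_softmax, multigammaln); special_gammainc and special_gammaincc are the regularized lower and upper incomplete gamma functions, which sum to one. For instance (upstream PyTorch 1.10 or later):

```python
import torch

a = torch.tensor([0.5, 2.0, 4.0])
x = torch.tensor([1.0, 1.0, 1.0])
lower = torch.special.gammainc(a, x)
upper = torch.special.gammaincc(a, x)
# Regularized lower + upper incomplete gamma functions sum to 1.
assert torch.allclose(lower + upper, torch.ones(3))
```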
@@ -9542,41 +10266,47 @@
 # See linalg_det as an example.

 # "_ex" stands for experimental
-- func: linalg_cholesky_ex(Tensor self, *, bool check_errors=False) -> (Tensor L, Tensor info)
+- func: linalg_cholesky_ex(Tensor self, *, bool upper=False, bool check_errors=False) -> (Tensor L, Tensor info)
   python_module: linalg
   variants: function
   dispatch:
     CPU, CUDA: linalg_cholesky_ex

-- func: linalg_cholesky_ex.L(Tensor self, *, bool check_errors=False, Tensor(a!) L, Tensor(b!) info) -> (Tensor(a!) L, Tensor(b!) info)
+- func: linalg_cholesky_ex.L(Tensor self, *, bool upper=False, bool check_errors=False, Tensor(a!) L, Tensor(b!) info) -> (Tensor(a!) L, Tensor(b!) info)
   python_module: linalg
   variants: function
   dispatch:
     CPU, CUDA: linalg_cholesky_ex_out

-- func: linalg_cholesky(Tensor self) -> Tensor
+- func: linalg_cholesky(Tensor self, *, bool upper=False) -> Tensor
   python_module: linalg
   variants: function

-- func: linalg_cholesky.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+- func: linalg_cholesky.out(Tensor self, *, bool upper=False, Tensor(a!) out) -> Tensor(a!)
   python_module: linalg
   variants: function

 - func: linalg_det(Tensor self) -> Tensor
   python_module: linalg
   variants: function
-  dispatch:
-    CompositeExplicitAutograd: linalg_det

 - func: linalg_det.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
   python_module: linalg
-  dispatch:
-    CompositeExplicitAutograd: linalg_det_out

 # torch.det, alias for torch.linalg.det
 - func: det(Tensor self) -> Tensor
   variants: function, method

+- func: _det_lu_based_helper(Tensor self) -> (Tensor det, Tensor lu, Tensor pivs)
+  variants: function
+  dispatch:
+    CPU, CUDA: _det_lu_based_helper
+
+- func: _det_lu_based_helper_backward_helper(Tensor det_grad, Tensor det, Tensor self, Tensor lu, Tensor pivs) -> Tensor
+  variants: function
+  dispatch:
+    CPU, CUDA: _det_lu_based_helper_backward_helper
+
 - func: linalg_lstsq(Tensor self, Tensor b, float? rcond=None, *, str? driver=None) -> (Tensor solution, Tensor residuals, Tensor rank, Tensor singular_values)
   python_module: linalg
   variants: function
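The Cholesky entries gain an upper= keyword: linalg_cholesky returns the lower-triangular factor by default and the upper-triangular one when upper=True, while linalg_cholesky_ex reports failures through an info tensor instead of raising. A sketch (upstream PyTorch 1.10 or later):

```python
import torch

a = torch.randn(3, 3, dtype=torch.float64)
spd = a @ a.T + 3 * torch.eye(3, dtype=torch.float64)  # symmetric positive definite

L = torch.linalg.cholesky(spd)              # lower triangular: spd == L @ L.T
U = torch.linalg.cholesky(spd, upper=True)  # upper triangular: spd == U.T @ U
assert torch.allclose(L @ L.T, spd)
assert torch.allclose(U, L.T)

# The _ex variant signals failure through `info` rather than an exception.
L2, info = torch.linalg.cholesky_ex(spd, check_errors=False)
assert int(info) == 0
```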
@@ -9589,6 +10319,14 @@
   dispatch:
     CPU, CUDA: linalg_lstsq_out

+# torch.linalg.matmul, alias for torch.matmul
+- func: linalg_matmul(Tensor self, Tensor other) -> Tensor
+  python_module: linalg
+  variants: function
+
+- func: linalg_matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+  python_module: linalg
+
 - func: linalg_slogdet(Tensor self) -> (Tensor sign, Tensor logabsdet)
   python_module: linalg
   variants: function
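As the comment notes, linalg_matmul is a pure alias added so the linalg namespace mirrors NumPy; it dispatches to the same kernel as matmul (upstream PyTorch 1.10 or later):

```python
import torch

a = torch.randn(2, 3)
b = torch.randn(3, 4)
# Same kernel underneath, so the results are bit-identical.
assert torch.equal(torch.linalg.matmul(a, b), torch.matmul(a, b))
```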
@@ -9621,12 +10359,12 @@
   python_module: linalg
   variants: function
   dispatch:
-
+    CPU, CUDA: linalg_eigh

 - func: linalg_eigh.eigvals(Tensor self, str UPLO="L", *, Tensor(a!) eigvals, Tensor(b!) eigvecs) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors)
   python_module: linalg
   dispatch:
-
+    CPU, CUDA: linalg_eigh_out

 - func: linalg_eigvalsh(Tensor self, str UPLO="L") -> Tensor
   python_module: linalg
@@ -9634,6 +10372,8 @@

 - func: linalg_eigvalsh.out(Tensor self, str UPLO='L', *, Tensor(a!) out) -> Tensor(a!)
   python_module: linalg
+  dispatch:
+    CPU, CUDA: linalg_eigvalsh_out

 - func: linalg_householder_product(Tensor input, Tensor tau) -> Tensor
   python_module: linalg
@@ -9677,20 +10417,16 @@

 - func: inner.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)

-# torch.outer, alias for torch.ger
 - func: outer(Tensor self, Tensor vec2) -> Tensor
   variants: function, method

 - func: outer.out(Tensor self, Tensor vec2, *, Tensor(a!) out) -> Tensor(a!)

+# torch.ger, alias for torch.outer
 - func: ger(Tensor self, Tensor vec2) -> Tensor
   variants: function, method
-  dispatch:
-    CompositeExplicitAutograd: ger

 - func: ger.out(Tensor self, Tensor vec2, *, Tensor(a!) out) -> Tensor(a!)
-  dispatch:
-    CompositeExplicitAutograd: ger_out

 - func: linalg_norm(Tensor self, Scalar? ord=None, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
   python_module: linalg
@@ -9778,22 +10514,16 @@
   python_module: linalg
   variants: function

-- func: _linalg_solve_out_helper_(Tensor(a!) self, Tensor(b!) other, Tensor(c!) infos) -> Tensor(a!)
-  variants: function
-  dispatch:
-    CPU: _linalg_solve_out_helper_cpu
-    CUDA: _linalg_solve_out_helper_cuda
-
 - func: linalg_solve(Tensor input, Tensor other) -> Tensor
   python_module: linalg
   variants: function
   dispatch:
-
+    CPU, CUDA: linalg_solve

 - func: linalg_solve.out(Tensor input, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
   python_module: linalg
   dispatch:
-
+    CPU, CUDA: linalg_solve_out

 - func: linalg_tensorinv(Tensor self, int ind=2) -> Tensor
   python_module: linalg
@@ -9897,10 +10627,10 @@
   dispatch:
     CPU, CUDA: segment_reduce_kernel

-- func:
+- func: _segment_reduce_backward(Tensor grad, Tensor output, Tensor data, str reduce, *, Tensor? lengths=None, int axis=0) -> Tensor
   variants: function
   dispatch:
-    CPU, CUDA:
+    CPU, CUDA: _segment_reduce_backward_kernel

 - func: pad_sequence(Tensor[] sequences, bool batch_first=False, float padding_value=0.0) -> Tensor
   python_module: nn