torch-rb 0.2.5 → 0.3.2

@@ -66,6 +66,7 @@ module Torch
   end
 
   next if t == "Generator?"
+  next if t == "MemoryFormat"
   next if t == "MemoryFormat?"
   args << {name: k, type: t, default: d, pos: pos, has_default: has_default}
 end
@@ -18,7 +18,7 @@ module Torch
   functions = functions()
 
   # skip functions
-  skip_args = ["bool[3]", "Dimname", "MemoryFormat", "Layout", "Storage", "ConstQuantizerPtr"]
+  skip_args = ["bool[3]", "Dimname", "Layout", "Storage", "ConstQuantizerPtr"]
 
   # remove functions
   functions.reject! do |f|
@@ -31,7 +31,7 @@ module Torch
   todo_functions, functions =
     functions.partition do |f|
       f.args.any? do |a|
-        a[:type].include?("?") && !["Tensor?", "Generator?", "int?", "ScalarType?"].include?(a[:type]) ||
+        a[:type].include?("?") && !["Tensor?", "Generator?", "int?", "ScalarType?", "Tensor?[]"].include?(a[:type]) ||
          skip_args.any? { |sa| a[:type].include?(sa) } ||
          # native_functions.yaml is missing size argument for normal
          # https://pytorch.org/cppdocs/api/function_namespacetorch_1a80253fe5a3ded4716ec929a348adb4b9.html
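The generator splits declarations with Ruby's `Enumerable#partition`: schemas whose argument types match the predicate go to `todo_functions` (not yet bindable), the rest get bindings generated. This change whitelists `Tensor?[]` so functions taking optional tensor lists are no longer skipped. A minimal illustration of `partition`'s semantics:

```ruby
# Enumerable#partition returns two arrays: elements for which the block
# is true, then everything else.
todo, ready = ["Tensor?[]", "Tensor", "Dimname"].partition { |t| t == "Dimname" }
# todo  => ["Dimname"]
# ready => ["Tensor?[]", "Tensor"]
```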
@@ -112,6 +112,9 @@ void add_%{type}_functions(Module m) {
       "OptionalScalarType"
     when "Tensor[]"
       "TensorList"
+    when "Tensor?[]"
+      # TODO make optional
+      "TensorList"
     when "int"
       "int64_t"
     when "float"
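This `case` maps the schema's argument types onto the C++ types used in the generated function signatures; per the TODO, `Tensor?[]` is treated like a plain `TensorList` for now rather than a truly optional list. A condensed sketch of the mapping (method name and surrounding code assumed, not from the source):

```ruby
# Hypothetical condensed view of the type mapping this hunk extends.
def cpp_type(schema_type)
  case schema_type
  when "Tensor[]", "Tensor?[]" # Tensor?[] is not yet truly optional (TODO)
    "TensorList"
  when "int"
    "int64_t"  # schema "int" is a 64-bit integer in C++
  when "float"
    "double"   # schema "float" is a C++ double
  else
    schema_type
  end
end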
@@ -1,44 +1,52 @@
 # See README.md in this directory for more guidance
 
+# *********NB: _cast_* operators are DEPRECATED and will be removed
+# eventually. These were previously used before TorchScript IR supported
+# representing ScalarType's. They are now superseded by usage of
+# `aten::to()`. The ops remain here for backward compatibility purposes.
 
-# Temporary type cast operators. These are needed to trace type-casts now since
-# Type's are not supported in the IR. Instead, we call down to these
-# specialized operators for each datatype.
-# TODO: remove when we have Type support in the IR
+# DEPRECATED. DO NOT USE
 - func: _cast_Byte(Tensor self, bool non_blocking=False) -> Tensor
   use_c10_dispatcher: full
   variants: function
 
+# DEPRECATED. DO NOT USE
 - func: _cast_Char(Tensor self, bool non_blocking=False) -> Tensor
   use_c10_dispatcher: full
   variants: function
 
+# DEPRECATED. DO NOT USE
 - func: _cast_Double(Tensor self, bool non_blocking=False) -> Tensor
   use_c10_dispatcher: full
   variants: function
 
+# DEPRECATED. DO NOT USE
 - func: _cast_Float(Tensor self, bool non_blocking=False) -> Tensor
   use_c10_dispatcher: full
   variants: function
 
+# DEPRECATED. DO NOT USE
 - func: _cast_Int(Tensor self, bool non_blocking=False) -> Tensor
   use_c10_dispatcher: full
   variants: function
 
+# DEPRECATED. DO NOT USE
 - func: _cast_Long(Tensor self, bool non_blocking=False) -> Tensor
   use_c10_dispatcher: full
   variants: function
 
+# DEPRECATED. DO NOT USE
 - func: _cast_Short(Tensor self, bool non_blocking=False) -> Tensor
   use_c10_dispatcher: full
   variants: function
 
+# DEPRECATED. DO NOT USE
 - func: _cast_Half(Tensor self, bool non_blocking=False) -> Tensor
   use_c10_dispatcher: full
   variants: function
 
 # Computes the gradient of current tensor w.r.t. graph leaves.
-- func: backward(Tensor self, Tensor? gradient=None, bool keep_graph=False, bool create_graph=False) -> ()
+- func: backward(Tensor self, Tensor? gradient=None, bool? retain_graph=None, bool create_graph=False) -> ()
   manual_kernel_registration: True
   variants: method
 
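Two user-visible changes in the hunk above: the `_cast_*` ops are now marked deprecated in favor of `aten::to()`, and `backward`'s `keep_graph` flag becomes an optional `retain_graph`, matching the Python API name. Roughly how this surfaces in Ruby, assuming torch-rb's generated `to` and `backward` methods mirror these schemas:

```ruby
require "torch"

x = Torch.tensor([1, 2, 3])
f = x.to(dtype: :float32)  # preferred over the deprecated _cast_Float op

y = Torch.tensor([2.0, 3.0], requires_grad: true)
loss = (y * y).sum
loss.backward(retain_graph: true) # keyword follows the renamed schema argument
loss.backward                     # second pass works because the graph was retained
```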
@@ -79,14 +87,13 @@
   use_c10_dispatcher: full
   manual_kernel_registration: True
   variants: method
-  supports_named_tensor: True
 
 - func: _version(Tensor self) -> int
   use_c10_dispatcher: full
   manual_kernel_registration: True
   variants: method
 
-- func: requires_grad_(Tensor(a!) self, bool _requires_grad=True) -> Tensor(a!)
+- func: requires_grad_(Tensor(a!) self, bool requires_grad=True) -> Tensor(a!)
   manual_kernel_registration: True
   variants: method
 
@@ -98,45 +105,39 @@
 
 - func: rename_(Tensor(a!) self, Dimname[]? names) -> Tensor(a!)
   variants: method
-  supports_named_tensor: True
 
 - func: rename(Tensor(a) self, Dimname[]? names) -> Tensor(a)
   variants: method
-  supports_named_tensor: True
 
 - func: align_to(Tensor(a) self, Dimname[] names) -> Tensor(a)
   variants: method
-  supports_named_tensor: True
 
 - func: align_to.ellipsis_idx(Tensor(a) self, Dimname[] order, int ellipsis_idx) -> Tensor(a)
   variants: method
-  supports_named_tensor: True
 
 - func: align_as(Tensor self, Tensor other) -> Tensor
+  use_c10_dispatcher: full
   variants: method
-  supports_named_tensor: True
 
 - func: align_tensors(Tensor[] tensors) -> Tensor[]
-  supports_named_tensor: True
+  use_c10_dispatcher: full
 
 - func: refine_names(Tensor(a) self, Dimname[] names) -> Tensor(a)
   variants: method
-  supports_named_tensor: True
 
 - func: unflatten.Dimname(Tensor self, Dimname dim, int[] sizes, Dimname[] names) -> Tensor
   variants: method
-  supports_named_tensor: True
 
 - func: unflatten.int(Tensor self, int dim, int[] sizes, Dimname[] names) -> Tensor
   variants: method
-  supports_named_tensor: True
-
 
 - func: _use_cudnn_ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank) -> bool
+  use_c10_dispatcher: full
   dispatch:
     CUDA: _use_cudnn_ctc_loss
 
 - func: _cudnn_ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank, bool deterministic, bool zero_infinity) -> (Tensor, Tensor)
+  use_c10_dispatcher: full
   dispatch:
     CUDA: _cudnn_ctc_loss
 
@@ -144,6 +145,7 @@
   use_c10_dispatcher: full
 
 - func: _cudnn_rnn_flatten_weight(Tensor[] weight_arr, int weight_stride0, int input_size, int mode, int hidden_size, int num_layers, bool batch_first, bool bidirectional) -> Tensor
+  use_c10_dispatcher: full
   dispatch:
     CUDA: _cudnn_rnn_flatten_weight
 
@@ -167,7 +169,6 @@
   variants: function
   dispatch:
     CUDA: fused_dropout_cuda
-  supports_named_tensor: True
 
 - func: _masked_scale(Tensor self, Tensor mask, float scale) -> Tensor
   use_c10_dispatcher: full
@@ -179,13 +180,10 @@
 
 - func: _sobol_engine_ff_(Tensor(a!) self, int n, Tensor sobolstate, int dimension, int num_generated) -> Tensor(a!)
 
-
 - func: _sobol_engine_scramble_(Tensor(a!) self, Tensor ltm, int dimension) -> Tensor(a!)
 
-
 - func: _sobol_engine_initialize_state_(Tensor(a!) self, int dimension) -> Tensor(a!)
 
-
 - func: _reshape_from_tensor(Tensor self, Tensor shape) -> Tensor
   use_c10_dispatcher: full
 
@@ -194,10 +192,8 @@
 
 - func: dropout(Tensor input, float p, bool train) -> Tensor
   use_c10_dispatcher: full
-  supports_named_tensor: True
 
 - func: dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)
-  supports_named_tensor: True
 
 - func: feature_dropout(Tensor input, float p, bool train) -> Tensor
   use_c10_dispatcher: full
@@ -209,69 +205,84 @@
 
 - func: alpha_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)
 
-
 - func: feature_alpha_dropout(Tensor input, float p, bool train) -> Tensor
   use_c10_dispatcher: full
 
 - func: feature_alpha_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)
 
-
 - func: abs(Tensor self) -> Tensor
   use_c10_dispatcher: full
   variants: function, method
-  supports_named_tensor: True
 
 - func: abs_(Tensor(a!) self) -> Tensor(a!)
   variants: function, method
-  supports_named_tensor: True
 
 - func: abs.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
-  supports_named_tensor: True
+
+- func: absolute(Tensor self) -> Tensor
+  use_c10_dispatcher: full
+  variants: function, method
+  dispatch:
+    CPU: abs
+    CUDA: abs
+
+- func: absolute_(Tensor(a!) self) -> Tensor(a!)
+  variants: function, method
+  dispatch:
+    CPU: abs_
+    CUDA: abs_
+
+- func: absolute.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+  dispatch:
+    CPU: abs_out
+    CUDA: abs_out
 
 - func: angle(Tensor self) -> Tensor
   use_c10_dispatcher: full
   variants: function, method
-  supports_named_tensor: True
 
 - func: angle.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
-  supports_named_tensor: True
 
-- func: real(Tensor self) -> Tensor
+- func: view_as_real(Tensor(a) self) -> Tensor(a)
   use_c10_dispatcher: full
   variants: function
-  supports_named_tensor: True
 
-- func: imag(Tensor self) -> Tensor
+- func: view_as_complex(Tensor(a) self) -> Tensor(a)
+  use_c10_dispatcher: full
+  variants: function
+
+- func: real(Tensor(a) self) -> Tensor(a)
+  use_c10_dispatcher: full
+  variants: function
+
+- func: imag(Tensor(a) self) -> Tensor(a)
   use_c10_dispatcher: full
   variants: function
-  supports_named_tensor: True
 
 - func: conj(Tensor self) -> Tensor
   use_c10_dispatcher: full
   variants: function, method
-  supports_named_tensor: True
 
 - func: conj.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
-  supports_named_tensor: True
 
 - func: acos(Tensor self) -> Tensor
   use_c10_dispatcher: full
-  supports_named_tensor: True
   variants: function, method
 
 - func: acos_(Tensor(a!) self) -> Tensor(a!)
-  supports_named_tensor: True
   variants: function, method
 
 - func: acos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
-  supports_named_tensor: True
 
 - func: avg_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, bool ceil_mode=False, bool count_include_pad=True) -> Tensor
+  use_c10_dispatcher: full
 
 - func: adaptive_avg_pool1d(Tensor self, int[1] output_size) -> Tensor
+  use_c10_dispatcher: full
 
 # Return: (Tensor output, Tensor indices)
 - func: adaptive_max_pool1d(Tensor self, int[1] output_size) -> (Tensor, Tensor)
+  use_c10_dispatcher: full
 
 - func: add.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
   use_c10_dispatcher: full
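Note that the new `absolute`/`absolute_`/`absolute.out` entries dispatch to the same `abs` kernels, so `absolute` is simply an alias of `abs` (NumPy-style naming). Assuming torch-rb generates bindings for both, the two are interchangeable:

```ruby
x = Torch.tensor([-1.0, 2.0, -3.0])
Torch.abs(x)      # => tensor([1., 2., 3.])
Torch.absolute(x) # same kernel under the hood (hypothetical generated binding)
```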
@@ -282,7 +293,7 @@
     SparseCPU: add_sparse
     SparseCUDA: add_sparse
     MkldnnCPU: mkldnn_add
-  supports_named_tensor: True
+    Vulkan: vulkan_add
 
 - func: add_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)
   variants: method
@@ -292,7 +303,6 @@
     SparseCPU: add_sparse_
     SparseCUDA: add_sparse_
     MkldnnCPU: mkldnn_add_
-  supports_named_tensor: True
 
 - func: add.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
   dispatch:
@@ -301,38 +311,28 @@
     SparseCPU: add_out_sparse_cpu
     SparseCUDA: add_out_sparse_cuda
     MkldnnCPU: mkldnn_add_out
-  supports_named_tensor: True
 
 # For C++ only, until we have conversion from C++ numbers to Tensor
 - func: add.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor
   use_c10_dispatcher: full
   variants: function, method
-  supports_named_tensor: True
 
 - func: add_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)
   variants: method
-  supports_named_tensor: True
 
 - func: addmv(Tensor self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1) -> Tensor
   use_c10_dispatcher: full
   variants: function, method
-  dispatch:
-    CPU: legacy::cpu::_th_addmv
-    CUDA: legacy::cuda::_th_addmv
-  supports_named_tensor: True
 
 - func: addmv_(Tensor(a!) self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)
   variants: function, method
-  dispatch:
-    CPU: legacy::cpu::_th_addmv_
-    CUDA: legacy::cuda::_th_addmv_
-  supports_named_tensor: True
 
 - func: addmv.out(Tensor self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
+
+- func: _addmv_impl_(Tensor(a!) self, Tensor self2, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)
   dispatch:
-    CPU: legacy::cpu::_th_addmv_out
-    CUDA: legacy::cuda::_th_addmv_out
-  supports_named_tensor: True
+    CPU: addmv_impl_cpu
+    CUDA: addmv_impl_cuda
 
 - func: addr(Tensor self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
   use_c10_dispatcher: full
@@ -344,9 +344,11 @@
 - func: addr.out(Tensor self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
 
 - func: affine_grid_generator(Tensor theta, int[] size, bool align_corners) -> Tensor
+  use_c10_dispatcher: full
   variants: function
 
 - func: affine_grid_generator_backward(Tensor grad, int[] size, bool align_corners) -> Tensor
+  use_c10_dispatcher: full
   variants: function
 
 - func: all.dim(Tensor self, int dim, bool keepdim=False) -> Tensor
@@ -397,25 +399,64 @@
   use_c10_dispatcher: full
 
 - func: argmax(Tensor self, int? dim=None, bool keepdim=False) -> Tensor
+  use_c10_dispatcher: full
   variants: function, method
   dispatch:
     CPU: argmax
     CUDA: argmax
 
 - func: argmin(Tensor self, int? dim=None, bool keepdim=False) -> Tensor
+  use_c10_dispatcher: full
   variants: function, method
   dispatch:
     CPU: argmin
     CUDA: argmin
 
+- func: acosh(Tensor self) -> Tensor
+  use_c10_dispatcher: full
+  supports_named_tensor: True
+  variants: function, method
+
+- func: acosh_(Tensor(a!) self) -> Tensor(a!)
+  supports_named_tensor: True
+  variants: function, method
+
+- func: acosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+  supports_named_tensor: True
+
+- func: asinh(Tensor self) -> Tensor
+  use_c10_dispatcher: full
+  supports_named_tensor: True
+  variants: function, method
+
+- func: asinh_(Tensor(a!) self) -> Tensor(a!)
+  supports_named_tensor: True
+  variants: function, method
+
+- func: asinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+  supports_named_tensor: True
+
+- func: atanh(Tensor self) -> Tensor
+  use_c10_dispatcher: full
+  supports_named_tensor: True
+  variants: function, method
+
+- func: atanh_(Tensor(a!) self) -> Tensor(a!)
+  supports_named_tensor: True
+  variants: function, method
+
+- func: atanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+  supports_named_tensor: True
+
 - func: as_strided(Tensor(a) self, int[] size, int[] stride, int? storage_offset=None) -> Tensor(a)
+  use_c10_dispatcher: full
   variants: function, method
   dispatch:
     CPU: as_strided_tensorimpl
     CUDA: as_strided_tensorimpl
     QuantizedCPU: as_strided_qtensorimpl
+    QuantizedCUDA: as_strided_qtensorimpl
   device_guard: False
-  supports_named_tensor: True
 
 - func: as_strided_(Tensor(a!) self, int[] size, int[] stride, int? storage_offset=None) -> Tensor(a!)
   variants: function, method
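This hunk also introduces the inverse hyperbolic functions `acosh`, `asinh`, and `atanh`, each with in-place and `out` variants. Assuming torch-rb generates the usual function and method bindings from these schemas:

```ruby
x = Torch.tensor([0.5])
Torch.atanh(x)  # == 0.5 * log((1 + x) / (1 - x)), elementwise
x.asinh         # method variant (variants: function, method)
```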
@@ -423,33 +464,21 @@
 
 - func: asin(Tensor self) -> Tensor
   use_c10_dispatcher: full
-  supports_named_tensor: True
   variants: function, method
 
 - func: asin_(Tensor(a!) self) -> Tensor(a!)
-  supports_named_tensor: True
   variants: function, method
 
 - func: asin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
-  supports_named_tensor: True
 
 - func: atan(Tensor self) -> Tensor
   use_c10_dispatcher: full
-  supports_named_tensor: True
   variants: function, method
 
 - func: atan_(Tensor(a!) self) -> Tensor(a!)
-  supports_named_tensor: True
   variants: function, method
-  dispatch:
-    CPU: _atan__cpu
-    CUDA: _atan__cuda
 
 - func: atan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
-  supports_named_tensor: True
-  dispatch:
-    CPU: _atan_out_cpu
-    CUDA: _atan_out_cuda
 
 - func: baddbmm(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
   use_c10_dispatcher: full
@@ -491,25 +520,15 @@
 # Sample bernoulli with values in `self` as probability.
 - func: bernoulli(Tensor self, *, Generator? generator=None) -> Tensor
   variants: function, method
-  supports_named_tensor: True
 
 - func: bernoulli.out(Tensor self, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
   variants: function
-  supports_named_tensor: True
 
 - func: bernoulli_.Tensor(Tensor(a!) self, Tensor p, *, Generator? generator=None) -> Tensor(a!)
   variants: method
-  dispatch:
-    CPU: bernoulli_tensor_cpu_
-    CUDA: bernoulli_tensor_cuda_
-  supports_named_tensor: True
 
 - func: bernoulli_.float(Tensor(a!) self, float p=0.5, *, Generator? generator=None) -> Tensor(a!)
   variants: method
-  dispatch:
-    CPU: bernoulli_scalar_cpu_
-    CUDA: bernoulli_scalar_cuda_
-  supports_named_tensor: True
 
 # This out-of-place version isn't used explicitly, but needed by jit.
 # There is no default valid on `p` here because it would introduce ambiguity
@@ -561,74 +580,63 @@
 
 - func: bitwise_not(Tensor self) -> Tensor
   use_c10_dispatcher: full
-  supports_named_tensor: True
   variants: function, method
 
 - func: bitwise_not_(Tensor(a!) self) -> Tensor(a!)
-  supports_named_tensor: True
   variants: method
 
 - func: bitwise_not.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
-  supports_named_tensor: True
   dispatch:
     CPU: bitwise_not_out
     CUDA: bitwise_not_out
 
 - func: logical_not(Tensor self) -> Tensor
-  supports_named_tensor: True
+  use_c10_dispatcher: full
   variants: function, method
 
 - func: logical_not_(Tensor(a!) self) -> Tensor(a!)
-  supports_named_tensor: True
   variants: method
 
 - func: logical_not.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
-  supports_named_tensor: True
   dispatch:
     CPU: logical_not_out
     CUDA: logical_not_out
 
 - func: logical_xor(Tensor self, Tensor other) -> Tensor
+  use_c10_dispatcher: full
   variants: function, method
-  supports_named_tensor: True
 
 - func: logical_xor_(Tensor(a!) self, Tensor other) -> Tensor(a!)
   variants: method
-  supports_named_tensor: True
 
 - func: logical_xor.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
   dispatch:
     CPU: logical_xor_out
     CUDA: logical_xor_out
-  supports_named_tensor: True
 
 - func: logical_and(Tensor self, Tensor other) -> Tensor
+  use_c10_dispatcher: full
   variants: function, method
-  supports_named_tensor: True
 
 - func: logical_and_(Tensor(a!) self, Tensor other) -> Tensor(a!)
   variants: method
-  supports_named_tensor: True
 
 - func: logical_and.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
   dispatch:
     CPU: logical_and_out
     CUDA: logical_and_out
-  supports_named_tensor: True
 
 - func: logical_or(Tensor self, Tensor other) -> Tensor
+  use_c10_dispatcher: full
   variants: function, method
-  supports_named_tensor: True
 
 - func: logical_or_(Tensor(a!) self, Tensor other) -> Tensor(a!)
   variants: method
-  supports_named_tensor: True
 
 - func: logical_or.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
   dispatch:
     CPU: logical_or_out
     CUDA: logical_or_out
-  supports_named_tensor: True
 
 - func: blackman_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
 
@@ -640,127 +648,115 @@
   dispatch:
     CPU: bmm_cpu
     CUDA: bmm_cuda
-  supports_named_tensor: True
+    SparseCPU: bmm_sparse_cpu
+    SparseCUDA: bmm_sparse_cuda
+
+- func: _bmm(Tensor self, Tensor mat2, *, bool deterministic=False) -> Tensor
+  use_c10_dispatcher: full
+  variants: function
+  dispatch:
+    SparseCUDA: _bmm_sparse_cuda
 
 - func: bmm.out(Tensor self, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!)
   variants: function
   dispatch:
     CPU: bmm_out_cpu
     CUDA: bmm_out_cuda
-  supports_named_tensor: True
+    SparseCPU: bmm_out_sparse_cpu
+    SparseCUDA: bmm_out_sparse_cuda
+
+- func: _bmm.out(Tensor self, Tensor mat2, *, bool deterministic=False, Tensor(a!) out) -> Tensor(a!)
+  variants: function
+  dispatch:
+    SparseCUDA: _bmm_out_sparse_cuda
 
 - func: broadcast_tensors(Tensor[] tensors) -> Tensor[]
+  use_c10_dispatcher: full
   device_guard: False
 
 - func: cat(Tensor[] tensors, int dim=0) -> Tensor
-  supports_named_tensor: True
+  use_c10_dispatcher: full
 
 - func: cat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)
-  supports_named_tensor: True
 
 - func: cat.names(Tensor[] tensors, Dimname dim) -> Tensor
-  supports_named_tensor: True
 
 - func: cat.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!)
-  supports_named_tensor: True
+
+- func: block_diag(Tensor[] tensors) -> Tensor
+  use_c10_dispatcher: full
+  variants: function
 
 - func: ceil(Tensor self) -> Tensor
   use_c10_dispatcher: full
-  supports_named_tensor: True
   variants: function, method
 
 - func: ceil_(Tensor(a!) self) -> Tensor(a!)
-  supports_named_tensor: True
   variants: function, method
 
 - func: ceil.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
-  supports_named_tensor: True
   dispatch:
     CPU: ceil_out
     CUDA: ceil_out
 
 - func: chain_matmul(Tensor[] matrices) -> Tensor
+  use_c10_dispatcher: full
   variants: function
 
 - func: chunk(Tensor(a) self, int chunks, int dim=0) -> Tensor(a)[]
+  use_c10_dispatcher: full
   variants: function, method
   device_guard: False
-  supports_named_tensor: True
 
 - func: clamp(Tensor self, Scalar? min=None, Scalar? max=None) -> Tensor
   use_c10_dispatcher: full
-  supports_named_tensor: True
   variants: function, method
   dispatch:
     CPU: clamp
     CUDA: clamp
     QuantizedCPU: quantized_clamp
+    Vulkan: vulkan_clamp
 
 - func: clamp_(Tensor(a!) self, Scalar? min=None, Scalar? max=None) -> Tensor(a!)
-  supports_named_tensor: True
   variants: function, method
-  dispatch:
-    CPU: _clamp__cpu
-    CUDA: _clamp__cuda
 
 - func: clamp.out(Tensor self, Scalar? min=None, Scalar? max=None, *, Tensor(a!) out) -> Tensor(a!)
-  supports_named_tensor: True
-  dispatch:
-    CPU: _clamp_out_cpu
-    CUDA: _clamp_out_cuda
 
 - func: clamp_max(Tensor self, Scalar max) -> Tensor
   use_c10_dispatcher: full
-  supports_named_tensor: True
   variants: function, method
 
 - func: clamp_max_(Tensor(a!) self, Scalar max) -> Tensor(a!)
-  supports_named_tensor: True
   variants: function, method
-  dispatch:
-    CPU: _clamp_max__cpu
-    CUDA: _clamp_max__cuda
 
 - func: clamp_max.out(Tensor self, Scalar max, *, Tensor(a!) out) -> Tensor(a!)
-  supports_named_tensor: True
-  dispatch:
-    CPU: _clamp_max_out_cpu
-    CUDA: _clamp_max_out_cuda
 
 - func: clamp_min(Tensor self, Scalar min) -> Tensor
   use_c10_dispatcher: full
-  supports_named_tensor: True
   variants: function, method
 
 - func: clamp_min_(Tensor(a!) self, Scalar min) -> Tensor(a!)
-  supports_named_tensor: True
   variants: function, method
-  dispatch:
-    CPU: _clamp_min__cpu
-    CUDA: _clamp_min__cuda
 
 - func: clamp_min.out(Tensor self, Scalar min, *, Tensor(a!) out) -> Tensor(a!)
-  supports_named_tensor: True
-  dispatch:
-    CPU: _clamp_min_out_cpu
-    CUDA: _clamp_min_out_cuda
 
 - func: cudnn_is_acceptable(Tensor self) -> bool
   use_c10_dispatcher: full
   device_guard: False
 
 - func: constant_pad_nd(Tensor self, int[] pad, Scalar value=0) -> Tensor
+  use_c10_dispatcher: full
   variants: function
 
 - func: contiguous(Tensor self, *, MemoryFormat memory_format=contiguous_format) -> Tensor
   variants: method
-  supports_named_tensor: True
 
 - func: convolution(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups) -> Tensor
 
 - func: convolution_overrideable(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups) -> Tensor
 
 - func: convolution_backward_overrideable(Tensor grad_output, Tensor input, Tensor weight, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias)
+  use_c10_dispatcher: full
 
 - func: _convolution(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool benchmark, bool deterministic, bool cudnn_enabled) -> Tensor
 
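Among the additions above is `block_diag`, which assembles a block-diagonal matrix from a list of tensors. A sketch, assuming torch-rb generates a binding that takes a Ruby array for `Tensor[]`:

```ruby
a = Torch.tensor([[1, 0], [0, 1]])
b = Torch.tensor([[2]])
Torch.block_diag([a, b])
# => 3x3 tensor with a and b along the diagonal, zeros elsewhere
```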
@@ -778,6 +774,7 @@
   use_c10_dispatcher: full
 
 - func: conv_tbc_backward(Tensor self, Tensor input, Tensor weight, Tensor bias, int pad) -> (Tensor, Tensor, Tensor)
+  use_c10_dispatcher: full
 
 # NB: we inherit the goofy argument order from PyTorch torch.nn.functional
 - func: conv_transpose1d(Tensor input, Tensor weight, Tensor? bias=None, int[1] stride=1, int[1] padding=0, int[1] output_padding=0, int groups=1, int[1] dilation=1) -> Tensor
@@ -790,7 +787,6 @@
   manual_kernel_registration: True
   variants: method
   device_guard: False
-  supports_named_tensor: True
 
 - func: _copy_from(Tensor self, Tensor dst, bool non_blocking=False) -> Tensor
   use_c10_dispatcher: full
@@ -798,39 +794,21 @@
 
 - func: cos(Tensor self) -> Tensor
   use_c10_dispatcher: full
-  supports_named_tensor: True
   variants: function, method
 
 - func: cos_(Tensor(a!) self) -> Tensor(a!)
-  supports_named_tensor: True
   variants: function, method
-  dispatch:
-    CPU: _cos__cpu
-    CUDA: _cos__cuda
 
 - func: cos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
-  supports_named_tensor: True
-  dispatch:
-    CPU: _cos_out_cpu
-    CUDA: _cos_out_cuda
 
 - func: cosh(Tensor self) -> Tensor
   use_c10_dispatcher: full
-  supports_named_tensor: True
   variants: function, method
 
 - func: cosh_(Tensor(a!) self) -> Tensor(a!)
-  supports_named_tensor: True
   variants: function, method
-  dispatch:
-    CPU: _cosh__cpu
-    CUDA: _cosh__cuda
 
 - func: cosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
-  supports_named_tensor: True
-  dispatch:
-    CPU: _cosh_out_cpu
-    CUDA: _cosh_out_cuda
 
 - func: cosine_embedding_loss(Tensor input1, Tensor input2, Tensor target, float margin=0.0, int reduction=Mean) -> Tensor
   use_c10_dispatcher: full
@@ -860,18 +838,22 @@
     CUDA: cudnn_convolution_deprecated
 
 - func: cudnn_convolution(Tensor self, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor
+  use_c10_dispatcher: full
   dispatch:
     CUDA: cudnn_convolution
 
 - func: cudnn_convolution_backward_input(int[] self_size, Tensor grad_output, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor
+  use_c10_dispatcher: full
   dispatch:
     CUDA: cudnn_convolution_backward_input
 
 - func: cudnn_convolution_backward(Tensor self, Tensor grad_output, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool[2] output_mask) -> (Tensor, Tensor)
+  use_c10_dispatcher: full
   dispatch:
     CUDA: cudnn_convolution_backward
 
 - func: cudnn_convolution_backward_weight(int[] weight_size, Tensor grad_output, Tensor self, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor
+  use_c10_dispatcher: full
   dispatch:
     CUDA: cudnn_convolution_backward_weight
 
@@ -880,20 +862,24 @@
     CUDA: cudnn_convolution_transpose_deprecated
 
 - func: cudnn_convolution_transpose(Tensor self, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor
+  use_c10_dispatcher: full
   dispatch:
     CUDA: cudnn_convolution_transpose
 
 # NB: output_padding not strictly needed here, but it's helpful for the float
 # backwards
 - func: cudnn_convolution_transpose_backward(Tensor self, Tensor grad_output, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool[2] output_mask) -> (Tensor, Tensor)
+  use_c10_dispatcher: full
   dispatch:
     CUDA: cudnn_convolution_transpose_backward
 
 - func: cudnn_convolution_transpose_backward_input(Tensor grad_output, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor
+  use_c10_dispatcher: full
   dispatch:
     CUDA: cudnn_convolution_transpose_backward_input
 
 - func: cudnn_convolution_transpose_backward_weight(int[] weight_size, Tensor grad_output, Tensor self, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor
+  use_c10_dispatcher: full
   dispatch:
     CUDA: cudnn_convolution_transpose_backward_weight
 
@@ -904,22 +890,20 @@
     CUDA: cudnn_grid_sampler_forward
 
 - func: cudnn_grid_sampler_backward(Tensor self, Tensor grid, Tensor grad_output) -> (Tensor grad_self, Tensor grad_grid)
+  use_c10_dispatcher: full
   dispatch:
     CUDA: cudnn_grid_sampler_backward
 
 - func: cummax(Tensor self, int dim) -> (Tensor values, Tensor indices)
-  supports_named_tensor: True
+  use_c10_dispatcher: full
   variants: function, method
 
 - func: cummax.out(Tensor self, int dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
-  supports_named_tensor: True
 
 - func: cummax.dimname(Tensor self, Dimname dim) -> (Tensor values, Tensor indices)
-  supports_named_tensor: True
   variants: function, method
 
 - func: cummax.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
-  supports_named_tensor: True
 
 - func: _cummax_helper(Tensor self, Tensor(a!) values, Tensor(b!) indices, int dim) -> ()
   variants: function
@@ -928,18 +912,15 @@
     CUDA: cummax_helper_cuda
 
 - func: cummin(Tensor self, int dim) -> (Tensor values, Tensor indices)
-  supports_named_tensor: True
+  use_c10_dispatcher: full
   variants: function, method
 
 - func: cummin.out(Tensor self, int dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
-  supports_named_tensor: True
 
 - func: cummin.dimname(Tensor self, Dimname dim) -> (Tensor values, Tensor indices)
-  supports_named_tensor: True
   variants: function, method
 
 - func: cummin.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
-  supports_named_tensor: True
 
 - func: _cummin_helper(Tensor self, Tensor(a!) values, Tensor(b!) indices, int dim) -> ()
   variants: function
@@ -948,45 +929,40 @@
     CUDA: cummin_helper_cuda
 
 - func: cumprod(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor
-  supports_named_tensor: True
   variants: function, method
 
 - func: cumprod.out(Tensor self, int dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
-  supports_named_tensor: True
 
 - func: cumprod.dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
-  supports_named_tensor: True
   variants: function, method
 
 - func: cumprod.dimname_out(Tensor self, Dimname dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
-  supports_named_tensor: True
 
 - func: cumsum(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor
-  supports_named_tensor: True
   variants: function, method
 
 - func: cumsum.out(Tensor self, int dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
-  supports_named_tensor: True
 
 - func: cumsum.dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
-  supports_named_tensor: True
   variants: function, method
 
 - func: cumsum.dimname_out(Tensor self, Dimname dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
-  supports_named_tensor: True
 
 - func: ctc_loss.IntList(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, int reduction=Mean, bool zero_infinity=False) -> Tensor
+  use_c10_dispatcher: full
 
 # convenience function that converts to intlists for you
 - func: ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank=0, int reduction=Mean, bool zero_infinity=False) -> Tensor
   use_c10_dispatcher: full
 
 - func: _ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, bool zero_infinity=False) -> (Tensor, Tensor)
+  use_c10_dispatcher: full
   dispatch:
     CPU: ctc_loss_cpu
     CUDA: ctc_loss_gpu
 
 - func: _ctc_loss_backward(Tensor grad, Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, Tensor neg_log_likelihood, Tensor log_alpha, int blank, bool zero_infinity=False) -> Tensor
+  use_c10_dispatcher: full
   dispatch:
     CPU: ctc_loss_backward_cpu
     CUDA: ctc_loss_backward_gpu
@@ -1004,12 +980,11 @@
   variants: function, method
 
 - func: diagonal(Tensor(a) self, int offset=0, int dim1=0, int dim2=1) -> Tensor(a)
+  use_c10_dispatcher: full
   variants: function, method
-  supports_named_tensor: True
 
 - func: diagonal.Dimname(Tensor(a) self, *, Dimname outdim, Dimname dim1, Dimname dim2, int offset=0) -> Tensor(a)
   variants: function, method
-  supports_named_tensor: True
 
 - func: fill_diagonal_(Tensor(a!) self, Scalar fill_value, bool wrap=False) -> Tensor(a!)
   variants: method
@@ -1022,7 +997,6 @@
     CUDA: div
     SparseCPU: div_sparse
     SparseCUDA: div_sparse
-  supports_named_tensor: True
 
 - func: div_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
   variants: method
@@ -1031,7 +1005,6 @@
     CUDA: div_
     SparseCPU: div_sparse_
    SparseCUDA: div_sparse_
-  supports_named_tensor: True
 
 - func: div.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
   dispatch:
@@ -1039,17 +1012,14 @@
     CUDA: div_out
     SparseCPU: div_out_sparse_zerodim
     SparseCUDA: div_out_sparse_zerodim
-  supports_named_tensor: True
 
 # For C++ only, until we have conversion from C++ numbers to Tensor
 - func: div.Scalar(Tensor self, Scalar other) -> Tensor
   use_c10_dispatcher: full
   variants: function, method
-  supports_named_tensor: True
 
 - func: div_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
   variants: method
-  supports_named_tensor: True
 
 - func: dot(Tensor self, Tensor tensor) -> Tensor
   use_c10_dispatcher: full
@@ -1057,12 +1027,11 @@
   dispatch:
     CPU: legacy::cpu::_th_dot
     CUDA: legacy::cuda::_th_dot
-  supports_named_tensor: True
 
 - func: dot.out(Tensor self, Tensor tensor, *, Tensor(a!) out) -> Tensor(a!)
-  supports_named_tensor: True
 
 - func: einsum(str equation, Tensor[] tensors) -> Tensor
+  use_c10_dispatcher: full
 
 - func: embedding(Tensor weight, Tensor indices, int padding_idx=-1, bool scale_grad_by_freq=False, bool sparse=False) -> Tensor
   use_c10_dispatcher: full
@@ -1115,6 +1084,8 @@
     CPU: _embedding_bag_per_sample_weights_backward_cpu
     CUDA: _embedding_bag_per_sample_weights_backward_cuda
 
+- func: empty_meta(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
+
 - func: empty.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
   device_guard: False
 
@@ -1125,6 +1096,7 @@
     MkldnnCPU: empty_mkldnn
     SparseCPU: empty_sparse
     SparseCUDA: empty_sparse
+    Vulkan: empty_vulkan
 
 - func: new_empty(Tensor self, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
   variants: method
@@ -1139,7 +1111,8 @@
 - func: _empty_affine_quantized(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, float scale=1, int zero_point=0, MemoryFormat? memory_format=contiguous_format) -> Tensor
   dispatch:
     CPU: empty_affine_quantized_other_backends_stub
-    QuantizedCPU: empty_affine_quantized_cpu
+    QuantizedCPU: empty_affine_quantized
+    QuantizedCUDA: empty_affine_quantized
 
 # it's a factory function receiving a tensor argument, thus overriding explicitly
 # other overrides are to provide a more helpful error message that dtype is required
@@ -1151,95 +1124,70 @@
 
 - func: resize_(Tensor(a!) self, int[] size, *, MemoryFormat? memory_format=None) -> Tensor(a!)
   manual_kernel_registration: True
-  supports_named_tensor: True
   variants: method
   device_guard: False
 
+- func: empty_quantized(int[] size, Tensor qtensor) -> Tensor
+  variants: function
+  dispatch:
+    QuantizedCPU: empty_quantized
+    QuantizedCUDA: empty_quantized
+
 - func: empty.out(int[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
   device_guard: False
 
 - func: empty_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
   device_guard: False
-  supports_named_tensor: True
 
 - func: empty_strided(int[] size, int[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
   dispatch:
     CPU: empty_strided_cpu
     CUDA: empty_strided_cuda
+    Vulkan: empty_strided_vulkan
 
 - func: erf(Tensor self) -> Tensor
   use_c10_dispatcher: full
-  supports_named_tensor: True
   variants: function, method
 
 - func: erf_(Tensor(a!) self) -> Tensor(a!)
-  supports_named_tensor: True
   variants: function, method
-  dispatch:
-    CPU: _erf__cpu
-    CUDA: _erf__cuda
 
 - func: erf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
-  supports_named_tensor: True
-  dispatch:
-    CPU: _erf_out_cpu
-    CUDA: _erf_out_cuda
 
 - func: erfc(Tensor self) -> Tensor
   use_c10_dispatcher: full
-  supports_named_tensor: True
   variants: function, method
 
 - func: erfc_(Tensor(a!) self) -> Tensor(a!)
-  supports_named_tensor: True
   variants: function, method
-  dispatch:
-    CPU: _erfc__cpu
-    CUDA: _erfc__cuda
 
 - func: erfc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
-  supports_named_tensor: True
-  dispatch:
-    CPU: _erfc_out_cpu
-    CUDA: _erfc_out_cuda
 
 - func: exp(Tensor self) -> Tensor
   use_c10_dispatcher: full
-  supports_named_tensor: True
   variants: function, method
 
 - func: exp_(Tensor(a!) self) -> Tensor(a!)
-  supports_named_tensor: True
   variants: function, method
-  dispatch:
-    CPU: _exp__cpu
-    CUDA: _exp__cuda
 
 - func: exp.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
-  supports_named_tensor: True
-  dispatch:
-    CPU: _exp_out_cpu
-    CUDA: _exp_out_cuda
 
 - func: expm1(Tensor self) -> Tensor
   use_c10_dispatcher: full
-  supports_named_tensor: True
   variants: function, method
 
 - func: expm1_(Tensor(a!) self) -> Tensor(a!)
-  supports_named_tensor: True
   variants: function, method
 
 - func: expm1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
-  supports_named_tensor: True
   dispatch:
     CPU: expm1_out
     CUDA: expm1_out
 
 - func: expand(Tensor(a) self, int[] size, *, bool implicit=False) -> Tensor(a)
+  use_c10_dispatcher: full
   variants: method # This is method-only to match the previous tensor API. In the future we could make this a function too.
   device_guard: False
-  supports_named_tensor: True
 
 - func: expand_as(Tensor self, Tensor other) -> Tensor
   use_c10_dispatcher: full
@@ -1263,51 +1211,42 @@
 - func: flatten.using_ints(Tensor self, int start_dim=0, int end_dim=-1) -> Tensor
   use_c10_dispatcher: full
   variants: function, method
-  supports_named_tensor: True
 
 - func: flatten.named_out_dim(Tensor self, int start_dim, int end_dim, Dimname out_dim) -> Tensor
   variants: function, method
-  supports_named_tensor: True
 
 - func: flatten.using_names(Tensor self, Dimname start_dim, Dimname end_dim, Dimname out_dim) -> Tensor
   variants: function, method
-  supports_named_tensor: True
 
 - func: flatten.DimnameList(Tensor self, Dimname[] dims, Dimname out_dim) -> Tensor
   variants: function, method
-  supports_named_tensor: True
 
 - func: fill_.Scalar(Tensor(a!) self, Scalar value) -> Tensor(a!)
-  supports_named_tensor: True
   variants: function, method
 
 - func: fill_.Tensor(Tensor(a!) self, Tensor value) -> Tensor(a!)
-  supports_named_tensor: True
   variants: function, method
 
 - func: floor(Tensor self) -> Tensor
   use_c10_dispatcher: full
-  supports_named_tensor: True
   variants: function, method
 
 - func: floor_(Tensor(a!) self) -> Tensor(a!)
-  supports_named_tensor: True
   variants: function, method
 
 - func: floor.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
-  supports_named_tensor: True
   dispatch:
     CPU: floor_out
     CUDA: floor_out
 
 - func: floor_divide(Tensor self, Tensor other) -> Tensor
+  use_c10_dispatcher: full
   variants: function, method
   dispatch:
     CPU: floor_divide
     CUDA: floor_divide
     SparseCPU: floor_divide_sparse
     SparseCUDA: floor_divide_sparse
-  supports_named_tensor: True
 
 - func: floor_divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
   variants: method
@@ -1316,7 +1255,6 @@
     CUDA: floor_divide_
     SparseCPU: floor_divide_sparse_
     SparseCUDA: floor_divide_sparse_
-  supports_named_tensor: True
 
 - func: floor_divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
   dispatch:
@@ -1324,27 +1262,22 @@
     CUDA: floor_divide_out
     SparseCPU: floor_divide_out_sparse_zerodim
     SparseCUDA: floor_divide_out_sparse_zerodim
-  supports_named_tensor: True
 
 - func: floor_divide.Scalar(Tensor self, Scalar other) -> Tensor
+  use_c10_dispatcher: full
   variants: function, method
-  supports_named_tensor: True
 
 - func: floor_divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
   variants: method
-  supports_named_tensor: True
 
 - func: frac(Tensor self) -> Tensor
   use_c10_dispatcher: full
-  supports_named_tensor: True
   variants: function, method
 
 - func: frac_(Tensor(a!) self) -> Tensor(a!)
-  supports_named_tensor: True
   variants: function, method
 
 - func: frac.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
-  supports_named_tensor: True
 
 - func: full.names(int[] size, Scalar fill_value, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
   device_guard: False
@@ -1354,7 +1287,6 @@
 - func: full.out(int[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!)
 
 - func: full_like(Tensor self, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
-  supports_named_tensor: True
 
 - func: from_file(str filename, bool? shared=None, int? size=0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
   dispatch:
@@ -1381,6 +1313,7 @@
     CUDA: grid_sampler_2d_cuda
 
 - func: grid_sampler_2d_backward(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> (Tensor, Tensor)
+  use_c10_dispatcher: full
   dispatch:
     CPU: grid_sampler_2d_backward_cpu
     CUDA: grid_sampler_2d_backward_cuda
@@ -1392,6 +1325,7 @@
     CUDA: grid_sampler_3d_cuda
 
 - func: grid_sampler_3d_backward(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> (Tensor, Tensor)
+  use_c10_dispatcher: full
   dispatch:
     CPU: grid_sampler_3d_backward_cpu
     CUDA: grid_sampler_3d_backward_cuda
@@ -1419,6 +1353,16 @@
 
 - func: group_norm(Tensor input, int num_groups, Tensor? weight=None, Tensor? bias=None, float eps=1e-05, bool cudnn_enabled=True) -> Tensor
 
+- func: native_group_norm(Tensor input, Tensor? weight, Tensor? bias, int N, int C, int HxW, int group, float eps) -> (Tensor, Tensor, Tensor)
+  dispatch:
+    CPU: native_group_norm
+    CUDA: native_group_norm
+
+- func: native_group_norm_backward(Tensor grad_out, Tensor input, Tensor mean, Tensor rstd, Tensor? weight, int N, int C, int HxW, int group, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
+  dispatch:
+    CPU: native_group_norm_backward
+    CUDA: native_group_norm_backward
+
 # FFT
 
 - func: fft(Tensor self, int signal_ndim, bool normalized=False) -> Tensor
@@ -1434,9 +1378,11 @@
   variants: function, method
 
 - func: irfft(Tensor self, int signal_ndim, bool normalized=False, bool onesided=True, int[] signal_sizes=[]) -> Tensor
+  use_c10_dispatcher: full
   variants: function, method
 
 - func: _fft_with_size(Tensor self, int signal_ndim, bool complex_input, bool complex_output, bool inverse, int[] checked_signal_sizes, bool normalized, bool onesided, int[] output_sizes) -> Tensor
+  use_c10_dispatcher: full
   variants: function
   dispatch:
     CPU: _fft_mkl
@@ -1449,10 +1395,10 @@
   use_c10_dispatcher: full
 
 - func: _cufft_set_plan_cache_max_size(int device_index, int max_size) -> ()
-  use_c10_dispatcher: unboxed_only
+  use_c10_dispatcher: full
 
 - func: _cufft_clear_plan_cache(int device_index) -> ()
-  use_c10_dispatcher: unboxed_only
+  use_c10_dispatcher: full
 
 - func: index.Tensor(Tensor self, Tensor?[] indices) -> Tensor
   variants: function, method
@@ -1510,9 +1456,8 @@
 
 - func: isnan(Tensor self) -> Tensor
   use_c10_dispatcher: full
-  variants: function
+  variants: function, method
   device_guard: False
-  supports_named_tensor: True
   dispatch:
     CPU: isnan
     CUDA: isnan
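`isnan` gains a method variant here, so (assuming torch-rb regenerates its bindings from this schema) it can now be called directly on a tensor:

```ruby
x = Torch.tensor([1.0, Float::NAN])
x.isnan  # method form, new in this schema; previously only Torch.isnan(x)
```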
@@ -1528,57 +1473,49 @@
   use_c10_dispatcher: full
   variants: function, method
   device_guard: False
-  supports_named_tensor: True
 
 - func: is_complex(Tensor self) -> bool
   use_c10_dispatcher: full
   variants: function, method
   device_guard: False
-  supports_named_tensor: True
 
 - func: is_nonzero(Tensor self) -> bool
   use_c10_dispatcher: full
   variants: function, method
   device_guard: False
-  supports_named_tensor: True
 
 - func: is_same_size(Tensor self, Tensor other) -> bool
   use_c10_dispatcher: full
   variants: function, method
   device_guard: False
-  supports_named_tensor: True
 
 - func: is_signed(Tensor self) -> bool
   use_c10_dispatcher: full
   variants: function, method
   device_guard: False
-  supports_named_tensor: True
 
-- func: kl_div(Tensor self, Tensor target, int reduction=Mean) -> Tensor
+- func: kl_div(Tensor self, Tensor target, int reduction=Mean, *, bool log_target=False) -> Tensor
   use_c10_dispatcher: full
 
-- func: kl_div_backward(Tensor grad_output, Tensor self, Tensor target, int reduction=Mean) -> Tensor
+- func: kl_div_backward(Tensor grad_output, Tensor self, Tensor target, int reduction=Mean, *, bool log_target=False) -> Tensor
   use_c10_dispatcher: full
   dispatch:
     CPU: kl_div_backward_cpu
     CUDA: kl_div_backward_cuda
 
 - func: kthvalue(Tensor self, int k, int dim=-1, bool keepdim=False) -> (Tensor values, Tensor indices)
-  supports_named_tensor: True
+  use_c10_dispatcher: full
   variants: function, method
 
 - func: kthvalue.values(Tensor self, int k, int dim=-1, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
-  supports_named_tensor: True
   dispatch:
     CPU: kthvalue_out_cpu
     CUDA: kthvalue_out_cuda
 
 - func: kthvalue.dimname(Tensor self, int k, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
-  supports_named_tensor: True
   variants: function, method
 
 - func: kthvalue.dimname_out(Tensor self, int k, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
-  supports_named_tensor: True
 
 - func: layer_norm(Tensor input, int[] normalized_shape, Tensor? weight=None, Tensor? bias=None, float eps=1e-05, bool cudnn_enable=True) -> Tensor
 
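`kl_div` (and its backward) gains a `log_target` flag: when true, `target` is already in log space, so the kernel skips re-taking the log, which is both faster and more numerically stable. A sketch of the call, with the exact generated Ruby signature assumed:

```ruby
# Hypothetical call through the generated binding (argument handling assumed):
p = Torch.log_softmax(Torch.randn(3, 5), 1)    # predictions, log space
q = Torch.log_softmax(Torch.randn(3, 5), 1)    # target, already log space
loss = Torch.kl_div(p, q, 1, log_target: true) # skips target.log internally
```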
@@ -1601,16 +1538,19 @@
     MkldnnCPU: mkldnn_linear
 
 - func: fbgemm_linear_int8_weight_fp32_activation(Tensor input, Tensor weight, Tensor packed, Tensor col_offsets, Scalar weight_scale, Scalar weight_zero_point, Tensor bias) -> Tensor
+  use_c10_dispatcher: full
 
 - func: fbgemm_linear_int8_weight(Tensor input, Tensor weight, Tensor packed, Tensor col_offsets, Scalar weight_scale, Scalar weight_zero_point, Tensor bias) -> Tensor
   use_c10_dispatcher: full
 
 - func: fbgemm_linear_quantize_weight(Tensor input) -> (Tensor, Tensor, float, int)
+  use_c10_dispatcher: full
 
 - func: fbgemm_pack_gemm_matrix_fp16(Tensor input) -> Tensor
   use_c10_dispatcher: full
 
 - func: fbgemm_linear_fp16_weight_fp32_activation(Tensor input, Tensor packed_weight, Tensor bias) -> Tensor
+  use_c10_dispatcher: full
 
 - func: fbgemm_linear_fp16_weight(Tensor input, Tensor packed_weight, Tensor bias) -> Tensor
   use_c10_dispatcher: full
@@ -1630,41 +1570,33 @@
 
 - func: log(Tensor self) -> Tensor
   use_c10_dispatcher: full
-  supports_named_tensor: True
   variants: function, method
 
 - func: log_(Tensor(a!) self) -> Tensor(a!)
-  supports_named_tensor: True
   variants: function, method
 
 - func: log.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
-  supports_named_tensor: True
   dispatch:
     CPU: log_out
     CUDA: log_out
 
 - func: log10(Tensor self) -> Tensor
   use_c10_dispatcher: full
-  supports_named_tensor: True
   variants: function, method
 
 - func: log10_(Tensor(a!) self) -> Tensor(a!)
-  supports_named_tensor: True
   variants: function, method
 
 - func: log10.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
-  supports_named_tensor: True
   dispatch:
     CPU: log10_out
     CUDA: log10_out
 
 - func: log1p(Tensor self) -> Tensor
   use_c10_dispatcher: full
-  supports_named_tensor: True
   variants: function, method
 
 - func: log1p_(Tensor(a!) self) -> Tensor(a!)
-  supports_named_tensor: True
   variants: function, method
   dispatch:
     CPU: log1p_
@@ -1673,7 +1605,6 @@
1673
1605
  SparseCUDA: log1p_sparse_
1674
1606
 
1675
1607
  - func: log1p.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
1676
- supports_named_tensor: True
1677
1608
  dispatch:
1678
1609
  CPU: log1p_out
1679
1610
  CUDA: log1p_out
@@ -1682,19 +1613,28 @@
1682
1613
 
1683
1614
  - func: log2(Tensor self) -> Tensor
1684
1615
  use_c10_dispatcher: full
1685
- supports_named_tensor: True
1686
1616
  variants: function, method
1687
1617
 
1688
1618
  - func: log2_(Tensor(a!) self) -> Tensor(a!)
1689
- supports_named_tensor: True
1690
1619
  variants: function, method
1691
1620
 
1692
1621
  - func: log2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
1693
- supports_named_tensor: True
1694
1622
  dispatch:
1695
1623
  CPU: log2_out
1696
1624
  CUDA: log2_out
1697
1625
 
1626
+ - func: logaddexp.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
1627
+
1628
+ - func: logaddexp(Tensor self, Tensor other) -> Tensor
1629
+ use_c10_dispatcher: full
1630
+ variants: method, function
1631
+
1632
+ - func: logaddexp2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
1633
+
1634
+ - func: logaddexp2(Tensor self, Tensor other) -> Tensor
1635
+ use_c10_dispatcher: full
1636
+ variants: method, function
1637
+
1698
1638
  - func: logdet(Tensor self) -> Tensor
1699
1639
  use_c10_dispatcher: full
1700
1640
  variants: function, method
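
logaddexp and logaddexp2, declared above, compute log(exp(a) + exp(b)) in base e and base 2 without overflowing for large inputs. A short Ruby sketch, assuming the generated binding surfaces as Torch.logaddexp:

    a = Torch.tensor([-1.0, 0.0, 100.0])
    b = Torch.tensor([2.0, 0.5, 100.0])
    stable = Torch.logaddexp(a, b) # elementwise log(exp(a) + exp(b))
    naive  = (a.exp + b.exp).log   # same math, but exp(100) overflows float32 to Infinity
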
@@ -1709,11 +1649,9 @@
  # log_softmax allows positional dtype, unlike most operators, because kwonly is BC-breaking when loading jit models.
  - func: log_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor
  variants: function, method
- supports_named_tensor: True

  - func: log_softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
  variants: function, method
- supports_named_tensor: True

  - func: _log_softmax(Tensor self, int dim, bool half_to_float) -> Tensor
  use_c10_dispatcher: full
@@ -1727,19 +1665,37 @@
  CPU: log_softmax_backward_cpu
  CUDA: log_softmax_backward_cuda

+ - func: _logcumsumexp(Tensor self, int dim) -> Tensor
+ use_c10_dispatcher: full
+ dispatch:
+ CPU: _logcumsumexp_cpu
+ CUDA: _logcumsumexp_cuda
+
+ - func: _logcumsumexp.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)
+ dispatch:
+ CPU: _logcumsumexp_out_cpu
+ CUDA: _logcumsumexp_out_cuda
+
+ - func: logcumsumexp(Tensor self, int dim) -> Tensor
+ variants: function, method
+
+ - func: logcumsumexp.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)
+
+ - func: logcumsumexp.dimname(Tensor self, Dimname dim) -> Tensor
+ variants: function, method
+
+ - func: logcumsumexp.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) out) -> Tensor(a!)
+
  - func: logsumexp(Tensor self, int[1] dim, bool keepdim=False) -> Tensor
- supports_named_tensor: True
+ use_c10_dispatcher: full
  variants: function, method

  - func: logsumexp.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
- supports_named_tensor: True

  - func: logsumexp.names(Tensor self, Dimname[1] dim, bool keepdim=False) -> Tensor
- supports_named_tensor: True
  variants: function, method

  - func: logsumexp.names_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
- supports_named_tensor: True

  - func: margin_ranking_loss(Tensor input1, Tensor input2, Tensor target, float margin=0.0, int reduction=Mean) -> Tensor
  use_c10_dispatcher: full
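
logcumsumexp, added above, is the cumulative counterpart of logsumexp: element i of the result holds the log of the sum of exp(x[j]) for j <= i along the given dimension. A hedged Ruby sketch (Torch.logcumsumexp is assumed to be generated from this schema):

    x = Torch.tensor([0.1, 0.2, 0.3])
    y = Torch.logcumsumexp(x, 0) # running log-sum-exp along dim 0
    naive = x.exp.cumsum(0).log  # equivalent, but numerically less stable
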
@@ -1747,10 +1703,8 @@
  - func: matmul(Tensor self, Tensor other) -> Tensor
  use_c10_dispatcher: full
  variants: function, method
- supports_named_tensor: True

  - func: matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
- supports_named_tensor: True

  - func: matrix_rank.tol(Tensor self, float tol, bool symmetric=False) -> Tensor
  use_c10_dispatcher: full
@@ -1763,53 +1717,52 @@
  variants: function, method

  - func: max.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)
+ use_c10_dispatcher: full
  variants: function, method
- supports_named_tensor: True

  - func: max.dim_max(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices)
- supports_named_tensor: True

  - func: max_values(Tensor self, int[1] dim, bool keepdim=False) -> Tensor
+ use_c10_dispatcher: full
  variants: function, method

  - func: max.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
  variants: function, method
- supports_named_tensor: True

  - func: max.names_dim_max(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices)
- supports_named_tensor: True

  - func: max_values.names(Tensor self, Dimname[1] dim, bool keepdim=False) -> Tensor
  variants: function, method

  # Return: (Tensor output, Tensor indices)
  - func: max_pool1d_with_indices(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor)
- supports_named_tensor: True
+ use_c10_dispatcher: full

  - func: max_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> Tensor
- supports_named_tensor: True
+ use_c10_dispatcher: full

  - func: max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor
- supports_named_tensor: True
+ use_c10_dispatcher: full

  - func: mkldnn_max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor
+ use_c10_dispatcher: full
  requires_tensor: True
  dispatch:
  MkldnnCPU: mkldnn_max_pool2d

  - func: quantized_max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor
+ use_c10_dispatcher: full
  requires_tensor: True
  dispatch:
  QuantizedCPU: quantized_max_pool2d

  - func: max_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor
- supports_named_tensor: True
+ use_c10_dispatcher: full

  # The CPU and GPU dispatch variants are named weirdly here because otherwise there
  # are namespacing issues in C++
  - func: mean(Tensor self, *, ScalarType? dtype=None) -> Tensor
  variants: function, method
- supports_named_tensor: True
  dispatch:
  CPU: mean_cpu_gpu
  CUDA: mean_cpu_gpu
@@ -1817,14 +1770,13 @@

  - func: mean.dim(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
  variants: function, method
- supports_named_tensor: True
  dispatch:
  CPU: mean_cpu_gpu
  CUDA: mean_cpu_gpu
  QuantizedCPU: quantized_mean_cpu
+ Vulkan: mean_vulkan

  - func: mean.out(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
- supports_named_tensor: True
  dispatch:
  CPU: mean_out_cpu_gpu
  CUDA: mean_out_cpu_gpu
@@ -1832,41 +1784,34 @@

  - func: mean.names_dim(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
  variants: function, method
- supports_named_tensor: True

  - func: mean.names_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
- supports_named_tensor: True

  - func: median.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)
- supports_named_tensor: True
+ use_c10_dispatcher: full
  variants: function, method

  - func: median.dim_values(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
- supports_named_tensor: True

  - func: median.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
- supports_named_tensor: True
  variants: function, method

  - func: median.names_dim_values(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
- supports_named_tensor: True

  - func: min.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)
+ use_c10_dispatcher: full
  variants: function, method
- supports_named_tensor: True

  - func: min.dim_min(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) min, Tensor(b!) min_indices) -> (Tensor(a!) values, Tensor(b!) indices)
- supports_named_tensor: True

  - func: min_values(Tensor self, int[1] dim, bool keepdim=False) -> Tensor
+ use_c10_dispatcher: full
  variants: function, method

  - func: min.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
  variants: function, method
- supports_named_tensor: True

  - func: min.names_dim_min(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) min, Tensor(b!) min_indices) -> (Tensor(a!) values, Tensor(b!) indices)
- supports_named_tensor: True

  - func: min_values.names(Tensor self, Dimname[1] dim, bool keepdim=False) -> Tensor
  variants: function, method
@@ -1874,10 +1819,13 @@
  - func: mkldnn_convolution(Tensor self, Tensor weight, Tensor? bias, int[] padding, int[] stride, int[] dilation, int groups) -> Tensor

  - func: mkldnn_convolution_backward_input(int[] self_size, Tensor grad_output, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool bias_defined) -> Tensor
+ use_c10_dispatcher: full

  - func: mkldnn_convolution_backward_weights(int[] weight_size, Tensor grad_output, Tensor self, int[] padding, int[] stride, int[] dilation, int groups, bool bias_defined) -> (Tensor, Tensor)
+ use_c10_dispatcher: full

  - func: mkldnn_convolution_backward(Tensor self, Tensor grad_output, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
+ use_c10_dispatcher: full

  - func: miopen_batch_norm(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon) -> (Tensor, Tensor, Tensor)
  dispatch:
@@ -1892,10 +1840,12 @@
  CUDA: miopen_convolution

  - func: miopen_convolution_backward_input(int[] self_size, Tensor grad_output, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor
+ use_c10_dispatcher: full
  dispatch:
  CUDA: miopen_convolution_backward_input

  - func: miopen_convolution_backward(Tensor self, Tensor grad_output, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
+ use_c10_dispatcher: full
  dispatch:
  CUDA: miopen_convolution_backward

@@ -1905,6 +1855,7 @@
  CUDA: miopen_convolution_backward_bias

  - func: miopen_convolution_backward_weight(int[] weight_size, Tensor grad_output, Tensor self, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor
+ use_c10_dispatcher: full
  dispatch:
  CUDA: miopen_convolution_backward_weight

@@ -1915,14 +1866,17 @@
  # NB: output_padding not strictly needed here, but it's helpful for the float
  # backwards
  - func: miopen_convolution_transpose_backward(Tensor self, Tensor grad_output, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
+ use_c10_dispatcher: full
  dispatch:
  CUDA: miopen_convolution_transpose_backward

  - func: miopen_convolution_transpose_backward_input(Tensor grad_output, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor
+ use_c10_dispatcher: full
  dispatch:
  CUDA: miopen_convolution_transpose_backward_input

  - func: miopen_convolution_transpose_backward_weight(int[] weight_size, Tensor grad_output, Tensor self, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor
+ use_c10_dispatcher: full
  dispatch:
  CUDA: miopen_convolution_transpose_backward_weight

@@ -1931,14 +1885,17 @@
  CUDA: miopen_depthwise_convolution

  - func: miopen_depthwise_convolution_backward_input(int[] self_size, Tensor grad_output, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor
+ use_c10_dispatcher: full
  dispatch:
  CUDA: miopen_depthwise_convolution_backward_input

  - func: miopen_depthwise_convolution_backward(Tensor self, Tensor grad_output, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
+ use_c10_dispatcher: full
  dispatch:
  CUDA: miopen_depthwise_convolution_backward

  - func: miopen_depthwise_convolution_backward_weight(int[] weight_size, Tensor grad_output, Tensor self, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor
+ use_c10_dispatcher: full
  dispatch:
  CUDA: miopen_depthwise_convolution_backward_weight

@@ -1955,35 +1912,30 @@
  variants: function, method
  dispatch:
  CPU: mm_cpu
- CUDA: legacy::cuda::_th_mm
+ CUDA: mm_cuda
  SparseCPU: _sparse_mm
  SparseCUDA: _sparse_mm
- supports_named_tensor: True

  - func: mm.out(Tensor self, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
  CPU: mm_cpu_out
- CUDA: legacy::cuda::_th_mm_out
+ CUDA: mm_out_cuda
  SparseCPU: _sparse_mm_out
  SparseCUDA: _sparse_mm_out
- supports_named_tensor: True

  - func: _sparse_mm(Tensor sparse, Tensor dense) -> Tensor
  use_c10_dispatcher: full

  - func: mode(Tensor self, int dim=-1, bool keepdim=False) -> (Tensor values, Tensor indices)
- supports_named_tensor: True
+ use_c10_dispatcher: full
  variants: function, method

  - func: mode.values(Tensor self, int dim=-1, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
- supports_named_tensor: True

  - func: mode.dimname(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
  variants: function, method
- supports_named_tensor: True

  - func: mode.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
- supports_named_tensor: True

  - func: mul.Tensor(Tensor self, Tensor other) -> Tensor
  use_c10_dispatcher: full
@@ -1994,7 +1946,6 @@
  SparseCPU: mul_sparse
  SparseCUDA: mul_sparse
  MkldnnCPU: mkldnn_mul
- supports_named_tensor: True

  - func: mul_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
  variants: method
@@ -2004,7 +1955,6 @@
  SparseCPU: mul_sparse_
  SparseCUDA: mul_sparse_
  MkldnnCPU: mkldnn_mul_
- supports_named_tensor: True

  - func: mul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
@@ -2013,7 +1963,6 @@
  SparseCPU: mul_out_sparse_cpu
  SparseCUDA: mul_out_sparse_cuda
  MkldnnCPU: mkldnn_mul_out
- supports_named_tensor: True

  # For C++ only, until we have conversion from C++ numbers to Tensor
  - func: mul.Scalar(Tensor self, Scalar other) -> Tensor
@@ -2027,15 +1976,12 @@
  use_c10_dispatcher: full
  variants: function, method
  dispatch:
- CPU: mv_cpu
- CUDA: legacy::cuda::_th_mv
- supports_named_tensor: True
+ CPU: mv
+ CUDA: mv
+ SparseCPU: mv_sparse
+ SparseCUDA: mv_sparse

- - func: mv.out(Tensor self, Tensor vec, *, Tensor(a!) out) -> Tensor(a!)
- dispatch:
- CPU: mv_cpu_out
- CUDA: legacy::cuda::_th_mv_out
- supports_named_tensor: True
+ - func: mv.out(Tensor self, Tensor vec, *, Tensor(a!) out) -> Tensor(a!)

  - func: mvlgamma(Tensor self, int p) -> Tensor
  use_c10_dispatcher: full
@@ -2054,14 +2000,14 @@
  SparseCUDA: narrow_copy_sparse

  - func: narrow(Tensor(a) self, int dim, int start, int length) -> Tensor(a)
+ use_c10_dispatcher: full
  variants: function, method
  device_guard: False
- supports_named_tensor: True

  - func: narrow.Tensor(Tensor(a) self, int dim, Tensor start, int length) -> Tensor(a)
+ use_c10_dispatcher: full
  variants: function, method
  device_guard: False
- supports_named_tensor: True

  - func: native_batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor)
  dispatch:
@@ -2074,6 +2020,7 @@
  CUDA: batch_norm_cuda_out

  - func: batch_norm_stats(Tensor input, float eps) -> (Tensor, Tensor)
+ use_c10_dispatcher: full
  dispatch:
  CUDA: batch_norm_stats_cuda

@@ -2090,7 +2037,7 @@
  dispatch:
  CUDA: batch_norm_gather_stats_cuda

- - func: batch_norm_gather_stats_with_counts(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, int[] counts) -> (Tensor, Tensor)
+ - func: batch_norm_gather_stats_with_counts(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, Tensor counts) -> (Tensor, Tensor)
  dispatch:
  CUDA: batch_norm_gather_stats_with_counts_cuda

@@ -2112,6 +2059,9 @@
  CPU: batch_norm_update_stats_cpu
  CUDA: batch_norm_update_stats_cuda

+ - func: is_vulkan_available() -> bool
+ use_c10_dispatcher: full
+
  - func: _nnpack_available() -> bool
  use_c10_dispatcher: full

@@ -2119,12 +2069,15 @@
  variants: function

  - func: _nnpack_spatial_convolution_backward(Tensor input, Tensor grad_output, Tensor weight, int[2] padding, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
+ use_c10_dispatcher: full
  variants: function

  - func: _nnpack_spatial_convolution_backward_input(Tensor input, Tensor grad_output, Tensor weight, int[2] padding) -> Tensor
+ use_c10_dispatcher: full
  variants: function

  - func: _nnpack_spatial_convolution_backward_weight(Tensor input, int[] weightsize, Tensor grad_output, int[2] padding) -> Tensor
+ use_c10_dispatcher: full
  variants: function

  - func: ones.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
@@ -2135,17 +2088,18 @@
  - func: ones.out(int[] size, *, Tensor(a!) out) -> Tensor(a!)

  - func: ones_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
- supports_named_tensor: True

  - func: pairwise_distance(Tensor x1, Tensor x2, float p=2, float eps=1e-06, bool keepdim=False) -> Tensor
  use_c10_dispatcher: full

  - func: cdist(Tensor x1, Tensor x2, float p=2, int? compute_mode=None) -> Tensor
- supports_named_tensor: True
+ use_c10_dispatcher: full
+
+ - func: _euclidean_dist(Tensor x1, Tensor x2) -> Tensor
+ use_c10_dispatcher: full

  - func: _cdist_forward(Tensor x1, Tensor x2, float p, int? compute_mode) -> Tensor
  use_c10_dispatcher: full
- supports_named_tensor: True

  - func: _cdist_backward(Tensor grad, Tensor x1, Tensor x2, float p, Tensor cdist) -> Tensor
  use_c10_dispatcher: full
@@ -2164,6 +2118,7 @@
  variants: function

  - func: permute(Tensor(a) self, int[] dims) -> Tensor(a)
+ use_c10_dispatcher: full
  variants: method # This is method-only to match the previous tensor API. In the future we could make this a function too.

  # Only exposed from C++ -- in Python,
@@ -2174,15 +2129,21 @@
  # behavior on Windows, for reasons I don't understand
  # (maybe related to capital letter collation somehow...)
  - func: numpy_T(Tensor(a) self) -> Tensor(a)
+ use_c10_dispatcher: full
  variants: method

  - func: pixel_shuffle(Tensor self, int upscale_factor) -> Tensor
  use_c10_dispatcher: full

+ - func: channel_shuffle(Tensor self, int groups) -> Tensor
+ use_c10_dispatcher: full
+ dispatch:
+ CPU: channel_shuffle
+ QuantizedCPU: quantized_channel_shuffle
+
  - func: is_pinned(Tensor self) -> bool
  use_c10_dispatcher: full
  variants: method
- supports_named_tensor: True

  - func: pin_memory(Tensor self) -> Tensor
  use_c10_dispatcher: full
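
channel_shuffle, declared in the hunk above, interleaves the channel dimension of an NCHW tensor across groups (the ShuffleNet operation), with a plain CPU kernel and a quantized variant. A hypothetical Ruby usage sketch:

    x = Torch.arange(8, dtype: :float32).reshape([1, 4, 1, 2]) # NCHW with 4 channels
    Torch.channel_shuffle(x, 2) # 2 groups: channel order 0,1,2,3 becomes 0,2,1,3
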
@@ -2196,6 +2157,30 @@
  use_c10_dispatcher: full
  variants: function

+ - func: rad2deg(Tensor self) -> Tensor
+ use_c10_dispatcher: full
+ variants: function, method
+ supports_named_tensor: True
+
+ - func: rad2deg_(Tensor(a!) self) -> Tensor(a!)
+ variants: function, method
+ supports_named_tensor: True
+
+ - func: rad2deg.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+ supports_named_tensor: True
+
+ - func: deg2rad(Tensor self) -> Tensor
+ use_c10_dispatcher: full
+ variants: function, method
+ supports_named_tensor: True
+
+ - func: deg2rad_(Tensor(a!) self) -> Tensor(a!)
+ variants: function, method
+ supports_named_tensor: True
+
+ - func: deg2rad.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+ supports_named_tensor: True
+
  - func: scalar_tensor(Scalar s, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor

  - func: rand.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
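
rad2deg and deg2rad, added above, are elementwise unit conversions (scaling by 180/pi and pi/180). A small sketch, assuming the regenerated Ruby bindings expose both:

    deg = Torch.tensor([0.0, 90.0, 180.0])
    rad = Torch.deg2rad(deg) # => approximately [0.0, 1.5708, 3.1416]
    Torch.rad2deg(rad)       # round-trips back to [0.0, 90.0, 180.0]
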
@@ -2213,7 +2198,6 @@
  - func: rand.generator_out(int[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)

  - func: rand_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
- supports_named_tensor: True

  - func: randint(int high, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor

@@ -2250,7 +2234,6 @@
  - func: randn.generator_out(int[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)

  - func: randn_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
- supports_named_tensor: True

  - func: randperm(int n, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor

@@ -2274,32 +2257,27 @@

  - func: reciprocal(Tensor self) -> Tensor
  use_c10_dispatcher: full
- supports_named_tensor: True
  variants: function, method

  - func: reciprocal_(Tensor(a!) self) -> Tensor(a!)
- supports_named_tensor: True
  variants: function, method

  - func: reciprocal.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- supports_named_tensor: True

  - func: neg(Tensor self) -> Tensor
  use_c10_dispatcher: full
- supports_named_tensor: True
  variants: function, method

  - func: neg_(Tensor(a!) self) -> Tensor(a!)
- supports_named_tensor: True
  variants: function, method

  - func: neg.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- supports_named_tensor: True
  dispatch:
  CPU: neg_out
  CUDA: neg_out

  - func: repeat(Tensor self, int[] repeats) -> Tensor
+ use_c10_dispatcher: full
  variants: method # This is method-only to match the previous tensor API. In the future we could make this a function too.

  - func: repeat_interleave.Tensor(Tensor repeats) -> Tensor
@@ -2318,11 +2296,12 @@
  variants: function, method

  - func: reshape(Tensor self, int[] shape) -> Tensor
+ use_c10_dispatcher: full
  variants: function, method
  device_guard: False
- supports_named_tensor: True

  - func: _mkldnn_reshape(Tensor self, int[] shape) -> Tensor
+ use_c10_dispatcher: full
  device_guard: False
  requires_tensor: True
  dispatch:
@@ -2335,15 +2314,12 @@

  - func: round(Tensor self) -> Tensor
  use_c10_dispatcher: full
- supports_named_tensor: True
  variants: function, method

  - func: round_(Tensor(a!) self) -> Tensor(a!)
- supports_named_tensor: True
  variants: function, method

  - func: round.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- supports_named_tensor: True
  dispatch:
  CPU: round_out
  CUDA: round_out
@@ -2360,10 +2336,8 @@
  CUDA: relu
  MkldnnCPU: mkldnn_relu
  QuantizedCPU: quantized_relu
- supports_named_tensor: True

  - func: relu_(Tensor(a!) self) -> Tensor(a!)
- supports_named_tensor: True
  variants: function, method
  dispatch:
  CPU: relu_
@@ -2379,6 +2353,7 @@
  CUDA: prelu_cuda

  - func: prelu_backward(Tensor grad_output, Tensor self, Tensor weight) -> (Tensor, Tensor)
+ use_c10_dispatcher: full
  variants: function, method
  dispatch:
  CPU: prelu_backward_cpu
@@ -2408,15 +2383,12 @@

  - func: rsqrt(Tensor self) -> Tensor
  use_c10_dispatcher: full
- supports_named_tensor: True
  variants: function, method

  - func: rsqrt_(Tensor(a!) self) -> Tensor(a!)
- supports_named_tensor: True
  variants: function, method

  - func: rsqrt.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- supports_named_tensor: True
  dispatch:
  CPU: rsqrt_out
  CUDA: rsqrt_out
@@ -2424,12 +2396,11 @@
  - func: select.Dimname(Tensor(a) self, Dimname dim, int index) -> Tensor(a)
  variants: function, method
  device_guard: False
- supports_named_tensor: True

  - func: select.int(Tensor(a) self, int dim, int index) -> Tensor(a)
+ use_c10_dispatcher: full
  variants: function, method
  device_guard: False
- supports_named_tensor: True

  - func: selu(Tensor self) -> Tensor
  use_c10_dispatcher: full
@@ -2441,10 +2412,8 @@

  - func: celu_(Tensor(a!) self, Scalar alpha=1.0) -> Tensor(a!)

-
  - func: sigmoid(Tensor self) -> Tensor
  use_c10_dispatcher: full
- supports_named_tensor: True
  variants: function, method
  dispatch:
  CPU: sigmoid
@@ -2453,7 +2422,6 @@
  MkldnnCPU: mkldnn_sigmoid

  - func: sigmoid_(Tensor(a!) self) -> Tensor(a!)
- supports_named_tensor: True
  variants: function, method
  dispatch:
  CPU: sigmoid_
@@ -2461,34 +2429,27 @@
  MkldnnCPU: mkldnn_sigmoid_

  - func: sigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- supports_named_tensor: True

  - func: sin(Tensor self) -> Tensor
  use_c10_dispatcher: full
- supports_named_tensor: True
  variants: function, method

  - func: sin_(Tensor(a!) self) -> Tensor(a!)
- supports_named_tensor: True
  variants: function, method

  - func: sin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- supports_named_tensor: True
  dispatch:
  CPU: sin_out
  CUDA: sin_out

  - func: sinh(Tensor self) -> Tensor
  use_c10_dispatcher: full
- supports_named_tensor: True
  variants: function, method

  - func: sinh_(Tensor(a!) self) -> Tensor(a!)
- supports_named_tensor: True
  variants: function, method

  - func: sinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- supports_named_tensor: True

  # Returns a copy of this `Variable` that is detached from its autograd graph.
  # This method is OK to call if the `Variable` is a view.
@@ -2504,7 +2465,6 @@
  - func: detach(Tensor self) -> Tensor
  use_c10_dispatcher: full
  manual_kernel_registration: True
- supports_named_tensor: True
  variants: function, method

  # Like `detach()`, but modifies this `Variable` in-place. This method may
@@ -2512,26 +2472,24 @@
  # this. If this `Variable` is a view, throws an `std::runtime_error()`.
  - func: detach_(Tensor(a!) self) -> Tensor(a!)
  manual_kernel_registration: True
- supports_named_tensor: True
  variants: function, method

  - func: size.int(Tensor self, int dim) -> int
  use_c10_dispatcher: full
  variants: function, method
  device_guard: False
- supports_named_tensor: True

  - func: size.Dimname(Tensor self, Dimname dim) -> int
  variants: function, method
  device_guard: False
- supports_named_tensor: True

  - func: slice.Tensor(Tensor(a) self, int dim=0, int start=0, int end=9223372036854775807, int step=1) -> Tensor(a)
+ use_c10_dispatcher: full
  variants: function, method
  device_guard: False
- supports_named_tensor: True

  - func: slogdet(Tensor self) -> (Tensor sign, Tensor logabsdet)
+ use_c10_dispatcher: full
  variants: function, method

  - func: smm(Tensor self, Tensor mat2) -> Tensor
@@ -2541,11 +2499,9 @@
  # softmax allows positional dtype, unlike most operators, because kwonly is BC-breaking when loading jit models.
  - func: softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor
  variants: function, method
- supports_named_tensor: True

  - func: softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
  variants: function, method
- supports_named_tensor: True

  - func: _softmax(Tensor self, int dim, bool half_to_float) -> Tensor
  use_c10_dispatcher: full
@@ -2561,27 +2517,26 @@
  CUDA: softmax_backward_cuda

  - func: split.Tensor(Tensor(a) self, int split_size, int dim=0) -> Tensor(a)[]
+ use_c10_dispatcher: full
  variants: function, method
  device_guard: False
- supports_named_tensor: True

  - func: split_with_sizes(Tensor self, int[] split_sizes, int dim=0) -> Tensor[]
+ use_c10_dispatcher: full
  variants: function, method
  device_guard: False
- supports_named_tensor: True

  - func: squeeze(Tensor(a) self) -> Tensor(a)
- supports_named_tensor: True
+ use_c10_dispatcher: full
  variants: function, method
  device_guard: False

  - func: squeeze.dim(Tensor(a) self, int dim) -> Tensor(a)
- supports_named_tensor: True
+ use_c10_dispatcher: full
  variants: function, method
  device_guard: False

  - func: squeeze.dimname(Tensor(a) self, Dimname dim) -> Tensor(a)
- supports_named_tensor: True
  variants: function, method
  device_guard: False

@@ -2609,6 +2564,7 @@
  SparseCUDA: _sspaddmm_out_cuda

  - func: stack(Tensor[] tensors, int dim=0) -> Tensor
+ use_c10_dispatcher: full

  - func: stack.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)

@@ -2619,114 +2575,95 @@
  - func: stft(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool normalized=False, bool onesided=True) -> Tensor
  variants: function, method

+ - func: istft(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool center=True, bool normalized=False, bool onesided=True, int? length=None) -> Tensor
+ variants: function, method
+
  - func: stride.int(Tensor self, int dim) -> int
  use_c10_dispatcher: full
  variants: function, method
  device_guard: False
- supports_named_tensor: True

  - func: stride.Dimname(Tensor self, Dimname dim) -> int
  variants: function, method
  device_guard: False
- supports_named_tensor: True

  - func: sum(Tensor self, *, ScalarType? dtype=None) -> Tensor
  variants: function, method
- supports_named_tensor: True

  - func: sum.dim_IntList(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
  variants: function, method
- supports_named_tensor: True

  - func: sum.dim_DimnameList(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
  variants: function, method
- supports_named_tensor: True

  - func: sum.IntList_out(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
- supports_named_tensor: True

  - func: sum.DimnameList_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
- supports_named_tensor: True

  - func: sum_to_size(Tensor self, int[] size) -> Tensor
+ use_c10_dispatcher: full
  variants: method
  device_guard: False

  - func: sqrt(Tensor self) -> Tensor
  use_c10_dispatcher: full
- supports_named_tensor: True
  variants: function, method

  - func: sqrt_(Tensor(a!) self) -> Tensor(a!)
- supports_named_tensor: True
  variants: function, method

  - func: sqrt.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- supports_named_tensor: True

  - func: square(Tensor self) -> Tensor
  use_c10_dispatcher: full
- supports_named_tensor: True
  variants: function, method

  - func: square_(Tensor(a!) self) -> Tensor(a!)
- supports_named_tensor: True
  variants: function, method

  - func: std(Tensor self, bool unbiased=True) -> Tensor
  use_c10_dispatcher: full
  variants: function, method
- supports_named_tensor: True

  - func: std.dim(Tensor self, int[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor
+ use_c10_dispatcher: full
  variants: function, method
- supports_named_tensor: True

  - func: std_mean(Tensor self, bool unbiased=True) -> (Tensor, Tensor)
+ use_c10_dispatcher: full
  variants: function
- supports_named_tensor: True

  - func: std_mean.dim(Tensor self, int[1] dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)
+ use_c10_dispatcher: full
  variants: function
- supports_named_tensor: True

  - func: std_mean.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)
  variants: function
- supports_named_tensor: True

  - func: std.out(Tensor self, int[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
- supports_named_tensor: True

  - func: std.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor
  variants: function, method
- supports_named_tensor: True

  - func: std.names_out(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
- supports_named_tensor: True

  - func: prod(Tensor self, *, ScalarType? dtype=None) -> Tensor
  variants: function, method
- supports_named_tensor: True

  - func: prod.dim_int(Tensor self, int dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
  variants: function, method
- supports_named_tensor: True

  - func: prod.int_out(Tensor self, int dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
- supports_named_tensor: True

  - func: prod.dim_Dimname(Tensor self, Dimname dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
  variants: function, method
- supports_named_tensor: True

  - func: prod.Dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
- supports_named_tensor: True
-

  - func: t(Tensor(a) self) -> Tensor(a)
+ use_c10_dispatcher: full
  device_guard: False
  variants: function, method
- supports_named_tensor: True

  - func: t_(Tensor(a!) self) -> Tensor(a!)
  device_guard: False
@@ -2734,25 +2671,15 @@

  - func: tan(Tensor self) -> Tensor
  use_c10_dispatcher: full
- supports_named_tensor: True
  variants: function, method

  - func: tan_(Tensor(a!) self) -> Tensor(a!)
- supports_named_tensor: True
  variants: function, method
- dispatch:
- CPU: _tan__cpu
- CUDA: _tan__cuda

  - func: tan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- supports_named_tensor: True
- dispatch:
- CPU: _tan_out_cpu
- CUDA: _tan_out_cuda

  - func: tanh(Tensor self) -> Tensor
  use_c10_dispatcher: full
- supports_named_tensor: True
  variants: function, method
  dispatch:
  CPU: tanh
@@ -2760,39 +2687,30 @@
  QuantizedCPU: quantized_tanh

  - func: tanh_(Tensor(a!) self) -> Tensor(a!)
- supports_named_tensor: True
  variants: function, method
- dispatch:
- CPU: _tanh__cpu
- CUDA: _tanh__cuda

  - func: tanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- supports_named_tensor: True
- dispatch:
- CPU: _tanh_out_cpu
- CUDA: _tanh_out_cuda

  - func: tensordot(Tensor self, Tensor other, int[] dims_self, int[] dims_other) -> Tensor
+ use_c10_dispatcher: full
  variants: function

  # TODO: namespace threshold in 'nn'
  - func: threshold(Tensor self, Scalar threshold, Scalar value) -> Tensor
  use_c10_dispatcher: full
  variants: function
- supports_named_tensor: True
  dispatch:
  CPU: threshold
  CUDA: threshold_cuda
+ QuantizedCPU: quantized_threshold

  - func: threshold_(Tensor(a!) self, Scalar threshold, Scalar value) -> Tensor(a!)
  variants: function
- supports_named_tensor: True
  dispatch:
  CPU: threshold_
  CUDA: threshold__cuda

  - func: threshold.out(Tensor self, Scalar threshold, Scalar value, *, Tensor(a!) out) -> Tensor(a!)
- supports_named_tensor: True
  dispatch:
  CPU: threshold_out
  CUDA: threshold_out_cuda
@@ -2805,14 +2723,13 @@
  CUDA: threshold_backward_cuda

  - func: transpose.int(Tensor(a) self, int dim0, int dim1) -> Tensor(a)
+ use_c10_dispatcher: full
  variants: function, method
  device_guard: False
- supports_named_tensor: True

  - func: transpose.Dimname(Tensor(a) self, Dimname dim0, Dimname dim1) -> Tensor(a)
  variants: function, method
  device_guard: False
- supports_named_tensor: True

  - func: _mkldnn_transpose(Tensor self, int dim0, int dim1) -> Tensor
  use_c10_dispatcher: full
@@ -2837,12 +2754,22 @@
  variants: function

  - func: flip(Tensor self, int[] dims) -> Tensor
+ use_c10_dispatcher: full
  variants: function, method
  dispatch:
  CPU: flip_cpu
  CUDA: flip_cuda

+ - func: fliplr(Tensor self) -> Tensor
+ use_c10_dispatcher: full
+ variants: function, method
+
+ - func: flipud(Tensor self) -> Tensor
+ use_c10_dispatcher: full
+ variants: function, method
+
  - func: roll(Tensor self, int[1] shifts, int[1] dims=[]) -> Tensor
+ use_c10_dispatcher: full
  variants: function, method
  dispatch:
  CPU: roll_cpu
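
fliplr and flipud, added above, are NumPy-named shorthands over the existing flip kernel: they reverse dim 1 and dim 0 of a matrix, respectively. Sketch (binding names assumed from the schema):

    m = Torch.arange(6).reshape([2, 3])
    Torch.flipud(m) # reverses the rows,    same as m.flip([0])
    Torch.fliplr(m) # reverses the columns, same as m.flip([1])
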
@@ -2851,6 +2778,7 @@
  # default int[] value [0,1] should not add space after comma, since native_parse.py uses ', ' to split args

  - func: rot90(Tensor self, int k=1, int[] dims=[0,1]) -> Tensor
+ use_c10_dispatcher: full
  variants: function, method

  - func: trapz.x(Tensor y, Tensor x, *, int dim=-1) -> Tensor
@@ -2860,6 +2788,7 @@
  use_c10_dispatcher: full

  - func: _trilinear(Tensor i1, Tensor i2, Tensor i3, int[] expand1, int[] expand2, int[] expand3, int[] sumdim, int unroll_dim=1) -> Tensor
+ use_c10_dispatcher: full

  - func: triplet_margin_loss(Tensor anchor, Tensor positive, Tensor negative, float margin=1.0, float p=2, float eps=1e-06, bool swap=False, int reduction=Mean) -> Tensor
  use_c10_dispatcher: full
@@ -2872,7 +2801,6 @@
  CUDA: true_divide
  SparseCPU: true_divide_sparse
  SparseCUDA: true_divide_sparse
- supports_named_tensor: True

  - func: true_divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
  variants: method
@@ -2881,7 +2809,6 @@
  CUDA: true_divide_
  SparseCPU: true_divide_sparse_
  SparseCUDA: true_divide_sparse_
- supports_named_tensor: True

  - func: true_divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
@@ -2889,28 +2816,22 @@
  CUDA: true_divide_out
  SparseCPU: true_divide_out_sparse_zerodim
  SparseCUDA: true_divide_out_sparse_zerodim
- supports_named_tensor: True

  - func: true_divide.Scalar(Tensor self, Scalar other) -> Tensor
  use_c10_dispatcher: full
  variants: function, method
- supports_named_tensor: True

  - func: true_divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
  variants: method
- supports_named_tensor: True

  - func: trunc(Tensor self) -> Tensor
  use_c10_dispatcher: full
- supports_named_tensor: True
  variants: function, method

  - func: trunc_(Tensor(a!) self) -> Tensor(a!)
- supports_named_tensor: True
  variants: function, method

  - func: trunc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- supports_named_tensor: True
  dispatch:
  CPU: trunc_out
  CUDA: trunc_out
@@ -2924,24 +2845,28 @@
  variants: function

  - func: _unique(Tensor self, bool sorted=True, bool return_inverse=False) -> (Tensor, Tensor)
+ use_c10_dispatcher: full
  variants: function
  dispatch:
  CPU: _unique_cpu
  CUDA: _unique_cuda

  - func: unique_dim(Tensor self, int dim, bool sorted=True, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor)
+ use_c10_dispatcher: full
  variants: function
  dispatch:
  CPU: unique_dim_cpu
  CUDA: unique_dim_cuda

  - func: unique_consecutive(Tensor self, bool return_inverse=False, bool return_counts=False, int? dim=None) -> (Tensor, Tensor, Tensor)
+ use_c10_dispatcher: full
  variants: function
  dispatch:
  CPU: unique_consecutive_cpu
  CUDA: unique_consecutive_cuda

  - func: unique_dim_consecutive(Tensor self, int dim, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor)
+ use_c10_dispatcher: full
  variants: function
  dispatch:
  CPU: unique_dim_consecutive_cpu
@@ -2952,14 +2877,17 @@
  # Please don't rely on these two operators, they will be removed soon

  - func: _unique2(Tensor self, bool sorted=True, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor)
+ use_c10_dispatcher: full
  variants: function
  dispatch:
  CPU: _unique2_cpu
  CUDA: _unique2_cuda

  - func: _unsafe_view(Tensor self, int[] size) -> Tensor
+ use_c10_dispatcher: full

  - func: unsqueeze(Tensor(a) self, int dim) -> Tensor(a)
+ use_c10_dispatcher: full
  variants: function, method
  device_guard: False

@@ -2967,36 +2895,34 @@
  variants: method
  device_guard: False

+ - func: vander(Tensor x, int? N=None, bool increasing=False) -> Tensor
+ use_c10_dispatcher: full
+
  - func: var(Tensor self, bool unbiased=True) -> Tensor
  use_c10_dispatcher: full
  variants: function, method
- supports_named_tensor: True

  - func: var.dim(Tensor self, int[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor
+ use_c10_dispatcher: full
  variants: function, method
- supports_named_tensor: True

  - func: var.out(Tensor self, int[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
- supports_named_tensor: True

  - func: var.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor
  variants: function, method
- supports_named_tensor: True

  - func: var.names_out(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
- supports_named_tensor: True

  - func: var_mean(Tensor self, bool unbiased=True) -> (Tensor, Tensor)
+ use_c10_dispatcher: full
  variants: function
- supports_named_tensor: True

  - func: var_mean.dim(Tensor self, int[1] dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)
+ use_c10_dispatcher: full
  variants: function
- supports_named_tensor: True

  - func: var_mean.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)
  variants: function
- supports_named_tensor: True

  - func: view_as(Tensor self, Tensor other) -> Tensor
  use_c10_dispatcher: full
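
vander, declared above, builds a Vandermonde matrix: row i holds powers of x[i], decreasing by default and increasing with the boolean flag, with N defaulting to the input length. A hypothetical Ruby sketch:

    x = Torch.tensor([1.0, 2.0, 3.0])
    Torch.vander(x)
    # => [[1, 1, 1],
    #     [4, 2, 1],
    #     [9, 3, 1]]  (columns are x**2, x**1, x**0)
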
@@ -3011,6 +2937,7 @@
  variants: function, method

  - func: where(Tensor condition) -> Tensor[]
+ use_c10_dispatcher: full
  variants: function

  - func: _s_where(Tensor condition, Tensor self, Tensor other) -> Tensor
@@ -3018,6 +2945,7 @@
  variants: function

  - func: norm_except_dim(Tensor v, int pow=2, int dim=0) -> Tensor
+ use_c10_dispatcher: full
  variants: function

  # VariableType::_weight_norm does not want to be given a gap in the autograd graph,
@@ -3027,16 +2955,19 @@
  variants: function

  - func: _weight_norm_cuda_interface(Tensor v, Tensor g, int dim=0) -> (Tensor, Tensor)
+ use_c10_dispatcher: full
  variants: function
  dispatch:
  CUDA: weight_norm_cuda

  - func: _weight_norm_cuda_interface_backward(Tensor grad_w, Tensor saved_v, Tensor saved_g, Tensor saved_norms, int dim) -> (Tensor, Tensor)
+ use_c10_dispatcher: full
  variants: function
  dispatch:
  CUDA: weight_norm_cuda_backward

  - func: _weight_norm_differentiable_backward(Tensor grad_w, Tensor saved_v, Tensor saved_g, Tensor saved_norms, int dim) -> (Tensor, Tensor)
+ use_c10_dispatcher: full
  variants: function

  - func: zeros.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
@@ -3047,7 +2978,6 @@
  - func: zeros.out(int[] size, *, Tensor(a!) out) -> Tensor(a!)

  - func: zeros_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
- supports_named_tensor: True

  - func: _standard_gamma_grad(Tensor self, Tensor output) -> Tensor
  use_c10_dispatcher: full
@@ -3079,6 +3009,11 @@
  CPU: _s_poisson_cpu
  CUDA: _s_poisson_cuda

+ - func: binomial(Tensor count, Tensor prob, Generator? generator=None) -> Tensor
+ dispatch:
+ CPU: _s_binomial_cpu
+ CUDA: _s_binomial_cuda
+
  # When more variants get ported to native, this dispatch will get more
  # complicated

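The binomial op declared above draws one sample per element from Binomial(count, prob); both parameters are tensors and broadcast against each other. A sketch under the assumption that the op is reachable from Ruby despite its optional Generator argument:

    count = Torch.tensor([10.0, 100.0])
    prob  = Torch.tensor([0.5, 0.1])
    Torch.binomial(count, prob) # random draws, e.g. tensor([6.0, 12.0])
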
@@ -3095,14 +3030,46 @@
3095
3030
  - func: _sparse_sum.dtype(Tensor self, *, ScalarType dtype) -> Tensor
3096
3031
 
3097
3032
  - func: _sparse_sum.dim(Tensor self, int[1] dim) -> Tensor
3033
+ use_c10_dispatcher: full
3098
3034
 
3099
3035
  - func: _sparse_sum.dim_dtype(Tensor self, int[1] dim, *, ScalarType dtype) -> Tensor
3100
3036
 
3101
3037
  - func: _sparse_sum_backward(Tensor grad, Tensor self, int[] dim) -> Tensor
3038
+ use_c10_dispatcher: full
3102
3039
  dispatch:
3103
3040
  SparseCPU: _sparse_sum_backward_cpu
3104
3041
  SparseCUDA: _sparse_sum_backward_cuda
3105
3042
 
3043
+ - func: _sparse_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor
3044
+ variants: function
3045
+
3046
+ - func: _sparse_softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
3047
+ variants: function
3048
+
3049
+ - func: _sparse_softmax(Tensor self, int dim, bool half_to_float) -> Tensor
3050
+ use_c10_dispatcher: full
3051
+ dispatch:
3052
+ SparseCPU: softmax_sparse_cpu
3053
+
3054
+ - func: _sparse_softmax_backward_data(Tensor grad_output, Tensor output, int dim, Tensor self) -> Tensor
3055
+ dispatch:
3056
+ SparseCPU: softmax_backward_sparse_cpu
3057
+
3058
+ - func: _sparse_log_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor
3059
+ variants: function
3060
+
3061
+ - func: _sparse_log_softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
3062
+ variants: function
3063
+
3064
+ - func: _sparse_log_softmax(Tensor self, int dim, bool half_to_float) -> Tensor
3065
+ use_c10_dispatcher: full
3066
+ dispatch:
3067
+ SparseCPU: log_softmax_sparse_cpu
3068
+
3069
+ - func: _sparse_log_softmax_backward_data(Tensor grad_output, Tensor output, int dim, Tensor self) -> Tensor
3070
+ dispatch:
3071
+ SparseCPU: log_softmax_backward_sparse_cpu
3072
+
  - func: norm.ScalarOpt_dtype(Tensor self, Scalar? p, *, ScalarType dtype) -> Tensor
  variants: function, method

@@ -3114,6 +3081,7 @@
  variants: function, method

  - func: norm.ScalarOpt_dim(Tensor self, Scalar? p, int[1] dim, bool keepdim=False) -> Tensor
+ use_c10_dispatcher: full
  variants: function, method

  - func: norm.dtype_out(Tensor self, Scalar? p, int[1] dim, bool keepdim, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!)
@@ -3135,6 +3103,7 @@
  variants: function

  - func: frobenius_norm.dim(Tensor self, int[1] dim, bool keepdim=False) -> Tensor
+ use_c10_dispatcher: full
  variants: function

  - func: frobenius_norm.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
@@ -3148,6 +3117,7 @@
  variants: function

  - func: nuclear_norm.dim(Tensor self, int[2] dim, bool keepdim=False) -> Tensor
+ use_c10_dispatcher: full
  variants: function

  - func: nuclear_norm.dim_out(Tensor self, int[2] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
@@ -3162,15 +3132,13 @@
  SparseCUDA: clone_sparse
  MkldnnCPU: mkldnn_clone
  QuantizedCPU: quantized_clone
- supports_named_tensor: True
+ QuantizedCUDA: quantized_clone

  - func: resize_as_(Tensor(a!) self, Tensor the_template, *, MemoryFormat? memory_format=None) -> Tensor(a!)
  manual_kernel_registration: True
- supports_named_tensor: True
  variants: function, method

  - func: pow.Tensor_Scalar_out(Tensor self, Scalar exponent, *, Tensor(a!) out) -> Tensor(a!)
- supports_named_tensor: True
  dispatch:
  CPU: pow_out
  CUDA: pow_out
@@ -3180,7 +3148,6 @@
  - func: pow.Tensor_Scalar(Tensor self, Scalar exponent) -> Tensor
  use_c10_dispatcher: full
  variants: function, method
- supports_named_tensor: True
  dispatch:
  CPU: pow
  CUDA: pow
@@ -3188,7 +3155,6 @@
  SparseCUDA: pow_sparse_scalar

  - func: zero_(Tensor(a!) self) -> Tensor(a!)
- supports_named_tensor: True
  variants: method, function
  dispatch:
  CPU: zero_
@@ -3203,7 +3169,6 @@
  CUDA: sub_out
  SparseCPU: sub_out_sparse
  SparseCUDA: sub_out_sparse
- supports_named_tensor: True

  - func: sub.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
  use_c10_dispatcher: full
@@ -3213,7 +3178,6 @@
  CUDA: sub
  SparseCPU: sub_sparse
  SparseCUDA: sub_sparse
- supports_named_tensor: True

  - func: sub_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)
  variants: method
@@ -3222,64 +3186,55 @@
  CUDA: sub_
  SparseCPU: sub_sparse_
  SparseCUDA: sub_sparse_
- supports_named_tensor: True

  # For C++ only, until we have conversion from C++ numbers to Tensor
  - func: sub.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor
  use_c10_dispatcher: full
  variants: function, method
- supports_named_tensor: True

  - func: sub_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)
  variants: method
- supports_named_tensor: True

  - func: rsub.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
  use_c10_dispatcher: full
  variants: function
- supports_named_tensor: True

  # For C++ only, until we have conversion from C++ numbers to Tensor
  - func: rsub.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor
  use_c10_dispatcher: full
  variants: function
- supports_named_tensor: True

  # Functionally the same as addmm, but we give it a different derivative formula
  # that doesn't propagate gradients to non-present entries on sparse.
  - func: _sparse_addmm(Tensor self, Tensor sparse, Tensor dense, *, Scalar beta=1, Scalar alpha=1) -> Tensor
  use_c10_dispatcher: full
- named_guard: False

  - func: addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
  dispatch:
- CPU: legacy::cpu::_th_addmm_out
- CUDA: legacy::cuda::_th_addmm_out
+ CPU: addmm_cpu_out
+ CUDA: addmm_out_cuda
  SparseCPU: addmm_out_sparse_dense_cpu
  SparseCUDA: addmm_out_sparse_dense_cuda
- supports_named_tensor: True

  - func: addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
  use_c10_dispatcher: full
  variants: function, method
  dispatch:
- CPU: legacy::cpu::_th_addmm
- CUDA: legacy::cuda::_th_addmm
+ CPU: addmm_cpu
+ CUDA: addmm_cuda
  SparseCPU: addmm_sparse_dense_cpu
  SparseCUDA: addmm_sparse_dense_cuda
- supports_named_tensor: True
+ Vulkan: vulkan_addmm

  - func: addmm_(Tensor(a!) self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)
  variants: method
  dispatch:
  CPU: legacy::cpu::_th_addmm_
- CUDA: legacy::cuda::_th_addmm_
+ CUDA: addmm__cuda
  # Warning! For whatever reason, the inplace sparse addmm is NON
  # broadcasting
  SparseCPU: s_addmm_sparse_dense_cpu_
  SparseCUDA: s_addmm_sparse_dense_cuda_
- supports_named_tensor: True
-

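# NB (annotation): a recurring pattern in this release: dispatch entries that
# pointed at legacy TH kernels (`legacy::cpu::_th_*` / `legacy::cuda::_th_*`)
# are replaced with native ATen kernels (here `addmm_cpu`, `addmm_cuda`, etc.),
# and a Vulkan backend entry appears for the first time on `addmm`.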
  # NOTE [ Sparse: autograd and API ]
  #
@@ -3396,7 +3351,6 @@
  # shared. In other words, their outputs are non-differentiable views of the
  # sparse tensor.

-
  # FIXME: would be nicer if TensorOptions was optional based; not adding default arguments for options given
  # the default would never make sense.
  - func: sparse_coo_tensor.size(int[] size, *, ScalarType dtype, Layout layout, Device device, bool pin_memory=False) -> Tensor
@@ -3433,7 +3387,6 @@
  SparseCUDA: sparse_resize_and_clear_
  requires_tensor: True

-
  - func: sparse_mask(Tensor self, Tensor mask) -> Tensor
  use_c10_dispatcher: full
  variants: method
@@ -3442,7 +3395,6 @@
  SparseCUDA: sparse_mask_cuda
  requires_tensor: True

-
  - func: to_dense(Tensor self) -> Tensor
  use_c10_dispatcher: full
  variants: method
@@ -3474,7 +3426,6 @@
  requires_tensor: True
  device_guard: False

-
  - func: dense_dim(Tensor self) -> int
  use_c10_dispatcher: full
  variants: method
@@ -3494,7 +3445,6 @@
  requires_tensor: True
  device_guard: False

-
  - func: _nnz(Tensor self) -> int
  use_c10_dispatcher: full
  variants: method
@@ -3504,7 +3454,6 @@
  requires_tensor: True
  device_guard: False

-
  - func: coalesce(Tensor self) -> Tensor
  use_c10_dispatcher: full
  variants: method
@@ -3513,7 +3462,6 @@
  SparseCUDA: coalesce_sparse_cuda
  requires_tensor: True

-
  - func: is_coalesced(Tensor self) -> bool
  use_c10_dispatcher: full
  variants: method
@@ -3522,10 +3470,9 @@
  SparseCUDA: is_coalesced_sparse
  requires_tensor: True
  device_guard: False
- supports_named_tensor: True
-

  - func: _indices(Tensor(a) self) -> Tensor(a)
+ use_c10_dispatcher: full
  variants: method
  dispatch:
  SparseCPU: _indices_sparse
@@ -3534,6 +3481,7 @@
  device_guard: False

  - func: _values(Tensor(a) self) -> Tensor(a)
+ use_c10_dispatcher: full
  variants: method
  dispatch:
  SparseCPU: _values_sparse
@@ -3553,6 +3501,7 @@
  device_guard: False

  - func: indices(Tensor(a) self) -> Tensor(a)
+ use_c10_dispatcher: full
  variants: method
  dispatch:
  SparseCPU: indices_sparse
@@ -3561,6 +3510,7 @@
  device_guard: False

  - func: values(Tensor(a) self) -> Tensor(a)
+ use_c10_dispatcher: full
  variants: method
  dispatch:
  SparseCPU: values_sparse
@@ -3568,7 +3518,6 @@
  requires_tensor: True
  device_guard: False

-
  - func: hspmm.out(Tensor mat1, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
  SparseCPU: hspmm_out_sparse_cpu
@@ -3590,12 +3539,11 @@
  requires_tensor: True

  - func: unbind.int(Tensor(a) self, int dim=0) -> Tensor(a)[]
+ use_c10_dispatcher: full
  variants: function, method
- supports_named_tensor: True

  - func: unbind.Dimname(Tensor(a) self, Dimname dim) -> Tensor(a)[]
  variants: function, method
- supports_named_tensor: True

  - func: to_sparse.sparse_dim(Tensor self, int sparse_dim) -> Tensor
  use_c10_dispatcher: full
@@ -3618,6 +3566,7 @@
  CPU: dense_to_mkldnn

  - func: mkldnn_reorder_conv2d_weight(Tensor self, int[2] padding=0, int[2] stride=1, int[2] dilation=1, int groups=1) -> Tensor
+ use_c10_dispatcher: full
  variants: function
  python_module: nn
  dispatch:
@@ -3629,42 +3578,60 @@
  - func: quantize_per_tensor(Tensor self, float scale, int zero_point, ScalarType dtype) -> Tensor
  variants: function
  dispatch:
- CPU: quantize_per_tensor_cpu
+ CPU: quantize_per_tensor
+ CUDA: quantize_per_tensor
+
+ - func: quantize_per_tensor.tensors(Tensor[] tensors, Tensor scales, Tensor zero_points, ScalarType dtype) -> Tensor[]
+ variants: function
+ dispatch:
+ CPU: quantize_per_tensor_list_cpu

  - func: quantize_per_channel(Tensor self, Tensor scales, Tensor zero_points, int axis, ScalarType dtype) -> Tensor
  variants: function
  dispatch:
  CPU: quantize_per_channel_cpu

- - func: dequantize(Tensor self) -> Tensor
+ - func: dequantize.self(Tensor self) -> Tensor
  use_c10_dispatcher: full
  variants: function, method
  dispatch:
- QuantizedCPU: dequantize_quant
+ QuantizedCPU: dequantize_quant
+ QuantizedCUDA: dequantize_quant
+
+ - func: dequantize.tensors(Tensor[] tensors) -> Tensor[]
+ use_c10_dispatcher: full
+ variants: function
+ dispatch:
+ QuantizedCPU: dequantize_tensors_quant

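# NB (annotation): `dequantize` is renamed to the `dequantize.self` overload so
# a new `dequantize.tensors` list overload can sit beside it, and several
# quantized ops in this region gain QuantizedCUDA (and CUDA) dispatch entries
# alongside the existing CPU kernels.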
  - func: q_scale(Tensor self) -> float
  use_c10_dispatcher: full
  variants: function, method
  dispatch:
  QuantizedCPU: q_scale_quant
+ QuantizedCUDA: q_scale_quant

  - func: q_zero_point(Tensor self) -> int
  use_c10_dispatcher: full
  variants: function, method
  dispatch:
  QuantizedCPU: q_zero_point_quant
+ QuantizedCUDA: q_zero_point_quant

  - func: q_per_channel_scales(Tensor self) -> Tensor
+ use_c10_dispatcher: full
  variants: function, method
  dispatch:
  QuantizedCPU: q_per_channel_scales_quant

  - func: q_per_channel_zero_points(Tensor self) -> Tensor
+ use_c10_dispatcher: full
  variants: function, method
  dispatch:
  QuantizedCPU: q_per_channel_zero_points_quant

  - func: q_per_channel_axis(Tensor self) -> int
+ use_c10_dispatcher: full
  variants: function, method
  dispatch:
  QuantizedCPU: q_per_channel_axis_quant
@@ -3673,14 +3640,17 @@
  use_c10_dispatcher: full
  variants: function, method
  dispatch:
- QuantizedCPU: int_repr_quant
+ QuantizedCPU: int_repr_quant_cpu
+ QuantizedCUDA: int_repr_quant_cuda

  - func: _make_per_tensor_quantized_tensor(Tensor self, float scale, int zero_point) -> Tensor
  use_c10_dispatcher: full
  dispatch:
  CPU: make_per_tensor_quantized_tensor_cpu
+ CUDA: make_per_tensor_quantized_tensor_cuda

  - func: _make_per_channel_quantized_tensor(Tensor self, Tensor scale, Tensor zero_point, int axis) -> Tensor
+ use_c10_dispatcher: full
  dispatch:
  CPU: make_per_channel_quantized_tensor_cpu

@@ -3689,6 +3659,7 @@
  variants: method
  dispatch:
  QuantizedCPU: qscheme_quant
+ QuantizedCUDA: qscheme_quant

  - func: fake_quantize_per_tensor_affine(Tensor self, float scale, int zero_point, int quant_min, int quant_max) -> Tensor
  use_c10_dispatcher: full
@@ -3706,31 +3677,34 @@
  use_c10_dispatcher: full
  variants: function

+ - func: _choose_qparams_per_tensor(Tensor self, bool reduce_range=False) -> (float, int)
+ use_c10_dispatcher: full
+ variants: function
+
  # to(Device) must not exist because all constructors of Device also works for
  # TensorOptions. Otherwise, an ambiguity error is thrown.
  # See NOTE [ TensorOptions Constructors ].
  - func: to.dtype_layout(Tensor self, *, ScalarType dtype, Layout layout, Device device, bool pin_memory=False, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor
  variants: method
  device_guard: False
- supports_named_tensor: True

  - func: to.device(Tensor self, Device device, ScalarType dtype, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor
  variants: method
  device_guard: False
- supports_named_tensor: True

  - func: to.dtype(Tensor self, ScalarType dtype, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor
  variants: method
  device_guard: False
- supports_named_tensor: True

  - func: to.other(Tensor self, Tensor other, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor
  variants: method
  device_guard: False

  - func: meshgrid(Tensor[] tensors) -> Tensor[]
+ use_c10_dispatcher: full

  - func: cartesian_prod(Tensor[] tensors) -> Tensor
+ use_c10_dispatcher: full
  variants: function

  - func: combinations(Tensor self, int r=2, bool with_replacement=False) -> Tensor
@@ -3740,7 +3714,6 @@
  - func: item(Tensor self) -> Scalar
  use_c10_dispatcher: full
  variants: method
- supports_named_tensor: True

  - func: result_type.Tensor(Tensor tensor, Tensor other) -> ScalarType
  variants: function
@@ -3766,7 +3739,6 @@
  CPU: _local_scalar_dense_cpu
  CUDA: _local_scalar_dense_cuda
  variants: function
- supports_named_tensor: True

  # Fused RNN kernels
  - func: _thnn_fused_lstm_cell(Tensor input_gates, Tensor hidden_gates, Tensor cx, Tensor? input_bias=None, Tensor? hidden_bias=None) -> (Tensor, Tensor, Tensor)
@@ -3784,6 +3756,7 @@
  CUDA: _thnn_fused_gru_cell_cuda

  - func: _thnn_fused_gru_cell_backward(Tensor grad_hy, Tensor workspace, bool has_bias) -> (Tensor, Tensor, Tensor, Tensor, Tensor)
+ use_c10_dispatcher: full
  dispatch:
  CUDA: _thnn_fused_gru_cell_backward_cuda

@@ -3791,20 +3764,28 @@

  # RNN cells and layers
  - func: lstm.input(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor, Tensor)
+ use_c10_dispatcher: full

  - func: lstm.data(Tensor data, Tensor batch_sizes, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor, Tensor)
+ use_c10_dispatcher: full

  - func: gru.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor)
+ use_c10_dispatcher: full

  - func: gru.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor)
+ use_c10_dispatcher: full

  - func: rnn_tanh.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor)
+ use_c10_dispatcher: full

  - func: rnn_tanh.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor)
+ use_c10_dispatcher: full

  - func: rnn_relu.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor)
+ use_c10_dispatcher: full

  - func: rnn_relu.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor)
+ use_c10_dispatcher: full

  - func: lstm_cell(Tensor input, Tensor[] hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> (Tensor, Tensor)

@@ -3814,19 +3795,24 @@

  - func: rnn_relu_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> Tensor

+ # Quantized RNN layer registration has been moved to C10 dispatch in `RNN.cpp`
+
  # Quantized RNN layers
- - func: quantized_lstm(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first, *, ScalarType? dtype=None, bool use_dynamic=False) -> (Tensor, Tensor, Tensor)
+ # - func: quantized_lstm(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first, *, ScalarType? dtype=None, bool use_dynamic=False) -> (Tensor, Tensor, Tensor)

- - func: quantized_lstm.data(Tensor data, Tensor batch_sizes, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, *, ScalarType? dtype=None, bool use_dynamic=False) -> (Tensor, Tensor, Tensor)
+ # - func: quantized_lstm.data(Tensor data, Tensor batch_sizes, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, *, ScalarType? dtype=None, bool use_dynamic=False) -> (Tensor, Tensor, Tensor)

  # Quantized GRU layers

- - func: quantized_gru.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor)
+ # - func: quantized_gru.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor)
+ # use_c10_dispatcher: full

- - func: quantized_gru.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor)
+ # - func: quantized_gru.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor)
+ # use_c10_dispatcher: full

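# NB (annotation): the four quantized_lstm/quantized_gru schemas above are
# commented out rather than deleted; per the added comment, their registration
# now happens directly in C++ (`RNN.cpp`) through the c10 dispatcher instead of
# being generated from this file.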
  # Quantized RNN cells
  - func: quantized_lstm_cell(Tensor input, Tensor[] hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> (Tensor, Tensor)
+ use_c10_dispatcher: full

  - func: quantized_gru_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> Tensor
  use_c10_dispatcher: full
@@ -3839,10 +3825,13 @@

  # PackedSequence utilities
  - func: _pack_padded_sequence(Tensor input, Tensor lengths, bool batch_first) -> (Tensor, Tensor)
+ use_c10_dispatcher: full

  - func: _pack_padded_sequence_backward(Tensor grad, int[] input_size, Tensor batch_sizes, bool batch_first) -> Tensor
+ use_c10_dispatcher: full

  - func: _pad_packed_sequence(Tensor data, Tensor batch_sizes, bool batch_first, Scalar padding_value, int total_length) -> (Tensor, Tensor)
+ use_c10_dispatcher: full

  # wrappers for legacy TH methods

@@ -3857,9 +3846,10 @@
  variants: method
  device_guard: False
  dispatch:
- CPU: legacy::cpu::_th_set_
- CUDA: legacy::cuda::_th_set_
- QuantizedCPU: set_storage
+ CPU: set_storage_cpu_
+ CUDA: set_storage_cuda_
+ QuantizedCPU: set_storage_quantized_
+ QuantizedCUDA: set_storage_quantized_

  - func: set_.source_Tensor(Tensor(a!) self, Tensor source) -> Tensor(a!)
  variants: method
@@ -3878,6 +3868,7 @@
  variants: method
  dispatch:
  QuantizedCPU: set_quantizer_
+ QuantizedCUDA: set_quantizer_

  - func: is_set_to(Tensor self, Tensor tensor) -> bool
  use_c10_dispatcher: full
@@ -3892,24 +3883,20 @@
  dispatch:
  CPU: masked_fill__cpu
  CUDA: masked_fill__cuda
- supports_named_tensor: True

  - func: masked_fill.Scalar(Tensor self, Tensor mask, Scalar value) -> Tensor
  use_c10_dispatcher: full
  variants: function, method
- supports_named_tensor: True

  - func: masked_fill_.Tensor(Tensor(a!) self, Tensor mask, Tensor value) -> Tensor(a!)
  variants: method
  dispatch:
  CPU: masked_fill__cpu
  CUDA: masked_fill__cuda
- supports_named_tensor: True

  - func: masked_fill.Tensor(Tensor self, Tensor mask, Tensor value) -> Tensor
  use_c10_dispatcher: full
  variants: function, method
- supports_named_tensor: True

  - func: masked_scatter_(Tensor(a!) self, Tensor mask, Tensor source) -> Tensor(a!)
  variants: method
@@ -3922,6 +3909,7 @@
  variants: function, method

  - func: view(Tensor(a) self, int[] size) -> Tensor(a)
+ use_c10_dispatcher: full
  variants: method
  device_guard: False
  dispatch:
@@ -3929,6 +3917,7 @@
  CUDA: view
  MkldnnCPU: mkldnn_view
  QuantizedCPU: view
+ QuantizedCUDA: view

  - func: put_(Tensor(a!) self, Tensor index, Tensor source, bool accumulate=False) -> Tensor(a!)
  variants: method
@@ -3951,14 +3940,12 @@

  - func: index_fill_.int_Scalar(Tensor(a!) self, int dim, Tensor index, Scalar value) -> Tensor(a!)
  variants: method
- supports_named_tensor: True
  dispatch:
  CPU: legacy::cpu::_th_index_fill_
  CUDA: legacy::cuda::_th_index_fill_

  - func: index_fill.int_Scalar(Tensor self, int dim, Tensor index, Scalar value) -> Tensor
  use_c10_dispatcher: full
- supports_named_tensor: True
  variants: function, method

  - func: index_fill_.int_Tensor(Tensor(a!) self, int dim, Tensor index, Tensor value) -> Tensor(a!)
@@ -3966,34 +3953,28 @@
  dispatch:
  CPU: index_fill_
  CUDA: index_fill_
- supports_named_tensor: True

  - func: index_fill.int_Tensor(Tensor self, int dim, Tensor index, Tensor value) -> Tensor
  use_c10_dispatcher: full
  variants: function, method
- supports_named_tensor: True

  - func: index_fill_.Dimname_Scalar(Tensor(a!) self, Dimname dim, Tensor index, Scalar value) -> Tensor(a!)
  variants: method
- supports_named_tensor: True

  - func: index_fill_.Dimname_Tensor(Tensor(a!) self, Dimname dim, Tensor index, Tensor value) -> Tensor(a!)
  variants: method
- supports_named_tensor: True

  - func: index_fill.Dimname_Scalar(Tensor self, Dimname dim, Tensor index, Scalar value) -> Tensor
  variants: function, method
- supports_named_tensor: True

  - func: index_fill.Dimname_Tensor(Tensor self, Dimname dim, Tensor index, Tensor value) -> Tensor
  variants: function, method
- supports_named_tensor: True

  - func: scatter_.src(Tensor(a!) self, int dim, Tensor index, Tensor src) -> Tensor(a!)
  variants: method
  dispatch:
- CPU: scatter_cpu_
- CUDA: legacy::cuda::_th_scatter_
+ CPU: scatter_
+ CUDA: scatter_

  - func: scatter.src(Tensor self, int dim, Tensor index, Tensor src) -> Tensor
  use_c10_dispatcher: full
@@ -4002,8 +3983,8 @@
  - func: scatter_.value(Tensor(a!) self, int dim, Tensor index, Scalar value) -> Tensor(a!)
  variants: method
  dispatch:
- CPU: scatter_fill_cpu_
- CUDA: legacy::cuda::_th_scatter_
+ CPU: scatter_fill_
+ CUDA: scatter_fill_

  - func: scatter.value(Tensor self, int dim, Tensor index, Scalar value) -> Tensor
  use_c10_dispatcher: full
@@ -4018,8 +3999,8 @@
  - func: scatter_add_(Tensor(a!) self, int dim, Tensor index, Tensor src) -> Tensor(a!)
  variants: method
  dispatch:
- CPU: scatter_add_cpu_
- CUDA: legacy::cuda::_th_scatter_add_
+ CPU: scatter_add_
+ CUDA: scatter_add_

  - func: scatter_add(Tensor self, int dim, Tensor index, Tensor src) -> Tensor
  use_c10_dispatcher: full
@@ -4077,9 +4058,11 @@
  CUDA: bitwise_and_out

  - func: bitwise_and.Scalar(Tensor self, Scalar other) -> Tensor
+ use_c10_dispatcher: full
  variants: method, function

  - func: bitwise_and.Tensor(Tensor self, Tensor other) -> Tensor
+ use_c10_dispatcher: full
  variants: method, function

  - func: bitwise_and_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
@@ -4115,9 +4098,11 @@
  CUDA: bitwise_or_out

  - func: bitwise_or.Scalar(Tensor self, Scalar other) -> Tensor
+ use_c10_dispatcher: full
  variants: method, function

  - func: bitwise_or.Tensor(Tensor self, Tensor other) -> Tensor
+ use_c10_dispatcher: full
  variants: method, function

  - func: bitwise_or_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
@@ -4153,9 +4138,11 @@
  CUDA: bitwise_xor_out

  - func: bitwise_xor.Scalar(Tensor self, Scalar other) -> Tensor
+ use_c10_dispatcher: full
  variants: method, function

  - func: bitwise_xor.Tensor(Tensor self, Tensor other) -> Tensor
+ use_c10_dispatcher: full
  variants: method, function

  - func: bitwise_xor_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
@@ -4231,14 +4218,12 @@
  CUDA: __irshift__

  - func: lgamma_(Tensor(a!) self) -> Tensor(a!)
- supports_named_tensor: True
  variants: method
  dispatch:
  CPU: _lgamma__cpu
  CUDA: _lgamma__cuda

  - func: atan2_(Tensor(a!) self, Tensor other) -> Tensor(a!)
- supports_named_tensor: True
  variants: method

  - func: tril_(Tensor(a!) self, int diagonal=0) -> Tensor(a!)
@@ -4254,11 +4239,9 @@
  CUDA: triu_cuda_

  - func: digamma_(Tensor(a!) self) -> Tensor(a!)
- supports_named_tensor: True
  variants: method

  - func: polygamma_(Tensor(a!) self, int n) -> Tensor(a!)
- supports_named_tensor: True
  variants: method

  - func: renorm_(Tensor(a!) self, Scalar p, int dim, Scalar maxnorm) -> Tensor(a!)
@@ -4268,14 +4251,12 @@
  CUDA: legacy::cuda::_th_renorm_

  - func: pow_.Scalar(Tensor(a!) self, Scalar exponent) -> Tensor(a!)
- supports_named_tensor: True
  variants: method
  dispatch:
  CPU: pow_
  CUDA: pow_

  - func: pow_.Tensor(Tensor(a!) self, Tensor exponent) -> Tensor(a!)
- supports_named_tensor: True
  variants: method
  dispatch:
  CPU: pow_
@@ -4297,13 +4278,13 @@
  variants: method
  dispatch:
  CPU: fmod_
- CUDA: legacy::cuda::_th_fmod_
+ CUDA: fmod_cuda_

  - func: fmod_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
  variants: method
  dispatch:
  CPU: fmod_
- CUDA: legacy::cuda::_th_fmod_
+ CUDA: fmod_cuda_

  - func: remainder_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
  variants: method
@@ -4321,72 +4302,57 @@
  variants: method
  dispatch:
  CPU: legacy::cpu::_th_addbmm_
- CUDA: legacy::cuda::_th_addbmm_
+ CUDA: addbmm__cuda

  - func: addbmm.out(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
  dispatch:
- CPU: legacy::cpu::_th_addbmm_out
- CUDA: legacy::cuda::_th_addbmm_out
+ CPU: addbmm_cpu_out
+ CUDA: addbmm_out_cuda

  - func: addbmm(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
  use_c10_dispatcher: full
  variants: method, function
  dispatch:
- CPU: legacy::cpu::_th_addbmm
- CUDA: legacy::cuda::_th_addbmm
+ CPU: addbmm_cpu
+ CUDA: addbmm_cuda

  - func: addcdiv_(Tensor(a!) self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor(a!)
  variants: method
- supports_named_tensor: True

  - func: random_.from(Tensor(a!) self, int from, int? to, *, Generator? generator=None) -> Tensor(a!)
  variants: method
- supports_named_tensor: True

  - func: random_.to(Tensor(a!) self, int to, *, Generator? generator=None) -> Tensor(a!)
  variants: method
- supports_named_tensor: True

  - func: random_(Tensor(a!) self, *, Generator? generator=None) -> Tensor(a!)
  variants: method
- supports_named_tensor: True

  - func: uniform_(Tensor(a!) self, float from=0, float to=1, *, Generator? generator=None) -> Tensor(a!)
  variants: method
- dispatch:
- CPU: legacy::cpu::_th_uniform_
- CUDA: uniform_cuda_
- supports_named_tensor: True

  - func: cauchy_(Tensor(a!) self, float median=0, float sigma=1, *, Generator? generator=None) -> Tensor(a!)
  variants: method
- supports_named_tensor: True

  - func: log_normal_(Tensor(a!) self, float mean=1, float std=2, *, Generator? generator=None) -> Tensor(a!)
  variants: method
- supports_named_tensor: True

  - func: exponential_(Tensor(a!) self, float lambd=1, *, Generator? generator=None) -> Tensor(a!)
  variants: method
- supports_named_tensor: True

  - func: geometric_(Tensor(a!) self, float p, *, Generator? generator=None) -> Tensor(a!)
  variants: method
- supports_named_tensor: True

  # wrappers for TH functions

  - func: diag.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
- CPU: legacy::cpu::_th_diag_out
- CUDA: legacy::cuda::_th_diag_out
+ CPU: diag_cpu_out
+ CUDA: diag_cuda_out

  - func: diag(Tensor self, int diagonal=0) -> Tensor
  use_c10_dispatcher: full
  variants: method, function
- dispatch:
- CPU: legacy::cpu::_th_diag
- CUDA: legacy::cuda::_th_diag

  - func: cross.out(Tensor self, Tensor other, int? dim=None, *, Tensor(a!) out) -> Tensor(a!)

@@ -4427,17 +4393,15 @@
  variants: method, function
  dispatch:
  CPU: legacy::cpu::_th_trace
- CUDA: legacy::cuda::_th_trace
+ CUDA: trace_cuda

  - func: ne.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
- supports_named_tensor: True
  dispatch:
  CPU: ne_out
  CUDA: ne_out
  QuantizedCPU: ne_out_quantized_cpu

  - func: ne.Scalar(Tensor self, Scalar other) -> Tensor
- supports_named_tensor: True
  use_c10_dispatcher: full
  variants: method, function
  dispatch:
@@ -4446,14 +4410,12 @@
  QuantizedCPU: ne_quantized_cpu

  - func: ne.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
- supports_named_tensor: True
  dispatch:
  CPU: ne_out
  CUDA: ne_out
  QuantizedCPU: ne_out_quantized_cpu

  - func: ne.Tensor(Tensor self, Tensor other) -> Tensor
- supports_named_tensor: True
  use_c10_dispatcher: full
  variants: method, function
  dispatch:
@@ -4462,14 +4424,12 @@
  QuantizedCPU: ne_quantized_cpu

  - func: eq.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
- supports_named_tensor: True
  dispatch:
  CPU: eq_out
  CUDA: eq_out
  QuantizedCPU: eq_out_quantized_cpu

  - func: eq.Scalar(Tensor self, Scalar other) -> Tensor
- supports_named_tensor: True
  use_c10_dispatcher: full
  variants: method, function
  dispatch:
@@ -4478,14 +4438,12 @@
  QuantizedCPU: eq_quantized_cpu

  - func: eq.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
- supports_named_tensor: True
  dispatch:
  CPU: eq_out
  CUDA: eq_out
  QuantizedCPU: eq_out_quantized_cpu

  - func: eq.Tensor(Tensor self, Tensor other) -> Tensor
- supports_named_tensor: True
  use_c10_dispatcher: full
  variants: method, function
  dispatch:
@@ -4494,14 +4452,12 @@
  QuantizedCPU: eq_quantized_cpu

  - func: ge.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
- supports_named_tensor: True
  dispatch:
  CPU: ge_out
  CUDA: ge_out
  QuantizedCPU: ge_out_quantized_cpu

  - func: ge.Scalar(Tensor self, Scalar other) -> Tensor
- supports_named_tensor: True
  use_c10_dispatcher: full
  variants: method, function
  dispatch:
@@ -4510,14 +4466,12 @@
  QuantizedCPU: ge_quantized_cpu

  - func: ge.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
- supports_named_tensor: True
  dispatch:
  CPU: ge_out
  CUDA: ge_out
  QuantizedCPU: ge_out_quantized_cpu

  - func: ge.Tensor(Tensor self, Tensor other) -> Tensor
- supports_named_tensor: True
  use_c10_dispatcher: full
  variants: method, function
  dispatch:
@@ -4526,14 +4480,12 @@
  QuantizedCPU: ge_quantized_cpu

  - func: le.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
- supports_named_tensor: True
  dispatch:
  CPU: le_out
  CUDA: le_out
  QuantizedCPU: le_out_quantized_cpu

  - func: le.Scalar(Tensor self, Scalar other) -> Tensor
- supports_named_tensor: True
  use_c10_dispatcher: full
  variants: method, function
  dispatch:
@@ -4542,14 +4494,12 @@
  QuantizedCPU: le_quantized_cpu

  - func: le.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
- supports_named_tensor: True
  dispatch:
  CPU: le_out
  CUDA: le_out
  QuantizedCPU: le_out_quantized_cpu

  - func: le.Tensor(Tensor self, Tensor other) -> Tensor
- supports_named_tensor: True
  use_c10_dispatcher: full
  variants: method, function
  dispatch:
@@ -4558,14 +4508,12 @@
  QuantizedCPU: le_quantized_cpu

  - func: gt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
- supports_named_tensor: True
  dispatch:
  CPU: gt_out
  CUDA: gt_out
  QuantizedCPU: gt_out_quantized_cpu

  - func: gt.Scalar(Tensor self, Scalar other) -> Tensor
- supports_named_tensor: True
  use_c10_dispatcher: full
  variants: method, function
  dispatch:
@@ -4574,14 +4522,12 @@
  QuantizedCPU: gt_quantized_cpu

  - func: gt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
- supports_named_tensor: True
  dispatch:
  CPU: gt_out
  CUDA: gt_out
  QuantizedCPU: gt_out_quantized_cpu

  - func: gt.Tensor(Tensor self, Tensor other) -> Tensor
- supports_named_tensor: True
  use_c10_dispatcher: full
  variants: method, function
  dispatch:
@@ -4590,14 +4536,12 @@
  QuantizedCPU: gt_quantized_cpu

  - func: lt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
- supports_named_tensor: True
  dispatch:
  CPU: lt_out
  CUDA: lt_out
  QuantizedCPU: lt_out_quantized_cpu

  - func: lt.Scalar(Tensor self, Scalar other) -> Tensor
- supports_named_tensor: True
  use_c10_dispatcher: full
  variants: method, function
  dispatch:
@@ -4606,14 +4550,12 @@
  QuantizedCPU: lt_quantized_cpu

  - func: lt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
- supports_named_tensor: True
  dispatch:
  CPU: lt_out
  CUDA: lt_out
  QuantizedCPU: lt_out_quantized_cpu

  - func: lt.Tensor(Tensor self, Tensor other) -> Tensor
- supports_named_tensor: True
  use_c10_dispatcher: full
  variants: method, function
  dispatch:
@@ -4656,7 +4598,6 @@
  dispatch:
  CPU: masked_select_out_cpu
  CUDA: masked_select_out_cuda
- supports_named_tensor: True

  - func: masked_select(Tensor self, Tensor mask) -> Tensor
  use_c10_dispatcher: full
@@ -4664,7 +4605,6 @@
  dispatch:
  CPU: masked_select_cpu
  CUDA: masked_select_cuda
- supports_named_tensor: True

  - func: nonzero.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
@@ -4679,19 +4619,20 @@
  CUDA: legacy::cuda::_th_nonzero

  - func: nonzero_numpy(Tensor self) -> Tensor[]
+ use_c10_dispatcher: full
  variants: method, function

  - func: gather.out(Tensor self, int dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) out) -> Tensor(a!)
  dispatch:
- CPU: gather_out_cpu
- CUDA: gather_out_cuda
+ CPU: gather_out_cpu_cuda
+ CUDA: gather_out_cpu_cuda

  - func: gather(Tensor self, int dim, Tensor index, *, bool sparse_grad=False) -> Tensor
  use_c10_dispatcher: full
  variants: method, function
  dispatch:
- CPU: gather_cpu
- CUDA: gather_cuda
+ CPU: gather
+ CUDA: gather

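# NB (annotation): `gather` / `gather.out` now point CPU and CUDA at a single
# shared kernel (`gather` / `gather_out_cpu_cuda`) instead of per-device
# implementations, another instance of the TH-to-ATen consolidation noted above.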
  - func: gather.dimname_out(Tensor self, Dimname dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) out) -> Tensor(a!)

@@ -4702,24 +4643,19 @@
  use_c10_dispatcher: full

  - func: addcmul.out(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1, Tensor(a!) out) -> Tensor(a!)
- supports_named_tensor: True

  - func: addcmul(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor
  use_c10_dispatcher: full
  variants: method, function
- supports_named_tensor: True

  - func: addcmul_(Tensor(a!) self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor(a!)
  variants: method
- supports_named_tensor: True

  - func: addcdiv.out(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1, Tensor(a!) out) -> Tensor(a!)
- supports_named_tensor: True

  - func: addcdiv(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor
  use_c10_dispatcher: full
  variants: method, function
- supports_named_tensor: True

  - func: lstsq.X(Tensor self, Tensor A, *, Tensor(a!) X, Tensor(b!) qr) -> (Tensor(a!) solution, Tensor(b!) QR)
  dispatch:
@@ -4727,6 +4663,7 @@
  CUDA: legacy::cuda::_th_gels_out

  - func: lstsq(Tensor self, Tensor A) -> (Tensor solution, Tensor QR)
+ use_c10_dispatcher: full
  variants: method, function
  dispatch:
  CPU: legacy::cpu::_th_gels
@@ -4735,9 +4672,11 @@
  - func: triangular_solve.X(Tensor self, Tensor A, bool upper=True, bool transpose=False, bool unitriangular=False, *, Tensor(a!) X, Tensor(b!) M) -> (Tensor(a!) solution, Tensor(b!) cloned_coefficient)

  - func: triangular_solve(Tensor self, Tensor A, bool upper=True, bool transpose=False, bool unitriangular=False) -> (Tensor solution, Tensor cloned_coefficient)
+ use_c10_dispatcher: full
  variants: method, function

  - func: _triangular_solve_helper(Tensor self, Tensor A, bool upper, bool transpose, bool unitriangular) -> (Tensor, Tensor)
+ use_c10_dispatcher: full
  variants: function
  dispatch:
  CPU: _triangular_solve_helper_cpu
@@ -4746,9 +4685,11 @@
  - func: symeig.e(Tensor self, bool eigenvectors=False, bool upper=True, *, Tensor(a!) e, Tensor(b!) V) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors)

  - func: symeig(Tensor self, bool eigenvectors=False, bool upper=True) -> (Tensor eigenvalues, Tensor eigenvectors)
+ use_c10_dispatcher: full
  variants: method, function

  - func: _symeig_helper(Tensor self, bool eigenvectors, bool upper) -> (Tensor, Tensor)
+ use_c10_dispatcher: full
  variants: function
  dispatch:
  CPU: _symeig_helper_cpu
@@ -4760,6 +4701,7 @@
  CUDA: legacy::cuda::_th_eig_out

  - func: eig(Tensor self, bool eigenvectors=False) -> (Tensor eigenvalues, Tensor eigenvectors)
+ use_c10_dispatcher: full
  variants: method, function
  dispatch:
  CPU: legacy::cpu::_th_eig
@@ -4768,9 +4710,11 @@
  - func: svd.U(Tensor self, bool some=True, bool compute_uv=True, *, Tensor(a!) U, Tensor(b!) S, Tensor(c!) V) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) V)

  - func: svd(Tensor self, bool some=True, bool compute_uv=True) -> (Tensor U, Tensor S, Tensor V)
+ use_c10_dispatcher: full
  variants: method, function

  - func: _svd_helper(Tensor self, bool some, bool compute_uv) -> (Tensor, Tensor, Tensor)
+ use_c10_dispatcher: full
  variants: function
  dispatch:
  CPU: _svd_helper_cpu
@@ -4803,11 +4747,13 @@
  CUDA: _cholesky_solve_helper_cuda

  - func: solve(Tensor self, Tensor A) -> (Tensor solution, Tensor LU)
+ use_c10_dispatcher: full
  variants: function, method

  - func: solve.solution(Tensor self, Tensor A, *, Tensor(a!) solution, Tensor(b!) lu) -> (Tensor(a!) solution, Tensor(b!) LU)

  - func: _solve_helper(Tensor self, Tensor A) -> (Tensor, Tensor)
+ use_c10_dispatcher: full
  variants: function
  dispatch:
  CPU: _solve_helper_cpu
@@ -4828,9 +4774,11 @@
  - func: qr.Q(Tensor self, bool some=True, *, Tensor(a!) Q, Tensor(b!) R) -> (Tensor(a!) Q, Tensor(b!) R)

  - func: qr(Tensor self, bool some=True) -> (Tensor Q, Tensor R)
+ use_c10_dispatcher: full
  variants: method, function

  - func: _qr_helper(Tensor self, bool some) -> (Tensor, Tensor)
+ use_c10_dispatcher: full
  variants: function
  dispatch:
  CPU: _qr_helper_cpu
@@ -4842,6 +4790,7 @@
  CUDA: legacy::cuda::_th_geqrf_out

  - func: geqrf(Tensor self) -> (Tensor a, Tensor tau)
+ use_c10_dispatcher: full
  variants: method, function
  dispatch:
  CPU: legacy::cpu::_th_geqrf
@@ -4868,6 +4817,7 @@
  CPU: legacy::cpu::_th_ormqr

  - func: _lu_with_info(Tensor self, bool pivot=True, bool check_errors=True) -> (Tensor, Tensor, Tensor)
+ use_c10_dispatcher: full
  variants: function
  dispatch:
  CPU: _lu_with_info_cpu
@@ -4899,6 +4849,7 @@
  CUDA: multinomial

  - func: _multinomial_alias_setup(Tensor probs) -> (Tensor, Tensor)
+ use_c10_dispatcher: full
  variants: function
  dispatch:
  CPU: legacy::cpu::_th_multinomial_alias_setup
@@ -4911,66 +4862,55 @@
  CUDA: legacy::cuda::_th_multinomial_alias_draw

  - func: lgamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- supports_named_tensor: True
  dispatch:
  CPU: _lgamma_out_cpu
  CUDA: _lgamma_out_cuda

  - func: lgamma(Tensor self) -> Tensor
  use_c10_dispatcher: full
- supports_named_tensor: True
  variants: method, function
  dispatch:
  CPU: lgamma
  CUDA: lgamma

  - func: digamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- supports_named_tensor: True

  - func: digamma(Tensor self) -> Tensor
  use_c10_dispatcher: full
- supports_named_tensor: True
  variants: method, function

  - func: polygamma.out(int n, Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- supports_named_tensor: True

  - func: polygamma(int n, Tensor self) -> Tensor
  use_c10_dispatcher: full
- supports_named_tensor: True
  variants: method, function

  - func: erfinv(Tensor self) -> Tensor
  use_c10_dispatcher: full
- supports_named_tensor: True
  variants: method, function
  dispatch:
  CPU: erfinv
  CUDA: erfinv

  - func: erfinv_(Tensor(a!) self) -> Tensor(a!)
- supports_named_tensor: True
  variants: method
  dispatch:
  CPU: _erfinv__cpu
  CUDA: _erfinv__cuda

  - func: erfinv.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- supports_named_tensor: True
  dispatch:
  CPU: _erfinv_out_cpu
  CUDA: _erfinv_out_cuda

  - func: sign(Tensor self) -> Tensor
+ use_c10_dispatcher: full
  variants: function, method
- supports_named_tensor: True

  - func: sign_(Tensor(a!) self) -> Tensor(a!)
  variants: method
- supports_named_tensor: True

  - func: sign.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- supports_named_tensor: True
  dispatch:
  CPU: sign_out
  CUDA: sign_out
@@ -4980,11 +4920,9 @@
  variants: method, function

  - func: atan2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
- supports_named_tensor: True

  - func: atan2(Tensor self, Tensor other) -> Tensor
  use_c10_dispatcher: full
- supports_named_tensor: True
  variants: method, function

  - func: lerp.Scalar_out(Tensor self, Tensor end, Scalar weight, *, Tensor(a!) out) -> Tensor(a!)
@@ -5026,26 +4964,26 @@
  - func: fmod.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
  CPU: fmod_out
- CUDA: legacy::cuda::_th_fmod_out
+ CUDA: fmod_cuda_out

  - func: fmod.Scalar(Tensor self, Scalar other) -> Tensor
  use_c10_dispatcher: full
  variants: method, function
  dispatch:
  CPU: fmod
- CUDA: legacy::cuda::_th_fmod
+ CUDA: fmod_cuda

  - func: fmod.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
  CPU: fmod_out
- CUDA: legacy::cuda::_th_fmod_out
+ CUDA: fmod_cuda_out

  - func: fmod.Tensor(Tensor self, Tensor other) -> Tensor
  use_c10_dispatcher: full
  variants: method, function
  dispatch:
  CPU: fmod
- CUDA: legacy::cuda::_th_fmod
+ CUDA: fmod_cuda

  - func: remainder.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
@@ -5082,9 +5020,8 @@
  variants: method, function
  dispatch:
  CPU: min
- CUDA: legacy::cuda::_th_min
+ CUDA: min
  QuantizedCPU: min_quant
- supports_named_tensor: True

  - func: max.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)

@@ -5097,9 +5034,8 @@
  variants: method, function
  dispatch:
  CPU: max
- CUDA: legacy::cuda::_th_max
+ CUDA: max
  QuantizedCPU: max_quant
- supports_named_tensor: True

  - func: median(Tensor self) -> Tensor
  use_c10_dispatcher: full
@@ -5107,7 +5043,6 @@
  dispatch:
  CPU: median_cpu
  CUDA: median_cuda
- supports_named_tensor: True

  - func: sort.values(Tensor self, int dim=-1, bool descending=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
  dispatch:
@@ -5115,6 +5050,7 @@
  CUDA: legacy::cuda::_th_sort_out

  - func: sort(Tensor self, int dim=-1, bool descending=False) -> (Tensor values, Tensor indices)
+ use_c10_dispatcher: full
  variants: method, function
  dispatch:
  CPU: legacy::cpu::_th_sort
@@ -5139,6 +5075,7 @@
  CUDA: legacy::cuda::_th_topk_out

  - func: topk(Tensor self, int k, int dim=-1, bool largest=True, bool sorted=True) -> (Tensor values, Tensor indices)
+ use_c10_dispatcher: full
  variants: method, function
  dispatch:
  CPU: topk
@@ -5147,12 +5084,10 @@

  - func: all(Tensor self) -> Tensor
  use_c10_dispatcher: full
- supports_named_tensor: True
  variants: method, function

  - func: any(Tensor self) -> Tensor
  use_c10_dispatcher: full
- supports_named_tensor: True
  variants: method, function
  dispatch:
  CPU: any
@@ -5173,11 +5108,20 @@
  CUDA: legacy::cuda::_th_renorm

  - func: unfold(Tensor(a) self, int dimension, int size, int step) -> Tensor(a)
+ use_c10_dispatcher: full
  variants: method
  device_guard: False
  dispatch:
  CPU: unfold
  CUDA: unfold
+ QuantizedCPU: unfold
+ QuantizedCUDA: unfold
+
+ - func: unfold_backward(Tensor grad_in, int[] input_sizes, int dim, int size, int step) -> Tensor
+ variants: function
+ dispatch:
+ CPU: unfold_backward
+ CUDA: unfold_backward

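# NB (annotation): `unfold_backward` is a new dedicated backward op for
# `unfold`, with CPU and CUDA kernels; `unfold` itself also gains QuantizedCPU
# and QuantizedCUDA dispatch entries here.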
  - func: equal(Tensor self, Tensor other) -> bool
  use_c10_dispatcher: full
@@ -5185,96 +5129,69 @@
  dispatch:
  CPU: legacy::cpu::_th_equal
  CUDA: legacy::cuda::_th_equal
- QuantizedCPU: quantized_equal
- supports_named_tensor: True
+ QuantizedCPU: quantized_equal_cpu

  - func: pow.Tensor_Tensor_out(Tensor self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)
- supports_named_tensor: True
  dispatch:
  CPU: pow_out
  CUDA: pow_out

  - func: pow.Tensor_Tensor(Tensor self, Tensor exponent) -> Tensor
  use_c10_dispatcher: full
- supports_named_tensor: True
  variants: method, function
  dispatch:
  CPU: pow
  CUDA: pow

  - func: pow.Scalar_out(Scalar self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)
- supports_named_tensor: True
  dispatch:
  CPU: pow_out
  CUDA: pow_out

  - func: pow.Scalar(Scalar self, Tensor exponent) -> Tensor
  use_c10_dispatcher: full
- supports_named_tensor: True
  dispatch:
  CPU: pow
  CUDA: pow

  - func: normal_(Tensor(a!) self, float mean=0, float std=1, *, Generator? generator=None) -> Tensor(a!)
  variants: method
- dispatch:
- CPU: normal_cpu_
- CUDA: normal_cuda_
- supports_named_tensor: True

  - func: normal.Tensor_float_out(Tensor mean, float std=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
- dispatch:
- CPU: normal_out_cpu
- CUDA: normal_out_cuda

  - func: normal.Tensor_float(Tensor mean, float std=1, *, Generator? generator=None) -> Tensor
- dispatch:
- CPU: normal_cpu
- CUDA: normal_cuda

  - func: normal.float_Tensor_out(float mean, Tensor std, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
- dispatch:
- CPU: normal_out_cpu
- CUDA: normal_out_cuda

  - func: normal.float_Tensor(float mean, Tensor std, *, Generator? generator=None) -> Tensor
- dispatch:
- CPU: normal_cpu
- CUDA: normal_cuda

  - func: normal.Tensor_Tensor_out(Tensor mean, Tensor std, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
- dispatch:
- CPU: normal_out_cpu
- CUDA: normal_out_cuda

  - func: normal.Tensor_Tensor(Tensor mean, Tensor std, *, Generator? generator=None) -> Tensor
- dispatch:
- CPU: normal_cpu
- CUDA: normal_cuda

  - func: normal.float_float(float mean, float std, int[] size, *, Generator? generator=None, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor

  - func: normal.float_float_out(float mean, float std, int[] size, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)

5259
5176
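The normal entries above lose their per-backend dispatch lines (the kernels now route through the default implementation), but the schema keeps the same tensor/scalar overloads. A hedged sketch of how they pair up, assuming torch-rb binds the generated Torch.normal function:

    require "torch"

    mean = Torch.zeros(5)
    std  = Torch.ones(5)
    Torch.normal(mean, 1.0)   # normal.Tensor_float: per-element means, shared std
    Torch.normal(0.0, std)    # normal.float_Tensor: shared mean, per-element stds
    Torch.normal(mean, std)   # normal.Tensor_Tensor: both per-element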
  - func: alias(Tensor(a) self) -> Tensor(a)
+   use_c10_dispatcher: full
    variants: method, function
-   supports_named_tensor: True

  - func: _addr(Tensor self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
    use_c10_dispatcher: full
    dispatch:
      CPU: legacy::cpu::_th_addr
-     CUDA: legacy::cuda::_th_addr
+     CUDA: addr_cuda

  - func: _addr_(Tensor(a!) self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)
    dispatch:
      CPU: legacy::cpu::_th_addr_
-     CUDA: legacy::cuda::_th_addr_
+     CUDA: addr__cuda

  - func: _addr.out(Tensor self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
    dispatch:
      CPU: legacy::cpu::_th_addr_out
-     CUDA: legacy::cuda::_th_addr_out
+     CUDA: addr_out_cuda

  - func: _index_copy_(Tensor(a!) self, int dim, Tensor index, Tensor source) -> Tensor(a!)
    dispatch:
@@ -5285,37 +5202,23 @@
    use_c10_dispatcher: full
    dispatch:
      CPU: _cumsum_cpu
-     CUDA: legacy::cuda::_th_cumsum
+     CUDA: _cumsum_cuda

  - func: _cumsum.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)
    dispatch:
      CPU: _cumsum_out_cpu
-     CUDA: legacy::cuda::_th_cumsum_out
+     CUDA: _cumsum_out_cuda

  - func: _cumprod(Tensor self, int dim) -> Tensor
    use_c10_dispatcher: full
    dispatch:
      CPU: _cumprod_cpu
-     CUDA: legacy::cuda::_th_cumprod
+     CUDA: _cumprod_cuda

  - func: _cumprod.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)
    dispatch:
      CPU: _cumprod_out_cpu
-     CUDA: legacy::cuda::_th_cumprod_out
-
- - func: _var(Tensor self, bool unbiased=True) -> Tensor
-   use_c10_dispatcher: full
-   dispatch:
-     CPU: legacy::cpu::_th_var
-     CUDA: legacy::cuda::_th_var
-   supports_named_tensor: True
-
- - func: _std(Tensor self, bool unbiased=True) -> Tensor
-   use_c10_dispatcher: full
-   dispatch:
-     CPU: legacy::cpu::_th_std
-     CUDA: legacy::cuda::_th_std
-   supports_named_tensor: True
+     CUDA: _cumprod_out_cuda

  - func: _amp_non_finite_check_and_unscale_(Tensor(a!) self, Tensor(b!) found_inf, Tensor inv_scale) -> ()
    variants: function
@@ -5328,6 +5231,7 @@
      CUDA: _amp_update_scale_cuda

  - func: _cat(Tensor[] tensors, int dim=0) -> Tensor
+   use_c10_dispatcher: full
    dispatch:
      CPU: _cat_cpu
      CUDA: cat_cuda
@@ -5340,6 +5244,7 @@
      QuantizedCPU: quantized_cat_out

  - func: _mode(Tensor self, int dim=-1, bool keepdim=False) -> (Tensor, Tensor)
+   use_c10_dispatcher: full
    dispatch:
      CPU: legacy::cpu::_th_mode
      CUDA: legacy::cuda::_th_mode
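Both _cat and _mode above move to use_c10_dispatcher: full. _cat is the internal kernel behind concatenation; the public entry point in torch-rb is Torch.cat (assuming the generated binding, which 0.3.x ships):

    require "torch"

    a = Torch.ones(2, 3)
    b = Torch.zeros(2, 3)
    p Torch.cat([a, b], 0).shape   # => [4, 3]
    p Torch.cat([a, b], 1).shape   # => [2, 6]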
@@ -5349,25 +5254,39 @@
      CPU: legacy::cpu::_th_mode_out
      CUDA: legacy::cuda::_th_mode_out

- - func: _max(Tensor self, int dim, bool keepdim=False) -> (Tensor, Tensor)
+ - func: bucketize.Tensor(Tensor self, Tensor boundaries, *, bool out_int32=False, bool right=False) -> Tensor
+   use_c10_dispatcher: full
+   dispatch:
+     CPU: bucketize_cpu
+     CUDA: bucketize_cuda
+
+ - func: bucketize.Tensor_out(Tensor self, Tensor boundaries, *, bool out_int32=False, bool right=False, Tensor(a!) out) -> Tensor(a!)
+   dispatch:
+     CPU: bucketize_out_cpu
+     CUDA: bucketize_out_cuda
+
+ - func: bucketize.Scalar(Scalar self, Tensor boundaries, *, bool out_int32=False, bool right=False) -> Tensor
+   use_c10_dispatcher: full
    dispatch:
-     CPU: legacy::cpu::_th_max
-     CUDA: legacy::cuda::_th_max
+     CPU: bucketize_cpu
+     CUDA: bucketize_cuda

- - func: _max.max(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_indices) -> (Tensor(a!), Tensor(b!))
+ - func: searchsorted.Tensor(Tensor sorted_sequence, Tensor self, *, bool out_int32=False, bool right=False) -> Tensor
+   use_c10_dispatcher: full
    dispatch:
-     CPU: legacy::cpu::_th_max_out
-     CUDA: legacy::cuda::_th_max_out
+     CPU: searchsorted_cpu
+     CUDA: searchsorted_cuda

- - func: _min(Tensor self, int dim, bool keepdim=False) -> (Tensor, Tensor)
+ - func: searchsorted.Tensor_out(Tensor sorted_sequence, Tensor self, *, bool out_int32=False, bool right=False, Tensor(a!) out) -> Tensor(a!)
    dispatch:
-     CPU: legacy::cpu::_th_min
-     CUDA: legacy::cuda::_th_min
+     CPU: searchsorted_out_cpu
+     CUDA: searchsorted_out_cuda

- - func: _min.min(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) min, Tensor(b!) min_indices) -> (Tensor(a!), Tensor(b!))
+ - func: searchsorted.Scalar(Tensor sorted_sequence, Scalar self, *, bool out_int32=False, bool right=False) -> Tensor
+   use_c10_dispatcher: full
    dispatch:
-     CPU: legacy::cpu::_th_min_out
-     CUDA: legacy::cuda::_th_min_out
+     CPU: searchsorted_cpu
+     CUDA: searchsorted_cuda
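This hunk drops the legacy _max/_min wrappers and introduces bucketize and searchsorted. Both binary-search a sorted 1-D sequence; a sketch, assuming torch-rb regenerates bindings for these new functions:

    require "torch"

    boundaries = Torch.tensor([1, 3, 5, 7, 9])
    values     = Torch.tensor([3, 6, 9])

    # index of the bucket each value falls into (right=False: v <= boundary)
    p Torch.bucketize(values, boundaries)      # => tensor([1, 3, 4])
    # insertion points that keep the sorted sequence ordered
    p Torch.searchsorted(boundaries, values)   # => tensor([1, 3, 4])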
 
  ## NN wrappers

@@ -5446,6 +5365,7 @@
      CUDA: legacy::cuda::_thnn_multilabel_margin_loss_forward_out

  - func: multilabel_margin_loss_forward(Tensor self, Tensor target, int reduction) -> (Tensor output, Tensor is_target)
+   use_c10_dispatcher: full
    python_module: nn
    dispatch:
      CPU: multilabel_margin_loss_forward_cpu
@@ -5560,18 +5480,10 @@

  - func: elu.out(Tensor self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1, *, Tensor(a!) out) -> Tensor(a!)
    python_module: nn
-   dispatch:
-     CPU: elu_out
-     CUDA: elu_out
-     QuantizedCPU: quantized_elu_out

  - func: elu(Tensor self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1) -> Tensor
    use_c10_dispatcher: full
    python_module: nn
-   dispatch:
-     CPU: elu
-     CUDA: elu
-     QuantizedCPU: quantized_elu

  - func: elu_backward.grad_input(Tensor grad_output, Scalar alpha, Scalar scale, Scalar input_scale, Tensor output, *, Tensor(a!) grad_input) -> Tensor(a!)
    python_module: nn
@@ -5585,10 +5497,6 @@

  - func: elu_(Tensor(a!) self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1) -> Tensor(a!)
    python_module: nn
-   dispatch:
-     CPU: elu_
-     CUDA: elu_
-     QuantizedCPU: quantized_elu_

  - func: glu.out(Tensor self, int dim=-1, *, Tensor(a!) out) -> Tensor(a!)
    python_module: nn
@@ -5622,6 +5530,10 @@
  - func: hardsigmoid(Tensor self) -> Tensor
    use_c10_dispatcher: full
    python_module: nn
+   dispatch:
+     CPU: hardsigmoid
+     CUDA: hardsigmoid
+     QuantizedCPU: quantized_hardsigmoid

  - func: hardsigmoid_(Tensor(a!) self) -> Tensor(a!)
    python_module: nn
@@ -5629,6 +5541,9 @@
  - func: hardsigmoid_backward(Tensor grad_output, Tensor self) -> Tensor
    use_c10_dispatcher: full
    python_module: nn
+   dispatch:
+     CPU: hardsigmoid_backward
+     CUDA: hardsigmoid_backward

  - func: hardtanh.out(Tensor self, Scalar min_val=-1, Scalar max_val=1, *, Tensor(a!) out) -> Tensor(a!)
    python_module: nn
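hardsigmoid gains explicit CPU, CUDA, and quantized dispatch above. It is a piecewise-linear approximation of the sigmoid; its definition, sketched with generic torch-rb tensor ops (a dedicated binding, if generated, would be preferable):

    require "torch"

    x = Torch.tensor([-4.0, -1.0, 0.0, 1.0, 4.0])
    # hardsigmoid(x) = clamp(x / 6 + 1 / 2, 0, 1)
    p (x / 6 + 0.5).clamp(0, 1)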
@@ -5661,6 +5576,24 @@
      CPU: hardtanh_
      CUDA: hardtanh_
      QuantizedCPU: quantized_hardtanh_
+     Vulkan: vulkan_hardtanh_
+
+ - func: hardswish.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+   python_module: nn
+
+ - func: hardswish(Tensor self) -> Tensor
+   use_c10_dispatcher: full
+   python_module: nn
+
+ - func: hardswish_(Tensor(a!) self) -> Tensor(a!)
+   python_module: nn
+
+ - func: hardswish_backward(Tensor grad_output, Tensor self) -> Tensor
+   use_c10_dispatcher: full
+   python_module: nn
+   dispatch:
+     CPU: hardswish_backward
+     CUDA: hardswish_backward

  - func: leaky_relu.out(Tensor self, Scalar negative_slope=0.01, *, Tensor(a!) out) -> Tensor(a!)
    python_module: nn
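hardswish is new in this range (used by MobileNetV3-style networks). Its definition, sketched with generic tensor ops in case the dedicated torch-rb binding is not yet generated:

    require "torch"

    x = Torch.tensor([-3.0, -1.0, 0.0, 1.0, 3.0])
    # hardswish(x) = x * clamp(x + 3, 0, 6) / 6
    p x * (x + 3).clamp(0, 6) / 6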
@@ -5702,6 +5635,7 @@
      CUDA: legacy::cuda::_thnn_log_sigmoid_forward_out

  - func: log_sigmoid_forward(Tensor self) -> (Tensor output, Tensor buffer)
+   use_c10_dispatcher: full
    python_module: nn
    dispatch:
      CPU: log_sigmoid_forward_cpu
@@ -5784,14 +5718,17 @@
      MkldnnCPU: mkldnn_adaptive_avg_pool2d_out

  - func: adaptive_avg_pool2d(Tensor self, int[2] output_size) -> Tensor
+   use_c10_dispatcher: full
    python_module: nn

  - func: mkldnn_adaptive_avg_pool2d(Tensor self, int[2] output_size) -> Tensor
+   use_c10_dispatcher: full
    dispatch:
      MkldnnCPU: mkldnn_adaptive_avg_pool2d
    requires_tensor: True

  - func: _adaptive_avg_pool2d(Tensor self, int[2] output_size) -> Tensor
+   use_c10_dispatcher: full
    dispatch:
      CPU: adaptive_avg_pool2d_cpu
      CUDA: adaptive_avg_pool2d_cuda
@@ -5811,6 +5748,7 @@
      CUDA: adaptive_avg_pool3d_out_cuda

  - func: adaptive_avg_pool3d(Tensor self, int[3] output_size) -> Tensor
+   use_c10_dispatcher: full
    python_module: nn
    dispatch:
      CPU: adaptive_avg_pool3d_cpu
@@ -5838,6 +5776,7 @@

  # Return: (Tensor output, Tensor indices)
  - func: adaptive_max_pool2d(Tensor self, int[2] output_size) -> (Tensor, Tensor)
+   use_c10_dispatcher: full
    python_module: nn
    dispatch:
      CPU: adaptive_max_pool2d_cpu
@@ -5865,6 +5804,7 @@

  # Return: (Tensor output, Tensor indices)
  - func: adaptive_max_pool3d(Tensor self, int[3] output_size) -> (Tensor, Tensor)
+   use_c10_dispatcher: full
    python_module: nn
    dispatch:
      CPU: adaptive_max_pool3d_cpu
@@ -5891,6 +5831,7 @@
      MkldnnCPU: mkldnn_avg_pool2d_out

  - func: avg_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None) -> Tensor
+   use_c10_dispatcher: full
    python_module: nn
    dispatch:
      CPU: avg_pool2d_cpu
@@ -5905,6 +5846,7 @@
      CUDA: avg_pool2d_backward_out_cuda

  - func: avg_pool2d_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, bool ceil_mode, bool count_include_pad, int? divisor_override) -> Tensor
+   use_c10_dispatcher: full
    python_module: nn
    dispatch:
      CPU: avg_pool2d_backward_cpu
@@ -5917,6 +5859,7 @@
      CUDA: avg_pool3d_out_cuda

  - func: avg_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None) -> Tensor
+   use_c10_dispatcher: full
    python_module: nn
    dispatch:
      CPU: avg_pool3d_cpu
@@ -5930,6 +5873,7 @@
      CUDA: avg_pool3d_backward_out_cuda

  - func: avg_pool3d_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, bool ceil_mode, bool count_include_pad, int? divisor_override) -> Tensor
+   use_c10_dispatcher: full
    python_module: nn
    dispatch:
      CPU: avg_pool3d_backward_cpu
@@ -5944,6 +5888,7 @@

  # Return: (Tensor output, Tensor indices)
  - func: fractional_max_pool2d(Tensor self, int[2] kernel_size, int[2] output_size, Tensor random_samples) -> (Tensor, Tensor)
+   use_c10_dispatcher: full
    python_module: nn
    dispatch:
      CPU: fractional_max_pool2d_cpu
@@ -5956,6 +5901,7 @@
      CUDA: fractional_max_pool2d_backward_out_cuda

  - func: fractional_max_pool2d_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] output_size, Tensor indices) -> Tensor
+   use_c10_dispatcher: full
    python_module: nn
    dispatch:
      CPU: fractional_max_pool2d_backward_cpu
@@ -5970,6 +5916,7 @@

  # Return: (Tensor output, Tensor indices)
  - func: fractional_max_pool3d(Tensor self, int[3] kernel_size, int[3] output_size, Tensor random_samples) -> (Tensor, Tensor)
+   use_c10_dispatcher: full
    python_module: nn
    dispatch:
      CPU: fractional_max_pool3d_cpu
@@ -5982,6 +5929,7 @@
      CUDA: fractional_max_pool3d_backward_out_cuda

  - func: fractional_max_pool3d_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] output_size, Tensor indices) -> Tensor
+   use_c10_dispatcher: full
    python_module: nn
    dispatch:
      CPU: fractional_max_pool3d_backward_cpu
@@ -5996,11 +5944,11 @@

  # Return: (Tensor output, Tensor indices)
  - func: max_pool2d_with_indices(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor)
+   use_c10_dispatcher: full
    python_module: nn
    dispatch:
      CPU: max_pool2d_with_indices_cpu
      CUDA: max_pool2d_with_indices_cuda
-   supports_named_tensor: True

  - func: max_pool2d_with_indices_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, bool ceil_mode, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
    python_module: nn
@@ -6009,6 +5957,7 @@
      CUDA: max_pool2d_with_indices_backward_out_cuda

  - func: max_pool2d_with_indices_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, bool ceil_mode, Tensor indices) -> Tensor
+   use_c10_dispatcher: full
    python_module: nn
    dispatch:
      CPU: max_pool2d_with_indices_backward_cpu
@@ -6023,11 +5972,11 @@

  # Return: (Tensor output, Tensor indices)
  - func: max_pool3d_with_indices(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor)
+   use_c10_dispatcher: full
    python_module: nn
    dispatch:
      CPU: max_pool3d_with_indices_cpu
      CUDA: max_pool3d_with_indices_cuda
-   supports_named_tensor: True

  - func: max_pool3d_with_indices_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, int[3] dilation, bool ceil_mode, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
    python_module: nn
@@ -6036,6 +5985,7 @@
      CUDA: max_pool3d_with_indices_backward_out_cuda

  - func: max_pool3d_with_indices_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, int[3] dilation, bool ceil_mode, Tensor indices) -> Tensor
+   use_c10_dispatcher: full
    python_module: nn
    dispatch:
      CPU: max_pool3d_with_indices_backward_cpu
@@ -6048,6 +5998,7 @@
      CUDA: max_unpooling2d_forward_out_cuda

  - func: max_unpool2d(Tensor self, Tensor indices, int[2] output_size) -> Tensor
+   use_c10_dispatcher: full
    python_module: nn
    dispatch:
      CPU: max_unpooling2d_forward_cpu
@@ -6060,6 +6011,7 @@
      CUDA: max_unpooling2d_backward_out_cuda

  - func: max_unpool2d_backward(Tensor grad_output, Tensor self, Tensor indices, int[2] output_size) -> Tensor
+   use_c10_dispatcher: full
    python_module: nn
    dispatch:
      CPU: max_unpooling2d_backward_cpu
@@ -6072,6 +6024,7 @@
      CUDA: max_unpooling3d_forward_out_cuda

  - func: max_unpool3d(Tensor self, Tensor indices, int[3] output_size, int[3] stride, int[3] padding) -> Tensor
+   use_c10_dispatcher: full
    python_module: nn
    dispatch:
      CPU: max_unpooling3d_forward_cpu
@@ -6084,6 +6037,7 @@
      CUDA: max_unpooling3d_backward_out_cuda

  - func: max_unpool3d_backward(Tensor grad_output, Tensor self, Tensor indices, int[3] output_size, int[3] stride, int[3] padding) -> Tensor
+   use_c10_dispatcher: full
    python_module: nn
    dispatch:
      CPU: max_unpooling3d_backward_cpu
@@ -6096,10 +6050,12 @@
      CUDA: reflection_pad1d_out_cuda

  - func: reflection_pad1d(Tensor self, int[2] padding) -> Tensor
+   use_c10_dispatcher: full
    python_module: nn
    dispatch:
      CPU: reflection_pad1d_cpu
      CUDA: reflection_pad1d_cuda
+     QuantizedCPU: reflection_pad1d_cpu

  - func: reflection_pad1d_backward.grad_input(Tensor grad_output, Tensor self, int[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
    python_module: nn
@@ -6108,6 +6064,7 @@
      CUDA: reflection_pad1d_backward_out_cuda

  - func: reflection_pad1d_backward(Tensor grad_output, Tensor self, int[2] padding) -> Tensor
+   use_c10_dispatcher: full
    python_module: nn
    dispatch:
      CPU: reflection_pad1d_backward_cpu
@@ -6120,6 +6077,7 @@
      CUDA: reflection_pad2d_out_cuda

  - func: reflection_pad2d(Tensor self, int[4] padding) -> Tensor
+   use_c10_dispatcher: full
    python_module: nn
    dispatch:
      CPU: reflection_pad2d_cpu
@@ -6132,6 +6090,7 @@
      CUDA: reflection_pad2d_backward_out_cuda

  - func: reflection_pad2d_backward(Tensor grad_output, Tensor self, int[4] padding) -> Tensor
+   use_c10_dispatcher: full
    python_module: nn
    dispatch:
      CPU: reflection_pad2d_backward_cpu
@@ -6144,6 +6103,7 @@
      CUDA: replication_pad1d_out_cuda

  - func: replication_pad1d(Tensor self, int[2] padding) -> Tensor
+   use_c10_dispatcher: full
    python_module: nn
    dispatch:
      CPU: replication_pad1d_cpu
@@ -6156,6 +6116,7 @@
      CUDA: replication_pad1d_backward_out_cuda

  - func: replication_pad1d_backward(Tensor grad_output, Tensor self, int[2] padding) -> Tensor
+   use_c10_dispatcher: full
    python_module: nn
    dispatch:
      CPU: replication_pad1d_backward_cpu
@@ -6168,6 +6129,7 @@
      CUDA: replication_pad2d_out_cuda

  - func: replication_pad2d(Tensor self, int[4] padding) -> Tensor
+   use_c10_dispatcher: full
    python_module: nn
    dispatch:
      CPU: replication_pad2d_cpu
@@ -6180,6 +6142,7 @@
      CUDA: replication_pad2d_backward_out_cuda

  - func: replication_pad2d_backward(Tensor grad_output, Tensor self, int[4] padding) -> Tensor
+   use_c10_dispatcher: full
    python_module: nn
    dispatch:
      CPU: replication_pad2d_backward_cpu
@@ -6192,6 +6155,7 @@
      CUDA: replication_pad3d_out_cuda

  - func: replication_pad3d(Tensor self, int[6] padding) -> Tensor
+   use_c10_dispatcher: full
    python_module: nn
    dispatch:
      CPU: replication_pad3d_cpu
@@ -6204,6 +6168,7 @@
      CUDA: replication_pad3d_backward_out_cuda

  - func: replication_pad3d_backward(Tensor grad_output, Tensor self, int[6] padding) -> Tensor
+   use_c10_dispatcher: full
    python_module: nn
    dispatch:
      CPU: replication_pad3d_backward_cpu
@@ -6216,6 +6181,7 @@
      CUDA: upsample_linear1d_out_cuda

  - func: upsample_linear1d(Tensor self, int[1] output_size, bool align_corners, float? scales=None) -> Tensor
+   use_c10_dispatcher: full
    python_module: nn
    dispatch:
      CPU: upsample_linear1d_cpu
@@ -6228,6 +6194,7 @@
      CUDA: upsample_linear1d_backward_out_cuda

  - func: upsample_linear1d_backward(Tensor grad_output, int[1] output_size, int[3] input_size, bool align_corners, float? scales=None) -> Tensor
+   use_c10_dispatcher: full
    python_module: nn
    dispatch:
      CPU: upsample_linear1d_backward_cpu
@@ -6240,6 +6207,7 @@
      CUDA: upsample_bilinear2d_out_cuda

  - func: upsample_bilinear2d(Tensor self, int[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
+   use_c10_dispatcher: full
    python_module: nn
    dispatch:
      CPU: upsample_bilinear2d_cpu
@@ -6253,6 +6221,7 @@
      CUDA: upsample_bilinear2d_backward_out_cuda

  - func: upsample_bilinear2d_backward(Tensor grad_output, int[2] output_size, int[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
+   use_c10_dispatcher: full
    python_module: nn
    dispatch:
      CPU: upsample_bilinear2d_backward_cpu
@@ -6265,6 +6234,7 @@
      CUDA: upsample_bicubic2d_out_cuda

  - func: upsample_bicubic2d(Tensor self, int[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
+   use_c10_dispatcher: full
    python_module: nn
    dispatch:
      CPU: upsample_bicubic2d_cpu
@@ -6277,6 +6247,7 @@
      CUDA: upsample_bicubic2d_backward_out_cuda

  - func: upsample_bicubic2d_backward(Tensor grad_output, int[2] output_size, int[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
+   use_c10_dispatcher: full
    python_module: nn
    dispatch:
      CPU: upsample_bicubic2d_backward_cpu
@@ -6289,6 +6260,7 @@
      CUDA: upsample_trilinear3d_out_cuda

  - func: upsample_trilinear3d(Tensor self, int[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
+   use_c10_dispatcher: full
    python_module: nn
    dispatch:
      CPU: upsample_trilinear3d_cpu
@@ -6301,6 +6273,7 @@
      CUDA: upsample_trilinear3d_backward_out_cuda

  - func: upsample_trilinear3d_backward(Tensor grad_output, int[3] output_size, int[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
+   use_c10_dispatcher: full
    python_module: nn
    dispatch:
      CPU: upsample_trilinear3d_backward_cpu
@@ -6313,6 +6286,7 @@
      CUDA: upsample_nearest1d_out_cuda

  - func: upsample_nearest1d(Tensor self, int[1] output_size, float? scales=None) -> Tensor
+   use_c10_dispatcher: full
    python_module: nn
    dispatch:
      CPU: upsample_nearest1d_cpu
@@ -6325,6 +6299,7 @@
      CUDA: upsample_nearest1d_backward_out_cuda

  - func: upsample_nearest1d_backward(Tensor grad_output, int[1] output_size, int[3] input_size, float? scales=None) -> Tensor
+   use_c10_dispatcher: full
    python_module: nn
    dispatch:
      CPU: upsample_nearest1d_backward_cpu
@@ -6337,11 +6312,13 @@
      CUDA: upsample_nearest2d_out_cuda

  - func: upsample_nearest2d(Tensor self, int[2] output_size, float? scales_h=None, float? scales_w=None) -> Tensor
+   use_c10_dispatcher: full
    python_module: nn
    dispatch:
      CPU: upsample_nearest2d_cpu
      CUDA: upsample_nearest2d_cuda
      QuantizedCPU: quantized_upsample_nearest2d_cpu
+     Vulkan: upsample_nearest2d_vulkan

  - func: upsample_nearest2d_backward.grad_input(Tensor grad_output, int[2] output_size, int[4] input_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
    python_module: nn
@@ -6350,6 +6327,7 @@
      CUDA: upsample_nearest2d_backward_out_cuda

  - func: upsample_nearest2d_backward(Tensor grad_output, int[2] output_size, int[4] input_size, float? scales_h=None, float? scales_w=None) -> Tensor
+   use_c10_dispatcher: full
    python_module: nn
    dispatch:
      CPU: upsample_nearest2d_backward_cpu
@@ -6362,6 +6340,7 @@
      CUDA: upsample_nearest3d_out_cuda

  - func: upsample_nearest3d(Tensor self, int[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
+   use_c10_dispatcher: full
    python_module: nn
    dispatch:
      CPU: upsample_nearest3d_cpu
@@ -6375,6 +6354,7 @@
      CUDA: upsample_nearest3d_backward_out_cuda

  - func: upsample_nearest3d_backward(Tensor grad_output, int[3] output_size, int[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
+   use_c10_dispatcher: full
    python_module: nn
    dispatch:
      CPU: upsample_nearest3d_backward_cpu
@@ -6437,6 +6417,7 @@
      CUDA: slow_conv_transpose2d_backward_out_cuda

  - func: slow_conv_transpose2d_backward.output_mask(Tensor grad_output, Tensor self, Tensor weight, int[2] kernel_size, int[2] stride, int[2] padding, int[2] output_padding, int[2] dilation, Tensor columns, Tensor ones, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias)
+   use_c10_dispatcher: full
    python_module: nn
    dispatch:
      CPU: slow_conv_transpose2d_backward_cpu
@@ -6461,6 +6442,7 @@
      CUDA: slow_conv_transpose3d_backward_out_cuda

  - func: slow_conv_transpose3d_backward.output_mask(Tensor grad_output, Tensor self, Tensor weight, int[3] kernel_size, int[3] stride, int[3] padding, int[3] output_padding, int[3] dilation, Tensor finput, Tensor fgrad_input, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias)
+   use_c10_dispatcher: full
    python_module: nn
    dispatch:
      CPU: slow_conv_transpose3d_backward_cpu
@@ -6488,13 +6470,14 @@
    python_module: nn
    dispatch:
      CPU: slow_conv2d_backward_out_cpu
-     CUDA: legacy::cuda::_thnn_conv2d_backward_out
+     CUDA: slow_conv2d_backward_out_cuda

  - func: thnn_conv2d_backward.output_mask(Tensor grad_output, Tensor self, Tensor weight, int[2] kernel_size, int[2] stride, int[2] padding, Tensor finput, Tensor fgrad_input, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias)
+   use_c10_dispatcher: full
    python_module: nn
    dispatch:
      CPU: slow_conv2d_backward_cpu
-     CUDA: legacy::cuda::_thnn_conv2d_backward
+     CUDA: slow_conv2d_backward_cuda

  - func: thnn_conv_depthwise2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!)
    python_module: nn
@@ -6515,12 +6498,13 @@
  - func: thnn_conv_depthwise2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor weight, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, *, Tensor(a!)? grad_input, Tensor(b!)? grad_weight) -> (Tensor(a!), Tensor(b!))
    python_module: nn
    dispatch:
-     CUDA: legacy::cuda::_thnn_conv_depthwise2d_backward_out
+     CUDA: thnn_conv_depthwise2d_backward_out

  - func: thnn_conv_depthwise2d_backward.output_mask(Tensor grad_output, Tensor self, Tensor weight, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, bool[2] output_mask) -> (Tensor grad_input, Tensor grad_weight)
+   use_c10_dispatcher: full
    python_module: nn
    dispatch:
-     CUDA: legacy::cuda::_thnn_conv_depthwise2d_backward
+     CUDA: thnn_conv_depthwise2d_backward

  - func: slow_conv3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, int[3] padding=0, *, Tensor(a!) out) -> Tensor(a!)
    python_module: nn
@@ -6544,6 +6528,7 @@
      CPU: slow_conv3d_backward_out_cpu

  - func: slow_conv3d_backward.output_mask(Tensor grad_output, Tensor self, Tensor weight, int[3] kernel_size, int[3] stride, int[3] padding, Tensor finput, Tensor fgrad_input, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias)
+   use_c10_dispatcher: full
    python_module: nn
    dispatch:
      CPU: slow_conv3d_backward_cpu
@@ -6555,6 +6540,7 @@
      CUDA: slow_conv_dilated2d_cuda

  - func: slow_conv_dilated2d_backward(Tensor grad_output, Tensor self, Tensor weight, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias)
+   use_c10_dispatcher: full
    python_module: nn
    dispatch:
      CPU: slow_conv_dilated2d_backward_cpu
@@ -6567,6 +6553,7 @@
      CUDA: slow_conv_dilated3d_cuda

  - func: slow_conv_dilated3d_backward(Tensor grad_output, Tensor self, Tensor weight, int[3] kernel_size, int[3] stride, int[3] padding, int[3] dilation, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias)
+   use_c10_dispatcher: full
    python_module: nn
    dispatch:
      CPU: slow_conv_dilated3d_backward_cpu
@@ -6579,6 +6566,7 @@
      CUDA: col2im_out_cuda

  - func: col2im(Tensor self, int[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor
+   use_c10_dispatcher: full
    python_module: nn
    dispatch:
      CPU: col2im_cpu
@@ -6591,6 +6579,7 @@
      CUDA: col2im_backward_out_cuda

  - func: col2im_backward(Tensor grad_output, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor
+   use_c10_dispatcher: full
    python_module: nn
    dispatch:
      CPU: col2im_backward_cpu
@@ -6603,6 +6592,7 @@
      CUDA: im2col_out_cuda

  - func: im2col(Tensor self, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor
+   use_c10_dispatcher: full
    python_module: nn
    dispatch:
      CPU: im2col_cpu
@@ -6615,6 +6605,7 @@
      CUDA: im2col_backward_out_cuda

  - func: im2col_backward(Tensor grad_output, int[2] input_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor
+   use_c10_dispatcher: full
    python_module: nn
    dispatch:
      CPU: im2col_backward_cpu
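col2im and im2col above now also go through the full c10 dispatcher. im2col is the patch-extraction primitive used when lowering convolutions to matrix multiplies; roughly the same rearrangement can be sketched with two Tensor#unfold calls, assuming that generated method binding is available in torch-rb:

    require "torch"

    x = Torch.arange(16).reshape([1, 1, 4, 4])
    # non-overlapping 2x2 patches over the two spatial dims
    patches = x.unfold(2, 2, 2).unfold(3, 2, 2)
    p patches.shape   # => [1, 1, 2, 2, 2, 2]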
@@ -6622,12 +6613,15 @@

  - func: isfinite(Tensor self) -> Tensor
    use_c10_dispatcher: full
-   variants: function
+   variants: function, method
    device_guard: False
-   supports_named_tensor: True

  - func: isinf(Tensor self) -> Tensor
    use_c10_dispatcher: full
-   variants: function
+   variants: function, method
    device_guard: False
-   supports_named_tensor: True
+
+ # Note: this function is only for testing.
+ # It is undocumented and should not be used outside of tests.
+ - func: _test_serialization_subcmul(Tensor self, Tensor other, Scalar alpha=1) -> Tensor
+   use_c10_dispatcher: full
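The last hunk promotes isfinite and isinf from function-only to function-and-method variants, so they become callable directly on a tensor once torch-rb regenerates its bindings from this schema. A sketch, assuming the method variants come through:

    require "torch"

    t = Torch.tensor([1.0, Float::INFINITY, Float::NAN])
    p Torch.isfinite(t)   # => tensor([true, false, false])
    p t.isinf             # method variant added by this change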