torch-rb 0.18.0 → 0.19.0

This diff compares the contents of the two package versions as published to their public registry and is provided for informational purposes only.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 0f044a5934fa8a296fe407fd79d3b9ac7e5e582b110df6e024ce01fe88d4dbd6
- data.tar.gz: ac662c6a27bba6c173f9631b16e8832d24cfb72af9b4d845780b9c717404f923
+ metadata.gz: 03164cc479d8f8a32f0669d597e8fe5310d91955e6954cfdc0fffdc8983c5768
+ data.tar.gz: 87fc733016b6f4489b38a419a3879cacbdb1e190cfaa5c02397aceb57c012d16
  SHA512:
- metadata.gz: 8da9c0bb9d466a81483a31b63d8c7670d14025bc364d70461e9a26264ac0567ae10af7ef464d43d6cfc4e00bca5701e846093bc6651ded5c84ef22e6652a59a2
- data.tar.gz: 4420bf303e8ef0ed9b96c607eead13b33aa9f323d0422015868b93d28a1ccd1dd9999053792c04d8810e9dcc3ff8f4594c99587b033653031882f3d1c25310ff
+ metadata.gz: 6ba0480138a10ba43dff625dc1bcf99e2287f238dc4607ea6813e82914f1e335133f55408fa59579343b161c54850316f744c594cf6687b7f2de64a0d71746d1
+ data.tar.gz: 859015641dd14bf919a7982c6673acb296f858518552b8c924fc7e59b9c1b2a9491aa598c01b019b392c8c2bba7b9f65ff0923f838e6cbde7ddaede9c4b69191
data/CHANGELOG.md CHANGED
@@ -1,3 +1,10 @@
+ ## 0.19.0 (2025-01-29)
+
+ - Updated LibTorch to 2.6.0
+ - Improved `inspect` for `Device`
+ - Fixed equality for `Device`
+ - Fixed `index` method for `Device` when no index
+
  ## 0.18.0 (2024-10-22)

  - Updated LibTorch to 2.5.0
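
As a quick reference for the `Device` entries above, here is a sketch of the fixed behavior (outputs are illustrative; the implementation is in `lib/torch/device.rb`, added later in this diff):

```ruby
require "torch"

Torch::Device.new("cpu").index # => nil (LibTorch reports -1 when no index is set)

Torch::Device.new("cuda:0") == Torch::Device.new("cuda:0")
# => true (previously compared by object identity)

Torch::Device.new("cuda:0").inspect
# => 'device(type: "cuda", index: 0)'
```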
data/README.md CHANGED
@@ -17,7 +17,7 @@ Check out:
  First, [download LibTorch](https://pytorch.org/get-started/locally/). For Mac arm64, use:

  ```sh
- curl -L https://download.pytorch.org/libtorch/cpu/libtorch-macos-arm64-2.5.0.zip > libtorch.zip
+ curl -L https://download.pytorch.org/libtorch/cpu/libtorch-macos-arm64-2.6.0.zip > libtorch.zip
  unzip -q libtorch.zip
  ```

@@ -413,6 +413,7 @@ Here’s the list of compatible versions.

  Torch.rb | LibTorch
  --- | ---
+ 0.19.x | 2.6.x
  0.18.x | 2.5.x
  0.17.x | 2.4.x
  0.16.x | 2.3.x
data/codegen/generate_functions.rb CHANGED
@@ -53,7 +53,9 @@ def skip_functions(functions)
  f.base_name == "sym_size" ||
  f.base_name == "sym_numel" ||
  f.base_name == "sym_storage_offset" ||
- f.base_name == "sym_stride"
+ f.base_name == "sym_stride" ||
+ # TODO fix LibTorch 2.6 changes
+ f.base_name == "rrelu_with_noise"
  end
  end

data/codegen/native_functions.yaml CHANGED
@@ -187,7 +187,10 @@
  dispatch:
  CPU: _functional_assert_async_msg_cpu

- - func: _assert_tensor_metadata(Tensor a, SymInt[]? size=None, SymInt[]? stride=None, ScalarType? dtype=None) -> ()
+ - func: _assert_tensor_metadata(Tensor a, SymInt[]? size=None, SymInt[]? stride=None, ScalarType? dtype=None, *, Device? device=None, Layout? layout=None) -> ()
+ dispatch:
+ CompositeExplicitAutograd: _assert_tensor_metadata
+ Meta: _assert_tensor_metadata_meta_symint

  - func: _print(str s) -> ()
  dispatch:
@@ -309,25 +312,25 @@
  - func: _shape_as_tensor(Tensor self) -> Tensor

  - func: dropout(Tensor input, float p, bool train) -> Tensor
- tags: nondeterministic_seeded
+ tags: [nondeterministic_seeded, maybe_aliasing_or_mutating]

  - func: dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)
  tags: nondeterministic_seeded

  - func: feature_dropout(Tensor input, float p, bool train) -> Tensor
- tags: nondeterministic_seeded
+ tags: [nondeterministic_seeded, maybe_aliasing_or_mutating]

  - func: feature_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)
  tags: nondeterministic_seeded

  - func: alpha_dropout(Tensor input, float p, bool train) -> Tensor
- tags: nondeterministic_seeded
+ tags: [nondeterministic_seeded, maybe_aliasing_or_mutating]

  - func: alpha_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)
  tags: nondeterministic_seeded

  - func: feature_alpha_dropout(Tensor input, float p, bool train) -> Tensor
- tags: nondeterministic_seeded
+ tags: [nondeterministic_seeded, maybe_aliasing_or_mutating]

  - func: feature_alpha_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)
  tags: nondeterministic_seeded
@@ -477,7 +480,7 @@

  - func: conj_physical(Tensor self) -> Tensor
  variants: function, method
- tags: pointwise
+ tags: [pointwise, maybe_aliasing_or_mutating]

  - func: conj_physical.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
@@ -641,6 +644,7 @@
  CPU: addmv_out_cpu
  CUDA: addmv_out_cuda
  MPS: addmv_out_mps
+ XPU: addmv_out_xpu
  SparseCsrCPU: addmv_out_sparse_compressed
  SparseCsrCUDA: addmv_out_sparse_compressed_cuda

@@ -1031,17 +1035,20 @@

  - func: atleast_1d(Tensor self) -> Tensor
  variants: function
+ tags: maybe_aliasing_or_mutating

  - func: atleast_1d.Sequence(Tensor[] tensors) -> Tensor[]

  - func: atleast_2d(Tensor self) -> Tensor
  variants: function
+ tags: maybe_aliasing_or_mutating

  - func: atleast_2d.Sequence(Tensor[] tensors) -> Tensor[]
  variants: function

  - func: atleast_3d(Tensor self) -> Tensor
  variants: function
+ tags: maybe_aliasing_or_mutating

  - func: atleast_3d.Sequence(Tensor[] tensors) -> Tensor[]
  variants: function
@@ -1061,6 +1068,7 @@
  CPU: baddbmm_out_cpu
  CUDA: baddbmm_out_cuda
  MPS: baddbmm_out_mps
+ XPU: baddbmm_out_xpu
  SparseCsrCUDA: baddbmm_out_sparse_csr_cuda

  - func: bartlett_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
@@ -1074,6 +1082,7 @@
  autogen: bartlett_window.periodic_out

  - func: batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, bool cudnn_enabled) -> Tensor
+ tags: maybe_aliasing_or_mutating

  - func: quantized_batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor var, float eps, float output_scale, int output_zero_point) -> Tensor
  dispatch:
@@ -1081,6 +1090,7 @@
  autogen: quantized_batch_norm.out

  - func: _batch_norm_impl_index(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, bool cudnn_enabled) -> (Tensor, Tensor, Tensor, Tensor, int)
+ tags: maybe_aliasing_or_mutating

  - func: _batch_norm_impl_index_backward(int impl_index, Tensor input, Tensor grad_output, Tensor? weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var_transform, bool train, float eps, bool[3] output_mask, Tensor reservedSpace) -> (Tensor, Tensor, Tensor)

@@ -1358,6 +1368,7 @@
  CPU: bmm_out_cpu
  CUDA: bmm_out_cuda
  MPS: bmm_out_mps
+ XPU: bmm_out_xpu
  SparseCPU: bmm_out_sparse_cpu
  SparseCUDA: bmm_out_sparse_cuda
  SparseCsrCUDA: bmm_out_sparse_csr_cuda
@@ -1462,6 +1473,7 @@
  variants: function, method
  device_check: NoCheck
  device_guard: False
+ tags: maybe_aliasing_or_mutating

  - func: chunk(Tensor(a -> *) self, int chunks, int dim=0) -> Tensor(a)[]
  variants: function, method
@@ -1788,7 +1800,7 @@
  variants: function, method
  structured_delegate: cos.out
  dispatch:
- NestedTensorCPU, NestedTensorCUDA: cos_nested
+ NestedTensorCPU, NestedTensorCUDA: NestedTensor_cos
  tags: [core, pointwise]

  - func: cos_(Tensor(a!) self) -> Tensor(a!)
@@ -2821,6 +2833,7 @@
  # non-differentiable so NonFunctional doesn't apply
  CompositeExplicitAutograd: full_like
  autogen: full_like.out
+ tags: core

  - func: from_file(str filename, bool? shared=None, int? size=0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  dispatch:
@@ -3179,6 +3192,7 @@
  device_guard: False
  dispatch:
  CPU, CUDA, MPS: isnan
+ NestedTensorCPU, NestedTensorCUDA: NestedTensor_isnan
  SparseCPU, SparseCUDA: isnan_sparse
  SparseCsrCPU, SparseCsrCUDA, SparseCsrMeta: isnan_sparse_csr
  autogen: isnan.out
@@ -3289,7 +3303,9 @@
  autogen: native_layer_norm_backward.out
  tags: core

- - func: rms_norm(Tensor input, int[] normalized_shape, Tensor? weight=None, float? eps=None) -> Tensor
+ - func: rms_norm(Tensor input, SymInt[] normalized_shape, Tensor? weight=None, float? eps=None) -> Tensor
+ dispatch:
+ CompositeImplicitAutograd: rms_norm_symint

  - func: nan_to_num(Tensor self, float? nan=None, float? posinf=None, float? neginf=None) -> Tensor
  variants: function, method
@@ -3355,9 +3371,10 @@
  dispatch:
  CUDA: _cslt_compress

- - func: _cslt_sparse_mm(Tensor compressed_A, Tensor dense_B, Tensor? bias=None, Tensor? alpha=None, ScalarType? out_dtype=None, bool transpose_result=False, int alg_id=0) -> Tensor
+ - func: _cslt_sparse_mm(Tensor compressed_A, Tensor dense_B, Tensor? bias=None, Tensor? alpha=None, ScalarType? out_dtype=None, bool transpose_result=False, int alg_id=0, int split_k=1, bool split_k_one_kernel=True) -> Tensor
  dispatch:
  CUDA: _cslt_sparse_mm
+ tags: needs_fixed_stride_order

  - func: _cslt_sparse_mm_search(Tensor compressed_A, Tensor dense_B, Tensor? bias=None, Tensor? alpha=None, ScalarType? out_dtype=None, bool transpose_result=False) -> int
  dispatch:
@@ -4126,6 +4143,7 @@
  CPU: mm_out_cpu
  CUDA: mm_out_cuda
  MPS: mm_out_mps
+ XPU: mm_out_xpu
  SparseCPU, SparseCUDA: _sparse_mm_out
  SparseCsrCPU, SparseCsrCUDA, SparseCsrMeta: _sparse_csr_mm_out

@@ -4141,16 +4159,24 @@

  - func: _convert_weight_to_int4pack(Tensor self, int innerKTiles) -> Tensor
  dispatch:
- CPU: _convert_weight_to_int4pack_cpu
  CUDA: _convert_weight_to_int4pack_cuda
  MPS: _convert_weight_to_int4pack_mps

  - func: _weight_int4pack_mm(Tensor self, Tensor mat2, int qGroupSize, Tensor qScaleAndZeros) -> Tensor
  dispatch:
- CPU: _weight_int4pack_mm_cpu
  MPS: _weight_int4pack_mm_mps
  CUDA: _weight_int4pack_mm_cuda

+ # Split int4 pack weight between cpu and other devices due to
+ # https://github.com/pytorch/ao/issues/1117#issuecomment-2451252756.
+ - func: _convert_weight_to_int4pack_for_cpu(Tensor self, int innerKTiles) -> Tensor
+ dispatch:
+ CPU: _convert_weight_to_int4pack_cpu
+
+ - func: _weight_int4pack_mm_for_cpu(Tensor self, Tensor mat2, int qGroupSize, Tensor qScaleAndZeros) -> Tensor
+ dispatch:
+ CPU: _weight_int4pack_mm_cpu
+
  - func: _weight_int8pack_mm(Tensor self, Tensor mat2, Tensor scales) -> Tensor
  dispatch:
  CPU: _weight_int8pack_mm_cpu
@@ -4585,6 +4611,7 @@
  CompositeExplicitAutograd: rad2deg
  SparseCPU, SparseCUDA: rad2deg_sparse
  SparseCsrCPU, SparseCsrCUDA, SparseCsrMeta: rad2deg_sparse_csr
+ tags: pointwise

  - func: rad2deg_(Tensor(a!) self) -> Tensor(a!)
  variants: function, method
@@ -4592,12 +4619,14 @@
  CompositeExplicitAutograd: rad2deg_
  SparseCPU, SparseCUDA: rad2deg_sparse_
  SparseCsrCPU, SparseCsrCUDA, SparseCsrMeta: rad2deg_sparse_csr_
+ tags: pointwise

  - func: rad2deg.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  dispatch:
  CompositeExplicitAutograd: rad2deg_out
  SparseCPU, SparseCUDA: rad2deg_sparse_out
  SparseCsrCPU, SparseCsrCUDA, SparseCsrMeta: rad2deg_sparse_csr_out
+ tags: pointwise

  - func: deg2rad(Tensor self) -> Tensor
  variants: function, method
@@ -4990,7 +5019,7 @@

  - func: rrelu(Tensor self, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor
  device_check: NoCheck # TensorIterator
- tags: nondeterministic_seeded
+ tags: [pointwise, nondeterministic_seeded]

  - func: rrelu_(Tensor(a!) self, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor(a!)
  tags: nondeterministic_seeded
@@ -5027,6 +5056,7 @@

  - func: relu6(Tensor self) -> Tensor
  python_module: nn
+ tags: pointwise

  - func: relu6_(Tensor(a!) self) -> Tensor(a!)
  python_module: nn
@@ -5111,6 +5141,7 @@
  structured_delegate: hardshrink.out
  device_check: NoCheck # TensorIterator
  variants: function, method
+ tags: pointwise

  - func: hardshrink_backward.grad_input(Tensor grad_out, Tensor self, Scalar lambd, *, Tensor(a!) grad_input) -> Tensor(a!)
  structured: True
@@ -5175,6 +5206,7 @@

  - func: selu(Tensor self) -> Tensor
  device_check: NoCheck # TensorIterator
+ tags: pointwise

  - func: selu_(Tensor(a!) self) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
@@ -5183,6 +5215,7 @@
  device_check: NoCheck # TensorIterator
  dispatch:
  CompositeExplicitAutograd: celu
+ tags: pointwise

  - func: celu_(Tensor(a!) self, Scalar alpha=1.0) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
@@ -5233,6 +5266,7 @@
  - func: mish(Tensor self) -> Tensor
  structured_delegate: mish.out
  python_module: nn
+ tags: pointwise

  - func: mish_(Tensor(a!) self) -> Tensor(a!)
  structured_delegate: mish.out
@@ -5305,7 +5339,7 @@
  dispatch:
  SparseCsrCPU, SparseCsrCUDA, SparseCsrMeta: sin_sparse_csr
  SparseCPU, SparseCUDA: sin_sparse
- NestedTensorCPU, NestedTensorCUDA: sin_nested
+ NestedTensorCPU, NestedTensorCUDA: NestedTensor_sin
  tags: [core, pointwise]

  - func: sin_(Tensor(a!) self) -> Tensor(a!)
@@ -5803,6 +5837,7 @@
  structured_delegate: sqrt.out
  variants: function, method
  dispatch:
+ NestedTensorCPU, NestedTensorCUDA: NestedTensor_sqrt
  SparseCPU, SparseCUDA: sqrt_sparse
  SparseCsrCPU, SparseCsrCUDA, SparseCsrMeta: sqrt_sparse_csr
  tags: [core, pointwise]
@@ -6032,6 +6067,7 @@
  structured_delegate: threshold.out
  dispatch:
  QuantizedCPU: threshold_quantized_cpu
+ tags: pointwise

  - func: threshold_(Tensor(a!) self, Scalar threshold, Scalar value) -> Tensor(a!)
  device_check: NoCheck # TensorIterator
@@ -6486,6 +6522,7 @@
  device_check: NoCheck # TensorIterator
  dispatch:
  CPU, CUDA, MPS: where_self_out
+ NestedTensorCPU, NestedTensorCUDA: NestedTensor_where_out

  - func: where.ScalarSelf(Tensor condition, Scalar self, Tensor other) -> Tensor
  variants: function
@@ -6988,6 +7025,7 @@
  CPU: addmm_out_cpu
  CUDA: addmm_out_cuda
  MPS: addmm_out_mps
+ XPU: addmm_out_xpu
  SparseCPU: addmm_out_sparse_dense_cpu
  SparseCUDA: addmm_out_sparse_dense_cuda
  SparseCsrCPU: addmm_out_sparse_compressed_cpu
@@ -7016,6 +7054,7 @@
  dispatch:
  CPU: addmm_activation_out_cpu
  CUDA: addmm_activation_out_cuda
+ XPU: addmm_activation_out_xpu

  - func: _addmm_activation(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, bool use_gelu=False) -> Tensor
  structured_delegate: _addmm_activation.out
@@ -7732,6 +7771,7 @@

  - func: cartesian_prod(Tensor[] tensors) -> Tensor
  variants: function
+ tags: maybe_aliasing_or_mutating

  - func: combinations(Tensor self, int r=2, bool with_replacement=False) -> Tensor
  variants: function
@@ -8013,6 +8053,7 @@
  variants: function, method
  dispatch:
  CompositeExplicitAutograd: masked_scatter
+ tags: core

  - func: masked_scatter_backward(Tensor grad_output, Tensor mask, SymInt[] sizes) -> Tensor
  dispatch:
@@ -8247,7 +8288,7 @@
  structured: True
  variants: function
  dispatch:
- CPU, CUDA: scatter_reduce_two
+ CPU, CUDA, MPS: scatter_reduce_two

  - func: eq_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
  structured_delegate: eq.Scalar_out
@@ -8649,18 +8690,18 @@
  - func: addbmm_(Tensor(a!) self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)
  variants: method
  dispatch:
- CPU, CUDA: addbmm_
+ CPU, CUDA, XPU: addbmm_
  MPS: addbmm_mps_

  - func: addbmm.out(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
  dispatch:
- CPU, CUDA: addbmm_out
+ CPU, CUDA, XPU: addbmm_out
  MPS: addbmm_out_mps

  - func: addbmm(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
  variants: method, function
  dispatch:
- CPU, CUDA: addbmm
+ CPU, CUDA, XPU: addbmm
  MPS: addbmm_mps

  - func: random_.from(Tensor(a!) self, int from, int? to, *, Generator? generator=None) -> Tensor(a!)
@@ -8774,12 +8815,14 @@
  dispatch:
  CPU: tril_indices_cpu
  CUDA: tril_indices_cuda
+ MPS: tril_indices_mps
  autogen: tril_indices.out

  - func: triu_indices(int row, int col, int offset=0, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
  dispatch:
  CPU: triu_indices_cpu
  CUDA: triu_indices_cuda
+ MPS: triu_indices_mps
  autogen: triu_indices.out

  - func: trace(Tensor self) -> Tensor
@@ -9234,11 +9277,13 @@
  - func: nonzero_static.out(Tensor self, *, int size, int fill_value=-1, Tensor(a!) out) -> Tensor(a!)
  dispatch:
  CPU: nonzero_static_out_cpu
+ CUDA: nonzero_static_out_cuda

  - func: nonzero_static(Tensor self, *, int size, int fill_value=-1) -> Tensor
  variants: method, function
  dispatch:
  CPU: nonzero_static_cpu
+ CUDA: nonzero_static_cuda

  - func: nonzero_numpy(Tensor self) -> Tensor[]
  variants: method, function
@@ -9577,7 +9622,7 @@
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
- CPU, CUDA: i0_out
+ CPU, CUDA, MPS: i0_out
  tags: pointwise

  - func: sign(Tensor self) -> Tensor
@@ -10153,7 +10198,7 @@
  - func: unfold_backward(Tensor grad_in, SymInt[] input_sizes, int dim, int size, int step) -> Tensor
  variants: function
  dispatch:
- CPU, CUDA: unfold_backward
+ CPU, CUDA, MPS: unfold_backward
  autogen: unfold_backward.out

  - func: equal(Tensor self, Tensor other) -> bool
@@ -11083,6 +11128,22 @@
  CUDA: foreach_tensor_lerp_list_cuda_
  autogen: _foreach_lerp.Scalar_out

+ - func: _foreach_lerp.ScalarList(Tensor[] self, Tensor[] tensors1, Scalar[] weight) -> Tensor[]
+ device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
+ variants: function
+ dispatch:
+ CompositeExplicitAutograd: foreach_tensor_lerp_scalarlist_kernel_slow
+ CUDA: foreach_tensor_lerp_scalarlist_cuda
+ autogen: _foreach_lerp.ScalarList_out
+
+ - func: _foreach_lerp_.ScalarList(Tensor(a!)[] self, Tensor[] tensors1, Scalar[] weight) -> ()
+ device_check: NoCheck # foreach kernels fall back to slow path when tensors are on different devices
+ variants: function
+ dispatch:
+ CompositeExplicitAutograd: foreach_tensor_lerp_scalarlist_kernel_slow_
+ CUDA: foreach_tensor_lerp_scalarlist_cuda_
+ autogen: _foreach_lerp.ScalarList_out
+
  - func: _foreach_lgamma(Tensor[] self) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
  variants: function
@@ -11271,6 +11332,21 @@
  CUDA: foreach_tensor_round_cuda_
  autogen: _foreach_round.out

+ - func: _foreach_rsqrt(Tensor[] self) -> Tensor[]
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
+ variants: function
+ dispatch:
+ CompositeExplicitAutograd: foreach_tensor_rsqrt_slow
+ CUDA: foreach_tensor_rsqrt_cuda
+
+ - func: _foreach_rsqrt_(Tensor(a!)[] self) -> ()
+ device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
+ variants: function
+ dispatch:
+ CompositeExplicitAutograd: foreach_tensor_rsqrt_slow_
+ CUDA: foreach_tensor_rsqrt_cuda_
+ autogen: _foreach_rsqrt.out
+
  - func: _foreach_sigmoid(Tensor[] self) -> Tensor[]
  device_check: NoCheck # foreach kernels fall back to slow path when tensor are on different devices
  variants: function
@@ -11714,6 +11790,7 @@
  structured_delegate: elu.out
  device_check: NoCheck # TensorIterator
  python_module: nn
+ tags: pointwise

  - func: elu_backward.grad_input(Tensor grad_output, Scalar alpha, Scalar scale, Scalar input_scale, bool is_result, Tensor self_or_result, *, Tensor(a!) grad_input) -> Tensor(a!)
  structured: True
@@ -11787,6 +11864,7 @@
  python_module: nn
  dispatch:
  QuantizedCPU: hardsigmoid_quantized_cpu
+ tags: pointwise

  - func: hardsigmoid_(Tensor(a!) self) -> Tensor(a!)
  structured_delegate: hardsigmoid.out
@@ -11818,7 +11896,7 @@
  dispatch:
  CPU, CUDA, MPS: hardtanh
  QuantizedCPU: hardtanh_quantized_cpu
- tags: core
+ tags: [pointwise, core]

  - func: hardtanh_backward.grad_input(Tensor grad_output, Tensor self, Scalar min_val, Scalar max_val, *, Tensor(a!) grad_input) -> Tensor(a!)
  python_module: nn
@@ -11942,19 +12020,20 @@
  CUDA: log_sigmoid_backward_cuda
  MPS: log_sigmoid_backward_mps

- - func: rrelu_with_noise.out(Tensor self, Tensor noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)
+ - func: rrelu_with_noise.out(Tensor self, Tensor(b!) noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)
  python_module: nn
  tags: nondeterministic_seeded
  dispatch:
  CPU: rrelu_with_noise_out_cpu
  CUDA: rrelu_with_noise_out_cuda

- - func: rrelu_with_noise(Tensor self, Tensor noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor
+ - func: rrelu_with_noise(Tensor self, Tensor(b!) noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor
  python_module: nn
  dispatch:
  CPU: rrelu_with_noise_cpu
  CUDA: rrelu_with_noise_cuda
  tags: nondeterministic_seeded
+ autogen: rrelu_with_noise_functional

  - func: rrelu_with_noise_backward(Tensor grad_output, Tensor self, Tensor noise, Scalar lower, Scalar upper, bool training, bool self_is_result) -> Tensor
  python_module: nn
@@ -11962,7 +12041,7 @@
  CompositeExplicitAutograd: rrelu_with_noise_backward
  autogen: rrelu_with_noise_backward.out

- - func: rrelu_with_noise_(Tensor(a!) self, Tensor noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor(a!)
+ - func: rrelu_with_noise_(Tensor(a!) self, Tensor(b!) noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor(a!)
  python_module: nn
  tags: nondeterministic_seeded
  dispatch:
@@ -11982,6 +12061,7 @@
  structured_delegate: softplus.out
  device_check: NoCheck # TensorIterator
  python_module: nn
+ tags: pointwise

  - func: softplus_backward.grad_input(Tensor grad_output, Tensor self, Scalar beta, Scalar threshold, *, Tensor(a!) grad_input) -> Tensor(a!)
  structured: True
@@ -12008,6 +12088,7 @@
  structured_delegate: softshrink.out
  device_check: NoCheck # TensorIterator
  python_module: nn
+ tags: pointwise

  - func: softshrink_backward.grad_input(Tensor grad_output, Tensor self, Scalar lambd, *, Tensor(a!) grad_input) -> Tensor(a!)
  structured: True
@@ -12652,6 +12733,7 @@
  dispatch:
  CPU: upsample_bicubic2d_out_cpu
  CUDA: upsample_bicubic2d_out_cuda
+ MPS: upsample_bicubic2d_out_mps

  - func: upsample_bicubic2d(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
  python_module: nn
@@ -12663,6 +12745,7 @@
  dispatch:
  CPU: upsample_bicubic2d_backward_out_cpu
  CUDA: upsample_bicubic2d_backward_out_cuda
+ MPS: upsample_bicubic2d_backward_out_mps

  - func: upsample_bicubic2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
  python_module: nn
@@ -13043,17 +13126,20 @@
  dispatch:
  CPU: im2col_out_cpu
  CUDA: im2col_out_cuda
+ MPS: im2col_out_mps

  - func: im2col(Tensor self, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor
  python_module: nn
  dispatch:
  CPU: im2col_cpu
  CUDA: im2col_cuda
+ MPS: im2col_mps

  - func: isfinite(Tensor self) -> Tensor
  variants: function, method
  device_check: NoCheck
  device_guard: False
+ tags: pointwise

  - func: isinf(Tensor self) -> Tensor
  variants: function, method
@@ -13061,6 +13147,7 @@
  device_guard: False
  dispatch:
  CompositeExplicitAutograd: isinf
+ NestedTensorCPU, NestedTensorCUDA: NestedTensor_isinf
  SparseCPU, SparseCUDA: isinf_sparse
  SparseMeta: isinf_sparse_meta
  SparseCsrCPU, SparseCsrCUDA, SparseCsrMeta: isinf_sparse_csr
@@ -13076,6 +13163,7 @@
  variants: function, method
  structured_delegate: isposinf.out
  dispatch:
+ NestedTensorCPU, NestedTensorCUDA: NestedTensor_isposinf
  SparseCPU, SparseCUDA: isposinf_sparse
  SparseCsrCPU, SparseCsrCUDA, SparseCsrMeta: isposinf_sparse_csr
  tags: pointwise
@@ -13084,7 +13172,7 @@
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
- CPU, CUDA: isposinf_out
+ CPU, CUDA, MPS: isposinf_out
  SparseCPU, SparseCUDA: isposinf_sparse_out
  SparseCsrCPU, SparseCsrCUDA, SparseCsrMeta: isposinf_sparse_csr_out
  tags: pointwise
@@ -13093,6 +13181,7 @@
  variants: function, method
  structured_delegate: isneginf.out
  dispatch:
+ NestedTensorCPU, NestedTensorCUDA: NestedTensor_isneginf
  SparseCPU, SparseCUDA: isneginf_sparse
  SparseCsrCPU, SparseCsrCUDA, SparseCsrMeta: isneginf_sparse_csr
  tags: pointwise
@@ -13101,7 +13190,7 @@
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
- CPU, CUDA: isneginf_out
+ CPU, CUDA, MPS: isneginf_out
  SparseCPU, SparseCUDA: isneginf_sparse_out
  SparseCsrCPU, SparseCsrCUDA, SparseCsrMeta: isneginf_sparse_csr_out
  tags: pointwise
@@ -13114,7 +13203,7 @@
  variants: function

  # See NOTE [_add_batch_dim and _remove_batch_dim]
- - func: _remove_batch_dim(Tensor self, int level, int batch_size, int out_dim) -> Tensor
+ - func: _remove_batch_dim(Tensor self, int level, SymInt batch_size, int out_dim) -> Tensor
  variants: function

  ## Functions related to the `torch.special` namespace
@@ -13414,7 +13503,7 @@
  structured: True
  structured_inherits: TensorIteratorBase
  dispatch:
- CPU, CUDA: special_i1_out
+ CPU, CUDA, MPS: special_i1_out
  tags: pointwise

  - func: special_i1e(Tensor self) -> Tensor
@@ -14706,6 +14795,11 @@
  CUDA: _fbgemm_dense_to_jagged_forward_symint
  CPU: _padded_dense_to_jagged_forward_cpu

+ - func: _nested_from_padded_tensor(Tensor padded, Tensor offsets, Tensor dummy, int ragged_idx=1, Tensor? min_seqlen=None, Tensor? max_seqlen=None, SymInt? sum_S=None) -> Tensor
+ variants: function
+ device_check: NoCheck
+ dispatch: {}
+
  - func: _nested_tensor_softmax_with_shape(Tensor self, Tensor query) -> Tensor
  dispatch:
  NestedTensorCPU: NestedTensor_softmax_dropout
data/ext/torch/device.cpp CHANGED
@@ -8,7 +8,7 @@ void init_device(Rice::Module& m) {
  Rice::define_class_under<torch::Device>(m, "Device")
  .define_constructor(Rice::Constructor<torch::Device, const std::string&>())
  .define_method(
- "index",
+ "_index",
  [](torch::Device& self) {
  return self.index();
  })
@@ -23,5 +23,10 @@ void init_device(Rice::Module& m) {
  std::stringstream s;
  s << self.type();
  return s.str();
+ })
+ .define_method(
+ "_str",
+ [](torch::Device& self) {
+ return self.str();
  });
  }
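
Note the rename above: the raw binding becomes `_index` (and a new `_str` binding is added), and the Ruby wrapper in `lib/torch/device.rb` (added later in this diff) builds the public API on top of them. A small sketch of the raw vs. wrapped methods:

```ruby
device = Torch::Device.new("cuda:1")
device._index # => 1 (raw binding; LibTorch uses -1 for "no index")
device._str   # => "cuda:1" (raw binding wrapping Device::str())
device.index  # => 1 (wrapper; returns nil when there is no index)
```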
data/ext/torch/ext.cpp CHANGED
@@ -31,6 +31,7 @@ void Init_ext()

  // keep this order
  init_torch(m);
+ init_device(m);
  init_tensor(m, rb_cTensor, rb_cTensorOptions);
  init_nn(m);
  init_fft(m);
@@ -39,7 +40,6 @@ void Init_ext()

  init_backends(m);
  init_cuda(m);
- init_device(m);
  init_generator(m, rb_cGenerator);
  init_ivalue(m, rb_cIValue);
  init_random(m);
data/ext/torch/tensor.cpp CHANGED
@@ -212,11 +212,9 @@ void init_tensor(Rice::Module& m, Rice::Class& c, Rice::Class& rb_cTensorOptions
  return s.str();
  })
  .define_method(
- "device",
+ "_device",
  [](Tensor& self) {
- std::stringstream s;
- s << self.device();
- return s.str();
+ return self.device();
  })
  .define_method(
  "_data_str",
data/ext/torch/torch.cpp CHANGED
@@ -9,19 +9,14 @@
  #include "utils.h"

  template<typename T>
- torch::Tensor make_tensor(Rice::Array a, std::vector<int64_t> size, const torch::TensorOptions &options) {
+ torch::Tensor make_tensor(Rice::Array a, const std::vector<int64_t> &size, const torch::TensorOptions &options) {
  std::vector<T> vec;
+ vec.reserve(a.size());
  for (long i = 0; i < a.size(); i++) {
  vec.push_back(Rice::detail::From_Ruby<T>().convert(a[i].value()));
  }

- // hack for requires_grad error
- auto requires_grad = options.requires_grad();
- torch::Tensor t = torch::tensor(vec, options.requires_grad(c10::nullopt));
- if (requires_grad) {
- t.set_requires_grad(true);
- }
-
+ torch::Tensor t = torch::tensor(vec, options);
  return t.reshape(size);
  }

@@ -46,12 +41,12 @@ void init_torch(Rice::Module& m) {
  // config
  .define_singleton_function(
  "show_config",
- [] {
+ []() {
  return torch::show_config();
  })
  .define_singleton_function(
  "parallel_info",
- [] {
+ []() {
  return torch::get_parallel_info();
  })
  // begin operations
@@ -74,13 +69,13 @@ void init_torch(Rice::Module& m) {
  })
  .define_singleton_function(
  "_from_blob",
- [](Rice::String s, std::vector<int64_t> size, const torch::TensorOptions &options) {
+ [](Rice::String s, const std::vector<int64_t> &size, const torch::TensorOptions &options) {
  void *data = const_cast<char *>(s.c_str());
  return torch::from_blob(data, size, options);
  })
  .define_singleton_function(
  "_tensor",
- [](Rice::Array a, std::vector<int64_t> size, const torch::TensorOptions &options) {
+ [](Rice::Array a, const std::vector<int64_t> &size, const torch::TensorOptions &options) {
  auto dtype = options.dtype();
  if (dtype == torch::kByte) {
  return make_tensor<uint8_t>(a, size, options);
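
Per the removed `// hack for requires_grad error` comment, `torch::tensor` previously errored when `requires_grad` was set in the options, so the flag had to be applied after construction; with LibTorch 2.6 the options now pass through directly. From Ruby, the affected path is ordinary tensor creation (a minimal sketch):

```ruby
require "torch"

x = Torch.tensor([1.0, 2.0, 3.0], requires_grad: true)
(x * x).sum.backward
x.grad # => gradients [2.0, 4.0, 6.0]
```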
data/ext/torch/utils.h CHANGED
@@ -6,7 +6,7 @@
  #include <rice/stl.hpp>

  static_assert(
- TORCH_VERSION_MAJOR == 2 && TORCH_VERSION_MINOR == 5,
+ TORCH_VERSION_MAJOR == 2 && TORCH_VERSION_MINOR == 6,
  "Incompatible LibTorch version"
  );

data/lib/torch/device.rb ADDED
@@ -0,0 +1,25 @@
+ module Torch
+ class Device
+ def index
+ index? ? _index : nil
+ end
+
+ def inspect
+ extra = ", index: #{index.inspect}" if index?
+ "device(type: #{type.inspect}#{extra})"
+ end
+ alias_method :to_s, :inspect
+
+ def ==(other)
+ eql?(other)
+ end
+
+ def eql?(other)
+ other.is_a?(Device) && other.type == type && other.index == index
+ end
+
+ def hash
+ [type, index].hash
+ end
+ end
+ end
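
Because `eql?` and `hash` are defined together, two `Device` objects that describe the same device now behave as the same Hash key (a small usage sketch):

```ruby
require "torch"

a = Torch::Device.new("cuda:1")
b = Torch::Device.new("cuda:1")

a == b            # => true
{ a => :fast }[b] # => :fast (eql?/hash agree)
```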
data/lib/torch/tensor.rb CHANGED
@@ -209,5 +209,10 @@ module Torch
  raise TypeError, "#{self.class} can't be coerced into #{other.class}"
  end
  end
+
+ # TODO return Device instead of String in 0.20.0
+ def device
+ _device._str
+ end
  end
  end
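
So `Tensor#device` keeps returning a `String` for backwards compatibility; wrapping it yields the full object (a sketch of the 0.19.0 behavior):

```ruby
require "torch"

x = Torch.tensor([1, 2, 3])
x.device                    # => "cpu" (still a String in this release)
Torch.device(x.device).type # => "cpu" (full Device object)
```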
data/lib/torch/version.rb CHANGED
@@ -1,3 +1,3 @@
  module Torch
- VERSION = "0.18.0"
+ VERSION = "0.19.0"
  end
data/lib/torch.rb CHANGED
@@ -8,6 +8,7 @@ require "set"
  require "tmpdir"

  # modules
+ require_relative "torch/device"
  require_relative "torch/inspector"
  require_relative "torch/tensor"
  require_relative "torch/version"
@@ -382,7 +383,11 @@ module Torch
  alias_method :set_grad_enabled, :grad_enabled

  def device(str)
- Device.new(str)
+ if str.is_a?(Device)
+ str
+ else
+ Device.new(str)
+ end
  end

  def save(obj, f)
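
`Torch.device` is now idempotent: passing an existing `Device` returns it unchanged, so callers can normalize either a string or a `Device` (a short sketch):

```ruby
require "torch"

dev = Torch.device("cuda:0")
Torch.device(dev).equal?(dev) # => true (same object, not a copy)
Torch.device("cuda:0") == dev # => true (via the new Device equality)
```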
metadata CHANGED
@@ -1,14 +1,13 @@
  --- !ruby/object:Gem::Specification
  name: torch-rb
  version: !ruby/object:Gem::Version
- version: 0.18.0
+ version: 0.19.0
  platform: ruby
  authors:
  - Andrew Kane
- autorequire:
  bindir: bin
  cert_chain: []
- date: 2024-10-22 00:00:00.000000000 Z
+ date: 2025-01-30 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: rice
@@ -24,7 +23,6 @@ dependencies:
  - - ">="
  - !ruby/object:Gem::Version
  version: 4.3.3
- description:
  email: andrew@ankane.org
  executables: []
  extensions:
@@ -65,6 +63,7 @@ files:
  - ext/torch/wrap_outputs.h
  - lib/torch-rb.rb
  - lib/torch.rb
+ - lib/torch/device.rb
  - lib/torch/hub.rb
  - lib/torch/inspector.rb
  - lib/torch/nn/adaptive_avg_pool1d.rb
@@ -224,7 +223,6 @@ homepage: https://github.com/ankane/torch.rb
  licenses:
  - BSD-3-Clause
  metadata: {}
- post_install_message:
  rdoc_options: []
  require_paths:
  - lib
@@ -239,8 +237,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
  - !ruby/object:Gem::Version
  version: '0'
  requirements: []
- rubygems_version: 3.5.16
- signing_key:
+ rubygems_version: 3.6.2
  specification_version: 4
  summary: Deep learning for Ruby, powered by LibTorch
  test_files: []