pyg-nightly 2.7.0.dev20250224__py3-none-any.whl → 2.7.0.dev20250226__py3-none-any.whl
This diff compares the contents of two publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the versions as they appear in their respective public registries.
- {pyg_nightly-2.7.0.dev20250224.dist-info → pyg_nightly-2.7.0.dev20250226.dist-info}/METADATA +1 -2
- {pyg_nightly-2.7.0.dev20250224.dist-info → pyg_nightly-2.7.0.dev20250226.dist-info}/RECORD +8 -8
- torch_geometric/__init__.py +1 -1
- torch_geometric/edge_index.py +10 -1
- torch_geometric/hash_tensor.py +170 -20
- torch_geometric/index.py +10 -1
- {pyg_nightly-2.7.0.dev20250224.dist-info → pyg_nightly-2.7.0.dev20250226.dist-info}/WHEEL +0 -0
- {pyg_nightly-2.7.0.dev20250224.dist-info → pyg_nightly-2.7.0.dev20250226.dist-info}/licenses/LICENSE +0 -0
{pyg_nightly-2.7.0.dev20250224.dist-info → pyg_nightly-2.7.0.dev20250226.dist-info}/METADATA
RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pyg-nightly
-Version: 2.7.0.dev20250224
+Version: 2.7.0.dev20250226
 Summary: Graph Neural Network Library for PyTorch
 Keywords: deep-learning,pytorch,geometric-deep-learning,graph-neural-networks,graph-convolutional-networks
 Author-email: Matthias Fey <matthias@pyg.org>
@@ -36,7 +36,6 @@ Requires-Dist: torch_geometric[test] ; extra == "dev"
 Requires-Dist: scipy ; extra == "full"
 Requires-Dist: scikit-learn ; extra == "full"
 Requires-Dist: ase ; extra == "full"
-Requires-Dist: captum<0.7.0 ; extra == "full"
 Requires-Dist: graphviz ; extra == "full"
 Requires-Dist: h5py ; extra == "full"
 Requires-Dist: matplotlib ; extra == "full"
{pyg_nightly-2.7.0.dev20250224.dist-info → pyg_nightly-2.7.0.dev20250226.dist-info}/RECORD
RENAMED
@@ -1,4 +1,4 @@
-torch_geometric/__init__.py,sha256=
+torch_geometric/__init__.py,sha256=MAAgbT2XVvKADBsLycGtgqDaaCWjhrtphXMHu1uUO9k,1978
 torch_geometric/_compile.py,sha256=f-WQeH4VLi5Hn9lrgztFUCSrN_FImjhQa6BxFzcYC38,1338
 torch_geometric/_onnx.py,sha256=V9ffrIKSqhDw6xUZ12lkuSfNs48cQp2EeJ6Z19GfnVw,349
 torch_geometric/backend.py,sha256=lVaf7aLoVaB3M-UcByUJ1G4T4FOK6LXAg0CF4W3E8jo,1575
@@ -7,11 +7,11 @@ torch_geometric/config_store.py,sha256=zdMzlgBpUmBkPovpYQh5fMNwTZLDq2OneqX47QEx7
 torch_geometric/debug.py,sha256=cLyH9OaL2v7POyW-80b19w-ctA7a_5EZsS4aUF1wc2U,1295
 torch_geometric/deprecation.py,sha256=dWRymDIUkUVI2MeEmBG5WF4R6jObZeseSBV9G6FNfjc,858
 torch_geometric/device.py,sha256=tU5-_lBNVbVHl_kUmWPwiG5mQ1pyapwMF4JkmtNN3MM,1224
-torch_geometric/edge_index.py,sha256=
+torch_geometric/edge_index.py,sha256=J8uHIUXleOv4A2XONkT6yaTJi9yXauxBRJiHmJHMox4,70330
 torch_geometric/experimental.py,sha256=JbtNNEXjFGI8hZ9raM6-qrZURP6Z5nlDK8QicZUIbz0,4756
-torch_geometric/hash_tensor.py,sha256=
+torch_geometric/hash_tensor.py,sha256=WB-aBCJWNWqnlnzQ8Ob4LHeCXm0u1_NPPhmNAEwBpq4,24906
 torch_geometric/home.py,sha256=EV54B4Dmiv61GDbkCwtCfWGWJ4eFGwZ8s3KOgGjwYgY,790
-torch_geometric/index.py,sha256=
+torch_geometric/index.py,sha256=VZGVSb19biQ-HyvZA6esAPW_23dgmtQLSc3436WjP64,24327
 torch_geometric/inspector.py,sha256=nKi5o4Mn6xsG0Ex1GudTEQt_EqnF9mcMqGtp7Shh9sQ,19336
 torch_geometric/isinstance.py,sha256=truZjdU9PxSvjJ6k0d_CLJ2iOpen2o8U-54pbUbNRyE,935
 torch_geometric/lazy_loader.py,sha256=SM0UcXtIdiFge75MKBAWXedoiSOdFDOV0rm1PfoF9cE,908
@@ -633,7 +633,7 @@ torch_geometric/utils/undirected.py,sha256=H_nfpI0_WluOG6VfjPyldvcjL4w5USAKWu2x5
 torch_geometric/visualization/__init__.py,sha256=PyR_4K5SafsJrBr6qWrkjKr6GBL1b7FtZybyXCDEVwY,154
 torch_geometric/visualization/graph.py,sha256=ZuLPL92yGRi7lxlqsUPwL_EVVXF7P2kMcveTtW79vpA,4784
 torch_geometric/visualization/influence.py,sha256=CWMvuNA_Nf1sfbJmQgn58yS4OFpeKXeZPe7kEuvkUBw,477
-pyg_nightly-2.7.0.
-pyg_nightly-2.7.0.
-pyg_nightly-2.7.0.
-pyg_nightly-2.7.0.
+pyg_nightly-2.7.0.dev20250226.dist-info/licenses/LICENSE,sha256=ic-27cMJc1kWoMEYncz3Ya3Ur2Bi3bNLWib2DT763-o,1067
+pyg_nightly-2.7.0.dev20250226.dist-info/WHEEL,sha256=_2ozNFCLWc93bK4WKHCO-eDUENDlo-dgc9cU3qokYO4,82
+pyg_nightly-2.7.0.dev20250226.dist-info/METADATA,sha256=ohLoC35Q3PN8CDwaHoNilg0ALbm8R_VEFqF2OPyqKj4,62975
+pyg_nightly-2.7.0.dev20250226.dist-info/RECORD,,
torch_geometric/__init__.py
CHANGED
@@ -31,7 +31,7 @@ from .lazy_loader import LazyLoader
 contrib = LazyLoader('contrib', globals(), 'torch_geometric.contrib')
 graphgym = LazyLoader('graphgym', globals(), 'torch_geometric.graphgym')
 
-__version__ = '2.7.0.dev20250224'
+__version__ = '2.7.0.dev20250226'
 
 __all__ = [
     'Index',
torch_geometric/edge_index.py
CHANGED
@@ -17,6 +17,7 @@ from typing import (
     overload,
 )
 
+import numpy as np
 import torch
 import torch.utils._pytree as pytree
 from torch import Tensor
@@ -1246,6 +1247,14 @@ class EdgeIndex(Tensor):
         return torch._tensor_str._add_suffixes(prefix + tensor_str, suffixes,
                                                indent, force_newline=False)
 
+    def tolist(self) -> List[Any]:
+        """"""  # noqa: D419
+        return self._data.tolist()
+
+    def numpy(self, *, force: bool = False) -> np.ndarray:
+        """"""  # noqa: D419
+        return self._data.numpy(force=force)
+
     # Helpers #################################################################
 
     def _shallow_copy(self) -> 'EdgeIndex':
@@ -1478,7 +1487,7 @@ def _slice(
     step: int = 1,
 ) -> Union[EdgeIndex, Tensor]:
 
-    if ((start is None or start <= 0)
+    if ((start is None or start == 0 or start <= -input.size(dim))
             and (end is None or end > input.size(dim)) and step == 1):
         return input._shallow_copy()  # No-op.
 
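The new `tolist()` and `numpy()` overrides sidestep `__torch_function__` dispatch by delegating directly to the wrapped `_data` tensor. Below is a minimal sketch of the resulting behavior; it is not part of the diff and assumes only the public `EdgeIndex` constructor:

```python
import torch
from torch_geometric import EdgeIndex

# A small COO edge index with optional sparse-size metadata:
edge_index = EdgeIndex(
    torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]]),
    sparse_size=(3, 3),
)

# Both methods now operate on the underlying `_data` tensor directly:
assert edge_index.tolist() == [[0, 1, 1, 2], [1, 0, 2, 1]]
assert edge_index.numpy().shape == (2, 4)
```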
torch_geometric/hash_tensor.py
CHANGED
@@ -12,6 +12,7 @@ from typing import (
     Union,
 )
 
+import numpy as np
 import torch
 import torch.utils._pytree as pytree
 import xxhash
@@ -44,13 +45,6 @@ def as_key_tensor(
         key = torch.as_tensor(key, device=device)
     except Exception:
         device = device or torch.get_default_device()
-        # TODO Convert int64 to int32.
-        # On GPU, we default to int32 for faster 'CUDAHashMap' implementation:
-        if torch_geometric.typing.WITH_CUDA_HASH_MAP and device.type == 'cuda':
-            pass
-            # key = torch.tensor(
-            #     [xxhash.xxh32(x).intdigest() & 0x7FFFFFFF for x in key],
-            #     dtype=torch.int32, device=device)
         key = torch.tensor(
             [xxhash.xxh64(x).intdigest() & 0x7FFFFFFFFFFFFFFF for x in key],
             dtype=torch.int64, device=device)
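After this cleanup, the only fallback for keys that `torch.as_tensor` cannot handle (e.g., strings) is the xxh64 path. A standalone sketch of what that path computes, mirroring the code above outside the package:

```python
import torch
import xxhash

keys = ['Animation', 'Comedy', 'Fantasy']

# Hash each key with xxh64 and clear the sign bit so the digest fits into
# a non-negative torch.int64 value:
key = torch.tensor(
    [xxhash.xxh64(k).intdigest() & 0x7FFFFFFFFFFFFFFF for k in keys],
    dtype=torch.int64,
)
assert key.dtype == torch.int64
assert bool((key >= 0).all())
```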
@@ -72,7 +66,6 @@ def as_key_tensor(
 
 def get_hash_map(key: Tensor) -> Union[CPUHashMap, CUDAHashMap]:
     if torch_geometric.typing.WITH_CUDA_HASH_MAP and key.is_cuda:
-        # TODO Convert int64 to int32.
         return CUDAHashMap(key, 0.5)
 
     if key.is_cuda:
@@ -93,6 +86,62 @@ def get_hash_map(key: Tensor) -> Union[CPUHashMap, CUDAHashMap]:
 
 
 class HashTensor(Tensor):
+    r"""A :pytorch:`null` :class:`torch.Tensor` that can be referenced by
+    arbitrary keys rather than indices in the first dimension.
+
+    :class:`HashTensor` sub-classes a general :pytorch:`null`
+    :class:`torch.Tensor`, and extends it by CPU- and GPU-accelerated mapping
+    routines. This allows for fast and efficient access to non-contiguous
+    indices/keys while the underlying data is stored in a compact format.
+
+    This representation is ideal for scenarios where one needs a fast mapping
+    routine without relying on CPU-based external packages, and can be used,
+    *e.g.*, to perform mapping of global indices to local indices during
+    subgraph creation, or in data-processing pipelines to map non-contiguous
+    input data into a contiguous space, such as
+
+    * mapping of hashed node IDs to range :obj:`[0, num_nodes - 1]`
+    * mapping of raw input data, *e.g.*, categorical data to range
+      :obj:`[0, num_categories - 1]`
+
+    Specifically, :class:`HashTensor` supports *any* keys of *any* type,
+    *e.g.*, strings, timestamps, etc.
+
+    .. code-block:: python
+
+        from torch_geometric import HashTensor
+
+        key = torch.tensor([1000, 100, 10000])
+        value = torch.randn(3, 4)
+
+        tensor = HashTensor(key, value)
+        assert tensor.size() == (3, 4)
+
+        # Filtering:
+        query = torch.tensor([10000, 1000])
+        out = tensor[query]
+        assert out.equal(value[[2, 0]])
+
+        # Accessing non-existing keys:
+        out = tensor[[10000, 0]]
+        out.isnan()
+        >>> tensor([[False, False, False, False],
+        ...         [True, True, True, True]])
+
+        # If `value` is not given, indexing returns the position of `query`
+        # in `key`, and `-1` otherwise:
+        key = ['Animation', 'Comedy', 'Fantasy']
+        tensor = HashTensor(key)
+
+        out = tensor[['Comedy', 'Romance']]
+        >>> tensor([1, -1])
+
+    Args:
+        key: The keys in the first dimension.
+        value: The values to hold.
+        dtype: The desired data type of the values of the returned tensor.
+        device: The device of the returned tensor.
+    """
     _map: Union[Tensor, CPUHashMap, CUDAHashMap]
     _value: Optional[Tensor]
     _min_key: Tensor
@@ -245,7 +294,7 @@ class HashTensor(Tensor):
         import pandas as pd
 
         ser = pd.Series(query.cpu().numpy(), dtype=self._map)
-        index = torch.from_numpy(ser.cat.codes.to_numpy()).long()
+        index = torch.from_numpy(ser.cat.codes.to_numpy().copy()).long()
 
         index = index.to(self.device)
 
@@ -299,10 +348,44 @@ class HashTensor(Tensor):
                              kwargs)
         return func(*args, **(kwargs or {}))
 
+    def __tensor_flatten__(self) -> Tuple[List[str], Tuple[Any, ...]]:
+        attrs = ['_map', '_min_key', '_max_key']
+        if self._value is not None:
+            attrs.append('_value')
+
+        ctx = (self.size(0), self.dtype)
+
+        return attrs, ctx
+
+    @staticmethod
+    def __tensor_unflatten__(
+        inner_tensors: Dict[str, Any],
+        ctx: Tuple[Any, ...],
+        outer_size: Tuple[int, ...],
+        outer_stride: Tuple[int, ...],
+    ) -> 'HashTensor':
+        return HashTensor._from_data(
+            inner_tensors['_map'],
+            inner_tensors.get('_value', None),
+            inner_tensors['_min_key'],
+            inner_tensors['_max_key'],
+            num_keys=ctx[0],
+            dtype=ctx[1],
+        )
+
+    def __repr__(self) -> str:  # type: ignore
+        indent = len(f'{self.__class__.__name__}(')
+        tensor_str = torch._tensor_str._tensor_str(self.as_tensor(), indent)
+        return torch._tensor_str._str_intern(self, tensor_contents=tensor_str)
+
     def tolist(self) -> List[Any]:
         """"""  # noqa: D419
         return self.as_tensor().tolist()
 
+    def numpy(self, *, force: bool = False) -> np.ndarray:
+        """"""  # noqa: D419
+        return self.as_tensor().numpy(force=force)
+
     def index_select(  # type: ignore
         self,
         dim: int,
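`__tensor_flatten__` and `__tensor_unflatten__` implement the tensor-subclass tracing protocol used by `torch.compile`: the subclass is decomposed into named inner tensors plus static context, and reassembled afterwards. A rough sketch of the round-trip contract, with invented keys and values (hypothetical usage, not part of the diff):

```python
import torch
from torch_geometric import HashTensor

tensor = HashTensor(torch.tensor([1000, 100, 10000]), torch.randn(3, 4))

# `attrs` names the inner tensors to trace through; `ctx` carries static
# metadata (number of keys and dtype):
attrs, ctx = tensor.__tensor_flatten__()
inner = {name: getattr(tensor, name) for name in attrs}

# Reassembling from the flattened parts should yield an equivalent tensor:
rebuilt = HashTensor.__tensor_unflatten__(
    inner, ctx, tensor.size(), tensor.stride())
assert isinstance(rebuilt, HashTensor)
assert rebuilt.size() == tensor.size()
```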
@@ -339,6 +422,33 @@ class HashTensor(Tensor):
             self._value.detach_()
         return super().detach_()  # type: ignore
 
+    def __getitem__(self, indices: Any) -> Union['HashTensor', Tensor]:
+        if not isinstance(indices, tuple):
+            indices = (indices, )
+        assert len(indices) > 0
+
+        # We convert any index tensor in the first dimension into a tensor.
+        # This means that downstream handling (i.e. in `aten.index.Tensor`)
+        # needs to take this pre-conversion into account. However, detecting
+        # whether the first dimension is indexed can be tricky at times:
+        # * We need to take into account `Ellipsis`
+        # * We need to take any unsqueezing into account
+        if indices[0] is Ellipsis and len(indices) > 1:
+            nonempty_indices = [i for i in indices[1:] if i is not None]
+            if len(nonempty_indices) == self.dim():
+                indices = indices[1:]
+
+        if isinstance(indices[0], (int, bool)):
+            index: Union[int, Tensor] = int(as_key_tensor([indices[0]]))
+            indices = (index, ) + indices[1:]
+        elif isinstance(indices[0], (Tensor, list, np.ndarray)):
+            index = as_key_tensor(indices[0], device=self.device)
+            indices = (index, ) + indices[1:]
+
+        indices = indices[0] if len(indices) == 1 else indices
+
+        return super().__getitem__(indices)
+
 
 @implements(aten.alias.default)
 def _alias(tensor: HashTensor) -> HashTensor:
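The `__getitem__` override rewrites only the first-dimension index into a key tensor and then defers to regular subclass dispatch, so trailing dimensions keep their positional semantics. A short sketch with invented keys and values (not part of the diff):

```python
import torch
from torch_geometric import HashTensor

key = torch.tensor([1000, 100, 10000])
value = torch.randn(3, 4)
tensor = HashTensor(key, value)

# The first index is interpreted as keys, not positions:
out = tensor[torch.tensor([100, 10000])]
assert out.shape == (2, 4)
```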
@@ -454,7 +564,8 @@ def _pin_memory(tensor: HashTensor) -> HashTensor:
 def _unsqueeze(tensor: HashTensor, dim: int) -> HashTensor:
     if dim == 0 or dim == -(tensor.dim() + 1):
         raise IndexError(f"Cannot unsqueeze '{tensor.__class__.__name__}' in "
-                         f"the first dimension")
+                         f"the first dimension. Please call `as_tensor()` "
+                         f"beforehand")
 
     return tensor._from_data(
         tensor._map,
@@ -528,7 +639,7 @@ def _slice(
 ) -> HashTensor:
 
     if dim == 0 or dim == -tensor.dim():
-        copy = start is None or start <= 0
+        copy = start is None or start == 0 or start <= -tensor.size(0)
         copy &= end is None or end > tensor.size(0)
         copy &= step == 1
         if copy:
@@ -555,22 +666,32 @@ _old_index_select = torch.index_select
 
 def _new_index_select(
     input: Tensor,
-    dim: int,
+    dim: Union[int, str],
     index: Tensor,
-    *,
     out: Optional[Tensor] = None,
 ) -> Tensor:
 
-    if dim < -input.dim() or dim >= input.dim():
+    if isinstance(dim, int) and (dim < -input.dim() or dim >= input.dim()):
         raise IndexError(f"Dimension out of range (expected to be in range of "
                          f"[{-input.dim()}, {input.dim()-1}], but got {dim})")
 
     # We convert any index tensor in the first dimension into a tensor. This
     # means that downstream handling (i.e. in `aten.index_select.default`)
     # needs to take this pre-conversion into account.
-    if
+    if (not torch.jit.is_scripting() and isinstance(input, HashTensor)
+            and isinstance(dim, int) and (dim == 0 or dim == -input.dim())):
         index = as_key_tensor(index, device=input.device)
-
+
+    if isinstance(dim, int):  # Type narrowing...
+        if out is None:
+            return _old_index_select(input, dim, index)
+        else:
+            return _old_index_select(input, dim, index, out=out)
+    else:
+        if out is None:
+            return _old_index_select(input, dim, index)
+        else:
+            return _old_index_select(input, dim, index, out=out)
 
 
 torch.index_select = _new_index_select  # type: ignore
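With the patched `torch.index_select`, the `index` argument is converted from raw keys into a key tensor whenever the input is a `HashTensor` and `dim` refers to its first dimension; the duplicated `int`/`str` branches merely satisfy the typed overloads of the original operator. A hedged usage sketch with invented data (not part of the diff):

```python
import torch
from torch_geometric import HashTensor

tensor = HashTensor(torch.tensor([1000, 100, 10000]), torch.randn(3, 4))

# `index` holds keys rather than positions for the first dimension:
out = torch.index_select(tensor, 0, torch.tensor([10000, 1000]))
assert out.shape == (2, 4)
```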
@@ -603,20 +724,25 @@ _old_select = torch.select
 
 def _new_select(
     input: Tensor,
-    dim: int,
+    dim: Union[int, str],
     index: int,
 ) -> Tensor:
 
-    if dim < -input.dim() or dim >= input.dim():
+    if isinstance(dim, int) and (dim < -input.dim() or dim >= input.dim()):
         raise IndexError(f"Dimension out of range (expected to be in range of "
                          f"[{-input.dim()}, {input.dim()-1}], but got {dim})")
 
     # We convert any index in the first dimension into an integer. This means
     # that downstream handling (i.e. in `aten.select.int`) needs to take this
     # pre-conversion into account.
-    if
+    if (not torch.jit.is_scripting() and isinstance(input, HashTensor)
+            and isinstance(dim, int) and (dim == 0 or dim == -input.dim())):
         index = int(as_key_tensor([index]))
-
+
+    if isinstance(dim, int):  # Type narrowing...
+        return _old_select(input, dim, index)
+    else:
+        return _old_select(input, dim, index)
 
 
 torch.select = _new_select  # type: ignore
@@ -645,3 +771,27 @@ def _select(
         num_keys=tensor.size(0),
         dtype=tensor.dtype,
     )
+
+
+@implements(aten.index.Tensor)
+def _index(
+    tensor: HashTensor,
+    indices: List[Optional[Tensor]],
+) -> Union[HashTensor, Tensor]:
+
+    assert len(indices) > 0
+
+    if indices[0] is not None:
+        out = tensor._get(indices[0])
+        if len(indices) > 1:
+            out = aten.index.Tensor(out, [None] + indices[1:])
+        return out
+
+    return tensor._from_data(
+        tensor._map,
+        aten.index.Tensor(tensor.as_tensor(), indices),
+        tensor._min_key,
+        tensor._max_key,
+        num_keys=tensor.size(0),
+        dtype=tensor.dtype,
+    )
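The new `aten.index.Tensor` implementation distinguishes two cases: if the first dimension is indexed, keys are resolved via `_get` and a plain tensor (or further-indexed result) is returned; if not, only the values are indexed and the hash map is carried over, so the result stays a `HashTensor`. A sketch of the second case, with invented data and behavior as suggested by the code above:

```python
import torch
from torch_geometric import HashTensor

tensor = HashTensor(torch.tensor([1000, 100, 10000]), torch.randn(3, 4))

# Only trailing dimensions are indexed, so the hash map is preserved:
out = tensor[:, torch.tensor([0, 2])]
assert isinstance(out, HashTensor)
assert out.shape == (3, 2)
```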
torch_geometric/index.py
CHANGED
@@ -12,6 +12,7 @@ from typing import (
     Union,
 )
 
+import numpy as np
 import torch
 import torch.utils._pytree as pytree
 from torch import Tensor
@@ -410,6 +411,14 @@ class Index(Tensor):
         return torch._tensor_str._add_suffixes(prefix + tensor_str, suffixes,
                                                indent, force_newline=False)
 
+    def tolist(self) -> List[Any]:
+        """"""  # noqa: D419
+        return self._data.tolist()
+
+    def numpy(self, *, force: bool = False) -> np.ndarray:
+        """"""  # noqa: D419
+        return self._data.numpy(force=force)
+
     # Helpers #################################################################
 
     def _shallow_copy(self) -> 'Index':
@@ -632,7 +641,7 @@ def _slice(
     step: int = 1,
 ) -> Index:
 
-    if ((start is None or start <= 0)
+    if ((start is None or start == 0 or start <= -input.size(dim))
            and (end is None or end > input.size(dim)) and step == 1):
         return input._shallow_copy()  # No-op.
 
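Across `edge_index.py`, `hash_tensor.py`, and `index.py`, the `_slice` no-op check now only fires when the slice provably covers the whole first dimension: `start` must be `None`, `0`, or at most `-size` (all equivalent to "from the beginning"), combined with an open or past-the-end `end` and unit step. A sketch of the intended semantics, with invented values (not part of the diff):

```python
import torch
from torch_geometric import Index

index = Index(torch.tensor([0, 1, 1, 2]), dim_size=3, is_sorted=True)

# A full-coverage slice is a no-op that returns a metadata-preserving
# shallow copy:
assert index[0:].dim_size == 3

# A genuinely narrowing slice (e.g., a negative start such as -2, which the
# old `start <= 0` check mistook for full coverage) must slice the data:
out = index[-2:]
assert out.equal(torch.tensor([1, 2]))
```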
{pyg_nightly-2.7.0.dev20250224.dist-info → pyg_nightly-2.7.0.dev20250226.dist-info}/WHEEL
RENAMED
File without changes

{pyg_nightly-2.7.0.dev20250224.dist-info → pyg_nightly-2.7.0.dev20250226.dist-info}/licenses/LICENSE
RENAMED
File without changes