pyg-nightly 2.7.0.dev20250225__py3-none-any.whl → 2.7.0.dev20250226__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
pyg_nightly-2.7.0.dev20250225.dist-info/METADATA → pyg_nightly-2.7.0.dev20250226.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pyg-nightly
-Version: 2.7.0.dev20250225
+Version: 2.7.0.dev20250226
 Summary: Graph Neural Network Library for PyTorch
 Keywords: deep-learning,pytorch,geometric-deep-learning,graph-neural-networks,graph-convolutional-networks
 Author-email: Matthias Fey <matthias@pyg.org>
@@ -36,7 +36,6 @@ Requires-Dist: torch_geometric[test] ; extra == "dev"
 Requires-Dist: scipy ; extra == "full"
 Requires-Dist: scikit-learn ; extra == "full"
 Requires-Dist: ase ; extra == "full"
-Requires-Dist: captum<0.7.0 ; extra == "full"
 Requires-Dist: graphviz ; extra == "full"
 Requires-Dist: h5py ; extra == "full"
 Requires-Dist: matplotlib ; extra == "full"
pyg_nightly-2.7.0.dev20250225.dist-info/RECORD → pyg_nightly-2.7.0.dev20250226.dist-info/RECORD
@@ -1,4 +1,4 @@
-torch_geometric/__init__.py,sha256=2_FzSuy3z_Dpnuncdqy_FCcZmG9u9595U4yzJS-1EiY,1978
+torch_geometric/__init__.py,sha256=MAAgbT2XVvKADBsLycGtgqDaaCWjhrtphXMHu1uUO9k,1978
 torch_geometric/_compile.py,sha256=f-WQeH4VLi5Hn9lrgztFUCSrN_FImjhQa6BxFzcYC38,1338
 torch_geometric/_onnx.py,sha256=V9ffrIKSqhDw6xUZ12lkuSfNs48cQp2EeJ6Z19GfnVw,349
 torch_geometric/backend.py,sha256=lVaf7aLoVaB3M-UcByUJ1G4T4FOK6LXAg0CF4W3E8jo,1575
@@ -7,11 +7,11 @@ torch_geometric/config_store.py,sha256=zdMzlgBpUmBkPovpYQh5fMNwTZLDq2OneqX47QEx7
 torch_geometric/debug.py,sha256=cLyH9OaL2v7POyW-80b19w-ctA7a_5EZsS4aUF1wc2U,1295
 torch_geometric/deprecation.py,sha256=dWRymDIUkUVI2MeEmBG5WF4R6jObZeseSBV9G6FNfjc,858
 torch_geometric/device.py,sha256=tU5-_lBNVbVHl_kUmWPwiG5mQ1pyapwMF4JkmtNN3MM,1224
-torch_geometric/edge_index.py,sha256=BsLh5tOZRjjSYDkjqOFAdBuvMaDg7EWaaLELYsUL0Z8,70048
+torch_geometric/edge_index.py,sha256=J8uHIUXleOv4A2XONkT6yaTJi9yXauxBRJiHmJHMox4,70330
 torch_geometric/experimental.py,sha256=JbtNNEXjFGI8hZ9raM6-qrZURP6Z5nlDK8QicZUIbz0,4756
-torch_geometric/hash_tensor.py,sha256=xXKWffFz4ML4jTKPNagiAWqu-Cjptmb3WNhIPo0C0pw,23200
+torch_geometric/hash_tensor.py,sha256=WB-aBCJWNWqnlnzQ8Ob4LHeCXm0u1_NPPhmNAEwBpq4,24906
 torch_geometric/home.py,sha256=EV54B4Dmiv61GDbkCwtCfWGWJ4eFGwZ8s3KOgGjwYgY,790
-torch_geometric/index.py,sha256=9ChzWFCwj2slNcVBOgfV-wQn-KscJe_y7502w-Vf76w,24045
+torch_geometric/index.py,sha256=VZGVSb19biQ-HyvZA6esAPW_23dgmtQLSc3436WjP64,24327
 torch_geometric/inspector.py,sha256=nKi5o4Mn6xsG0Ex1GudTEQt_EqnF9mcMqGtp7Shh9sQ,19336
 torch_geometric/isinstance.py,sha256=truZjdU9PxSvjJ6k0d_CLJ2iOpen2o8U-54pbUbNRyE,935
 torch_geometric/lazy_loader.py,sha256=SM0UcXtIdiFge75MKBAWXedoiSOdFDOV0rm1PfoF9cE,908
@@ -633,7 +633,7 @@ torch_geometric/utils/undirected.py,sha256=H_nfpI0_WluOG6VfjPyldvcjL4w5USAKWu2x5
 torch_geometric/visualization/__init__.py,sha256=PyR_4K5SafsJrBr6qWrkjKr6GBL1b7FtZybyXCDEVwY,154
 torch_geometric/visualization/graph.py,sha256=ZuLPL92yGRi7lxlqsUPwL_EVVXF7P2kMcveTtW79vpA,4784
 torch_geometric/visualization/influence.py,sha256=CWMvuNA_Nf1sfbJmQgn58yS4OFpeKXeZPe7kEuvkUBw,477
-pyg_nightly-2.7.0.dev20250225.dist-info/licenses/LICENSE,sha256=ic-27cMJc1kWoMEYncz3Ya3Ur2Bi3bNLWib2DT763-o,1067
-pyg_nightly-2.7.0.dev20250225.dist-info/WHEEL,sha256=_2ozNFCLWc93bK4WKHCO-eDUENDlo-dgc9cU3qokYO4,82
-pyg_nightly-2.7.0.dev20250225.dist-info/METADATA,sha256=m8UNJu0M5iArtdBVYT4uMh9AJdhwbI6VVLnsGvQcgnI,63021
-pyg_nightly-2.7.0.dev20250225.dist-info/RECORD,,
+pyg_nightly-2.7.0.dev20250226.dist-info/licenses/LICENSE,sha256=ic-27cMJc1kWoMEYncz3Ya3Ur2Bi3bNLWib2DT763-o,1067
+pyg_nightly-2.7.0.dev20250226.dist-info/WHEEL,sha256=_2ozNFCLWc93bK4WKHCO-eDUENDlo-dgc9cU3qokYO4,82
+pyg_nightly-2.7.0.dev20250226.dist-info/METADATA,sha256=ohLoC35Q3PN8CDwaHoNilg0ALbm8R_VEFqF2OPyqKj4,62975
+pyg_nightly-2.7.0.dev20250226.dist-info/RECORD,,
torch_geometric/__init__.py CHANGED
@@ -31,7 +31,7 @@ from .lazy_loader import LazyLoader
 contrib = LazyLoader('contrib', globals(), 'torch_geometric.contrib')
 graphgym = LazyLoader('graphgym', globals(), 'torch_geometric.graphgym')
 
-__version__ = '2.7.0.dev20250225'
+__version__ = '2.7.0.dev20250226'
 
 __all__ = [
     'Index',
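
The only functional change in this file is the nightly version bump, which can be read back at runtime (a trivial, hypothetical session; the printed value assumes the new nightly is installed):

    import torch_geometric

    print(torch_geometric.__version__)  # expected: '2.7.0.dev20250226'
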
torch_geometric/edge_index.py CHANGED
@@ -17,6 +17,7 @@ from typing import (
     overload,
 )
 
+import numpy as np
 import torch
 import torch.utils._pytree as pytree
 from torch import Tensor
@@ -1246,6 +1247,14 @@ class EdgeIndex(Tensor):
         return torch._tensor_str._add_suffixes(prefix + tensor_str, suffixes,
                                                indent, force_newline=False)
 
+    def tolist(self) -> List[Any]:
+        """"""  # noqa: D419
+        return self._data.tolist()
+
+    def numpy(self, *, force: bool = False) -> np.ndarray:
+        """"""  # noqa: D419
+        return self._data.numpy(force=force)
+
     # Helpers #################################################################
 
     def _shallow_copy(self) -> 'EdgeIndex':
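
The new tolist() and numpy() overrides simply delegate to the wrapped tensor, so they return a plain nested Python list and a NumPy array rather than EdgeIndex instances. A minimal usage sketch (the example edge list is illustrative, not taken from the package):

    import numpy as np
    from torch_geometric import EdgeIndex

    edge_index = EdgeIndex([[0, 1, 1, 2], [1, 0, 2, 1]])

    assert edge_index.tolist() == [[0, 1, 1, 2], [1, 0, 2, 1]]
    assert isinstance(edge_index.numpy(), np.ndarray)
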
@@ -1478,7 +1487,7 @@ def _slice(
     step: int = 1,
 ) -> Union[EdgeIndex, Tensor]:
 
-    if ((start is None or start <= 0)
+    if ((start is None or start == 0 or start <= -input.size(dim))
             and (end is None or end > input.size(dim)) and step == 1):
         return input._shallow_copy()  # No-op.
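
The tightened condition treats a slice as a no-op only when start is None, 0, or reaches back to (or past) the beginning of the dimension; a negative start such as -1 selects a suffix and must not be short-circuited. A hedged illustration of that behavior (the example tensor is made up):

    from torch_geometric import EdgeIndex

    edge_index = EdgeIndex([[0, 1, 1, 2], [1, 0, 2, 1]])  # 4 columns

    assert edge_index[:, -1:].size(1) == 1  # real slice: keeps only the last column
    assert edge_index[:, -4:].size(1) == 4  # covers everything: may be a no-op copy
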
torch_geometric/hash_tensor.py CHANGED
@@ -45,13 +45,6 @@ def as_key_tensor(
         key = torch.as_tensor(key, device=device)
     except Exception:
         device = device or torch.get_default_device()
-        # TODO Convert int64 to int32.
-        # On GPU, we default to int32 for faster 'CUDAHashMap' implementation:
-        if torch_geometric.typing.WITH_CUDA_HASH_MAP and device.type == 'cuda':
-            pass
-            # key = torch.tensor(
-            #     [xxhash.xxh32(x).intdigest() & 0x7FFFFFFF for x in key],
-            #     dtype=torch.int32, device=device)
         key = torch.tensor(
             [xxhash.xxh64(x).intdigest() & 0x7FFFFFFFFFFFFFFF for x in key],
             dtype=torch.int64, device=device)
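
For keys that cannot be converted into a tensor directly (e.g., strings), the fallback above hashes each key with xxhash and masks the 64-bit digest into the non-negative int64 range. A standalone sketch of that mapping (the keys are illustrative):

    import torch
    import xxhash

    keys = ['Animation', 'Comedy', 'Fantasy']
    hashed = torch.tensor(
        [xxhash.xxh64(x).intdigest() & 0x7FFFFFFFFFFFFFFF for x in keys],
        dtype=torch.int64)

    assert hashed.dtype == torch.int64 and bool((hashed >= 0).all())
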
@@ -73,7 +66,6 @@ def as_key_tensor(
 
 def get_hash_map(key: Tensor) -> Union[CPUHashMap, CUDAHashMap]:
     if torch_geometric.typing.WITH_CUDA_HASH_MAP and key.is_cuda:
-        # TODO Convert int64 to int32.
         return CUDAHashMap(key, 0.5)
 
     if key.is_cuda:
@@ -94,6 +86,62 @@ def get_hash_map(key: Tensor) -> Union[CPUHashMap, CUDAHashMap]:
94
86
 
95
87
 
96
88
  class HashTensor(Tensor):
89
+ r"""A :pytorch:`null` :class:`torch.Tensor` that can be referenced by
90
+ arbitrary keys rather than indices in the first dimension.
91
+
92
+ :class:`HashTensor` sub-classes a general :pytorch:`null`
93
+ :class:`torch.Tensor`, and extends it by CPU- and GPU-accelerated mapping
94
+ routines. This allow for fast and efficient access to non-contiguous
95
+ indices/keys while the underlying data is stored in a compact format.
96
+
97
+ This representation is ideal for scenarios where one needs a fast mapping
98
+ routine without relying on CPU-based external packages, and can be used,
99
+ *e.g.*, to perform mapping of global indices to local indices during
100
+ subgraph creation, or in data-processing pipelines to map non-contiguous
101
+ input data into a contiguous space, such as
102
+
103
+ * mapping of hashed node IDs to range :obj:`[0, num_nodes - 1]`
104
+ * mapping of raw input data, *e.g.*, categorical data to range
105
+ :obj:`[0, num_categories - 1]`
106
+
107
+ Specifically, :class:`HashTensor` supports *any* keys of *any* type,
108
+ *e.g.*, strings, timestamps, etc.
109
+
110
+ .. code-block:: python
111
+
112
+ from torch_geometric import HashTensor
113
+
114
+ key = torch.tensor([1000, 100, 10000])
115
+ value = torch.randn(3, 4)
116
+
117
+ tensor = HashTensor(key, value)
118
+ assert tensor.size() == (3, 4)
119
+
120
+ # Filtering:
121
+ query = torch.tensor([10000, 1000])
122
+ out = tensor[query]
123
+ assert out.equal(value[[2, 0]])
124
+
125
+ # Accessing non-existing keys:
126
+ out = tensor[[10000, 0]]
127
+ out.isnan()
128
+ >>> tensor([[False, False, False, False],
129
+ ... [True, True, True, True])
130
+
131
+ # If `value` is not given, indexing returns the position of `query` in
132
+ # `key`, and `-1` otherwise:
133
+ key = ['Animation', 'Comedy', 'Fantasy']
134
+ tensor = HashTensor(key)
135
+
136
+ out = tensor[['Comedy', 'Romance']]
137
+ >>> tensor([1, -1])
138
+
139
+ Args:
140
+ key: The keys in the first dimension.
141
+ value: The values to hold.
142
+ dtype: The desired data type of the values of the returned tensor.
143
+ device: The device of the returned tensor.
144
+ """
97
145
  _map: Union[Tensor, CPUHashMap, CUDAHashMap]
98
146
  _value: Optional[Tensor]
99
147
  _min_key: Tensor
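
As the docstring notes, a HashTensor built without a value acts as a mapper from arbitrary keys to their positions, which is the building block for relabeling global node IDs into a local [0, num_nodes - 1] range. A hedged sketch, assuming the indexing behavior documented above:

    import torch
    from torch_geometric import HashTensor

    global_ids = torch.tensor([42, 7, 1003, 56])  # nodes kept in a subgraph
    mapper = HashTensor(global_ids)               # no value given -> positions

    local = mapper[torch.tensor([1003, 42, 56])]  # expected: tensor([2, 0, 3])
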
@@ -591,7 +639,7 @@ def _slice(
 ) -> HashTensor:
 
     if dim == 0 or dim == -tensor.dim():
-        copy = start is None or (start == 0 or start <= -tensor.size(0))
+        copy = start is None or start == 0 or start <= -tensor.size(0)
         copy &= end is None or end > tensor.size(0)
         copy &= step == 1
         if copy:
torch_geometric/index.py CHANGED
@@ -12,6 +12,7 @@ from typing import (
     Union,
 )
 
+import numpy as np
 import torch
 import torch.utils._pytree as pytree
 from torch import Tensor
@@ -410,6 +411,14 @@ class Index(Tensor):
         return torch._tensor_str._add_suffixes(prefix + tensor_str, suffixes,
                                                indent, force_newline=False)
 
+    def tolist(self) -> List[Any]:
+        """"""  # noqa: D419
+        return self._data.tolist()
+
+    def numpy(self, *, force: bool = False) -> np.ndarray:
+        """"""  # noqa: D419
+        return self._data.numpy(force=force)
+
     # Helpers #################################################################
 
     def _shallow_copy(self) -> 'Index':
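
The same tolist()/numpy() accessors are mirrored on Index. A tiny sketch (assuming the public Index constructor; values are illustrative):

    from torch_geometric import Index

    index = Index([0, 1, 1, 2])
    assert index.tolist() == [0, 1, 1, 2]
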
@@ -632,7 +641,7 @@ def _slice(
     step: int = 1,
 ) -> Index:
 
-    if ((start is None or start <= 0)
+    if ((start is None or start <= 0 or start <= -input.size(dim))
             and (end is None or end > input.size(dim)) and step == 1):
         return input._shallow_copy()  # No-op.