pyg-nightly 2.7.0.dev20250221__py3-none-any.whl → 2.7.0.dev20250222__py3-none-any.whl

This diff shows the changes between two publicly released versions of this package, as they appear in their respective public registries. It is provided for informational purposes only.
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: pyg-nightly
- Version: 2.7.0.dev20250221
+ Version: 2.7.0.dev20250222
  Summary: Graph Neural Network Library for PyTorch
  Keywords: deep-learning,pytorch,geometric-deep-learning,graph-neural-networks,graph-convolutional-networks
  Author-email: Matthias Fey <matthias@pyg.org>
@@ -1,4 +1,4 @@
- torch_geometric/__init__.py,sha256=f29gN2VOOKFStDrxccL16W-vkm6S_5-CHkutK9E5NX4,1978
+ torch_geometric/__init__.py,sha256=P_eeFAUMVSeYfsS1X62TDRCGKklzvWG3UW2-KJcsFAo,1978
  torch_geometric/_compile.py,sha256=f-WQeH4VLi5Hn9lrgztFUCSrN_FImjhQa6BxFzcYC38,1338
  torch_geometric/_onnx.py,sha256=V9ffrIKSqhDw6xUZ12lkuSfNs48cQp2EeJ6Z19GfnVw,349
  torch_geometric/backend.py,sha256=lVaf7aLoVaB3M-UcByUJ1G4T4FOK6LXAg0CF4W3E8jo,1575
@@ -9,7 +9,7 @@ torch_geometric/deprecation.py,sha256=dWRymDIUkUVI2MeEmBG5WF4R6jObZeseSBV9G6FNfj
  torch_geometric/device.py,sha256=tU5-_lBNVbVHl_kUmWPwiG5mQ1pyapwMF4JkmtNN3MM,1224
  torch_geometric/edge_index.py,sha256=BsLh5tOZRjjSYDkjqOFAdBuvMaDg7EWaaLELYsUL0Z8,70048
  torch_geometric/experimental.py,sha256=JbtNNEXjFGI8hZ9raM6-qrZURP6Z5nlDK8QicZUIbz0,4756
- torch_geometric/hash_tensor.py,sha256=aM5chfNpYWOU01nEkE2hBm_ZNlOwCjrZcYXjNtf6PMc,11836
+ torch_geometric/hash_tensor.py,sha256=AlPwX3spNoJ4-gHLlLY9_beETe7eTbtYtY33tKOJs1g,14503
  torch_geometric/home.py,sha256=EV54B4Dmiv61GDbkCwtCfWGWJ4eFGwZ8s3KOgGjwYgY,790
  torch_geometric/index.py,sha256=9ChzWFCwj2slNcVBOgfV-wQn-KscJe_y7502w-Vf76w,24045
  torch_geometric/inspector.py,sha256=nKi5o4Mn6xsG0Ex1GudTEQt_EqnF9mcMqGtp7Shh9sQ,19336
@@ -290,7 +290,7 @@ torch_geometric/loader/temporal_dataloader.py,sha256=AQ2QFeiXKbPp6I8sUeE8H7br-1_
  torch_geometric/loader/utils.py,sha256=f27mczQ7fEP2HpTsJGJxKS0slPu0j8zTba3jP8ViNck,14901
  torch_geometric/loader/zip_loader.py,sha256=3lt10fD15Rxm1WhWzypswGzCEwUz4h8OLCD1nE15yNg,3843
  torch_geometric/metrics/__init__.py,sha256=3krvDobW6vV5yHTjq2S2pmOXxNfysNG26muq7z48e94,699
- torch_geometric/metrics/link_pred.py,sha256=cz9GbvZthV2PAnVnxiZlksGr0VmTQOJGNuZ-OYYg04U,29667
+ torch_geometric/metrics/link_pred.py,sha256=wGQG-Fl6BQYJMLZe_L_iIl4ixj6TWgLkkuHyMMraWBA,30480
  torch_geometric/nn/__init__.py,sha256=kQHHHUxFDht2ztD-XFQuv98TvC8MdodaFsIjAvltJBw,874
  torch_geometric/nn/data_parallel.py,sha256=lDAxRi83UNuzAQSj3eu9K2sQheOIU6wqR5elS6oDs90,4764
  torch_geometric/nn/encoding.py,sha256=QNjwWczYExZ1wRGBmpuqYbn6tB7NC4BU-DEgzjhcZqw,3115
@@ -633,7 +633,7 @@ torch_geometric/utils/undirected.py,sha256=H_nfpI0_WluOG6VfjPyldvcjL4w5USAKWu2x5
  torch_geometric/visualization/__init__.py,sha256=PyR_4K5SafsJrBr6qWrkjKr6GBL1b7FtZybyXCDEVwY,154
  torch_geometric/visualization/graph.py,sha256=ZuLPL92yGRi7lxlqsUPwL_EVVXF7P2kMcveTtW79vpA,4784
  torch_geometric/visualization/influence.py,sha256=CWMvuNA_Nf1sfbJmQgn58yS4OFpeKXeZPe7kEuvkUBw,477
- pyg_nightly-2.7.0.dev20250221.dist-info/licenses/LICENSE,sha256=ic-27cMJc1kWoMEYncz3Ya3Ur2Bi3bNLWib2DT763-o,1067
- pyg_nightly-2.7.0.dev20250221.dist-info/WHEEL,sha256=_2ozNFCLWc93bK4WKHCO-eDUENDlo-dgc9cU3qokYO4,82
- pyg_nightly-2.7.0.dev20250221.dist-info/METADATA,sha256=LyCZFRktgqS8Zd9gDaXsIFQHAQGWPYgO7x7RDoG3yg8,63021
- pyg_nightly-2.7.0.dev20250221.dist-info/RECORD,,
+ pyg_nightly-2.7.0.dev20250222.dist-info/licenses/LICENSE,sha256=ic-27cMJc1kWoMEYncz3Ya3Ur2Bi3bNLWib2DT763-o,1067
+ pyg_nightly-2.7.0.dev20250222.dist-info/WHEEL,sha256=_2ozNFCLWc93bK4WKHCO-eDUENDlo-dgc9cU3qokYO4,82
+ pyg_nightly-2.7.0.dev20250222.dist-info/METADATA,sha256=G_VhMGb5Inx-4RejhUEhgXEGyFipKayNtCZpJJyiTyA,63021
+ pyg_nightly-2.7.0.dev20250222.dist-info/RECORD,,
@@ -31,7 +31,7 @@ from .lazy_loader import LazyLoader
  contrib = LazyLoader('contrib', globals(), 'torch_geometric.contrib')
  graphgym = LazyLoader('graphgym', globals(), 'torch_geometric.graphgym')

- __version__ = '2.7.0.dev20250221'
+ __version__ = '2.7.0.dev20250222'

  __all__ = [
      'Index',
@@ -142,12 +142,12 @@ class HashTensor(Tensor):
          if (key.dtype in {torch.uint8, torch.int16} or _range <= 1_000_000
                  or _range <= 2 * key.numel()):
              _map = torch.full(
-                 size=(_range + 2, ),
+                 size=(_range + 3, ),
                  fill_value=-1,
                  dtype=torch.int64,
-                 device=device,
+                 device=key.device,
              )
-             _map[(key - (min_key - 1)).long()] = torch.arange(
+             _map[key.long() - (min_key.long() - 1)] = torch.arange(
                  key.numel(),
                  dtype=_map.dtype,
                  device=_map.device,
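The dense path above builds a lookup table of size `_range + 3` and shifts keys by `min_key - 1`, so slot 0 and the last slot always stay at `-1` and can absorb clamped out-of-range queries. A minimal standalone sketch of that construction (plain PyTorch, not the PyG API):

```python
import torch

# Standalone sketch of the dense mapping built above: keys are shifted by
# `min_key - 1` so unused slots keep the -1 sentinel.
key = torch.tensor([10, 12, 15])
min_key, max_key = int(key.min()), int(key.max())
_range = max_key - min_key                      # 5

_map = torch.full((_range + 3, ), -1, dtype=torch.int64)
_map[key.long() - (min_key - 1)] = torch.arange(key.numel())

query = torch.tensor([12, 11, 99])              # 11 and 99 are unknown keys
index = _map[(query - (min_key - 1)).clamp(min=0, max=_map.numel() - 1)]
print(index)                                    # tensor([ 1, -1, -1])
```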
@@ -164,6 +164,8 @@ class HashTensor(Tensor):
              dtype=dtype,
          )

+     # Private Methods #########################################################
+
      @classmethod
      def _from_data(
          cls,
@@ -217,6 +219,33 @@ class HashTensor(Tensor):
              dtype=self.dtype,
          )

+     def _get(self, query: Tensor) -> Tensor:
+         if isinstance(self._map, Tensor):
+             index = query.long() - (self._min_key.long() - 1)
+             index = self._map[index.clamp_(min=0, max=self._map.numel() - 1)]
+         elif torch_geometric.typing.WITH_CUDA_HASH_MAP and query.is_cuda:
+             index = self._map.get(query)
+         elif torch_geometric.typing.WITH_CPU_HASH_MAP:
+             index = self._map.get(query.cpu())
+         else:
+             import pandas as pd
+
+             ser = pd.Series(query.cpu().numpy(), dtype=self._map)
+             index = torch.from_numpy(ser.cat.codes.to_numpy()).to(torch.long)
+
+         index = index.to(self.device)
+
+         if self._value is None:
+             return index.to(self.dtype)
+
+         out = self._value[index]
+         mask = index != -1
+         mask = mask.view([-1] + [1] * (out.dim() - 1))
+         if out.is_floating_point():
+             return out.where(mask, float('NaN'))
+         else:
+             return out.where(mask, -1)
+
      # Methods #################################################################

      def as_tensor(self) -> Tensor:
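The new `_get` method supplies the lookup semantics: queried keys are translated to row indices, and misses (index `-1`) surface as `NaN` for floating-point values or `-1` otherwise. A hedged usage sketch; the top-level import and the `HashTensor(key, value)` constructor signature are assumptions inferred from `_from_data` above:

```python
import torch
from torch_geometric import HashTensor  # assumed import path for this sketch

key = torch.tensor([10, 20, 30])
value = torch.tensor([[1.0], [2.0], [3.0]])
ht = HashTensor(key, value)  # assumed constructor: keys plus per-key values

# `index_select` along dim 0 is interpreted as a key lookup (see the
# `aten.index_select.default` override further down in this diff):
out = ht.index_select(0, torch.tensor([20, 99]))
# out[0] is the value stored for key 20; out[1] is NaN, since key 99 is
# missing and the values are floating-point.
```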
@@ -252,6 +281,9 @@ class HashTensor(Tensor):
                                        kwargs)
          return func(*args, **(kwargs or {}))

+     def index_select(self, dim: int, index: Any) -> Tensor:  # type: ignore
+         return torch.index_select(self, dim, index)
+

  @implements(aten.alias.default)
  def _alias(tensor: HashTensor) -> HashTensor:
@@ -387,3 +419,51 @@ def _slice(
          num_keys=tensor.size(0),
          dtype=tensor.dtype,
      )
+
+
+ # Since PyTorch does only allow PyTorch tensors as indices in `index_select`,
+ # we need to create a wrapper function and monkey patch `index_select` :(
+ _old_index_select = torch.index_select
+
+
+ def _new_index_select(
+     input: Tensor,
+     dim: int,
+     index: Any,
+     *,
+     out: Optional[Tensor] = None,
+ ) -> Tensor:
+
+     if dim < -input.dim() or dim >= input.dim():
+         raise IndexError(f"Dimension out of range (expected to be in range of "
+                          f"[{-input.dim()}, {input.dim()-1}], but got {dim})")
+
+     # We convert any index tensor in the first dimension into a tensor. This
+     # means that downstream handling (i.e. in `aten.index_select.default`)
+     # needs to take this pre-conversion into account.
+     if isinstance(input, HashTensor) and (dim == 0 or dim == -input.dim()):
+         index = as_key_tensor(index, device=input.device)
+     return _old_index_select(input, dim, index, out=out)
+
+
+ torch.index_select = _new_index_select  # type: ignore
+
+
+ @implements(aten.index_select.default)
+ def _index_select(
+     tensor: HashTensor,
+     dim: int,
+     index: Tensor,
+ ) -> Union[HashTensor, Tensor]:
+
+     if dim == 0 or dim == -tensor.dim():
+         return tensor._get(index)
+
+     return tensor._from_data(
+         tensor._map,
+         aten.index_select.default(tensor.as_tensor(), dim, index),
+         tensor._min_key,
+         tensor._max_key,
+         num_keys=tensor.size(0),
+         dtype=tensor.dtype,
+     )
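Because the patched `torch.index_select` converts the index via `as_key_tensor` for `HashTensor` inputs on dimension 0, non-tensor key sequences can be passed directly, while plain tensors fall through to the original implementation unchanged. A sketch under the same assumptions as above (top-level import, constructor signature, and `as_key_tensor` accepting Python lists):

```python
import torch
from torch_geometric import HashTensor  # assumed import path for this sketch

key = torch.tensor([100, 200, 300])
value = torch.arange(3, dtype=torch.float)
ht = HashTensor(key, value)  # assumed constructor signature

# For a HashTensor on dim 0, the wrapper converts the index via
# `as_key_tensor` before dispatching, so a plain Python list of keys works:
out = torch.index_select(ht, 0, [300, 100])

# Regular tensors still take the original `torch.index_select` path:
dense = torch.index_select(value, 0, torch.tensor([2, 0]))
```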
@@ -715,22 +715,32 @@ class LinkPredPersonalization(_LinkPredMetric):

      Args:
          k (int): The number of top-:math:`k` predictions to evaluate against.
+         max_src_nodes (int, optional): The maximum source nodes to consider to
+             compute pair-wise dissimilarity. If specified,
+             Personalization @ :math:`k` is approximated to avoid computation
+             blowup due to quadratic complexity. (default: :obj:`2**12`)
          batch_size (int, optional): The batch size to determine how many pairs
              of user recommendations should be processed at once.
              (default: :obj:`2**16`)
      """
      higher_is_better: bool = True

-     def __init__(self, k: int, batch_size: int = 2**16) -> None:
+     def __init__(
+         self,
+         k: int,
+         max_src_nodes: Optional[int] = 2**12,
+         batch_size: int = 2**16,
+     ) -> None:
          super().__init__(k)
+         self.max_src_nodes = max_src_nodes
          self.batch_size = batch_size

          if WITH_TORCHMETRICS:
              self.add_state('preds', default=[], dist_reduce_fx='cat')
-             self.add_state('dev_tensor', torch.empty(0), dist_reduce_fx='sum')
+             self.add_state('total', torch.tensor(0), dist_reduce_fx='sum')
          else:
              self.preds: List[Tensor] = []
-             self.register_buffer('dev_tensor', torch.empty(0))
+             self.register_buffer('total', torch.tensor(0))

      def update(
          self,
@@ -738,11 +748,21 @@ class LinkPredPersonalization(_LinkPredMetric):
          edge_label_index: Union[Tensor, Tuple[Tensor, Tensor]],
          edge_label_weight: Optional[Tensor] = None,
      ) -> None:
+
          # NOTE Move to CPU to avoid memory blowup.
-         self.preds.append(pred_index_mat[:, :self.k].cpu())
+         pred_index_mat = pred_index_mat[:, :self.k].cpu()
+
+         if self.max_src_nodes is None:
+             self.preds.append(pred_index_mat)
+             self.total += pred_index_mat.size(0)
+         elif self.total < self.max_src_nodes:
+             remaining = int(self.max_src_nodes - self.total)
+             pred_index_mat = pred_index_mat[:remaining]
+             self.preds.append(pred_index_mat)
+             self.total += pred_index_mat.size(0)

      def compute(self) -> Tensor:
-         device = self.dev_tensor.device
+         device = self.total.device
          score = torch.tensor(0.0, device=device)
          total = torch.tensor(0, device=device)

@@ -786,6 +806,7 @@ class LinkPredPersonalization(_LinkPredMetric):

      def _reset(self) -> None:
          self.preds = []
+         self.total.zero_()


  class LinkPredAveragePopularity(_LinkPredMetric):
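In `LinkPredPersonalization`, the new `total` state counts how many source rows have been buffered, and `update()` stops appending once `max_src_nodes` is reached, so the quadratic pair-wise dissimilarity in `compute()` runs over at most `max_src_nodes` rows. A hedged usage sketch; the import path and the two-row `edge_label_index` layout are assumptions:

```python
import torch
from torch_geometric.metrics import LinkPredPersonalization  # assumed import path

# k=10 with the default cap of 2**12 buffered source nodes.
metric = LinkPredPersonalization(k=10, max_src_nodes=2**12, batch_size=2**16)

# `pred_index_mat` holds the predicted destination nodes per source node;
# `edge_label_index` holds ground-truth (source, destination) pairs.
pred_index_mat = torch.randint(0, 1_000, (64, 10))
edge_label_index = torch.randint(0, 1_000, (2, 256))

metric.update(pred_index_mat, edge_label_index)
personalization = metric.compute()
```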