pyg-nightly 2.7.0.dev20250220__py3-none-any.whl → 2.7.0.dev20250222__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pyg-nightly
-Version: 2.7.0.dev20250220
+Version: 2.7.0.dev20250222
 Summary: Graph Neural Network Library for PyTorch
 Keywords: deep-learning,pytorch,geometric-deep-learning,graph-neural-networks,graph-convolutional-networks
 Author-email: Matthias Fey <matthias@pyg.org>
@@ -1,4 +1,4 @@
-torch_geometric/__init__.py,sha256=Ht6O12SaZRLjd7aAphjjLY_T59xQTnRQ9nN1oSA1Xmk,1978
+torch_geometric/__init__.py,sha256=P_eeFAUMVSeYfsS1X62TDRCGKklzvWG3UW2-KJcsFAo,1978
 torch_geometric/_compile.py,sha256=f-WQeH4VLi5Hn9lrgztFUCSrN_FImjhQa6BxFzcYC38,1338
 torch_geometric/_onnx.py,sha256=V9ffrIKSqhDw6xUZ12lkuSfNs48cQp2EeJ6Z19GfnVw,349
 torch_geometric/backend.py,sha256=lVaf7aLoVaB3M-UcByUJ1G4T4FOK6LXAg0CF4W3E8jo,1575
@@ -9,7 +9,7 @@ torch_geometric/deprecation.py,sha256=dWRymDIUkUVI2MeEmBG5WF4R6jObZeseSBV9G6FNfj
 torch_geometric/device.py,sha256=tU5-_lBNVbVHl_kUmWPwiG5mQ1pyapwMF4JkmtNN3MM,1224
 torch_geometric/edge_index.py,sha256=BsLh5tOZRjjSYDkjqOFAdBuvMaDg7EWaaLELYsUL0Z8,70048
 torch_geometric/experimental.py,sha256=JbtNNEXjFGI8hZ9raM6-qrZURP6Z5nlDK8QicZUIbz0,4756
-torch_geometric/hash_tensor.py,sha256=T4fRmS6TD-j7PKa9LMralSbI6naqA4ctW8AnjWsREzw,9615
+torch_geometric/hash_tensor.py,sha256=AlPwX3spNoJ4-gHLlLY9_beETe7eTbtYtY33tKOJs1g,14503
 torch_geometric/home.py,sha256=EV54B4Dmiv61GDbkCwtCfWGWJ4eFGwZ8s3KOgGjwYgY,790
 torch_geometric/index.py,sha256=9ChzWFCwj2slNcVBOgfV-wQn-KscJe_y7502w-Vf76w,24045
 torch_geometric/inspector.py,sha256=nKi5o4Mn6xsG0Ex1GudTEQt_EqnF9mcMqGtp7Shh9sQ,19336
@@ -290,7 +290,7 @@ torch_geometric/loader/temporal_dataloader.py,sha256=AQ2QFeiXKbPp6I8sUeE8H7br-1_
 torch_geometric/loader/utils.py,sha256=f27mczQ7fEP2HpTsJGJxKS0slPu0j8zTba3jP8ViNck,14901
 torch_geometric/loader/zip_loader.py,sha256=3lt10fD15Rxm1WhWzypswGzCEwUz4h8OLCD1nE15yNg,3843
 torch_geometric/metrics/__init__.py,sha256=3krvDobW6vV5yHTjq2S2pmOXxNfysNG26muq7z48e94,699
-torch_geometric/metrics/link_pred.py,sha256=cz9GbvZthV2PAnVnxiZlksGr0VmTQOJGNuZ-OYYg04U,29667
+torch_geometric/metrics/link_pred.py,sha256=wGQG-Fl6BQYJMLZe_L_iIl4ixj6TWgLkkuHyMMraWBA,30480
 torch_geometric/nn/__init__.py,sha256=kQHHHUxFDht2ztD-XFQuv98TvC8MdodaFsIjAvltJBw,874
 torch_geometric/nn/data_parallel.py,sha256=lDAxRi83UNuzAQSj3eu9K2sQheOIU6wqR5elS6oDs90,4764
 torch_geometric/nn/encoding.py,sha256=QNjwWczYExZ1wRGBmpuqYbn6tB7NC4BU-DEgzjhcZqw,3115
@@ -463,7 +463,7 @@ torch_geometric/nn/models/signed_gcn.py,sha256=J40CnedFIqtKI1LhW1ITSEFRbA_XiJZL6
 torch_geometric/nn/models/tgn.py,sha256=kEGdfLJybkbMT4UMoAh2nCzfX3_nDjfm1cicuPHEwAM,11878
 torch_geometric/nn/models/visnet.py,sha256=97OFMCsPDEI5BCSi7RhoRcU2CNRp7zck2tEzrltFZj4,43192
 torch_geometric/nn/nlp/__init__.py,sha256=q6CPUiJHcc9bXw90lyj-ID4F3kfW8uPM-SOxW9uCMHs,213
-torch_geometric/nn/nlp/llm.py,sha256=j03tyCO1ADgrzGhLqYOUcsy0haGbV4dmT9bdwEnESPE,12181
+torch_geometric/nn/nlp/llm.py,sha256=uVPoIB2_nU0s3N_Z3oVW2Q7XGsAisW1QGaaLe2Y__hw,12227
 torch_geometric/nn/nlp/sentence_transformer.py,sha256=q5M7SGtrUzoSiNhKCGFb7JatWiukdhNF6zdq2yiqxwE,4475
 torch_geometric/nn/nlp/vision_transformer.py,sha256=diVBefjIynzYs8WBlcpTeSVnw1PUecHY--B9Yd-W2hA,863
 torch_geometric/nn/norm/__init__.py,sha256=u2qIDrkbeuObGVXSAIftAlvSd6ouGTtxznCfD-59UiA,669
@@ -633,7 +633,7 @@ torch_geometric/utils/undirected.py,sha256=H_nfpI0_WluOG6VfjPyldvcjL4w5USAKWu2x5
 torch_geometric/visualization/__init__.py,sha256=PyR_4K5SafsJrBr6qWrkjKr6GBL1b7FtZybyXCDEVwY,154
 torch_geometric/visualization/graph.py,sha256=ZuLPL92yGRi7lxlqsUPwL_EVVXF7P2kMcveTtW79vpA,4784
 torch_geometric/visualization/influence.py,sha256=CWMvuNA_Nf1sfbJmQgn58yS4OFpeKXeZPe7kEuvkUBw,477
-pyg_nightly-2.7.0.dev20250220.dist-info/licenses/LICENSE,sha256=ic-27cMJc1kWoMEYncz3Ya3Ur2Bi3bNLWib2DT763-o,1067
-pyg_nightly-2.7.0.dev20250220.dist-info/WHEEL,sha256=_2ozNFCLWc93bK4WKHCO-eDUENDlo-dgc9cU3qokYO4,82
-pyg_nightly-2.7.0.dev20250220.dist-info/METADATA,sha256=SsXACQ7At6aou2g3LkqAlRX2zcjw3aTPw-mMliTKU1E,63021
-pyg_nightly-2.7.0.dev20250220.dist-info/RECORD,,
+pyg_nightly-2.7.0.dev20250222.dist-info/licenses/LICENSE,sha256=ic-27cMJc1kWoMEYncz3Ya3Ur2Bi3bNLWib2DT763-o,1067
+pyg_nightly-2.7.0.dev20250222.dist-info/WHEEL,sha256=_2ozNFCLWc93bK4WKHCO-eDUENDlo-dgc9cU3qokYO4,82
+pyg_nightly-2.7.0.dev20250222.dist-info/METADATA,sha256=G_VhMGb5Inx-4RejhUEhgXEGyFipKayNtCZpJJyiTyA,63021
+pyg_nightly-2.7.0.dev20250222.dist-info/RECORD,,
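
As an aside for anyone verifying the RECORD hashes above by hand: each entry has the form `path,sha256=<digest>,<size>`, where the digest is the urlsafe-Base64-encoded SHA-256 of the file with trailing `=` padding stripped, per the PyPA wheel specification. A minimal sketch (the helper name `record_hash` is ours, not part of any package):

    import base64
    import hashlib

    def record_hash(path: str) -> str:
        # RECORD digests are urlsafe Base64 of the raw SHA-256 digest,
        # with trailing '=' padding removed (PyPA wheel spec).
        with open(path, 'rb') as f:
            digest = hashlib.sha256(f.read()).digest()
        return 'sha256=' + base64.urlsafe_b64encode(digest).rstrip(b'=').decode()

    # Example: record_hash('torch_geometric/__init__.py') on an unpacked wheel
    # should match the value listed above for that wheel.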
@@ -31,7 +31,7 @@ from .lazy_loader import LazyLoader
 contrib = LazyLoader('contrib', globals(), 'torch_geometric.contrib')
 graphgym = LazyLoader('graphgym', globals(), 'torch_geometric.graphgym')
 
-__version__ = '2.7.0.dev20250220'
+__version__ = '2.7.0.dev20250222'
 
 __all__ = [
     'Index',
@@ -1,6 +1,16 @@
 import functools
 import warnings
-from typing import Any, Callable, Dict, Iterable, Optional, Tuple, Type, Union
+from typing import (
+    Any,
+    Callable,
+    Dict,
+    Iterable,
+    List,
+    Optional,
+    Tuple,
+    Type,
+    Union,
+)
 
 import torch
 import torch.utils._pytree as pytree
@@ -132,12 +142,12 @@ class HashTensor(Tensor):
         if (key.dtype in {torch.uint8, torch.int16} or _range <= 1_000_000
                 or _range <= 2 * key.numel()):
             _map = torch.full(
-                size=(_range + 2, ),
+                size=(_range + 3, ),
                 fill_value=-1,
                 dtype=torch.int64,
-                device=device,
+                device=key.device,
             )
-            _map[(key - (min_key - 1)).long()] = torch.arange(
+            _map[key.long() - (min_key.long() - 1)] = torch.arange(
                 key.numel(),
                 dtype=_map.dtype,
                 device=_map.device,
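
The hunk above tweaks `HashTensor`'s dense fast path: when the key range is small or dense enough, lookups go through a plain index table instead of a hash map. Keys are shifted by `min_key - 1` so that slot 0 stays a `-1` sentinel for queries below the range; the new `_range + 3` size adds a matching sentinel slot above the range, and the table is now pinned to `key.device`. A self-contained sketch of the scheme, using hypothetical helper names (`build_dense_map`, `lookup`) that do not exist in the library:

    from typing import Tuple

    import torch

    def build_dense_map(key: torch.Tensor) -> Tuple[torch.Tensor, int]:
        # One slot per value in [min_key - 1, max_key + 1], initialized to
        # the sentinel -1; the extra slots at both ends absorb clamped
        # out-of-range queries.
        min_key, max_key = int(key.min()), int(key.max())
        _range = max_key - min_key
        _map = torch.full((_range + 3, ), -1, dtype=torch.int64,
                          device=key.device)
        _map[key.long() - (min_key - 1)] = torch.arange(key.numel(),
                                                        device=key.device)
        return _map, min_key

    def lookup(_map: torch.Tensor, min_key: int,
               query: torch.Tensor) -> torch.Tensor:
        # Shift queries into table coordinates and clamp, so out-of-range
        # queries hit a sentinel slot instead of raising an index error.
        index = query.long() - (min_key - 1)
        return _map[index.clamp_(min=0, max=_map.numel() - 1)]

    keys = torch.tensor([10, 12, 15])
    _map, min_key = build_dense_map(keys)
    print(lookup(_map, min_key, torch.tensor([12, 11, 99])))
    # tensor([ 1, -1, -1]): key 12 maps to position 1; 11 and 99 are missing.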
@@ -154,6 +164,8 @@ class HashTensor(Tensor):
             dtype=dtype,
         )
 
+    # Private Methods #########################################################
+
     @classmethod
     def _from_data(
         cls,
@@ -197,6 +209,43 @@ class HashTensor(Tensor):
 
         return out
 
+    def _shallow_copy(self) -> 'HashTensor':
+        return self._from_data(
+            self._map,
+            self._value,
+            self._min_key,
+            self._max_key,
+            num_keys=self.size(0),
+            dtype=self.dtype,
+        )
+
+    def _get(self, query: Tensor) -> Tensor:
+        if isinstance(self._map, Tensor):
+            index = query.long() - (self._min_key.long() - 1)
+            index = self._map[index.clamp_(min=0, max=self._map.numel() - 1)]
+        elif torch_geometric.typing.WITH_CUDA_HASH_MAP and query.is_cuda:
+            index = self._map.get(query)
+        elif torch_geometric.typing.WITH_CPU_HASH_MAP:
+            index = self._map.get(query.cpu())
+        else:
+            import pandas as pd
+
+            ser = pd.Series(query.cpu().numpy(), dtype=self._map)
+            index = torch.from_numpy(ser.cat.codes.to_numpy()).to(torch.long)
+
+        index = index.to(self.device)
+
+        if self._value is None:
+            return index.to(self.dtype)
+
+        out = self._value[index]
+        mask = index != -1
+        mask = mask.view([-1] + [1] * (out.dim() - 1))
+        if out.is_floating_point():
+            return out.where(mask, float('NaN'))
+        else:
+            return out.where(mask, -1)
+
     # Methods #################################################################
 
     def as_tensor(self) -> Tensor:
@@ -232,6 +281,14 @@ class HashTensor(Tensor):
                                             kwargs)
         return func(*args, **(kwargs or {}))
 
+    def index_select(self, dim: int, index: Any) -> Tensor:  # type: ignore
+        return torch.index_select(self, dim, index)
+
+
+@implements(aten.alias.default)
+def _alias(tensor: HashTensor) -> HashTensor:
+    return tensor._shallow_copy()
+
 
 @implements(aten._to_copy.default)
 def _to_copy(
@@ -295,3 +352,118 @@ def _unsqueeze(tensor: HashTensor, dim: int) -> HashTensor:
         num_keys=tensor.size(0),
         dtype=tensor.dtype,
     )
+
+
+@implements(aten.squeeze.default)
+def _squeeze_default(tensor: HashTensor) -> HashTensor:
+    if tensor._value is None:
+        return tensor._shallow_copy()
+
+    return tensor._from_data(
+        tensor._map,
+        aten.squeeze.dims(tensor._value, list(range(1, tensor.dim()))),
+        tensor._min_key,
+        tensor._max_key,
+        num_keys=tensor.size(0),
+        dtype=tensor.dtype,
+    )
+
+
+@implements(aten.squeeze.dim)
+@implements(getattr(aten.squeeze, 'dims', aten.squeeze.dim))
+def _squeeze_dim(
+    tensor: HashTensor,
+    dim: Union[int, List[int]],
+) -> HashTensor:
+    if isinstance(dim, int):
+        dim = [dim]
+
+    for d in dim:
+        if d < -tensor.dim() or d >= tensor.dim():
+            raise IndexError(f"Dimension out of range (expected to be in "
+                             f"range of [{-tensor.dim()}, {tensor.dim()-1}], "
+                             f"but got {d})")
+
+    if tensor._value is None:
+        return tensor._shallow_copy()
+
+    dim = [d for d in dim if d != 0 and d != -tensor.dim()]
+
+    return tensor._from_data(
+        tensor._map,
+        aten.squeeze.dims(tensor._value, dim),
+        tensor._min_key,
+        tensor._max_key,
+        num_keys=tensor.size(0),
+        dtype=tensor.dtype,
+    )
+
+
+@implements(aten.slice.Tensor)
+def _slice(
+    tensor: HashTensor,
+    dim: int,
+    start: Optional[int] = None,
+    end: Optional[int] = None,
+    step: int = 1,
+) -> Union[HashTensor, Tensor]:
+
+    if dim == 0 or dim == -tensor.dim():
+        return aten.slice.Tensor(tensor.as_tensor(), dim, start, end, step)
+
+    return tensor._from_data(
+        tensor._map,
+        aten.slice.Tensor(tensor.as_tensor(), dim, start, end, step),
+        tensor._min_key,
+        tensor._max_key,
+        num_keys=tensor.size(0),
+        dtype=tensor.dtype,
+    )
+
+
+# Since PyTorch only allows PyTorch tensors as indices in `index_select`, we
+# need to create a wrapper function and monkey-patch `index_select` :(
+_old_index_select = torch.index_select
+
+
+def _new_index_select(
+    input: Tensor,
+    dim: int,
+    index: Any,
+    *,
+    out: Optional[Tensor] = None,
+) -> Tensor:
+
+    if dim < -input.dim() or dim >= input.dim():
+        raise IndexError(f"Dimension out of range (expected to be in range of "
+                         f"[{-input.dim()}, {input.dim()-1}], but got {dim})")
+
+    # We convert any index tensor in the first dimension into a tensor. This
+    # means that downstream handling (i.e., in `aten.index_select.default`)
+    # needs to take this pre-conversion into account.
+    if isinstance(input, HashTensor) and (dim == 0 or dim == -input.dim()):
+        index = as_key_tensor(index, device=input.device)
+    return _old_index_select(input, dim, index, out=out)
+
+
+torch.index_select = _new_index_select  # type: ignore
+
+
+@implements(aten.index_select.default)
+def _index_select(
+    tensor: HashTensor,
+    dim: int,
+    index: Tensor,
+) -> Union[HashTensor, Tensor]:
+
+    if dim == 0 or dim == -tensor.dim():
+        return tensor._get(index)
+
+    return tensor._from_data(
+        tensor._map,
+        aten.index_select.default(tensor.as_tensor(), dim, index),
+        tensor._min_key,
+        tensor._max_key,
+        num_keys=tensor.size(0),
+        dtype=tensor.dtype,
+    )
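
To make the monkey patch above concrete: it lets `torch.index_select` accept non-tensor indices whenever the input is a `HashTensor`, by converting them via `as_key_tensor` before delegating to the original function. Here is the same wrap-and-delegate pattern in isolation, under illustrative names (`_orig_index_select`, `_patched_index_select`) rather than the library's:

    from typing import Any, Optional

    import torch

    # Keep a handle to the original function, widen the accepted index
    # types, then delegate.
    _orig_index_select = torch.index_select

    def _patched_index_select(input: torch.Tensor, dim: int, index: Any, *,
                              out: Optional[torch.Tensor] = None,
                              ) -> torch.Tensor:
        if not isinstance(index, torch.Tensor):
            # Pre-convert list/array-like indices; tensors pass through.
            index = torch.as_tensor(index, dtype=torch.long,
                                    device=input.device)
        return _orig_index_select(input, dim, index, out=out)

    torch.index_select = _patched_index_select

    x = torch.arange(12.).view(4, 3)
    print(torch.index_select(x, 0, [0, 2]))  # now accepts a plain list

    torch.index_select = _orig_index_select  # restore when done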
@@ -715,22 +715,32 @@ class LinkPredPersonalization(_LinkPredMetric):
 
     Args:
         k (int): The number of top-:math:`k` predictions to evaluate against.
+        max_src_nodes (int, optional): The maximum number of source nodes to
+            consider when computing pair-wise dissimilarity. If specified,
+            Personalization @ :math:`k` is approximated to avoid computational
+            blowup due to quadratic complexity. (default: :obj:`2**12`)
         batch_size (int, optional): The batch size to determine how many pairs
             of user recommendations should be processed at once.
             (default: :obj:`2**16`)
     """
     higher_is_better: bool = True
 
-    def __init__(self, k: int, batch_size: int = 2**16) -> None:
+    def __init__(
+        self,
+        k: int,
+        max_src_nodes: Optional[int] = 2**12,
+        batch_size: int = 2**16,
+    ) -> None:
         super().__init__(k)
+        self.max_src_nodes = max_src_nodes
         self.batch_size = batch_size
 
         if WITH_TORCHMETRICS:
             self.add_state('preds', default=[], dist_reduce_fx='cat')
-            self.add_state('dev_tensor', torch.empty(0), dist_reduce_fx='sum')
+            self.add_state('total', torch.tensor(0), dist_reduce_fx='sum')
         else:
             self.preds: List[Tensor] = []
-            self.register_buffer('dev_tensor', torch.empty(0))
+            self.register_buffer('total', torch.tensor(0))
 
     def update(
         self,
@@ -738,11 +748,21 @@ class LinkPredPersonalization(_LinkPredMetric):
         edge_label_index: Union[Tensor, Tuple[Tensor, Tensor]],
         edge_label_weight: Optional[Tensor] = None,
     ) -> None:
+
         # NOTE Move to CPU to avoid memory blowup.
-        self.preds.append(pred_index_mat[:, :self.k].cpu())
+        pred_index_mat = pred_index_mat[:, :self.k].cpu()
+
+        if self.max_src_nodes is None:
+            self.preds.append(pred_index_mat)
+            self.total += pred_index_mat.size(0)
+        elif self.total < self.max_src_nodes:
+            remaining = int(self.max_src_nodes - self.total)
+            pred_index_mat = pred_index_mat[:remaining]
+            self.preds.append(pred_index_mat)
+            self.total += pred_index_mat.size(0)
 
     def compute(self) -> Tensor:
-        device = self.dev_tensor.device
+        device = self.total.device
         score = torch.tensor(0.0, device=device)
         total = torch.tensor(0, device=device)
@@ -786,6 +806,7 @@ class LinkPredPersonalization(_LinkPredMetric):
 
     def _reset(self) -> None:
         self.preds = []
+        self.total.zero_()
 
 
 class LinkPredAveragePopularity(_LinkPredMetric):
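
Taken together, the `link_pred.py` hunks replace the dummy `dev_tensor` state with a `total` counter and cap how many source rows feed the quadratic pair-wise dissimilarity computation. A hedged usage sketch based only on the constructor signature shown above (assuming `LinkPredPersonalization` is importable from `torch_geometric.metrics`, as the RECORD entries suggest):

    from torch_geometric.metrics import LinkPredPersonalization

    # Approximate Personalization@10 over at most 1,024 source nodes;
    # `update()` keeps at most `max_src_nodes` rows of the [num_src, k]
    # top-k prediction matrix (tracked via the new `total` counter), and
    # `compute()` averages pair-wise dissimilarity over the retained rows.
    metric = LinkPredPersonalization(k=10, max_src_nodes=1024)

    # Exact (unbounded) variant, at quadratic cost in the number of rows:
    exact = LinkPredPersonalization(k=10, max_src_nodes=None)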
@@ -49,15 +49,16 @@ def get_llm_kwargs(required_memory: int, dtype=torch.dtype) -> Dict[str, Any]:
 class LLM(torch.nn.Module):
     r"""A wrapper around a Large Language Model (LLM) from HuggingFace.
 
-    model_name (str): The HuggingFace model name, *e.g.*, :obj:`"llama2"` or
-        :obj:`"gemma"`.
-    num_params (int, optional): An integer representing how many parameters the
-        HuggingFace model has, in billions. This is used to automatically
-        allocate the correct number of GPUs needed, given the available GPU
-        memory of your GPUs. If not specified, the number of parameters
-        is determined using the `huggingface_hub` module.
-    dtype (torch.dtype, optional): The data type to use for the LLM.
-        (default :obj: `torch.bfloat16`)
+    Args:
+        model_name (str): The HuggingFace model name, *e.g.*, :obj:`"llama2"`
+            or :obj:`"gemma"`.
+        num_params (int, optional): An integer representing how many
+            parameters the HuggingFace model has, in billions. This is used
+            to automatically allocate the correct number of GPUs needed,
+            given the available GPU memory. If not specified, the number of
+            parameters is determined using the `huggingface_hub` module.
+        dtype (torch.dtype, optional): The data type to use for the LLM.
+            (default: :obj:`torch.bfloat16`)
     """
     def __init__(
         self,
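
With the docstring now rendered as a proper `Args:` block, a usage sketch follows directly from it (assuming `LLM` is exported from `torch_geometric.nn.nlp`, whose files appear in the RECORD above; the short model name and parameter count mirror the docstring's own examples):

    import torch
    from torch_geometric.nn.nlp import LLM

    # `num_params` is given in billions and drives automatic GPU allocation;
    # if omitted, it is resolved via the `huggingface_hub` module.
    llm = LLM(model_name='llama2', num_params=7, dtype=torch.bfloat16)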