pyg-nightly 2.7.0.dev20250219__py3-none-any.whl → 2.7.0.dev20250221__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only and reflects the packages exactly as they appear in their public registries.

--- pyg_nightly-2.7.0.dev20250219.dist-info/METADATA
+++ pyg_nightly-2.7.0.dev20250221.dist-info/METADATA
@@ -1,6 +1,6 @@
-Metadata-Version: 2.3
+Metadata-Version: 2.4
 Name: pyg-nightly
-Version: 2.7.0.dev20250219
+Version: 2.7.0.dev20250221
 Summary: Graph Neural Network Library for PyTorch
 Keywords: deep-learning,pytorch,geometric-deep-learning,graph-neural-networks,graph-convolutional-networks
 Author-email: Matthias Fey <matthias@pyg.org>
@@ -14,6 +14,7 @@ Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
 Classifier: Programming Language :: Python :: 3 :: Only
+License-File: LICENSE
 Requires-Dist: aiohttp
 Requires-Dist: fsspec
 Requires-Dist: jinja2

--- pyg_nightly-2.7.0.dev20250219.dist-info/RECORD
+++ pyg_nightly-2.7.0.dev20250221.dist-info/RECORD
@@ -1,4 +1,4 @@
-torch_geometric/__init__.py,sha256=P-W8gnhQltNcKfeLyPOC4diB9zVIHQ5yi0IXFmjYzd4,1978
+torch_geometric/__init__.py,sha256=f29gN2VOOKFStDrxccL16W-vkm6S_5-CHkutK9E5NX4,1978
 torch_geometric/_compile.py,sha256=f-WQeH4VLi5Hn9lrgztFUCSrN_FImjhQa6BxFzcYC38,1338
 torch_geometric/_onnx.py,sha256=V9ffrIKSqhDw6xUZ12lkuSfNs48cQp2EeJ6Z19GfnVw,349
 torch_geometric/backend.py,sha256=lVaf7aLoVaB3M-UcByUJ1G4T4FOK6LXAg0CF4W3E8jo,1575
@@ -9,7 +9,7 @@ torch_geometric/deprecation.py,sha256=dWRymDIUkUVI2MeEmBG5WF4R6jObZeseSBV9G6FNfj
 torch_geometric/device.py,sha256=tU5-_lBNVbVHl_kUmWPwiG5mQ1pyapwMF4JkmtNN3MM,1224
 torch_geometric/edge_index.py,sha256=BsLh5tOZRjjSYDkjqOFAdBuvMaDg7EWaaLELYsUL0Z8,70048
 torch_geometric/experimental.py,sha256=JbtNNEXjFGI8hZ9raM6-qrZURP6Z5nlDK8QicZUIbz0,4756
-torch_geometric/hash_tensor.py,sha256=T4fRmS6TD-j7PKa9LMralSbI6naqA4ctW8AnjWsREzw,9615
+torch_geometric/hash_tensor.py,sha256=aM5chfNpYWOU01nEkE2hBm_ZNlOwCjrZcYXjNtf6PMc,11836
 torch_geometric/home.py,sha256=EV54B4Dmiv61GDbkCwtCfWGWJ4eFGwZ8s3KOgGjwYgY,790
 torch_geometric/index.py,sha256=9ChzWFCwj2slNcVBOgfV-wQn-KscJe_y7502w-Vf76w,24045
 torch_geometric/inspector.py,sha256=nKi5o4Mn6xsG0Ex1GudTEQt_EqnF9mcMqGtp7Shh9sQ,19336
@@ -291,7 +291,7 @@ torch_geometric/loader/utils.py,sha256=f27mczQ7fEP2HpTsJGJxKS0slPu0j8zTba3jP8ViN
 torch_geometric/loader/zip_loader.py,sha256=3lt10fD15Rxm1WhWzypswGzCEwUz4h8OLCD1nE15yNg,3843
 torch_geometric/metrics/__init__.py,sha256=3krvDobW6vV5yHTjq2S2pmOXxNfysNG26muq7z48e94,699
 torch_geometric/metrics/link_pred.py,sha256=cz9GbvZthV2PAnVnxiZlksGr0VmTQOJGNuZ-OYYg04U,29667
-torch_geometric/nn/__init__.py,sha256=RrWRzEoqtR3lsO2lAzYXboLPb3uYEX2z3tLxiBIVWjc,847
+torch_geometric/nn/__init__.py,sha256=kQHHHUxFDht2ztD-XFQuv98TvC8MdodaFsIjAvltJBw,874
 torch_geometric/nn/data_parallel.py,sha256=lDAxRi83UNuzAQSj3eu9K2sQheOIU6wqR5elS6oDs90,4764
 torch_geometric/nn/encoding.py,sha256=QNjwWczYExZ1wRGBmpuqYbn6tB7NC4BU-DEgzjhcZqw,3115
 torch_geometric/nn/fx.py,sha256=oRfnYiih0FM1MhPNcDYIog0oQ0G0soQJuaz7KeNCOjo,16048
@@ -463,7 +463,7 @@ torch_geometric/nn/models/signed_gcn.py,sha256=J40CnedFIqtKI1LhW1ITSEFRbA_XiJZL6
 torch_geometric/nn/models/tgn.py,sha256=kEGdfLJybkbMT4UMoAh2nCzfX3_nDjfm1cicuPHEwAM,11878
 torch_geometric/nn/models/visnet.py,sha256=97OFMCsPDEI5BCSi7RhoRcU2CNRp7zck2tEzrltFZj4,43192
 torch_geometric/nn/nlp/__init__.py,sha256=q6CPUiJHcc9bXw90lyj-ID4F3kfW8uPM-SOxW9uCMHs,213
-torch_geometric/nn/nlp/llm.py,sha256=j03tyCO1ADgrzGhLqYOUcsy0haGbV4dmT9bdwEnESPE,12181
+torch_geometric/nn/nlp/llm.py,sha256=uVPoIB2_nU0s3N_Z3oVW2Q7XGsAisW1QGaaLe2Y__hw,12227
 torch_geometric/nn/nlp/sentence_transformer.py,sha256=q5M7SGtrUzoSiNhKCGFb7JatWiukdhNF6zdq2yiqxwE,4475
 torch_geometric/nn/nlp/vision_transformer.py,sha256=diVBefjIynzYs8WBlcpTeSVnw1PUecHY--B9Yd-W2hA,863
 torch_geometric/nn/norm/__init__.py,sha256=u2qIDrkbeuObGVXSAIftAlvSd6ouGTtxznCfD-59UiA,669
@@ -633,7 +633,7 @@ torch_geometric/utils/undirected.py,sha256=H_nfpI0_WluOG6VfjPyldvcjL4w5USAKWu2x5
 torch_geometric/visualization/__init__.py,sha256=PyR_4K5SafsJrBr6qWrkjKr6GBL1b7FtZybyXCDEVwY,154
 torch_geometric/visualization/graph.py,sha256=ZuLPL92yGRi7lxlqsUPwL_EVVXF7P2kMcveTtW79vpA,4784
 torch_geometric/visualization/influence.py,sha256=CWMvuNA_Nf1sfbJmQgn58yS4OFpeKXeZPe7kEuvkUBw,477
-pyg_nightly-2.7.0.dev20250219.dist-info/LICENSE,sha256=ic-27cMJc1kWoMEYncz3Ya3Ur2Bi3bNLWib2DT763-o,1067
-pyg_nightly-2.7.0.dev20250219.dist-info/WHEEL,sha256=CpUCUxeHQbRN5UGRQHYRJorO5Af-Qy_fHMctcQ8DSGI,82
-pyg_nightly-2.7.0.dev20250219.dist-info/METADATA,sha256=vvByDyHVHwRDOLKEnsUSA1Dh2obTtvRZvzGks5iU8XA,62999
-pyg_nightly-2.7.0.dev20250219.dist-info/RECORD,,
+pyg_nightly-2.7.0.dev20250221.dist-info/licenses/LICENSE,sha256=ic-27cMJc1kWoMEYncz3Ya3Ur2Bi3bNLWib2DT763-o,1067
+pyg_nightly-2.7.0.dev20250221.dist-info/WHEEL,sha256=_2ozNFCLWc93bK4WKHCO-eDUENDlo-dgc9cU3qokYO4,82
+pyg_nightly-2.7.0.dev20250221.dist-info/METADATA,sha256=LyCZFRktgqS8Zd9gDaXsIFQHAQGWPYgO7x7RDoG3yg8,63021
+pyg_nightly-2.7.0.dev20250221.dist-info/RECORD,,
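
Note: the relocation of LICENSE into a dist-info/licenses/ directory goes hand in hand with the License-File field and the Metadata-Version 2.3 → 2.4 bump in METADATA above. Metadata 2.4 (PEP 639) stores declared license files under .dist-info/licenses/, a layout that the flit 3.11 generator recorded in WHEEL below produces.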

--- pyg_nightly-2.7.0.dev20250219.dist-info/WHEEL
+++ pyg_nightly-2.7.0.dev20250221.dist-info/WHEEL
@@ -1,4 +1,4 @@
 Wheel-Version: 1.0
-Generator: flit 3.10.1
+Generator: flit 3.11.0
 Root-Is-Purelib: true
 Tag: py3-none-any

--- torch_geometric/__init__.py
+++ torch_geometric/__init__.py
@@ -31,7 +31,7 @@ from .lazy_loader import LazyLoader
 contrib = LazyLoader('contrib', globals(), 'torch_geometric.contrib')
 graphgym = LazyLoader('graphgym', globals(), 'torch_geometric.graphgym')
 
-__version__ = '2.7.0.dev20250219'
+__version__ = '2.7.0.dev20250221'
 
 __all__ = [
     'Index',

--- torch_geometric/hash_tensor.py
+++ torch_geometric/hash_tensor.py
@@ -1,6 +1,16 @@
 import functools
 import warnings
-from typing import Any, Callable, Dict, Iterable, Optional, Tuple, Type, Union
+from typing import (
+    Any,
+    Callable,
+    Dict,
+    Iterable,
+    List,
+    Optional,
+    Tuple,
+    Type,
+    Union,
+)
 
 import torch
 import torch.utils._pytree as pytree
@@ -197,6 +207,16 @@ class HashTensor(Tensor):
 
         return out
 
+    def _shallow_copy(self) -> 'HashTensor':
+        return self._from_data(
+            self._map,
+            self._value,
+            self._min_key,
+            self._max_key,
+            num_keys=self.size(0),
+            dtype=self.dtype,
+        )
+
     # Methods #################################################################
 
     def as_tensor(self) -> Tensor:
@@ -233,6 +253,11 @@
         return func(*args, **(kwargs or {}))
 
 
+@implements(aten.alias.default)
+def _alias(tensor: HashTensor) -> HashTensor:
+    return tensor._shallow_copy()
+
+
 @implements(aten._to_copy.default)
 def _to_copy(
     tensor: HashTensor,
@@ -295,3 +320,70 @@ def _unsqueeze(tensor: HashTensor, dim: int) -> HashTensor:
         num_keys=tensor.size(0),
         dtype=tensor.dtype,
     )
+
+
+@implements(aten.squeeze.default)
+def _squeeze_default(tensor: HashTensor) -> HashTensor:
+    if tensor._value is None:
+        return tensor._shallow_copy()
+
+    return tensor._from_data(
+        tensor._map,
+        aten.squeeze.dims(tensor._value, list(range(1, tensor.dim()))),
+        tensor._min_key,
+        tensor._max_key,
+        num_keys=tensor.size(0),
+        dtype=tensor.dtype,
+    )
+
+
+@implements(aten.squeeze.dim)
+@implements(getattr(aten.squeeze, 'dims', aten.squeeze.dim))
+def _squeeze_dim(
+    tensor: HashTensor,
+    dim: Union[int, List[int]],
+) -> HashTensor:
+    if isinstance(dim, int):
+        dim = [dim]
+
+    for d in dim:
+        if d < -tensor.dim() or d >= tensor.dim():
+            raise IndexError(f"Dimension out of range (expected to be in "
+                             f"range of [{-tensor.dim()}, {tensor.dim()-1}], "
+                             f"but got {d})")
+
+    if tensor._value is None:
+        return tensor._shallow_copy()
+
+    dim = [d for d in dim if d != 0 and d != -tensor.dim()]
+
+    return tensor._from_data(
+        tensor._map,
+        aten.squeeze.dims(tensor._value, dim),
+        tensor._min_key,
+        tensor._max_key,
+        num_keys=tensor.size(0),
+        dtype=tensor.dtype,
+    )
+
+
+@implements(aten.slice.Tensor)
+def _slice(
+    tensor: HashTensor,
+    dim: int,
+    start: Optional[int] = None,
+    end: Optional[int] = None,
+    step: int = 1,
+) -> Union[HashTensor, Tensor]:
+
+    if dim == 0 or dim == -tensor.dim():
+        return aten.slice.Tensor(tensor.as_tensor(), dim, start, end, step)
+
+    return tensor._from_data(
+        tensor._map,
+        aten.slice.Tensor(tensor.as_tensor(), dim, start, end, step),
+        tensor._min_key,
+        tensor._max_key,
+        num_keys=tensor.size(0),
+        dtype=tensor.dtype,
+    )
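
For orientation, here is a minimal, self-contained sketch (not PyG code) of the dispatch pattern these hunks extend: handlers registered per aten overload through an `implements` decorator and routed via `__torch_dispatch__`. `WrapperTensor`, its `_data` payload, and the `HANDLERS` table are toy stand-ins for `HashTensor`, its `_map`/`_value` internals, and PyG's own registry. Note also the diff's `getattr(aten.squeeze, 'dims', ...)` guard, which keeps the registration working on PyTorch builds that predate the `squeeze.dims` overload; the sketch assumes a recent build and uses it directly.

    import torch
    from torch import Tensor

    aten = torch.ops.aten
    HANDLERS = {}  # Maps an aten overload to its handler function.

    def implements(op):
        def decorator(fn):
            HANDLERS[op] = fn
            return fn
        return decorator

    class WrapperTensor(Tensor):
        @staticmethod
        def __new__(cls, data: Tensor):
            out = Tensor._make_wrapper_subclass(cls, data.shape, dtype=data.dtype,
                                                device=data.device)
            out._data = data
            return out

        @classmethod
        def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
            kwargs = kwargs or {}
            if func in HANDLERS:  # Route to a registered handler, as above.
                return HANDLERS[func](*args, **kwargs)
            # Fallback: unwrap and run the op on the plain payload.
            # (For brevity, only positional arguments are unwrapped here.)
            args = [a._data if isinstance(a, WrapperTensor) else a for a in args]
            return func(*args, **kwargs)

    @implements(aten.alias.default)
    def _alias(tensor: WrapperTensor) -> WrapperTensor:
        return WrapperTensor(tensor._data)  # Shallow copy: shares the payload.

    @implements(aten.squeeze.default)
    def _squeeze(tensor: WrapperTensor) -> WrapperTensor:
        # Never squeeze dim 0, mirroring how `_squeeze_default` above protects
        # the key dimension of a `HashTensor`.
        return WrapperTensor(aten.squeeze.dims(tensor._data,
                                               list(range(1, tensor.dim()))))

    x = WrapperTensor(torch.randn(4, 1, 3))
    print(x.squeeze().shape)  # torch.Size([4, 3]): dim 0 survives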

--- torch_geometric/nn/__init__.py
+++ torch_geometric/nn/__init__.py
@@ -17,6 +17,7 @@ from .dense import * # noqa
 from .kge import * # noqa
 from .models import * # noqa
 from .functional import * # noqa
+from .nlp import * # noqa
 
 __all__ = [
     'Reshape',
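
With this re-export, the NLP wrappers become reachable from the top-level `torch_geometric.nn` namespace. A hedged one-liner, assuming `LLM` is listed in `torch_geometric.nn.nlp.__all__`:

    from torch_geometric.nn import LLM  # previously: from torch_geometric.nn.nlp import LLM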

--- torch_geometric/nn/nlp/llm.py
+++ torch_geometric/nn/nlp/llm.py
@@ -49,15 +49,16 @@ def get_llm_kwargs(required_memory: int, dtype=torch.dtype) -> Dict[str, Any]:
 class LLM(torch.nn.Module):
     r"""A wrapper around a Large Language Model (LLM) from HuggingFace.
 
-    model_name (str): The HuggingFace model name, *e.g.*, :obj:`"llama2"` or
-        :obj:`"gemma"`.
-    num_params (int, optional): An integer representing how many parameters the
-        HuggingFace model has, in billions. This is used to automatically
-        allocate the correct number of GPUs needed, given the available GPU
-        memory of your GPUs. If not specified, the number of parameters
-        is determined using the `huggingface_hub` module.
-    dtype (torch.dtype, optional): The data type to use for the LLM.
-        (default :obj: `torch.bfloat16`)
+    Args:
+        model_name (str): The HuggingFace model name, *e.g.*, :obj:`"llama2"`
+            or :obj:`"gemma"`.
+        num_params (int, optional): An integer representing how many parameters
+            the HuggingFace model has, in billions. This is used to
+            automatically allocate the correct number of GPUs needed, given the
+            available GPU memory of your GPUs. If not specified, the number of
+            parameters is determined using the `huggingface_hub` module.
+        dtype (torch.dtype, optional): The data type to use for the LLM.
+            (default :obj: `torch.bfloat16`)
     """
     def __init__(
         self,
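
The docstring reflow turns the loose parameter list into a proper `Args:` section. As a usage sketch based solely on the documented arguments (loading weights additionally requires the HuggingFace `transformers` stack and sufficient GPU memory):

    import torch
    from torch_geometric.nn.nlp import LLM

    llm = LLM(
        model_name='llama2',    # a HuggingFace model name, per the docstring
        num_params=7,           # in billions; if omitted, looked up via huggingface_hub
        dtype=torch.bfloat16,   # the documented default
    )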