pyg-nightly 2.7.0.dev20250223__py3-none-any.whl → 2.7.0.dev20250225__py3-none-any.whl

This diff compares the contents of two package versions that were publicly released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pyg-nightly
-Version: 2.7.0.dev20250223
+Version: 2.7.0.dev20250225
 Summary: Graph Neural Network Library for PyTorch
 Keywords: deep-learning,pytorch,geometric-deep-learning,graph-neural-networks,graph-convolutional-networks
 Author-email: Matthias Fey <matthias@pyg.org>
@@ -1,4 +1,4 @@
-torch_geometric/__init__.py,sha256=u79QBiX3vYzq10QGHDb7To3rzMR1UXXopOn8ptXDZ0A,1978
+torch_geometric/__init__.py,sha256=2_FzSuy3z_Dpnuncdqy_FCcZmG9u9595U4yzJS-1EiY,1978
 torch_geometric/_compile.py,sha256=f-WQeH4VLi5Hn9lrgztFUCSrN_FImjhQa6BxFzcYC38,1338
 torch_geometric/_onnx.py,sha256=V9ffrIKSqhDw6xUZ12lkuSfNs48cQp2EeJ6Z19GfnVw,349
 torch_geometric/backend.py,sha256=lVaf7aLoVaB3M-UcByUJ1G4T4FOK6LXAg0CF4W3E8jo,1575
@@ -9,7 +9,7 @@ torch_geometric/deprecation.py,sha256=dWRymDIUkUVI2MeEmBG5WF4R6jObZeseSBV9G6FNfj
 torch_geometric/device.py,sha256=tU5-_lBNVbVHl_kUmWPwiG5mQ1pyapwMF4JkmtNN3MM,1224
 torch_geometric/edge_index.py,sha256=BsLh5tOZRjjSYDkjqOFAdBuvMaDg7EWaaLELYsUL0Z8,70048
 torch_geometric/experimental.py,sha256=JbtNNEXjFGI8hZ9raM6-qrZURP6Z5nlDK8QicZUIbz0,4756
-torch_geometric/hash_tensor.py,sha256=9Zg1KCebfN-xJE1dX2nGGYnK09snSyJkjaYVzCUOfkM,17278
+torch_geometric/hash_tensor.py,sha256=xXKWffFz4ML4jTKPNagiAWqu-Cjptmb3WNhIPo0C0pw,23200
 torch_geometric/home.py,sha256=EV54B4Dmiv61GDbkCwtCfWGWJ4eFGwZ8s3KOgGjwYgY,790
 torch_geometric/index.py,sha256=9ChzWFCwj2slNcVBOgfV-wQn-KscJe_y7502w-Vf76w,24045
 torch_geometric/inspector.py,sha256=nKi5o4Mn6xsG0Ex1GudTEQt_EqnF9mcMqGtp7Shh9sQ,19336
@@ -633,7 +633,7 @@ torch_geometric/utils/undirected.py,sha256=H_nfpI0_WluOG6VfjPyldvcjL4w5USAKWu2x5
 torch_geometric/visualization/__init__.py,sha256=PyR_4K5SafsJrBr6qWrkjKr6GBL1b7FtZybyXCDEVwY,154
 torch_geometric/visualization/graph.py,sha256=ZuLPL92yGRi7lxlqsUPwL_EVVXF7P2kMcveTtW79vpA,4784
 torch_geometric/visualization/influence.py,sha256=CWMvuNA_Nf1sfbJmQgn58yS4OFpeKXeZPe7kEuvkUBw,477
-pyg_nightly-2.7.0.dev20250223.dist-info/licenses/LICENSE,sha256=ic-27cMJc1kWoMEYncz3Ya3Ur2Bi3bNLWib2DT763-o,1067
-pyg_nightly-2.7.0.dev20250223.dist-info/WHEEL,sha256=_2ozNFCLWc93bK4WKHCO-eDUENDlo-dgc9cU3qokYO4,82
-pyg_nightly-2.7.0.dev20250223.dist-info/METADATA,sha256=o3vW1MbKajweST33mDeCk-b1CKb5wGegFomLfUE_rOQ,63021
-pyg_nightly-2.7.0.dev20250223.dist-info/RECORD,,
+pyg_nightly-2.7.0.dev20250225.dist-info/licenses/LICENSE,sha256=ic-27cMJc1kWoMEYncz3Ya3Ur2Bi3bNLWib2DT763-o,1067
+pyg_nightly-2.7.0.dev20250225.dist-info/WHEEL,sha256=_2ozNFCLWc93bK4WKHCO-eDUENDlo-dgc9cU3qokYO4,82
+pyg_nightly-2.7.0.dev20250225.dist-info/METADATA,sha256=m8UNJu0M5iArtdBVYT4uMh9AJdhwbI6VVLnsGvQcgnI,63021
+pyg_nightly-2.7.0.dev20250225.dist-info/RECORD,,
@@ -31,7 +31,7 @@ from .lazy_loader import LazyLoader
 contrib = LazyLoader('contrib', globals(), 'torch_geometric.contrib')
 graphgym = LazyLoader('graphgym', globals(), 'torch_geometric.graphgym')
 
-__version__ = '2.7.0.dev20250223'
+__version__ = '2.7.0.dev20250225'
 
 __all__ = [
     'Index',
@@ -12,6 +12,7 @@ from typing import (
     Union,
 )
 
+import numpy as np
 import torch
 import torch.utils._pytree as pytree
 import xxhash
@@ -245,7 +246,7 @@ class HashTensor(Tensor):
         import pandas as pd
 
         ser = pd.Series(query.cpu().numpy(), dtype=self._map)
-        index = torch.from_numpy(ser.cat.codes.to_numpy()).to(torch.long)
+        index = torch.from_numpy(ser.cat.codes.to_numpy().copy()).long()
 
         index = index.to(self.device)
 
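Why the added `.copy()`: `Series.cat.codes.to_numpy()` can hand back a read-only view of pandas-internal memory, and `torch.from_numpy` shares that memory rather than copying it, so PyTorch warns about the resulting non-writable tensor. A minimal standalone sketch of the failure mode, independent of `HashTensor` (the categorical dtype and values are illustrative):

    import pandas as pd
    import torch

    ser = pd.Series([20, 30, 10], dtype=pd.CategoricalDtype([10, 20, 30]))

    codes = ser.cat.codes.to_numpy()
    codes.flags.writeable = False  # force the read-only case for the demo

    # torch.from_numpy(codes)  # UserWarning: given NumPy array is not writable
    index = torch.from_numpy(codes.copy()).long()  # the copy owns its memory
    print(index)  # tensor([1, 2, 0])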
@@ -299,14 +300,50 @@ class HashTensor(Tensor):
             kwargs)
         return func(*args, **(kwargs or {}))
 
+    def __tensor_flatten__(self) -> Tuple[List[str], Tuple[Any, ...]]:
+        attrs = ['_map', '_min_key', '_max_key']
+        if self._value is not None:
+            attrs.append('_value')
+
+        ctx = (self.size(0), self.dtype)
+
+        return attrs, ctx
+
+    @staticmethod
+    def __tensor_unflatten__(
+        inner_tensors: Dict[str, Any],
+        ctx: Tuple[Any, ...],
+        outer_size: Tuple[int, ...],
+        outer_stride: Tuple[int, ...],
+    ) -> 'HashTensor':
+        return HashTensor._from_data(
+            inner_tensors['_map'],
+            inner_tensors.get('_value', None),
+            inner_tensors['_min_key'],
+            inner_tensors['_max_key'],
+            num_keys=ctx[0],
+            dtype=ctx[1],
+        )
+
+    def __repr__(self) -> str:  # type: ignore
+        indent = len(f'{self.__class__.__name__}(')
+        tensor_str = torch._tensor_str._tensor_str(self.as_tensor(), indent)
+        return torch._tensor_str._str_intern(self, tensor_contents=tensor_str)
+
     def tolist(self) -> List[Any]:
+        """"""  # noqa: D419
         return self.as_tensor().tolist()
 
+    def numpy(self, *, force: bool = False) -> np.ndarray:
+        """"""  # noqa: D419
+        return self.as_tensor().numpy(force=force)
+
     def index_select(  # type: ignore
         self,
         dim: int,
         index: Any,
     ) -> Union['HashTensor', Tensor]:
+        """"""  # noqa: D419
         return torch.index_select(self, dim, index)
 
     def select(  # type: ignore
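`__tensor_flatten__` / `__tensor_unflatten__` implement the protocol that `torch.compile` and pytree-based tracing use to decompose a traceable tensor subclass into its inner plain tensors plus static context and to rebuild it afterwards. A hedged round-trip sketch (the `HashTensor(key, value)` constructor call is an assumption, not shown in this diff):

    import torch
    from torch_geometric.hash_tensor import HashTensor

    key = torch.tensor([1000, 42, 7])  # arbitrary integer keys
    value = torch.randn(3, 8)          # one row of values per key
    t = HashTensor(key, value)         # assumed constructor signature

    attrs, ctx = t.__tensor_flatten__()            # inner tensor names + ctx
    inner = {name: getattr(t, name) for name in attrs}
    t2 = HashTensor.__tensor_unflatten__(inner, ctx, t.size(), t.stride())

    assert torch.equal(t2.as_tensor(), t.as_tensor())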
@@ -314,14 +351,99 @@ class HashTensor(Tensor):
         dim: int,
         index: Any,
     ) -> Union['HashTensor', Tensor]:
+        """"""  # noqa: D419
         return torch.select(self, dim, index)
 
+    def share_memory_(self) -> 'HashTensor':
+        """"""  # noqa: D419
+        if isinstance(self._map, Tensor):
+            self._map.share_memory_()
+        if self._value is not None:
+            self._value.share_memory_()
+        self._min_key.share_memory_()
+        self._max_key.share_memory_()
+        return self
+
+    def is_shared(self) -> bool:
+        """"""  # noqa: D419
+        return self._min_key.is_shared()
+
+    def detach_(self) -> 'HashTensor':  # type: ignore
+        """"""  # noqa: D419
+        if self._value is not None:
+            self._value.detach_()
+        return super().detach_()  # type: ignore
+
+    def __getitem__(self, indices: Any) -> Union['HashTensor', Tensor]:
+        if not isinstance(indices, tuple):
+            indices = (indices, )
+        assert len(indices) > 0
+
+        # We convert any index tensor in the first dimension into a tensor.
+        # This means that downstream handling (i.e. in `aten.index.Tensor`)
+        # needs to take this pre-conversion into account. However, detecting
+        # whether the first dimension is indexed can be tricky at times:
+        # * We need to take `Ellipsis` into account
+        # * We need to take any unsqueezing into account
+        if indices[0] is Ellipsis and len(indices) > 1:
+            nonempty_indices = [i for i in indices[1:] if i is not None]
+            if len(nonempty_indices) == self.dim():
+                indices = indices[1:]
+
+        if isinstance(indices[0], (int, bool)):
+            index: Union[int, Tensor] = int(as_key_tensor([indices[0]]))
+            indices = (index, ) + indices[1:]
+        elif isinstance(indices[0], (Tensor, list, np.ndarray)):
+            index = as_key_tensor(indices[0], device=self.device)
+            indices = (index, ) + indices[1:]
+
+        indices = indices[0] if len(indices) == 1 else indices
+
+        return super().__getitem__(indices)
+
 
 @implements(aten.alias.default)
 def _alias(tensor: HashTensor) -> HashTensor:
     return tensor._shallow_copy()
 
 
+@implements(aten.clone.default)
+def _clone(
+    tensor: HashTensor,
+    *,
+    memory_format: torch.memory_format = torch.preserve_format,
+) -> HashTensor:
+
+    value = tensor._value
+    if value is not None:
+        value = aten.clone.default(value, memory_format=memory_format)
+
+    return tensor._from_data(
+        tensor._map,  # NOTE No need to clone since it is read-only.
+        value,
+        tensor._min_key,  # NOTE No need to clone since it is read-only.
+        tensor._max_key,  # NOTE No need to clone since it is read-only.
+        num_keys=tensor.size(0),
+        dtype=tensor.dtype,
+    )
+
+
+@implements(aten.detach.default)
+def _detach(tensor: HashTensor) -> HashTensor:
+    value = tensor._value
+    if value is not None:
+        value = aten.detach.default(value)
+
+    return tensor._from_data(
+        tensor._map,
+        value,
+        tensor._min_key,
+        tensor._max_key,
+        num_keys=tensor.size(0),
+        dtype=tensor.dtype,
+    )
+
+
 @implements(aten._to_copy.default)
 def _to_copy(
     tensor: HashTensor,
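The `Ellipsis` handling in `__getitem__` is subtle enough to warrant a standalone illustration: a leading `...` can only be dropped when the remaining non-`None` indices already cover every dimension, because only then does `indices[0]` really index dimension 0 (and thus need key conversion). A self-contained sketch of just that decision logic:

    def first_dim_is_indexed(indices: tuple, ndim: int) -> bool:
        # Mirrors the normalization above on plain Python data:
        if indices[0] is Ellipsis and len(indices) > 1:
            nonempty = [i for i in indices[1:] if i is not None]
            if len(nonempty) == ndim:
                indices = indices[1:]  # `...` is redundant; drop it
        return indices[0] is not Ellipsis and indices[0] is not None

    # For a 2-dimensional tensor `t`:
    assert first_dim_is_indexed(([0, 2],), ndim=2)         # t[[0, 2]]
    assert first_dim_is_indexed((..., [0, 2], 1), ndim=2)  # t[..., [0, 2], 1]
    assert not first_dim_is_indexed((..., 1), ndim=2)      # t[..., 1]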
@@ -354,8 +476,8 @@ def _to_copy(
         _map = aten._to_copy.default(_map, device=device)
     # Only convert `_map` in case `CUDAHashMap` exists - otherwise we use
     # CPU-based mapping anyway and there is no need for a copy.
-    elif (torch_geometric.typing.WITH_CUDA_HASH_MAP and min_key.is_cuda
-          and tensor._min_key.device != min_key.device):
+    elif (torch_geometric.typing.WITH_CUDA_HASH_MAP and tensor.is_cuda
+          and tensor.device != min_key.device):
         key = _map.keys()
         key = aten._to_copy.default(key, device=device)
         _map = get_hash_map(key)
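For context on the rewritten condition: moving a `HashTensor` across devices goes through `_to_copy`, which only rebuilds the hash map (via `get_hash_map`) when a CUDA hash-map backend is present and the device actually changes; a dtype-only copy reuses the existing map. A hedged sketch (constructor call assumed, CUDA path guarded):

    import torch
    from torch_geometric.hash_tensor import HashTensor

    t = HashTensor(torch.tensor([7, 3, 5]), torch.randn(3, 4))

    t_f16 = t.to(torch.float16)  # dtype-only copy; the key map is reused
    assert t_f16.dtype == torch.float16

    if torch.cuda.is_available():
        t_cuda = t.to('cuda')  # may rebuild the map on the target device
        assert t_cuda.device.type == 'cuda'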
@@ -370,11 +492,32 @@ def _to_copy(
     )
 
 
+@implements(aten._pin_memory.default)
+def _pin_memory(tensor: HashTensor) -> HashTensor:
+    _map = tensor._map
+    if isinstance(_map, Tensor):
+        _map = aten._pin_memory.default(_map)
+
+    value = tensor._value
+    if value is not None:
+        value = aten._pin_memory.default(value)
+
+    return tensor._from_data(
+        _map,
+        value,
+        aten._pin_memory.default(tensor._min_key),
+        aten._pin_memory.default(tensor._max_key),
+        num_keys=tensor.size(0),
+        dtype=tensor.dtype,
+    )
+
+
 @implements(aten.unsqueeze.default)
 def _unsqueeze(tensor: HashTensor, dim: int) -> HashTensor:
     if dim == 0 or dim == -(tensor.dim() + 1):
         raise IndexError(f"Cannot unsqueeze '{tensor.__class__.__name__}' in "
-                         f"the first dimension")
+                         f"the first dimension. Please call `as_tensor()` "
+                         f"beforehand")
 
     return tensor._from_data(
         tensor._map,
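The new `_pin_memory` handler pins every inner tensor (`_map` when it is a plain tensor, `_value`, `_min_key`, `_max_key`): pinning page-locks host memory, which is what makes later host-to-device copies eligible for `non_blocking=True`. A plain-tensor illustration of the mechanism it delegates to:

    import torch

    x = torch.randn(1024, 64)
    x_pinned = x.pin_memory()  # routes through `aten._pin_memory.default`
    assert x_pinned.is_pinned()

    if torch.cuda.is_available():
        y = x_pinned.to('cuda', non_blocking=True)  # asynchronous H2D copy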
@@ -475,22 +618,32 @@ _old_index_select = torch.index_select
 
 def _new_index_select(
     input: Tensor,
-    dim: int,
+    dim: Union[int, str],
     index: Tensor,
-    *,
     out: Optional[Tensor] = None,
 ) -> Tensor:
 
-    if dim < -input.dim() or dim >= input.dim():
+    if isinstance(dim, int) and (dim < -input.dim() or dim >= input.dim()):
         raise IndexError(f"Dimension out of range (expected to be in range of "
                          f"[{-input.dim()}, {input.dim()-1}], but got {dim})")
 
     # We convert any index tensor in the first dimension into a tensor. This
     # means that downstream handling (i.e. in `aten.index_select.default`)
     # needs to take this pre-conversion into account.
-    if isinstance(input, HashTensor) and (dim == 0 or dim == -input.dim()):
+    if (not torch.jit.is_scripting() and isinstance(input, HashTensor)
+            and isinstance(dim, int) and (dim == 0 or dim == -input.dim())):
         index = as_key_tensor(index, device=input.device)
-    return _old_index_select(input, dim, index, out=out)
+
+    if isinstance(dim, int):  # Type narrowing...
+        if out is None:
+            return _old_index_select(input, dim, index)
+        else:
+            return _old_index_select(input, dim, index, out=out)
+    else:
+        if out is None:
+            return _old_index_select(input, dim, index)
+        else:
+            return _old_index_select(input, dim, index, out=out)
 
 
 torch.index_select = _new_index_select  # type: ignore
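After this monkeypatch, `torch.index_select` on a `HashTensor` converts the index through `as_key_tensor`, so the passed values are looked up as keys rather than positions. The duplicated-looking branches exist so that type checkers can match each call against exactly one of the `int`/`str` (named-dimension) overloads of the original function, and the `torch.jit.is_scripting()` guard keeps the wrapper scriptable. A hedged usage sketch (constructor call assumed):

    import torch
    from torch_geometric.hash_tensor import HashTensor

    t = HashTensor(torch.tensor([100, 200, 300]), torch.arange(6).view(3, 2))

    out = torch.index_select(t, 0, torch.tensor([300, 100]))
    print(out)  # expected: rows for keys 300 and 100, i.e. [[4, 5], [0, 1]]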
@@ -523,20 +676,25 @@ _old_select = torch.select
 
 def _new_select(
     input: Tensor,
-    dim: int,
+    dim: Union[int, str],
    index: int,
 ) -> Tensor:
 
-    if dim < -input.dim() or dim >= input.dim():
+    if isinstance(dim, int) and (dim < -input.dim() or dim >= input.dim()):
         raise IndexError(f"Dimension out of range (expected to be in range of "
                          f"[{-input.dim()}, {input.dim()-1}], but got {dim})")
 
     # We convert any index in the first dimension into an integer. This means
     # that downstream handling (i.e. in `aten.select.int`) needs to take this
     # pre-conversion into account.
-    if isinstance(input, HashTensor) and (dim == 0 or dim == -input.dim()):
+    if (not torch.jit.is_scripting() and isinstance(input, HashTensor)
+            and isinstance(dim, int) and (dim == 0 or dim == -input.dim())):
         index = int(as_key_tensor([index]))
-    return _old_select(input, dim, index)
+
+    if isinstance(dim, int):  # Type narrowing...
+        return _old_select(input, dim, index)
+    else:
+        return _old_select(input, dim, index)
 
 
 torch.select = _new_select  # type: ignore
@@ -553,7 +711,7 @@ def _select(
         key = torch.tensor(
             [index],
             dtype=tensor._min_key.dtype,
-            device=tensor._min_key.device,
+            device=tensor.device,
         )
         return tensor._get(key).squeeze(0)
@@ -565,3 +723,27 @@ def _select(
         num_keys=tensor.size(0),
         dtype=tensor.dtype,
     )
+
+
+@implements(aten.index.Tensor)
+def _index(
+    tensor: HashTensor,
+    indices: List[Optional[Tensor]],
+) -> Union[HashTensor, Tensor]:
+
+    assert len(indices) > 0
+
+    if indices[0] is not None:
+        out = tensor._get(indices[0])
+        if len(indices) > 1:
+            out = aten.index.Tensor(out, [None] + indices[1:])
+        return out
+
+    return tensor._from_data(
+        tensor._map,
+        aten.index.Tensor(tensor.as_tensor(), indices),
+        tensor._min_key,
+        tensor._max_key,
+        num_keys=tensor.size(0),
+        dtype=tensor.dtype,
+    )
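The `aten.index.Tensor` handler above gives advanced indexing its two behaviors: when the first dimension is indexed, the result is a plain tensor produced by hash lookup (with any trailing indices applied afterwards); when only trailing dimensions are indexed, the key mapping is untouched and the result stays a `HashTensor`. A hedged usage sketch (constructor call assumed):

    import torch
    from torch_geometric.hash_tensor import HashTensor

    t = HashTensor(torch.tensor([10, 20, 30]), torch.arange(12).view(3, 4))

    rows = t[torch.tensor([30, 10])]     # hash lookup -> plain Tensor [2, 4]
    cols = t[..., torch.tensor([0, 1])]  # keys untouched -> still a HashTensor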