pyg-nightly 2.7.0.dev20250606__py3-none-any.whl → 2.7.0.dev20250608__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (84)
  1. {pyg_nightly-2.7.0.dev20250606.dist-info → pyg_nightly-2.7.0.dev20250608.dist-info}/METADATA +3 -2
  2. {pyg_nightly-2.7.0.dev20250606.dist-info → pyg_nightly-2.7.0.dev20250608.dist-info}/RECORD +84 -84
  3. torch_geometric/__init__.py +5 -4
  4. torch_geometric/_compile.py +3 -2
  5. torch_geometric/contrib/__init__.py +1 -1
  6. torch_geometric/data/data.py +3 -3
  7. torch_geometric/data/database.py +4 -0
  8. torch_geometric/data/dataset.py +9 -6
  9. torch_geometric/data/hetero_data.py +7 -6
  10. torch_geometric/data/hypergraph_data.py +1 -1
  11. torch_geometric/data/in_memory_dataset.py +2 -2
  12. torch_geometric/data/large_graph_indexer.py +1 -1
  13. torch_geometric/data/lightning/datamodule.py +28 -20
  14. torch_geometric/data/storage.py +1 -1
  15. torch_geometric/datasets/dbp15k.py +1 -1
  16. torch_geometric/datasets/molecule_net.py +3 -2
  17. torch_geometric/datasets/tag_dataset.py +1 -1
  18. torch_geometric/datasets/wikics.py +2 -1
  19. torch_geometric/deprecation.py +1 -1
  20. torch_geometric/distributed/rpc.py +2 -2
  21. torch_geometric/explain/algorithm/captum_explainer.py +2 -1
  22. torch_geometric/explain/algorithm/graphmask_explainer.py +7 -7
  23. torch_geometric/explain/explainer.py +1 -1
  24. torch_geometric/graphgym/config.py +3 -2
  25. torch_geometric/graphgym/imports.py +4 -2
  26. torch_geometric/graphgym/logger.py +1 -1
  27. torch_geometric/graphgym/models/encoder.py +2 -2
  28. torch_geometric/graphgym/utils/comp_budget.py +2 -1
  29. torch_geometric/hash_tensor.py +5 -4
  30. torch_geometric/io/fs.py +5 -4
  31. torch_geometric/loader/ibmb_loader.py +4 -4
  32. torch_geometric/loader/mixin.py +2 -1
  33. torch_geometric/loader/prefetch.py +3 -2
  34. torch_geometric/nn/aggr/fused.py +1 -1
  35. torch_geometric/nn/conv/appnp.py +1 -1
  36. torch_geometric/nn/conv/eg_conv.py +7 -7
  37. torch_geometric/nn/conv/gen_conv.py +1 -1
  38. torch_geometric/nn/conv/gravnet_conv.py +2 -1
  39. torch_geometric/nn/conv/hetero_conv.py +2 -1
  40. torch_geometric/nn/conv/meshcnn_conv.py +6 -4
  41. torch_geometric/nn/conv/message_passing.py +3 -2
  42. torch_geometric/nn/conv/sg_conv.py +1 -1
  43. torch_geometric/nn/conv/spline_conv.py +2 -1
  44. torch_geometric/nn/conv/ssg_conv.py +1 -1
  45. torch_geometric/nn/data_parallel.py +5 -4
  46. torch_geometric/nn/fx.py +7 -5
  47. torch_geometric/nn/models/attentive_fp.py +1 -1
  48. torch_geometric/nn/models/deep_graph_infomax.py +1 -1
  49. torch_geometric/nn/models/glem.py +20 -12
  50. torch_geometric/nn/models/gpse.py +30 -13
  51. torch_geometric/nn/models/graph_unet.py +1 -1
  52. torch_geometric/nn/models/metapath2vec.py +1 -1
  53. torch_geometric/nn/models/mlp.py +4 -2
  54. torch_geometric/nn/models/node2vec.py +1 -1
  55. torch_geometric/nn/models/rev_gnn.py +1 -1
  56. torch_geometric/nn/models/signed_gcn.py +1 -1
  57. torch_geometric/nn/nlp/llm.py +2 -1
  58. torch_geometric/nn/pool/__init__.py +8 -4
  59. torch_geometric/nn/pool/knn.py +13 -10
  60. torch_geometric/nn/to_hetero_module.py +4 -3
  61. torch_geometric/nn/to_hetero_transformer.py +3 -3
  62. torch_geometric/nn/to_hetero_with_bases_transformer.py +3 -3
  63. torch_geometric/sampler/base.py +7 -4
  64. torch_geometric/sampler/hgt_sampler.py +11 -1
  65. torch_geometric/sampler/neighbor_sampler.py +10 -8
  66. torch_geometric/testing/decorators.py +3 -2
  67. torch_geometric/testing/distributed.py +1 -1
  68. torch_geometric/transforms/add_gpse.py +11 -2
  69. torch_geometric/transforms/add_metapaths.py +8 -6
  70. torch_geometric/transforms/base_transform.py +2 -1
  71. torch_geometric/transforms/gdc.py +7 -8
  72. torch_geometric/transforms/largest_connected_components.py +1 -1
  73. torch_geometric/transforms/normalize_features.py +3 -3
  74. torch_geometric/transforms/random_link_split.py +1 -1
  75. torch_geometric/transforms/remove_duplicated_edges.py +4 -2
  76. torch_geometric/typing.py +13 -9
  77. torch_geometric/utils/_scatter.py +8 -6
  78. torch_geometric/utils/_spmm.py +15 -12
  79. torch_geometric/utils/convert.py +2 -2
  80. torch_geometric/utils/embedding.py +5 -3
  81. torch_geometric/utils/geodesic.py +4 -3
  82. torch_geometric/utils/sparse.py +3 -2
  83. {pyg_nightly-2.7.0.dev20250606.dist-info → pyg_nightly-2.7.0.dev20250608.dist-info}/WHEEL +0 -0
  84. {pyg_nightly-2.7.0.dev20250606.dist-info → pyg_nightly-2.7.0.dev20250608.dist-info}/licenses/LICENSE +0 -0
torch_geometric/transforms/base_transform.py CHANGED
@@ -1,5 +1,5 @@
  import copy
- from abc import ABC
+ from abc import ABC, abstractmethod
  from typing import Any


@@ -31,6 +31,7 @@ class BaseTransform(ABC):
          # Shallow-copy the data so that we prevent in-place data modification.
          return self.forward(copy.copy(data))

+     @abstractmethod
      def forward(self, data: Any) -> Any:
          pass

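The `@abstractmethod` addition turns a silent no-op into an early failure: a `BaseTransform` subclass that forgets to implement `forward` can no longer be instantiated. A minimal sketch of the enforced contract (the `Incomplete` class below is hypothetical, for illustration only):

from abc import ABC, abstractmethod
from typing import Any


class BaseTransform(ABC):
    @abstractmethod
    def forward(self, data: Any) -> Any:
        pass


class Incomplete(BaseTransform):
    pass  # Forgets to override `forward`.


# Previously this instantiated fine and `forward` silently returned
# `None`; with `@abstractmethod`, Python now refuses up front:
try:
    Incomplete()
except TypeError as e:
    print(e)  # Can't instantiate abstract class Incomplete ...
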
torch_geometric/transforms/gdc.py CHANGED
@@ -1,4 +1,4 @@
- from typing import Any, Dict, Tuple
+ from typing import Any, Dict, Optional, Tuple

  import numpy as np
  import torch
@@ -78,18 +78,17 @@ class GDC(BaseTransform):
          self_loop_weight: float = 1.,
          normalization_in: str = 'sym',
          normalization_out: str = 'col',
-         diffusion_kwargs: Dict[str, Any] = dict(method='ppr', alpha=0.15),
-         sparsification_kwargs: Dict[str, Any] = dict(
-             method='threshold',
-             avg_degree=64,
-         ),
+         diffusion_kwargs: Optional[Dict[str, Any]] = None,
+         sparsification_kwargs: Optional[Dict[str, Any]] = None,
          exact: bool = True,
      ) -> None:
          self.self_loop_weight = self_loop_weight
          self.normalization_in = normalization_in
          self.normalization_out = normalization_out
-         self.diffusion_kwargs = diffusion_kwargs
-         self.sparsification_kwargs = sparsification_kwargs
+         self.diffusion_kwargs = diffusion_kwargs or dict(
+             method='ppr', alpha=0.15)
+         self.sparsification_kwargs = sparsification_kwargs or dict(
+             method='threshold', avg_degree=64)
          self.exact = exact

          if self_loop_weight:
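
This hunk (and the matching ones in `NormalizeFeatures` and `RemoveDuplicatedEdges` below) retires Python's mutable-default-argument pitfall: a `dict` or `list` default is created once, at function definition time, and shared across every call. A standalone sketch of the hazard, with hypothetical names:

def diffuse_bad(kwargs: dict = dict(alpha=0.15)):
    kwargs['alpha'] *= 2  # Mutates the one shared default object.
    return kwargs


print(diffuse_bad())  # {'alpha': 0.3}
print(diffuse_bad())  # {'alpha': 0.6} -- state leaked across calls


def diffuse_good(kwargs=None):
    kwargs = kwargs or dict(alpha=0.15)  # Fresh dict on every call.
    kwargs['alpha'] *= 2
    return kwargs


print(diffuse_good())  # {'alpha': 0.3}
print(diffuse_good())  # {'alpha': 0.3}

One subtlety of the `or` fallback: an explicitly passed empty `dict` is falsy and is replaced by the defaults as well; an `if kwargs is None` check would be the stricter alternative.
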
torch_geometric/transforms/largest_connected_components.py CHANGED
@@ -47,7 +47,7 @@ class LargestConnectedComponents(BaseTransform):
              return data

          _, count = np.unique(component, return_counts=True)
-         subset_np = np.in1d(component, count.argsort()[-self.num_components:])
+         subset_np = np.isin(component, count.argsort()[-self.num_components:])
          subset = torch.from_numpy(subset_np)
          subset = subset.to(data.edge_index.device, torch.bool)

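`np.in1d` is the legacy name for this membership test; NumPy has long recommended `np.isin` and, to my knowledge, deprecates `np.in1d` outright as of NumPy 2.0, so this swap keeps the transform warning-free on recent releases. For the 1-D `component` array used here the two are equivalent:

import numpy as np

component = np.array([0, 1, 1, 2, 2, 2])  # Component label per node.
_, count = np.unique(component, return_counts=True)  # counts: [1, 2, 3]
keep = count.argsort()[-1:]  # Largest component(s); labels coincide
                             # with indices here: [2]

mask = np.isin(component, keep)  # Same result `np.in1d` produced.
print(mask)  # [False False False  True  True  True]
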
torch_geometric/transforms/normalize_features.py CHANGED
@@ -1,4 +1,4 @@
- from typing import List, Union
+ from typing import List, Optional, Union

  from torch_geometric.data import Data, HeteroData
  from torch_geometric.data.datapipes import functional_transform
@@ -14,8 +14,8 @@ class NormalizeFeatures(BaseTransform):
          attrs (List[str]): The names of attributes to normalize.
              (default: :obj:`["x"]`)
      """
-     def __init__(self, attrs: List[str] = ["x"]):
-         self.attrs = attrs
+     def __init__(self, attrs: Optional[List[str]] = None) -> None:
+         self.attrs = attrs or ["x"]

      def forward(
          self,
torch_geometric/transforms/random_link_split.py CHANGED
@@ -245,7 +245,7 @@ class RandomLinkSplit(BaseTransform):
              warnings.warn(
                  f"There are not enough negative edges to satisfy "
                  "the provided sampling ratio. The ratio will be "
-                 f"adjusted to {ratio:.2f}.")
+                 f"adjusted to {ratio:.2f}.", stacklevel=2)
              num_neg_train = int((num_neg_train / num_neg) * num_neg_found)
              num_neg_val = int((num_neg_val / num_neg) * num_neg_found)
              num_neg_test = num_neg_found - num_neg_train - num_neg_val
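
This is the first of many hunks in this release that add `stacklevel=2` to `warnings.warn`. With the default `stacklevel=1`, the warning is attributed to the `warn()` call inside the library; `stacklevel=2` walks one frame up the stack so the report points at the user code that triggered it. A minimal sketch, with a hypothetical `split_edges` standing in for the library internals:

import warnings


def split_edges():
    # stacklevel=2: blame the caller of `split_edges`, not this line.
    warnings.warn("The ratio will be adjusted to 0.42.", stacklevel=2)


split_edges()  # The UserWarning is now reported at *this* line.
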
torch_geometric/transforms/remove_duplicated_edges.py CHANGED
@@ -1,4 +1,4 @@
- from typing import List, Union
+ from typing import List, Optional, Union

  from torch_geometric.data import Data, HeteroData
  from torch_geometric.data.datapipes import functional_transform
@@ -22,9 +22,11 @@ class RemoveDuplicatedEdges(BaseTransform):
      """
      def __init__(
          self,
-         key: Union[str, List[str]] = ['edge_attr', 'edge_weight'],
+         key: Optional[Union[str, List[str]]] = None,
          reduce: str = "add",
      ) -> None:
+         key = key or ['edge_attr', 'edge_weight']
+
          if isinstance(key, str):
              key = [key]

torch_geometric/typing.py CHANGED
@@ -81,8 +81,9 @@ try:
          WITH_CUDA_HASH_MAP = False
  except Exception as e:
      if not isinstance(e, ImportError):  # pragma: no cover
-         warnings.warn(f"An issue occurred while importing 'pyg-lib'. "
-                       f"Disabling its usage. Stacktrace: {e}")
+         warnings.warn(
+             f"An issue occurred while importing 'pyg-lib'. "
+             f"Disabling its usage. Stacktrace: {e}", stacklevel=2)
      pyg_lib = object
      WITH_PYG_LIB = False
      WITH_GMM = False
@@ -125,8 +126,9 @@ try:
      WITH_TORCH_SCATTER = True
  except Exception as e:
      if not isinstance(e, ImportError):  # pragma: no cover
-         warnings.warn(f"An issue occurred while importing 'torch-scatter'. "
-                       f"Disabling its usage. Stacktrace: {e}")
+         warnings.warn(
+             f"An issue occurred while importing 'torch-scatter'. "
+             f"Disabling its usage. Stacktrace: {e}", stacklevel=2)
      torch_scatter = object
      WITH_TORCH_SCATTER = False

@@ -136,8 +138,9 @@ try:
      WITH_TORCH_CLUSTER_BATCH_SIZE = 'batch_size' in torch_cluster.knn.__doc__
  except Exception as e:
      if not isinstance(e, ImportError):  # pragma: no cover
-         warnings.warn(f"An issue occurred while importing 'torch-cluster'. "
-                       f"Disabling its usage. Stacktrace: {e}")
+         warnings.warn(
+             f"An issue occurred while importing 'torch-cluster'. "
+             f"Disabling its usage. Stacktrace: {e}", stacklevel=2)
      WITH_TORCH_CLUSTER = False
      WITH_TORCH_CLUSTER_BATCH_SIZE = False

@@ -154,7 +157,7 @@ except Exception as e:
      if not isinstance(e, ImportError):  # pragma: no cover
          warnings.warn(
              f"An issue occurred while importing 'torch-spline-conv'. "
-             f"Disabling its usage. Stacktrace: {e}")
+             f"Disabling its usage. Stacktrace: {e}", stacklevel=2)
      WITH_TORCH_SPLINE_CONV = False

  try:
@@ -163,8 +166,9 @@ try:
      WITH_TORCH_SPARSE = True
  except Exception as e:
      if not isinstance(e, ImportError):  # pragma: no cover
-         warnings.warn(f"An issue occurred while importing 'torch-sparse'. "
-                       f"Disabling its usage. Stacktrace: {e}")
+         warnings.warn(
+             f"An issue occurred while importing 'torch-sparse'. "
+             f"Disabling its usage. Stacktrace: {e}", stacklevel=2)
      WITH_TORCH_SPARSE = False

      class SparseStorage:  # type: ignore
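
All five `typing.py` hunks touch the same optional-dependency guard, which distinguishes "package absent" (an `ImportError`, tolerated silently) from "package present but broken" (any other exception, reported). Condensed from the diff above into a self-contained sketch:

import warnings

try:
    import torch_scatter  # Optional acceleration package.
    WITH_TORCH_SCATTER = True
except Exception as e:
    if not isinstance(e, ImportError):
        # Installed but failing to load (e.g. an ABI mismatch):
        # surface the problem, now attributed to the importer's caller.
        warnings.warn(
            f"An issue occurred while importing 'torch-scatter'. "
            f"Disabling its usage. Stacktrace: {e}", stacklevel=2)
    torch_scatter = object
    WITH_TORCH_SCATTER = False
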
torch_geometric/utils/_scatter.py CHANGED
@@ -88,9 +88,10 @@ def scatter(

      if (src.is_cuda and src.requires_grad and not is_compiling()
              and not is_in_onnx_export()):
-         warnings.warn(f"The usage of `scatter(reduce='{reduce}')` "
-                       f"can be accelerated via the 'torch-scatter'"
-                       f" package, but it was not found")
+         warnings.warn(
+             f"The usage of `scatter(reduce='{reduce}')` "
+             f"can be accelerated via the 'torch-scatter'"
+             f" package, but it was not found", stacklevel=2)

      index = broadcast(index, src, dim)
      if not is_in_onnx_export():
@@ -120,9 +121,10 @@ def scatter(
              or not src.is_cuda):

          if src.is_cuda and not is_compiling():
-             warnings.warn(f"The usage of `scatter(reduce='{reduce}')` "
-                           f"can be accelerated via the 'torch-scatter'"
-                           f" package, but it was not found")
+             warnings.warn(
+                 f"The usage of `scatter(reduce='{reduce}')` "
+                 f"can be accelerated via the 'torch-scatter'"
+                 f" package, but it was not found", stacklevel=2)

          index = broadcast(index, src, dim)
          # We initialize with `one` here to match `scatter_mul` output:
torch_geometric/utils/_spmm.py CHANGED
@@ -63,18 +63,20 @@ def spmm(

      # Always convert COO to CSR for more efficient processing:
      if src.layout == torch.sparse_coo:
-         warnings.warn(f"Converting sparse tensor to CSR format for more "
-                       f"efficient processing. Consider converting your "
-                       f"sparse tensor to CSR format beforehand to avoid "
-                       f"repeated conversion (got '{src.layout}')")
+         warnings.warn(
+             f"Converting sparse tensor to CSR format for more "
+             f"efficient processing. Consider converting your "
+             f"sparse tensor to CSR format beforehand to avoid "
+             f"repeated conversion (got '{src.layout}')", stacklevel=2)
          src = src.to_sparse_csr()

      # Warn in case of CSC format without gradient computation:
      if src.layout == torch.sparse_csc and not other.requires_grad:
-         warnings.warn(f"Converting sparse tensor to CSR format for more "
-                       f"efficient processing. Consider converting your "
-                       f"sparse tensor to CSR format beforehand to avoid "
-                       f"repeated conversion (got '{src.layout}')")
+         warnings.warn(
+             f"Converting sparse tensor to CSR format for more "
+             f"efficient processing. Consider converting your "
+             f"sparse tensor to CSR format beforehand to avoid "
+             f"repeated conversion (got '{src.layout}')", stacklevel=2)

      # Use the default code path for `sum` reduction (works on CPU/GPU):
      if reduce == 'sum':
@@ -99,10 +101,11 @@ def spmm(
      # TODO The `torch.sparse.mm` code path with the `reduce` argument does
      # not yet support CSC :(
      if src.layout == torch.sparse_csc:
-         warnings.warn(f"Converting sparse tensor to CSR format for more "
-                       f"efficient processing. Consider converting your "
-                       f"sparse tensor to CSR format beforehand to avoid "
-                       f"repeated conversion (got '{src.layout}')")
+         warnings.warn(
+             f"Converting sparse tensor to CSR format for more "
+             f"efficient processing. Consider converting your "
+             f"sparse tensor to CSR format beforehand to avoid "
+             f"repeated conversion (got '{src.layout}')", stacklevel=2)
          src = src.to_sparse_csr()

      return torch.sparse.mm(src, other, reduce)
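
The practical takeaway from these `spmm` warnings: the COO/CSC-to-CSR conversion happens on every call, so callers that reuse the same sparse matrix should convert it once up front. A sketch of that usage (assuming the public `torch_geometric.utils.spmm` entry point):

import torch
from torch_geometric.utils import spmm

# A 3x3 adjacency matrix in COO layout:
index = torch.tensor([[0, 1, 1], [1, 0, 2]])
adj = torch.sparse_coo_tensor(index, torch.ones(3), (3, 3))
x = torch.randn(3, 8)

adj = adj.to_sparse_csr()  # Convert once, outside the hot loop.
for _ in range(10):        # No per-call warning or re-conversion.
    x = spmm(adj, x, reduce='sum')
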
torch_geometric/utils/convert.py CHANGED
@@ -251,13 +251,13 @@ def from_networkx(
      if group_edge_attrs is not None and not isinstance(group_edge_attrs, list):
          group_edge_attrs = edge_attrs

-     for i, (_, feat_dict) in enumerate(G.nodes(data=True)):
+     for _, feat_dict in G.nodes(data=True):
          if set(feat_dict.keys()) != set(node_attrs):
              raise ValueError('Not all nodes contain the same attributes')
          for key, value in feat_dict.items():
              data_dict[str(key)].append(value)

-     for i, (_, _, feat_dict) in enumerate(G.edges(data=True)):
+     for _, _, feat_dict in G.edges(data=True):
          if set(feat_dict.keys()) != set(edge_attrs):
              raise ValueError('Not all edges contain the same attributes')
          for key, value in feat_dict.items():
torch_geometric/utils/embedding.py CHANGED
@@ -42,7 +42,8 @@ def get_embeddings(
          hook_handles.append(module.register_forward_hook(hook))

      if len(hook_handles) == 0:
-         warnings.warn("The 'model' does not have any 'MessagePassing' layers")
+         warnings.warn("The 'model' does not have any 'MessagePassing' layers",
+                       stacklevel=2)

      training = model.training
      model.eval()
@@ -123,8 +124,9 @@ def get_embeddings_hetero(
          hook_handles.append(module.register_forward_hook(hook))

      if len(hook_handles) == 0:
-         warnings.warn("The 'model' does not have any heterogenous "
-                       "'MessagePassing' layers")
+         warnings.warn(
+             "The 'model' does not have any heterogenous "
+             "'MessagePassing' layers", stacklevel=2)

      # Run the model forward pass
      training = model.training
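
For context, `get_embeddings` relies on PyTorch forward hooks to capture intermediate layer outputs, which is why it warns when no `MessagePassing` module was found to attach a hook to. A minimal sketch of that capture mechanism, with a plain `torch.nn.Linear` standing in for a GNN layer:

import torch

activations = []

def hook(module, inputs, output):
    activations.append(output.detach())  # Capture the layer's output.

layer = torch.nn.Linear(4, 8)
handle = layer.register_forward_hook(hook)

layer(torch.randn(2, 4))
handle.remove()  # Detach the hook once the embeddings are collected.

print(activations[0].shape)  # torch.Size([2, 8])
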
torch_geometric/utils/geodesic.py CHANGED
@@ -66,9 +66,10 @@ def geodesic_distance(  # noqa: D417

      if 'dest' in kwargs:
          dst = kwargs['dest']
-         warnings.warn("'dest' attribute in 'geodesic_distance' is deprecated "
-                       "and will be removed in a future release. Use the 'dst' "
-                       "argument instead.")
+         warnings.warn(
+             "'dest' attribute in 'geodesic_distance' is deprecated "
+             "and will be removed in a future release. Use the 'dst' "
+             "argument instead.", stacklevel=2)

      max_distance = float('inf') if max_distance is None else max_distance

torch_geometric/utils/sparse.py CHANGED
@@ -70,8 +70,9 @@ def dense_to_sparse(
              f"three-dimensional (got {adj.dim()} dimensions)")

      if mask is not None and adj.dim() == 2:
-         warnings.warn("Mask should not be provided in case the dense "
-                       "adjacency matrix is two-dimensional")
+         warnings.warn(
+             "Mask should not be provided in case the dense "
+             "adjacency matrix is two-dimensional", stacklevel=2)
          mask = None

      if mask is not None and mask.dim() != 2: