pyg-nightly 2.7.0.dev20250607__py3-none-any.whl → 2.7.0.dev20250608__py3-none-any.whl
This diff reflects the changes between two publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.
- {pyg_nightly-2.7.0.dev20250607.dist-info → pyg_nightly-2.7.0.dev20250608.dist-info}/METADATA +3 -2
- {pyg_nightly-2.7.0.dev20250607.dist-info → pyg_nightly-2.7.0.dev20250608.dist-info}/RECORD +79 -79
- torch_geometric/__init__.py +5 -4
- torch_geometric/_compile.py +3 -2
- torch_geometric/contrib/__init__.py +1 -1
- torch_geometric/data/data.py +3 -3
- torch_geometric/data/database.py +4 -0
- torch_geometric/data/dataset.py +9 -6
- torch_geometric/data/hetero_data.py +7 -6
- torch_geometric/data/hypergraph_data.py +1 -1
- torch_geometric/data/in_memory_dataset.py +2 -2
- torch_geometric/data/large_graph_indexer.py +1 -1
- torch_geometric/data/lightning/datamodule.py +28 -20
- torch_geometric/data/storage.py +1 -1
- torch_geometric/datasets/dbp15k.py +1 -1
- torch_geometric/datasets/molecule_net.py +3 -2
- torch_geometric/datasets/tag_dataset.py +1 -1
- torch_geometric/datasets/wikics.py +2 -1
- torch_geometric/deprecation.py +1 -1
- torch_geometric/distributed/rpc.py +2 -2
- torch_geometric/explain/algorithm/captum_explainer.py +2 -1
- torch_geometric/explain/algorithm/graphmask_explainer.py +7 -7
- torch_geometric/explain/explainer.py +1 -1
- torch_geometric/graphgym/config.py +3 -2
- torch_geometric/graphgym/imports.py +4 -2
- torch_geometric/graphgym/logger.py +1 -1
- torch_geometric/graphgym/models/encoder.py +2 -2
- torch_geometric/hash_tensor.py +5 -4
- torch_geometric/io/fs.py +5 -4
- torch_geometric/loader/ibmb_loader.py +4 -4
- torch_geometric/loader/mixin.py +2 -1
- torch_geometric/loader/prefetch.py +3 -2
- torch_geometric/nn/aggr/fused.py +1 -1
- torch_geometric/nn/conv/appnp.py +1 -1
- torch_geometric/nn/conv/gen_conv.py +1 -1
- torch_geometric/nn/conv/gravnet_conv.py +2 -1
- torch_geometric/nn/conv/hetero_conv.py +2 -1
- torch_geometric/nn/conv/meshcnn_conv.py +6 -4
- torch_geometric/nn/conv/message_passing.py +3 -2
- torch_geometric/nn/conv/sg_conv.py +1 -1
- torch_geometric/nn/conv/spline_conv.py +2 -1
- torch_geometric/nn/conv/ssg_conv.py +1 -1
- torch_geometric/nn/data_parallel.py +5 -4
- torch_geometric/nn/fx.py +7 -5
- torch_geometric/nn/models/attentive_fp.py +1 -1
- torch_geometric/nn/models/deep_graph_infomax.py +1 -1
- torch_geometric/nn/models/glem.py +20 -12
- torch_geometric/nn/models/gpse.py +2 -2
- torch_geometric/nn/models/graph_unet.py +1 -1
- torch_geometric/nn/models/metapath2vec.py +1 -1
- torch_geometric/nn/models/mlp.py +4 -2
- torch_geometric/nn/models/node2vec.py +1 -1
- torch_geometric/nn/models/rev_gnn.py +1 -1
- torch_geometric/nn/models/signed_gcn.py +1 -1
- torch_geometric/nn/nlp/llm.py +2 -1
- torch_geometric/nn/pool/__init__.py +8 -4
- torch_geometric/nn/pool/knn.py +13 -10
- torch_geometric/nn/to_hetero_module.py +4 -3
- torch_geometric/nn/to_hetero_transformer.py +3 -3
- torch_geometric/nn/to_hetero_with_bases_transformer.py +3 -3
- torch_geometric/sampler/base.py +7 -4
- torch_geometric/sampler/hgt_sampler.py +11 -1
- torch_geometric/sampler/neighbor_sampler.py +10 -8
- torch_geometric/testing/decorators.py +3 -2
- torch_geometric/testing/distributed.py +1 -1
- torch_geometric/transforms/add_gpse.py +11 -2
- torch_geometric/transforms/add_metapaths.py +8 -6
- torch_geometric/transforms/base_transform.py +2 -1
- torch_geometric/transforms/largest_connected_components.py +1 -1
- torch_geometric/transforms/random_link_split.py +1 -1
- torch_geometric/typing.py +13 -9
- torch_geometric/utils/_scatter.py +8 -6
- torch_geometric/utils/_spmm.py +15 -12
- torch_geometric/utils/convert.py +2 -2
- torch_geometric/utils/embedding.py +5 -3
- torch_geometric/utils/geodesic.py +4 -3
- torch_geometric/utils/sparse.py +3 -2
- {pyg_nightly-2.7.0.dev20250607.dist-info → pyg_nightly-2.7.0.dev20250608.dist-info}/WHEEL +0 -0
- {pyg_nightly-2.7.0.dev20250607.dist-info → pyg_nightly-2.7.0.dev20250608.dist-info}/licenses/LICENSE +0 -0
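
Most of the hunks below make the same mechanical change: `warnings.warn(...)` calls gain an explicit `stacklevel=2` argument, so that the reported source location points at the user code that called into PyG rather than at the library line emitting the warning. A minimal sketch of the effect (the function and message here are illustrative, not taken from the package):

```python
import warnings

def load_dataset(path):
    # With stacklevel=2, the warning is attributed to the caller of
    # `load_dataset`, not to this line inside the library:
    warnings.warn(f"'{path}' is deprecated, use 'data/' instead",
                  stacklevel=2)

load_dataset('old_data/')  # UserWarning reported at this call site
```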
@@ -585,12 +585,13 @@ class HeteroData(BaseData, FeatureStore, GraphStore):
         global _DISPLAYED_TYPE_NAME_WARNING
         if not _DISPLAYED_TYPE_NAME_WARNING and '__' in name:
             _DISPLAYED_TYPE_NAME_WARNING = True
-            warnings.warn(
+            warnings.warn(
+                f"There exist type names in the "
+                f"'{self.__class__.__name__}' object that contain "
+                f"double underscores '__' (e.g., '{name}'). This "
+                f"may lead to unexpected behavior. To avoid any "
+                f"issues, ensure that your type names only contain "
+                f"single underscores.", stacklevel=2)

     def get_node_store(self, key: NodeType) -> NodeStorage:
         r"""Gets the :class:`~torch_geometric.data.storage.NodeStorage` object
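
For context on the warning added above: PyG commonly flattens heterogeneous edge-type tuples into single strings joined by double underscores, so a node or relation name that itself contains `'__'` can no longer be split back unambiguously. A small illustration (standard `HeteroData` usage, not code from this diff):

```python
import torch
from torch_geometric.data import HeteroData

data = HeteroData()
data['paper'].x = torch.randn(4, 16)
data['author'].x = torch.randn(3, 16)
data['author', 'writes', 'paper'].edge_index = torch.tensor(
    [[0, 1, 2], [0, 1, 3]])

edge_type = ('author', 'writes', 'paper')
flat_name = '__'.join(edge_type)  # 'author__writes__paper'
# A type name such as 'my__type' would break this round trip:
print(flat_name.split('__'))      # ['author', 'writes', 'paper']
```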
@@ -297,7 +297,7 @@ class InMemoryDataset(Dataset):
         self._data_list = None
         msg += f' {msg4}'

-        warnings.warn(msg)
+        warnings.warn(msg, stacklevel=2)

         return self._data

@@ -346,7 +346,7 @@ class InMemoryDataset(Dataset):

     def nested_iter(node: Union[Mapping, Sequence]) -> Iterable:
         if isinstance(node, Mapping):
-            for
+            for value in node.values():
                 yield from nested_iter(value)
         elif isinstance(node, Sequence):
             yield from enumerate(node)
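
Assembled as a standalone sketch (not the exact library code), the completed loop header above yields `(index, item)` pairs from arbitrarily nested mappings and sequences:

```python
from collections.abc import Mapping, Sequence

def nested_iter(node):
    # Recurse into mapping values; for sequences, yield (index, item) pairs.
    if isinstance(node, Mapping):
        for value in node.values():
            yield from nested_iter(value)
    elif isinstance(node, Sequence):
        yield from enumerate(node)

print(list(nested_iter({'x': [10, 20], 'y': [30]})))
# [(0, 10), (1, 20), (0, 30)]
```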
@@ -141,7 +141,7 @@ class LargeGraphIndexer:
         self.edge_attr[default_key] = list()
         self.edge_attr[EDGE_PID] = edges

-        for
+        for tup in edges:
             h, r, t = tup
             self.edge_attr[EDGE_HEAD].append(h)
             self.edge_attr[EDGE_RELATION].append(r)
@@ -40,9 +40,11 @@ class LightningDataModule(PLLightningDataModule):
                         kwargs.get('num_workers', 0) > 0)

         if 'shuffle' in kwargs:
-            warnings.warn(
+            warnings.warn(
+                f"The 'shuffle={kwargs['shuffle']}' option is "
+                f"ignored in '{self.__class__.__name__}'. Remove it "
+                f"from the argument list to disable this warning",
+                stacklevel=2)
             del kwargs['shuffle']

         self.kwargs = kwargs
@@ -74,34 +76,39 @@ class LightningData(LightningDataModule):
             raise ValueError(f"Undefined 'loader' option (got '{loader}')")

         if loader == 'full' and kwargs['batch_size'] != 1:
-            warnings.warn(
+            warnings.warn(
+                f"Re-setting 'batch_size' to 1 in "
+                f"'{self.__class__.__name__}' for loader='full' "
+                f"(got '{kwargs['batch_size']}')", stacklevel=2)
             kwargs['batch_size'] = 1

         if loader == 'full' and kwargs['num_workers'] != 0:
-            warnings.warn(
+            warnings.warn(
+                f"Re-setting 'num_workers' to 0 in "
+                f"'{self.__class__.__name__}' for loader='full' "
+                f"(got '{kwargs['num_workers']}')", stacklevel=2)
             kwargs['num_workers'] = 0

         if loader == 'full' and kwargs.get('sampler') is not None:
-            warnings.warn(
+            warnings.warn(
+                "'sampler' option is not supported for "
+                "loader='full'", stacklevel=2)
             kwargs.pop('sampler', None)

         if loader == 'full' and kwargs.get('batch_sampler') is not None:
-            warnings.warn(
+            warnings.warn(
+                "'batch_sampler' option is not supported for "
+                "loader='full'", stacklevel=2)
             kwargs.pop('batch_sampler', None)

         super().__init__(has_val, has_test, **kwargs)

         if loader == 'full':
             if kwargs.get('pin_memory', False):
-                warnings.warn(
+                warnings.warn(
+                    f"Re-setting 'pin_memory' to 'False' in "
+                    f"'{self.__class__.__name__}' for loader='full' "
+                    f"(got 'True')", stacklevel=2)
                 self.kwargs['pin_memory'] = False

         self.data = data
@@ -127,10 +134,11 @@ class LightningData(LightningDataModule):
                 graph_sampler.__class__,
             )
             if len(sampler_kwargs) > 0:
-                warnings.warn(
+                warnings.warn(
+                    f"Ignoring the arguments "
+                    f"{list(sampler_kwargs.keys())} in "
+                    f"'{self.__class__.__name__}' since a custom "
+                    f"'graph_sampler' was passed", stacklevel=2)
             self.graph_sampler = graph_sampler

         else:
torch_geometric/data/storage.py CHANGED
@@ -454,7 +454,7 @@ class NodeStorage(BaseStorage):
             f"'{set(self.keys())}'. Please explicitly set 'num_nodes' as an "
             f"attribute of " +
             ("'data'" if self._key is None else f"'data[{self._key}]'") +
-            " to suppress this warning")
+            " to suppress this warning", stacklevel=2)
         if 'edge_index' in self and isinstance(self.edge_index, Tensor):
             if self.edge_index.numel() > 0:
                 return int(self.edge_index.max()) + 1
@@ -73,7 +73,7 @@ class DBP15K(InMemoryDataset):
     def process(self) -> None:
         embs = {}
         with open(osp.join(self.raw_dir, 'sub.glove.300d')) as f:
-            for
+            for line in f:
                 info = line.strip().split(' ')
                 if len(info) > 300:
                     embs[info[0]] = torch.tensor([float(x) for x in info[1:]])
@@ -210,8 +210,9 @@ class MoleculeNet(InMemoryDataset):
             data.y = y

             if data.num_nodes == 0:
-                warnings.warn(
+                warnings.warn(
+                    f"Skipping molecule '{smiles}' since it "
+                    f"resulted in zero atoms", stacklevel=2)
                 continue

             if self.pre_filter is not None and not self.pre_filter(data):
@@ -128,7 +128,7 @@ class TAGDataset(InMemoryDataset):
     @property
     def raw_file_names(self) -> List[str]:
         file_names = []
-        for
+        for _, _, files in os.walk(osp.join(self.root, 'raw')):
             for file in files:
                 file_names.append(file)
         return file_names
@@ -45,7 +45,8 @@ class WikiCS(InMemoryDataset):
             warnings.warn(
                 f"The {self.__class__.__name__} dataset now returns an "
                 f"undirected graph by default. Please explicitly specify "
-                f"'is_undirected=False' to restore the old behavior."
+                f"'is_undirected=False' to restore the old behavior.",
+                stacklevel=2)
             is_undirected = True
         self.is_undirected = is_undirected
         super().__init__(root, transform, pre_transform,
torch_geometric/distributed/rpc.py CHANGED
@@ -92,7 +92,7 @@ def shutdown_rpc(id: str = None, graceful: bool = True,
 class RPCRouter:
     r"""A router to get the worker based on the partition ID."""
     def __init__(self, partition_to_workers: List[List[str]]):
-        for
+        for rpc_worker_list in partition_to_workers:
             if len(rpc_worker_list) == 0:
                 raise ValueError('No RPC worker is in worker list')
         self.partition_to_workers = partition_to_workers
@@ -120,7 +120,7 @@ def rpc_partition_to_workers(
     partition_to_workers = [[] for _ in range(num_partitions)]
     gathered_results = global_all_gather(
         (ctx.role, num_partitions, current_partition_idx))
-    for worker_name, (
+    for worker_name, (_, _, idx) in gathered_results.items():
         partition_to_workers[idx].append(worker_name)
     return partition_to_workers

@@ -73,7 +73,8 @@ class CaptumExplainer(ExplainerAlgorithm):
                 f"{self.attribution_method_class.__name__}")

         if kwargs.get('internal_batch_size', 1) != 1:
-            warnings.warn("Overriding 'internal_batch_size' to 1"
+            warnings.warn("Overriding 'internal_batch_size' to 1",
+                          stacklevel=2)

         if 'internal_batch_size' in self._get_attribute_parameters():
             kwargs['internal_batch_size'] = 1
@@ -202,25 +202,25 @@ class GraphMaskExplainer(ExplainerAlgorithm):

         baselines, self.gates, full_biases = [], torch.nn.ModuleList(), []

-        for v_dim, m_dim,
+        for v_dim, m_dim, o_dim in zip(i_dim, j_dim, h_dim):
             self.transform, self.layer_norm = [], []
             input_dims = [v_dim, m_dim, v_dim]
             for _, input_dim in enumerate(input_dims):
                 self.transform.append(
-                    Linear(input_dim,
-                self.layer_norm.append(LayerNorm(
+                    Linear(input_dim, o_dim, bias=False).to(device))
+                self.layer_norm.append(LayerNorm(o_dim).to(device))

             self.transforms = torch.nn.ModuleList(self.transform)
             self.layer_norms = torch.nn.ModuleList(self.layer_norm)

             self.full_bias = Parameter(
-                torch.tensor(
+                torch.tensor(o_dim, dtype=torch.float, device=device))
             full_biases.append(self.full_bias)

-            self.reset_parameters(input_dims,
+            self.reset_parameters(input_dims, o_dim)

             self.non_linear = ReLU()
-            self.output_layer = Linear(
+            self.output_layer = Linear(o_dim, 1).to(device)

             gate = [
                 self.transforms, self.layer_norms, self.non_linear,
@@ -385,7 +385,7 @@ class GraphMaskExplainer(ExplainerAlgorithm):
                 f'Train explainer for graph {index} with layer '
                 f'{layer}')
             self._enable_layer(layer)
-            for
+            for _ in range(self.epochs):
                 with torch.no_grad():
                     model(x, edge_index, **kwargs)
                 gates, total_penalty = [], 0
@@ -192,7 +192,7 @@ class Explainer:
         if target is not None:
             warnings.warn(
                 f"The 'target' should not be provided for the explanation "
-                f"type '{self.explanation_type.value}'")
+                f"type '{self.explanation_type.value}'", stacklevel=2)
         prediction = self.get_prediction(x, edge_index, **kwargs)
         target = self.get_target(prediction)

@@ -16,8 +16,9 @@ try:  # Define global config object
     cfg = CN()
 except ImportError:
     cfg = None
-    warnings.warn(
+    warnings.warn(
+        "Could not define global config object. Please install "
+        "'yacs' via 'pip install yacs' in order to use GraphGym", stacklevel=2)


 def set_cfg(cfg):
@@ -11,5 +11,7 @@ except ImportError:
     LightningModule = torch.nn.Module
     Callback = object

-    warnings.warn(
+    warnings.warn(
+        "Please install 'pytorch_lightning' via "
+        "'pip install pytorch_lightning' in order to use GraphGym",
+        stacklevel=2)
@@ -239,7 +239,7 @@ def create_logger():
     r"""Create logger for the experiment."""
     loggers = []
     names = ['train', 'val', 'test']
-    for i,
+    for i, _ in enumerate(range(cfg.share.num_splits)):
         loggers.append(Logger(name=names[i], task_type=infer_task()))
     return loggers

@@ -53,7 +53,7 @@ class AtomEncoder(torch.nn.Module):

         self.atom_embedding_list = torch.nn.ModuleList()

-        for
+        for dim in get_atom_feature_dims():
             emb = torch.nn.Embedding(dim, emb_dim)
             torch.nn.init.xavier_uniform_(emb.weight.data)
             self.atom_embedding_list.append(emb)
@@ -87,7 +87,7 @@ class BondEncoder(torch.nn.Module):

         self.bond_embedding_list = torch.nn.ModuleList()

-        for
+        for dim in get_bond_feature_dims():
             emb = torch.nn.Embedding(dim, emb_dim)
             torch.nn.init.xavier_uniform_(emb.weight.data)
             self.bond_embedding_list.append(emb)
torch_geometric/hash_tensor.py CHANGED
@@ -69,10 +69,11 @@ def get_hash_map(key: Tensor) -> Union[CPUHashMap, CUDAHashMap]:
         return CUDAHashMap(key, 0.5)

     if key.is_cuda:
-        warnings.warn(
+        warnings.warn(
+            "Fallback to CPU-based mapping algorithm which may "
+            "cause slowdowns and device synchronization. Please "
+            "install 'pyg-lib' for an accelerated 'HashTensor' "
+            "implementation.", stacklevel=2)

     if torch_geometric.typing.WITH_CPU_HASH_MAP:
         return CPUHashMap(key.cpu(), -1)
torch_geometric/io/fs.py CHANGED
@@ -226,11 +226,12 @@ def torch_load(path: str, map_location: Any = None) -> Any:
                 "compatible in your case.")
             match = re.search(r'add_safe_globals\(.*?\)', error_msg)
             if match is not None:
-                warnings.warn(
+                warnings.warn(
+                    f"{warn_msg} Please use "
+                    f"`torch.serialization.{match.group()}` to "
+                    f"allowlist this global.", stacklevel=2)
             else:
-                warnings.warn(warn_msg)
+                warnings.warn(warn_msg, stacklevel=2)

     with fsspec.open(path, 'rb') as f:
         return torch.load(f, map_location, weights_only=False)
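
For reference, `torch.serialization.add_safe_globals` (available in recent PyTorch releases) is the allowlisting mechanism the new warning points to. A hedged sketch with a hypothetical class and checkpoint path:

```python
import torch
from torch.serialization import add_safe_globals

class MyMetadata:  # hypothetical class pickled inside a checkpoint
    pass

# Allowlist the class so `torch.load(..., weights_only=True)` may
# reconstruct it instead of raising an UnpicklingError:
add_safe_globals([MyMetadata])
obj = torch.load('checkpoint.pt', weights_only=True)  # placeholder path
```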
@@ -148,7 +148,7 @@ def indices_complete_check(
         if isinstance(aux, Tensor):
             aux = aux.cpu().numpy()

-        assert np.all(np.
+        assert np.all(np.isin(out,
                              aux)), "Not all output nodes are in aux nodes!"
         outs.append(out)

@@ -236,7 +236,7 @@ def create_batchwise_out_aux_pairs(
             logits[tele_set, i] = 1. / len(tele_set)

     new_logits = logits.clone()
-    for
+    for _ in range(num_iter):
         new_logits = adj @ new_logits * (1 - alpha) + alpha * logits

     inds = new_logits.argsort(0)
@@ -498,7 +498,7 @@ class IBMBBaseLoader(torch.utils.data.DataLoader):
         assert adj is not None

         for out, aux in pbar:
-            mask = torch.from_numpy(np.
+            mask = torch.from_numpy(np.isin(aux, out))
             if isinstance(aux, np.ndarray):
                 aux = torch.from_numpy(aux)
             subg = get_subgraph(aux, graph, return_edge_index_type, adj,
@@ -541,7 +541,7 @@ class IBMBBaseLoader(torch.utils.data.DataLoader):
         out, aux = zip(*data_list)
         out = np.concatenate(out)
         aux = np.unique(np.concatenate(aux))
-        mask = torch.from_numpy(np.
+        mask = torch.from_numpy(np.isin(aux, out))
         aux = torch.from_numpy(aux)

         subg = get_subgraph(aux, self.graph, self.return_edge_index_type,
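
The four hunks above complete calls to `numpy.isin`, which builds an element-wise membership mask; a small standalone example:

```python
import numpy as np

aux = np.array([0, 1, 2, 3, 4])
out = np.array([1, 3])

mask = np.isin(aux, out)  # True where an element of `aux` also occurs in `out`
print(mask)               # [False  True False  True False]
```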
torch_geometric/loader/mixin.py CHANGED
@@ -248,7 +248,8 @@ class AffinityMixin:
             warnings.warn(
                 "Due to conflicting parallelization methods it is not advised "
                 "to use affinitization with 'HeteroData' datasets. "
-                "Use `enable_multithreading` for better performance."
+                "Use `enable_multithreading` for better performance.",
+                stacklevel=2)

         self.loader_cores = loader_cores[:] if loader_cores else None
         if self.loader_cores is None:
@@ -27,8 +27,9 @@ class DeviceHelper:

         if ((self.device.type == 'cuda' and not with_cuda)
                 or (self.device.type == 'xpu' and not with_xpu)):
-            warnings.warn(
+            warnings.warn(
+                f"Requested device '{self.device.type}' is not "
+                f"available, falling back to CPU", stacklevel=2)
             self.device = torch.device('cpu')

         self.stream = None
torch_geometric/nn/aggr/fused.py CHANGED
@@ -216,7 +216,7 @@ class FusedAggregation(Aggregation):
         outs: List[Optional[Tensor]] = []

         # Iterate over all reduction ops to compute first results:
-        for
+        for reduce in self.reduce_ops:
             if reduce is None:
                 outs.append(None)
                 continue
torch_geometric/nn/conv/gen_conv.py CHANGED
@@ -178,7 +178,7 @@ class GENConv(MessagePassing):
         self.lin_dst = Linear(in_channels[1], out_channels, bias=bias)

         channels = [out_channels]
-        for
+        for _ in range(num_layers - 1):
             channels.append(out_channels * expansion)
         channels.append(out_channels)
         self.mlp = MLP(channels, norm=norm, bias=bias)
@@ -63,7 +63,8 @@ class GravNetConv(MessagePassing):
         if num_workers is not None:
             warnings.warn(
                 "'num_workers' attribute in '{self.__class__.__name__}' is "
-                "deprecated and will be removed in a future release"
+                "deprecated and will be removed in a future release",
+                stacklevel=2)

         self.in_channels = in_channels
         self.out_channels = out_channels
@@ -77,7 +77,8 @@ class HeteroConv(torch.nn.Module):
                 f"There exist node types ({src_node_types - dst_node_types}) "
                 f"whose representations do not get updated during message "
                 f"passing as they do not occur as destination type in any "
-                f"edge type. This may lead to unexpected behavior."
+                f"edge type. This may lead to unexpected behavior.",
+                stacklevel=2)

         self.convs = ModuleDict(convs)
         self.aggr = aggr
@@ -456,12 +456,13 @@ class MeshCNNConv(MessagePassing):
                 {type(network)}"
             if not hasattr(network, "in_channels") and \
                     not hasattr(network, "in_features"):
-                warn(
+                warn(
+                    f"kernel[{i}] does not have attribute \
                     'in_channels' nor 'out_features'. The \
                     network must take as input a \
                     {self.in_channels}-dimensional tensor. \
                     Still, assuming user configured \
-                    correctly. Continuing..")
+                    correctly. Continuing..", stacklevel=2)
             else:
                 input_dimension = getattr(network, "in_channels",
                                           network.in_features)
@@ -474,12 +475,13 @@ class MeshCNNConv(MessagePassing):

             if not hasattr(network, "out_channels") and \
                     not hasattr(network, "out_features"):
-                warn(
+                warn(
+                    f"kernel[{i}] does not have attribute \
                     'in_channels' nor 'out_features'. The \
                     network must take as input a \
                     {self.in_channels}-dimensional tensor. \
                     Still, assuming user configured \
-                    correctly. Continuing..")
+                    correctly. Continuing..", stacklevel=2)
             else:
                 output_dimension = getattr(network, "out_channels",
                                            network.out_features)
@@ -1029,6 +1029,7 @@ class MessagePassing(torch.nn.Module):
         :meth:`jittable` is deprecated and a no-op from :pyg:`PyG` 2.5
         onwards.
         """
-        warnings.warn(
+        warnings.warn(
+            f"'{self.__class__.__name__}.jittable' is deprecated "
+            f"and a no-op. Please remove its usage.", stacklevel=2)
         return self
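
Since :meth:`jittable` is documented above as a deprecated no-op from :pyg:`PyG` 2.5 onwards, existing call sites can simply drop it; a minimal sketch:

```python
from torch_geometric.nn import GCNConv

# Before PyG 2.5 one might have written: conv = GCNConv(16, 32).jittable()
# The call is now a deprecated no-op, so plain construction is enough:
conv = GCNConv(16, 32)
```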
@@ -90,7 +90,7 @@ class SGConv(MessagePassing):
                     edge_index, edge_weight, x.size(self.node_dim), False,
                     self.add_self_loops, self.flow, dtype=x.dtype)

-            for
+            for _ in range(self.K):
                 # propagate_type: (x: Tensor, edge_weight: OptTensor)
                 x = self.propagate(edge_index, x=x, edge_weight=edge_weight)
                 if self.cached:
@@ -132,7 +132,8 @@ class SplineConv(MessagePassing):
         if not x[0].is_cuda:
             warnings.warn(
                 'We do not recommend using the non-optimized CPU version of '
-                '`SplineConv`. If possible, please move your data to GPU.'
+                '`SplineConv`. If possible, please move your data to GPU.',
+                stacklevel=2)

         # propagate_type: (x: OptPairTensor, edge_attr: OptTensor)
         out = self.propagate(edge_index, x=x, edge_attr=edge_attr, size=size)
@@ -100,7 +100,7 @@ class SSGConv(MessagePassing):
                     self.add_self_loops, self.flow, dtype=x.dtype)

             h = x * self.alpha
-            for
+            for _ in range(self.K):
                 # propagate_type: (x: Tensor, edge_weight: OptTensor)
                 x = self.propagate(edge_index, x=x, edge_weight=edge_weight)
                 h = h + (1 - self.alpha) / self.K * x
@@ -57,10 +57,11 @@ class DataParallel(torch.nn.DataParallel):
                  follow_batch=None, exclude_keys=None):
         super().__init__(module, device_ids, output_device)

-        warnings.warn(
+        warnings.warn(
+            "'DataParallel' is usually much slower than "
+            "'DistributedDataParallel' even on a single machine. "
+            "Please consider switching to 'DistributedDataParallel' "
+            "for multi-GPU training.", stacklevel=2)

         self.src_device = torch.device(f'cuda:{self.device_ids[0]}')
         self.follow_batch = follow_batch or []
torch_geometric/nn/fx.py CHANGED
@@ -130,11 +130,13 @@ class Transformer:
         # (node-level, edge-level) by filling `self._state`:
         for node in list(self.graph.nodes):
             if node.op == 'call_function' and 'training' in node.kwargs:
-                warnings.warn(
+                warnings.warn(
+                    f"Found function '{node.name}' with keyword "
+                    f"argument 'training'. During FX tracing, this "
+                    f"will likely be baked in as a constant value. "
+                    f"Consider replacing this function by a module "
+                    f"to properly encapsulate its training flag.",
+                    stacklevel=2)

             if node.op == 'placeholder':
                 if node.name not in self._state:
@@ -160,7 +160,7 @@ class AttentiveFP(torch.nn.Module):
         edge_index = torch.stack([row, batch], dim=0)

         out = global_add_pool(x, batch).relu_()
-        for
+        for _ in range(self.num_timesteps):
             h = F.elu_(self.mol_conv((x, out), edge_index))
             h = F.dropout(h, p=self.dropout, training=self.training)
             out = self.mol_gru(h, out).relu_()