pyg-nightly 2.7.0.dev20250217__py3-none-any.whl → 2.7.0.dev20250219__py3-none-any.whl
- pyg_nightly-2.7.0.dev20250219.dist-info/LICENSE +19 -0
- {pyg_nightly-2.7.0.dev20250217.dist-info → pyg_nightly-2.7.0.dev20250219.dist-info}/METADATA +1 -1
- {pyg_nightly-2.7.0.dev20250217.dist-info → pyg_nightly-2.7.0.dev20250219.dist-info}/RECORD +6 -5
- torch_geometric/__init__.py +1 -1
- torch_geometric/hash_tensor.py +165 -39
- {pyg_nightly-2.7.0.dev20250217.dist-info → pyg_nightly-2.7.0.dev20250219.dist-info}/WHEEL +0 -0
pyg_nightly-2.7.0.dev20250219.dist-info/LICENSE
ADDED
@@ -0,0 +1,19 @@
+Copyright (c) 2023 PyG Team <team@pyg.org>
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
{pyg_nightly-2.7.0.dev20250217.dist-info → pyg_nightly-2.7.0.dev20250219.dist-info}/METADATA
RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: pyg-nightly
-Version: 2.7.0.dev20250217
+Version: 2.7.0.dev20250219
 Summary: Graph Neural Network Library for PyTorch
 Keywords: deep-learning,pytorch,geometric-deep-learning,graph-neural-networks,graph-convolutional-networks
 Author-email: Matthias Fey <matthias@pyg.org>
{pyg_nightly-2.7.0.dev20250217.dist-info → pyg_nightly-2.7.0.dev20250219.dist-info}/RECORD
RENAMED
@@ -1,4 +1,4 @@
-torch_geometric/__init__.py,sha256=
+torch_geometric/__init__.py,sha256=P-W8gnhQltNcKfeLyPOC4diB9zVIHQ5yi0IXFmjYzd4,1978
 torch_geometric/_compile.py,sha256=f-WQeH4VLi5Hn9lrgztFUCSrN_FImjhQa6BxFzcYC38,1338
 torch_geometric/_onnx.py,sha256=V9ffrIKSqhDw6xUZ12lkuSfNs48cQp2EeJ6Z19GfnVw,349
 torch_geometric/backend.py,sha256=lVaf7aLoVaB3M-UcByUJ1G4T4FOK6LXAg0CF4W3E8jo,1575
@@ -9,7 +9,7 @@ torch_geometric/deprecation.py,sha256=dWRymDIUkUVI2MeEmBG5WF4R6jObZeseSBV9G6FNfj
 torch_geometric/device.py,sha256=tU5-_lBNVbVHl_kUmWPwiG5mQ1pyapwMF4JkmtNN3MM,1224
 torch_geometric/edge_index.py,sha256=BsLh5tOZRjjSYDkjqOFAdBuvMaDg7EWaaLELYsUL0Z8,70048
 torch_geometric/experimental.py,sha256=JbtNNEXjFGI8hZ9raM6-qrZURP6Z5nlDK8QicZUIbz0,4756
-torch_geometric/hash_tensor.py,sha256=
+torch_geometric/hash_tensor.py,sha256=T4fRmS6TD-j7PKa9LMralSbI6naqA4ctW8AnjWsREzw,9615
 torch_geometric/home.py,sha256=EV54B4Dmiv61GDbkCwtCfWGWJ4eFGwZ8s3KOgGjwYgY,790
 torch_geometric/index.py,sha256=9ChzWFCwj2slNcVBOgfV-wQn-KscJe_y7502w-Vf76w,24045
 torch_geometric/inspector.py,sha256=nKi5o4Mn6xsG0Ex1GudTEQt_EqnF9mcMqGtp7Shh9sQ,19336
@@ -633,6 +633,7 @@ torch_geometric/utils/undirected.py,sha256=H_nfpI0_WluOG6VfjPyldvcjL4w5USAKWu2x5
 torch_geometric/visualization/__init__.py,sha256=PyR_4K5SafsJrBr6qWrkjKr6GBL1b7FtZybyXCDEVwY,154
 torch_geometric/visualization/graph.py,sha256=ZuLPL92yGRi7lxlqsUPwL_EVVXF7P2kMcveTtW79vpA,4784
 torch_geometric/visualization/influence.py,sha256=CWMvuNA_Nf1sfbJmQgn58yS4OFpeKXeZPe7kEuvkUBw,477
-pyg_nightly-2.7.0.
-pyg_nightly-2.7.0.
-pyg_nightly-2.7.0.
+pyg_nightly-2.7.0.dev20250219.dist-info/LICENSE,sha256=ic-27cMJc1kWoMEYncz3Ya3Ur2Bi3bNLWib2DT763-o,1067
+pyg_nightly-2.7.0.dev20250219.dist-info/WHEEL,sha256=CpUCUxeHQbRN5UGRQHYRJorO5Af-Qy_fHMctcQ8DSGI,82
+pyg_nightly-2.7.0.dev20250219.dist-info/METADATA,sha256=vvByDyHVHwRDOLKEnsUSA1Dh2obTtvRZvzGks5iU8XA,62999
+pyg_nightly-2.7.0.dev20250219.dist-info/RECORD,,
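Each RECORD entry has the form path,sha256=<digest>,<size in bytes>, where the digest is the urlsafe-base64 encoding of the file's SHA-256 hash with the trailing '=' padding stripped. A short snippet to reproduce such a digest for a local file (the path below is only an example):

    import base64
    import hashlib

    # Any file from the installed wheel works; this path is just an example:
    with open('torch_geometric/hash_tensor.py', 'rb') as f:
        digest = hashlib.sha256(f.read()).digest()

    # Matches the RECORD digest format: urlsafe base64 without '=' padding.
    print('sha256=' + base64.urlsafe_b64encode(digest).rstrip(b'=').decode())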
torch_geometric/__init__.py
CHANGED
@@ -31,7 +31,7 @@ from .lazy_loader import LazyLoader
 contrib = LazyLoader('contrib', globals(), 'torch_geometric.contrib')
 graphgym = LazyLoader('graphgym', globals(), 'torch_geometric.graphgym')
 
-__version__ = '2.7.0.dev20250217'
+__version__ = '2.7.0.dev20250219'
 
 __all__ = [
     'Index',
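The only change to the top-level module is the nightly version bump, which can be verified after installing the new wheel:

    import torch_geometric

    print(torch_geometric.__version__)  # expected: '2.7.0.dev20250219'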
torch_geometric/hash_tensor.py
CHANGED
@@ -1,3 +1,5 @@
+import functools
+import warnings
 from typing import Any, Callable, Dict, Iterable, Optional, Tuple, Type, Union
 
 import torch
@@ -8,9 +10,21 @@ from torch import Tensor
 import torch_geometric.typing
 from torch_geometric.typing import CPUHashMap, CUDAHashMap
 
+aten = torch.ops.aten
+
 HANDLED_FUNCTIONS: Dict[Callable, Callable] = {}
 
 
+def implements(torch_function: Callable) -> Callable:
+    r"""Registers a :pytorch:`PyTorch` function override."""
+    @functools.wraps(torch_function)
+    def decorator(my_function: Callable) -> Callable:
+        HANDLED_FUNCTIONS[torch_function] = my_function
+        return my_function
+
+    return decorator
+
+
 def as_key_tensor(
     key: Any,
     *,
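The new implements decorator is the usual override-registry pattern for tensor subclasses: each decorated function is stored in HANDLED_FUNCTIONS keyed by the ATen op it replaces, so the dispatcher can later prefer it over the default op. A standalone sketch of the same pattern (the names _OVERRIDES and my_unsqueeze are illustrative, not part of the package):

    import functools
    from typing import Callable, Dict

    import torch

    # Registry mapping an ATen op to its override, mirroring HANDLED_FUNCTIONS:
    _OVERRIDES: Dict[Callable, Callable] = {}


    def implements(torch_function: Callable) -> Callable:
        @functools.wraps(torch_function)
        def decorator(my_function: Callable) -> Callable:
            _OVERRIDES[torch_function] = my_function
            return my_function

        return decorator


    @implements(torch.ops.aten.unsqueeze.default)
    def my_unsqueeze(tensor: torch.Tensor, dim: int) -> torch.Tensor:
        return torch.ops.aten.unsqueeze.default(tensor, dim)


    # A dispatcher looks up the override and falls back to the op itself:
    func = torch.ops.aten.unsqueeze.default
    handler = _OVERRIDES.get(func, func)
    print(handler(torch.zeros(3), 0).shape)  # torch.Size([1, 3])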
@@ -20,15 +34,16 @@ def as_key_tensor(
         key = torch.as_tensor(key, device=device)
     except Exception:
         device = device or torch.get_default_device()
+        # TODO Convert int64 to int32.
         # On GPU, we default to int32 for faster 'CUDAHashMap' implementation:
-        if device.type == 'cuda':
-
-
-
-
-
-
-
+        if torch_geometric.typing.WITH_CUDA_HASH_MAP and device.type == 'cuda':
+            pass
+            # key = torch.tensor(
+            #     [xxhash.xxh32(x).intdigest() & 0x7FFFFFFF for x in key],
+            #     dtype=torch.int32, device=device)
+        key = torch.tensor(
+            [xxhash.xxh64(x).intdigest() & 0x7FFFFFFFFFFFFFFF for x in key],
+            dtype=torch.int64, device=device)
 
     if key.element_size() == 1:
         key = key.view(torch.uint8)
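The reworked fallback path covers keys that cannot be turned into a tensor directly (e.g. lists of strings): each key is hashed with xxhash and the top bit is masked off so the digest fits into a signed int64 (the commented-out branch sketches a future 32-bit variant for the CUDA hash map). A small illustration of that hashing step, assuming the optional xxhash package is installed:

    import torch
    import xxhash

    keys = ['paper', 'author', 'institution']

    # 64-bit digests, masked to stay non-negative in a signed int64 tensor:
    hashed = torch.tensor(
        [xxhash.xxh64(k).intdigest() & 0x7FFFFFFFFFFFFFFF for k in keys],
        dtype=torch.int64,
    )
    print(hashed.shape)  # torch.Size([3])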
@@ -45,6 +60,28 @@ def as_key_tensor(
     return key
 
 
+def get_hash_map(key: Tensor) -> Union[CPUHashMap, CUDAHashMap]:
+    if torch_geometric.typing.WITH_CUDA_HASH_MAP and key.is_cuda:
+        # TODO Convert int64 to int32.
+        return CUDAHashMap(key, 0.5)
+
+    if key.is_cuda:
+        warnings.warn("Fallback to CPU-based mapping algorithm which may "
+                      "cause slowdowns and device synchronization. Please "
+                      "install 'pyg-lib' for an accelerated 'HashTensor' "
+                      "implementation.")
+
+    if torch_geometric.typing.WITH_CPU_HASH_MAP:
+        return CPUHashMap(key.cpu(), -1)
+
+    import pandas as pd
+
+    return pd.CategoricalDtype(
+        categories=key.cpu().numpy(),
+        ordered=True,
+    )
+
+
 class HashTensor(Tensor):
     _map: Union[Tensor, CPUHashMap, CUDAHashMap]
     _value: Optional[Tensor]
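get_hash_map encodes a clear preference order: a pyg-lib CUDAHashMap on GPU, a CPUHashMap otherwise, and finally a pandas CategoricalDtype when pyg-lib is not available (with a warning if that forces CUDA keys through the CPU). The CategoricalDtype is not a hash map itself, but its ordered categories can translate query keys into positions. A rough sketch of how such a fallback can be queried (this lookup code is illustrative and not part of the hunk above):

    import pandas as pd
    import torch

    key = torch.tensor([10, 42, 7])  # stored keys
    cat = pd.CategoricalDtype(categories=key.numpy(), ordered=True)

    query = torch.tensor([42, 7, 99])  # 99 was never inserted
    codes = pd.Categorical(query.numpy(), dtype=cat).codes
    print(torch.as_tensor(codes, dtype=torch.long))  # tensor([ 1,  2, -1])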
@@ -66,7 +103,6 @@ class HashTensor(Tensor):
             device = value.device
 
         key = as_key_tensor(key, device=device)
-        device = key.device
 
         if key.dim() != 1:
             raise ValueError(f"'key' data in '{cls.__name__}' needs to be "
@@ -88,6 +124,49 @@ class HashTensor(Tensor):
                              f"first dimension (got {key.size(0)} and "
                              f"{value.size(0)})")
 
+        min_key = key.min() if key.numel() > 0 else key.new_zeros(())
+        max_key = key.max() if key.numel() > 0 else key.new_zeros(())
+
+        _range = max_key - min_key
+        # TODO Expose fixed threshold as argument.
+        if (key.dtype in {torch.uint8, torch.int16} or _range <= 1_000_000
+                or _range <= 2 * key.numel()):
+            _map = torch.full(
+                size=(_range + 2, ),
+                fill_value=-1,
+                dtype=torch.int64,
+                device=device,
+            )
+            _map[(key - (min_key - 1)).long()] = torch.arange(
+                key.numel(),
+                dtype=_map.dtype,
+                device=_map.device,
+            )
+        else:
+            _map = get_hash_map(key)
+
+        return cls._from_data(
+            _map,
+            value,
+            min_key,
+            max_key,
+            num_keys=key.numel(),
+            dtype=dtype,
+        )
+
+    @classmethod
+    def _from_data(
+        cls,
+        _map: Union[Tensor, CPUHashMap, CUDAHashMap],
+        value: Optional[Tensor],
+        min_key: Tensor,
+        max_key: Tensor,
+        *,
+        num_keys: int,
+        dtype: Optional[torch.dtype],
+    ) -> 'HashTensor':
+
+        if value is not None:
             dtype = value.dtype
             size = value.size()
             stride = value.stride()
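__new__ now chooses between two index structures: for small key ranges (tiny integer dtypes, a range below a fixed threshold, or a range at most twice the number of keys) it builds a dense lookup table via torch.full, shifted by min_key - 1 so that a sentinel slot can answer unknown keys with -1; otherwise it defers to get_hash_map. A standalone sketch of the dense path (variable names are illustrative, and the clamped query at the end is my addition to show how misses resolve):

    import torch

    key = torch.tensor([100, 103, 107])  # stored keys
    min_key, max_key = key.min(), key.max()
    _range = int(max_key - min_key)

    # Dense table over the key range (+2 slots, including one for misses):
    _map = torch.full((_range + 2, ), -1, dtype=torch.int64)
    _map[(key - (min_key - 1)).long()] = torch.arange(key.numel())

    query = torch.tensor([103, 100, 50])  # 50 was never inserted
    index = _map[(query - (min_key - 1)).clamp(0, _range + 1).long()]
    print(index)  # tensor([ 1,  0, -1])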
@@ -95,7 +174,7 @@ class HashTensor(Tensor):
             requires_grad = value.requires_grad
         else:
             dtype = dtype or torch.int64
-            size = torch.Size([
+            size = torch.Size([num_keys])
             stride = (1, )
             layout = torch.strided
             requires_grad = False
@@ -105,43 +184,21 @@ class HashTensor(Tensor):
             size=size,
             strides=stride,
             dtype=dtype,
-            device=device,
+            device=min_key.device,
             layout=layout,
             requires_grad=requires_grad,
         )
+        assert isinstance(out, HashTensor)
 
+        out._map = _map
         out._value = value
-
-        out._min_key = key.min() if key.numel() > 0 else key.new_zeros(())
-        out._max_key = key.max() if key.numel() > 0 else key.new_zeros(())
-
-        _range = out._max_key - out._min_key
-        # TODO Expose fixed threshold as argument.
-        if (key.dtype in {torch.uint8, torch.int16} or _range <= 1_000_000
-                or _range <= 2 * key.numel()):
-            out._map = torch.full(
-                size=(_range + 2, ),
-                fill_value=-1,
-                dtype=torch.int64,
-                device=device,
-            )
-            out._map[(key - (out._min_key - 1)).long()] = torch.arange(
-                key.numel(),
-                dtype=out._map.dtype,
-                device=out._map.device,
-            )
-        elif torch_geometric.typing.WITH_CUDA_HASH_MAP and key.is_cuda:
-            # TODO Convert int64 to int32.
-            out._map = CUDAHashMap(key, 0.5)
-        elif torch_geometric.typing.WITH_CPU_HASH_MAP and key.is_cpu:
-            out._map = CPUHashMap(key, -1)
-        else:
-            # TODO Expose pandas fallback.
-            # warnings.warn()
-            raise NotImplementedError
+        out._min_key = min_key
+        out._max_key = max_key
 
         return out
 
+    # Methods #################################################################
+
     def as_tensor(self) -> Tensor:
         r"""Zero-copies the :class:`HashTensor` representation back to a
         :class:`torch.Tensor` representation.
@@ -150,6 +207,11 @@ class HashTensor(Tensor):
             return self._value
         return torch.arange(self.size(0), dtype=self.dtype, device=self.device)
 
+    # PyTorch/Python builtins #################################################
+
+    # Prevent auto-wrapping outputs back into the proper subclass type:
+    __torch_function__ = torch._C._disabled_torch_function_impl
+
     @classmethod
     def __torch_dispatch__(
         cls: Type,
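Disabling __torch_function__ stops PyTorch from automatically re-wrapping results into the subclass, while __torch_dispatch__ (whose unchanged body is visible as context in the next hunk) unwraps HashTensor arguments back into plain tensors before running the ATen op. A self-contained toy subclass showing that unwrap-and-forward mechanic (this is a sketch of the idiom, not the HashTensor code, and it relies on the private _make_wrapper_subclass API):

    import torch
    from torch import Tensor
    from torch.utils import _pytree as pytree


    class Unwrapping(Tensor):
        _value: Tensor

        def __new__(cls, value: Tensor) -> 'Unwrapping':
            # Metadata-only shell; the actual data lives in `_value`:
            out = Tensor._make_wrapper_subclass(
                cls,
                size=value.size(),
                strides=value.stride(),
                dtype=value.dtype,
                device=value.device,
            )
            out._value = value
            return out

        # Do not auto-wrap outputs of torch.* calls back into this class:
        __torch_function__ = torch._C._disabled_torch_function_impl

        @classmethod
        def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
            # Replace every `Unwrapping` argument by its underlying tensor:
            args = pytree.tree_map_only(Unwrapping, lambda x: x._value, args)
            kwargs = pytree.tree_map_only(Unwrapping, lambda x: x._value,
                                          kwargs or {})
            return func(*args, **kwargs)


    x = Unwrapping(torch.ones(3))
    print(torch.add(x, 1))  # tensor([2., 2., 2.]) -- a plain Tensor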
@@ -169,3 +231,67 @@ class HashTensor(Tensor):
         kwargs = pytree.tree_map_only(HashTensor, lambda x: x.as_tensor(),
                                       kwargs)
         return func(*args, **(kwargs or {}))
+
+
+@implements(aten._to_copy.default)
+def _to_copy(
+    tensor: HashTensor,
+    *,
+    dtype: Optional[torch.dtype] = None,
+    layout: Optional[torch.layout] = None,
+    device: Optional[torch.device] = None,
+    pin_memory: bool = False,
+    non_blocking: bool = False,
+    memory_format: Optional[torch.memory_format] = None,
+) -> HashTensor:
+
+    value = tensor._value
+    if value is not None:
+        value = aten._to_copy.default(
+            value,
+            dtype=dtype,
+            layout=layout,
+            device=device,
+            pin_memory=pin_memory,
+            non_blocking=non_blocking,
+            memory_format=memory_format,
+        )
+
+    min_key = aten._to_copy.default(tensor._min_key, device=device)
+    max_key = aten._to_copy.default(tensor._max_key, device=device)
+
+    _map = tensor._map
+    if isinstance(_map, Tensor):
+        _map = aten._to_copy.default(_map, device=device)
+    # Only convert `_map` in case `CUDAHashMap` exists - otherwise we use
+    # CPU-based mapping anyway and there is no need for a copy.
+    elif (torch_geometric.typing.WITH_CUDA_HASH_MAP and min_key.is_cuda
+          and tensor._min_key.device != min_key.device):
+        key = _map.keys()
+        key = aten._to_copy.default(key, device=device)
+        _map = get_hash_map(key)
+
+    return tensor._from_data(
+        _map,
+        value,
+        min_key,
+        max_key,
+        num_keys=tensor.size(0),
+        dtype=dtype or tensor.dtype,
+    )
+
+
+@implements(aten.unsqueeze.default)
+def _unsqueeze(tensor: HashTensor, dim: int) -> HashTensor:
+    if dim == 0 or dim == -(tensor.dim() + 1):
+        raise IndexError(f"Cannot unsqueeze '{tensor.__class__.__name__}' in "
+                         f"the first dimension")
+
+    return tensor._from_data(
+        tensor._map,
+        aten.unsqueeze.default(tensor.as_tensor(), dim),
+        tensor._min_key,
+        tensor._max_key,
+        num_keys=tensor.size(0),
+        dtype=tensor.dtype,
+    )
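With these overrides registered, dtype/device conversions and non-leading unsqueeze calls are intended to come back as HashTensor instances instead of decomposing into plain tensors, while unsqueezing the key dimension is rejected. A hedged usage sketch of that intended behavior (the class is still under active development in these nightlies, so the exact behavior may differ):

    import torch
    from torch_geometric.hash_tensor import HashTensor

    key = torch.tensor([1000, 2000, 3000])
    value = torch.randn(3, 8)
    ht = HashTensor(key, value)

    # aten._to_copy is intercepted, so conversions should stay HashTensors:
    ht16 = ht.to(torch.float16)
    print(type(ht16).__name__, ht16.dtype)  # HashTensor torch.float16

    # Unsqueezing any dimension except the key dimension is supported:
    print(ht.unsqueeze(-1).shape)  # torch.Size([3, 8, 1])

    # Unsqueezing the key dimension itself raises an IndexError:
    try:
        ht.unsqueeze(0)
    except IndexError as err:
        print(err)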
{pyg_nightly-2.7.0.dev20250217.dist-info → pyg_nightly-2.7.0.dev20250219.dist-info}/WHEEL
RENAMED
File without changes