pyg-nightly 2.7.0.dev20250222__py3-none-any.whl → 2.7.0.dev20250224__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to their public registry. It is provided for informational purposes only.
METADATA:
```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pyg-nightly
-Version: 2.7.0.dev20250222
+Version: 2.7.0.dev20250224
 Summary: Graph Neural Network Library for PyTorch
 Keywords: deep-learning,pytorch,geometric-deep-learning,graph-neural-networks,graph-convolutional-networks
 Author-email: Matthias Fey <matthias@pyg.org>
```
RECORD:
```diff
@@ -1,4 +1,4 @@
-torch_geometric/__init__.py,sha256=P_eeFAUMVSeYfsS1X62TDRCGKklzvWG3UW2-KJcsFAo,1978
+torch_geometric/__init__.py,sha256=nc0R-kBpZZJ_9mHk-xQJhyukW9OMLPN9EmnGrJIwu1c,1978
 torch_geometric/_compile.py,sha256=f-WQeH4VLi5Hn9lrgztFUCSrN_FImjhQa6BxFzcYC38,1338
 torch_geometric/_onnx.py,sha256=V9ffrIKSqhDw6xUZ12lkuSfNs48cQp2EeJ6Z19GfnVw,349
 torch_geometric/backend.py,sha256=lVaf7aLoVaB3M-UcByUJ1G4T4FOK6LXAg0CF4W3E8jo,1575
@@ -9,7 +9,7 @@ torch_geometric/deprecation.py,sha256=dWRymDIUkUVI2MeEmBG5WF4R6jObZeseSBV9G6FNfj
 torch_geometric/device.py,sha256=tU5-_lBNVbVHl_kUmWPwiG5mQ1pyapwMF4JkmtNN3MM,1224
 torch_geometric/edge_index.py,sha256=BsLh5tOZRjjSYDkjqOFAdBuvMaDg7EWaaLELYsUL0Z8,70048
 torch_geometric/experimental.py,sha256=JbtNNEXjFGI8hZ9raM6-qrZURP6Z5nlDK8QicZUIbz0,4756
-torch_geometric/hash_tensor.py,sha256=AlPwX3spNoJ4-gHLlLY9_beETe7eTbtYtY33tKOJs1g,14503
+torch_geometric/hash_tensor.py,sha256=koofBrEEo5oHCCt0gH6gCkiywyxSFSGdk-keg7MXGf4,19490
 torch_geometric/home.py,sha256=EV54B4Dmiv61GDbkCwtCfWGWJ4eFGwZ8s3KOgGjwYgY,790
 torch_geometric/index.py,sha256=9ChzWFCwj2slNcVBOgfV-wQn-KscJe_y7502w-Vf76w,24045
 torch_geometric/inspector.py,sha256=nKi5o4Mn6xsG0Ex1GudTEQt_EqnF9mcMqGtp7Shh9sQ,19336
@@ -513,10 +513,10 @@ torch_geometric/sampler/base.py,sha256=kT6hYM6losYta3pqLQlqiqboJiujLy6RlH8qM--U_
 torch_geometric/sampler/hgt_sampler.py,sha256=UAm8_wwzEcziKDJ8-TnfZh1705dXRsy_I5PKhZSDTK8,2721
 torch_geometric/sampler/neighbor_sampler.py,sha256=MAVphWqNf0-cwlHRvdiU8de86dBxwjm3Miam_6s1ep4,33971
 torch_geometric/sampler/utils.py,sha256=RJtasO6Q7Pp3oYEOWrbf2DEYuSfuKZOsF2I7-eJDnoA,5485
-torch_geometric/testing/__init__.py,sha256=QUTeYNkmibxFu08AlZGzAnMHfEoBp2kt9o65k0wmfmU,1249
+torch_geometric/testing/__init__.py,sha256=0mAGVWRrTBNsGV2YUkCu_FkyQ8JIcrYVw2LsdKgY9ak,1291
 torch_geometric/testing/asserts.py,sha256=DLC9HnBgFWuTIiQs2OalsQcXGhOVG-e6R99IWhkO32c,4606
 torch_geometric/testing/data.py,sha256=O1qo8FyNxt6RGf63Ys3eXBfa5RvYydeZLk74szrez3c,2604
-torch_geometric/testing/decorators.py,sha256=b0Xqpu-qdiElGo0cFG8cSu-Pqgce7NH8xcoI0NigWiM,8309
+torch_geometric/testing/decorators.py,sha256=j45wlxMB1-Pn3wPKBgDziqg6KkWJUb_fcwfUXzkL2mM,8677
 torch_geometric/testing/distributed.py,sha256=ZZCCXqiQC4-m1ExSjDZhS_a1qPXnHEwhJGTmACxNnVI,2227
 torch_geometric/testing/feature_store.py,sha256=J6JBIt2XK-t8yG8B4JzXp-aJcVl5jaCS1m2H7d6OUxs,2158
 torch_geometric/testing/graph_store.py,sha256=00B7QToCIspYmgN7svQKp1iU-qAzEtrt3VQRFxkHfuk,1044
@@ -633,7 +633,7 @@ torch_geometric/utils/undirected.py,sha256=H_nfpI0_WluOG6VfjPyldvcjL4w5USAKWu2x5
 torch_geometric/visualization/__init__.py,sha256=PyR_4K5SafsJrBr6qWrkjKr6GBL1b7FtZybyXCDEVwY,154
 torch_geometric/visualization/graph.py,sha256=ZuLPL92yGRi7lxlqsUPwL_EVVXF7P2kMcveTtW79vpA,4784
 torch_geometric/visualization/influence.py,sha256=CWMvuNA_Nf1sfbJmQgn58yS4OFpeKXeZPe7kEuvkUBw,477
-pyg_nightly-2.7.0.dev20250222.dist-info/licenses/LICENSE,sha256=ic-27cMJc1kWoMEYncz3Ya3Ur2Bi3bNLWib2DT763-o,1067
-pyg_nightly-2.7.0.dev20250222.dist-info/WHEEL,sha256=_2ozNFCLWc93bK4WKHCO-eDUENDlo-dgc9cU3qokYO4,82
-pyg_nightly-2.7.0.dev20250222.dist-info/METADATA,sha256=G_VhMGb5Inx-4RejhUEhgXEGyFipKayNtCZpJJyiTyA,63021
-pyg_nightly-2.7.0.dev20250222.dist-info/RECORD,,
+pyg_nightly-2.7.0.dev20250224.dist-info/licenses/LICENSE,sha256=ic-27cMJc1kWoMEYncz3Ya3Ur2Bi3bNLWib2DT763-o,1067
+pyg_nightly-2.7.0.dev20250224.dist-info/WHEEL,sha256=_2ozNFCLWc93bK4WKHCO-eDUENDlo-dgc9cU3qokYO4,82
+pyg_nightly-2.7.0.dev20250224.dist-info/METADATA,sha256=OzM9N2hJ-COuEqz7HmEj8_wZkdLZDavaZfWDS5j08SA,63021
+pyg_nightly-2.7.0.dev20250224.dist-info/RECORD,,
```
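The RECORD churn above is mechanical: the dist-info paths pick up the new version string, and the hashes of the four modified files are refreshed: torch_geometric/__init__.py (version bump only; the size stays 1978 bytes), torch_geometric/hash_tensor.py (14503 → 19490 bytes, the bulk of this release), torch_geometric/testing/__init__.py, and torch_geometric/testing/decorators.py. Each RECORD digest is the unpadded URL-safe base64 encoding of the file's SHA-256, so any entry can be re-checked against a downloaded wheel (a minimal sketch; the local wheel filename is illustrative):

```python
import base64
import hashlib
import zipfile

# Read one file out of the wheel (a wheel is a plain zip archive):
with zipfile.ZipFile("pyg_nightly-2.7.0.dev20250224-py3-none-any.whl") as whl:
    data = whl.read("torch_geometric/hash_tensor.py")

# RECORD stores `sha256=<urlsafe-base64 digest, padding stripped>`:
digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest())
print("sha256=" + digest.rstrip(b"=").decode())
```

The source-level hunks behind these hash changes follow.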
torch_geometric/__init__.py:
```diff
@@ -31,7 +31,7 @@ from .lazy_loader import LazyLoader
 contrib = LazyLoader('contrib', globals(), 'torch_geometric.contrib')
 graphgym = LazyLoader('graphgym', globals(), 'torch_geometric.graphgym')
 
-__version__ = '2.7.0.dev20250222'
+__version__ = '2.7.0.dev20250224'
 
 __all__ = [
     'Index',
```
torch_geometric/hash_tensor.py:
```diff
@@ -209,6 +209,20 @@ class HashTensor(Tensor):
 
         return out
 
+    @property
+    def _key(self) -> Tensor:
+        if isinstance(self._map, Tensor):
+            mask = self._map >= 0
+            key = mask.nonzero().view(-1) - 1
+            key = key[self._map[mask]]
+        elif (torch_geometric.typing.WITH_CUDA_HASH_MAP
+              or torch_geometric.typing.WITH_CPU_HASH_MAP):
+            key = self._map.keys().to(self.device)
+        else:
+            key = torch.from_numpy(self._map.categories.to_numpy())
+
+        return key.to(self.device)
+
     def _shallow_copy(self) -> 'HashTensor':
         return self._from_data(
             self._map,
```
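The first substantive change adds a `_key` property that recovers the original keys from whichever map backend is in use: a dense lookup `Tensor`, a CPU/CUDA hash map exposing `keys()`, or the pandas-categorical fallback (`categories`). For the dense branch, the idea is to invert the lookup table back into per-slot keys. Below is a minimal standalone sketch of that inversion; the table layout (`dense_map[key + 1]` holds each key's slot, `-1` marks absent keys) is an assumption for illustration and may not match `HashTensor`'s internal layout exactly:

```python
import torch

keys = torch.tensor([3, 0, 2])  # keys in insertion (slot) order

# Hypothetical dense lookup table: dense_map[key + 1] = slot, -1 = absent.
dense_map = torch.full((int(keys.max()) + 2,), -1, dtype=torch.long)
dense_map[keys + 1] = torch.arange(keys.numel())

# Recover the keys in slot order from the table alone:
mask = dense_map >= 0
sorted_keys = mask.nonzero().view(-1) - 1  # present keys, ascending
slots = dense_map[mask]                    # slot of each sorted key
recovered = torch.empty_like(sorted_keys)
recovered[slots] = sorted_keys             # invert the permutation
assert recovered.tolist() == [3, 0, 2]
```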
```diff
@@ -239,12 +253,16 @@ class HashTensor(Tensor):
             return index.to(self.dtype)
 
         out = self._value[index]
+
         mask = index != -1
         mask = mask.view([-1] + [1] * (out.dim() - 1))
-        if out.is_floating_point():
-            return out.where(mask, float('NaN'))
+        fill_value = float('NaN') if out.is_floating_point() else -1
+        if torch_geometric.typing.WITH_PT20:
+            other: Union[int, float, Tensor] = fill_value
         else:
-            return out.where(mask, -1)
+            other = torch.full_like(out, fill_value)
+
+        return out.where(mask, other)
 
     # Methods #################################################################
 
```
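The lookup path now fills misses (`index == -1`) with `NaN` for floating-point values and `-1` otherwise, in a single `where` call. The new code only passes a Python scalar as `other` when `WITH_PT20` is set; on older PyTorch the scalar is materialized with `torch.full_like` first. A standalone sketch of the two paths:

```python
import torch

out = torch.tensor([[1.0], [2.0], [3.0]])             # gathered values
mask = torch.tensor([True, False, True]).view(-1, 1)  # index != -1

fill_value = float("NaN") if out.is_floating_point() else -1

# PyTorch >= 2.0: `Tensor.where` accepts a scalar `other` directly.
new = out.where(mask, fill_value)

# Pre-2.0 fallback: materialize the scalar as a tensor first.
old = out.where(mask, torch.full_like(out, fill_value))

assert torch.equal(new.isnan(), old.isnan())  # both fill row 1 with NaN
```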
```diff
@@ -281,15 +299,89 @@ class HashTensor(Tensor):
                                 kwargs)
         return func(*args, **(kwargs or {}))
 
-    def index_select(self, dim: int, index: Any) -> Tensor:  # type: ignore
+    def tolist(self) -> List[Any]:
+        """"""  # noqa: D419
+        return self.as_tensor().tolist()
+
+    def index_select(  # type: ignore
+        self,
+        dim: int,
+        index: Any,
+    ) -> Union['HashTensor', Tensor]:
+        """"""  # noqa: D419
         return torch.index_select(self, dim, index)
 
+    def select(  # type: ignore
+        self,
+        dim: int,
+        index: Any,
+    ) -> Union['HashTensor', Tensor]:
+        """"""  # noqa: D419
+        return torch.select(self, dim, index)
+
+    def share_memory_(self) -> 'HashTensor':
+        """"""  # noqa: D419
+        if isinstance(self._map, Tensor):
+            self._map.share_memory_()
+        if self._value is not None:
+            self._value.share_memory_()
+        self._min_key.share_memory_()
+        self._max_key.share_memory_()
+        return self
+
+    def is_shared(self) -> bool:
+        """"""  # noqa: D419
+        return self._min_key.is_shared()
+
+    def detach_(self) -> 'HashTensor':  # type: ignore
+        """"""  # noqa: D419
+        if self._value is not None:
+            self._value.detach_()
+        return super().detach_()  # type: ignore
+
 
 @implements(aten.alias.default)
 def _alias(tensor: HashTensor) -> HashTensor:
     return tensor._shallow_copy()
 
 
+@implements(aten.clone.default)
+def _clone(
+    tensor: HashTensor,
+    *,
+    memory_format: torch.memory_format = torch.preserve_format,
+) -> HashTensor:
+
+    value = tensor._value
+    if value is not None:
+        value = aten.clone.default(value, memory_format=memory_format)
+
+    return tensor._from_data(
+        tensor._map,  # NOTE No reason to do clone since it is read-only.
+        value,
+        tensor._min_key,  # NOTE No reason to do clone since it is read-only.
+        tensor._max_key,  # NOTE No reason to do clone since it is read-only.
+        num_keys=tensor.size(0),
+        dtype=tensor.dtype,
+    )
+
+
+@implements(aten.detach.default)
+def _detach(tensor: HashTensor) -> HashTensor:
+    value = tensor._value
+    if value is not None:
+        value = aten.detach.default(value)
+
+    return tensor._from_data(
+        tensor._map,
+        value,
+        tensor._min_key,
+        tensor._max_key,
+        num_keys=tensor.size(0),
+        dtype=tensor.dtype,
+    )
+
+
 @implements(aten._to_copy.default)
 def _to_copy(
     tensor: HashTensor,
```
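`clone` and `detach` now stay within the subclass: both rebuild a `HashTensor` through `_from_data`, cloning or detaching only `_value` while sharing the read-only map and key bounds. They hook into the same `@implements` table already used for `aten.alias` above. A minimal sketch of that registration pattern (the `HANDLED_FUNCTIONS` name is illustrative; the actual helper in `hash_tensor.py` may differ):

```python
from typing import Any, Callable, Dict

HANDLED_FUNCTIONS: Dict[Any, Callable] = {}

def implements(aten_op: Any) -> Callable:
    """Register a custom implementation for a given ATen overload."""
    def decorator(func: Callable) -> Callable:
        HANDLED_FUNCTIONS[aten_op] = func
        return func
    return decorator

# `__torch_dispatch__` then reroutes registered ops and otherwise falls
# back to the default kernel, matching the fallback visible above:
#     return func(*args, **(kwargs or {}))
```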
```diff
@@ -322,8 +414,8 @@ def _to_copy(
         _map = aten._to_copy.default(_map, device=device)
     # Only convert `_map` in case `CUDAHashMap` exists - otherwise we use
     # CPU-based mapping anyway and there is no need for a copy.
-    elif (torch_geometric.typing.WITH_CUDA_HASH_MAP and min_key.is_cuda
-          and tensor._min_key.device != min_key.device):
+    elif (torch_geometric.typing.WITH_CUDA_HASH_MAP and tensor.is_cuda
+          and tensor.device != min_key.device):
         key = _map.keys()
         key = aten._to_copy.default(key, device=device)
         _map = get_hash_map(key)
```
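Judging from the hunk, this fixes the device-transfer guard in `_to_copy`: the CUDA hash map is now rebuilt when the source `HashTensor` itself lives on CUDA and the copy targets a different device (`tensor.is_cuda and tensor.device != min_key.device`), instead of keying the check off the already-copied `min_key` tensor.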
```diff
@@ -338,6 +430,26 @@
     )
 
 
+@implements(aten._pin_memory.default)
+def _pin_memory(tensor: HashTensor) -> HashTensor:
+    _map = tensor._map
+    if isinstance(_map, Tensor):
+        _map = aten._pin_memory.default(_map)
+
+    value = tensor._value
+    if value is not None:
+        value = aten._pin_memory.default(value)
+
+    return tensor._from_data(
+        _map,
+        value,
+        aten._pin_memory.default(tensor._min_key),
+        aten._pin_memory.default(tensor._max_key),
+        num_keys=tensor.size(0),
+        dtype=tensor.dtype,
+    )
+
+
 @implements(aten.unsqueeze.default)
 def _unsqueeze(tensor: HashTensor, dim: int) -> HashTensor:
     if dim == 0 or dim == -(tensor.dim() + 1):
```
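Pinning support covers every tensor component: `_map` (when it is a `Tensor`), `_value`, and both key bounds. This enables the usual pinned-host-to-GPU transfer pattern; a hypothetical usage sketch, assuming a CUDA machine and the `HashTensor(key, value)` constructor used elsewhere in this diff:

```python
import torch
from torch_geometric.hash_tensor import HashTensor

ht = HashTensor(torch.tensor([10, 20, 30]), torch.randn(3, 8))
pinned = ht.pin_memory()  # pins map, values, and min/max key bounds
cuda_ht = pinned.to("cuda", non_blocking=True)  # async host-to-device copy
```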
```diff
@@ -359,9 +471,13 @@ def _squeeze_default(tensor: HashTensor) -> HashTensor:
     if tensor._value is None:
         return tensor._shallow_copy()
 
+    value = tensor.as_tensor()
+    for d in range(tensor.dim() - 1, 0, -1):
+        value = value.squeeze(d)
+
     return tensor._from_data(
         tensor._map,
-        aten.squeeze.dims(tensor._value, list(range(1, tensor.dim()))),
+        value,
         tensor._min_key,
         tensor._max_key,
         num_keys=tensor.size(0),
```
```diff
@@ -387,11 +503,14 @@ def _squeeze_dim(
     if tensor._value is None:
         return tensor._shallow_copy()
 
-    dim = [d for d in dim if d != 0 and d != -tensor.dim()]
+    value = tensor.as_tensor()
+    for d in dim[::-1]:
+        if d != 0 and d != -tensor.dim():
+            value = value.squeeze(d)
 
     return tensor._from_data(
         tensor._map,
-        aten.squeeze.dims(tensor._value, dim),
+        value,
         tensor._min_key,
         tensor._max_key,
         num_keys=tensor.size(0),
```
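Both squeeze overloads drop the single `aten.squeeze.dims` call in favor of a loop of per-dimension `squeeze(d)` calls, presumably for compatibility with PyTorch versions that lack the `squeeze.dims` overload. Iterating from the highest dimension downward keeps the remaining indices valid as size-1 dimensions disappear, and dim 0 (the key dimension) is always skipped. A standalone sketch of the loop's effect:

```python
import torch

value = torch.randn(4, 1, 3, 1)  # dim 0 is the key dimension

# Squeeze all non-key dimensions, highest first so indices stay valid:
for d in range(value.dim() - 1, 0, -1):
    value = value.squeeze(d)

assert value.shape == (4, 3)  # size-1 dims removed, key dim untouched
```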
```diff
@@ -406,10 +525,18 @@ def _slice(
     start: Optional[int] = None,
     end: Optional[int] = None,
     step: int = 1,
-) -> Union[HashTensor, Tensor]:
+) -> HashTensor:
 
     if dim == 0 or dim == -tensor.dim():
-        return aten.slice.Tensor(tensor.as_tensor(), dim, start, end, step)
+        copy = start is None or (start == 0 or start <= -tensor.size(0))
+        copy &= end is None or end > tensor.size(0)
+        copy &= step == 1
+        if copy:
+            return tensor._shallow_copy()
+
+        key = aten.slice.Tensor(tensor._key, 0, start, end, step)
+        value = aten.slice.Tensor(tensor.as_tensor(), 0, start, end, step)
+        return tensor.__class__(key, value)
 
     return tensor._from_data(
         tensor._map,
```
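Slicing along the key dimension used to decay into a plain `Tensor`; it now stays a `HashTensor`. A full-range slice (`step == 1` with `start`/`end` covering everything) returns a cheap shallow copy, while a narrower slice rebuilds a fresh `HashTensor` from the sliced keys (via the new `_key` property) and sliced values. A hypothetical usage sketch:

```python
import torch
from torch_geometric.hash_tensor import HashTensor

ht = HashTensor(torch.tensor([4, 1, 9]), torch.randn(3, 2))
alias = ht[:]  # full-range slice -> shallow copy, mapping shared
sub = ht[1:]   # re-keyed HashTensor over the remaining keys [1, 9]
```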
```diff
@@ -429,7 +556,7 @@ _old_index_select = torch.index_select
 def _new_index_select(
     input: Tensor,
     dim: int,
-    index: Any,
+    index: Tensor,
     *,
     out: Optional[Tensor] = None,
 ) -> Tensor:
```
```diff
@@ -467,3 +594,54 @@ def _index_select(
         num_keys=tensor.size(0),
         dtype=tensor.dtype,
     )
+
+
+# Since PyTorch does only allow PyTorch tensors as indices in `select`, we need
+# to create a wrapper function and monkey patch `select` :(
+_old_select = torch.select
+
+
+def _new_select(
+    input: Tensor,
+    dim: int,
+    index: int,
+) -> Tensor:
+
+    if dim < -input.dim() or dim >= input.dim():
+        raise IndexError(f"Dimension out of range (expected to be in range of "
+                         f"[{-input.dim()}, {input.dim()-1}], but got {dim})")
+
+    # We convert any index in the first dimension into an integer. This means
+    # that downstream handling (i.e. in `aten.select.int`) needs to take this
+    # pre-conversion into account.
+    if isinstance(input, HashTensor) and (dim == 0 or dim == -input.dim()):
+        index = int(as_key_tensor([index]))
+    return _old_select(input, dim, index)
+
+
+torch.select = _new_select  # type: ignore
+
+
+@implements(aten.select.int)
+def _select(
+    tensor: HashTensor,
+    dim: int,
+    index: int,
+) -> Union[HashTensor, Tensor]:
+
+    if dim == 0 or dim == -tensor.dim():
+        key = torch.tensor(
+            [index],
+            dtype=tensor._min_key.dtype,
+            device=tensor.device,
+        )
+        return tensor._get(key).squeeze(0)
+
+    return tensor._from_data(
+        tensor._map,
+        aten.select.int(tensor.as_tensor(), dim, index),
+        tensor._min_key,
+        tensor._max_key,
+        num_keys=tensor.size(0),
+        dtype=tensor.dtype,
+    )
```
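This mirrors the existing `index_select` monkey patch: `torch.select` is wrapped so that, for a `HashTensor`, an index into the key dimension is first normalized through `as_key_tensor`, and the `aten.select.int` override then performs a hash lookup via `_get` rather than positional indexing. Selecting along any other dimension keeps the map and selects inside the values. A hypothetical usage sketch:

```python
import torch
from torch_geometric.hash_tensor import HashTensor

ht = HashTensor(torch.tensor([10, 20, 30]), torch.randn(3, 4))

row = ht.select(0, 20)  # looks up *key* 20 (slot 1), not position 20
col = ht.select(1, 2)   # non-key dim: stays a HashTensor over column 2
```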
torch_geometric/testing/__init__.py:
```diff
@@ -22,6 +22,7 @@ from .decorators import (
     withDevice,
     withCUDA,
     withMETIS,
+    withHashTensor,
     disableExtensions,
     withoutExtensions,
 )
@@ -53,6 +54,7 @@ __all__ = [
     'withDevice',
     'withCUDA',
     'withMETIS',
+    'withHashTensor',
     'disableExtensions',
     'withoutExtensions',
     'assert_module',
```
torch_geometric/testing/decorators.py:
```diff
@@ -10,6 +10,7 @@ from packaging.requirements import Requirement
 from packaging.version import Version
 
 import torch_geometric
+import torch_geometric.typing
 from torch_geometric.typing import WITH_METIS, WITH_PYG_LIB, WITH_TORCH_SPARSE
 from torch_geometric.visualization.graph import has_graphviz
 
@@ -265,6 +266,17 @@ def withMETIS(func: Callable) -> Callable:
     )(func)
 
 
+def withHashTensor(func: Callable) -> Callable:
+    r"""A decorator to only test in case :class:`HashTensor` is available."""
+    import pytest
+
+    return pytest.mark.skipif(
+        not torch_geometric.typing.WITH_CPU_HASH_MAP
+        and not has_package('pandas'),
+        reason="HashTensor dependencies not available",
+    )(func)
+
+
 def disableExtensions(func: Callable) -> Callable:
     r"""A decorator to temporarily disable the usage of the
     :obj:`torch_scatter`, :obj:`torch_sparse` and :obj:`pyg_lib` extension
```
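The new `withHashTensor` decorator skips a test unless at least one `HashTensor` map backend is available: the CPU hash-map extension (`WITH_CPU_HASH_MAP`) or the pandas fallback. Typical usage in a test module (illustrative test body):

```python
import torch

from torch_geometric.hash_tensor import HashTensor
from torch_geometric.testing import withHashTensor


@withHashTensor
def test_hash_tensor_lookup():
    ht = HashTensor(torch.tensor([10, 20, 30]), torch.randn(3, 4))
    assert ht.size(0) == 3
```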