pyg-nightly 2.7.0.dev20251010__py3-none-any.whl → 2.7.0.dev20251012__py3-none-any.whl

This diff represents the contents of two publicly released versions of the package as published to one of the supported registries. It is provided for informational purposes only and reflects the changes between those versions as they appear in the registry.
--- pyg_nightly-2.7.0.dev20251010.dist-info/METADATA
+++ pyg_nightly-2.7.0.dev20251012.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pyg-nightly
-Version: 2.7.0.dev20251010
+Version: 2.7.0.dev20251012
 Summary: Graph Neural Network Library for PyTorch
 Keywords: deep-learning,pytorch,geometric-deep-learning,graph-neural-networks,graph-convolutional-networks
 Author-email: Matthias Fey <matthias@pyg.org>

--- pyg_nightly-2.7.0.dev20251010.dist-info/RECORD
+++ pyg_nightly-2.7.0.dev20251012.dist-info/RECORD
@@ -1,4 +1,4 @@
-torch_geometric/__init__.py,sha256=70HTSmZMg_7JrYiYRtuyPb_1c9-YFyCl4wy29XGiGDs,2292
+torch_geometric/__init__.py,sha256=3ALLRK-890CBKAJrL8MegdGoKBbcSrSB1cV9DzGIXPw,2292
 torch_geometric/_compile.py,sha256=9yqMTBKatZPr40WavJz9FjNi7pQj8YZAZOyZmmRGXgc,1351
 torch_geometric/_onnx.py,sha256=ODB_8cwFUiwBUjngXn6-K5HHb7IDul7DDXuuGX7vj_0,8178
 torch_geometric/backend.py,sha256=lVaf7aLoVaB3M-UcByUJ1G4T4FOK6LXAg0CF4W3E8jo,1575
@@ -278,14 +278,14 @@ torch_geometric/llm/models/llm.py,sha256=039mq9rZBZMyZW5rYj0fMP5kl9RJAI7N-oJyC4M
 torch_geometric/llm/models/llm_judge.py,sha256=qhc8hmIPNhcfLVRyBVk7jQW7ncoIb9QYw7rcsGAIpyg,6457
 torch_geometric/llm/models/molecule_gpt.py,sha256=RWoP4RMsoRzZtuedPCLNCfooqibCqxkuAhH-pyek9No,7641
 torch_geometric/llm/models/protein_mpnn.py,sha256=SwTgafSbI2KJ-yqzn0trZtVWLmfo0_kPEaWSNJUCt70,12266
-torch_geometric/llm/models/sentence_transformer.py,sha256=TSXBxeTktj10YU-h_1prdMCCmAG8MTOKMm760ch4g30,6377
+torch_geometric/llm/models/sentence_transformer.py,sha256=XYDN7xYyMqUNZyxpY6EjDjfYt1mope5iGsBZ2fLc1J8,7041
 torch_geometric/llm/models/txt2kg.py,sha256=CjWXCa_WrqIlBfOSPMAK-mnaX19dvH_YhXMWSuLkk4o,14074
 torch_geometric/llm/models/vision_transformer.py,sha256=aPuVfpSwGR96KaicRYut49g6ShrCklbouaLwyPuwhBQ,1022
 torch_geometric/llm/utils/__init__.py,sha256=P5By_n15MqkUU1tfh87PGE--J7RVygPeDSBOTy_VlZ0,292
-torch_geometric/llm/utils/backend_utils.py,sha256=AcvZ8ym3UKCWwWoMl8sNRnj4FSl0_srHw9yxZAKiyeU,15840
+torch_geometric/llm/utils/backend_utils.py,sha256=vde10npVYZTY6ONExu-eeMgzouMnhdFcoTe8GyYuL1k,15874
 torch_geometric/llm/utils/feature_store.py,sha256=d60n3TlclEhlqoDEHKmvvGI6t8r0nur1BNwXyqqtj24,5903
 torch_geometric/llm/utils/graph_store.py,sha256=_Hh0aGnokUn0zvOC80xUfT4TtX_7G4KIDoEBkNXkgHY,7103
-torch_geometric/llm/utils/vectorrag.py,sha256=m51drMNXsGQTN6qkbR8QiHb8jvcrBlZDHzEjsEmtnes,4753
+torch_geometric/llm/utils/vectorrag.py,sha256=7WE73NOzHfChSvmYEfg2dHp8JIgLtX4XaKVRrsKmkc0,4791
 torch_geometric/loader/__init__.py,sha256=w9LSTbyrLRkyrLXi_10d80csWgfKOKDRQDJXRdcfD0M,1835
 torch_geometric/loader/base.py,sha256=ataIwNEYL0px3CN3LJEgXIVTRylDHB6-yBFXXuX2JN0,1615
 torch_geometric/loader/cache.py,sha256=S65heO3YTyUPbttqizCNtKPHIoAw5iHRpbvw6KlXmok,2106
@@ -654,7 +654,7 @@ torch_geometric/utils/undirected.py,sha256=H_nfpI0_WluOG6VfjPyldvcjL4w5USAKWu2x5
 torch_geometric/visualization/__init__.py,sha256=b-HnVesXjyJ_L1N-DnjiRiRVf7lhwKaBQF_2i5YMVSU,208
 torch_geometric/visualization/graph.py,sha256=mfZHXYfiU-CWMtfawYc80IxVwVmtK9hbIkSKhM_j7oI,14311
 torch_geometric/visualization/influence.py,sha256=CWMvuNA_Nf1sfbJmQgn58yS4OFpeKXeZPe7kEuvkUBw,477
-pyg_nightly-2.7.0.dev20251010.dist-info/licenses/LICENSE,sha256=ic-27cMJc1kWoMEYncz3Ya3Ur2Bi3bNLWib2DT763-o,1067
-pyg_nightly-2.7.0.dev20251010.dist-info/WHEEL,sha256=G2gURzTEtmeR8nrdXUJfNiB3VYVxigPQ-bEQujpNiNs,82
-pyg_nightly-2.7.0.dev20251010.dist-info/METADATA,sha256=DpuCNBUoadbXK5QZUt98PWRPh4QTH4-M0gdqfD9btFU,63680
-pyg_nightly-2.7.0.dev20251010.dist-info/RECORD,,
+pyg_nightly-2.7.0.dev20251012.dist-info/licenses/LICENSE,sha256=ic-27cMJc1kWoMEYncz3Ya3Ur2Bi3bNLWib2DT763-o,1067
+pyg_nightly-2.7.0.dev20251012.dist-info/WHEEL,sha256=G2gURzTEtmeR8nrdXUJfNiB3VYVxigPQ-bEQujpNiNs,82
+pyg_nightly-2.7.0.dev20251012.dist-info/METADATA,sha256=Jiqw0vebP6H8jj7vXsCGfDzGSB2v45yJaMybAvFTMQM,63680
+pyg_nightly-2.7.0.dev20251012.dist-info/RECORD,,

--- torch_geometric/__init__.py
+++ torch_geometric/__init__.py
@@ -31,7 +31,7 @@ from .lazy_loader import LazyLoader
 contrib = LazyLoader('contrib', globals(), 'torch_geometric.contrib')
 graphgym = LazyLoader('graphgym', globals(), 'torch_geometric.graphgym')
 
-__version__ = '2.7.0.dev20251010'
+__version__ = '2.7.0.dev20251012'
 
 __all__ = [
     'Index',
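
The hunk above only bumps the version string that torch_geometric reports at runtime. A quick sanity check, assuming the new nightly wheel is installed:

```python
import torch_geometric

# With pyg-nightly 2.7.0.dev20251012 installed, this prints the bumped string.
print(torch_geometric.__version__)  # '2.7.0.dev20251012'
```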

--- torch_geometric/llm/models/sentence_transformer.py
+++ torch_geometric/llm/models/sentence_transformer.py
@@ -112,6 +112,19 @@ class SentenceTransformer(torch.nn.Module):
         output_device: Optional[Union[torch.device, str]] = None,
         verbose=False,
     ) -> Tensor:
+        r"""Main function for users. Converts strings to embeddings.
+
+        Args:
+            text (List[str]): List of strings to embed.
+            batch_size (int, optional): How many strings to process.
+                Defaults to processing all at once, but this may lead to
+                OOM errors. (default: obj:`None`)
+            output_device (Union[torch.device, str], optional):
+                By default outputs cpu pytorch tensor, but can choose
+                to output to specific cuda devices. (default: obj:`None`)
+            verbose (bool, optional): Controls the verbosity of outputs.
+                (default: obj:`False`)
+        """
         is_empty = len(text) == 0
         text = ['dummy'] if is_empty else text
 
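
The sentence_transformer.py change only adds a docstring for SentenceTransformer.encode(); its documented parameters are text, batch_size, output_device, and verbose. A minimal usage sketch based on that docstring — the import path and constructor arguments below are assumptions for illustration, not part of this diff:

```python
from torch_geometric.llm.models import SentenceTransformer

# Constructor arguments are assumed for illustration; only encode()'s
# parameters (text, batch_size, output_device, verbose) come from the
# docstring added in this diff.
model = SentenceTransformer(model_name='sentence-transformers/all-MiniLM-L6-v2')

texts = ['What is a graph neural network?',
         'PyG ships a SentenceTransformer wrapper.']

emb = model.encode(
    texts,
    batch_size=32,        # None (default) encodes everything at once and may OOM
    output_device='cpu',  # or e.g. 'cuda:0' to keep embeddings on a GPU
    verbose=False,
)
print(emb.shape)  # [2, embedding_dim]
```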

--- torch_geometric/llm/utils/backend_utils.py
+++ torch_geometric/llm/utils/backend_utils.py
@@ -408,7 +408,8 @@ def make_pcst_filter(triples: List[Tuple[str, str,
        :return: Retrieved graph/query data
        """
        # PCST relies on numpy and pcst_fast pypi libs, hence to("cpu")
-        q_emb = model.encode([query]).to("cpu")
+        with torch.no_grad():
+            q_emb = model.encode([query]).to("cpu")
        textual_nodes = [(int(i), full_textual_nodes[i])
                         for i in graph["node_idx"]]
        textual_nodes = DataFrame(textual_nodes,

--- torch_geometric/llm/utils/vectorrag.py
+++ torch_geometric/llm/utils/vectorrag.py
@@ -65,7 +65,8 @@ class DocumentRetriever(VectorRetriever):
            List[str]: Documents retrieved from the vector database.
        """
        if isinstance(query, str):
-            query_enc = self.encoder(query, **self.model_kwargs)
+            with torch.no_grad():
+                query_enc = self.encoder(query, **self.model_kwargs)
        else:
            query_enc = query
 
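
The last two hunks (backend_utils.py and vectorrag.py) apply the same fix: query encoding at retrieval time is wrapped in torch.no_grad(), so autograd does not record the forward pass or keep activations alive for a backward pass that never runs. A self-contained sketch of the pattern with a stand-in encoder (not PyG's SentenceTransformer or DocumentRetriever API):

```python
import torch

# Stand-in encoder, used only to illustrate the pattern.
encoder = torch.nn.Linear(in_features=8, out_features=4)
query_features = torch.randn(1, 8)

# Without the guard, autograd records the matmul: the output carries a
# grad_fn and intermediate buffers stay alive.
q_emb = encoder(query_features)
print(q_emb.requires_grad)  # True

# With the guard, no graph is recorded: lower memory use, and the result
# can be moved to CPU and handed to numpy-based code (e.g. pcst_fast)
# without an extra .detach().
with torch.no_grad():
    q_emb = encoder(query_features)
print(q_emb.requires_grad)  # False
```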