pyg-nightly 2.6.0.dev20240511__py3-none-any.whl → 2.7.0.dev20250114__py3-none-any.whl

Files changed (205)
  1. {pyg_nightly-2.6.0.dev20240511.dist-info → pyg_nightly-2.7.0.dev20250114.dist-info}/METADATA +30 -31
  2. {pyg_nightly-2.6.0.dev20240511.dist-info → pyg_nightly-2.7.0.dev20250114.dist-info}/RECORD +205 -181
  3. {pyg_nightly-2.6.0.dev20240511.dist-info → pyg_nightly-2.7.0.dev20250114.dist-info}/WHEEL +1 -1
  4. torch_geometric/__init__.py +26 -1
  5. torch_geometric/_compile.py +8 -1
  6. torch_geometric/_onnx.py +14 -0
  7. torch_geometric/config_mixin.py +113 -0
  8. torch_geometric/config_store.py +16 -14
  9. torch_geometric/data/__init__.py +24 -1
  10. torch_geometric/data/batch.py +2 -2
  11. torch_geometric/data/data.py +13 -8
  12. torch_geometric/data/database.py +15 -7
  13. torch_geometric/data/dataset.py +14 -6
  14. torch_geometric/data/feature_store.py +13 -22
  15. torch_geometric/data/graph_store.py +0 -4
  16. torch_geometric/data/hetero_data.py +4 -4
  17. torch_geometric/data/in_memory_dataset.py +2 -4
  18. torch_geometric/data/large_graph_indexer.py +677 -0
  19. torch_geometric/data/lightning/datamodule.py +4 -4
  20. torch_geometric/data/storage.py +15 -5
  21. torch_geometric/data/summary.py +14 -4
  22. torch_geometric/data/temporal.py +1 -2
  23. torch_geometric/datasets/__init__.py +11 -1
  24. torch_geometric/datasets/actor.py +9 -11
  25. torch_geometric/datasets/airfrans.py +15 -18
  26. torch_geometric/datasets/airports.py +10 -12
  27. torch_geometric/datasets/amazon.py +8 -11
  28. torch_geometric/datasets/amazon_book.py +9 -10
  29. torch_geometric/datasets/amazon_products.py +9 -10
  30. torch_geometric/datasets/aminer.py +8 -9
  31. torch_geometric/datasets/aqsol.py +10 -13
  32. torch_geometric/datasets/attributed_graph_dataset.py +10 -12
  33. torch_geometric/datasets/ba_multi_shapes.py +10 -12
  34. torch_geometric/datasets/ba_shapes.py +5 -6
  35. torch_geometric/datasets/bitcoin_otc.py +1 -1
  36. torch_geometric/datasets/brca_tgca.py +1 -1
  37. torch_geometric/datasets/dblp.py +2 -1
  38. torch_geometric/datasets/dbp15k.py +2 -2
  39. torch_geometric/datasets/fake.py +1 -3
  40. torch_geometric/datasets/flickr.py +2 -1
  41. torch_geometric/datasets/freebase.py +1 -1
  42. torch_geometric/datasets/gdelt_lite.py +3 -2
  43. torch_geometric/datasets/ged_dataset.py +3 -2
  44. torch_geometric/datasets/git_mol_dataset.py +263 -0
  45. torch_geometric/datasets/gnn_benchmark_dataset.py +6 -5
  46. torch_geometric/datasets/hgb_dataset.py +8 -8
  47. torch_geometric/datasets/imdb.py +2 -1
  48. torch_geometric/datasets/last_fm.py +2 -1
  49. torch_geometric/datasets/linkx_dataset.py +4 -3
  50. torch_geometric/datasets/lrgb.py +3 -5
  51. torch_geometric/datasets/malnet_tiny.py +4 -3
  52. torch_geometric/datasets/mnist_superpixels.py +2 -3
  53. torch_geometric/datasets/molecule_gpt_dataset.py +485 -0
  54. torch_geometric/datasets/molecule_net.py +7 -1
  55. torch_geometric/datasets/motif_generator/base.py +0 -1
  56. torch_geometric/datasets/neurograph.py +1 -3
  57. torch_geometric/datasets/ogb_mag.py +1 -1
  58. torch_geometric/datasets/opf.py +239 -0
  59. torch_geometric/datasets/ose_gvcs.py +1 -1
  60. torch_geometric/datasets/pascal_pf.py +1 -1
  61. torch_geometric/datasets/pcpnet_dataset.py +1 -1
  62. torch_geometric/datasets/pcqm4m.py +2 -1
  63. torch_geometric/datasets/ppi.py +1 -1
  64. torch_geometric/datasets/qm9.py +4 -3
  65. torch_geometric/datasets/reddit.py +2 -1
  66. torch_geometric/datasets/reddit2.py +2 -1
  67. torch_geometric/datasets/rel_link_pred_dataset.py +3 -3
  68. torch_geometric/datasets/s3dis.py +2 -2
  69. torch_geometric/datasets/shapenet.py +3 -3
  70. torch_geometric/datasets/shrec2016.py +2 -2
  71. torch_geometric/datasets/tag_dataset.py +350 -0
  72. torch_geometric/datasets/upfd.py +2 -1
  73. torch_geometric/datasets/web_qsp_dataset.py +246 -0
  74. torch_geometric/datasets/webkb.py +2 -2
  75. torch_geometric/datasets/wikics.py +1 -1
  76. torch_geometric/datasets/wikidata.py +3 -2
  77. torch_geometric/datasets/wikipedia_network.py +2 -2
  78. torch_geometric/datasets/word_net.py +2 -2
  79. torch_geometric/datasets/yelp.py +2 -1
  80. torch_geometric/datasets/zinc.py +1 -1
  81. torch_geometric/device.py +42 -0
  82. torch_geometric/distributed/local_feature_store.py +3 -2
  83. torch_geometric/distributed/local_graph_store.py +2 -1
  84. torch_geometric/distributed/partition.py +9 -8
  85. torch_geometric/edge_index.py +17 -8
  86. torch_geometric/explain/algorithm/base.py +0 -1
  87. torch_geometric/explain/algorithm/pg_explainer.py +1 -1
  88. torch_geometric/explain/explanation.py +2 -2
  89. torch_geometric/graphgym/checkpoint.py +2 -1
  90. torch_geometric/graphgym/logger.py +4 -4
  91. torch_geometric/graphgym/loss.py +1 -1
  92. torch_geometric/graphgym/utils/agg_runs.py +6 -6
  93. torch_geometric/index.py +20 -7
  94. torch_geometric/inspector.py +6 -2
  95. torch_geometric/io/fs.py +28 -2
  96. torch_geometric/io/npz.py +2 -1
  97. torch_geometric/io/off.py +2 -2
  98. torch_geometric/io/sdf.py +2 -2
  99. torch_geometric/io/tu.py +2 -3
  100. torch_geometric/loader/__init__.py +4 -0
  101. torch_geometric/loader/cluster.py +9 -3
  102. torch_geometric/loader/graph_saint.py +2 -1
  103. torch_geometric/loader/ibmb_loader.py +12 -4
  104. torch_geometric/loader/mixin.py +1 -1
  105. torch_geometric/loader/neighbor_loader.py +1 -1
  106. torch_geometric/loader/neighbor_sampler.py +2 -2
  107. torch_geometric/loader/prefetch.py +1 -1
  108. torch_geometric/loader/rag_loader.py +107 -0
  109. torch_geometric/loader/zip_loader.py +10 -0
  110. torch_geometric/metrics/__init__.py +11 -2
  111. torch_geometric/metrics/link_pred.py +159 -34
  112. torch_geometric/nn/aggr/__init__.py +2 -0
  113. torch_geometric/nn/aggr/attention.py +0 -2
  114. torch_geometric/nn/aggr/base.py +2 -4
  115. torch_geometric/nn/aggr/patch_transformer.py +143 -0
  116. torch_geometric/nn/aggr/set_transformer.py +1 -1
  117. torch_geometric/nn/attention/__init__.py +5 -1
  118. torch_geometric/nn/attention/qformer.py +71 -0
  119. torch_geometric/nn/conv/collect.jinja +6 -3
  120. torch_geometric/nn/conv/cugraph/base.py +0 -1
  121. torch_geometric/nn/conv/edge_conv.py +3 -2
  122. torch_geometric/nn/conv/gat_conv.py +35 -7
  123. torch_geometric/nn/conv/gatv2_conv.py +36 -6
  124. torch_geometric/nn/conv/general_conv.py +1 -1
  125. torch_geometric/nn/conv/gravnet_conv.py +3 -2
  126. torch_geometric/nn/conv/hetero_conv.py +3 -3
  127. torch_geometric/nn/conv/hgt_conv.py +1 -1
  128. torch_geometric/nn/conv/message_passing.py +100 -82
  129. torch_geometric/nn/conv/mixhop_conv.py +1 -1
  130. torch_geometric/nn/conv/rgcn_conv.py +2 -1
  131. torch_geometric/nn/conv/spline_conv.py +4 -4
  132. torch_geometric/nn/conv/x_conv.py +3 -2
  133. torch_geometric/nn/dense/linear.py +5 -4
  134. torch_geometric/nn/fx.py +3 -3
  135. torch_geometric/nn/model_hub.py +3 -1
  136. torch_geometric/nn/models/__init__.py +10 -2
  137. torch_geometric/nn/models/deep_graph_infomax.py +1 -2
  138. torch_geometric/nn/models/dimenet_utils.py +5 -7
  139. torch_geometric/nn/models/g_retriever.py +230 -0
  140. torch_geometric/nn/models/git_mol.py +336 -0
  141. torch_geometric/nn/models/glem.py +385 -0
  142. torch_geometric/nn/models/gnnff.py +0 -1
  143. torch_geometric/nn/models/graph_unet.py +12 -3
  144. torch_geometric/nn/models/jumping_knowledge.py +63 -4
  145. torch_geometric/nn/models/lightgcn.py +1 -1
  146. torch_geometric/nn/models/metapath2vec.py +3 -4
  147. torch_geometric/nn/models/molecule_gpt.py +222 -0
  148. torch_geometric/nn/models/node2vec.py +1 -2
  149. torch_geometric/nn/models/schnet.py +2 -1
  150. torch_geometric/nn/models/signed_gcn.py +3 -3
  151. torch_geometric/nn/module_dict.py +2 -2
  152. torch_geometric/nn/nlp/__init__.py +9 -0
  153. torch_geometric/nn/nlp/llm.py +322 -0
  154. torch_geometric/nn/nlp/sentence_transformer.py +134 -0
  155. torch_geometric/nn/nlp/vision_transformer.py +33 -0
  156. torch_geometric/nn/norm/batch_norm.py +1 -1
  157. torch_geometric/nn/parameter_dict.py +2 -2
  158. torch_geometric/nn/pool/__init__.py +7 -5
  159. torch_geometric/nn/pool/cluster_pool.py +145 -0
  160. torch_geometric/nn/pool/connect/base.py +0 -1
  161. torch_geometric/nn/pool/edge_pool.py +1 -1
  162. torch_geometric/nn/pool/graclus.py +4 -2
  163. torch_geometric/nn/pool/select/base.py +0 -1
  164. torch_geometric/nn/pool/voxel_grid.py +3 -2
  165. torch_geometric/nn/resolver.py +1 -1
  166. torch_geometric/nn/sequential.jinja +10 -23
  167. torch_geometric/nn/sequential.py +203 -77
  168. torch_geometric/nn/summary.py +1 -1
  169. torch_geometric/nn/to_hetero_with_bases_transformer.py +19 -19
  170. torch_geometric/profile/__init__.py +2 -0
  171. torch_geometric/profile/nvtx.py +66 -0
  172. torch_geometric/profile/profiler.py +24 -15
  173. torch_geometric/resolver.py +1 -1
  174. torch_geometric/sampler/base.py +34 -13
  175. torch_geometric/sampler/neighbor_sampler.py +11 -10
  176. torch_geometric/testing/decorators.py +17 -22
  177. torch_geometric/transforms/__init__.py +2 -0
  178. torch_geometric/transforms/add_metapaths.py +4 -4
  179. torch_geometric/transforms/add_positional_encoding.py +1 -1
  180. torch_geometric/transforms/delaunay.py +65 -14
  181. torch_geometric/transforms/face_to_edge.py +32 -3
  182. torch_geometric/transforms/gdc.py +7 -6
  183. torch_geometric/transforms/laplacian_lambda_max.py +2 -2
  184. torch_geometric/transforms/mask.py +5 -1
  185. torch_geometric/transforms/node_property_split.py +1 -2
  186. torch_geometric/transforms/pad.py +7 -6
  187. torch_geometric/transforms/random_link_split.py +1 -1
  188. torch_geometric/transforms/remove_self_loops.py +36 -0
  189. torch_geometric/transforms/svd_feature_reduction.py +1 -1
  190. torch_geometric/transforms/virtual_node.py +2 -1
  191. torch_geometric/typing.py +31 -5
  192. torch_geometric/utils/__init__.py +5 -1
  193. torch_geometric/utils/_negative_sampling.py +1 -1
  194. torch_geometric/utils/_normalize_edge_index.py +46 -0
  195. torch_geometric/utils/_scatter.py +37 -12
  196. torch_geometric/utils/_subgraph.py +4 -0
  197. torch_geometric/utils/_tree_decomposition.py +2 -2
  198. torch_geometric/utils/augmentation.py +1 -1
  199. torch_geometric/utils/convert.py +5 -5
  200. torch_geometric/utils/geodesic.py +24 -22
  201. torch_geometric/utils/hetero.py +1 -1
  202. torch_geometric/utils/map.py +1 -1
  203. torch_geometric/utils/smiles.py +66 -28
  204. torch_geometric/utils/sparse.py +25 -10
  205. torch_geometric/visualization/graph.py +3 -4
torch_geometric/nn/models/molecule_gpt.py
@@ -0,0 +1,222 @@
+from typing import List, Optional
+
+import torch
+from torch import Tensor
+
+from torch_geometric.nn.attention import QFormer
+from torch_geometric.nn.nlp.llm import BOS, LLM, MAX_NEW_TOKENS
+from torch_geometric.utils import to_dense_batch
+
+
+def pad_or_truncate(embeddings: Tensor, max_seq_len: int,
+                    padding_value: int = 0) -> Tensor:
+    batch_size, current_seq_len, d = embeddings.size()
+
+    if current_seq_len > max_seq_len:
+        return embeddings[:, :max_seq_len, :]
+    elif current_seq_len < max_seq_len:
+        pad_tensor = torch.full((batch_size, max_seq_len - current_seq_len, d),
+                                padding_value, dtype=embeddings.dtype,
+                                device=embeddings.device)
+        return torch.cat([embeddings, pad_tensor], dim=1)
+    else:
+        return embeddings
+
+
+class MoleculeGPT(torch.nn.Module):
+    r"""The MoleculeGPT model from the `"MoleculeGPT: Instruction
+    Following Large Language Models for Molecular Property Prediction"
+    <https://ai4d3.github.io/papers/34.pdf>`_ paper.
+
+    Args:
+        llm (LLM): The LLM to use.
+        graph_encoder (torch.nn.Module): Encode 2D molecule graph.
+        smiles_encoder (torch.nn.Module): Encode 1D SMILES.
+        mlp_out_channels (int, optional): The size of each embedding
+            after qformer encoding. (default: :obj:`32`)
+        max_tokens (int, optional): Max output tokens of 1D/2D encoder.
+            (default: :obj:`20`)
+
+    .. warning::
+        This module has been tested with the following HuggingFace models
+
+        * :obj:`llm_to_use="lmsys/vicuna-7b-v1.5"`
+
+        and may not work with other models. See other models at `HuggingFace
+        Models <https://huggingface.co/models>`_ and let us know if you
+        encounter any issues.
+
+    .. note::
+        For an example of using :class:`MoleculeGPT`, see
+        `examples/llm/molecule_gpt.py <https://github.com/pyg-team/
+        pytorch_geometric/blob/master/examples/llm/molecule_gpt.py>`_.
+    """
+    def __init__(
+        self,
+        llm: LLM,
+        graph_encoder: torch.nn.Module,
+        smiles_encoder: torch.nn.Module,
+        mlp_out_channels: int = 32,
+        max_tokens: Optional[int] = 20,
+    ) -> None:
+        super().__init__()
+        self.llm = llm
+        self.graph_encoder = graph_encoder.to(self.llm.device)
+        self.smiles_encoder = smiles_encoder.to(self.llm.device)
+
+        self.graph_qformer = QFormer(
+            input_dim=self.graph_encoder.nn[-1].out_features,
+            hidden_dim=mlp_out_channels,
+            output_dim=mlp_out_channels,
+            num_heads=4,
+            num_layers=2,
+        ).to(self.llm.device)
+
+        self.smiles_qformer = QFormer(
+            input_dim=self.smiles_encoder.model.pooler.dense.out_features,
+            hidden_dim=mlp_out_channels,
+            output_dim=mlp_out_channels,
+            num_heads=4,
+            num_layers=2,
+        ).to(self.llm.device)
+
+        self.max_tokens = max_tokens
+
+        self.word_embedding = self.llm.word_embedding
+        self.llm_generator = self.llm.llm
+
+        # LLMs
+        in_dim = 2 * mlp_out_channels * max_tokens
+        out_dim = self.llm.llm.model.embed_tokens.embedding_dim
+        self.projector = torch.nn.Sequential(
+            torch.nn.Linear(in_dim, in_dim),
+            torch.nn.Sigmoid(),
+            torch.nn.Linear(in_dim, out_dim),
+        ).to(self.llm.device)
+
+    def encode(
+        self,
+        x: Tensor,
+        edge_index: Tensor,
+        batch: Tensor,
+        edge_attr: Optional[Tensor],
+        smiles: List[str],
+    ) -> Tensor:
+        batch_size = len(smiles)
+        # 2D Graph Branch: [bs, node_len, d]
+        x = x.to(self.llm.device)
+        edge_index = edge_index.to(self.llm.device)
+        if edge_attr is not None:
+            edge_attr = edge_attr.to(self.llm.device)
+        batch = batch.to(self.llm.device)
+
+        x_graph = self.graph_encoder(x, edge_index, edge_attr=edge_attr)
+        x_graph = to_dense_batch(x_graph, batch)[0]
+        out_graph = self.graph_qformer(x_graph)
+        out_graph = pad_or_truncate(out_graph, max_seq_len=self.max_tokens,
+                                    padding_value=0)
+        out_graph = out_graph.view(batch_size, -1)
+
+        # 1D SMILES Branch: [bs, seq_len, d]
+        x_smiles = self.smiles_encoder.encode(smiles,
+                                              output_device=self.llm.device)
+        out_smiles = self.smiles_qformer(x_smiles)
+        out_smiles = pad_or_truncate(out_smiles, max_seq_len=self.max_tokens,
+                                     padding_value=0)
+        out_smiles = out_smiles.view(batch_size, -1)
+
+        # Merge into LLMs
+        x_cat = torch.cat([out_graph, out_smiles], dim=1)
+        return x_cat
+
+    def forward(
+        self,
+        x: Tensor,
+        edge_index: Tensor,
+        batch: Tensor,
+        edge_attr: Optional[Tensor],
+        smiles: List[str],
+        instructions: List[str],
+        label: List[str],
+        additional_text_context: Optional[List[str]] = None,
+    ):
+        x = self.encode(x, edge_index, batch, edge_attr, smiles)
+        x = self.projector(x)
+        xs = x.split(1, dim=0)
+
+        batch_unique = batch.unique()
+        batch_size = len(instructions)
+        if len(batch_unique) < batch_size:
+            xs = [
+                xs[i] if i in batch_unique else None for i in range(batch_size)
+            ]
+
+        (
+            inputs_embeds,
+            attention_mask,
+            label_input_ids,
+        ) = self.llm._get_embeds(instructions, additional_text_context, xs,
+                                 label)
+
+        with self.llm.autocast_context:
+            outputs = self.llm_generator(
+                inputs_embeds=inputs_embeds,
+                attention_mask=attention_mask,
+                return_dict=True,
+                labels=label_input_ids,
+            )
+
+        return outputs.loss
+
+    @torch.no_grad()
+    def inference(
+        self,
+        x: Tensor,
+        edge_index: Tensor,
+        batch: Tensor,
+        edge_attr: Optional[Tensor],
+        smiles: List[str],
+        instructions: List[str],
+        additional_text_context: Optional[List[str]] = None,
+        max_out_tokens: Optional[int] = MAX_NEW_TOKENS,
+    ):
+        x = self.encode(x, edge_index, batch, edge_attr, smiles)
+        x = self.projector(x)
+        xs = x.split(1, dim=0)
+
+        # Handle questions without node features:
+        batch_unique = batch.unique()
+        batch_size = len(instructions)
+        if len(batch_unique) < batch_size:
+            xs = [
+                xs[i] if i in batch_unique else None for i in range(batch_size)
+            ]
+
+        inputs_embeds, attention_mask, _ = self.llm._get_embeds(
+            instructions, additional_text_context, xs)
+
+        bos_token = self.llm.tokenizer(
+            BOS,
+            add_special_tokens=False,
+        ).input_ids[0]
+
+        with self.llm.autocast_context:
+            outputs = self.llm_generator.generate(
+                inputs_embeds=inputs_embeds,
+                max_new_tokens=max_out_tokens,
+                attention_mask=attention_mask,
+                bos_token_id=bos_token,
+                use_cache=True  # Important to set!
+            )
+
+        return self.llm.tokenizer.batch_decode(
+            outputs,
+            skip_special_tokens=True,
+        )
+
+    def __repr__(self) -> str:
+        return (f'{self.__class__.__name__}(\n'
+                f'  llm={self.llm},\n'
+                f'  graph={self.graph_encoder.__class__.__name__},\n'
+                f'  smiles={self.smiles_encoder},\n'
+                f')')
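
For orientation, a minimal usage sketch of the new module (not part of the diff). It assumes a GINEConv encoder whose `nn` MLP ends in a Linear layer, so that `graph_encoder.nn[-1].out_features` in the constructor above resolves, and the SentenceTransformer wrapper added in this release for the SMILES branch; the checkpoint names and feature sizes are illustrative only:

    import torch
    from torch.nn import Linear, ReLU, Sequential

    from torch_geometric.nn import GINEConv
    from torch_geometric.nn.models import MoleculeGPT
    from torch_geometric.nn.nlp import LLM, SentenceTransformer

    llm = LLM(model_name='lmsys/vicuna-7b-v1.5', num_params=7)
    graph_encoder = GINEConv(  # any encoder with a trailing Linear in `.nn`
        nn=Sequential(Linear(9, 32), ReLU(), Linear(32, 32)),
        train_eps=True, edge_dim=3,  # 9 atom / 3 bond features, as in from_smiles()
    )
    smiles_encoder = SentenceTransformer(  # assumed ChemBERTa checkpoint
        model_name='DeepChem/ChemBERTa-77M-MTR',
        pooling_strategy='last_hidden_state',
    )
    model = MoleculeGPT(llm, graph_encoder, smiles_encoder)

    # Training: forward() returns the language-modeling loss:
    # loss = model(x, edge_index, batch, edge_attr, smiles, instructions, label)
    # Generation:
    # preds = model.inference(x, edge_index, batch, edge_attr, smiles, instructions)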
torch_geometric/nn/models/node2vec.py
@@ -173,7 +173,6 @@ class Node2Vec(torch.nn.Module):
         test_z: Tensor,
         test_y: Tensor,
         solver: str = 'lbfgs',
-        multi_class: str = 'auto',
         *args,
         **kwargs,
     ) -> float:
@@ -182,7 +181,7 @@ class Node2Vec(torch.nn.Module):
         """
         from sklearn.linear_model import LogisticRegression

-        clf = LogisticRegression(solver=solver, multi_class=multi_class, *args,
+        clf = LogisticRegression(solver=solver, *args,
                                  **kwargs).fit(train_z.detach().cpu().numpy(),
                                                train_y.detach().cpu().numpy())
         return clf.score(test_z.detach().cpu().numpy(),
torch_geometric/nn/models/schnet.py
@@ -11,6 +11,7 @@ from torch import Tensor
 from torch.nn import Embedding, Linear, ModuleList, Sequential

 from torch_geometric.data import Dataset, download_url, extract_zip
+from torch_geometric.io import fs
 from torch_geometric.nn import MessagePassing, SumAggregation, radius_graph
 from torch_geometric.nn.resolver import aggregation_resolver as aggr_resolver
 from torch_geometric.typing import OptTensor
@@ -216,7 +217,7 @@ class SchNet(torch.nn.Module):

         with warnings.catch_warnings():
             warnings.simplefilter('ignore')
-            state = torch.load(path, map_location='cpu')
+            state = fs.torch_load(path, map_location='cpu')

         net = SchNet(
             hidden_channels=128,
torch_geometric/nn/models/signed_gcn.py
@@ -1,6 +1,5 @@
 from typing import Optional, Tuple

-import scipy.sparse
 import torch
 import torch.nn.functional as F
 from torch import Tensor
@@ -98,6 +97,7 @@ class SignedGCN(torch.nn.Module):
                 :obj:`max_val + 1` of :attr:`pos_edge_index` and
                 :attr:`neg_edge_index`. (default: :obj:`None`)
         """
+        import scipy.sparse as sp
         from sklearn.decomposition import TruncatedSVD

         edge_index = torch.cat([pos_edge_index, neg_edge_index], dim=1)
@@ -119,7 +119,7 @@ class SignedGCN(torch.nn.Module):
         # https://github.com/benedekrozemberczki/SGCN/blob/master/src/utils.py
         edge_index = edge_index.detach().numpy()
         val = val.detach().numpy()
-        A = scipy.sparse.coo_matrix((val, edge_index), shape=(N, N))
+        A = sp.coo_matrix((val, edge_index), shape=(N, N))
         svd = TruncatedSVD(n_components=self.in_channels, n_iter=128)
         svd.fit(A)
         x = svd.components_.T
@@ -256,7 +256,7 @@ class SignedGCN(torch.nn.Module):
         neg_p = self.discriminate(z, neg_edge_index)[:, :2].max(dim=1)[1]
         pred = (1 - torch.cat([pos_p, neg_p])).cpu()
         y = torch.cat(
-            [pred.new_ones((pos_p.size(0))),
+            [pred.new_ones(pos_p.size(0)),
              pred.new_zeros(neg_p.size(0))])
         pred, y = pred.numpy(), y.numpy()

torch_geometric/nn/module_dict.py
@@ -1,4 +1,4 @@
-from typing import Final, Iterable, Mapping, Optional, Set, Tuple, Union
+from typing import Final, Iterable, Mapping, Optional, Tuple, Union

 import torch
 from torch.nn import Module
@@ -11,7 +11,7 @@ Key = Union[str, Tuple[str, ...]]
 # internal representation and converts it back to `.` in the external
 # representation. It also allows passing tuples as keys.
 class ModuleDict(torch.nn.ModuleDict):
-    CLASS_ATTRS: Final[Set[str]] = set(dir(torch.nn.ModuleDict))
+    CLASS_ATTRS: Final[Tuple[str, ...]] = tuple(dir(torch.nn.ModuleDict))

     def __init__(
         self,
torch_geometric/nn/nlp/__init__.py
@@ -0,0 +1,9 @@
+from .sentence_transformer import SentenceTransformer
+from .vision_transformer import VisionTransformer
+from .llm import LLM
+
+__all__ = classes = [
+    'SentenceTransformer',
+    'VisionTransformer',
+    'LLM',
+]
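
The new package exposes all three wrappers from a single import path, which the GNN+LLM models elsewhere in this release (e.g., MoleculeGPT above) build on:

    from torch_geometric.nn.nlp import LLM, SentenceTransformer, VisionTransformer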
torch_geometric/nn/nlp/llm.py
@@ -0,0 +1,322 @@
+import warnings
+from contextlib import nullcontext
+from typing import Any, Dict, List, Optional
+
+import torch
+from torch import Tensor
+
+try:
+    from transformers.tokenization_utils_base import BatchEncoding
+except ImportError:
+    BatchEncoding = Dict
+
+BOS = '<s>[INST]'
+EOS_USER = '[/INST]'
+EOS = '[/s]'
+IGNORE_INDEX = -100
+MAX_TXT_LEN = 512
+MAX_NEW_TOKENS = 32
+PAD_TOKEN_ID = 0
+PADDING_SIDE = 'left'
+
+
+def get_llm_kwargs(required_memory: int, dtype=torch.dtype) -> Dict[str, Any]:
+    torch.cuda.empty_cache()
+
+    gpu_memory: List[int] = []
+    for i in range(torch.cuda.device_count()):
+        gpu_memory.append(torch.cuda.mem_get_info(i)[0] // 1024**3)
+        # Use the minimum number of GPUs to fit the LLM on.
+        if sum(gpu_memory) >= required_memory:
+            break
+
+    if sum(gpu_memory) < required_memory:
+        gpu_memory = []  # If not enough VRAM, use pure CPU.
+
+    kwargs = dict(revision='main')
+    if len(gpu_memory) > 0:
+        kwargs['max_memory'] = {
+            i: f'{memory}GiB'
+            for i, memory in enumerate(gpu_memory)
+        }
+        kwargs['low_cpu_mem_usage'] = True
+        kwargs['device_map'] = 'auto'
+        kwargs['torch_dtype'] = dtype
+
+    return kwargs
+
+
+class LLM(torch.nn.Module):
+    r"""A wrapper around a Large Language Model (LLM) from HuggingFace.
+
+    model_name (str): The HuggingFace model name, *e.g.*, :obj:`"llama2"` or
+        :obj:`"gemma"`.
+    num_params (int): An integer representing how many parameters the
+        HuggingFace model has, in billions. This is used to automatically
+        allocate the correct number of GPUs needed, given the available GPU
+        memory of your GPUs.
+    dtype (torch.dtype, optional): The data type to use for the LLM.
+        (default: :obj:`torch.bfloat16`)
+    """
+    def __init__(
+        self,
+        model_name: str,
+        num_params: int,
+        dtype=torch.bfloat16,
+    ) -> None:
+        super().__init__()
+
+        self.model_name = model_name
+
+        from transformers import AutoModelForCausalLM, AutoTokenizer
+
+        # A rough heuristic on GPU memory requirements, e.g., we found that
+        # LLAMA2 (7B parameters) fits on a 85GB GPU.
+        required_memory = 85 * num_params / 7
+        kwargs = get_llm_kwargs(required_memory, dtype)
+
+        print(f"Setting up '{model_name}' with configuration: {kwargs}")
+        self.tokenizer = AutoTokenizer.from_pretrained(
+            model_name,
+            use_fast=False,
+        )
+        self.tokenizer.pad_token_id = PAD_TOKEN_ID
+        self.tokenizer.padding_side = PADDING_SIDE
+        self.llm = AutoModelForCausalLM.from_pretrained(model_name, **kwargs)
+        self.word_embedding = self.llm.model.get_input_embeddings()
+
+        if 'max_memory' not in kwargs:  # Pure CPU:
+            warnings.warn("LLM is being used on CPU, which may be slow")
+            self.device = torch.device('cpu')
+            self.autocast_context = nullcontext()
+        else:
+            self.device = self.llm.device
+            self.autocast_context = torch.amp.autocast('cuda', dtype=dtype)
+
+    def _encode_inputs(
+        self,
+        question: List[str],
+        context: Optional[List[str]] = None,
+    ) -> tuple:
+        batch_size = len(question)
+        questions = self.tokenizer(question, add_special_tokens=False)
+        if context is not None:
+            context = self.tokenizer(context, add_special_tokens=False)
+
+        eos_user_tokens = self.tokenizer(EOS_USER, add_special_tokens=False)
+        bos_token = self.tokenizer(
+            BOS,
+            add_special_tokens=False,
+            return_tensors='pt',
+        ).input_ids[0].to(self.device)
+        bos_embeds = self.word_embedding(bos_token)
+        pad_token = torch.tensor(self.tokenizer.pad_token_id,
+                                 device=self.device)
+        pad_embeds = self.word_embedding(pad_token).unsqueeze(0)
+        return (batch_size, questions, context, eos_user_tokens, bos_embeds,
+                pad_embeds)
+
+    def _label_input_ids(
+        self,
+        i: int,
+        label: BatchEncoding,
+        eos_tokens: BatchEncoding,
+    ) -> List[int]:
+        label_input_ids = label.input_ids[i][:MAX_NEW_TOKENS]
+        label_input_ids = label_input_ids + eos_tokens.input_ids
+        return label_input_ids
+
+    def _input_ids(
+        self,
+        i: int,
+        context: BatchEncoding,
+        question: BatchEncoding,
+        eos_user_tokens: BatchEncoding,
+    ) -> List[int]:
+        input_ids: List[int] = []
+        if context is not None:
+            input_ids += context.input_ids[i][:MAX_TXT_LEN]
+        input_ids += question.input_ids[i]
+        input_ids += eos_user_tokens.input_ids
+        return input_ids
+
+    def _inputs_embeds(
+        self,
+        i: int,
+        input_ids: List[int],
+        bos_embeds: Tensor,
+        embedding: Optional[List[Tensor]] = None,
+    ) -> Tensor:
+        inputs_embeds = self.word_embedding(
+            torch.tensor(input_ids, device=self.device))
+
+        to_cat = [bos_embeds]
+        if embedding is not None and embedding[i] is not None:
+            to_cat.append(embedding[i])
+        to_cat.append(inputs_embeds)
+        return torch.cat(to_cat, dim=0).to(self.device)
+
+    def _append_embeds(
+        self,
+        inputs_embeds: Tensor,
+        batch_inputs_embeds: List[Tensor],
+        batch_attention_mask: List[List[int]],
+        label_input_ids: List[int] = None,
+        batch_label_input_ids: Optional[List[List[int]]] = None,
+    ) -> tuple:
+        batch_inputs_embeds.append(inputs_embeds)
+        batch_attention_mask.append([1] * inputs_embeds.size(0))
+        if label_input_ids is not None:
+            pad = inputs_embeds.size(0) - len(label_input_ids)
+            label_input_ids = [IGNORE_INDEX] * pad + label_input_ids
+            batch_label_input_ids.append(label_input_ids)
+        return batch_inputs_embeds, batch_attention_mask, batch_label_input_ids
+
+    def _pad_embeds(
+        self,
+        pad_embeds: Tensor,
+        batch_inputs_embeds: List[Tensor],
+        batch_attention_mask: List[List[int]],
+        batch_label_input_ids: Optional[List[List[int]]] = None,
+    ) -> tuple:
+        max_length = max([x.size(0) for x in batch_inputs_embeds])
+        batch_size = len(batch_inputs_embeds)
+        for i in range(batch_size):
+            pad = max_length - batch_inputs_embeds[i].size(0)
+            batch_inputs_embeds[i] = torch.cat([
+                pad_embeds.repeat(pad, 1),
+                batch_inputs_embeds[i],
+            ])
+            batch_attention_mask[i] = [0] * pad + batch_attention_mask[i]
+            if batch_label_input_ids is not None:
+                tmp = [IGNORE_INDEX] * pad + batch_label_input_ids[i]
+                batch_label_input_ids[i] = tmp
+        inputs_embeds = torch.stack(batch_inputs_embeds, dim=0)
+        attention_mask = torch.tensor(batch_attention_mask, device=self.device)
+        label_input_ids = None
+        if batch_label_input_ids is not None:
+            label_input_ids = torch.tensor(batch_label_input_ids,
+                                           device=self.device)
+        return inputs_embeds, attention_mask, label_input_ids
+
+    def _get_embeds(
+        self,
+        question: List[str],
+        context: Optional[List[str]] = None,
+        embedding: Optional[List[Tensor]] = None,
+        answer: Optional[List[str]] = None,
+    ) -> tuple:
+        (batch_size, question, context, eos_user_tokens, bos_embeds,
+         pad_embeds) = self._encode_inputs(question, context)
+
+        batch_label_input_ids = None
+        if answer is not None:
+            label = self.tokenizer(answer, add_special_tokens=False)
+            eos_tokens = self.tokenizer(EOS, add_special_tokens=False)
+            batch_label_input_ids = []
+
+        batch_inputs_embeds = []
+        batch_attention_mask = []
+        for i in range(batch_size):
+            input_ids = self._input_ids(i, context, question, eos_user_tokens)
+            if answer is not None:
+                label_input_ids = self._label_input_ids(i, label, eos_tokens)
+                input_ids += label_input_ids
+            else:
+                label_input_ids = None
+
+            inputs_embeds = self._inputs_embeds(i, input_ids, bos_embeds,
+                                                embedding)
+
+            (
+                batch_inputs_embeds,
+                batch_attention_mask,
+                batch_label_input_ids,
+            ) = self._append_embeds(
+                inputs_embeds,
+                batch_inputs_embeds,
+                batch_attention_mask,
+                label_input_ids,
+                batch_label_input_ids,
+            )
+
+        inputs_embeds, attention_mask, label_input_ids = self._pad_embeds(
+            pad_embeds, batch_inputs_embeds, batch_attention_mask,
+            batch_label_input_ids)
+
+        return inputs_embeds, attention_mask, label_input_ids
+
+    def forward(
+        self,
+        question: List[str],
+        answer: List[str],
+        context: Optional[List[str]] = None,
+        embedding: Optional[List[Tensor]] = None,
+    ) -> Tensor:
+        r"""The forward pass.
+
+        Args:
+            question (list[str]): The questions/prompts.
+            answer (list[str]): The answers/labels.
+            context (list[str], optional): Additional context to give to the
+                LLM, such as textified knowledge graphs. (default: :obj:`None`)
+            embedding (list[torch.Tensor], optional): RAG embedding
+                tensors, *i.e.* the embedded form of :obj:`context`. Either
+                :obj:`context` or :obj:`embedding` should be used, not
+                both. (default: :obj:`None`)
+        """
+        inputs_embeds, attention_mask, label_input_ids = self._get_embeds(
+            question, context, embedding, answer)
+
+        with self.autocast_context:
+            outputs = self.llm(
+                inputs_embeds=inputs_embeds,
+                attention_mask=attention_mask,
+                return_dict=True,
+                labels=label_input_ids,
+            )
+        return outputs.loss
+
+    @torch.no_grad()
+    def inference(
+        self,
+        question: List[str],
+        context: Optional[List[str]] = None,
+        embedding: Optional[List[Tensor]] = None,
+        max_tokens: Optional[int] = MAX_NEW_TOKENS,
+    ) -> List[str]:
+        r"""The inference pass.
+
+        Args:
+            question (list[str]): The questions/prompts.
+            context (list[str], optional): Additional context to give to the
+                LLM, such as textified knowledge graphs. (default: :obj:`None`)
+            embedding (list[torch.Tensor], optional): RAG embedding
+                tensors, *i.e.* the embedded form of :obj:`context`. Either
+                :obj:`context` or :obj:`embedding` should be used, not
+                both. (default: :obj:`None`)
+            max_tokens (int, optional): How many tokens for the LLM to
+                generate. (default: :obj:`32`)
+        """
+        inputs_embeds, attention_mask, _ = self._get_embeds(
+            question, context, embedding)
+
+        bos_token = self.tokenizer(
+            BOS,
+            add_special_tokens=False,
+        ).input_ids[0]
+
+        with self.autocast_context:
+            outputs = self.llm.generate(
+                inputs_embeds=inputs_embeds,
+                bos_token_id=bos_token,
+                max_new_tokens=max_tokens,
+                attention_mask=attention_mask,
+                use_cache=True,
+            )
+
+        return self.tokenizer.batch_decode(outputs, skip_special_tokens=True)
+
+    def __repr__(self) -> str:
+        return f'{self.__class__.__name__}({self.model_name})'
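
A minimal end-to-end sketch of the wrapper (illustrative; any HuggingFace causal LM name should work, and the 1B-parameter TinyLlama checkpoint here is just an example):

    from torch_geometric.nn.nlp import LLM

    llm = LLM(model_name='TinyLlama/TinyLlama-1.1B-Chat-v0.1', num_params=1)

    question = ['What is the capital of France?']
    answer = ['Paris']

    loss = llm(question, answer)  # teacher-forced LM loss for fine-tuning
    loss.backward()

    preds = llm.inference(question)  # List[str] of generated answers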