pyg-nightly 2.6.0.dev20240318__py3-none-any.whl → 2.7.0.dev20250115__py3-none-any.whl
- {pyg_nightly-2.6.0.dev20240318.dist-info → pyg_nightly-2.7.0.dev20250115.dist-info}/METADATA +31 -47
- {pyg_nightly-2.6.0.dev20240318.dist-info → pyg_nightly-2.7.0.dev20250115.dist-info}/RECORD +226 -199
- {pyg_nightly-2.6.0.dev20240318.dist-info → pyg_nightly-2.7.0.dev20250115.dist-info}/WHEEL +1 -1
- torch_geometric/__init__.py +28 -1
- torch_geometric/_compile.py +8 -1
- torch_geometric/_onnx.py +14 -0
- torch_geometric/config_mixin.py +113 -0
- torch_geometric/config_store.py +28 -19
- torch_geometric/data/__init__.py +24 -1
- torch_geometric/data/batch.py +2 -2
- torch_geometric/data/collate.py +8 -2
- torch_geometric/data/data.py +16 -8
- torch_geometric/data/database.py +61 -15
- torch_geometric/data/dataset.py +14 -6
- torch_geometric/data/feature_store.py +25 -42
- torch_geometric/data/graph_store.py +1 -5
- torch_geometric/data/hetero_data.py +18 -9
- torch_geometric/data/in_memory_dataset.py +2 -4
- torch_geometric/data/large_graph_indexer.py +677 -0
- torch_geometric/data/lightning/datamodule.py +4 -4
- torch_geometric/data/separate.py +6 -1
- torch_geometric/data/storage.py +17 -7
- torch_geometric/data/summary.py +14 -4
- torch_geometric/data/temporal.py +1 -2
- torch_geometric/datasets/__init__.py +17 -2
- torch_geometric/datasets/actor.py +9 -11
- torch_geometric/datasets/airfrans.py +15 -18
- torch_geometric/datasets/airports.py +10 -12
- torch_geometric/datasets/amazon.py +8 -11
- torch_geometric/datasets/amazon_book.py +9 -10
- torch_geometric/datasets/amazon_products.py +9 -10
- torch_geometric/datasets/aminer.py +8 -9
- torch_geometric/datasets/aqsol.py +10 -13
- torch_geometric/datasets/attributed_graph_dataset.py +10 -12
- torch_geometric/datasets/ba_multi_shapes.py +10 -12
- torch_geometric/datasets/ba_shapes.py +5 -6
- torch_geometric/datasets/bitcoin_otc.py +1 -1
- torch_geometric/datasets/brca_tgca.py +1 -1
- torch_geometric/datasets/cornell.py +145 -0
- torch_geometric/datasets/dblp.py +2 -1
- torch_geometric/datasets/dbp15k.py +2 -2
- torch_geometric/datasets/fake.py +1 -3
- torch_geometric/datasets/flickr.py +2 -1
- torch_geometric/datasets/freebase.py +1 -1
- torch_geometric/datasets/gdelt_lite.py +3 -2
- torch_geometric/datasets/ged_dataset.py +3 -2
- torch_geometric/datasets/git_mol_dataset.py +263 -0
- torch_geometric/datasets/gnn_benchmark_dataset.py +11 -10
- torch_geometric/datasets/hgb_dataset.py +8 -8
- torch_geometric/datasets/imdb.py +2 -1
- torch_geometric/datasets/karate.py +3 -2
- torch_geometric/datasets/last_fm.py +2 -1
- torch_geometric/datasets/linkx_dataset.py +4 -3
- torch_geometric/datasets/lrgb.py +3 -5
- torch_geometric/datasets/malnet_tiny.py +4 -3
- torch_geometric/datasets/mnist_superpixels.py +2 -3
- torch_geometric/datasets/molecule_gpt_dataset.py +485 -0
- torch_geometric/datasets/molecule_net.py +15 -3
- torch_geometric/datasets/motif_generator/base.py +0 -1
- torch_geometric/datasets/neurograph.py +1 -3
- torch_geometric/datasets/ogb_mag.py +1 -1
- torch_geometric/datasets/opf.py +239 -0
- torch_geometric/datasets/ose_gvcs.py +1 -1
- torch_geometric/datasets/pascal.py +11 -9
- torch_geometric/datasets/pascal_pf.py +1 -1
- torch_geometric/datasets/pcpnet_dataset.py +1 -1
- torch_geometric/datasets/pcqm4m.py +10 -3
- torch_geometric/datasets/ppi.py +1 -1
- torch_geometric/datasets/qm9.py +8 -7
- torch_geometric/datasets/rcdd.py +4 -4
- torch_geometric/datasets/reddit.py +2 -1
- torch_geometric/datasets/reddit2.py +2 -1
- torch_geometric/datasets/rel_link_pred_dataset.py +3 -3
- torch_geometric/datasets/s3dis.py +5 -3
- torch_geometric/datasets/shapenet.py +3 -3
- torch_geometric/datasets/shrec2016.py +2 -2
- torch_geometric/datasets/snap_dataset.py +7 -1
- torch_geometric/datasets/tag_dataset.py +350 -0
- torch_geometric/datasets/upfd.py +2 -1
- torch_geometric/datasets/web_qsp_dataset.py +246 -0
- torch_geometric/datasets/webkb.py +2 -2
- torch_geometric/datasets/wikics.py +1 -1
- torch_geometric/datasets/wikidata.py +3 -2
- torch_geometric/datasets/wikipedia_network.py +2 -2
- torch_geometric/datasets/willow_object_class.py +1 -1
- torch_geometric/datasets/word_net.py +2 -2
- torch_geometric/datasets/yelp.py +2 -1
- torch_geometric/datasets/zinc.py +1 -1
- torch_geometric/device.py +42 -0
- torch_geometric/distributed/local_feature_store.py +3 -2
- torch_geometric/distributed/local_graph_store.py +2 -1
- torch_geometric/distributed/partition.py +9 -8
- torch_geometric/edge_index.py +616 -438
- torch_geometric/explain/algorithm/base.py +0 -1
- torch_geometric/explain/algorithm/graphmask_explainer.py +1 -2
- torch_geometric/explain/algorithm/pg_explainer.py +1 -1
- torch_geometric/explain/explanation.py +2 -2
- torch_geometric/graphgym/checkpoint.py +2 -1
- torch_geometric/graphgym/logger.py +4 -4
- torch_geometric/graphgym/loss.py +1 -1
- torch_geometric/graphgym/utils/agg_runs.py +6 -6
- torch_geometric/index.py +826 -0
- torch_geometric/inspector.py +13 -7
- torch_geometric/io/fs.py +28 -2
- torch_geometric/io/npz.py +2 -1
- torch_geometric/io/off.py +2 -2
- torch_geometric/io/sdf.py +2 -2
- torch_geometric/io/tu.py +4 -5
- torch_geometric/loader/__init__.py +4 -0
- torch_geometric/loader/cluster.py +10 -4
- torch_geometric/loader/graph_saint.py +2 -1
- torch_geometric/loader/ibmb_loader.py +12 -4
- torch_geometric/loader/mixin.py +1 -1
- torch_geometric/loader/neighbor_loader.py +1 -1
- torch_geometric/loader/neighbor_sampler.py +2 -2
- torch_geometric/loader/prefetch.py +1 -1
- torch_geometric/loader/rag_loader.py +107 -0
- torch_geometric/loader/utils.py +8 -7
- torch_geometric/loader/zip_loader.py +10 -0
- torch_geometric/metrics/__init__.py +11 -2
- torch_geometric/metrics/link_pred.py +317 -65
- torch_geometric/nn/aggr/__init__.py +4 -0
- torch_geometric/nn/aggr/attention.py +0 -2
- torch_geometric/nn/aggr/base.py +3 -5
- torch_geometric/nn/aggr/patch_transformer.py +143 -0
- torch_geometric/nn/aggr/set_transformer.py +1 -1
- torch_geometric/nn/aggr/variance_preserving.py +33 -0
- torch_geometric/nn/attention/__init__.py +5 -1
- torch_geometric/nn/attention/qformer.py +71 -0
- torch_geometric/nn/conv/collect.jinja +7 -4
- torch_geometric/nn/conv/cugraph/base.py +8 -12
- torch_geometric/nn/conv/edge_conv.py +3 -2
- torch_geometric/nn/conv/fused_gat_conv.py +1 -1
- torch_geometric/nn/conv/gat_conv.py +35 -7
- torch_geometric/nn/conv/gatv2_conv.py +36 -6
- torch_geometric/nn/conv/general_conv.py +1 -1
- torch_geometric/nn/conv/graph_conv.py +21 -3
- torch_geometric/nn/conv/gravnet_conv.py +3 -2
- torch_geometric/nn/conv/hetero_conv.py +3 -3
- torch_geometric/nn/conv/hgt_conv.py +1 -1
- torch_geometric/nn/conv/message_passing.py +138 -87
- torch_geometric/nn/conv/mixhop_conv.py +1 -1
- torch_geometric/nn/conv/propagate.jinja +9 -1
- torch_geometric/nn/conv/rgcn_conv.py +5 -5
- torch_geometric/nn/conv/spline_conv.py +4 -4
- torch_geometric/nn/conv/x_conv.py +3 -2
- torch_geometric/nn/dense/linear.py +11 -6
- torch_geometric/nn/fx.py +3 -3
- torch_geometric/nn/model_hub.py +3 -1
- torch_geometric/nn/models/__init__.py +10 -2
- torch_geometric/nn/models/deep_graph_infomax.py +1 -2
- torch_geometric/nn/models/dimenet_utils.py +5 -7
- torch_geometric/nn/models/g_retriever.py +230 -0
- torch_geometric/nn/models/git_mol.py +336 -0
- torch_geometric/nn/models/glem.py +385 -0
- torch_geometric/nn/models/gnnff.py +0 -1
- torch_geometric/nn/models/graph_unet.py +12 -3
- torch_geometric/nn/models/jumping_knowledge.py +63 -4
- torch_geometric/nn/models/lightgcn.py +1 -1
- torch_geometric/nn/models/metapath2vec.py +5 -5
- torch_geometric/nn/models/molecule_gpt.py +222 -0
- torch_geometric/nn/models/node2vec.py +2 -3
- torch_geometric/nn/models/schnet.py +2 -1
- torch_geometric/nn/models/signed_gcn.py +3 -3
- torch_geometric/nn/module_dict.py +2 -2
- torch_geometric/nn/nlp/__init__.py +9 -0
- torch_geometric/nn/nlp/llm.py +329 -0
- torch_geometric/nn/nlp/sentence_transformer.py +134 -0
- torch_geometric/nn/nlp/vision_transformer.py +33 -0
- torch_geometric/nn/norm/batch_norm.py +1 -1
- torch_geometric/nn/parameter_dict.py +2 -2
- torch_geometric/nn/pool/__init__.py +21 -5
- torch_geometric/nn/pool/cluster_pool.py +145 -0
- torch_geometric/nn/pool/connect/base.py +0 -1
- torch_geometric/nn/pool/edge_pool.py +1 -1
- torch_geometric/nn/pool/graclus.py +4 -2
- torch_geometric/nn/pool/pool.py +8 -2
- torch_geometric/nn/pool/select/base.py +0 -1
- torch_geometric/nn/pool/voxel_grid.py +3 -2
- torch_geometric/nn/resolver.py +1 -1
- torch_geometric/nn/sequential.jinja +10 -23
- torch_geometric/nn/sequential.py +204 -78
- torch_geometric/nn/summary.py +1 -1
- torch_geometric/nn/to_hetero_with_bases_transformer.py +19 -19
- torch_geometric/profile/__init__.py +2 -0
- torch_geometric/profile/nvtx.py +66 -0
- torch_geometric/profile/profiler.py +30 -19
- torch_geometric/resolver.py +1 -1
- torch_geometric/sampler/base.py +34 -13
- torch_geometric/sampler/neighbor_sampler.py +11 -10
- torch_geometric/sampler/utils.py +1 -1
- torch_geometric/template.py +1 -0
- torch_geometric/testing/__init__.py +6 -2
- torch_geometric/testing/decorators.py +56 -22
- torch_geometric/testing/feature_store.py +1 -1
- torch_geometric/transforms/__init__.py +2 -0
- torch_geometric/transforms/add_metapaths.py +5 -5
- torch_geometric/transforms/add_positional_encoding.py +1 -1
- torch_geometric/transforms/delaunay.py +65 -14
- torch_geometric/transforms/face_to_edge.py +32 -3
- torch_geometric/transforms/gdc.py +7 -6
- torch_geometric/transforms/laplacian_lambda_max.py +3 -3
- torch_geometric/transforms/mask.py +5 -1
- torch_geometric/transforms/node_property_split.py +1 -2
- torch_geometric/transforms/pad.py +7 -6
- torch_geometric/transforms/random_link_split.py +1 -1
- torch_geometric/transforms/remove_self_loops.py +36 -0
- torch_geometric/transforms/svd_feature_reduction.py +1 -1
- torch_geometric/transforms/to_sparse_tensor.py +1 -1
- torch_geometric/transforms/two_hop.py +1 -1
- torch_geometric/transforms/virtual_node.py +2 -1
- torch_geometric/typing.py +43 -6
- torch_geometric/utils/__init__.py +5 -1
- torch_geometric/utils/_negative_sampling.py +1 -1
- torch_geometric/utils/_normalize_edge_index.py +46 -0
- torch_geometric/utils/_scatter.py +38 -12
- torch_geometric/utils/_subgraph.py +4 -0
- torch_geometric/utils/_tree_decomposition.py +2 -2
- torch_geometric/utils/augmentation.py +1 -1
- torch_geometric/utils/convert.py +12 -8
- torch_geometric/utils/geodesic.py +24 -22
- torch_geometric/utils/hetero.py +1 -1
- torch_geometric/utils/map.py +8 -2
- torch_geometric/utils/smiles.py +65 -27
- torch_geometric/utils/sparse.py +39 -25
- torch_geometric/visualization/graph.py +3 -4
torch_geometric/nn/models/molecule_gpt.py
@@ -0,0 +1,222 @@
+from typing import List, Optional
+
+import torch
+from torch import Tensor
+
+from torch_geometric.nn.attention import QFormer
+from torch_geometric.nn.nlp.llm import BOS, LLM, MAX_NEW_TOKENS
+from torch_geometric.utils import to_dense_batch
+
+
+def pad_or_truncate(embeddings: Tensor, max_seq_len: int,
+                    padding_value: int = 0) -> Tensor:
+    batch_size, current_seq_len, d = embeddings.size()
+
+    if current_seq_len > max_seq_len:
+        return embeddings[:, :max_seq_len, :]
+    elif current_seq_len < max_seq_len:
+        pad_tensor = torch.full((batch_size, max_seq_len - current_seq_len, d),
+                                padding_value, dtype=embeddings.dtype,
+                                device=embeddings.device)
+        return torch.cat([embeddings, pad_tensor], dim=1)
+    else:
+        return embeddings
+
+
+class MoleculeGPT(torch.nn.Module):
+    r"""The MoleculeGPT model from the `"MoleculeGPT: Instruction
+    Following Large Language Models for Molecular Property Prediction"
+    <https://ai4d3.github.io/papers/34.pdf>`_ paper.
+
+    Args:
+        llm (LLM): The LLM to use.
+        graph_encoder (torch.nn.Module): Encode 2D molecule graph.
+        smiles_encoder (torch.nn.Module): Encode 1D SMILES.
+        mlp_out_channels (int, optional): The size of each embedding
+            after qformer encoding. (default: :obj:`32`)
+        max_tokens (int, optional): Max output tokens of 1D/2D encoder.
+            (default: :obj:`20`)
+
+    .. warning::
+        This module has been tested with the following HuggingFace models
+
+        * :obj:`llm_to_use="lmsys/vicuna-7b-v1.5"`
+
+        and may not work with other models. See other models at `HuggingFace
+        Models <https://huggingface.co/models>`_ and let us know if you
+        encounter any issues.
+
+    .. note::
+        For an example of using :class:`MoleculeGPT`, see
+        `examples/llm/molecule_gpt.py <https://github.com/pyg-team/
+        pytorch_geometric/blob/master/examples/llm/molecule_gpt.py>`_.
+    """
+    def __init__(
+        self,
+        llm: LLM,
+        graph_encoder: torch.nn.Module,
+        smiles_encoder: torch.nn.Module,
+        mlp_out_channels: int = 32,
+        max_tokens: Optional[int] = 20,
+    ) -> None:
+        super().__init__()
+        self.llm = llm
+        self.graph_encoder = graph_encoder.to(self.llm.device)
+        self.smiles_encoder = smiles_encoder.to(self.llm.device)
+
+        self.graph_qformer = QFormer(
+            input_dim=self.graph_encoder.nn[-1].out_features,
+            hidden_dim=mlp_out_channels,
+            output_dim=mlp_out_channels,
+            num_heads=4,
+            num_layers=2,
+        ).to(self.llm.device)
+
+        self.smiles_qformer = QFormer(
+            input_dim=self.smiles_encoder.model.pooler.dense.out_features,
+            hidden_dim=mlp_out_channels,
+            output_dim=mlp_out_channels,
+            num_heads=4,
+            num_layers=2,
+        ).to(self.llm.device)
+
+        self.max_tokens = max_tokens
+
+        self.word_embedding = self.llm.word_embedding
+        self.llm_generator = self.llm.llm
+
+        # LLMs
+        in_dim = 2 * mlp_out_channels * max_tokens
+        out_dim = self.llm.llm.model.embed_tokens.embedding_dim
+        self.projector = torch.nn.Sequential(
+            torch.nn.Linear(in_dim, in_dim),
+            torch.nn.Sigmoid(),
+            torch.nn.Linear(in_dim, out_dim),
+        ).to(self.llm.device)
+
+    def encode(
+        self,
+        x: Tensor,
+        edge_index: Tensor,
+        batch: Tensor,
+        edge_attr: Optional[Tensor],
+        smiles: List[str],
+    ) -> Tensor:
+        batch_size = len(smiles)
+        # 2D Graph Branch: [bs, node_len, d]
+        x = x.to(self.llm.device)
+        edge_index = edge_index.to(self.llm.device)
+        if edge_attr is not None:
+            edge_attr = edge_attr.to(self.llm.device)
+        batch = batch.to(self.llm.device)
+
+        x_graph = self.graph_encoder(x, edge_index, edge_attr=edge_attr)
+        x_graph = to_dense_batch(x_graph, batch)[0]
+        out_graph = self.graph_qformer(x_graph)
+        out_graph = pad_or_truncate(out_graph, max_seq_len=self.max_tokens,
+                                    padding_value=0)
+        out_graph = out_graph.view(batch_size, -1)
+
+        # 1D SMILES Branch: [bs, seq_len, d]
+        x_smiles = self.smiles_encoder.encode(smiles,
+                                              output_device=self.llm.device)
+        out_smiles = self.smiles_qformer(x_smiles)
+        out_smiles = pad_or_truncate(out_smiles, max_seq_len=self.max_tokens,
+                                     padding_value=0)
+        out_smiles = out_smiles.view(batch_size, -1)
+
+        # Merge into LLMs
+        x_cat = torch.cat([out_graph, out_smiles], dim=1)
+        return x_cat
+
+    def forward(
+        self,
+        x: Tensor,
+        edge_index: Tensor,
+        batch: Tensor,
+        edge_attr: Optional[Tensor],
+        smiles: List[str],
+        instructions: List[str],
+        label: List[str],
+        additional_text_context: Optional[List[str]] = None,
+    ):
+        x = self.encode(x, edge_index, batch, edge_attr, smiles)
+        x = self.projector(x)
+        xs = x.split(1, dim=0)
+
+        batch_unique = batch.unique()
+        batch_size = len(instructions)
+        if len(batch_unique) < batch_size:
+            xs = [
+                xs[i] if i in batch_unique else None for i in range(batch_size)
+            ]
+
+        (
+            inputs_embeds,
+            attention_mask,
+            label_input_ids,
+        ) = self.llm._get_embeds(instructions, additional_text_context, xs,
+                                 label)
+
+        with self.llm.autocast_context:
+            outputs = self.llm_generator(
+                inputs_embeds=inputs_embeds,
+                attention_mask=attention_mask,
+                return_dict=True,
+                labels=label_input_ids,
+            )
+
+        return outputs.loss
+
+    @torch.no_grad()
+    def inference(
+        self,
+        x: Tensor,
+        edge_index: Tensor,
+        batch: Tensor,
+        edge_attr: Optional[Tensor],
+        smiles: List[str],
+        instructions: List[str],
+        additional_text_context: Optional[List[str]] = None,
+        max_out_tokens: Optional[int] = MAX_NEW_TOKENS,
+    ):
+        x = self.encode(x, edge_index, batch, edge_attr, smiles)
+        x = self.projector(x)
+        xs = x.split(1, dim=0)
+
+        # Handle questions without node features:
+        batch_unique = batch.unique()
+        batch_size = len(instructions)
+        if len(batch_unique) < batch_size:
+            xs = [
+                xs[i] if i in batch_unique else None for i in range(batch_size)
+            ]
+
+        inputs_embeds, attention_mask, _ = self.llm._get_embeds(
+            instructions, additional_text_context, xs)
+
+        bos_token = self.llm.tokenizer(
+            BOS,
+            add_special_tokens=False,
+        ).input_ids[0]
+
+        with self.llm.autocast_context:
+            outputs = self.llm_generator.generate(
+                inputs_embeds=inputs_embeds,
+                max_new_tokens=max_out_tokens,
+                attention_mask=attention_mask,
+                bos_token_id=bos_token,
+                use_cache=True  # Important to set!
+            )
+
+        return self.llm.tokenizer.batch_decode(
+            outputs,
+            skip_special_tokens=True,
+        )
+
+    def __repr__(self) -> str:
+        return (f'{self.__class__.__name__}(\n'
+                f'  llm={self.llm},\n'
+                f'  graph={self.graph_encoder.__class__.__name__},\n'
+                f'  smiles={self.smiles_encoder},\n'
+                f')')
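A minimal construction sketch of the new module, under stated assumptions: MoleculeGPT sizes its Q-Formers from `graph_encoder.nn[-1].out_features` and `smiles_encoder.model.pooler.dense.out_features`, so the stand-in graph encoder below exposes a `nn` container, and a pooler-style SMILES encoder (here PyG's `SentenceTransformer` over a BERT-family checkpoint) is assumed; the model names and the encoder itself are illustrative, not the only supported setup.

import torch

from torch_geometric.nn.models import MoleculeGPT
from torch_geometric.nn.nlp import LLM, SentenceTransformer


class TinyGraphEncoder(torch.nn.Module):
    # Illustrative stand-in: any GNN works as long as it exposes a
    # `nn` container whose last layer has `out_features`.
    def __init__(self, in_dim: int = 9, hidden: int = 64):
        super().__init__()
        self.nn = torch.nn.Sequential(
            torch.nn.Linear(in_dim, hidden),
            torch.nn.ReLU(),
            torch.nn.Linear(hidden, hidden),
        )

    def forward(self, x, edge_index, edge_attr=None):
        return self.nn(x)  # Node-wise MLP in place of message passing.


llm = LLM(model_name='lmsys/vicuna-7b-v1.5', num_params=7)
model = MoleculeGPT(
    llm=llm,
    graph_encoder=TinyGraphEncoder(),
    smiles_encoder=SentenceTransformer('DeepChem/ChemBERTa-77M-MTR'),
)
# forward() returns the LM loss; inference() returns decoded strings.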
torch_geometric/nn/models/node2vec.py
@@ -5,10 +5,10 @@ from torch import Tensor
 from torch.nn import Embedding
 from torch.utils.data import DataLoader
 
+from torch_geometric.index import index2ptr
 from torch_geometric.typing import WITH_PYG_LIB, WITH_TORCH_CLUSTER
 from torch_geometric.utils import sort_edge_index
 from torch_geometric.utils.num_nodes import maybe_num_nodes
-from torch_geometric.utils.sparse import index2ptr
 
 
 class Node2Vec(torch.nn.Module):
@@ -173,7 +173,6 @@ class Node2Vec(torch.nn.Module):
         test_z: Tensor,
         test_y: Tensor,
         solver: str = 'lbfgs',
-        multi_class: str = 'auto',
         *args,
         **kwargs,
     ) -> float:
@@ -182,7 +181,7 @@ class Node2Vec(torch.nn.Module):
         """
         from sklearn.linear_model import LogisticRegression
 
-        clf = LogisticRegression(solver=solver, multi_class=multi_class, *args,
+        clf = LogisticRegression(solver=solver, *args,
                                  **kwargs).fit(train_z.detach().cpu().numpy(),
                                                train_y.detach().cpu().numpy())
         return clf.score(test_z.detach().cpu().numpy(),
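Background on the `test()` change above: scikit-learn deprecated the `multi_class` argument of `LogisticRegression`, so the wrapper no longer accepts or forwards it; any other classifier options still flow through `*args`/`**kwargs`. A sketch of the updated call, with illustrative model and data names:

# `multi_class` is gone from the signature; everything else forwards
# to sklearn's LogisticRegression unchanged.
acc = model.test(
    z[data.train_mask], data.y[data.train_mask],
    z[data.test_mask], data.y[data.test_mask],
    solver='lbfgs', max_iter=150,
)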
torch_geometric/nn/models/schnet.py
@@ -11,6 +11,7 @@ from torch import Tensor
 from torch.nn import Embedding, Linear, ModuleList, Sequential
 
 from torch_geometric.data import Dataset, download_url, extract_zip
+from torch_geometric.io import fs
 from torch_geometric.nn import MessagePassing, SumAggregation, radius_graph
 from torch_geometric.nn.resolver import aggregation_resolver as aggr_resolver
 from torch_geometric.typing import OptTensor
@@ -216,7 +217,7 @@ class SchNet(torch.nn.Module):
 
         with warnings.catch_warnings():
             warnings.simplefilter('ignore')
-            state = torch.load(path, map_location='cpu')
+            state = fs.torch_load(path, map_location='cpu')
 
         net = SchNet(
             hidden_channels=128,
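`fs.torch_load` is PyG's fsspec-backed wrapper around `torch.load`, so the pretrained-weight path is no longer restricted to the local filesystem. A minimal sketch, with an illustrative path:

from torch_geometric.io import fs

# Behaves like torch.load, but the path may be any fsspec-style URL
# (local file, 's3://...', 'gs://...'); the path below is illustrative.
state = fs.torch_load('/tmp/trained_schnet.pt', map_location='cpu')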
torch_geometric/nn/models/signed_gcn.py
@@ -1,6 +1,5 @@
 from typing import Optional, Tuple
 
-import scipy.sparse
 import torch
 import torch.nn.functional as F
 from torch import Tensor
@@ -98,6 +97,7 @@ class SignedGCN(torch.nn.Module):
                 :obj:`max_val + 1` of :attr:`pos_edge_index` and
                 :attr:`neg_edge_index`. (default: :obj:`None`)
         """
+        import scipy.sparse as sp
         from sklearn.decomposition import TruncatedSVD
 
         edge_index = torch.cat([pos_edge_index, neg_edge_index], dim=1)
@@ -119,7 +119,7 @@ class SignedGCN(torch.nn.Module):
         # https://github.com/benedekrozemberczki/SGCN/blob/master/src/utils.py
         edge_index = edge_index.detach().numpy()
         val = val.detach().numpy()
-        A = scipy.sparse.coo_matrix((val, edge_index), shape=(N, N))
+        A = sp.coo_matrix((val, edge_index), shape=(N, N))
         svd = TruncatedSVD(n_components=self.in_channels, n_iter=128)
         svd.fit(A)
         x = svd.components_.T
@@ -256,7 +256,7 @@ class SignedGCN(torch.nn.Module):
         neg_p = self.discriminate(z, neg_edge_index)[:, :2].max(dim=1)[1]
         pred = (1 - torch.cat([pos_p, neg_p])).cpu()
         y = torch.cat(
-            [pred.new_ones((pos_p.size(0))),
+            [pred.new_ones(pos_p.size(0)),
              pred.new_zeros(neg_p.size(0))])
         pred, y = pred.numpy(), y.numpy()
 
torch_geometric/nn/module_dict.py
@@ -1,4 +1,4 @@
-from typing import Final, Iterable, Mapping, Optional, Set, Tuple, Union
+from typing import Final, Iterable, Mapping, Optional, Tuple, Union
 
 import torch
 from torch.nn import Module
@@ -11,7 +11,7 @@ Key = Union[str, Tuple[str, ...]]
 # internal representation and converts it back to `.` in the external
 # representation. It also allows passing tuples as keys.
 class ModuleDict(torch.nn.ModuleDict):
-    CLASS_ATTRS: Final[Set[str]] = set(dir(torch.nn.ModuleDict))
+    CLASS_ATTRS: Final[Tuple[str, ...]] = tuple(dir(torch.nn.ModuleDict))
 
     def __init__(
         self,
torch_geometric/nn/nlp/llm.py
@@ -0,0 +1,329 @@
+import warnings
+from contextlib import nullcontext
+from typing import Any, Dict, List, Optional
+
+import torch
+from torch import Tensor
+
+try:
+    from transformers.tokenization_utils_base import BatchEncoding
+except ImportError:
+    BatchEncoding = Dict
+
+BOS = '<s>[INST]'
+EOS_USER = '[/INST]'
+EOS = '[/s]'
+IGNORE_INDEX = -100
+MAX_TXT_LEN = 512
+MAX_NEW_TOKENS = 32
+PAD_TOKEN_ID = 0
+PADDING_SIDE = 'left'
+
+
+def get_llm_kwargs(required_memory: int, dtype=torch.dtype) -> Dict[str, Any]:
+    torch.cuda.empty_cache()
+
+    gpu_memory: List[int] = []
+    for i in range(torch.cuda.device_count()):
+        gpu_memory.append(torch.cuda.mem_get_info(i)[0] // 1024**3)
+        # Use the minimum number of GPUs to fit the LLM on.
+        if sum(gpu_memory) >= required_memory:
+            break
+
+    if sum(gpu_memory) < required_memory:
+        gpu_memory = []  # If not enough VRAM, use pure CPU.
+
+    kwargs = dict(revision='main')
+    if len(gpu_memory) > 0:
+        kwargs['max_memory'] = {
+            i: f'{memory}GiB'
+            for i, memory in enumerate(gpu_memory)
+        }
+        kwargs['low_cpu_mem_usage'] = True
+        kwargs['device_map'] = 'auto'
+        kwargs['torch_dtype'] = dtype
+
+    return kwargs
+
+
+class LLM(torch.nn.Module):
+    r"""A wrapper around a Large Language Model (LLM) from HuggingFace.
+
+    model_name (str): The HuggingFace model name, *e.g.*, :obj:`"llama2"` or
+        :obj:`"gemma"`.
+    num_params (int, optional): An integer representing how many parameters the
+        HuggingFace model has, in billions. This is used to automatically
+        allocate the correct number of GPUs needed, given the available GPU
+        memory of your GPUs. If not specified, the number of parameters
+        is determined using the `huggingface_hub` module.
+    dtype (torch.dtype, optional): The data type to use for the LLM.
+        (default :obj: `torch.bfloat16`)
+    """
+    def __init__(
+        self,
+        model_name: str,
+        num_params: int = None,
+        dtype=torch.bfloat16,
+    ) -> None:
+        super().__init__()
+
+        self.model_name = model_name
+
+        from transformers import AutoModelForCausalLM, AutoTokenizer
+
+        if num_params is None:
+            from huggingface_hub import get_safetensors_metadata
+            safetensors_metadata = get_safetensors_metadata(model_name)
+            param_count = safetensors_metadata.parameter_count
+            num_params = list(param_count.values())[0] // 10**9
+
+        # A rough heuristic on GPU memory requirements, e.g., we found that
+        # LLAMA2 (7B parameters) fits on a 85GB GPU.
+        required_memory = 85 * num_params / 7
+        kwargs = get_llm_kwargs(required_memory, dtype)
+
+        print(f"Setting up '{model_name}' with configuration: {kwargs}")
+        self.tokenizer = AutoTokenizer.from_pretrained(
+            model_name,
+            use_fast=False,
+        )
+        self.tokenizer.pad_token_id = PAD_TOKEN_ID
+        self.tokenizer.padding_side = PADDING_SIDE
+        self.llm = AutoModelForCausalLM.from_pretrained(model_name, **kwargs)
+        self.word_embedding = self.llm.model.get_input_embeddings()
+
+        if 'max_memory' not in kwargs:  # Pure CPU:
+            warnings.warn("LLM is being used on CPU, which may be slow")
+            self.device = torch.device('cpu')
+            self.autocast_context = nullcontext()
+        else:
+            self.device = self.llm.device
+            self.autocast_context = torch.amp.autocast('cuda', dtype=dtype)
+
+    def _encode_inputs(
+        self,
+        question: List[str],
+        context: Optional[List[str]] = None,
+    ) -> tuple:
+        batch_size = len(question)
+        questions = self.tokenizer(question, add_special_tokens=False)
+        if context is not None:
+            context = self.tokenizer(context, add_special_tokens=False)
+
+        eos_user_tokens = self.tokenizer(EOS_USER, add_special_tokens=False)
+        bos_token = self.tokenizer(
+            BOS,
+            add_special_tokens=False,
+            return_tensors='pt',
+        ).input_ids[0].to(self.device)
+        bos_embeds = self.word_embedding(bos_token)
+        pad_token = torch.tensor(self.tokenizer.pad_token_id,
+                                 device=self.device)
+        pad_embeds = self.word_embedding(pad_token).unsqueeze(0)
+        return (batch_size, questions, context, eos_user_tokens, bos_embeds,
+                pad_embeds)
+
+    def _label_input_ids(
+        self,
+        i: int,
+        label: BatchEncoding,
+        eos_tokens: BatchEncoding,
+    ) -> List[int]:
+        label_input_ids = label.input_ids[i][:MAX_NEW_TOKENS]
+        label_input_ids = label_input_ids + eos_tokens.input_ids
+        return label_input_ids
+
+    def _input_ids(
+        self,
+        i: int,
+        context: BatchEncoding,
+        question: BatchEncoding,
+        eos_user_tokens: BatchEncoding,
+    ) -> List[int]:
+        input_ids: List[int] = []
+        if context is not None:
+            input_ids += context.input_ids[i][:MAX_TXT_LEN]
+        input_ids += question.input_ids[i]
+        input_ids += eos_user_tokens.input_ids
+        return input_ids
+
+    def _inputs_embeds(
+        self,
+        i: int,
+        input_ids: List[int],
+        bos_embeds: Tensor,
+        embedding: Optional[List[Tensor]] = None,
+    ) -> Tensor:
+        inputs_embeds = self.word_embedding(
+            torch.tensor(input_ids, device=self.device))
+
+        to_cat = [bos_embeds]
+        if embedding is not None and embedding[i] is not None:
+            to_cat.append(embedding[i])
+        to_cat.append(inputs_embeds)
+        return torch.cat(to_cat, dim=0).to(self.device)
+
+    def _append_embeds(
+        self,
+        inputs_embeds: Tensor,
+        batch_inputs_embeds: List[Tensor],
+        batch_attention_mask: List[List[int]],
+        label_input_ids: List[int] = None,
+        batch_label_input_ids: Optional[List[List[int]]] = None,
+    ) -> tuple:
+        batch_inputs_embeds.append(inputs_embeds)
+        batch_attention_mask.append([1] * inputs_embeds.size(0))
+        if label_input_ids is not None:
+            pad = inputs_embeds.size(0) - len(label_input_ids)
+            label_input_ids = [IGNORE_INDEX] * pad + label_input_ids
+            batch_label_input_ids.append(label_input_ids)
+        return batch_inputs_embeds, batch_attention_mask, batch_label_input_ids
+
+    def _pad_embeds(
+        self,
+        pad_embeds: Tensor,
+        batch_inputs_embeds: List[Tensor],
+        batch_attention_mask: List[List[int]],
+        batch_label_input_ids: Optional[List[List[int]]] = None,
+    ) -> tuple:
+        max_length = max([x.size(0) for x in batch_inputs_embeds])
+        batch_size = len(batch_inputs_embeds)
+        for i in range(batch_size):
+            pad = max_length - batch_inputs_embeds[i].size(0)
+            batch_inputs_embeds[i] = torch.cat([
+                pad_embeds.repeat(pad, 1),
+                batch_inputs_embeds[i],
+            ])
+            batch_attention_mask[i] = [0] * pad + batch_attention_mask[i]
+            if batch_label_input_ids is not None:
+                tmp = [IGNORE_INDEX] * pad + batch_label_input_ids[i]
+                batch_label_input_ids[i] = tmp
+        inputs_embeds = torch.stack(batch_inputs_embeds, dim=0)
+        attention_mask = torch.tensor(batch_attention_mask, device=self.device)
+        label_input_ids = None
+        if batch_label_input_ids is not None:
+            label_input_ids = torch.tensor(batch_label_input_ids,
+                                           device=self.device)
+        return inputs_embeds, attention_mask, label_input_ids
+
+    def _get_embeds(
+        self,
+        question: List[str],
+        context: Optional[List[str]] = None,
+        embedding: Optional[List[Tensor]] = None,
+        answer: Optional[List[str]] = None,
+    ) -> tuple:
+        (batch_size, question, context, eos_user_tokens, bos_embeds,
+         pad_embeds) = self._encode_inputs(question, context)
+
+        batch_label_input_ids = None
+        if answer is not None:
+            label = self.tokenizer(answer, add_special_tokens=False)
+            eos_tokens = self.tokenizer(EOS, add_special_tokens=False)
+            batch_label_input_ids = []
+
+        batch_inputs_embeds = []
+        batch_attention_mask = []
+        for i in range(batch_size):
+            input_ids = self._input_ids(i, context, question, eos_user_tokens)
+            if answer is not None:
+                label_input_ids = self._label_input_ids(i, label, eos_tokens)
+                input_ids += label_input_ids
+            else:
+                label_input_ids = None
+
+            inputs_embeds = self._inputs_embeds(i, input_ids, bos_embeds,
+                                                embedding)
+
+            (
+                batch_inputs_embeds,
+                batch_attention_mask,
+                batch_label_input_ids,
+            ) = self._append_embeds(
+                inputs_embeds,
+                batch_inputs_embeds,
+                batch_attention_mask,
+                label_input_ids,
+                batch_label_input_ids,
+            )
+
+        inputs_embeds, attention_mask, label_input_ids = self._pad_embeds(
+            pad_embeds, batch_inputs_embeds, batch_attention_mask,
+            batch_label_input_ids)
+
+        return inputs_embeds, attention_mask, label_input_ids
+
+    def forward(
+        self,
+        question: List[str],
+        answer: List[str],
+        context: Optional[List[str]] = None,
+        embedding: Optional[List[Tensor]] = None,
+    ) -> Tensor:
+        r"""The forward pass.
+
+        Args:
+            question (list[str]): The questions/prompts.
+            answer (list[str]): The answers/labels.
+            context (list[str], optional): Additional context to give to the
+                LLM, such as textified knowledge graphs. (default: :obj:`None`)
+            embedding (list[torch.Tensor], optional): RAG embedding
+                tensors, *i.e.* the embedded form of :obj:`context`. Either
+                :obj:`context` or :obj:`embedding` should be used, not
+                both. (default: :obj:`None`)
+        """
+        inputs_embeds, attention_mask, label_input_ids = self._get_embeds(
+            question, context, embedding, answer)
+
+        with self.autocast_context:
+            outputs = self.llm(
+                inputs_embeds=inputs_embeds,
+                attention_mask=attention_mask,
+                return_dict=True,
+                labels=label_input_ids,
+            )
+        return outputs.loss
+
+    @torch.no_grad()
+    def inference(
+        self,
+        question: List[str],
+        context: Optional[List[str]] = None,
+        embedding: Optional[List[Tensor]] = None,
+        max_tokens: Optional[int] = MAX_NEW_TOKENS,
+    ) -> List[str]:
+        r"""The inference pass.
+
+        Args:
+            question (list[str]): The questions/prompts.
+            answer (list[str]): The answers/labels.
+            context (list[str], optional): Additional context to give to the
+                LLM, such as textified knowledge graphs. (default: :obj:`None`)
+            embedding (list[torch.Tensor], optional): RAG embedding
+                tensors, *i.e.* the embedded form of :obj:`context`. Either
+                :obj:`context` or :obj:`embedding` should be used, not
+                both. (default: :obj:`None`)
+            max_tokens (int, optional): How many tokens for the LLM to
+                generate. (default: :obj:`32`)
+        """
+        inputs_embeds, attention_mask, _ = self._get_embeds(
+            question, context, embedding)
+
+        bos_token = self.tokenizer(
+            BOS,
+            add_special_tokens=False,
+        ).input_ids[0]
+
+        with self.autocast_context:
+            outputs = self.llm.generate(
+                inputs_embeds=inputs_embeds,
+                bos_token_id=bos_token,
+                max_new_tokens=max_tokens,
+                attention_mask=attention_mask,
+                use_cache=True,
+            )
+
+        return self.tokenizer.batch_decode(outputs, skip_special_tokens=True)
+
+    def __repr__(self) -> str:
+        return f'{self.__class__.__name__}({self.model_name})'
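A minimal end-to-end sketch of the new `LLM` wrapper; the checkpoint name and prompts are illustrative:

from torch_geometric.nn.nlp import LLM

llm = LLM(model_name='lmsys/vicuna-7b-v1.5', num_params=7)

# Training-style pass: tokenizes question/answer pairs, left-pads the
# batch, and returns the causal-LM loss over the answer tokens.
loss = llm(
    question=['What is the capital of France?'],
    answer=['Paris'],
)
loss.backward()

# Inference pass: generates up to MAX_NEW_TOKENS (32) new tokens and
# returns the decoded strings.
print(llm.inference(question=['What is the capital of France?']))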