cehrgpt-0.0.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- __init__.py +0 -0
- cehrgpt/__init__.py +0 -0
- cehrgpt/analysis/__init__.py +0 -0
- cehrgpt/analysis/privacy/__init__.py +0 -0
- cehrgpt/analysis/privacy/attribute_inference.py +275 -0
- cehrgpt/analysis/privacy/attribute_inference_config.yml +8975 -0
- cehrgpt/analysis/privacy/member_inference.py +172 -0
- cehrgpt/analysis/privacy/nearest_neighbor_inference.py +189 -0
- cehrgpt/analysis/privacy/reid_inference.py +407 -0
- cehrgpt/analysis/privacy/utils.py +255 -0
- cehrgpt/cehrgpt_args.py +142 -0
- cehrgpt/data/__init__.py +0 -0
- cehrgpt/data/hf_cehrgpt_dataset.py +80 -0
- cehrgpt/data/hf_cehrgpt_dataset_collator.py +482 -0
- cehrgpt/data/hf_cehrgpt_dataset_mapping.py +116 -0
- cehrgpt/generation/__init__.py +0 -0
- cehrgpt/generation/chatgpt_generation.py +106 -0
- cehrgpt/generation/generate_batch_hf_gpt_sequence.py +333 -0
- cehrgpt/generation/omop_converter_batch.py +644 -0
- cehrgpt/generation/omop_entity.py +515 -0
- cehrgpt/gpt_utils.py +331 -0
- cehrgpt/models/__init__.py +0 -0
- cehrgpt/models/config.py +205 -0
- cehrgpt/models/hf_cehrgpt.py +1817 -0
- cehrgpt/models/hf_modeling_outputs.py +158 -0
- cehrgpt/models/pretrained_embeddings.py +82 -0
- cehrgpt/models/special_tokens.py +30 -0
- cehrgpt/models/tokenization_hf_cehrgpt.py +1077 -0
- cehrgpt/omop/__init__.py +0 -0
- cehrgpt/omop/condition_era.py +20 -0
- cehrgpt/omop/observation_period.py +43 -0
- cehrgpt/omop/omop_argparse.py +38 -0
- cehrgpt/omop/omop_table_builder.py +86 -0
- cehrgpt/omop/queries/__init__.py +0 -0
- cehrgpt/omop/queries/condition_era.py +86 -0
- cehrgpt/omop/queries/observation_period.py +135 -0
- cehrgpt/omop/sample_omop_tables.py +71 -0
- cehrgpt/runners/__init__.py +0 -0
- cehrgpt/runners/gpt_runner_util.py +99 -0
- cehrgpt/runners/hf_cehrgpt_finetune_runner.py +746 -0
- cehrgpt/runners/hf_cehrgpt_pretrain_runner.py +370 -0
- cehrgpt/runners/hf_gpt_runner_argument_dataclass.py +137 -0
- cehrgpt/runners/hyperparameter_search_util.py +223 -0
- cehrgpt/time_to_event/__init__.py +0 -0
- cehrgpt/time_to_event/config/30_day_readmission.yaml +8 -0
- cehrgpt/time_to_event/config/next_visit_type_prediction.yaml +8 -0
- cehrgpt/time_to_event/config/t2dm_hf.yaml +8 -0
- cehrgpt/time_to_event/time_to_event_model.py +226 -0
- cehrgpt/time_to_event/time_to_event_prediction.py +347 -0
- cehrgpt/time_to_event/time_to_event_utils.py +55 -0
- cehrgpt/tools/__init__.py +0 -0
- cehrgpt/tools/ehrshot_benchmark.py +74 -0
- cehrgpt/tools/generate_pretrained_embeddings.py +130 -0
- cehrgpt/tools/merge_synthetic_real_dataasets.py +218 -0
- cehrgpt/tools/upload_omop_tables.py +108 -0
- cehrgpt-0.0.1.dist-info/LICENSE +21 -0
- cehrgpt-0.0.1.dist-info/METADATA +66 -0
- cehrgpt-0.0.1.dist-info/RECORD +60 -0
- cehrgpt-0.0.1.dist-info/WHEEL +5 -0
- cehrgpt-0.0.1.dist-info/top_level.txt +2 -0
cehrgpt/models/hf_cehrgpt.py
@@ -0,0 +1,1817 @@
import math
import warnings
from typing import List, Optional, Tuple, Union

import numpy as np
import torch
import torch.nn.functional as f
from torch import nn
from torch.distributions import Gamma
from torch.nn import CrossEntropyLoss
from torch.nn import functional as F
from transformers import PreTrainedModel
from transformers.activations import gelu_new
from transformers.generation.logits_process import LogitsProcessorList
from transformers.generation.stopping_criteria import (
    StoppingCriteriaList,
    validate_stopping_criteria,
)
from transformers.generation.streamers import BaseStreamer
from transformers.integrations.deepspeed import is_deepspeed_zero3_enabled
from transformers.models.gpt2.modeling_gpt2 import GPT2Attention, GPT2Block
from transformers.pytorch_utils import Conv1D
from transformers.utils import (
    is_accelerate_available,
    is_flash_attn_2_available,
    logging,
)
from transformers.utils.model_parallel_utils import assert_device_map, get_device_map

from cehrgpt.models.config import CEHRGPTConfig
from cehrgpt.models.hf_modeling_outputs import (
    CehrGptCausalLMOutput,
    CehrGptGenerateDecoderOnlyOutput,
    CehrGptOutputWithPast,
    CehrGptSequenceClassifierOutput,
)

if is_flash_attn_2_available():
    from flash_attn import flash_attn_func, flash_attn_varlen_func
    from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input  # noqa

if is_accelerate_available():
    from accelerate.hooks import add_hook_to_module

logger = logging.get_logger(__name__)


# Copied from transformers.models.llama.modeling_llama._get_unpad_data
def _get_unpad_data(attention_mask):
    seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
    indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
    max_seqlen_in_batch = seqlens_in_batch.max().item()
    cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
    return (
        indices,
        cu_seqlens,
        max_seqlen_in_batch,
    )

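# ---------------------------------------------------------------------------
# Editorial sketch (not part of the released file): a tiny worked example of
# the unpad metadata computed by _get_unpad_data above, for a right-padded
# batch of two sequences with true lengths 3 and 2.
def _unpad_data_example():
    mask = torch.tensor([[1, 1, 1, 0], [1, 1, 0, 0]])
    indices, cu_seqlens, max_seqlen = _get_unpad_data(mask)
    # indices    == tensor([0, 1, 2, 4, 5])  -> flat positions of real tokens
    # cu_seqlens == tensor([0, 3, 5])        -> cumulative sequence lengths
    # max_seqlen == 3                        -> longest unpadded sequence
    # flash_attn_varlen_func consumes exactly this triple to attend over the
    # packed, padding-free token stream.
    return indices, cu_seqlens, max_seqlen
# ---------------------------------------------------------------------------
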
class GPT2FlashAttention(GPT2Attention):
    """
    GPT2FlashAttention inherits from `GPT2Attention`.

    The primary change is in the forward pass, where it correctly
    calls the public API of flash attention and handles padding tokens.
    """

    def forward(
        self,
        hidden_states: Optional[Tuple[torch.FloatTensor]],
        layer_past: Optional[Tuple[torch.Tensor]] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = False,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[Union[torch.Tensor, Tuple[torch.Tensor]], ...]:
        # Prepare query, key, and value
        if encoder_hidden_states is not None:
            if not hasattr(self, "q_attn"):
                raise ValueError(
                    "If class is used as cross attention, the weights `q_attn` have to be defined. "
                    "Please make sure to instantiate class with `GPT2Attention(..., is_cross_attention=True)`."
                )

            query = self.q_attn(hidden_states)
            key, value = self.c_attn(encoder_hidden_states).split(
                self.split_size, dim=2
            )
            attention_mask = encoder_attention_mask
        else:
            query, key, value = self.c_attn(hidden_states).split(self.split_size, dim=2)

        query = self._split_heads(query, self.num_heads, self.head_dim)
        key = self._split_heads(key, self.num_heads, self.head_dim)
        value = self._split_heads(value, self.num_heads, self.head_dim)

        if layer_past is not None:
            past_key, past_value = layer_past
            key = torch.cat((past_key, key), dim=-2)
            value = torch.cat((past_value, value), dim=-2)

        if use_cache is True:
            present = (key, value)
        else:
            present = None

        # Apply Flash Attention Forward
        if self.reorder_and_upcast_attn:
            attn_output, attn_weights = self._upcast_and_reordered_attn(
                query, key, value, attention_mask, head_mask
            )
        else:
            # Flash Attention forward pass
            attn_output = self._flash_attention_forward(
                query,
                key,
                value,
                attention_mask,
                query.size(-2),
                self.attn_dropout.p,
                softmax_scale=None,
            )

        # Merge heads and project back to hidden size
        attn_output = self._merge_heads(attn_output, self.num_heads, self.head_dim)
        attn_output = self.c_proj(attn_output)
        attn_output = self.resid_dropout(attn_output)

        outputs = (attn_output, present)
        if output_attentions:
            outputs += (attn_weights,)

        return outputs

    def _flash_attention_forward(
        self,
        query_states,
        key_states,
        value_states,
        attention_mask,
        query_length,
        dropout=0.0,
        softmax_scale=None,
    ):
        """
        Calls the forward method of Flash Attention.

        If the input hidden states contain at least one padding token, first unpad the input, then compute the
        attention scores, and finally pad the attention output back.
        Args:
            query_states (`torch.Tensor`):
                Input query states to be passed to Flash Attention API
            key_states (`torch.Tensor`):
                Input key states to be passed to Flash Attention API
            value_states (`torch.Tensor`):
                Input value states to be passed to Flash Attention API
            attention_mask (`torch.Tensor`):
                The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
                position of padding tokens and 1 for the position of non-padding tokens.
            dropout (`float`, *optional*):
                Attention dropout
            softmax_scale (`float`, *optional*):
                The scaling of QK^T before applying softmax. Defaults to 1 / sqrt(head_dim)
        """

        # Flash attention requires the input to have the shape
        # (batch_size, seq_length, n_heads, head_dim),
        # so permute from GPT-2's (batch_size, n_heads, seq_length, head_dim) layout
        dtype = query_states.dtype
        query_states = query_states.permute(0, 2, 1, 3).contiguous().to(torch.bfloat16)
        key_states = key_states.permute(0, 2, 1, 3).contiguous().to(torch.bfloat16)
        value_states = value_states.permute(0, 2, 1, 3).contiguous().to(torch.bfloat16)

        # Contains at least one padding token in the sequence
        if attention_mask is not None:
            batch_size = query_states.shape[0]

            (
                query_states,
                key_states,
                value_states,
                indices_q,
                cu_seq_lens,
                max_seq_lens,
            ) = self._upad_input(
                query_states, key_states, value_states, attention_mask, query_length
            )

            cu_seqlens_q, cu_seqlens_k = cu_seq_lens
            max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens

            attn_output_unpad = flash_attn_varlen_func(
                query_states,
                key_states,
                value_states,
                cu_seqlens_q=cu_seqlens_q,
                cu_seqlens_k=cu_seqlens_k,
                max_seqlen_q=max_seqlen_in_batch_q,
                max_seqlen_k=max_seqlen_in_batch_k,
                dropout_p=dropout,
                softmax_scale=softmax_scale,
                causal=True,
            )
            # (batch, seq_length, n_heads, head_dim)
            attn_output = pad_input(
                attn_output_unpad, indices_q, batch_size, query_length
            )
        else:
            attn_output = flash_attn_func(
                query_states,
                key_states,
                value_states,
                dropout,
                softmax_scale=softmax_scale,
                causal=self.is_causal,
            )
        # re-order the tensor back to (batch, n_heads, seq_length, head_dim)
        return attn_output.permute(0, 2, 1, 3).contiguous().to(dtype)

    # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._upad_input
    def _upad_input(
        self, query_layer, key_layer, value_layer, attention_mask, query_length
    ):
        indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
        batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape

        key_layer = index_first_axis(
            key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim),
            indices_k,
        )
        value_layer = index_first_axis(
            value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim),
            indices_k,
        )
        if query_length == kv_seq_len:
            query_layer = index_first_axis(
                query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim),
                indices_k,
            )
            cu_seqlens_q = cu_seqlens_k
            max_seqlen_in_batch_q = max_seqlen_in_batch_k
            indices_q = indices_k
        elif query_length == 1:
            max_seqlen_in_batch_q = 1
            cu_seqlens_q = torch.arange(
                batch_size + 1, dtype=torch.int32, device=query_layer.device
            )  # There is a memcpy here, that is very bad.
            indices_q = cu_seqlens_q[:-1]
            query_layer = query_layer.squeeze(1)
        else:
            # The -q_len: slice assumes left padding.
            attention_mask = attention_mask[:, -query_length:]
            query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(
                query_layer, attention_mask
            )

        return (
            query_layer,
            key_layer,
            value_layer,
            indices_q,
            (cu_seqlens_q, cu_seqlens_k),
            (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
        )

class WeibullModel(nn.Module):
    def __init__(self, input_dim):
        super(WeibullModel, self).__init__()
        self.linear1 = nn.Sequential(
            nn.Linear(input_dim, input_dim // 2), gelu_new, nn.Linear(input_dim // 2, 1)
        )
        self.linear2 = nn.Sequential(
            nn.Linear(input_dim, input_dim // 2), gelu_new, nn.Linear(input_dim // 2, 1)
        )

    def forward(self, x):
        lambda_param = f.softplus(self.linear1(x))  # Ensure scale is positive
        k_param = f.softplus(self.linear2(x))  # Ensure shape is positive
        # Check for NaN values
        if torch.isnan(lambda_param).any():
            logger.warning(f"NaN values found in scale_param. x: {x}")
        if torch.isnan(k_param).any():
            logger.warning(f"NaN values found in k_param. x: {x}")
        return lambda_param, k_param

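# ---------------------------------------------------------------------------
# Editorial sketch (not part of the released file): the (lambda, k) pair that
# WeibullModel emits parameterizes a Weibull distribution over waiting times.
# A standard way to fit such a head is the Weibull negative log-likelihood;
# this is an assumed illustration, not necessarily the exact time-to-visit
# loss used later in this file.
def _weibull_nll_example(t, lambda_param, k_param, eps=1e-6):
    # log f(t; lambda, k) = log(k/lambda) + (k-1) * log(t/lambda) - (t/lambda)^k
    t = torch.clamp(t, min=eps)
    log_pdf = (
        torch.log(k_param / lambda_param)
        + (k_param - 1.0) * torch.log(t / lambda_param)
        - (t / lambda_param) ** k_param
    )
    return -log_pdf.mean()
# ---------------------------------------------------------------------------
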
class ConceptValueTransformationLayer(nn.Module):
    def __init__(self, embedding_size):
        super(ConceptValueTransformationLayer, self).__init__()
        self.embedding_size = embedding_size
        self.merge_value_transformation_layer = nn.Sequential(
            nn.Linear(
                2 * embedding_size, embedding_size
            )  # 2 * embedding_size because concept and value embeddings are concatenated
        )

    def forward(
        self,
        concept_embeddings: Optional[torch.FloatTensor],
        value_indicators: Optional[torch.BoolTensor] = None,
        value_embeddings: Optional[torch.FloatTensor] = None,
    ):
        value_indicators = value_indicators.unsqueeze(-1)

        # Concatenate concept_embeddings and concept_values
        concept_embeddings_with_val = torch.cat(
            [concept_embeddings, value_embeddings], dim=-1
        )

        # Transform concatenated embeddings back to embedding_size
        transformed_embeddings = self.merge_value_transformation_layer(
            concept_embeddings_with_val
        )

        # Apply mask using torch.where
        concept_embeddings = torch.where(
            value_indicators, transformed_embeddings, concept_embeddings
        )

        return concept_embeddings

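# ---------------------------------------------------------------------------
# Editorial sketch (not part of the released file): shape-level usage of the
# layer above. Positions whose value_indicators entry is False keep their
# original concept embedding; positions carrying a lab value get the fused
# concept+value embedding instead.
def _concept_value_example():
    layer = ConceptValueTransformationLayer(embedding_size=8)
    concept_embeddings = torch.randn(2, 5, 8)  # (batch, seq, hidden)
    value_embeddings = torch.randn(2, 5, 8)  # (batch, seq, hidden)
    value_indicators = torch.zeros(2, 5, dtype=torch.bool)
    value_indicators[:, 1] = True  # one measured value per sequence
    fused = layer(concept_embeddings, value_indicators, value_embeddings)
    return fused.shape  # torch.Size([2, 5, 8])
# ---------------------------------------------------------------------------
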
class CEHRGPTPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface
    for downloading and loading pretrained models.
    """

    config_class = CEHRGPTConfig
    base_model_prefix = "cehrgpt"
    is_parallelizable = True
    supports_gradient_checkpointing = True
    _no_split_modules = ["GPT2Block"]
    _skip_keys_device_placement = "past_key_values"
    _supports_flash_attn_2 = True

    def __init__(self, *inputs, **kwargs):
        super().__init__(*inputs, **kwargs)

    def _init_weights(self, module):
        """Initialize the weights."""
        if isinstance(module, (nn.Linear, Conv1D)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

        # Reinitialize selected weights subject to the OpenAI GPT-2 Paper Scheme:
        # > A modified initialization which accounts for the accumulation on the residual path with model depth. Scale
        # > the weights of residual layers at initialization by a factor of 1/√N where N is the # of residual layers.
        # > -- GPT-2 :: https://openai.com/blog/better-language-models/
        #
        # Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py
        for name, p in module.named_parameters():
            if name == "c_proj.weight":
                # Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block
                p.data.normal_(
                    mean=0.0,
                    std=(
                        self.config.initializer_range
                        / math.sqrt(2 * self.config.n_layer)
                    ),
                )

    def tie_weights(self):
        # We only want to tie weights when we DO NOT use use_pretrained_embeddings
        if not getattr(self.config, "use_pretrained_embeddings", False):
            super().tie_weights()
        # We want to tie the weights for value tokens regardless of the value of use_pretrained_embeddings
        if getattr(self.config, "tie_word_embeddings", True):
            output_value_embeddings = self.get_value_output_embeddings()
            if output_value_embeddings is not None:
                self._tie_or_clone_weights(
                    output_value_embeddings, self.get_value_input_embeddings()
                )

    def resize_token_embeddings(
        self,
        new_num_tokens: Optional[int] = None,
        pad_to_multiple_of: Optional[int] = None,
    ) -> nn.Embedding:
        if getattr(self.config, "use_pretrained_embeddings", False):
            base_model = getattr(self, self.base_model_prefix, self)
            if base_model is not self:
                old_embeddings = base_model.pretrained_wte[0]
                new_embeddings = self._get_resized_embeddings(
                    old_embeddings, new_num_tokens, pad_to_multiple_of
                )
                old_embeddings_requires_grad = old_embeddings.weight.requires_grad
                new_embeddings.requires_grad_(old_embeddings_requires_grad)
                base_model.pretrained_wte[0] = new_embeddings
            else:
                raise NotImplementedError
        return super().resize_token_embeddings(new_num_tokens, pad_to_multiple_of)

    def update_pretrained_embeddings(self, token_ids, pretrained_embeddings):
        if getattr(self.config, "use_pretrained_embeddings", False):
            new_pretrained_token_ids = []
            new_pretrained_embeddings = []
            for token_id, vector in zip(token_ids, pretrained_embeddings):
                if token_id not in self.config.pretrained_token_ids:
                    new_pretrained_token_ids.append(token_id)
                    new_pretrained_embeddings.append(vector)

            if new_pretrained_token_ids:
                self.pretrained_wte[0].weight.requires_grad = False
                self.pretrained_wte[0].weight[new_pretrained_token_ids] = torch.tensor(
                    np.asarray(new_pretrained_embeddings),
                    dtype=self.pretrained_wte[0].weight.dtype,
                    device=self.pretrained_wte[0].weight.device,
                )
                self.config.pretrained_token_ids.extend(new_pretrained_token_ids)

    def resize_value_embeddings(
        self,
        new_num_tokens: Optional[int] = None,
        pad_to_multiple_of: Optional[int] = None,
    ) -> Optional[nn.Embedding]:
        """
        Resizes value token embeddings matrix of the model if `new_num_tokens != config.value_vocab_size`.

        Takes care of tying weights embeddings afterwards if the model class has a `tie_weights()` method.

        Arguments:
            new_num_tokens (`int`, *optional*):
                The new number of tokens in the embedding matrix. Increasing the size will add newly initialized
                vectors at the end. Reducing the size will remove vectors from the end. If not provided or `None`, just
                returns a pointer to the input tokens `torch.nn.Embedding` module of the model without doing anything.
            pad_to_multiple_of (`int`, *optional*):
                If set, will pad the embedding matrix to a multiple of the provided value. If `new_num_tokens` is set to
                `None`, will just pad the embedding to a multiple of `pad_to_multiple_of`.

                This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability
                `>= 7.5` (Volta), or on TPUs which benefit from having sequence lengths be a multiple of 128. For more
                details about this, or help on choosing the correct value for resizing, refer to this guide:
                https://docs.nvidia.com/deeplearning/performance/dl-performance-matrix-multiplication/index.html#requirements-tc

        Return:
            `torch.nn.Embedding`: Pointer to the input tokens Embeddings Module of the model.
        """
        from transformers.integrations.deepspeed import is_deepspeed_zero3_enabled

        # If the model did not include values, we don't want to resize the value embeddings
        if not self.config.include_values:
            return None

        # check if the value vocab_size is less than the number of tokens
        # we need to resize the value_embeddings if necessary
        if self.config.value_vocab_size < new_num_tokens:
            # Update the embedding size
            old_value_embeddings = self.get_value_input_embeddings()
            new_value_embeddings = self._get_resized_embeddings(
                old_value_embeddings, new_num_tokens, pad_to_multiple_of
            )
            old_embeddings_requires_grad = old_value_embeddings.weight.requires_grad
            new_value_embeddings.requires_grad_(old_embeddings_requires_grad)
            self.set_value_input_embeddings(new_value_embeddings)
            is_quantized = (
                hasattr(self, "hf_quantizer") and self.hf_quantizer is not None
            )
            # Update new_num_tokens with the actual size of new_value_embeddings
            if pad_to_multiple_of is not None:
                if is_deepspeed_zero3_enabled() and not is_quantized:
                    import deepspeed

                    with deepspeed.zero.GatheredParameters(
                        new_value_embeddings.weight, modifier_rank=None
                    ):
                        new_num_tokens = new_value_embeddings.weight.shape[0]
                else:
                    new_num_tokens = new_value_embeddings.weight.shape[0]

            # make sure that lm head is resized as well
            if (
                self.get_value_output_embeddings() is not None
                and not self.config.tie_word_embeddings
            ):
                old_value_head = self.get_value_output_embeddings()
                new_value_head = self._get_resized_lm_head(
                    old_value_head, new_num_tokens
                )
                old_value_head_requires_grad = old_value_head.weight.requires_grad
                new_value_head.requires_grad_(old_value_head_requires_grad)
                self.set_value_output_embeddings(new_value_head)
            # Update base model and current model config
            self.config.value_vocab_size = (
                self.get_value_input_embeddings().weight.shape[0]
            )

        # Return the input value embeddings
        return self.get_value_input_embeddings()

    def get_value_input_embeddings(self) -> nn.Embedding:
        """
        Returns the model's input embeddings.

        Returns:
            `nn.Module`: A torch module mapping vocabulary to hidden states.
        """
        base_model = getattr(self, self.base_model_prefix, self)
        if base_model is not self:
            return base_model.get_value_input_embeddings()
        else:
            raise NotImplementedError

    def set_value_input_embeddings(self, value: nn.Embedding):
        """
        Set model's input embeddings.

        Args:
            value (`nn.Module`): A module mapping vocabulary to hidden states.
        """
        base_model = getattr(self, self.base_model_prefix, self)
        if base_model is not self:
            base_model.set_value_input_embeddings(value)
        else:
            raise NotImplementedError

    def get_value_output_embeddings(self) -> Optional[nn.Linear]:
        """
        Returns the model's output embeddings.

        Returns:
            `nn.Module`: A torch module mapping hidden states to vocabulary.
        """
        return None  # Overwrite for models with output embeddings

    def set_value_output_embeddings(self, output_embeddings: nn.Module) -> None:
        """
        Set the model's output embeddings.

        Args:
            output_embeddings (`nn.Module`): A torch module mapping hidden states to vocabulary.
        """

    def set_position_embeddings(
        self, position_embeddings: Union[nn.Embedding, Tuple[nn.Embedding]]
    ) -> None:
        raise NotImplementedError(
            f"`set_position_embeddings` is not implemented for {self.__class__}`. To implement it, you should "
            f"overwrite this method in the class {self.__class__} in `modeling_{self.__class__.__module__}.py`"
        )

    def update_attn_bias(self, new_num_position_embeddings: Optional[int]) -> None:
        raise NotImplementedError(
            f"`update_attn_bias` is not implemented for {self.__class__}`. To implement it, you should "
            f"overwrite this method in the class {self.__class__} in `modeling_{self.__class__.__module__}.py`"
        )

    def resize_position_embeddings(self, new_num_position_embeddings: Optional[int]):
        if new_num_position_embeddings is not None:
            is_quantized = (
                hasattr(self, "hf_quantizer") and self.hf_quantizer is not None
            )
            wpe = self.get_position_embeddings()
            max_position, embed_dim = wpe.weight.shape
            if new_num_position_embeddings > max_position:
                new_embeddings = nn.Embedding(
                    new_num_position_embeddings,
                    embed_dim,
                    device=wpe.weight.device,
                    dtype=wpe.weight.dtype,
                )

                # initialize all new embeddings (in particular added tokens)
                self._init_weights(new_embeddings)
                if is_deepspeed_zero3_enabled() and not is_quantized:
                    import deepspeed

                    params = [wpe.weight, new_embeddings.weight]
                    with deepspeed.zero.GatheredParameters(params, modifier_rank=0):
                        new_embeddings.weight.data[:max_position, :] = wpe.weight.data[
                            :max_position, :
                        ]
                else:
                    new_embeddings.weight.data[:max_position, :] = wpe.weight.data[
                        :max_position, :
                    ]
                self.set_position_embeddings(new_embeddings)
                self.config.max_position_embeddings = new_num_position_embeddings
                self.update_attn_bias(new_num_position_embeddings)

class CEHRGPT2Model(CEHRGPTPreTrainedModel):

    def __init__(self, config: CEHRGPTConfig):
        super().__init__(config)

        self.exclude_position_ids = config.exclude_position_ids
        self.include_values = config.include_values
        self.include_ttv_prediction = config.include_ttv_prediction
        self.embed_dim = config.hidden_size

        if config.use_pretrained_embeddings:
            self.initialize_pretrained_embeddings()
        else:
            self.pretrained_wte = None

        self.wte = nn.Embedding(config.vocab_size, self.embed_dim)
        self.wpe = nn.Embedding(config.max_position_embeddings, self.embed_dim)
        if self.include_values:
            self.vte = nn.Embedding(config.value_vocab_size, self.embed_dim)
            self.concept_value_transformation_layer = ConceptValueTransformationLayer(
                self.embed_dim
            )

        self.drop = nn.Dropout(config.embd_pdrop)
        gpt_blocks = []
        for i in range(config.num_hidden_layers):
            gpt_block = GPT2Block(config, layer_idx=i)
            if getattr(config, "_attn_implementation", "eager") == "flash_attention_2":
                gpt_block.attn = GPT2FlashAttention(config, layer_idx=i)
                gpt_block.is_causal = True
            gpt_blocks.append(gpt_block)
        self.h = nn.ModuleList(gpt_blocks)
        self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon)

        # Model parallel
        self.model_parallel = False
        self.device_map = None
        self.gradient_checkpointing = False

        # Initialize weights and apply final processing
        self.post_init()

    def initialize_pretrained_embeddings(self):
        layers = [
            nn.Embedding(self.config.vocab_size, self.config.pretrained_embedding_dim),
            nn.Linear(self.config.pretrained_embedding_dim, self.embed_dim),
            gelu_new,
        ]
        for _ in range(self.config.n_pretrained_embeddings_layers - 1):
            layers.extend(
                [
                    nn.Linear(self.embed_dim, self.embed_dim),
                    gelu_new,
                ]
            )
        self.pretrained_wte = nn.Sequential(*layers)
        # Disable the weight of the pretrained embeddings
        self.pretrained_wte[0].weight.requires_grad = False

    def parallelize(self, device_map=None):
        # Check validity of device_map
        warnings.warn(
            "`CEHRGPT2Model.parallelize` is deprecated and will be removed in v5 of Transformers, you should load your"
            " model with `device_map='balanced'` in the call to `from_pretrained`. You can also provide your own"
            " `device_map` but it needs to be a dictionary module_name to device, so for instance {'h.0': 0, 'h.1': 1,"
            " ...}",
            FutureWarning,
        )
        self.device_map = (
            get_device_map(len(self.h), range(torch.cuda.device_count()))
            if device_map is None
            else device_map
        )
        assert_device_map(self.device_map, len(self.h))
        self.model_parallel = True
        self.first_device = (
            "cpu"
            if "cpu" in self.device_map.keys()
            else "cuda:" + str(min(self.device_map.keys()))
        )
        self.last_device = "cuda:" + str(max(self.device_map.keys()))
        self.wte = self.wte.to(self.first_device)
        if self.config.use_pretrained_embeddings:
            self.pretrained_wte = self.pretrained_wte.to(self.first_device)
        self.wpe = self.wpe.to(self.first_device)
        if self.include_values:
            self.vte = self.vte.to(self.first_device)
            self.concept_value_transformation_layer = (
                self.concept_value_transformation_layer.to(self.first_device)
            )
        # Load onto devices
        for k, v in self.device_map.items():
            for block in v:
                cuda_device = "cuda:" + str(k)
                self.h[block] = self.h[block].to(cuda_device)
        # ln_f to last
        self.ln_f = self.ln_f.to(self.last_device)

    def deparallelize(self):
        warnings.warn(
            "Like `parallelize`, `deparallelize` is deprecated and will be removed in v5 of Transformers.",
            FutureWarning,
        )
        self.model_parallel = False
        self.device_map = None
        self.first_device = "cpu"
        self.last_device = "cpu"
        self.wte = self.wte.to("cpu")
        if self.config.use_pretrained_embeddings:
            self.pretrained_wte = self.pretrained_wte.to("cpu")
        self.wpe = self.wpe.to("cpu")
        self.vte = self.vte.to("cpu")
        self.concept_value_transformation_layer = (
            self.concept_value_transformation_layer.to("cpu")
        )
        for index in range(len(self.h)):
            self.h[index] = self.h[index].to("cpu")
        self.ln_f = self.ln_f.to("cpu")
        torch.cuda.empty_cache()

    def update_attn_bias(self, max_position_embeddings: int):
        for i in range(len(self.h)):
            self.h[i].attn.register_buffer(
                "bias",
                torch.tril(
                    torch.ones(
                        (max_position_embeddings, max_position_embeddings),
                        dtype=torch.bool,
                    )
                )
                .view(1, 1, max_position_embeddings, max_position_embeddings)
                .to(self.h[i].attn.bias.device),
                persistent=False,
            )

    def get_position_embeddings(self) -> Union[nn.Embedding, Tuple[nn.Embedding]]:
        return self.wpe

    def set_position_embeddings(self, new_embeddings: nn.Embedding):
        self.wpe = new_embeddings

    def get_value_input_embeddings(self) -> nn.Module:
        return self.vte

    def set_value_input_embeddings(self, value: nn.Module):
        self.vte = value

    def get_input_embeddings(self):
        return self.wte

    def set_input_embeddings(self, new_embeddings: nn.Embedding):
        self.wte = new_embeddings

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model.

        heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
        """
        for layer, heads in heads_to_prune.items():
            self.h[layer].attn.prune_heads(heads)

    def forward(
        self,
        input_ids: Optional[torch.LongTensor],
        value_indicators: Optional[torch.BoolTensor],
        values: Optional[torch.LongTensor],
        past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        random_vectors: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, CehrGptOutputWithPast]:
        output_attentions = (
            output_attentions
            if output_attentions is not None
            else self.config.output_attentions
        )
        output_hidden_states = (
            output_hidden_states
            if output_hidden_states is not None
            else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = (
            return_dict if return_dict is not None else self.config.use_return_dict
        )

        self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
        input_shape = input_ids.size()
        input_ids = input_ids.view(-1, input_shape[-1])
        batch_size = input_ids.shape[0]

        # When causal SFM is enabled, we need to expand the context window by one to make room for the random vector
        if (
            self.config.causal_sfm
            and input_ids.shape[1] >= self.config.demographics_size
        ):
            # Convert torch.Size to a list
            shape_list = list(input_shape)
            # Increment the last dimension
            shape_list[-1] += 1
            # Convert list back to torch.Size if needed
            input_shape = torch.Size(shape_list)

        device = input_ids.device

        if past_key_values is None:
            past_length = 0
            past_key_values = tuple([None] * len(self.h))
        else:
            past_length = past_key_values[0][0].size(-2)

        # This is normally called during training or fine-tuning.
        # While the generation logic will handle position_ids in the sampling logic
        if position_ids is None and not self.exclude_position_ids:
            position_ids = torch.arange(
                past_length,
                input_shape[-1] + past_length,
                dtype=torch.long,
                device=device,
            )
            position_ids = position_ids.unsqueeze(0)

        # GPT2Attention mask.
        if attention_mask is not None:
            if batch_size <= 0:
                raise ValueError("batch_size has to be defined and > 0")

            if (
                self.config.causal_sfm
                and attention_mask.shape[-1] >= self.config.demographics_size
            ):
                # # Specify the indices to set to 0
                # rows = [1, 2, 2, 3, 3, 3]
                # cols = [0, 0, 1, 0, 1, 2]
                # # Set the specified indices to 0
                # attention_mask[rows, cols] = 0.0
                attention_mask = torch.concat(
                    [
                        attention_mask[..., : self.config.demographics_size],
                        attention_mask.new_ones(attention_mask.shape[:-1] + (1,)),
                        attention_mask[..., self.config.demographics_size :],
                    ],
                    dim=-1,
                )

            # The flash attention requires the original attention_mask
            if (
                not getattr(self.config, "_attn_implementation", "eager")
                == "flash_attention_2"
            ):
                attention_mask = attention_mask.view(batch_size, -1)
                # We create a 3D attention mask from a 2D tensor mask.
                # Sizes are [batch_size, 1, 1, to_seq_length]
                # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
                # this attention mask is more simple than the triangular masking of causal attention
                # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
                attention_mask = attention_mask[:, None, None, :]

                # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
                # masked positions, this operation will create a tensor which is 0.0 for
                # positions we want to attend and the dtype's smallest value for masked positions.
                # Since we are adding it to the raw scores before the softmax, this is
                # effectively the same as removing these entirely.
                attention_mask = attention_mask.to(
                    dtype=self.dtype
                )  # fp16 compatibility
                attention_mask = (1.0 - attention_mask) * torch.finfo(self.dtype).min

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # head_mask has shape n_layer x batch x n_heads x N x N
        head_mask = self.get_head_mask(head_mask, self.config.n_layer)

        if self.config.use_pretrained_embeddings:
            pretrained_token_id_indicators = torch.isin(
                input_ids,
                torch.tensor(self.config.pretrained_token_ids).to(input_ids.device),
            )
            input_embeddings = torch.where(
                pretrained_token_id_indicators.unsqueeze(-1),
                self.pretrained_wte(input_ids),
                self.wte(input_ids),
            )
        else:
            input_embeddings = self.wte(input_ids)

        if self.config.causal_sfm and input_shape[1] >= self.config.demographics_size:
            demographic_embeddings = input_embeddings[
                :, : self.config.demographics_size
            ]
            medical_event_embeddings = input_embeddings[
                :, self.config.demographics_size :
            ]
            if random_vectors is None:
                random_vectors = torch.rand_like(input_embeddings[:, :1])
            input_embeddings = torch.concat(
                [demographic_embeddings, random_vectors, medical_event_embeddings],
                dim=1,
            )

        if self.include_values:
            if (
                self.config.causal_sfm
                and input_shape[1] >= self.config.demographics_size
            ):
                values = torch.concat(
                    [torch.zeros_like(values[:, :1], dtype=torch.int32), values],
                    dim=1,
                )
                value_indicators = torch.concat(
                    [
                        torch.zeros_like(value_indicators[:, :1]).to(torch.bool),
                        value_indicators,
                    ],
                    dim=1,
                )
            value_embeddings = self.vte(values)
            # Combine the value and concept embeddings together
            input_embeddings = self.concept_value_transformation_layer(
                concept_embeddings=input_embeddings,
                value_indicators=value_indicators,
                value_embeddings=value_embeddings,
            )

        if not self.exclude_position_ids:
            position_embeds = self.wpe(position_ids)
            hidden_states = input_embeddings + position_embeds
        else:
            hidden_states = input_embeddings

        hidden_states = self.drop(hidden_states)

        output_shape = (-1,) + input_shape[1:] + (hidden_states.size(-1),)

        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning(
                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                )
                use_cache = False

        presents = () if use_cache else None
        all_self_attentions = () if output_attentions else None
        all_hidden_states = () if output_hidden_states else None
        for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):
            # Model parallel
            if self.model_parallel:
                torch.cuda.set_device(hidden_states.device)
                # Ensure layer_past is on same device as hidden_states (might not be correct)
                if layer_past is not None:
                    layer_past = tuple(
                        past_state.to(hidden_states.device) for past_state in layer_past
                    )
                # Ensure that attention_mask is always on the same device as hidden_states
                if attention_mask is not None:
                    attention_mask = attention_mask.to(hidden_states.device)
                if isinstance(head_mask, torch.Tensor):
                    head_mask = head_mask.to(hidden_states.device)
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            if self.gradient_checkpointing and self.training:
                outputs = self._gradient_checkpointing_func(
                    block.__call__,
                    hidden_states,
                    None,
                    attention_mask,
                    head_mask[i],
                    None,
                    None,
                    use_cache,
                    output_attentions,
                )
            else:
                outputs = block(
                    hidden_states,
                    layer_past=layer_past,
                    attention_mask=attention_mask,
                    head_mask=head_mask[i],
                    encoder_hidden_states=None,
                    encoder_attention_mask=None,
                    use_cache=use_cache,
                    output_attentions=output_attentions,
                )

            hidden_states = outputs[0]
            if use_cache is True:
                presents = presents + (outputs[1],)

            if output_attentions:
                all_self_attentions = all_self_attentions + (
                    outputs[2 if use_cache else 1],
                )

            # Model Parallel: If it's the last layer for that device, put things on the next device
            if self.model_parallel:
                for k, v in self.device_map.items():
                    if i == v[-1] and "cuda:" + str(k) != self.last_device:
                        hidden_states = hidden_states.to("cuda:" + str(k + 1))

        hidden_states = self.ln_f(hidden_states)

        hidden_states = hidden_states.view(output_shape)
        # Add last hidden state
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(
                v
                for v in [
                    hidden_states,
                    presents,
                    all_hidden_states,
                    all_self_attentions,
                ]
                if v is not None
            )

        return CehrGptOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=presents,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
        )

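# ---------------------------------------------------------------------------
# Editorial sketch (not part of the released file): minimal instantiation of
# the CEHRGPT2Model backbone above. The no-argument CEHRGPTConfig() call is an
# assumption that the config class provides GPT-2-style defaults; consult
# cehrgpt/models/config.py for the real defaults and the full field list.
def _backbone_example():
    config = CEHRGPTConfig()  # assumed defaults; placeholder values
    model = CEHRGPT2Model(config)
    input_ids = torch.randint(0, config.vocab_size, (1, 16))
    outputs = model(
        input_ids=input_ids,
        value_indicators=torch.zeros_like(input_ids, dtype=torch.bool),
        values=torch.zeros_like(input_ids),
        attention_mask=torch.ones_like(input_ids),
    )
    return outputs.last_hidden_state.shape  # (1, 16, config.hidden_size)
# ---------------------------------------------------------------------------
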
class CEHRGPT2LMHeadModel(CEHRGPTPreTrainedModel):
|
1031
|
+
_tied_weights_keys = ["lm_head.weight", "value_head.weight"]
|
1032
|
+
|
1033
|
+
def __init__(self, config: CEHRGPTConfig):
|
1034
|
+
super().__init__(config)
|
1035
|
+
self.cehrgpt = CEHRGPT2Model(config)
|
1036
|
+
if self.config.include_ttv_prediction:
|
1037
|
+
self.tte_head = WeibullModel(config.n_embd)
|
1038
|
+
|
1039
|
+
if self.config.use_sub_time_tokenization:
|
1040
|
+
self.time_token_lm_head = nn.Linear(
|
1041
|
+
config.n_embd // 3, config.time_token_vocab_size, bias=False
|
1042
|
+
)
|
1043
|
+
|
1044
|
+
self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
|
1045
|
+
if self.config.include_values:
|
1046
|
+
self.value_head = nn.Linear(
|
1047
|
+
config.n_embd, config.value_vocab_size, bias=False
|
1048
|
+
)
|
1049
|
+
|
1050
|
+
# Model parallel
|
1051
|
+
self.model_parallel = False
|
1052
|
+
self.device_map = None
|
1053
|
+
|
1054
|
+
# Initialize weights and apply final processing
|
1055
|
+
self.post_init()
|
1056
|
+
|
1057
|
+
def parallelize(self, device_map=None):
|
1058
|
+
warnings.warn(
|
1059
|
+
"`GPT2LMHeadModel.parallelize` is deprecated and will be removed in v5 of Transformers, you should load"
|
1060
|
+
" your model with `device_map='balanced'` in the call to `from_pretrained`. You can also provide your own"
|
1061
|
+
" `device_map` but it needs to be a dictionary module_name to device, so for instance {'transformer.h.0':"
|
1062
|
+
" 0, 'transformer.h.1': 1, ...}",
|
1063
|
+
FutureWarning,
|
1064
|
+
)
|
1065
|
+
self.device_map = (
|
1066
|
+
get_device_map(len(self.cehrgpt.h), range(torch.cuda.device_count()))
|
1067
|
+
if device_map is None
|
1068
|
+
else device_map
|
1069
|
+
)
|
1070
|
+
assert_device_map(self.device_map, len(self.cehrgpt.h))
|
1071
|
+
self.cehrgpt.parallelize(self.device_map)
|
1072
|
+
self.lm_head = self.lm_head.to(self.cehrgpt.first_device)
|
1073
|
+
if self.config.include_values:
|
1074
|
+
self.value_head = self.value_head.to(self.cehrgpt.first_device)
|
1075
|
+
if self.config.include_ttv_prediction:
|
1076
|
+
self.tte_head = self.tte_head.to(self.cehrgpt.first_device)
|
1077
|
+
self.model_parallel = True
|
1078
|
+
|
1079
|
+
def deparallelize(self):
|
1080
|
+
warnings.warn(
|
1081
|
+
"Like `parallelize`, `deparallelize` is deprecated and will be removed in v5 of Transformers.",
|
1082
|
+
FutureWarning,
|
1083
|
+
)
|
1084
|
+
self.cehrgpt.deparallelize()
|
1085
|
+
self.cehrgpt = self.cehrgpt.to("cpu")
|
1086
|
+
self.lm_head = self.lm_head.to("cpu")
|
1087
|
+
if self.config.include_values:
|
1088
|
+
self.value_head = self.value_head.to("cpu")
|
1089
|
+
if self.config.include_ttv_prediction:
|
1090
|
+
self.tte_head = self.tte_head.to("cpu")
|
1091
|
+
self.model_parallel = False
|
1092
|
+
torch.cuda.empty_cache()
|
1093
|
+
|
1094
|
+
def get_output_embeddings(self):
|
1095
|
+
return self.lm_head
|
1096
|
+
|
1097
|
+
def set_output_embeddings(self, new_embeddings):
|
1098
|
+
self.lm_head = new_embeddings
|
1099
|
+
|
1100
|
+
def get_value_output_embeddings(self):
|
1101
|
+
if self.config.include_values:
|
1102
|
+
return self.value_head
|
1103
|
+
return None
|
1104
|
+
|
1105
|
+
def set_value_output_embeddings(self, new_embeddings):
|
1106
|
+
if self.config.include_values:
|
1107
|
+
self.value_head = new_embeddings
|
1108
|
+
|
1109
|
+
def get_position_embeddings(self) -> Union[nn.Embedding, Tuple[nn.Embedding]]:
|
1110
|
+
return self.cehrgpt.get_position_embeddings()
|
1111
|
+
|
1112
|
+
def set_position_embeddings(self, new_embeddings: nn.Embedding):
|
1113
|
+
self.cehrgpt.set_position_embeddings(new_embeddings)
|
1114
|
+
|
1115
|
+
def update_attn_bias(self, max_position_embeddings: int):
|
1116
|
+
self.cehrgpt.update_attn_bias(max_position_embeddings)
|
1117
|
+
|
1118
|
+
def prepare_inputs_for_generation(
|
1119
|
+
self,
|
1120
|
+
input_ids,
|
1121
|
+
past_key_values=None,
|
1122
|
+
inputs_embeds=None,
|
1123
|
+
lab_token_ids=None,
|
1124
|
+
**kwargs,
|
1125
|
+
):
|
1126
|
+
|
1127
|
+
# Omit tokens covered by past_key_values
|
1128
|
+
if past_key_values:
|
1129
|
+
past_length = past_key_values[0][0].shape[2]
|
1130
|
+
# Subtract the past_length by 1 due to the random vector
|
1131
|
+
if self.cehrgpt.config.causal_sfm:
|
1132
|
+
past_length -= 1
|
1133
|
+
# Some generation methods already pass only the last input ID
|
1134
|
+
if input_ids.shape[1] > past_length:
|
1135
|
+
remove_prefix_length = past_length
|
1136
|
+
else:
|
1137
|
+
# Default to old behavior: keep only final ID
|
1138
|
+
remove_prefix_length = input_ids.shape[1] - 1
|
1139
|
+
|
1140
|
+
input_ids = input_ids[:, remove_prefix_length:]
|
1141
|
+
|
1142
|
+
attention_mask = kwargs.get("attention_mask", None)
|
1143
|
+
position_ids = kwargs.get("position_ids", None)
|
1144
|
+
random_vectors = kwargs.get("random_vectors", None)
|
1145
|
+
|
1146
|
+
if attention_mask is not None and position_ids is None:
|
1147
|
+
# create position_ids on the fly for batch generation
|
1148
|
+
position_ids = attention_mask.long().cumsum(-1) - 1
|
1149
|
+
position_ids.masked_fill_(attention_mask == 0, 1)
|
1150
|
+
if past_key_values:
|
1151
|
+
position_ids = position_ids[:, -input_ids.shape[1] :]
|
1152
|
+
|
1153
|
+
# Add one more position for the random vectors
|
1154
|
+
if (
|
1155
|
+
self.cehrgpt.config.causal_sfm
|
1156
|
+
and position_ids.shape[-1] >= self.cehrgpt.config.demographics_size
|
1157
|
+
):
|
1158
|
+
position_ids = torch.concat(
|
1159
|
+
[
|
1160
|
+
position_ids,
|
1161
|
+
torch.max(position_ids, dim=-1, keepdim=True)[0] + 1,
|
1162
|
+
],
|
1163
|
+
dim=-1,
|
1164
|
+
)
|
1165
|
+
else:
|
1166
|
+
position_ids = None
|
1167
|
+
|
1168
|
+
# if `inputs_embeds` are passed, we only want to use them in the 1st generation step
|
1169
|
+
if inputs_embeds is not None and past_key_values is None:
|
1170
|
+
model_inputs = {"inputs_embeds": inputs_embeds}
|
1171
|
+
else:
|
1172
|
+
model_inputs = {"input_ids": input_ids}
|
1173
|
+
|
1174
|
+
if self.cehrgpt.include_values:
|
1175
|
+
value_indicators = kwargs.get(
|
1176
|
+
"value_indicators", torch.zeros_like(input_ids).to(torch.bool)
|
1177
|
+
)
|
1178
|
+
values = kwargs.get(
|
1179
|
+
"values",
|
1180
|
+
torch.zeros_like(
|
1181
|
+
input_ids,
|
1182
|
+
dtype=torch.int32,
|
1183
|
+
),
|
1184
|
+
)
|
1185
|
+
# Omit tokens covered by past_key_values
|
1186
|
+
if past_key_values:
|
1187
|
+
past_length = past_key_values[0][0].shape[2]
|
1188
|
+
# Some generation methods already pass only the last input ID
|
1189
|
+
if value_indicators.shape[1] > past_length:
|
1190
|
+
remove_prefix_length = past_length
|
1191
|
+
else:
|
1192
|
+
# Default to old behavior: keep only final ID
|
1193
|
+
remove_prefix_length = value_indicators.shape[1] - 1
|
1194
|
+
value_indicators = value_indicators[:, remove_prefix_length:]
|
1195
|
+
values = values[:, remove_prefix_length:]
|
1196
|
+
|
1197
|
+
model_inputs.update(
|
1198
|
+
{"value_indicators": value_indicators, "values": values}
|
1199
|
+
)
|
1200
|
+
|
1201
|
+
model_inputs.update(
|
1202
|
+
{
|
1203
|
+
"past_key_values": past_key_values,
|
1204
|
+
"use_cache": kwargs.get("use_cache"),
|
1205
|
+
"position_ids": position_ids,
|
1206
|
+
"attention_mask": attention_mask,
|
1207
|
+
"random_vectors": random_vectors,
|
1208
|
+
}
|
1209
|
+
)
|
1210
|
+
|
1211
|
+
return model_inputs
|
1212
|
+
|
1213
|
+
def forward(
|
1214
|
+
self,
|
1215
|
+
input_ids: Optional[torch.LongTensor] = None,
|
1216
|
+
value_indicators: Optional[torch.BoolTensor] = None,
|
1217
|
+
values: Optional[torch.LongTensor] = None,
|
1218
|
+
past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
|
1219
|
+
attention_mask: Optional[torch.FloatTensor] = None,
|
1220
|
+
position_ids: Optional[torch.LongTensor] = None,
|
1221
|
+
head_mask: Optional[torch.FloatTensor] = None,
|
1222
|
+
random_vectors: Optional[torch.FloatTensor] = None,
|
1223
|
+
labels: Optional[torch.LongTensor] = None,
|
1224
|
+
true_value_indicators: Optional[torch.BoolTensor] = None,
|
1225
|
+
true_values: Optional[torch.LongTensor] = None,
|
1226
|
+
time_to_visits: Optional[torch.FloatTensor] = None,
|
1227
|
+
time_token_indicators: Optional[torch.BoolTensor] = None,
|
1228
|
+
sub_time_tokens: Optional[torch.LongTensor] = None,
|
1229
|
+
use_cache: Optional[bool] = None,
|
1230
|
+
output_attentions: Optional[bool] = None,
|
1231
|
+
output_hidden_states: Optional[bool] = None,
|
1232
|
+
return_dict: Optional[bool] = None,
|
1233
|
+
) -> Union[Tuple, CehrGptCausalLMOutput]:
|
1234
|
+
r"""
|
1235
|
+
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
|
1236
|
+
Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
|
1237
|
+
`labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100`
|
1238
|
+
are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]`
|
1239
|
+
"""
|
1240
|
+
        return_dict = (
            return_dict if return_dict is not None else self.config.use_return_dict
        )

        transformer_outputs = self.cehrgpt(
            input_ids,
            value_indicators=value_indicators,
            values=values,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            position_ids=position_ids,
            random_vectors=random_vectors,
            head_mask=head_mask,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        hidden_states = transformer_outputs[0]
        # get rid of the random vector
        if (
            self.config.causal_sfm
            and hidden_states.shape[1] > self.config.demographics_size + 1
        ):
            hidden_states = torch.concat(
                [
                    hidden_states[:, : self.config.demographics_size],
                    hidden_states[:, self.config.demographics_size + 1 :],
                ],
                dim=1,
            )

        # Set device for model parallelism
        if self.model_parallel:
            torch.cuda.set_device(self.cehrgpt.first_device)
            hidden_states = hidden_states.to(self.lm_head.weight.device)

        lm_logits = self.lm_head(hidden_states)
        if self.cehrgpt.include_values:
            value_logits = self.value_head(hidden_states)
        else:
            value_logits = None

        loss = None
        token_loss = None
        time_token_loss = None
        time_to_visit_loss = None
        token_value_loss = None
        if labels is not None:
            # move labels to the correct device to enable model parallelism
            labels = labels.to(lm_logits.device)
            # Shift so that tokens < n predict n
            shift_logits = lm_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            if (
                self.cehrgpt.config.lab_token_penalty
                and self.cehrgpt.config.lab_token_exists
            ):
                lab_index = torch.isin(
                    shift_labels.view(-1),
                    torch.tensor(self.cehrgpt.config.lab_token_ids).to(
                        lm_logits.device
                    ),
                )
                # Flatten the tokens
                loss_fct = CrossEntropyLoss(reduction="none")
                token_loss = loss_fct(
                    shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)
                )
                token_loss = torch.where(
                    lab_index,
                    token_loss * self.cehrgpt.config.lab_token_loss_weight,
                    token_loss,
                ).mean()
            else:
                # Flatten the tokens
                loss_fct = CrossEntropyLoss()
                token_loss = loss_fct(
                    shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)
                )
            loss = token_loss
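            # Worked example (illustrative, with assumed numbers): if
            # lab_token_loss_weight = 2.0 and the per-token losses are [0.5, 1.0] where
            # only the second target is a lab token, the weighted mean is
            # (0.5 + 2.0 * 1.0) / 2 = 1.25, so lab tokens contribute extra gradient signal.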
            if self.cehrgpt.config.entropy_penalty:
                # Compute probabilities using softmax
                probs = torch.softmax(lm_logits, dim=-1)
                # Compute negative entropy: sum(p * log(p)); add epsilon for numerical stability
                entropy = torch.sum(probs * torch.log(probs + 1e-9), dim=-1)
                # Regularization term: mean entropy scaled by alpha
                loss += self.cehrgpt.config.entropy_penalty_alpha * entropy.mean()
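            # Added note: sum(p * log(p)) equals -H(p), so this term rewards
            # higher-entropy next-token distributions. For example, p = [0.9, 0.1]
            # gives sum(p * log(p)) ~ -0.325, whereas the uniform p = [0.5, 0.5]
            # gives ~ -0.693, i.e. a smaller contribution to the loss.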
            # When use_sub_time_tokenization is enabled, we add another loss term to
            # recover the sub time token predictions for the year/month/day components
            if self.config.use_sub_time_tokenization:
                # Split the last dimension into three parts
                time_loss_fct = CrossEntropyLoss(reduction="none")
                time_token_logits = self.time_token_lm_head(
                    torch.unflatten(hidden_states, 2, (3, -1))
                )
                shifted_time_token_logits = time_token_logits[
                    ..., :-1, :, :
                ].contiguous()
                shifted_time_token_indicators = (
                    time_token_indicators[..., 1:].contiguous().to(lm_logits.device)
                )
                shifted_time_token_labels = (
                    sub_time_tokens[:, 1:, ...].contiguous().to(lm_logits.device)
                )
                time_token_loss = time_loss_fct(
                    shifted_time_token_logits.view(
                        -1, self.config.time_token_vocab_size
                    ),
                    shifted_time_token_labels.view(-1),
                )

                time_token_loss = time_token_loss.view(
                    -1, 3
                ) * shifted_time_token_indicators.view(-1, 1).to(hidden_states.dtype)
                time_token_loss = time_token_loss.sum(-1)
                time_token_loss = (
                    torch.mean(time_token_loss) * self.config.time_token_loss_weight
                )
                loss += time_token_loss
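            # Shape sketch (added for clarity): torch.unflatten(hidden_states, 2, (3, -1))
            # turns (batch, seq, hidden) into (batch, seq, 3, hidden // 3), one slice per
            # sub-time component; the flat per-component losses are regrouped with
            # .view(-1, 3), masked by the time-token indicators, and summed per position.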
            if time_to_visits is not None:
                # Get lambda and k parameters
                lambda_param, k_param = self.tte_head(hidden_states)

                # Perform slicing before tensors are split across GPUs
                shifted_lambda_param = lambda_param[..., :-1, :].contiguous()
                shifted_k_param = k_param[..., :-1, :].contiguous()
                shift_time_to_visits = time_to_visits[..., 1:].contiguous()

                # Move to the same device as lambda_param
                shift_time_to_visits = shift_time_to_visits.to(lambda_param.device)

                time_to_visit_indicator = (shift_time_to_visits >= 0).to(
                    hidden_states.dtype
                )
                # Define the Gamma distribution
                dist = Gamma(
                    shifted_k_param.squeeze(-1), shifted_lambda_param.squeeze(-1)
                )
                # Compute log-probs and apply the time_to_visit_indicator
                log_probs = dist.log_prob(
                    torch.clamp(shift_time_to_visits, min=0.0) + 1e-6
                )
                log_probs *= time_to_visit_indicator
                time_to_visit_loss = (
                    -log_probs.mean() * self.config.time_to_visit_loss_weight
                )
                # Compute the loss
                loss += time_to_visit_loss
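            # Added note: torch.distributions.Gamma takes (concentration, rate), so the
            # term above is the negative Gamma log-likelihood of the observed
            # time-to-next-visit, roughly loss_t = -log p_Gamma(tte_t; k_t, lambda_t);
            # positions without a follow-up visit are zeroed out by the indicator.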
            if true_values is not None and true_value_indicators is not None:
                true_values = true_values.to(value_logits.device)
                shift_value_logits = value_logits[..., :-1, :].contiguous()
                shift_value_indicators = true_value_indicators[..., :-1].contiguous()
                shift_next_values = true_values[..., 1:].contiguous()
                value_loss_fct = CrossEntropyLoss(reduction="none")
                token_value_loss = value_loss_fct(
                    shift_value_logits.view(-1, shift_value_logits.size(-1)),
                    shift_next_values.view(-1),
                )
                token_value_loss *= shift_value_indicators.view(-1)
                loss += token_value_loss.mean()
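        # Added note: `token_value_loss` intentionally stays a per-token vector here, so
        # the raw masked losses can be surfaced in the output below; only its mean is
        # folded into `loss`.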
        if not return_dict:
            output = (lm_logits,) + transformer_outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return CehrGptCausalLMOutput(
            loss=loss,
            logits=lm_logits,
            value_indicators=value_indicators,
            next_value_logits=value_logits,
            past_key_values=transformer_outputs.past_key_values,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
            token_loss=token_loss,
            time_token_loss=time_token_loss,
            time_to_visit_loss=time_to_visit_loss,
            token_value_loss=token_value_loss,
        )
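    # Usage sketch (hypothetical names; the checkpoint path is an assumption):
    #   model = CEHRGPT2LMHeadModel.from_pretrained("path/to/checkpoint")
    #   out = model(input_ids=batch["input_ids"], attention_mask=batch["attention_mask"],
    #               labels=batch["input_ids"])
    #   out.loss.backward()
    # The individual terms (out.token_loss, out.time_token_loss, out.time_to_visit_loss,
    # out.token_value_loss) are exposed for logging.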
    @staticmethod
    def _reorder_cache(
        past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor
    ) -> Tuple[Tuple[torch.Tensor]]:
        """
        Re-orders the `past_key_values` cache when [`~PreTrainedModel.beam_search`] or
        [`~PreTrainedModel.beam_sample`] is called. This is required to match `past_key_values`
        with the correct beam_idx at every generation step.
        """
        return tuple(
            tuple(
                past_state.index_select(0, beam_idx.to(past_state.device))
                for past_state in layer_past
            )
            for layer_past in past_key_values
        )
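    # Illustrative example (added): for beam search, each cached tensor has shape
    # (batch_size * num_beams, num_heads, seq_len, head_dim); index_select along dim 0
    # with beam_idx re-assigns each beam the cache of the hypothesis it was forked from.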
    def _sample(
        self,
        input_ids: torch.LongTensor,
        logits_processor: Optional[LogitsProcessorList] = None,
        stopping_criteria: Optional[StoppingCriteriaList] = None,
        logits_warper: Optional[LogitsProcessorList] = None,
        max_length: Optional[int] = None,
        pad_token_id: Optional[int] = None,
        eos_token_id: Optional[Union[int, List[int]]] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        output_scores: Optional[bool] = None,
        output_logits: Optional[bool] = None,
        return_dict_in_generate: Optional[bool] = None,
        synced_gpus: bool = False,
        streamer: Optional["BaseStreamer"] = None,
        **model_kwargs,
    ) -> Union[CehrGptGenerateDecoderOnlyOutput, torch.LongTensor]:
        # init values
        logits_processor = (
            logits_processor if logits_processor is not None else LogitsProcessorList()
        )
        stopping_criteria = (
            stopping_criteria
            if stopping_criteria is not None
            else StoppingCriteriaList()
        )
        if max_length is not None:
            warnings.warn(
                "`max_length` is deprecated in this function, use"
                " `stopping_criteria=StoppingCriteriaList([MaxLengthCriteria(max_length=max_length)])` instead.",
                UserWarning,
            )
            stopping_criteria = validate_stopping_criteria(
                stopping_criteria, max_length
            )
        logits_warper = (
            logits_warper if logits_warper is not None else LogitsProcessorList()
        )
        pad_token_id = (
            pad_token_id
            if pad_token_id is not None
            else self.generation_config.pad_token_id
        )
        eos_token_id = (
            eos_token_id
            if eos_token_id is not None
            else self.generation_config.eos_token_id
        )
        if isinstance(eos_token_id, int):
            eos_token_id = [eos_token_id]
        eos_token_id_tensor = (
            torch.tensor(eos_token_id).to(input_ids.device)
            if eos_token_id is not None
            else None
        )
        output_scores = (
            output_scores
            if output_scores is not None
            else self.generation_config.output_scores
        )
        output_logits = (
            output_logits
            if output_logits is not None
            else self.generation_config.output_logits
        )
        output_attentions = (
            output_attentions
            if output_attentions is not None
            else self.generation_config.output_attentions
        )
        output_hidden_states = (
            output_hidden_states
            if output_hidden_states is not None
            else self.generation_config.output_hidden_states
        )
        return_dict_in_generate = (
            return_dict_in_generate
            if return_dict_in_generate is not None
            else self.generation_config.return_dict_in_generate
        )

        # init attention / hidden states / scores tuples
        scores = () if (return_dict_in_generate and output_scores) else None
        raw_logits = () if (return_dict_in_generate and output_logits) else None
        decoder_attentions = (
            () if (return_dict_in_generate and output_attentions) else None
        )
        cross_attentions = (
            () if (return_dict_in_generate and output_attentions) else None
        )
        decoder_hidden_states = (
            () if (return_dict_in_generate and output_hidden_states) else None
        )

        # keep track of which sequences are already finished
        batch_size, cur_len = input_ids.shape
        if "inputs_embeds" in model_kwargs:
            cur_len = model_kwargs["inputs_embeds"].shape[1]
        this_peer_finished = False
        unfinished_sequences = torch.ones(
            batch_size, dtype=torch.long, device=input_ids.device
        )
        model_kwargs["cache_position"] = torch.arange(cur_len, device=input_ids.device)
        # Use the lab_token_ids in the argument, otherwise default to the configuration token_ids
        if "lab_token_ids" in model_kwargs:
            lab_token_ids = torch.tensor(
                model_kwargs["lab_token_ids"],
                dtype=torch.int32,
            )
        else:
            lab_token_ids = torch.tensor(
                [] if self.config.lab_token_ids is None else self.config.lab_token_ids,
                dtype=torch.int32,
            )
        value_indicators = torch.zeros_like(input_ids).to(torch.bool)
        values = torch.zeros_like(
            input_ids,
            dtype=torch.int32,
        )
        # Generate initial random_vectors
        if self.cehrgpt.config.causal_sfm:
            model_kwargs["random_vectors"] = torch.rand(
                [batch_size, 1, self.cehrgpt.embed_dim],
                dtype=(
                    torch.bfloat16 if is_flash_attn_2_available() else torch.float32
                ),
                device=input_ids.device,
            )
        else:
            model_kwargs["random_vectors"] = None
        model_kwargs["value_indicators"] = value_indicators
        model_kwargs["values"] = values
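        # The decoding loop below follows the structure of HF GenerationMixin sampling,
        # with one CEHR-GPT addition: alongside input_ids it grows parallel values /
        # value_indicators sequences, sampling a lab value whenever the sampled concept
        # token is a lab token.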
        while self._has_unfinished_sequences(
            this_peer_finished, synced_gpus, device=input_ids.device
        ):
            # prepare model inputs
            model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs)

            # forward pass to get next token
            outputs = self(
                **model_inputs,
                return_dict=True,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
            )

            if synced_gpus and this_peer_finished:
                continue  # don't waste resources running the code we don't need

            next_token_logits = outputs.logits[:, -1, :]

            # pre-process distribution
            next_token_scores = logits_processor(input_ids, next_token_logits)
            next_token_scores = logits_warper(input_ids, next_token_scores)

            # Store scores, attentions and hidden_states when required
            if return_dict_in_generate:
                if output_scores:
                    scores += (next_token_scores,)
                if output_logits:
                    raw_logits += (next_token_logits,)
                if output_attentions:
                    decoder_attentions += (
                        (outputs.decoder_attentions,)
                        if self.config.is_encoder_decoder
                        else (outputs.attentions,)
                    )
                    if self.config.is_encoder_decoder:
                        cross_attentions += (outputs.cross_attentions,)

                if output_hidden_states:
                    decoder_hidden_states += (
                        (outputs.decoder_hidden_states,)
                        if self.config.is_encoder_decoder
                        else (outputs.hidden_states,)
                    )

            # sample
            probs = nn.functional.softmax(next_token_scores, dim=-1)
            next_tokens = torch.multinomial(probs, num_samples=1).squeeze(1)

            # finished sentences should have their next token be a padding token
            if eos_token_id is not None:
                if pad_token_id is None:
                    raise ValueError(
                        "If `eos_token_id` is defined, make sure that `pad_token_id` is defined."
                    )
                next_tokens = next_tokens * unfinished_sequences + pad_token_id * (
                    1 - unfinished_sequences
                )

            # update generated ids, model inputs, and length for next step
            input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1)

            if self.cehrgpt.include_values:
                next_value_indicators = torch.isin(
                    next_tokens, lab_token_ids.to(next_tokens.device)
                )
                next_value_logits = outputs.next_value_logits[:, -1]
                # sample
                next_value_probs = nn.functional.softmax(next_value_logits, dim=-1)
                next_value_tokens = torch.multinomial(next_value_probs, num_samples=1)

                # update value_indicators
                value_indicators = torch.cat(
                    [value_indicators, next_value_indicators[:, None]], dim=-1
                )

                # update values
                values = torch.cat([values, next_value_tokens], dim=-1)

                model_kwargs["value_indicators"] = value_indicators
                model_kwargs["values"] = values

            if streamer is not None:
                streamer.put(next_tokens.cpu())
            model_kwargs = self._update_model_kwargs_for_generation(
                outputs,
                model_kwargs,
                is_encoder_decoder=self.config.is_encoder_decoder,
            )

            # if eos_token was found in one sentence, set sentence to finished
            if eos_token_id_tensor is not None:
                unfinished_sequences = unfinished_sequences.mul(
                    next_tokens.tile(eos_token_id_tensor.shape[0], 1)
                    .ne(eos_token_id_tensor.unsqueeze(1))
                    .prod(dim=0)
                )

            unfinished_sequences = unfinished_sequences & ~stopping_criteria(
                input_ids, scores
            )
            this_peer_finished = unfinished_sequences.max() == 0

        if streamer is not None:
            streamer.end()

        return CehrGptGenerateDecoderOnlyOutput(
            sequences=input_ids,
            sequence_val_masks=(
                value_indicators.to(torch.bool) if self.cehrgpt.include_values else None
            ),
            sequence_vals=(values if self.cehrgpt.include_values else None),
            scores=scores,
            logits=raw_logits,
            attentions=decoder_attentions,
            hidden_states=decoder_hidden_states,
            past_key_values=model_kwargs.get("past_key_values"),
        )

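# Generation sketch (hypothetical names; prompt_ids and the checkpoint path are assumptions):
#   model = CEHRGPT2LMHeadModel.from_pretrained("path/to/checkpoint")
#   out = model.generate(inputs=prompt_ids, max_length=512, do_sample=True,
#                        return_dict_in_generate=True)
#   out.sequences          # generated concept token ids
#   out.sequence_vals      # sampled lab values (only when include_values is enabled)
#   out.sequence_val_masks # marks which positions carry a lab value
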
class CehrGptForClassification(CEHRGPTPreTrainedModel):

    _keep_in_fp32_modules = ["age_batch_norm", "dense_layer", "classifier"]

    def __init__(self, config: CEHRGPTConfig):
        super().__init__(config)

        self.cehrgpt = CEHRGPT2Model(config)
        self.age_batch_norm = torch.nn.BatchNorm1d(1)

        # Workaround: keep the batch-norm parameters in float32
        self.age_batch_norm.weight.data = self.age_batch_norm.weight.data.float()
        self.age_batch_norm.bias.data = self.age_batch_norm.bias.data.float()

        self.dropout = nn.Dropout(config.summary_first_dropout)
        self.dense_layer = nn.Linear(config.hidden_size + 1, config.hidden_size // 2)
        self.dense_dropout = nn.Dropout(config.summary_first_dropout)
        self.classifier = nn.Linear(config.hidden_size // 2, 1)

        # Model parallel
        self.model_parallel = False
        self.device_map = None
        self.gradient_checkpointing = False

        # Initialize weights and apply final processing
        self.post_init()

    def resize_position_embeddings(self, new_num_position_embeddings: Optional[int]):
        return self.cehrgpt.resize_position_embeddings(new_num_position_embeddings)

    def _apply_age_norm(
        self,
        age_at_index: torch.Tensor,
    ) -> torch.Tensor:
        """
        Applies batch normalization to the input age tensor.

        If the batch contains more than one example, standard batch normalization is applied.
        If the batch size is 1, batch normalization is applied without updating the running
        statistics, ensuring that the normalization uses the stored running mean and variance
        without modification.

        Args:
            age_at_index (torch.Tensor): A tensor of shape `(batch_size, num_features)`
                containing the age values to normalize.

        Returns:
            torch.Tensor: A tensor with the normalized age values.
        """
        if age_at_index.shape[0] > 1:
            normalized_age = self.age_batch_norm(age_at_index.float())
        else:
            self.age_batch_norm.eval()
            # Apply batch norm without updating running stats; prevent tracking gradients,
            # since we don't want to update anything
            with torch.no_grad():
                normalized_age = self.age_batch_norm(age_at_index)
            # Set the layer back to training mode for subsequent batches
            self.age_batch_norm.train()
        return normalized_age
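    # Behavior sketch (added): e.g. ages = torch.tensor([[62.0], [45.0]]) is standardized
    # with batch statistics, while a single-row batch is normalized with the stored
    # running mean/variance instead, since training-mode BatchNorm1d is undefined for
    # a batch of size 1.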
    def forward(
        self,
        input_ids: Optional[torch.LongTensor],
        age_at_index: torch.FloatTensor,
        classifier_label: Optional[torch.FloatTensor],
        value_indicators: Optional[torch.BoolTensor] = None,
        values: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> CehrGptSequenceClassifierOutput:
        cehrgpt_output = self.cehrgpt(
            input_ids=input_ids,
            value_indicators=value_indicators,
            values=values,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            position_ids=position_ids,
            head_mask=head_mask,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        # Disable autocasting for precision-sensitive operations
        with torch.autocast(device_type="cuda", enabled=False):
            normalized_age = self._apply_age_norm(age_at_index)

        # In case the model is in bfloat16
        if cehrgpt_output.last_hidden_state.dtype != normalized_age.dtype:
            normalized_age = normalized_age.to(cehrgpt_output.last_hidden_state.dtype)

        # In fine-tuning, the sequences are left-padded, so we use the last element as the pooler
        output_pooler = cehrgpt_output.last_hidden_state[..., -1, :]
        next_input = self.dropout(output_pooler)
        next_input = torch.cat([next_input, normalized_age], dim=1)
        next_input = self.dense_layer(next_input)
        next_input = nn.functional.relu(next_input)
        next_input = self.dense_dropout(next_input)
        logits = self.classifier(next_input)

        loss = None
        if classifier_label is not None:
            loss_fct = nn.BCEWithLogitsLoss()
            loss = loss_fct(logits, classifier_label)

        return CehrGptSequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=cehrgpt_output.last_hidden_state,
            attentions=cehrgpt_output.attentions,
        )
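    # Usage sketch (hypothetical names; tensors shown are assumptions):
    #   clf = CehrGptForClassification.from_pretrained("path/to/checkpoint")
    #   out = clf(input_ids=ids, attention_mask=mask,
    #             age_at_index=torch.tensor([[63.0]]),
    #             classifier_label=torch.tensor([[1.0]]))
    # out.logits has shape (batch_size, 1) and out.loss is BCE-with-logits.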
    def parallelize(self, device_map=None):
        self.cehrgpt.parallelize(device_map=device_map)

    def deparallelize(self):
        self.cehrgpt.deparallelize()