magic-pdf 0.5.13__py3-none-any.whl → 0.6.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46)
  1. magic_pdf/cli/magicpdf.py +18 -7
  2. magic_pdf/dict2md/ocr_mkcontent.py +2 -2
  3. magic_pdf/libs/config_reader.py +10 -0
  4. magic_pdf/libs/version.py +1 -1
  5. magic_pdf/model/__init__.py +1 -0
  6. magic_pdf/model/doc_analyze_by_custom_model.py +38 -15
  7. magic_pdf/model/model_list.py +1 -0
  8. magic_pdf/model/pdf_extract_kit.py +200 -0
  9. magic_pdf/model/pek_sub_modules/__init__.py +0 -0
  10. magic_pdf/model/pek_sub_modules/layoutlmv3/__init__.py +0 -0
  11. magic_pdf/model/pek_sub_modules/layoutlmv3/backbone.py +179 -0
  12. magic_pdf/model/pek_sub_modules/layoutlmv3/beit.py +671 -0
  13. magic_pdf/model/pek_sub_modules/layoutlmv3/deit.py +476 -0
  14. magic_pdf/model/pek_sub_modules/layoutlmv3/layoutlmft/__init__.py +7 -0
  15. magic_pdf/model/pek_sub_modules/layoutlmv3/layoutlmft/data/__init__.py +2 -0
  16. magic_pdf/model/pek_sub_modules/layoutlmv3/layoutlmft/data/cord.py +171 -0
  17. magic_pdf/model/pek_sub_modules/layoutlmv3/layoutlmft/data/data_collator.py +124 -0
  18. magic_pdf/model/pek_sub_modules/layoutlmv3/layoutlmft/data/funsd.py +136 -0
  19. magic_pdf/model/pek_sub_modules/layoutlmv3/layoutlmft/data/image_utils.py +284 -0
  20. magic_pdf/model/pek_sub_modules/layoutlmv3/layoutlmft/data/xfund.py +213 -0
  21. magic_pdf/model/pek_sub_modules/layoutlmv3/layoutlmft/models/__init__.py +7 -0
  22. magic_pdf/model/pek_sub_modules/layoutlmv3/layoutlmft/models/layoutlmv3/__init__.py +24 -0
  23. magic_pdf/model/pek_sub_modules/layoutlmv3/layoutlmft/models/layoutlmv3/configuration_layoutlmv3.py +60 -0
  24. magic_pdf/model/pek_sub_modules/layoutlmv3/layoutlmft/models/layoutlmv3/modeling_layoutlmv3.py +1282 -0
  25. magic_pdf/model/pek_sub_modules/layoutlmv3/layoutlmft/models/layoutlmv3/tokenization_layoutlmv3.py +32 -0
  26. magic_pdf/model/pek_sub_modules/layoutlmv3/layoutlmft/models/layoutlmv3/tokenization_layoutlmv3_fast.py +34 -0
  27. magic_pdf/model/pek_sub_modules/layoutlmv3/model_init.py +150 -0
  28. magic_pdf/model/pek_sub_modules/layoutlmv3/rcnn_vl.py +163 -0
  29. magic_pdf/model/pek_sub_modules/layoutlmv3/visualizer.py +1236 -0
  30. magic_pdf/model/pek_sub_modules/post_process.py +36 -0
  31. magic_pdf/model/pek_sub_modules/self_modify.py +260 -0
  32. magic_pdf/model/pp_structure_v2.py +7 -0
  33. magic_pdf/pipe/AbsPipe.py +8 -14
  34. magic_pdf/pipe/OCRPipe.py +12 -8
  35. magic_pdf/pipe/TXTPipe.py +12 -8
  36. magic_pdf/pipe/UNIPipe.py +9 -7
  37. magic_pdf/resources/model_config/UniMERNet/demo.yaml +46 -0
  38. magic_pdf/resources/model_config/layoutlmv3/layoutlmv3_base_inference.yaml +351 -0
  39. magic_pdf/resources/model_config/model_configs.yaml +9 -0
  40. {magic_pdf-0.5.13.dist-info → magic_pdf-0.6.1.dist-info}/METADATA +95 -12
  41. {magic_pdf-0.5.13.dist-info → magic_pdf-0.6.1.dist-info}/RECORD +45 -19
  42. magic_pdf/model/360_layout_analysis.py +0 -8
  43. {magic_pdf-0.5.13.dist-info → magic_pdf-0.6.1.dist-info}/LICENSE.md +0 -0
  44. {magic_pdf-0.5.13.dist-info → magic_pdf-0.6.1.dist-info}/WHEEL +0 -0
  45. {magic_pdf-0.5.13.dist-info → magic_pdf-0.6.1.dist-info}/entry_points.txt +0 -0
  46. {magic_pdf-0.5.13.dist-info → magic_pdf-0.6.1.dist-info}/top_level.txt +0 -0
magic_pdf/model/pek_sub_modules/layoutlmv3/layoutlmft/models/layoutlmv3/modeling_layoutlmv3.py
@@ -0,0 +1,1282 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """PyTorch LayoutLMv3 model. """
17
+ import math
18
+
19
+ import torch
20
+ import torch.nn as nn
21
+ import torch.nn.functional as F
22
+ import torch.utils.checkpoint
23
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
24
+
25
+ from transformers import apply_chunking_to_forward
26
+ from transformers.modeling_outputs import (
27
+ BaseModelOutputWithPastAndCrossAttentions,
28
+ BaseModelOutputWithPoolingAndCrossAttentions,
29
+ MaskedLMOutput,
30
+ TokenClassifierOutput,
31
+ QuestionAnsweringModelOutput,
32
+ SequenceClassifierOutput,
33
+ )
34
+ from transformers.modeling_utils import PreTrainedModel, find_pruneable_heads_and_indices, prune_linear_layer
35
+ from transformers.models.roberta.modeling_roberta import (
36
+ RobertaIntermediate,
37
+ RobertaLMHead,
38
+ RobertaOutput,
39
+ RobertaSelfOutput,
40
+ )
41
+ from transformers.utils import logging
42
+
43
+ from .configuration_layoutlmv3 import LayoutLMv3Config
44
+ from timm.models.layers import to_2tuple
45
+
46
+
47
+ logger = logging.get_logger(__name__)
48
+
49
+
50
+ class PatchEmbed(nn.Module):
51
+ """ Image to Patch Embedding
52
+ """
53
+ def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
54
+ super().__init__()
55
+ img_size = to_2tuple(img_size)
56
+ patch_size = to_2tuple(patch_size)
57
+ self.patch_shape = (img_size[0] // patch_size[0], img_size[1] // patch_size[1])
58
+ self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
59
+ # The following variables are used by the detection code (mycheckpointer.py)
60
+ self.num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0])
61
+ self.num_patches_w = self.patch_shape[0]
62
+ self.num_patches_h = self.patch_shape[1]
63
+
64
+ def forward(self, x, position_embedding=None):
65
+ x = self.proj(x)
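+ # shape sketch: (B, in_chans, H, W) -> (B, embed_dim, H // patch_size, W // patch_size),
+ # flattened to (B, num_patches, embed_dim) before returning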
66
+
67
+ if position_embedding is not None:
68
+ # interpolate the position embedding to the corresponding size
69
+ position_embedding = position_embedding.view(1, self.patch_shape[0], self.patch_shape[1], -1).permute(0, 3, 1, 2)
70
+ Hp, Wp = x.shape[2], x.shape[3]
71
+ position_embedding = F.interpolate(position_embedding, size=(Hp, Wp), mode='bicubic')
72
+ x = x + position_embedding
73
+
74
+ x = x.flatten(2).transpose(1, 2)
75
+ return x
76
+
77
+ class LayoutLMv3Embeddings(nn.Module):
78
+ """
79
+ Same as BertEmbeddings with a tiny tweak for positional embeddings indexing.
80
+ """
81
+
82
+ # Copied from transformers.models.bert.modeling_bert.BertEmbeddings.__init__
83
+ def __init__(self, config):
84
+ super().__init__()
85
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
86
+ self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
87
+
88
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
89
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
90
+
91
+ # position_ids (1, len position emb) is contiguous in memory and exported when serialized
92
+ self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
93
+
94
+ # End copy
95
+ self.padding_idx = config.pad_token_id
96
+ self.position_embeddings = nn.Embedding(
97
+ config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx
98
+ )
99
+
100
+ self.x_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.coordinate_size)
101
+ self.y_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.coordinate_size)
102
+ self.h_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.shape_size)
103
+ self.w_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.shape_size)
104
+
105
+ def _calc_spatial_position_embeddings(self, bbox):
106
+ try:
107
+ assert torch.all(0 <= bbox) and torch.all(bbox <= 1023)
108
+ left_position_embeddings = self.x_position_embeddings(bbox[:, :, 0])
109
+ upper_position_embeddings = self.y_position_embeddings(bbox[:, :, 1])
110
+ right_position_embeddings = self.x_position_embeddings(bbox[:, :, 2])
111
+ lower_position_embeddings = self.y_position_embeddings(bbox[:, :, 3])
112
+ except IndexError as e:
113
+ raise IndexError("The :obj:`bbox` coordinate values should be within the 0-1000 range.") from e
114
+
115
+ h_position_embeddings = self.h_position_embeddings(torch.clip(bbox[:, :, 3] - bbox[:, :, 1], 0, 1023))
116
+ w_position_embeddings = self.w_position_embeddings(torch.clip(bbox[:, :, 2] - bbox[:, :, 0], 0, 1023))
117
+
118
+ # below is the difference between LayoutLMEmbeddingsV2 (torch.cat) and LayoutLMEmbeddingsV1 (add)
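+ # the concatenation below has width 4 * coordinate_size + 2 * shape_size, which the config must make
+ # equal to hidden_size so the result can be added to the token embeddings in forward()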
119
+ spatial_position_embeddings = torch.cat(
120
+ [
121
+ left_position_embeddings,
122
+ upper_position_embeddings,
123
+ right_position_embeddings,
124
+ lower_position_embeddings,
125
+ h_position_embeddings,
126
+ w_position_embeddings,
127
+ ],
128
+ dim=-1,
129
+ )
130
+ return spatial_position_embeddings
131
+
132
+ def create_position_ids_from_input_ids(self, input_ids, padding_idx, past_key_values_length=0):
133
+ """
134
+ Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
135
+ are ignored. This is modified from fairseq's `utils.make_positions`.
136
+
137
+ Args:
138
+ input_ids: torch.Tensor
139
+
140
+ Returns: torch.Tensor
141
+ """
142
+ # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
143
+ mask = input_ids.ne(padding_idx).int()
144
+ incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask
145
+ return incremental_indices.long() + padding_idx
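+ # worked example: padding_idx=1, input_ids=[[5, 6, 1, 1]] -> mask=[[1, 1, 0, 0]],
+ # cumsum * mask = [[1, 2, 0, 0]] -> position_ids = [[2, 3, 1, 1]] (pad positions stay at padding_idx)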
146
+
147
+ def forward(
148
+ self,
149
+ input_ids=None,
150
+ bbox=None,
151
+ token_type_ids=None,
152
+ position_ids=None,
153
+ inputs_embeds=None,
154
+ past_key_values_length=0,
155
+ ):
156
+ if position_ids is None:
157
+ if input_ids is not None:
158
+ # Create the position ids from the input token ids. Any padded tokens remain padded.
159
+ position_ids = self.create_position_ids_from_input_ids(
160
+ input_ids, self.padding_idx, past_key_values_length).to(input_ids.device)
161
+ else:
162
+ position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds)
163
+
164
+ if input_ids is not None:
165
+ input_shape = input_ids.size()
166
+ else:
167
+ input_shape = inputs_embeds.size()[:-1]
168
+
169
+ if token_type_ids is None:
170
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
171
+
172
+ if inputs_embeds is None:
173
+ inputs_embeds = self.word_embeddings(input_ids)
174
+ token_type_embeddings = self.token_type_embeddings(token_type_ids)
175
+
176
+ embeddings = inputs_embeds + token_type_embeddings
177
+ position_embeddings = self.position_embeddings(position_ids)
178
+ embeddings += position_embeddings
179
+
180
+ spatial_position_embeddings = self._calc_spatial_position_embeddings(bbox)
181
+
182
+ embeddings = embeddings + spatial_position_embeddings
183
+
184
+ embeddings = self.LayerNorm(embeddings)
185
+ embeddings = self.dropout(embeddings)
186
+ return embeddings
187
+
188
+ def create_position_ids_from_inputs_embeds(self, inputs_embeds):
189
+ """
190
+ We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.
191
+
192
+ Args:
193
+ inputs_embeds: torch.Tensor≈
194
+
195
+ Returns: torch.Tensor
196
+ """
197
+ input_shape = inputs_embeds.size()[:-1]
198
+ sequence_length = input_shape[1]
199
+
200
+ position_ids = torch.arange(
201
+ self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device
202
+ )
203
+ return position_ids.unsqueeze(0).expand(input_shape)
204
+
205
+
206
+ class LayoutLMv3PreTrainedModel(PreTrainedModel):
207
+ """
208
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
209
+ models.
210
+ """
211
+
212
+ config_class = LayoutLMv3Config
213
+ base_model_prefix = "layoutlmv3"
214
+
215
+ # Copied from transformers.models.bert.modeling_bert.BertPreTrainedModel._init_weights
216
+ def _init_weights(self, module):
217
+ """Initialize the weights"""
218
+ if isinstance(module, nn.Linear):
219
+ # Slightly different from the TF version which uses truncated_normal for initialization
220
+ # cf https://github.com/pytorch/pytorch/pull/5617
221
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
222
+ if module.bias is not None:
223
+ module.bias.data.zero_()
224
+ elif isinstance(module, nn.Embedding):
225
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
226
+ if module.padding_idx is not None:
227
+ module.weight.data[module.padding_idx].zero_()
228
+ elif isinstance(module, nn.LayerNorm):
229
+ module.bias.data.zero_()
230
+ module.weight.data.fill_(1.0)
231
+
232
+
233
+ class LayoutLMv3SelfAttention(nn.Module):
234
+ def __init__(self, config):
235
+ super().__init__()
236
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
237
+ raise ValueError(
238
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
239
+ f"heads ({config.num_attention_heads})"
240
+ )
241
+
242
+ self.num_attention_heads = config.num_attention_heads
243
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
244
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
245
+
246
+ self.query = nn.Linear(config.hidden_size, self.all_head_size)
247
+ self.key = nn.Linear(config.hidden_size, self.all_head_size)
248
+ self.value = nn.Linear(config.hidden_size, self.all_head_size)
249
+
250
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
251
+ self.has_relative_attention_bias = config.has_relative_attention_bias
252
+ self.has_spatial_attention_bias = config.has_spatial_attention_bias
253
+
254
+ def transpose_for_scores(self, x):
255
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
256
+ x = x.view(*new_x_shape)
257
+ return x.permute(0, 2, 1, 3)
258
+
259
+ def cogview_attn(self, attention_scores, alpha=32):
260
+ '''
261
+ https://arxiv.org/pdf/2105.13290.pdf
262
+ Section 2.4 Stabilization of training: Precision Bottleneck Relaxation (PB-Relax).
263
+ A replacement of the original nn.Softmax(dim=-1)(attention_scores)
264
+ The new attention_probs seems to run slightly slower and to introduce a small bias
265
+ Can use torch.allclose(standard_attention_probs, cogview_attention_probs, atol=1e-08) for comparison
266
+ The smaller atol (e.g., 1e-08), the better.
267
+ '''
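+ # PB-Relax relies on the shift invariance of softmax: softmax(s) == softmax(alpha * (s / alpha - max(s / alpha)))
+ # because alpha * max(s / alpha) == max(s); dividing by alpha before taking the max keeps intermediates small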
268
+ scaled_attention_scores = attention_scores / alpha
269
+ max_value = scaled_attention_scores.amax(dim=(-1)).unsqueeze(-1)
270
+ # max_value = scaled_attention_scores.amax(dim=(-2, -1)).unsqueeze(-1).unsqueeze(-1)
271
+ new_attention_scores = (scaled_attention_scores - max_value) * alpha
272
+ return nn.Softmax(dim=-1)(new_attention_scores)
273
+
274
+ def forward(
275
+ self,
276
+ hidden_states,
277
+ attention_mask=None,
278
+ head_mask=None,
279
+ encoder_hidden_states=None,
280
+ encoder_attention_mask=None,
281
+ past_key_value=None,
282
+ output_attentions=False,
283
+ rel_pos=None,
284
+ rel_2d_pos=None,
285
+ ):
286
+ mixed_query_layer = self.query(hidden_states)
287
+
288
+ # If this is instantiated as a cross-attention module, the keys
289
+ # and values come from an encoder; the attention mask needs to be
290
+ # such that the encoder's padding tokens are not attended to.
291
+ is_cross_attention = encoder_hidden_states is not None
292
+
293
+ if is_cross_attention and past_key_value is not None:
294
+ # reuse k,v, cross_attentions
295
+ key_layer = past_key_value[0]
296
+ value_layer = past_key_value[1]
297
+ attention_mask = encoder_attention_mask
298
+ elif is_cross_attention:
299
+ key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
300
+ value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
301
+ attention_mask = encoder_attention_mask
302
+ elif past_key_value is not None:
303
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
304
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
305
+ key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
306
+ value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
307
+ else:
308
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
309
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
310
+
311
+ query_layer = self.transpose_for_scores(mixed_query_layer)
312
+
313
+ # Take the dot product between "query" and "key" to get the raw attention scores.
314
+ # The attention scores QT K/√d could be significantly larger than input elements, and result in overflow.
315
+ # Changing the computational order into QT(K/√d) alleviates the problem. (https://arxiv.org/pdf/2105.13290.pdf)
316
+ attention_scores = torch.matmul(query_layer / math.sqrt(self.attention_head_size), key_layer.transpose(-1, -2))
317
+
318
+ if self.has_relative_attention_bias and self.has_spatial_attention_bias:
319
+ attention_scores += (rel_pos + rel_2d_pos) / math.sqrt(self.attention_head_size)
320
+ elif self.has_relative_attention_bias:
321
+ attention_scores += rel_pos / math.sqrt(self.attention_head_size)
322
+
323
+ # if self.has_relative_attention_bias:
324
+ # attention_scores += rel_pos
325
+ # if self.has_spatial_attention_bias:
326
+ # attention_scores += rel_2d_pos
327
+
328
+ # attention_scores = attention_scores / math.sqrt(self.attention_head_size)
329
+ if attention_mask is not None:
330
+ # Apply the attention mask (precomputed for all layers in RobertaModel forward() function)
331
+ attention_scores = attention_scores + attention_mask
332
+
333
+ # Normalize the attention scores to probabilities.
334
+ # attention_probs = nn.Softmax(dim=-1)(attention_scores) # comment the line below and use this line for speedup
335
+ attention_probs = self.cogview_attn(attention_scores) # to stabilize training
336
+ # assert torch.allclose(attention_probs, nn.Softmax(dim=-1)(attention_scores), atol=1e-8)
337
+
338
+ # This is actually dropping out entire tokens to attend to, which might
339
+ # seem a bit unusual, but is taken from the original Transformer paper.
340
+ attention_probs = self.dropout(attention_probs)
341
+
342
+ # Mask heads if we want to
343
+ if head_mask is not None:
344
+ attention_probs = attention_probs * head_mask
345
+
346
+ context_layer = torch.matmul(attention_probs, value_layer)
347
+
348
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
349
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
350
+ context_layer = context_layer.view(*new_context_layer_shape)
351
+
352
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
353
+
354
+ return outputs
355
+
356
+
357
+ class LayoutLMv3Attention(nn.Module):
358
+ def __init__(self, config):
359
+ super().__init__()
360
+ self.self = LayoutLMv3SelfAttention(config)
361
+ self.output = RobertaSelfOutput(config)
362
+ self.pruned_heads = set()
363
+
364
+ def prune_heads(self, heads):
365
+ if len(heads) == 0:
366
+ return
367
+ heads, index = find_pruneable_heads_and_indices(
368
+ heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
369
+ )
370
+
371
+ # Prune linear layers
372
+ self.self.query = prune_linear_layer(self.self.query, index)
373
+ self.self.key = prune_linear_layer(self.self.key, index)
374
+ self.self.value = prune_linear_layer(self.self.value, index)
375
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
376
+
377
+ # Update hyper params and store pruned heads
378
+ self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
379
+ self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
380
+ self.pruned_heads = self.pruned_heads.union(heads)
381
+
382
+ def forward(
383
+ self,
384
+ hidden_states,
385
+ attention_mask=None,
386
+ head_mask=None,
387
+ encoder_hidden_states=None,
388
+ encoder_attention_mask=None,
389
+ past_key_value=None,
390
+ output_attentions=False,
391
+ rel_pos=None,
392
+ rel_2d_pos=None,
393
+ ):
394
+ self_outputs = self.self(
395
+ hidden_states,
396
+ attention_mask,
397
+ head_mask,
398
+ encoder_hidden_states,
399
+ encoder_attention_mask,
400
+ past_key_value,
401
+ output_attentions,
402
+ rel_pos=rel_pos,
403
+ rel_2d_pos=rel_2d_pos,
404
+ )
405
+ attention_output = self.output(self_outputs[0], hidden_states)
406
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
407
+ return outputs
408
+
409
+
410
+ class LayoutLMv3Layer(nn.Module):
411
+ def __init__(self, config):
412
+ super().__init__()
413
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
414
+ self.seq_len_dim = 1
415
+ self.attention = LayoutLMv3Attention(config)
416
+ assert not config.is_decoder and not config.add_cross_attention, \
417
+ "This version do not support decoder. Please refer to RoBERTa for implementation of is_decoder."
418
+ self.intermediate = RobertaIntermediate(config)
419
+ self.output = RobertaOutput(config)
420
+
421
+ def forward(
422
+ self,
423
+ hidden_states,
424
+ attention_mask=None,
425
+ head_mask=None,
426
+ encoder_hidden_states=None,
427
+ encoder_attention_mask=None,
428
+ past_key_value=None,
429
+ output_attentions=False,
430
+ rel_pos=None,
431
+ rel_2d_pos=None,
432
+ ):
433
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
434
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
435
+ self_attention_outputs = self.attention(
436
+ hidden_states,
437
+ attention_mask,
438
+ head_mask,
439
+ output_attentions=output_attentions,
440
+ past_key_value=self_attn_past_key_value,
441
+ rel_pos=rel_pos,
442
+ rel_2d_pos=rel_2d_pos,
443
+ )
444
+ attention_output = self_attention_outputs[0]
445
+
446
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
447
+
448
+ layer_output = apply_chunking_to_forward(
449
+ self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
450
+ )
451
+ outputs = (layer_output,) + outputs
452
+
453
+ return outputs
454
+
455
+ def feed_forward_chunk(self, attention_output):
456
+ intermediate_output = self.intermediate(attention_output)
457
+ layer_output = self.output(intermediate_output, attention_output)
458
+ return layer_output
459
+
460
+
461
+ class LayoutLMv3Encoder(nn.Module):
462
+ def __init__(self, config, detection=False, out_features=None):
463
+ super().__init__()
464
+ self.config = config
465
+ self.detection = detection
466
+ self.layer = nn.ModuleList([LayoutLMv3Layer(config) for _ in range(config.num_hidden_layers)])
467
+ self.gradient_checkpointing = False
468
+
469
+ self.has_relative_attention_bias = config.has_relative_attention_bias
470
+ self.has_spatial_attention_bias = config.has_spatial_attention_bias
471
+
472
+ if self.has_relative_attention_bias:
473
+ self.rel_pos_bins = config.rel_pos_bins
474
+ self.max_rel_pos = config.max_rel_pos
475
+ self.rel_pos_onehot_size = config.rel_pos_bins
476
+ self.rel_pos_bias = nn.Linear(self.rel_pos_onehot_size, config.num_attention_heads, bias=False)
477
+
478
+ if self.has_spatial_attention_bias:
479
+ self.max_rel_2d_pos = config.max_rel_2d_pos
480
+ self.rel_2d_pos_bins = config.rel_2d_pos_bins
481
+ self.rel_2d_pos_onehot_size = config.rel_2d_pos_bins
482
+ self.rel_pos_x_bias = nn.Linear(self.rel_2d_pos_onehot_size, config.num_attention_heads, bias=False)
483
+ self.rel_pos_y_bias = nn.Linear(self.rel_2d_pos_onehot_size, config.num_attention_heads, bias=False)
484
+
485
+ if self.detection:
486
+ self.gradient_checkpointing = True
487
+ embed_dim = self.config.hidden_size
488
+ self.out_features = out_features
489
+ self.out_indices = [int(name[5:]) for name in out_features]
490
+ self.fpn1 = nn.Sequential(
491
+ nn.ConvTranspose2d(embed_dim, embed_dim, kernel_size=2, stride=2),
492
+ # nn.SyncBatchNorm(embed_dim),
493
+ nn.BatchNorm2d(embed_dim),
494
+ nn.GELU(),
495
+ nn.ConvTranspose2d(embed_dim, embed_dim, kernel_size=2, stride=2),
496
+ )
497
+
498
+ self.fpn2 = nn.Sequential(
499
+ nn.ConvTranspose2d(embed_dim, embed_dim, kernel_size=2, stride=2),
500
+ )
501
+
502
+ self.fpn3 = nn.Identity()
503
+
504
+ self.fpn4 = nn.MaxPool2d(kernel_size=2, stride=2)
505
+ self.ops = [self.fpn1, self.fpn2, self.fpn3, self.fpn4]
506
+
507
+ def relative_position_bucket(self, relative_position, bidirectional=True, num_buckets=32, max_distance=128):
508
+ ret = 0
509
+ if bidirectional:
510
+ num_buckets //= 2
511
+ ret += (relative_position > 0).long() * num_buckets
512
+ n = torch.abs(relative_position)
513
+ else:
514
+ n = torch.max(-relative_position, torch.zeros_like(relative_position))
515
+ # now n is in the range [0, inf)
516
+
517
+ # half of the buckets are for exact increments in positions
518
+ max_exact = num_buckets // 2
519
+ is_small = n < max_exact
520
+
521
+ # The other half of the buckets are for logarithmically bigger bins in positions up to max_distance
522
+ val_if_large = max_exact + (
523
+ torch.log(n.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact)
524
+ ).to(torch.long)
525
+ val_if_large = torch.min(val_if_large, torch.full_like(val_if_large, num_buckets - 1))
526
+
527
+ ret += torch.where(is_small, n, val_if_large)
528
+ return ret
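+ # e.g. with bidirectional=True, num_buckets=32, max_distance=128: positive offsets land in buckets 16-31,
+ # non-positive offsets in 0-15; within each half, offsets below 8 get exact buckets and larger offsets
+ # share logarithmically spaced buckets up to max_distance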
529
+
530
+ def _cal_1d_pos_emb(self, hidden_states, position_ids, valid_span):
531
+ VISUAL_NUM = 196 + 1
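+ # 196 = (224 // 16) ** 2 patch tokens for the default 224x224 input, plus one CLS token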
532
+
533
+ rel_pos_mat = position_ids.unsqueeze(-2) - position_ids.unsqueeze(-1)
534
+
535
+ if valid_span is not None:
536
+ # for the text part, if two words are not in the same line,
537
+ # set their distance to the max value (position_ids.shape[-1])
538
+ rel_pos_mat[(rel_pos_mat > 0) & (valid_span == False)] = position_ids.shape[1]
539
+ rel_pos_mat[(rel_pos_mat < 0) & (valid_span == False)] = -position_ids.shape[1]
540
+
541
+ # image-text, minimum distance
542
+ rel_pos_mat[:, -VISUAL_NUM:, :-VISUAL_NUM] = 0
543
+ rel_pos_mat[:, :-VISUAL_NUM, -VISUAL_NUM:] = 0
544
+
545
+ rel_pos = self.relative_position_bucket(
546
+ rel_pos_mat,
547
+ num_buckets=self.rel_pos_bins,
548
+ max_distance=self.max_rel_pos,
549
+ )
550
+ rel_pos = F.one_hot(rel_pos, num_classes=self.rel_pos_onehot_size).type_as(hidden_states)
551
+ rel_pos = self.rel_pos_bias(rel_pos).permute(0, 3, 1, 2)
552
+ rel_pos = rel_pos.contiguous()
553
+ return rel_pos
554
+
555
+ def _cal_2d_pos_emb(self, hidden_states, bbox):
556
+ position_coord_x = bbox[:, :, 0]
557
+ position_coord_y = bbox[:, :, 3]
558
+ rel_pos_x_2d_mat = position_coord_x.unsqueeze(-2) - position_coord_x.unsqueeze(-1)
559
+ rel_pos_y_2d_mat = position_coord_y.unsqueeze(-2) - position_coord_y.unsqueeze(-1)
560
+ rel_pos_x = self.relative_position_bucket(
561
+ rel_pos_x_2d_mat,
562
+ num_buckets=self.rel_2d_pos_bins,
563
+ max_distance=self.max_rel_2d_pos,
564
+ )
565
+ rel_pos_y = self.relative_position_bucket(
566
+ rel_pos_y_2d_mat,
567
+ num_buckets=self.rel_2d_pos_bins,
568
+ max_distance=self.max_rel_2d_pos,
569
+ )
570
+ rel_pos_x = F.one_hot(rel_pos_x, num_classes=self.rel_2d_pos_onehot_size).type_as(hidden_states)
571
+ rel_pos_y = F.one_hot(rel_pos_y, num_classes=self.rel_2d_pos_onehot_size).type_as(hidden_states)
572
+ rel_pos_x = self.rel_pos_x_bias(rel_pos_x).permute(0, 3, 1, 2)
573
+ rel_pos_y = self.rel_pos_y_bias(rel_pos_y).permute(0, 3, 1, 2)
574
+ rel_pos_x = rel_pos_x.contiguous()
575
+ rel_pos_y = rel_pos_y.contiguous()
576
+ rel_2d_pos = rel_pos_x + rel_pos_y
577
+ return rel_2d_pos
578
+
579
+ def forward(
580
+ self,
581
+ hidden_states,
582
+ bbox=None,
583
+ attention_mask=None,
584
+ head_mask=None,
585
+ encoder_hidden_states=None,
586
+ encoder_attention_mask=None,
587
+ past_key_values=None,
588
+ use_cache=None,
589
+ output_attentions=False,
590
+ output_hidden_states=False,
591
+ return_dict=True,
592
+ position_ids=None,
593
+ Hp=None,
594
+ Wp=None,
595
+ valid_span=None,
596
+ ):
597
+ all_hidden_states = () if output_hidden_states else None
598
+ all_self_attentions = () if output_attentions else None
599
+ all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
600
+
601
+ next_decoder_cache = () if use_cache else None
602
+
603
+ rel_pos = self._cal_1d_pos_emb(hidden_states, position_ids, valid_span) if self.has_relative_attention_bias else None
604
+ rel_2d_pos = self._cal_2d_pos_emb(hidden_states, bbox) if self.has_spatial_attention_bias else None
605
+
606
+ if self.detection:
607
+ feat_out = {}
608
+ j = 0
609
+
610
+ for i, layer_module in enumerate(self.layer):
611
+ if output_hidden_states:
612
+ all_hidden_states = all_hidden_states + (hidden_states,)
613
+
614
+ layer_head_mask = head_mask[i] if head_mask is not None else None
615
+ past_key_value = past_key_values[i] if past_key_values is not None else None
616
+
617
+ if self.gradient_checkpointing and self.training:
618
+
619
+ if use_cache:
620
+ logger.warning(
621
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
622
+ )
623
+ use_cache = False
624
+
625
+ def create_custom_forward(module):
626
+ def custom_forward(*inputs):
627
+ return module(*inputs)
628
+ # return module(*inputs, past_key_value, output_attentions, rel_pos, rel_2d_pos)
629
+ # The above line will cause error:
630
+ # RuntimeError: Trying to backward through the graph a second time
631
+ # (or directly access saved tensors after they have already been freed).
632
+ return custom_forward
633
+
634
+ layer_outputs = torch.utils.checkpoint.checkpoint(
635
+ create_custom_forward(layer_module),
636
+ hidden_states,
637
+ attention_mask,
638
+ layer_head_mask,
639
+ encoder_hidden_states,
640
+ encoder_attention_mask,
641
+ past_key_value,
642
+ output_attentions,
643
+ rel_pos,
644
+ rel_2d_pos
645
+ )
646
+ else:
647
+ layer_outputs = layer_module(
648
+ hidden_states,
649
+ attention_mask,
650
+ layer_head_mask,
651
+ encoder_hidden_states,
652
+ encoder_attention_mask,
653
+ past_key_value,
654
+ output_attentions,
655
+ rel_pos=rel_pos,
656
+ rel_2d_pos=rel_2d_pos,
657
+ )
658
+
659
+ hidden_states = layer_outputs[0]
660
+ if use_cache:
661
+ next_decoder_cache += (layer_outputs[-1],)
662
+ if output_attentions:
663
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
664
+ if self.config.add_cross_attention:
665
+ all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
666
+
667
+ if self.detection and i in self.out_indices:
668
+ xp = hidden_states[:, -Hp*Wp:, :].permute(0, 2, 1).reshape(len(hidden_states), -1, Hp, Wp)
669
+ feat_out[self.out_features[j]] = self.ops[j](xp.contiguous())
670
+ j += 1
671
+
672
+ if self.detection:
673
+ return feat_out
674
+
675
+ if output_hidden_states:
676
+ all_hidden_states = all_hidden_states + (hidden_states,)
677
+
678
+ if not return_dict:
679
+ return tuple(
680
+ v
681
+ for v in [
682
+ hidden_states,
683
+ next_decoder_cache,
684
+ all_hidden_states,
685
+ all_self_attentions,
686
+ all_cross_attentions,
687
+ ]
688
+ if v is not None
689
+ )
690
+ return BaseModelOutputWithPastAndCrossAttentions(
691
+ last_hidden_state=hidden_states,
692
+ past_key_values=next_decoder_cache,
693
+ hidden_states=all_hidden_states,
694
+ attentions=all_self_attentions,
695
+ cross_attentions=all_cross_attentions,
696
+ )
697
+
698
+
699
+ class LayoutLMv3Model(LayoutLMv3PreTrainedModel):
700
+ """
701
+ """
702
+
703
+ _keys_to_ignore_on_load_missing = [r"position_ids"]
704
+
705
+ # Copied from transformers.models.bert.modeling_bert.BertModel.__init__ with Bert->Roberta
706
+ def __init__(self, config, detection=False, out_features=None, image_only=False):
707
+ super().__init__(config)
708
+ self.config = config
709
+ assert not config.is_decoder and not config.add_cross_attention, \
710
+ "This version do not support decoder. Please refer to RoBERTa for implementation of is_decoder."
711
+ self.detection = detection
712
+ if not self.detection:
713
+ self.image_only = False
714
+ else:
715
+ assert config.visual_embed
716
+ self.image_only = image_only
717
+
718
+ if not self.image_only:
719
+ self.embeddings = LayoutLMv3Embeddings(config)
720
+ self.encoder = LayoutLMv3Encoder(config, detection=detection, out_features=out_features)
721
+
722
+ if config.visual_embed:
723
+ embed_dim = self.config.hidden_size
724
+ # use the default pre-training parameters for fine-tuning (e.g., input_size)
725
+ # when the input_size is larger in fine-tuning, we will interpolate the position embedding in forward
726
+ self.patch_embed = PatchEmbed(embed_dim=embed_dim)
727
+
728
+ patch_size = 16
729
+ size = int(self.config.input_size / patch_size)
730
+ self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
731
+ self.pos_embed = nn.Parameter(torch.zeros(1, size * size + 1, embed_dim))
732
+ self.pos_drop = nn.Dropout(p=0.)
733
+
734
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
735
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
736
+
737
+ if self.config.has_relative_attention_bias or self.config.has_spatial_attention_bias:
738
+ self._init_visual_bbox(img_size=(size, size))
739
+
740
+ from functools import partial
741
+ norm_layer = partial(nn.LayerNorm, eps=1e-6)
742
+ self.norm = norm_layer(embed_dim)
743
+
744
+ self.init_weights()
745
+
746
+ def get_input_embeddings(self):
747
+ return self.embeddings.word_embeddings
748
+
749
+ def set_input_embeddings(self, value):
750
+ self.embeddings.word_embeddings = value
751
+
752
+ def _prune_heads(self, heads_to_prune):
753
+ """
754
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
755
+ class PreTrainedModel
756
+ """
757
+ for layer, heads in heads_to_prune.items():
758
+ self.encoder.layer[layer].attention.prune_heads(heads)
759
+
760
+ def _init_visual_bbox(self, img_size=(14, 14), max_len=1000):
761
+ visual_bbox_x = torch.div(torch.arange(0, max_len * (img_size[1] + 1), max_len),
762
+ img_size[1], rounding_mode='trunc')
763
+ visual_bbox_y = torch.div(torch.arange(0, max_len * (img_size[0] + 1), max_len),
764
+ img_size[0], rounding_mode='trunc')
765
+ visual_bbox = torch.stack(
766
+ [
767
+ visual_bbox_x[:-1].repeat(img_size[0], 1),
768
+ visual_bbox_y[:-1].repeat(img_size[1], 1).transpose(0, 1),
769
+ visual_bbox_x[1:].repeat(img_size[0], 1),
770
+ visual_bbox_y[1:].repeat(img_size[1], 1).transpose(0, 1),
771
+ ],
772
+ dim=-1,
773
+ ).view(-1, 4)
774
+
775
+ cls_token_box = torch.tensor([[0 + 1, 0 + 1, max_len - 1, max_len - 1]])
776
+ self.visual_bbox = torch.cat([cls_token_box, visual_bbox], dim=0)
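+ # one (x0, y0, x1, y1) box per image patch on a 0..max_len coordinate grid, with a near-full-page box
+ # prepended for the CLS token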
777
+
778
+ def _calc_visual_bbox(self, device, dtype, bsz): # , img_size=(14, 14), max_len=1000):
779
+ visual_bbox = self.visual_bbox.repeat(bsz, 1, 1)
780
+ visual_bbox = visual_bbox.to(device).type(dtype)
781
+ return visual_bbox
782
+
783
+ def forward_image(self, x):
784
+ if self.detection:
785
+ x = self.patch_embed(x, self.pos_embed[:, 1:, :] if self.pos_embed is not None else None)
786
+ else:
787
+ x = self.patch_embed(x)
788
+ batch_size, seq_len, _ = x.size()
789
+
790
+ cls_tokens = self.cls_token.expand(batch_size, -1, -1) # stole cls_tokens impl from Phil Wang, thanks
791
+ if self.pos_embed is not None and self.detection:
792
+ cls_tokens = cls_tokens + self.pos_embed[:, :1, :]
793
+
794
+ x = torch.cat((cls_tokens, x), dim=1)
795
+ if self.pos_embed is not None and not self.detection:
796
+ x = x + self.pos_embed
797
+ x = self.pos_drop(x)
798
+
799
+ x = self.norm(x)
800
+ return x
801
+
802
+ # Copied from transformers.models.bert.modeling_bert.BertModel.forward
803
+ def forward(
804
+ self,
805
+ input_ids=None,
806
+ bbox=None,
807
+ attention_mask=None,
808
+ token_type_ids=None,
809
+ valid_span=None,
810
+ position_ids=None,
811
+ head_mask=None,
812
+ inputs_embeds=None,
813
+ encoder_hidden_states=None,
814
+ encoder_attention_mask=None,
815
+ past_key_values=None,
816
+ use_cache=None,
817
+ output_attentions=None,
818
+ output_hidden_states=None,
819
+ return_dict=None,
820
+ images=None,
821
+ ):
822
+ r"""
823
+ encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
824
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
825
+ the model is configured as a decoder.
826
+ encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
827
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
828
+ the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
829
+
830
+ - 1 for tokens that are **not masked**,
831
+ - 0 for tokens that are **masked**.
832
+ past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
833
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
834
+
835
+ If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
836
+ (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
837
+ instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
838
+ use_cache (:obj:`bool`, `optional`):
839
+ If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
840
+ decoding (see :obj:`past_key_values`).
841
+ """
842
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
843
+ output_hidden_states = (
844
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
845
+ )
846
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
847
+
848
+ use_cache = False
849
+
850
+ # if input_ids is not None and inputs_embeds is not None:
851
+ # raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
852
+ if input_ids is not None:
853
+ input_shape = input_ids.size()
854
+ batch_size, seq_length = input_shape
855
+ device = input_ids.device
856
+ elif inputs_embeds is not None:
857
+ input_shape = inputs_embeds.size()[:-1]
858
+ batch_size, seq_length = input_shape
859
+ device = inputs_embeds.device
860
+ elif images is not None:
861
+ batch_size = len(images)
862
+ device = images.device
863
+ else:
864
+ raise ValueError("You have to specify either input_ids or inputs_embeds or images")
865
+
866
+ if not self.image_only:
867
+ # past_key_values_length
868
+ past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
869
+
870
+ if attention_mask is None:
871
+ attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)
872
+ if token_type_ids is None:
873
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
874
+
875
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
876
+ # ourselves in which case we just need to make it broadcastable to all heads.
877
+ # extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)
878
+
879
+ encoder_extended_attention_mask = None
880
+
881
+ # Prepare head mask if needed
882
+ # 1.0 in head_mask indicate we keep the head
883
+ # attention_probs has shape bsz x n_heads x N x N
884
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
885
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
886
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
887
+
888
+ if not self.image_only:
889
+ if bbox is None:
890
+ bbox = torch.zeros(tuple(list(input_shape) + [4]), dtype=torch.long, device=device)
891
+
892
+ embedding_output = self.embeddings(
893
+ input_ids=input_ids,
894
+ bbox=bbox,
895
+ position_ids=position_ids,
896
+ token_type_ids=token_type_ids,
897
+ inputs_embeds=inputs_embeds,
898
+ past_key_values_length=past_key_values_length,
899
+ )
900
+
901
+ final_bbox = final_position_ids = None
902
+ Hp = Wp = None
903
+ if images is not None:
904
+ patch_size = 16
905
+ Hp, Wp = int(images.shape[2] / patch_size), int(images.shape[3] / patch_size)
906
+ visual_emb = self.forward_image(images)
907
+ if self.detection:
908
+ visual_attention_mask = torch.ones((batch_size, visual_emb.shape[1]), dtype=torch.long, device=device)
909
+ if self.image_only:
910
+ attention_mask = visual_attention_mask
911
+ else:
912
+ attention_mask = torch.cat([attention_mask, visual_attention_mask], dim=1)
913
+ elif self.image_only:
914
+ attention_mask = torch.ones((batch_size, visual_emb.shape[1]), dtype=torch.long, device=device)
915
+
916
+ if self.config.has_relative_attention_bias or self.config.has_spatial_attention_bias:
917
+ if self.config.has_spatial_attention_bias:
918
+ visual_bbox = self._calc_visual_bbox(device, dtype=torch.long, bsz=batch_size)
919
+ if self.image_only:
920
+ final_bbox = visual_bbox
921
+ else:
922
+ final_bbox = torch.cat([bbox, visual_bbox], dim=1)
923
+
924
+ visual_position_ids = torch.arange(0, visual_emb.shape[1], dtype=torch.long, device=device).repeat(
925
+ batch_size, 1)
926
+ if self.image_only:
927
+ final_position_ids = visual_position_ids
928
+ else:
929
+ position_ids = torch.arange(0, input_shape[1], device=device).unsqueeze(0)
930
+ position_ids = position_ids.expand_as(input_ids)
931
+ final_position_ids = torch.cat([position_ids, visual_position_ids], dim=1)
932
+
933
+ if self.image_only:
934
+ embedding_output = visual_emb
935
+ else:
936
+ embedding_output = torch.cat([embedding_output, visual_emb], dim=1)
937
+ embedding_output = self.LayerNorm(embedding_output)
938
+ embedding_output = self.dropout(embedding_output)
939
+ elif self.config.has_relative_attention_bias or self.config.has_spatial_attention_bias:
940
+ if self.config.has_spatial_attention_bias:
941
+ final_bbox = bbox
942
+ if self.config.has_relative_attention_bias:
943
+ position_ids = self.embeddings.position_ids[:, :input_shape[1]]
944
+ position_ids = position_ids.expand_as(input_ids)
945
+ final_position_ids = position_ids
946
+
947
+ extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, None, device)
948
+
949
+ encoder_outputs = self.encoder(
950
+ embedding_output,
951
+ bbox=final_bbox,
952
+ position_ids=final_position_ids,
953
+ attention_mask=extended_attention_mask,
954
+ head_mask=head_mask,
955
+ encoder_hidden_states=encoder_hidden_states,
956
+ encoder_attention_mask=encoder_extended_attention_mask,
957
+ past_key_values=past_key_values,
958
+ use_cache=use_cache,
959
+ output_attentions=output_attentions,
960
+ output_hidden_states=output_hidden_states,
961
+ return_dict=return_dict,
962
+ Hp=Hp,
963
+ Wp=Wp,
964
+ valid_span=valid_span,
965
+ )
966
+
967
+ if self.detection:
968
+ return encoder_outputs
969
+
970
+ sequence_output = encoder_outputs[0]
971
+ pooled_output = None
972
+
973
+ if not return_dict:
974
+ return (sequence_output, pooled_output) + encoder_outputs[1:]
975
+
976
+ return BaseModelOutputWithPoolingAndCrossAttentions(
977
+ last_hidden_state=sequence_output,
978
+ pooler_output=pooled_output,
979
+ past_key_values=encoder_outputs.past_key_values,
980
+ hidden_states=encoder_outputs.hidden_states,
981
+ attentions=encoder_outputs.attentions,
982
+ cross_attentions=encoder_outputs.cross_attentions,
983
+ )
984
+
985
+
986
+ class LayoutLMv3ClassificationHead(nn.Module):
987
+ """
988
+ Head for sentence-level classification tasks.
989
+ Reference: RobertaClassificationHead
990
+ """
991
+
992
+ def __init__(self, config, pool_feature=False):
993
+ super().__init__()
994
+ self.pool_feature = pool_feature
995
+ if pool_feature:
996
+ self.dense = nn.Linear(config.hidden_size*3, config.hidden_size)
997
+ else:
998
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
999
+ classifier_dropout = (
1000
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
1001
+ )
1002
+ self.dropout = nn.Dropout(classifier_dropout)
1003
+ self.out_proj = nn.Linear(config.hidden_size, config.num_labels)
1004
+
1005
+ def forward(self, x):
1006
+ # x = features[:, 0, :] # take <s> token (equiv. to [CLS])
1007
+ x = self.dropout(x)
1008
+ x = self.dense(x)
1009
+ x = torch.tanh(x)
1010
+ x = self.dropout(x)
1011
+ x = self.out_proj(x)
1012
+ return x
1013
+
1014
+
1015
+ class LayoutLMv3ForTokenClassification(LayoutLMv3PreTrainedModel):
1016
+ _keys_to_ignore_on_load_unexpected = [r"pooler"]
1017
+ _keys_to_ignore_on_load_missing = [r"position_ids"]
1018
+
1019
+ def __init__(self, config):
1020
+ super().__init__(config)
1021
+ self.num_labels = config.num_labels
1022
+
1023
+ self.layoutlmv3 = LayoutLMv3Model(config)
1024
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
1025
+ if config.num_labels < 10:
1026
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
1027
+ else:
1028
+ self.classifier = LayoutLMv3ClassificationHead(config, pool_feature=False)
1029
+
1030
+ self.init_weights()
1031
+
1032
+ def forward(
1033
+ self,
1034
+ input_ids=None,
1035
+ bbox=None,
1036
+ attention_mask=None,
1037
+ token_type_ids=None,
1038
+ position_ids=None,
1039
+ valid_span=None,
1040
+ head_mask=None,
1041
+ inputs_embeds=None,
1042
+ labels=None,
1043
+ output_attentions=None,
1044
+ output_hidden_states=None,
1045
+ return_dict=None,
1046
+ images=None,
1047
+ ):
1048
+ r"""
1049
+ labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
1050
+ Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels -
1051
+ 1]``.
1052
+ """
1053
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1054
+
1055
+ outputs = self.layoutlmv3(
1056
+ input_ids,
1057
+ bbox=bbox,
1058
+ attention_mask=attention_mask,
1059
+ token_type_ids=token_type_ids,
1060
+ position_ids=position_ids,
1061
+ head_mask=head_mask,
1062
+ inputs_embeds=inputs_embeds,
1063
+ output_attentions=output_attentions,
1064
+ output_hidden_states=output_hidden_states,
1065
+ return_dict=return_dict,
1066
+ images=images,
1067
+ valid_span=valid_span,
1068
+ )
1069
+
1070
+ sequence_output = outputs[0]
1071
+
1072
+ sequence_output = self.dropout(sequence_output)
1073
+ logits = self.classifier(sequence_output)
1074
+
1075
+ loss = None
1076
+ if labels is not None:
1077
+ loss_fct = CrossEntropyLoss()
1078
+ # Only keep active parts of the loss
1079
+ if attention_mask is not None:
1080
+ active_loss = attention_mask.view(-1) == 1
1081
+ active_logits = logits.view(-1, self.num_labels)
1082
+ active_labels = torch.where(
1083
+ active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)
1084
+ )
1085
+ loss = loss_fct(active_logits, active_labels)
1086
+ else:
1087
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1088
+
1089
+ if not return_dict:
1090
+ output = (logits,) + outputs[2:]
1091
+ return ((loss,) + output) if loss is not None else output
1092
+
1093
+ return TokenClassifierOutput(
1094
+ loss=loss,
1095
+ logits=logits,
1096
+ hidden_states=outputs.hidden_states,
1097
+ attentions=outputs.attentions,
1098
+ )
1099
+
1100
+
1101
+ class LayoutLMv3ForQuestionAnswering(LayoutLMv3PreTrainedModel):
1102
+ _keys_to_ignore_on_load_unexpected = [r"pooler"]
1103
+ _keys_to_ignore_on_load_missing = [r"position_ids"]
1104
+
1105
+ def __init__(self, config):
1106
+ super().__init__(config)
1107
+ self.num_labels = config.num_labels
1108
+
1109
+ self.layoutlmv3 = LayoutLMv3Model(config)
1110
+ # self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
1111
+ self.qa_outputs = LayoutLMv3ClassificationHead(config, pool_feature=False)
1112
+
1113
+ self.init_weights()
1114
+
1115
+ def forward(
1116
+ self,
1117
+ input_ids=None,
1118
+ attention_mask=None,
1119
+ token_type_ids=None,
1120
+ position_ids=None,
1121
+ valid_span=None,
1122
+ head_mask=None,
1123
+ inputs_embeds=None,
1124
+ start_positions=None,
1125
+ end_positions=None,
1126
+ output_attentions=None,
1127
+ output_hidden_states=None,
1128
+ return_dict=None,
1129
+ bbox=None,
1130
+ images=None,
1131
+ ):
1132
+ r"""
1133
+ start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
1134
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
1135
+ Positions are clamped to the length of the sequence (:obj:`sequence_length`). Positions outside of the
1136
+ sequence are not taken into account for computing the loss.
1137
+ end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
1138
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
1139
+ Positions are clamped to the length of the sequence (:obj:`sequence_length`). Positions outside of the
1140
+ sequence are not taken into account for computing the loss.
1141
+ """
1142
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1143
+
1144
+ outputs = self.layoutlmv3(
1145
+ input_ids,
1146
+ attention_mask=attention_mask,
1147
+ token_type_ids=token_type_ids,
1148
+ position_ids=position_ids,
1149
+ head_mask=head_mask,
1150
+ inputs_embeds=inputs_embeds,
1151
+ output_attentions=output_attentions,
1152
+ output_hidden_states=output_hidden_states,
1153
+ return_dict=return_dict,
1154
+ bbox=bbox,
1155
+ images=images,
1156
+ valid_span=valid_span,
1157
+ )
1158
+
1159
+ sequence_output = outputs[0]
1160
+
1161
+ logits = self.qa_outputs(sequence_output)
1162
+ start_logits, end_logits = logits.split(1, dim=-1)
1163
+ start_logits = start_logits.squeeze(-1).contiguous()
1164
+ end_logits = end_logits.squeeze(-1).contiguous()
1165
+
1166
+ total_loss = None
1167
+ if start_positions is not None and end_positions is not None:
1168
+ # If we are on multi-GPU, split adds an extra dimension; squeeze it
1169
+ if len(start_positions.size()) > 1:
1170
+ start_positions = start_positions.squeeze(-1)
1171
+ if len(end_positions.size()) > 1:
1172
+ end_positions = end_positions.squeeze(-1)
1173
+ # sometimes the start/end positions are outside our model inputs; we ignore these terms
1174
+ ignored_index = start_logits.size(1)
1175
+ start_positions = start_positions.clamp(0, ignored_index)
1176
+ end_positions = end_positions.clamp(0, ignored_index)
1177
+
1178
+ loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
1179
+ start_loss = loss_fct(start_logits, start_positions)
1180
+ end_loss = loss_fct(end_logits, end_positions)
1181
+ total_loss = (start_loss + end_loss) / 2
1182
+
1183
+ if not return_dict:
1184
+ output = (start_logits, end_logits) + outputs[2:]
1185
+ return ((total_loss,) + output) if total_loss is not None else output
1186
+
1187
+ return QuestionAnsweringModelOutput(
1188
+ loss=total_loss,
1189
+ start_logits=start_logits,
1190
+ end_logits=end_logits,
1191
+ hidden_states=outputs.hidden_states,
1192
+ attentions=outputs.attentions,
1193
+ )
1194
+
1195
+
1196
+ class LayoutLMv3ForSequenceClassification(LayoutLMv3PreTrainedModel):
1197
+ _keys_to_ignore_on_load_missing = [r"position_ids"]
1198
+
1199
+ def __init__(self, config):
1200
+ super().__init__(config)
1201
+ self.num_labels = config.num_labels
1202
+ self.config = config
1203
+ self.layoutlmv3 = LayoutLMv3Model(config)
1204
+ self.classifier = LayoutLMv3ClassificationHead(config, pool_feature=False)
1205
+
1206
+ self.init_weights()
1207
+
1208
+ def forward(
1209
+ self,
1210
+ input_ids=None,
1211
+ attention_mask=None,
1212
+ token_type_ids=None,
1213
+ position_ids=None,
1214
+ valid_span=None,
1215
+ head_mask=None,
1216
+ inputs_embeds=None,
1217
+ labels=None,
1218
+ output_attentions=None,
1219
+ output_hidden_states=None,
1220
+ return_dict=None,
1221
+ bbox=None,
1222
+ images=None,
1223
+ ):
1224
+ r"""
1225
+ labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
1226
+ Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,
1227
+ config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
1228
+ If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1229
+ """
1230
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1231
+
1232
+ outputs = self.layoutlmv3(
1233
+ input_ids,
1234
+ attention_mask=attention_mask,
1235
+ token_type_ids=token_type_ids,
1236
+ position_ids=position_ids,
1237
+ head_mask=head_mask,
1238
+ inputs_embeds=inputs_embeds,
1239
+ output_attentions=output_attentions,
1240
+ output_hidden_states=output_hidden_states,
1241
+ return_dict=return_dict,
1242
+ bbox=bbox,
1243
+ images=images,
1244
+ valid_span=valid_span,
1245
+ )
1246
+
1247
+ sequence_output = outputs[0][:, 0, :]
1248
+ logits = self.classifier(sequence_output)
1249
+
1250
+ loss = None
1251
+ if labels is not None:
1252
+ if self.config.problem_type is None:
1253
+ if self.num_labels == 1:
1254
+ self.config.problem_type = "regression"
1255
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
1256
+ self.config.problem_type = "single_label_classification"
1257
+ else:
1258
+ self.config.problem_type = "multi_label_classification"
1259
+
1260
+ if self.config.problem_type == "regression":
1261
+ loss_fct = MSELoss()
1262
+ if self.num_labels == 1:
1263
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
1264
+ else:
1265
+ loss = loss_fct(logits, labels)
1266
+ elif self.config.problem_type == "single_label_classification":
1267
+ loss_fct = CrossEntropyLoss()
1268
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1269
+ elif self.config.problem_type == "multi_label_classification":
1270
+ loss_fct = BCEWithLogitsLoss()
1271
+ loss = loss_fct(logits, labels)
1272
+
1273
+ if not return_dict:
1274
+ output = (logits,) + outputs[2:]
1275
+ return ((loss,) + output) if loss is not None else output
1276
+
1277
+ return SequenceClassifierOutput(
1278
+ loss=loss,
1279
+ logits=logits,
1280
+ hidden_states=outputs.hidden_states,
1281
+ attentions=outputs.attentions,
1282
+ )
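
For orientation, the following is a minimal sketch of exercising the vendored token-classification model defined above with dummy tensors. The import paths mirror the file layout added in this release, and the field defaults of the vendored LayoutLMv3Config (configuration_layoutlmv3.py, not shown here) are assumed to be sufficient to construct the model, so treat this as an illustration rather than a documented entry point; real usage goes through the magic_pdf pipeline and a pretrained checkpoint.

import torch
from magic_pdf.model.pek_sub_modules.layoutlmv3.layoutlmft.models.layoutlmv3.configuration_layoutlmv3 import LayoutLMv3Config
from magic_pdf.model.pek_sub_modules.layoutlmv3.layoutlmft.models.layoutlmv3.modeling_layoutlmv3 import LayoutLMv3ForTokenClassification

config = LayoutLMv3Config(num_labels=5)      # defaults assumed; real use loads a pretrained checkpoint/config
model = LayoutLMv3ForTokenClassification(config).eval()

input_ids = torch.randint(0, config.vocab_size, (1, 16))
bbox = torch.randint(0, 1000, (1, 16, 4))    # coordinates must stay within the 0-1023 range the embeddings assert
attention_mask = torch.ones(1, 16, dtype=torch.long)

with torch.no_grad():
    outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask)

print(outputs.logits.shape)                  # expected: torch.Size([1, 16, 5])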