magic-pdf 1.2.2__py3-none-any.whl → 1.3.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (102)
  1. magic_pdf/data/batch_build_dataset.py +156 -0
  2. magic_pdf/data/dataset.py +56 -25
  3. magic_pdf/data/utils.py +108 -9
  4. magic_pdf/dict2md/ocr_mkcontent.py +4 -3
  5. magic_pdf/libs/pdf_image_tools.py +11 -6
  6. magic_pdf/libs/performance_stats.py +12 -1
  7. magic_pdf/libs/version.py +1 -1
  8. magic_pdf/model/batch_analyze.py +175 -201
  9. magic_pdf/model/doc_analyze_by_custom_model.py +142 -92
  10. magic_pdf/model/pdf_extract_kit.py +5 -38
  11. magic_pdf/model/sub_modules/language_detection/utils.py +2 -4
  12. magic_pdf/model/sub_modules/language_detection/yolov11/YOLOv11.py +24 -19
  13. magic_pdf/model/sub_modules/layout/doclayout_yolo/DocLayoutYOLO.py +3 -1
  14. magic_pdf/model/sub_modules/mfd/yolov8/YOLOv8.py +3 -1
  15. magic_pdf/model/sub_modules/mfr/unimernet/Unimernet.py +31 -102
  16. magic_pdf/model/sub_modules/mfr/unimernet/unimernet_hf/__init__.py +13 -0
  17. magic_pdf/model/sub_modules/mfr/unimernet/unimernet_hf/modeling_unimernet.py +189 -0
  18. magic_pdf/model/sub_modules/mfr/unimernet/unimernet_hf/unimer_mbart/__init__.py +8 -0
  19. magic_pdf/model/sub_modules/mfr/unimernet/unimernet_hf/unimer_mbart/configuration_unimer_mbart.py +163 -0
  20. magic_pdf/model/sub_modules/mfr/unimernet/unimernet_hf/unimer_mbart/modeling_unimer_mbart.py +2351 -0
  21. magic_pdf/model/sub_modules/mfr/unimernet/unimernet_hf/unimer_swin/__init__.py +9 -0
  22. magic_pdf/model/sub_modules/mfr/unimernet/unimernet_hf/unimer_swin/configuration_unimer_swin.py +132 -0
  23. magic_pdf/model/sub_modules/mfr/unimernet/unimernet_hf/unimer_swin/image_processing_unimer_swin.py +132 -0
  24. magic_pdf/model/sub_modules/mfr/unimernet/unimernet_hf/unimer_swin/modeling_unimer_swin.py +1084 -0
  25. magic_pdf/model/sub_modules/model_init.py +50 -37
  26. magic_pdf/model/sub_modules/model_utils.py +18 -12
  27. magic_pdf/model/sub_modules/ocr/paddleocr2pytorch/__init__.py +1 -0
  28. magic_pdf/model/sub_modules/ocr/{paddleocr → paddleocr2pytorch}/ocr_utils.py +102 -97
  29. magic_pdf/model/sub_modules/ocr/paddleocr2pytorch/pytorch_paddle.py +193 -0
  30. magic_pdf/model/sub_modules/ocr/paddleocr2pytorch/pytorchocr/base_ocr_v20.py +39 -0
  31. magic_pdf/model/sub_modules/ocr/paddleocr2pytorch/pytorchocr/data/__init__.py +8 -0
  32. magic_pdf/model/sub_modules/ocr/paddleocr2pytorch/pytorchocr/data/imaug/__init__.py +48 -0
  33. magic_pdf/model/sub_modules/ocr/paddleocr2pytorch/pytorchocr/data/imaug/operators.py +418 -0
  34. magic_pdf/model/sub_modules/ocr/paddleocr2pytorch/pytorchocr/modeling/architectures/__init__.py +25 -0
  35. magic_pdf/model/sub_modules/ocr/paddleocr2pytorch/pytorchocr/modeling/architectures/base_model.py +105 -0
  36. magic_pdf/model/sub_modules/ocr/paddleocr2pytorch/pytorchocr/modeling/backbones/__init__.py +62 -0
  37. magic_pdf/model/sub_modules/ocr/paddleocr2pytorch/pytorchocr/modeling/backbones/det_mobilenet_v3.py +269 -0
  38. magic_pdf/model/sub_modules/ocr/paddleocr2pytorch/pytorchocr/modeling/backbones/rec_hgnet.py +290 -0
  39. magic_pdf/model/sub_modules/ocr/paddleocr2pytorch/pytorchocr/modeling/backbones/rec_lcnetv3.py +516 -0
  40. magic_pdf/model/sub_modules/ocr/paddleocr2pytorch/pytorchocr/modeling/backbones/rec_mobilenet_v3.py +136 -0
  41. magic_pdf/model/sub_modules/ocr/paddleocr2pytorch/pytorchocr/modeling/backbones/rec_mv1_enhance.py +234 -0
  42. magic_pdf/model/sub_modules/ocr/paddleocr2pytorch/pytorchocr/modeling/backbones/rec_svtrnet.py +638 -0
  43. magic_pdf/model/sub_modules/ocr/paddleocr2pytorch/pytorchocr/modeling/common.py +76 -0
  44. magic_pdf/model/sub_modules/ocr/paddleocr2pytorch/pytorchocr/modeling/heads/__init__.py +43 -0
  45. magic_pdf/model/sub_modules/ocr/paddleocr2pytorch/pytorchocr/modeling/heads/cls_head.py +23 -0
  46. magic_pdf/model/sub_modules/ocr/paddleocr2pytorch/pytorchocr/modeling/heads/det_db_head.py +109 -0
  47. magic_pdf/model/sub_modules/ocr/paddleocr2pytorch/pytorchocr/modeling/heads/rec_ctc_head.py +54 -0
  48. magic_pdf/model/sub_modules/ocr/paddleocr2pytorch/pytorchocr/modeling/heads/rec_multi_head.py +58 -0
  49. magic_pdf/model/sub_modules/ocr/paddleocr2pytorch/pytorchocr/modeling/necks/__init__.py +29 -0
  50. magic_pdf/model/sub_modules/ocr/paddleocr2pytorch/pytorchocr/modeling/necks/db_fpn.py +456 -0
  51. magic_pdf/model/sub_modules/ocr/paddleocr2pytorch/pytorchocr/modeling/necks/intracl.py +117 -0
  52. magic_pdf/model/sub_modules/ocr/paddleocr2pytorch/pytorchocr/modeling/necks/rnn.py +228 -0
  53. magic_pdf/model/sub_modules/ocr/paddleocr2pytorch/pytorchocr/postprocess/__init__.py +33 -0
  54. magic_pdf/model/sub_modules/ocr/paddleocr2pytorch/pytorchocr/postprocess/cls_postprocess.py +20 -0
  55. magic_pdf/model/sub_modules/ocr/paddleocr2pytorch/pytorchocr/postprocess/db_postprocess.py +179 -0
  56. magic_pdf/model/sub_modules/ocr/paddleocr2pytorch/pytorchocr/postprocess/rec_postprocess.py +690 -0
  57. magic_pdf/model/sub_modules/ocr/paddleocr2pytorch/pytorchocr/utils/__init__.py +0 -0
  58. magic_pdf/model/sub_modules/ocr/paddleocr2pytorch/pytorchocr/utils/resources/arch_config.yaml +383 -0
  59. magic_pdf/model/sub_modules/ocr/paddleocr2pytorch/pytorchocr/utils/resources/dict/arabic_dict.txt +162 -0
  60. magic_pdf/model/sub_modules/ocr/paddleocr2pytorch/pytorchocr/utils/resources/dict/chinese_cht_dict.txt +8421 -0
  61. magic_pdf/model/sub_modules/ocr/paddleocr2pytorch/pytorchocr/utils/resources/dict/cyrillic_dict.txt +163 -0
  62. magic_pdf/model/sub_modules/ocr/paddleocr2pytorch/pytorchocr/utils/resources/dict/devanagari_dict.txt +167 -0
  63. magic_pdf/model/sub_modules/ocr/paddleocr2pytorch/pytorchocr/utils/resources/dict/en_dict.txt +95 -0
  64. magic_pdf/model/sub_modules/ocr/paddleocr2pytorch/pytorchocr/utils/resources/dict/japan_dict.txt +4399 -0
  65. magic_pdf/model/sub_modules/ocr/paddleocr2pytorch/pytorchocr/utils/resources/dict/ka_dict.txt +153 -0
  66. magic_pdf/model/sub_modules/ocr/paddleocr2pytorch/pytorchocr/utils/resources/dict/korean_dict.txt +3688 -0
  67. magic_pdf/model/sub_modules/ocr/paddleocr2pytorch/pytorchocr/utils/resources/dict/latin_dict.txt +185 -0
  68. magic_pdf/model/sub_modules/ocr/paddleocr2pytorch/pytorchocr/utils/resources/dict/ppocr_keys_v1.txt +6623 -0
  69. magic_pdf/model/sub_modules/ocr/paddleocr2pytorch/pytorchocr/utils/resources/dict/ta_dict.txt +128 -0
  70. magic_pdf/model/sub_modules/ocr/paddleocr2pytorch/pytorchocr/utils/resources/dict/te_dict.txt +151 -0
  71. magic_pdf/model/sub_modules/ocr/paddleocr2pytorch/pytorchocr/utils/resources/models_config.yml +49 -0
  72. magic_pdf/model/sub_modules/ocr/paddleocr2pytorch/tools/__init__.py +1 -0
  73. magic_pdf/model/sub_modules/ocr/paddleocr2pytorch/tools/infer/__init__.py +1 -0
  74. magic_pdf/model/sub_modules/ocr/paddleocr2pytorch/tools/infer/predict_cls.py +106 -0
  75. magic_pdf/model/sub_modules/ocr/paddleocr2pytorch/tools/infer/predict_det.py +217 -0
  76. magic_pdf/model/sub_modules/ocr/paddleocr2pytorch/tools/infer/predict_rec.py +440 -0
  77. magic_pdf/model/sub_modules/ocr/paddleocr2pytorch/tools/infer/predict_system.py +104 -0
  78. magic_pdf/model/sub_modules/ocr/paddleocr2pytorch/tools/infer/pytorchocr_utility.py +227 -0
  79. magic_pdf/model/sub_modules/table/rapidtable/rapid_table.py +15 -19
  80. magic_pdf/pdf_parse_union_core_v2.py +112 -74
  81. magic_pdf/pre_proc/ocr_dict_merge.py +9 -1
  82. magic_pdf/pre_proc/ocr_span_list_modify.py +51 -0
  83. magic_pdf/resources/model_config/model_configs.yaml +1 -1
  84. magic_pdf/resources/slanet_plus/slanet-plus.onnx +0 -0
  85. magic_pdf/tools/cli.py +30 -12
  86. magic_pdf/tools/common.py +90 -12
  87. {magic_pdf-1.2.2.dist-info → magic_pdf-1.3.1.dist-info}/METADATA +92 -59
  88. magic_pdf-1.3.1.dist-info/RECORD +203 -0
  89. {magic_pdf-1.2.2.dist-info → magic_pdf-1.3.1.dist-info}/WHEEL +1 -1
  90. magic_pdf/model/sub_modules/ocr/paddleocr/ppocr_273_mod.py +0 -204
  91. magic_pdf/model/sub_modules/ocr/paddleocr/ppocr_291_mod.py +0 -213
  92. magic_pdf/model/sub_modules/table/structeqtable/struct_eqtable.py +0 -37
  93. magic_pdf/model/sub_modules/table/tablemaster/tablemaster_paddle.py +0 -71
  94. magic_pdf/resources/model_config/UniMERNet/demo.yaml +0 -46
  95. magic_pdf/resources/model_config/layoutlmv3/layoutlmv3_base_inference.yaml +0 -351
  96. magic_pdf-1.2.2.dist-info/RECORD +0 -147
  97. /magic_pdf/model/sub_modules/{ocr/paddleocr/__init__.py → mfr/unimernet/unimernet_hf/unimer_mbart/tokenization_unimer_mbart.py} +0 -0
  98. /magic_pdf/model/sub_modules/{table/structeqtable → ocr/paddleocr2pytorch/pytorchocr}/__init__.py +0 -0
  99. /magic_pdf/model/sub_modules/{table/tablemaster → ocr/paddleocr2pytorch/pytorchocr/modeling}/__init__.py +0 -0
  100. {magic_pdf-1.2.2.dist-info → magic_pdf-1.3.1.dist-info}/LICENSE.md +0 -0
  101. {magic_pdf-1.2.2.dist-info → magic_pdf-1.3.1.dist-info}/entry_points.txt +0 -0
  102. {magic_pdf-1.2.2.dist-info → magic_pdf-1.3.1.dist-info}/top_level.txt +0 -0
magic_pdf/model/sub_modules/mfr/unimernet/unimernet_hf/unimer_swin/modeling_unimer_swin.py (new file, +1084)
@@ -0,0 +1,1084 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """PyTorch UnimerSwin Transformer model.
16
+
17
+ This implementation is identical to a regular Swin Transformer, without final layer norm on top of the final hidden
18
+ states."""
19
+
20
+ import collections.abc
21
+ import math
22
+ from dataclasses import dataclass
23
+ from typing import Optional, Tuple, Union
24
+
25
+ import torch
26
+ import torch.utils.checkpoint
27
+ from torch import nn
28
+
29
+ from transformers.activations import ACT2FN
30
+ from transformers.modeling_utils import PreTrainedModel
31
+ from transformers.pytorch_utils import find_pruneable_heads_and_indices, meshgrid, prune_linear_layer
32
+ from transformers.utils import (
33
+ ModelOutput,
34
+ add_code_sample_docstrings,
35
+ add_start_docstrings,
36
+ add_start_docstrings_to_model_forward,
37
+ logging,
38
+ torch_int,
39
+ )
40
+ from .configuration_unimer_swin import UnimerSwinConfig
41
+
42
+
43
+ logger = logging.get_logger(__name__)
44
+
45
+ # General docstring
46
+ _CONFIG_FOR_DOC = "UnimerSwinConfig"
47
+
48
+ # Base docstring
49
+ _CHECKPOINT_FOR_DOC = "https://huggingface.co/naver-clova-ix/donut-base"
50
+ _EXPECTED_OUTPUT_SHAPE = [1, 49, 768]
51
+
52
+
53
+ @dataclass
54
+ # Copied from transformers.models.swin.modeling_swin.SwinEncoderOutput with Swin->UnimerSwin
55
+ class UnimerSwinEncoderOutput(ModelOutput):
56
+ """
57
+ UnimerSwin encoder's outputs, with potential hidden states and attentions.
58
+
59
+ Args:
60
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
61
+ Sequence of hidden-states at the output of the last layer of the model.
62
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
63
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
64
+ shape `(batch_size, sequence_length, hidden_size)`.
65
+
66
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
67
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
68
+ Tuple of `torch.FloatTensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length,
69
+ sequence_length)`.
70
+
71
+ Attention weights after the attention softmax, used to compute the weighted average in the self-attention
72
+ heads.
73
+ reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
74
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
75
+ shape `(batch_size, hidden_size, height, width)`.
76
+
77
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
78
+ include the spatial dimensions.
79
+ """
80
+
81
+ last_hidden_state: torch.FloatTensor = None
82
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
83
+ attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
84
+ reshaped_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
85
+
86
+
87
+ @dataclass
88
+ # Copied from transformers.models.swin.modeling_swin.SwinModelOutput with Swin->UnimerSwin
89
+ class UnimerSwinModelOutput(ModelOutput):
90
+ """
91
+ UnimerSwin model's outputs that also contains a pooling of the last hidden states.
92
+
93
+ Args:
94
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
95
+ Sequence of hidden-states at the output of the last layer of the model.
96
+ pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`, *optional*, returned when `add_pooling_layer=True` is passed):
97
+ Average pooling of the last layer hidden-state.
98
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
99
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
100
+ shape `(batch_size, sequence_length, hidden_size)`.
101
+
102
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
103
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
104
+ Tuple of `torch.FloatTensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length,
105
+ sequence_length)`.
106
+
107
+ Attention weights after the attention softmax, used to compute the weighted average in the self-attention
108
+ heads.
109
+ reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
110
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
111
+ shape `(batch_size, hidden_size, height, width)`.
112
+
113
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
114
+ include the spatial dimensions.
115
+ """
116
+
117
+ last_hidden_state: torch.FloatTensor = None
118
+ pooler_output: Optional[torch.FloatTensor] = None
119
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
120
+ attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
121
+ reshaped_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
122
+
123
+
124
+ # Copied from transformers.models.swin.modeling_swin.window_partition
125
+ def window_partition(input_feature, window_size):
126
+ """
127
+ Partitions the given input into windows.
128
+ """
129
+ batch_size, height, width, num_channels = input_feature.shape
130
+ input_feature = input_feature.view(
131
+ batch_size, height // window_size, window_size, width // window_size, window_size, num_channels
132
+ )
133
+ windows = input_feature.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, num_channels)
134
+ return windows
135
+
136
+
137
+ # Copied from transformers.models.swin.modeling_swin.window_reverse
138
+ def window_reverse(windows, window_size, height, width):
139
+ """
140
+ Merges windows to produce higher resolution features.
141
+ """
142
+ num_channels = windows.shape[-1]
143
+ windows = windows.view(-1, height // window_size, width // window_size, window_size, window_size, num_channels)
144
+ windows = windows.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, height, width, num_channels)
145
+ return windows
146
+
147
+
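For illustration only (not part of the diff): when height and width are multiples of the window size, the two helpers above are exact inverses of each other. A minimal sketch:

    import torch

    x = torch.randn(1, 8, 8, 3)                         # (batch, height, width, channels)
    windows = window_partition(x, window_size=4)        # -> (4, 4, 4, 3): four non-overlapping 4x4 windows
    restored = window_reverse(windows, 4, height=8, width=8)
    assert torch.equal(restored, x)                     # partition followed by reverse is lossless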
148
+ # Copied from transformers.models.swin.modeling_swin.SwinEmbeddings with Swin->UnimerSwin
149
+ class UnimerSwinEmbeddings(nn.Module):
150
+ """
151
+ Construct the patch and position embeddings. Optionally, also the mask token.
152
+ """
153
+
154
+ def __init__(self, config, use_mask_token=False):
155
+ super().__init__()
156
+
157
+ self.patch_embeddings = UnimerSwinPatchEmbeddings(config)
158
+ num_patches = self.patch_embeddings.num_patches
159
+ self.patch_grid = self.patch_embeddings.grid_size
160
+ self.mask_token = nn.Parameter(torch.zeros(1, 1, config.embed_dim)) if use_mask_token else None
161
+
162
+ if config.use_absolute_embeddings:
163
+ self.position_embeddings = nn.Parameter(torch.zeros(1, num_patches + 1, config.embed_dim))
164
+ else:
165
+ self.position_embeddings = None
166
+
167
+ ### code added. ###
168
+ if config.use_2d_embeddings:
169
+ self.row_embeddings = nn.Parameter(torch.zeros(1, self.patch_grid[0] + 1, config.embed_dim))
170
+ self.column_embeddings = nn.Parameter(torch.zeros(1, self.patch_grid[1] + 1, config.embed_dim))
171
+ else:
172
+ self.row_embeddings = None
173
+ self.column_embeddings = None
174
+ ######
175
+
176
+ self.norm = nn.LayerNorm(config.embed_dim)
177
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
178
+
179
+ def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
180
+ """
181
+ This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher
182
+ resolution images.
183
+
184
+ Source:
185
+ https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174
186
+ """
187
+
188
+ num_patches = embeddings.shape[1] - 1
189
+ num_positions = self.position_embeddings.shape[1] - 1
190
+ if num_patches == num_positions and height == width:
191
+ return self.position_embeddings
192
+ class_pos_embed = self.position_embeddings[:, 0]
193
+ patch_pos_embed = self.position_embeddings[:, 1:]
194
+ dim = embeddings.shape[-1]
195
+ h0 = height // self.config.patch_size
196
+ w0 = width // self.config.patch_size
197
+ # we add a small number to avoid floating point error in the interpolation
198
+ # see discussion at https://github.com/facebookresearch/dino/issues/8
199
+ h0, w0 = h0 + 0.1, w0 + 0.1
200
+ patch_pos_embed = patch_pos_embed.reshape(1, int(math.sqrt(num_positions)), int(math.sqrt(num_positions)), dim)
201
+ patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)
202
+ patch_pos_embed = nn.functional.interpolate(
203
+ patch_pos_embed,
204
+ scale_factor=(h0 / math.sqrt(num_positions), w0 / math.sqrt(num_positions)),
205
+ mode="bicubic",
206
+ align_corners=False,
207
+ )
208
+ patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
209
+ return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1)
210
+
211
+ def forward(
212
+ self,
213
+ pixel_values: Optional[torch.FloatTensor],
214
+ bool_masked_pos: Optional[torch.BoolTensor] = None,
215
+ interpolate_pos_encoding: bool = False,
216
+ ) -> Tuple[torch.Tensor]:
217
+ _, num_channels, height, width = pixel_values.shape
218
+ embeddings, output_dimensions = self.patch_embeddings(pixel_values)
219
+ embeddings = self.norm(embeddings)
220
+ batch_size, seq_len, _ = embeddings.size()
221
+
222
+ if bool_masked_pos is not None:
223
+ mask_tokens = self.mask_token.expand(batch_size, seq_len, -1)
224
+ # replace the masked visual tokens by mask_tokens
225
+ mask = bool_masked_pos.unsqueeze(-1).type_as(mask_tokens)
226
+ embeddings = embeddings * (1.0 - mask) + mask_tokens * mask
227
+
228
+ if self.position_embeddings is not None:
229
+ # if interpolate_pos_encoding:
230
+ # embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width)
231
+ # else:
232
+ # embeddings = embeddings + self.position_embeddings
233
+ embeddings = embeddings + self.position_embeddings[:, :seq_len, :] # code edited.
234
+
235
+ ### code added. ###
236
+ if self.row_embeddings is not None and self.column_embeddings is not None:
237
+ # Tile the row embeddings across the width (0,0,0,...,1,1,1,...) and the column embeddings across the height (0,1,2,...,0,1,2,...)
238
+ row_embeddings = self.row_embeddings[:, :output_dimensions[0], :].repeat_interleave(output_dimensions[1], dim=1)
239
+ column_embeddings = self.column_embeddings[:, :output_dimensions[1], :].repeat(1, output_dimensions[0], 1)
240
+ embeddings = embeddings + row_embeddings + column_embeddings
241
+ ######
242
+
243
+ embeddings = self.dropout(embeddings)
244
+
245
+ return embeddings, output_dimensions
246
+
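For illustration (not part of the diff), here is how the added row/column embeddings in UnimerSwinEmbeddings are tiled over an H x W patch grid; the values are made up so the pattern is visible (H=2, W=3, embed_dim=1):

    import torch

    H, W = 2, 3
    row = torch.tensor([[[0.], [1.], [2.]]])                 # (1, H+1, 1) row embeddings
    col = torch.tensor([[[0.], [10.], [20.], [30.]]])        # (1, W+1, 1) column embeddings
    row_tiled = row[:, :H, :].repeat_interleave(W, dim=1)    # 0,0,0, 1,1,1   (row index held per image row)
    col_tiled = col[:, :W, :].repeat(1, H, 1)                # 0,10,20, 0,10,20 (column index cycles per row)
    print((row_tiled + col_tiled).squeeze(-1))               # tensor([[ 0., 10., 20.,  1., 11., 21.]])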
247
+ class StemLayer(nn.Module):
248
+ r""" Stem layer of InternImage
249
+ Args:
250
+ in_chans (int): number of input channels
251
+ out_chans (int): number of output channels
252
+ act_layer (nn.Module): activation layer class (default: nn.GELU)
253
+ norm_layer (str): normalization layer type; only 'BN' is supported
254
+ """
255
+
256
+ def __init__(self, in_chans=3, out_chans=96, act_layer=nn.GELU, norm_layer='BN'):
257
+ super().__init__()
258
+ self.conv1 = nn.Conv2d(in_chans, out_chans // 2, kernel_size=3, stride=2, padding=1)
259
+ self.norm1 = self.build_norm_layer(out_chans // 2, norm_layer)
260
+ self.act = act_layer()
261
+ self.conv2 = nn.Conv2d(out_chans // 2, out_chans, kernel_size=3, stride=2, padding=1)
262
+
263
+ def build_norm_layer(self, dim, norm_layer):
264
+ layers = []
265
+ if norm_layer == 'BN':
266
+ layers.append(nn.BatchNorm2d(dim))
267
+ else:
268
+ raise NotImplementedError(f'build_norm_layer does not support {norm_layer}')
269
+ return nn.Sequential(*layers)
270
+
271
+ def forward(self, x):
272
+ x = self.conv1(x)
273
+ x = self.norm1(x)
274
+ x = self.act(x)
275
+ x = self.conv2(x)
276
+ return x
277
+
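For illustration (not part of the diff): the StemLayer above replaces the usual single patch-size convolution with two stride-2 convolutions, so it downsamples the input by a factor of 4 in each spatial dimension:

    import torch

    stem = StemLayer(in_chans=3, out_chans=96)   # conv(s=2) -> BN -> GELU -> conv(s=2)
    feat = stem(torch.randn(1, 3, 224, 224))
    print(feat.shape)                            # torch.Size([1, 96, 56, 56])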
278
+ # Copied from transformers.models.swin.modeling_swin.SwinPatchEmbeddings with Swin->UnimerSwin
279
+ class UnimerSwinPatchEmbeddings(nn.Module):
280
+ """
281
+ This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
282
+ `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a
283
+ Transformer.
284
+ """
285
+
286
+ def __init__(self, config):
287
+ super().__init__()
288
+ image_size, patch_size = config.image_size, config.patch_size
289
+ num_channels, hidden_size = config.num_channels, config.embed_dim
290
+ image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)
291
+ patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
292
+ num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
293
+ self.image_size = image_size
294
+ self.patch_size = patch_size
295
+ self.num_channels = num_channels
296
+ self.num_patches = num_patches
297
+ self.grid_size = (image_size[0] // patch_size[0], image_size[1] // patch_size[1])
298
+
299
+ ### code edited. ###
300
+ # self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size)
301
+ self.projection = StemLayer(in_chans=num_channels, out_chans=hidden_size)
302
+ ###
303
+
304
+ def maybe_pad(self, pixel_values, height, width):
305
+ if width % self.patch_size[1] != 0:
306
+ pad_values = (0, self.patch_size[1] - width % self.patch_size[1])
307
+ pixel_values = nn.functional.pad(pixel_values, pad_values)
308
+ if height % self.patch_size[0] != 0:
309
+ pad_values = (0, 0, 0, self.patch_size[0] - height % self.patch_size[0])
310
+ pixel_values = nn.functional.pad(pixel_values, pad_values)
311
+ return pixel_values
312
+
313
+ def forward(self, pixel_values: Optional[torch.FloatTensor]) -> Tuple[torch.Tensor, Tuple[int]]:
314
+ _, num_channels, height, width = pixel_values.shape
315
+ # pad the input to be divisible by self.patch_size, if needed
316
+ pixel_values = self.maybe_pad(pixel_values, height, width)
317
+ embeddings = self.projection(pixel_values)
318
+ _, _, height, width = embeddings.shape
319
+ output_dimensions = (height, width)
320
+ embeddings = embeddings.flatten(2).transpose(1, 2)
321
+
322
+ return embeddings, output_dimensions
323
+
324
+
325
+ # Copied from transformers.models.swin.modeling_swin.SwinPatchMerging
326
+ class UnimerSwinPatchMerging(nn.Module):
327
+ """
328
+ Patch Merging Layer.
329
+
330
+ Args:
331
+ input_resolution (`Tuple[int]`):
332
+ Resolution of input feature.
333
+ dim (`int`):
334
+ Number of input channels.
335
+ norm_layer (`nn.Module`, *optional*, defaults to `nn.LayerNorm`):
336
+ Normalization layer class.
337
+ """
338
+
339
+ def __init__(self, input_resolution: Tuple[int], dim: int, norm_layer: nn.Module = nn.LayerNorm) -> None:
340
+ super().__init__()
341
+ self.input_resolution = input_resolution
342
+ self.dim = dim
343
+ self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
344
+ self.norm = norm_layer(4 * dim)
345
+
346
+ def maybe_pad(self, input_feature, height, width):
347
+ should_pad = (height % 2 == 1) or (width % 2 == 1)
348
+ if should_pad:
349
+ pad_values = (0, 0, 0, width % 2, 0, height % 2)
350
+ input_feature = nn.functional.pad(input_feature, pad_values)
351
+
352
+ return input_feature
353
+
354
+ def forward(self, input_feature: torch.Tensor, input_dimensions: Tuple[int, int]) -> torch.Tensor:
355
+ height, width = input_dimensions
356
+ # `dim` is height * width
357
+ batch_size, dim, num_channels = input_feature.shape
358
+
359
+ input_feature = input_feature.view(batch_size, height, width, num_channels)
360
+ # pad input so that height and width are divisible by 2, if needed
361
+ input_feature = self.maybe_pad(input_feature, height, width)
362
+ # [batch_size, height/2, width/2, num_channels]
363
+ input_feature_0 = input_feature[:, 0::2, 0::2, :]
364
+ # [batch_size, height/2, width/2, num_channels]
365
+ input_feature_1 = input_feature[:, 1::2, 0::2, :]
366
+ # [batch_size, height/2, width/2, num_channels]
367
+ input_feature_2 = input_feature[:, 0::2, 1::2, :]
368
+ # [batch_size, height/2, width/2, num_channels]
369
+ input_feature_3 = input_feature[:, 1::2, 1::2, :]
370
+ # batch_size height/2 width/2 4*num_channels
371
+ input_feature = torch.cat([input_feature_0, input_feature_1, input_feature_2, input_feature_3], -1)
372
+ input_feature = input_feature.view(batch_size, -1, 4 * num_channels) # batch_size height/2*width/2 4*C
373
+
374
+ input_feature = self.norm(input_feature)
375
+ input_feature = self.reduction(input_feature)
376
+
377
+ return input_feature
378
+
379
+
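For illustration (not part of the diff): patch merging halves each spatial dimension and doubles the channel count, mapping (B, H*W, C) to (B, H/2*W/2, 2C):

    import torch

    merge = UnimerSwinPatchMerging(input_resolution=(8, 8), dim=96)
    x = torch.randn(2, 8 * 8, 96)
    y = merge(x, input_dimensions=(8, 8))
    print(y.shape)                               # torch.Size([2, 16, 192])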
380
+ # Copied from transformers.models.beit.modeling_beit.drop_path
381
+ def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
382
+ """
383
+ Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
384
+
385
+ Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks,
386
+ however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
387
+ See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the
388
+ layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the
389
+ argument.
390
+ """
391
+ if drop_prob == 0.0 or not training:
392
+ return input
393
+ keep_prob = 1 - drop_prob
394
+ shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
395
+ random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
396
+ random_tensor.floor_() # binarize
397
+ output = input.div(keep_prob) * random_tensor
398
+ return output
399
+
400
+
401
+ # Copied from transformers.models.swin.modeling_swin.SwinDropPath
402
+ class UnimerSwinDropPath(nn.Module):
403
+ """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
404
+
405
+ def __init__(self, drop_prob: Optional[float] = None) -> None:
406
+ super().__init__()
407
+ self.drop_prob = drop_prob
408
+
409
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
410
+ return drop_path(hidden_states, self.drop_prob, self.training)
411
+
412
+ def extra_repr(self) -> str:
413
+ return "p={}".format(self.drop_prob)
414
+
415
+
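For illustration (not part of the diff): during training, drop_path zeroes an entire sample's residual branch with probability drop_prob and rescales the survivors by 1/keep_prob; at inference it is the identity:

    import torch

    torch.manual_seed(0)
    x = torch.ones(8, 4, 16)                                  # 8 samples entering a residual branch
    out = drop_path(x, drop_prob=0.5, training=True)
    print(out[:, 0, 0])                                       # each entry is either 0.0 or 2.0 (= 1/keep_prob)
    print(torch.equal(drop_path(x, 0.5, training=False), x))  # True: no-op at inference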
416
+ # Copied from transformers.models.swin.modeling_swin.SwinSelfAttention with Swin->UnimerSwin
417
+ class UnimerSwinSelfAttention(nn.Module):
418
+ def __init__(self, config, dim, num_heads, window_size):
419
+ super().__init__()
420
+ if dim % num_heads != 0:
421
+ raise ValueError(
422
+ f"The hidden size ({dim}) is not a multiple of the number of attention heads ({num_heads})"
423
+ )
424
+
425
+ self.num_attention_heads = num_heads
426
+ self.attention_head_size = int(dim / num_heads)
427
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
428
+ self.window_size = (
429
+ window_size if isinstance(window_size, collections.abc.Iterable) else (window_size, window_size)
430
+ )
431
+
432
+ self.relative_position_bias_table = nn.Parameter(
433
+ torch.zeros((2 * self.window_size[0] - 1) * (2 * self.window_size[1] - 1), num_heads)
434
+ )
435
+
436
+ # get pair-wise relative position index for each token inside the window
437
+ coords_h = torch.arange(self.window_size[0])
438
+ coords_w = torch.arange(self.window_size[1])
439
+ coords = torch.stack(meshgrid([coords_h, coords_w], indexing="ij"))
440
+ coords_flatten = torch.flatten(coords, 1)
441
+ relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]
442
+ relative_coords = relative_coords.permute(1, 2, 0).contiguous()
443
+ relative_coords[:, :, 0] += self.window_size[0] - 1
444
+ relative_coords[:, :, 1] += self.window_size[1] - 1
445
+ relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
446
+ relative_position_index = relative_coords.sum(-1)
447
+ self.register_buffer("relative_position_index", relative_position_index)
448
+
449
+ self.query = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias)
450
+ self.key = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias)
451
+ self.value = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias)
452
+
453
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
454
+
455
+ def transpose_for_scores(self, x):
456
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
457
+ x = x.view(new_x_shape)
458
+ return x.permute(0, 2, 1, 3)
459
+
460
+ def forward(
461
+ self,
462
+ hidden_states: torch.Tensor,
463
+ attention_mask: Optional[torch.FloatTensor] = None,
464
+ head_mask: Optional[torch.FloatTensor] = None,
465
+ output_attentions: Optional[bool] = False,
466
+ ) -> Tuple[torch.Tensor]:
467
+ batch_size, dim, num_channels = hidden_states.shape
468
+ mixed_query_layer = self.query(hidden_states)
469
+
470
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
471
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
472
+ query_layer = self.transpose_for_scores(mixed_query_layer)
473
+
474
+ # Take the dot product between "query" and "key" to get the raw attention scores.
475
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
476
+
477
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
478
+
479
+ relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)]
480
+ relative_position_bias = relative_position_bias.view(
481
+ self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1
482
+ )
483
+
484
+ relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous()
485
+ attention_scores = attention_scores + relative_position_bias.unsqueeze(0)
486
+
487
+ if attention_mask is not None:
488
+ # Apply the attention mask (precomputed for all layers in UnimerSwinModel forward() function)
489
+ mask_shape = attention_mask.shape[0]
490
+ attention_scores = attention_scores.view(
491
+ batch_size // mask_shape, mask_shape, self.num_attention_heads, dim, dim
492
+ )
493
+ attention_scores = attention_scores + attention_mask.unsqueeze(1).unsqueeze(0)
494
+ attention_scores = attention_scores.view(-1, self.num_attention_heads, dim, dim)
495
+
496
+ # Normalize the attention scores to probabilities.
497
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
498
+
499
+ # This is actually dropping out entire tokens to attend to, which might
500
+ # seem a bit unusual, but is taken from the original Transformer paper.
501
+ attention_probs = self.dropout(attention_probs)
502
+
503
+ # Mask heads if we want to
504
+ if head_mask is not None:
505
+ attention_probs = attention_probs * head_mask
506
+
507
+ context_layer = torch.matmul(attention_probs, value_layer)
508
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
509
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
510
+ context_layer = context_layer.view(new_context_layer_shape)
511
+
512
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
513
+
514
+ return outputs
515
+
516
+
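For illustration (not part of the diff): for a 2x2 window, the attention above keeps a (2*2-1)^2 = 9-entry relative-position bias table, and relative_position_index maps every (query, key) pair inside the window to one of those 9 entries. The same index computation, standalone:

    import torch

    ws = 2
    coords = torch.stack(torch.meshgrid(torch.arange(ws), torch.arange(ws), indexing="ij"))
    flat = torch.flatten(coords, 1)
    rel = (flat[:, :, None] - flat[:, None, :]).permute(1, 2, 0).contiguous()
    rel[:, :, 0] += ws - 1
    rel[:, :, 1] += ws - 1
    rel[:, :, 0] *= 2 * ws - 1
    print(rel.sum(-1))   # 4x4 tensor of indices in [0, 8], one per (query, key) pair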
517
+ # Copied from transformers.models.swin.modeling_swin.SwinSelfOutput
518
+ class UnimerSwinSelfOutput(nn.Module):
519
+ def __init__(self, config, dim):
520
+ super().__init__()
521
+ self.dense = nn.Linear(dim, dim)
522
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
523
+
524
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
525
+ hidden_states = self.dense(hidden_states)
526
+ hidden_states = self.dropout(hidden_states)
527
+
528
+ return hidden_states
529
+
530
+
531
+ # Copied from transformers.models.swin.modeling_swin.SwinAttention with Swin->UnimerSwin
532
+ class UnimerSwinAttention(nn.Module):
533
+ def __init__(self, config, dim, num_heads, window_size):
534
+ super().__init__()
535
+ self.self = UnimerSwinSelfAttention(config, dim, num_heads, window_size)
536
+ self.output = UnimerSwinSelfOutput(config, dim)
537
+ self.pruned_heads = set()
538
+
539
+ def prune_heads(self, heads):
540
+ if len(heads) == 0:
541
+ return
542
+ heads, index = find_pruneable_heads_and_indices(
543
+ heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
544
+ )
545
+
546
+ # Prune linear layers
547
+ self.self.query = prune_linear_layer(self.self.query, index)
548
+ self.self.key = prune_linear_layer(self.self.key, index)
549
+ self.self.value = prune_linear_layer(self.self.value, index)
550
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
551
+
552
+ # Update hyper params and store pruned heads
553
+ self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
554
+ self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
555
+ self.pruned_heads = self.pruned_heads.union(heads)
556
+
557
+ def forward(
558
+ self,
559
+ hidden_states: torch.Tensor,
560
+ attention_mask: Optional[torch.FloatTensor] = None,
561
+ head_mask: Optional[torch.FloatTensor] = None,
562
+ output_attentions: Optional[bool] = False,
563
+ ) -> Tuple[torch.Tensor]:
564
+ self_outputs = self.self(hidden_states, attention_mask, head_mask, output_attentions)
565
+ attention_output = self.output(self_outputs[0], hidden_states)
566
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
567
+ return outputs
568
+
569
+
570
+ # Copied from transformers.models.swin.modeling_swin.SwinIntermediate
571
+ class UnimerSwinIntermediate(nn.Module):
572
+ def __init__(self, config, dim):
573
+ super().__init__()
574
+ self.dense = nn.Linear(dim, int(config.mlp_ratio * dim))
575
+ if isinstance(config.hidden_act, str):
576
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
577
+ else:
578
+ self.intermediate_act_fn = config.hidden_act
579
+
580
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
581
+ hidden_states = self.dense(hidden_states)
582
+ hidden_states = self.intermediate_act_fn(hidden_states)
583
+ return hidden_states
584
+
585
+
586
+ # Copied from transformers.models.swin.modeling_swin.SwinOutput
587
+ class UnimerSwinOutput(nn.Module):
588
+ def __init__(self, config, dim):
589
+ super().__init__()
590
+ self.dense = nn.Linear(int(config.mlp_ratio * dim), dim)
591
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
592
+
593
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
594
+ hidden_states = self.dense(hidden_states)
595
+ hidden_states = self.dropout(hidden_states)
596
+ return hidden_states
597
+
598
+
599
+ class ConvEnhance(nn.Module):
600
+ """Depth-wise convolution to get the positional information.
601
+ """
602
+ def __init__(self, config, dim, k=3):
603
+ super(ConvEnhance, self).__init__()
604
+ self.proj = nn.Conv2d(dim,
605
+ dim,
606
+ (k,k),
607
+ (1,1),
608
+ (k // 2,k // 2),
609
+ groups=dim)
610
+ self.act_fn = ACT2FN[config.hidden_act]
611
+
612
+ def forward(self, x, size: Tuple[int, int]):
613
+ B, N, C = x.shape
614
+ H, W = size
615
+ assert N == H * W
616
+
617
+ feat = x.transpose(1, 2).view(B, C, H, W)
618
+ feat = self.proj(feat)
619
+ feat = self.act_fn(feat)
620
+ feat = feat.flatten(2).transpose(1, 2)
621
+
622
+ x = x + feat
623
+ return x
624
+
625
+
626
+ # Copied from transformers.models.swin.modeling_swin.SwinLayer with Swin->UnimerSwin
627
+ class UnimerSwinLayer(nn.Module):
628
+ def __init__(self, config, dim, input_resolution, num_heads, shift_size=0):
629
+ super().__init__()
630
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
631
+ self.shift_size = shift_size
632
+ self.window_size = config.window_size
633
+ self.input_resolution = input_resolution
634
+ self.layernorm_before = nn.LayerNorm(dim, eps=config.layer_norm_eps)
635
+
636
+ self.ce = nn.ModuleList([ConvEnhance(config, dim=dim, k=3),
637
+ ConvEnhance(config, dim=dim, k=3)])
638
+
639
+ self.attention = UnimerSwinAttention(config, dim, num_heads, window_size=self.window_size)
640
+ self.drop_path = UnimerSwinDropPath(config.drop_path_rate) if config.drop_path_rate > 0.0 else nn.Identity()
641
+ self.layernorm_after = nn.LayerNorm(dim, eps=config.layer_norm_eps)
642
+ self.intermediate = UnimerSwinIntermediate(config, dim)
643
+ self.output = UnimerSwinOutput(config, dim)
644
+
645
+ def set_shift_and_window_size(self, input_resolution):
646
+ if min(input_resolution) <= self.window_size:
647
+ # if window size is larger than input resolution, we don't partition windows
648
+ self.shift_size = torch_int(0)
649
+ self.window_size = (
650
+ torch.min(torch.tensor(input_resolution)) if torch.jit.is_tracing() else min(input_resolution)
651
+ )
652
+
653
+ def get_attn_mask(self, height, width, dtype, device):
654
+ if self.shift_size > 0:
655
+ # calculate attention mask for SW-MSA
656
+ img_mask = torch.zeros((1, height, width, 1), dtype=dtype, device=device)
657
+ height_slices = (
658
+ slice(0, -self.window_size),
659
+ slice(-self.window_size, -self.shift_size),
660
+ slice(-self.shift_size, None),
661
+ )
662
+ width_slices = (
663
+ slice(0, -self.window_size),
664
+ slice(-self.window_size, -self.shift_size),
665
+ slice(-self.shift_size, None),
666
+ )
667
+ count = 0
668
+ for height_slice in height_slices:
669
+ for width_slice in width_slices:
670
+ img_mask[:, height_slice, width_slice, :] = count
671
+ count += 1
672
+
673
+ mask_windows = window_partition(img_mask, self.window_size)
674
+ mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
675
+ attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
676
+ attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
677
+ else:
678
+ attn_mask = None
679
+ return attn_mask
680
+
681
+ def maybe_pad(self, hidden_states, height, width):
682
+ pad_right = (self.window_size - width % self.window_size) % self.window_size
683
+ pad_bottom = (self.window_size - height % self.window_size) % self.window_size
684
+ pad_values = (0, 0, 0, pad_right, 0, pad_bottom)
685
+ hidden_states = nn.functional.pad(hidden_states, pad_values)
686
+ return hidden_states, pad_values
687
+
688
+ def forward(
689
+ self,
690
+ hidden_states: torch.Tensor,
691
+ input_dimensions: Tuple[int, int],
692
+ head_mask: Optional[torch.FloatTensor] = None,
693
+ output_attentions: Optional[bool] = False,
694
+ always_partition: Optional[bool] = False,
695
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
696
+ if not always_partition:
697
+ self.set_shift_and_window_size(input_dimensions)
698
+ else:
699
+ pass
700
+ height, width = input_dimensions
701
+ batch_size, _, channels = hidden_states.size()
702
+
703
+
704
+
705
+ hidden_states = self.ce[0](hidden_states, input_dimensions)
706
+ shortcut = hidden_states
707
+
708
+
709
+ hidden_states = self.layernorm_before(hidden_states)
710
+ hidden_states = hidden_states.view(batch_size, height, width, channels)
711
+
712
+ # pad hidden_states to multiples of window size
713
+ hidden_states, pad_values = self.maybe_pad(hidden_states, height, width)
714
+
715
+ _, height_pad, width_pad, _ = hidden_states.shape
716
+ # cyclic shift
717
+ if self.shift_size > 0:
718
+ shifted_hidden_states = torch.roll(hidden_states, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
719
+ else:
720
+ shifted_hidden_states = hidden_states
721
+
722
+ # partition windows
723
+ hidden_states_windows = window_partition(shifted_hidden_states, self.window_size)
724
+ hidden_states_windows = hidden_states_windows.view(-1, self.window_size * self.window_size, channels)
725
+ attn_mask = self.get_attn_mask(
726
+ height_pad, width_pad, dtype=hidden_states.dtype, device=hidden_states_windows.device
727
+ )
728
+
729
+ attention_outputs = self.attention(
730
+ hidden_states_windows, attn_mask, head_mask, output_attentions=output_attentions
731
+ )
732
+
733
+ attention_output = attention_outputs[0]
734
+
735
+ attention_windows = attention_output.view(-1, self.window_size, self.window_size, channels)
736
+ shifted_windows = window_reverse(attention_windows, self.window_size, height_pad, width_pad)
737
+
738
+ # reverse cyclic shift
739
+ if self.shift_size > 0:
740
+ attention_windows = torch.roll(shifted_windows, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
741
+ else:
742
+ attention_windows = shifted_windows
743
+
744
+ was_padded = pad_values[3] > 0 or pad_values[5] > 0
745
+ if was_padded:
746
+ attention_windows = attention_windows[:, :height, :width, :].contiguous()
747
+
748
+ attention_windows = attention_windows.view(batch_size, height * width, channels)
749
+
750
+ hidden_states = shortcut + self.drop_path(attention_windows)
751
+
752
+
753
+
754
+ hidden_states = self.ce[1](hidden_states, input_dimensions)
755
+ layer_output = self.layernorm_after(hidden_states)
756
+ layer_output = self.intermediate(layer_output)
757
+ layer_output = hidden_states + self.output(layer_output)
758
+
759
+ layer_outputs = (layer_output, attention_outputs[1]) if output_attentions else (layer_output,)
760
+ return layer_outputs
761
+
762
+
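For illustration (not part of the diff): with window_size 4 and shift_size 2 on an 8x8 feature map, get_attn_mask above produces one (16, 16) mask per window whose -100 entries block attention between regions that only became neighbours because of the cyclic shift. A standalone sketch of the same computation, reusing window_partition from this file:

    import torch

    ws, shift, H, W = 4, 2, 8, 8
    img_mask = torch.zeros(1, H, W, 1)
    regions = (slice(0, -ws), slice(-ws, -shift), slice(-shift, None))
    count = 0
    for hs in regions:
        for wsl in regions:
            img_mask[:, hs, wsl, :] = count
            count += 1
    mask_windows = window_partition(img_mask, ws).view(-1, ws * ws)
    attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
    attn_mask = attn_mask.masked_fill(attn_mask != 0, -100.0).masked_fill(attn_mask == 0, 0.0)
    print(attn_mask.shape)                          # torch.Size([4, 16, 16])
    print((attn_mask == -100.0).sum(dim=(1, 2)))    # blocked (query, key) pairs per window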
763
+ # Copied from transformers.models.swin.modeling_swin.SwinStage with Swin->UnimerSwin
764
+ class UnimerSwinStage(nn.Module):
765
+ def __init__(self, config, dim, input_resolution, depth, num_heads, drop_path, downsample):
766
+ super().__init__()
767
+ self.config = config
768
+ self.dim = dim
769
+ self.blocks = nn.ModuleList(
770
+ [
771
+ UnimerSwinLayer(
772
+ config=config,
773
+ dim=dim,
774
+ input_resolution=input_resolution,
775
+ num_heads=num_heads,
776
+ shift_size=0,
777
+ )
778
+ for i in range(depth)
779
+ ]
780
+ )
781
+
782
+ # patch merging layer
783
+ if downsample is not None:
784
+ self.downsample = downsample(input_resolution, dim=dim, norm_layer=nn.LayerNorm)
785
+ else:
786
+ self.downsample = None
787
+
788
+ self.pointing = False
789
+
790
+ def forward(
791
+ self,
792
+ hidden_states: torch.Tensor,
793
+ input_dimensions: Tuple[int, int],
794
+ head_mask: Optional[torch.FloatTensor] = None,
795
+ output_attentions: Optional[bool] = False,
796
+ always_partition: Optional[bool] = False,
797
+ ) -> Tuple[torch.Tensor]:
798
+ height, width = input_dimensions
799
+ for i, layer_module in enumerate(self.blocks):
800
+ layer_head_mask = head_mask[i] if head_mask is not None else None
801
+
802
+ layer_outputs = layer_module(
803
+ hidden_states, input_dimensions, layer_head_mask, output_attentions, always_partition
804
+ )
805
+
806
+ hidden_states = layer_outputs[0]
807
+
808
+ hidden_states_before_downsampling = hidden_states
809
+ if self.downsample is not None:
810
+ height_downsampled, width_downsampled = (height + 1) // 2, (width + 1) // 2
811
+ output_dimensions = (height, width, height_downsampled, width_downsampled)
812
+ hidden_states = self.downsample(hidden_states_before_downsampling, input_dimensions)
813
+ else:
814
+ output_dimensions = (height, width, height, width)
815
+
816
+ stage_outputs = (hidden_states, hidden_states_before_downsampling, output_dimensions)
817
+
818
+ if output_attentions:
819
+ stage_outputs += layer_outputs[1:]
820
+ return stage_outputs
821
+
822
+
823
+ # Copied from transformers.models.swin.modeling_swin.SwinEncoder with Swin->UnimerSwin
824
+ class UnimerSwinEncoder(nn.Module):
825
+ def __init__(self, config, grid_size):
826
+ super().__init__()
827
+ self.num_layers = len(config.depths)
828
+ self.config = config
829
+ dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]
830
+ self.layers = nn.ModuleList(
831
+ [
832
+ UnimerSwinStage(
833
+ config=config,
834
+ dim=int(config.embed_dim * 2**i_layer),
835
+ input_resolution=(grid_size[0] // (2**i_layer), grid_size[1] // (2**i_layer)),
836
+ depth=config.depths[i_layer],
837
+ num_heads=config.num_heads[i_layer],
838
+ drop_path=dpr[sum(config.depths[:i_layer]) : sum(config.depths[: i_layer + 1])],
839
+ downsample=UnimerSwinPatchMerging if (i_layer < self.num_layers - 1) else None,
840
+ )
841
+ for i_layer in range(self.num_layers)
842
+ ]
843
+ )
844
+
845
+ self.gradient_checkpointing = False
846
+
847
+ def forward(
848
+ self,
849
+ hidden_states: torch.Tensor,
850
+ input_dimensions: Tuple[int, int],
851
+ head_mask: Optional[torch.FloatTensor] = None,
852
+ output_attentions: Optional[bool] = False,
853
+ output_hidden_states: Optional[bool] = False,
854
+ output_hidden_states_before_downsampling: Optional[bool] = False,
855
+ always_partition: Optional[bool] = False,
856
+ return_dict: Optional[bool] = True,
857
+ ) -> Union[Tuple, UnimerSwinEncoderOutput]:
858
+ all_hidden_states = () if output_hidden_states else None
859
+ all_reshaped_hidden_states = () if output_hidden_states else None
860
+ all_self_attentions = () if output_attentions else None
861
+
862
+ if output_hidden_states:
863
+ batch_size, _, hidden_size = hidden_states.shape
864
+ # rearrange b (h w) c -> b c h w
865
+ reshaped_hidden_state = hidden_states.view(batch_size, *input_dimensions, hidden_size)
866
+ reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2)
867
+ all_hidden_states += (hidden_states,)
868
+ all_reshaped_hidden_states += (reshaped_hidden_state,)
869
+
870
+ for i, layer_module in enumerate(self.layers):
871
+ layer_head_mask = head_mask[i] if head_mask is not None else None
872
+
873
+ if self.gradient_checkpointing and self.training:
874
+ layer_outputs = self._gradient_checkpointing_func(
875
+ layer_module.__call__,
876
+ hidden_states,
877
+ input_dimensions,
878
+ layer_head_mask,
879
+ output_attentions,
880
+ always_partition,
881
+ )
882
+ else:
883
+ layer_outputs = layer_module(
884
+ hidden_states, input_dimensions, layer_head_mask, output_attentions, always_partition
885
+ )
886
+
887
+ hidden_states = layer_outputs[0]
888
+ hidden_states_before_downsampling = layer_outputs[1]
889
+ output_dimensions = layer_outputs[2]
890
+
891
+ input_dimensions = (output_dimensions[-2], output_dimensions[-1])
892
+
893
+ if output_hidden_states and output_hidden_states_before_downsampling:
894
+ batch_size, _, hidden_size = hidden_states_before_downsampling.shape
895
+ # rearrange b (h w) c -> b c h w
896
+ # here we use the original (not downsampled) height and width
897
+ reshaped_hidden_state = hidden_states_before_downsampling.view(
898
+ batch_size, *(output_dimensions[0], output_dimensions[1]), hidden_size
899
+ )
900
+ reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2)
901
+ all_hidden_states += (hidden_states_before_downsampling,)
902
+ all_reshaped_hidden_states += (reshaped_hidden_state,)
903
+ elif output_hidden_states and not output_hidden_states_before_downsampling:
904
+ batch_size, _, hidden_size = hidden_states.shape
905
+ # rearrange b (h w) c -> b c h w
906
+ reshaped_hidden_state = hidden_states.view(batch_size, *input_dimensions, hidden_size)
907
+ reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2)
908
+ all_hidden_states += (hidden_states,)
909
+ all_reshaped_hidden_states += (reshaped_hidden_state,)
910
+
911
+ if output_attentions:
912
+ all_self_attentions += layer_outputs[3:]
913
+
914
+ if not return_dict:
915
+ return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
916
+
917
+ return UnimerSwinEncoderOutput(
918
+ last_hidden_state=hidden_states,
919
+ hidden_states=all_hidden_states,
920
+ attentions=all_self_attentions,
921
+ reshaped_hidden_states=all_reshaped_hidden_states,
922
+ )
923
+
924
+
925
+ # Copied from transformers.models.swin.modeling_swin.SwinPreTrainedModel with Swin->UnimerSwin
926
+ class UnimerSwinPreTrainedModel(PreTrainedModel):
927
+ """
928
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
929
+ models.
930
+ """
931
+
932
+ config_class = UnimerSwinConfig
933
+ base_model_prefix = "unimer-swin"
934
+ main_input_name = "pixel_values"
935
+ supports_gradient_checkpointing = True
936
+ _no_split_modules = ["UnimerSwinStage"]
937
+
938
+ def _init_weights(self, module):
939
+ """Initialize the weights"""
940
+ if isinstance(module, (nn.Linear, nn.Conv2d)):
941
+ # Slightly different from the TF version which uses truncated_normal for initialization
942
+ # cf https://github.com/pytorch/pytorch/pull/5617
943
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
944
+ if module.bias is not None:
945
+ module.bias.data.zero_()
946
+ elif isinstance(module, nn.LayerNorm):
947
+ module.bias.data.zero_()
948
+ module.weight.data.fill_(1.0)
949
+
950
+
951
+ SWIN_START_DOCSTRING = r"""
952
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
953
+ it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
954
+ behavior.
955
+
956
+ Parameters:
957
+ config ([`UnimerSwinConfig`]): Model configuration class with all the parameters of the model.
958
+ Initializing with a config file does not load the weights associated with the model, only the
959
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
960
+ """
961
+
962
+ SWIN_INPUTS_DOCSTRING = r"""
963
+ Args:
964
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
965
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
966
+ [`DonutImageProcessor.__call__`] for details.
967
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
968
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
969
+
970
+ - 1 indicates the head is **not masked**,
971
+ - 0 indicates the head is **masked**.
972
+
973
+ output_attentions (`bool`, *optional*):
974
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
975
+ tensors for more detail.
976
+ output_hidden_states (`bool`, *optional*):
977
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
978
+ more detail.
979
+ interpolate_pos_encoding (`bool`, *optional*, defaults to `False`):
980
+ Whether to interpolate the pre-trained position encodings.
981
+ return_dict (`bool`, *optional*):
982
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
983
+ """
984
+
985
+
986
+ @add_start_docstrings(
987
+ "The bare UnimerSwin Model transformer outputting raw hidden-states without any specific head on top.",
988
+ SWIN_START_DOCSTRING,
989
+ )
990
+ class UnimerSwinModel(UnimerSwinPreTrainedModel):
991
+ def __init__(self, config, add_pooling_layer=True, use_mask_token=False):
992
+ super().__init__(config)
993
+ self.config = config
994
+ self.num_layers = len(config.depths)
995
+ self.num_features = int(config.embed_dim * 2 ** (self.num_layers - 1))
996
+
997
+ self.embeddings = UnimerSwinEmbeddings(config, use_mask_token=use_mask_token)
998
+ self.encoder = UnimerSwinEncoder(config, self.embeddings.patch_grid)
999
+ self.pooler = nn.AdaptiveAvgPool1d(1) if add_pooling_layer else None
1000
+
1001
+ # Initialize weights and apply final processing
1002
+ self.post_init()
1003
+
1004
+ def get_input_embeddings(self):
1005
+ return self.embeddings.patch_embeddings
1006
+
1007
+ def _prune_heads(self, heads_to_prune):
1008
+ """
1009
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
1010
+ class PreTrainedModel
1011
+ """
1012
+ for layer, heads in heads_to_prune.items():
1013
+ self.encoder.layer[layer].attention.prune_heads(heads)
1014
+
1015
+ @add_start_docstrings_to_model_forward(SWIN_INPUTS_DOCSTRING)
1016
+ @add_code_sample_docstrings(
1017
+ checkpoint=_CHECKPOINT_FOR_DOC,
1018
+ output_type=UnimerSwinModelOutput,
1019
+ config_class=_CONFIG_FOR_DOC,
1020
+ modality="vision",
1021
+ expected_output=_EXPECTED_OUTPUT_SHAPE,
1022
+ )
1023
+ def forward(
1024
+ self,
1025
+ pixel_values: Optional[torch.FloatTensor] = None,
1026
+ bool_masked_pos: Optional[torch.BoolTensor] = None,
1027
+ head_mask: Optional[torch.FloatTensor] = None,
1028
+ output_attentions: Optional[bool] = None,
1029
+ output_hidden_states: Optional[bool] = None,
1030
+ interpolate_pos_encoding: bool = False,
1031
+ return_dict: Optional[bool] = None,
1032
+ ) -> Union[Tuple, UnimerSwinModelOutput]:
1033
+ r"""
1034
+ bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`):
1035
+ Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).
1036
+ """
1037
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1038
+ output_hidden_states = (
1039
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1040
+ )
1041
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1042
+
1043
+ if pixel_values is None:
1044
+ raise ValueError("You have to specify pixel_values")
1045
+
1046
+ # Prepare head mask if needed
1047
+ # 1.0 in head_mask indicate we keep the head
1048
+ # attention_probs has shape bsz x n_heads x N x N
1049
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
1050
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
1051
+ head_mask = self.get_head_mask(head_mask, len(self.config.depths))
1052
+
1053
+ embedding_output, input_dimensions = self.embeddings(
1054
+ pixel_values, bool_masked_pos=bool_masked_pos, interpolate_pos_encoding=interpolate_pos_encoding
1055
+ )
1056
+
1057
+ encoder_outputs = self.encoder(
1058
+ embedding_output,
1059
+ input_dimensions,
1060
+ head_mask=head_mask,
1061
+ output_attentions=output_attentions,
1062
+ output_hidden_states=output_hidden_states,
1063
+ return_dict=return_dict,
1064
+ )
1065
+
1066
+ sequence_output = encoder_outputs[0]
1067
+
1068
+ pooled_output = None
1069
+ if self.pooler is not None:
1070
+ pooled_output = self.pooler(sequence_output.transpose(1, 2))
1071
+ pooled_output = torch.flatten(pooled_output, 1)
1072
+
1073
+ if not return_dict:
1074
+ output = (sequence_output, pooled_output) + encoder_outputs[1:]
1075
+
1076
+ return output
1077
+
1078
+ return UnimerSwinModelOutput(
1079
+ last_hidden_state=sequence_output,
1080
+ pooler_output=pooled_output,
1081
+ hidden_states=encoder_outputs.hidden_states,
1082
+ attentions=encoder_outputs.attentions,
1083
+ reshaped_hidden_states=encoder_outputs.reshaped_hidden_states,
1084
+ )
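For illustration (not part of the diff): the optional pooler_output returned above is simply the mean of last_hidden_state over the sequence dimension, computed through AdaptiveAvgPool1d as in UnimerSwinModel.forward. The shapes are placeholders, not values taken from the package:

    import torch
    from torch import nn

    sequence_output = torch.randn(2, 49, 768)     # (batch, seq_len, hidden_size) from the encoder
    pooler = nn.AdaptiveAvgPool1d(1)
    pooled = torch.flatten(pooler(sequence_output.transpose(1, 2)), 1)
    print(pooled.shape)                           # torch.Size([2, 768])
    print(torch.allclose(pooled, sequence_output.mean(dim=1)))  # True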