evalscope 0.14.0__py3-none-any.whl → 0.15.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This version of evalscope was flagged as potentially problematic.

Files changed (181)
  1. evalscope/arguments.py +2 -1
  2. evalscope/benchmarks/__init__.py +2 -2
  3. evalscope/benchmarks/aigc/__init__.py +0 -0
  4. evalscope/benchmarks/aigc/t2i/__init__.py +0 -0
  5. evalscope/benchmarks/aigc/t2i/base.py +56 -0
  6. evalscope/benchmarks/aigc/t2i/evalmuse_adapter.py +77 -0
  7. evalscope/benchmarks/aigc/t2i/genai_bench_adapter.py +58 -0
  8. evalscope/benchmarks/aigc/t2i/general_t2i_adapter.py +58 -0
  9. evalscope/benchmarks/aigc/t2i/hpdv2_adapter.py +57 -0
  10. evalscope/benchmarks/aigc/t2i/tifa_adapter.py +37 -0
  11. evalscope/benchmarks/aime/aime24_adapter.py +1 -1
  12. evalscope/benchmarks/aime/aime25_adapter.py +4 -4
  13. evalscope/benchmarks/alpaca_eval/alpaca_eval_adapter.py +1 -2
  14. evalscope/benchmarks/arc/arc_adapter.py +1 -1
  15. evalscope/benchmarks/arena_hard/arena_hard_adapter.py +1 -3
  16. evalscope/benchmarks/ceval/ceval_adapter.py +2 -2
  17. evalscope/benchmarks/chinese_simple_qa/csimple_qa_adapter.py +1 -3
  18. evalscope/benchmarks/cmmlu/cmmlu_adapter.py +1 -1
  19. evalscope/benchmarks/competition_math/competition_math_adapter.py +1 -2
  20. evalscope/benchmarks/data_adapter.py +16 -9
  21. evalscope/benchmarks/data_collection/data_collection_adapter.py +6 -4
  22. evalscope/benchmarks/general_mcq/general_mcq_adapter.py +2 -2
  23. evalscope/benchmarks/general_qa/general_qa_adapter.py +3 -3
  24. evalscope/benchmarks/live_code_bench/evaluate_utils.py +16 -21
  25. evalscope/benchmarks/live_code_bench/live_code_bench_adapter.py +4 -1
  26. evalscope/benchmarks/live_code_bench/testing_util.py +6 -3
  27. evalscope/benchmarks/math_500/math_500_adapter.py +1 -1
  28. evalscope/benchmarks/mmlu/mmlu_adapter.py +3 -1
  29. evalscope/benchmarks/simple_qa/simple_qa_adapter.py +1 -2
  30. evalscope/benchmarks/utils.py +7 -16
  31. evalscope/cli/start_app.py +1 -1
  32. evalscope/collections/evaluator.py +16 -4
  33. evalscope/config.py +7 -3
  34. evalscope/constants.py +11 -0
  35. evalscope/evaluator/evaluator.py +9 -3
  36. evalscope/evaluator/reviewer/auto_reviewer.py +1 -1
  37. evalscope/metrics/__init__.py +49 -4
  38. evalscope/metrics/llm_judge.py +1 -1
  39. evalscope/metrics/named_metrics.py +13 -0
  40. evalscope/metrics/t2v_metrics/__init__.py +66 -0
  41. evalscope/metrics/t2v_metrics/clipscore.py +14 -0
  42. evalscope/metrics/t2v_metrics/constants.py +12 -0
  43. evalscope/metrics/t2v_metrics/itmscore.py +14 -0
  44. evalscope/metrics/t2v_metrics/models/__init__.py +0 -0
  45. evalscope/metrics/t2v_metrics/models/clipscore_models/__init__.py +30 -0
  46. evalscope/metrics/t2v_metrics/models/clipscore_models/build_mps_model/__init__.py +0 -0
  47. evalscope/metrics/t2v_metrics/models/clipscore_models/build_mps_model/base_model.py +6 -0
  48. evalscope/metrics/t2v_metrics/models/clipscore_models/build_mps_model/clip_model.py +132 -0
  49. evalscope/metrics/t2v_metrics/models/clipscore_models/build_mps_model/cross_modeling.py +286 -0
  50. evalscope/metrics/t2v_metrics/models/clipscore_models/clip_model.py +114 -0
  51. evalscope/metrics/t2v_metrics/models/clipscore_models/hpsv2_model.py +86 -0
  52. evalscope/metrics/t2v_metrics/models/clipscore_models/mps_model.py +85 -0
  53. evalscope/metrics/t2v_metrics/models/clipscore_models/pickscore_model.py +62 -0
  54. evalscope/metrics/t2v_metrics/models/itmscore_models/__init__.py +26 -0
  55. evalscope/metrics/t2v_metrics/models/itmscore_models/blip2_itm_model.py +84 -0
  56. evalscope/metrics/t2v_metrics/models/itmscore_models/fga_blip2_model.py +97 -0
  57. evalscope/metrics/t2v_metrics/models/itmscore_models/image_reward/ImageReward.py +171 -0
  58. evalscope/metrics/t2v_metrics/models/itmscore_models/image_reward/__init__.py +0 -0
  59. evalscope/metrics/t2v_metrics/models/itmscore_models/image_reward/blip_pretrain.py +80 -0
  60. evalscope/metrics/t2v_metrics/models/itmscore_models/image_reward_model.py +73 -0
  61. evalscope/metrics/t2v_metrics/models/model.py +45 -0
  62. evalscope/metrics/t2v_metrics/models/utils.py +25 -0
  63. evalscope/metrics/t2v_metrics/models/vqascore_models/__init__.py +22 -0
  64. evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/__init__.py +0 -0
  65. evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/model/__init__.py +1 -0
  66. evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/model/language_model/clip_t5.py +300 -0
  67. evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/model/multimodal_encoder/builder.py +12 -0
  68. evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/model/multimodal_encoder/clip_encoder.py +82 -0
  69. evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/model/multimodal_projector/builder.py +50 -0
  70. evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5_model.py +218 -0
  71. evalscope/metrics/t2v_metrics/models/vqascore_models/gpt4v_model.py +150 -0
  72. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/__init__.py +26 -0
  73. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/config.py +465 -0
  74. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/dist_utils.py +141 -0
  75. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/gradcam.py +22 -0
  76. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/logger.py +188 -0
  77. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/optims.py +106 -0
  78. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/registry.py +307 -0
  79. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/utils.py +416 -0
  80. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/__init__.py +8 -0
  81. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/vqa.py +191 -0
  82. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/vqa_eval.py +318 -0
  83. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/default.yaml +10 -0
  84. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_caption_flant5xl.yaml +42 -0
  85. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_caption_opt2.7b.yaml +42 -0
  86. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_caption_opt6.7b.yaml +42 -0
  87. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_coco.yaml +36 -0
  88. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_instruct_flant5xl.yaml +43 -0
  89. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_instruct_flant5xxl.yaml +43 -0
  90. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_instruct_vicuna13b.yaml +43 -0
  91. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml +43 -0
  92. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain.yaml +36 -0
  93. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_flant5xl.yaml +42 -0
  94. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_flant5xl_iter_80k_total_100k_no_prefix.yaml +42 -0
  95. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_flant5xl_iter_80k_total_100k_prefix.yaml +42 -0
  96. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_flant5xl_vitL.yaml +43 -0
  97. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_flant5xxl.yaml +42 -0
  98. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_opt2.7b.yaml +42 -0
  99. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_opt6.7b.yaml +42 -0
  100. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_vitL.yaml +37 -0
  101. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_vicuna13b.yaml +43 -0
  102. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_vicuna7b.yaml +43 -0
  103. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/med_config.json +21 -0
  104. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/med_config_albef.json +22 -0
  105. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/med_large_config.json +21 -0
  106. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/__init__.py +208 -0
  107. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/base_model.py +231 -0
  108. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/Qformer.py +1093 -0
  109. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/__init__.py +0 -0
  110. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2.py +211 -0
  111. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2_image_text_matching.py +109 -0
  112. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2_qformer.py +452 -0
  113. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2_t5.py +364 -0
  114. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2_t5_instruct.py +755 -0
  115. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/fga_blip2.py +273 -0
  116. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/modeling_llama.py +880 -0
  117. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/modeling_t5.py +1844 -0
  118. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/__init__.py +81 -0
  119. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip.py +56 -0
  120. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_caption.py +212 -0
  121. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_classification.py +164 -0
  122. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_feature_extractor.py +202 -0
  123. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_image_text_matching.py +185 -0
  124. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_nlvr.py +178 -0
  125. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_outputs.py +112 -0
  126. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_pretrain.py +371 -0
  127. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_vqa.py +344 -0
  128. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/nlvr_encoder.py +858 -0
  129. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/clip_vit.py +271 -0
  130. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/eva_vit.py +503 -0
  131. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/med.py +1270 -0
  132. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/vit.py +473 -0
  133. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/processors/__init__.py +31 -0
  134. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/processors/base_processor.py +27 -0
  135. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/processors/blip_processors.py +233 -0
  136. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/processors/randaugment.py +392 -0
  137. evalscope/metrics/t2v_metrics/models/vqascore_models/mm_utils.py +127 -0
  138. evalscope/metrics/t2v_metrics/models/vqascore_models/vqa_model.py +17 -0
  139. evalscope/metrics/t2v_metrics/score.py +78 -0
  140. evalscope/metrics/t2v_metrics/vqascore.py +14 -0
  141. evalscope/models/__init__.py +50 -14
  142. evalscope/models/adapters/__init__.py +17 -0
  143. evalscope/models/{base_adapter.py → adapters/base_adapter.py} +17 -17
  144. evalscope/models/{chat_adapter.py → adapters/chat_adapter.py} +10 -7
  145. evalscope/models/{choice_adapter.py → adapters/choice_adapter.py} +2 -6
  146. evalscope/models/{custom_adapter.py → adapters/custom_adapter.py} +2 -4
  147. evalscope/models/{server_adapter.py → adapters/server_adapter.py} +1 -3
  148. evalscope/models/adapters/t2i_adapter.py +76 -0
  149. evalscope/models/custom/__init__.py +2 -1
  150. evalscope/models/custom/dummy_model.py +11 -13
  151. evalscope/models/local_model.py +82 -33
  152. evalscope/models/model.py +2 -42
  153. evalscope/models/register.py +26 -0
  154. evalscope/perf/benchmark.py +4 -3
  155. evalscope/perf/main.py +4 -2
  156. evalscope/perf/plugin/datasets/flickr8k.py +2 -1
  157. evalscope/perf/utils/benchmark_util.py +2 -2
  158. evalscope/perf/utils/db_util.py +16 -8
  159. evalscope/report/__init__.py +1 -0
  160. evalscope/report/app.py +117 -67
  161. evalscope/report/app_arguments.py +11 -0
  162. evalscope/report/generator.py +1 -1
  163. evalscope/run.py +3 -3
  164. evalscope/third_party/thinkbench/eval.py +19 -7
  165. evalscope/utils/chat_service.py +2 -2
  166. evalscope/utils/import_utils.py +66 -0
  167. evalscope/utils/utils.py +12 -4
  168. evalscope/version.py +2 -2
  169. {evalscope-0.14.0.dist-info → evalscope-0.15.1.dist-info}/METADATA +20 -3
  170. {evalscope-0.14.0.dist-info → evalscope-0.15.1.dist-info}/RECORD +178 -66
  171. tests/aigc/__init__.py +1 -0
  172. tests/aigc/test_t2i.py +87 -0
  173. tests/cli/test_run.py +20 -7
  174. tests/perf/test_perf.py +6 -3
  175. evalscope/metrics/code_metric.py +0 -98
  176. evalscope/metrics/resources/gpt2-zhcn3-v4.bpe +0 -58485
  177. evalscope/metrics/resources/gpt2-zhcn3-v4.json +0 -1
  178. {evalscope-0.14.0.dist-info → evalscope-0.15.1.dist-info}/LICENSE +0 -0
  179. {evalscope-0.14.0.dist-info → evalscope-0.15.1.dist-info}/WHEEL +0 -0
  180. {evalscope-0.14.0.dist-info → evalscope-0.15.1.dist-info}/entry_points.txt +0 -0
  181. {evalscope-0.14.0.dist-info → evalscope-0.15.1.dist-info}/top_level.txt +0 -0
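The headline addition in 0.15.1 is the AIGC text-to-image evaluation stack: the `evalscope/benchmarks/aigc/t2i/` adapters, the `t2i_adapter` model adapter, and the vendored `evalscope/metrics/t2v_metrics` package. As a rough orientation only, the sketch below shows how such a benchmark might be driven through evalscope's existing `TaskConfig`/`run_task` entry points; the dataset name and model id are guesses inferred from the new adapter filenames and `tests/aigc/test_t2i.py`, not confirmed identifiers, so check the 0.15.1 docs for the exact values.

```python
# Hedged sketch: drive one of the new text-to-image benchmarks via evalscope's
# generic entry points. `TaskConfig` and `run_task` are existing evalscope APIs;
# the dataset name 'general_t2i' and the model id below are assumptions inferred
# from the adapter filenames in this diff, not documented identifiers.
from evalscope import TaskConfig, run_task

task_cfg = TaskConfig(
    model='stabilityai/stable-diffusion-xl-base-1.0',  # assumed: a diffusers-style T2I model id
    datasets=['general_t2i'],                          # assumed: name matching general_t2i_adapter.py
    limit=5,                                           # small smoke-test run
)

run_task(task_cfg=task_cfg)
```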
evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/nlvr_encoder.py
@@ -0,0 +1,858 @@
+"""
+Copyright (c) 2022, salesforce.com, inc.
+All rights reserved.
+SPDX-License-Identifier: BSD-3-Clause
+For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
+"""
+
+import math
+import torch
+import torch.utils.checkpoint
+from torch import Tensor, device, nn
+from transformers.activations import ACT2FN
+from transformers.modeling_outputs import (BaseModelOutputWithPastAndCrossAttentions,
+                                           BaseModelOutputWithPoolingAndCrossAttentions)
+from transformers.modeling_utils import (PreTrainedModel, apply_chunking_to_forward, find_pruneable_heads_and_indices,
+                                         prune_linear_layer)
+from transformers.models.bert.configuration_bert import BertConfig
+from transformers.utils import logging
+from typing import Tuple
+
+logger = logging.get_logger(__name__)
+
+
+class BertEmbeddings(nn.Module):
+    """Construct the embeddings from word and position embeddings."""
+
+    def __init__(self, config):
+        super().__init__()
+        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
+        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
+
+        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
+        # any TensorFlow checkpoint file
+        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+        self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+        # position_ids (1, len position emb) is contiguous in memory and exported when serialized
+        self.register_buffer('position_ids', torch.arange(config.max_position_embeddings).expand((1, -1)))
+        self.position_embedding_type = getattr(config, 'position_embedding_type', 'absolute')
+
+        self.config = config
+
+    def forward(
+        self,
+        input_ids=None,
+        position_ids=None,
+        inputs_embeds=None,
+        past_key_values_length=0,
+    ):
+        if input_ids is not None:
+            input_shape = input_ids.size()
+        else:
+            input_shape = inputs_embeds.size()[:-1]
+
+        seq_length = input_shape[1]
+
+        if position_ids is None:
+            position_ids = self.position_ids[:, past_key_values_length:seq_length + past_key_values_length]
+
+        if inputs_embeds is None:
+            inputs_embeds = self.word_embeddings(input_ids)
+
+        embeddings = inputs_embeds
+
+        if self.position_embedding_type == 'absolute':
+            position_embeddings = self.position_embeddings(position_ids)
+            embeddings += position_embeddings
+        embeddings = self.LayerNorm(embeddings)
+        embeddings = self.dropout(embeddings)
+        return embeddings
+
+
+class BertSelfAttention(nn.Module):
+
+    def __init__(self, config, is_cross_attention):
+        super().__init__()
+        self.config = config
+        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, 'embedding_size'):
+            raise ValueError('The hidden size (%d) is not a multiple of the number of attention '
+                             'heads (%d)' % (config.hidden_size, config.num_attention_heads))
+
+        self.num_attention_heads = config.num_attention_heads
+        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
+        self.all_head_size = self.num_attention_heads * self.attention_head_size
+
+        self.query = nn.Linear(config.hidden_size, self.all_head_size)
+        if is_cross_attention:
+            self.key = nn.Linear(config.encoder_width, self.all_head_size)
+            self.value = nn.Linear(config.encoder_width, self.all_head_size)
+        else:
+            self.key = nn.Linear(config.hidden_size, self.all_head_size)
+            self.value = nn.Linear(config.hidden_size, self.all_head_size)
+
+        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
+        self.position_embedding_type = getattr(config, 'position_embedding_type', 'absolute')
+        if (self.position_embedding_type == 'relative_key' or self.position_embedding_type == 'relative_key_query'):
+            self.max_position_embeddings = config.max_position_embeddings
+            self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
+        self.save_attention = False
+
+    def save_attn_gradients(self, attn_gradients):
+        self.attn_gradients = attn_gradients
+
+    def get_attn_gradients(self):
+        return self.attn_gradients
+
+    def save_attention_map(self, attention_map):
+        self.attention_map = attention_map
+
+    def get_attention_map(self):
+        return self.attention_map
+
+    def transpose_for_scores(self, x):
+        new_x_shape = x.size()[:-1] + (
+            self.num_attention_heads,
+            self.attention_head_size,
+        )
+        x = x.view(*new_x_shape)
+        return x.permute(0, 2, 1, 3)
+
+    def forward(
+        self,
+        hidden_states,
+        attention_mask=None,
+        head_mask=None,
+        encoder_hidden_states=None,
+        encoder_attention_mask=None,
+        past_key_value=None,
+        output_attentions=False,
+    ):
+        mixed_query_layer = self.query(hidden_states)
+
+        # If this is instantiated as a cross-attention module, the keys
+        # and values come from an encoder; the attention mask needs to be
+        # such that the encoder's padding tokens are not attended to.
+        is_cross_attention = encoder_hidden_states is not None
+
+        if is_cross_attention:
+            key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
+            value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
+            attention_mask = encoder_attention_mask
+        elif past_key_value is not None:
+            key_layer = self.transpose_for_scores(self.key(hidden_states))
+            value_layer = self.transpose_for_scores(self.value(hidden_states))
+            key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
+            value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
+        else:
+            key_layer = self.transpose_for_scores(self.key(hidden_states))
+            value_layer = self.transpose_for_scores(self.value(hidden_states))
+
+        query_layer = self.transpose_for_scores(mixed_query_layer)
+
+        past_key_value = (key_layer, value_layer)
+
+        # Take the dot product between "query" and "key" to get the raw attention scores.
+        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
+
+        if (self.position_embedding_type == 'relative_key' or self.position_embedding_type == 'relative_key_query'):
+            seq_length = hidden_states.size()[1]
+            position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
+            position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
+            distance = position_ids_l - position_ids_r
+            positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
+            positional_embedding = positional_embedding.to(dtype=query_layer.dtype)  # fp16 compatibility
+
+            if self.position_embedding_type == 'relative_key':
+                relative_position_scores = torch.einsum('bhld,lrd->bhlr', query_layer, positional_embedding)
+                attention_scores = attention_scores + relative_position_scores
+            elif self.position_embedding_type == 'relative_key_query':
+                relative_position_scores_query = torch.einsum('bhld,lrd->bhlr', query_layer, positional_embedding)
+                relative_position_scores_key = torch.einsum('bhrd,lrd->bhlr', key_layer, positional_embedding)
+                attention_scores = (attention_scores + relative_position_scores_query + relative_position_scores_key)
+
+        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
+        if attention_mask is not None:
+            # Apply the attention mask is (precomputed for all layers in BertModel forward() function)
+            attention_scores = attention_scores + attention_mask
+
+        # Normalize the attention scores to probabilities.
+        attention_probs = nn.Softmax(dim=-1)(attention_scores)
+
+        if is_cross_attention and self.save_attention:
+            self.save_attention_map(attention_probs)
+            attention_probs.register_hook(self.save_attn_gradients)
+
+        # This is actually dropping out entire tokens to attend to, which might
+        # seem a bit unusual, but is taken from the original Transformer paper.
+        attention_probs_dropped = self.dropout(attention_probs)
+
+        # Mask heads if we want to
+        if head_mask is not None:
+            attention_probs_dropped = attention_probs_dropped * head_mask
+
+        context_layer = torch.matmul(attention_probs_dropped, value_layer)
+
+        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
+        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size, )
+        context_layer = context_layer.view(*new_context_layer_shape)
+
+        outputs = ((context_layer, attention_probs) if output_attentions else (context_layer, ))
+
+        outputs = outputs + (past_key_value, )
+        return outputs
+
+
+class BertSelfOutput(nn.Module):
+
+    def __init__(self, config, twin=False, merge=False):
+        super().__init__()
+        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+        self.dropout = nn.Dropout(config.hidden_dropout_prob)
+        if twin:
+            self.dense0 = nn.Linear(config.hidden_size, config.hidden_size)
+            self.dense1 = nn.Linear(config.hidden_size, config.hidden_size)
+        else:
+            self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+        if merge:
+            self.act = ACT2FN[config.hidden_act]
+            self.merge_layer = nn.Linear(config.hidden_size * 2, config.hidden_size)
+            self.merge = True
+        else:
+            self.merge = False
+
+    def forward(self, hidden_states, input_tensor):
+        if type(hidden_states) == list:
+            hidden_states0 = self.dense0(hidden_states[0])
+            hidden_states1 = self.dense1(hidden_states[1])
+            if self.merge:
+                # hidden_states = self.merge_layer(self.act(torch.cat([hidden_states0,hidden_states1],dim=-1)))
+                hidden_states = self.merge_layer(torch.cat([hidden_states0, hidden_states1], dim=-1))
+            else:
+                hidden_states = (hidden_states0 + hidden_states1) / 2
+        else:
+            hidden_states = self.dense(hidden_states)
+        hidden_states = self.dropout(hidden_states)
+        hidden_states = self.LayerNorm(hidden_states + input_tensor)
+        return hidden_states
+
+
+class BertAttention(nn.Module):
+
+    def __init__(self, config, is_cross_attention=False, layer_num=-1):
+        super().__init__()
+        if is_cross_attention:
+            self.self0 = BertSelfAttention(config, is_cross_attention)
+            self.self1 = BertSelfAttention(config, is_cross_attention)
+        else:
+            self.self = BertSelfAttention(config, is_cross_attention)
+        self.output = BertSelfOutput(
+            config,
+            twin=is_cross_attention,
+            merge=(is_cross_attention and layer_num >= 6),
+        )
+        self.pruned_heads = set()
+
+    def prune_heads(self, heads):
+        if len(heads) == 0:
+            return
+        heads, index = find_pruneable_heads_and_indices(
+            heads,
+            self.self.num_attention_heads,
+            self.self.attention_head_size,
+            self.pruned_heads,
+        )
+
+        # Prune linear layers
+        self.self.query = prune_linear_layer(self.self.query, index)
+        self.self.key = prune_linear_layer(self.self.key, index)
+        self.self.value = prune_linear_layer(self.self.value, index)
+        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
+
+        # Update hyper params and store pruned heads
+        self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
+        self.self.all_head_size = (self.self.attention_head_size * self.self.num_attention_heads)
+        self.pruned_heads = self.pruned_heads.union(heads)
+
+    def forward(
+        self,
+        hidden_states,
+        attention_mask=None,
+        head_mask=None,
+        encoder_hidden_states=None,
+        encoder_attention_mask=None,
+        past_key_value=None,
+        output_attentions=False,
+    ):
+        if type(encoder_hidden_states) == list:
+            self_outputs0 = self.self0(
+                hidden_states,
+                attention_mask,
+                head_mask,
+                encoder_hidden_states[0],
+                encoder_attention_mask[0],
+                past_key_value,
+                output_attentions,
+            )
+            self_outputs1 = self.self1(
+                hidden_states,
+                attention_mask,
+                head_mask,
+                encoder_hidden_states[1],
+                encoder_attention_mask[1],
+                past_key_value,
+                output_attentions,
+            )
+            attention_output = self.output([self_outputs0[0], self_outputs1[0]], hidden_states)
+
+            outputs = (attention_output, ) + self_outputs0[1:]  # add attentions if we output them
+        else:
+            self_outputs = self.self(
+                hidden_states,
+                attention_mask,
+                head_mask,
+                encoder_hidden_states,
+                encoder_attention_mask,
+                past_key_value,
+                output_attentions,
+            )
+            attention_output = self.output(self_outputs[0], hidden_states)
+            outputs = (attention_output, ) + self_outputs[1:]  # add attentions if we output them
+        return outputs
+
+
+class BertIntermediate(nn.Module):
+
+    def __init__(self, config):
+        super().__init__()
+        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
+        if isinstance(config.hidden_act, str):
+            self.intermediate_act_fn = ACT2FN[config.hidden_act]
+        else:
+            self.intermediate_act_fn = config.hidden_act
+
+    def forward(self, hidden_states):
+        hidden_states = self.dense(hidden_states)
+        hidden_states = self.intermediate_act_fn(hidden_states)
+        return hidden_states
+
+
+class BertOutput(nn.Module):
+
+    def __init__(self, config):
+        super().__init__()
+        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
+        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+        self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+    def forward(self, hidden_states, input_tensor):
+        hidden_states = self.dense(hidden_states)
+        hidden_states = self.dropout(hidden_states)
+        hidden_states = self.LayerNorm(hidden_states + input_tensor)
+        return hidden_states
+
+
+class BertLayer(nn.Module):
+
+    def __init__(self, config, layer_num):
+        super().__init__()
+        self.config = config
+        self.chunk_size_feed_forward = config.chunk_size_feed_forward
+        self.seq_len_dim = 1
+        self.attention = BertAttention(config)
+        self.layer_num = layer_num
+        if self.config.add_cross_attention:
+            self.crossattention = BertAttention(
+                config,
+                is_cross_attention=self.config.add_cross_attention,
+                layer_num=layer_num,
+            )
+        self.intermediate = BertIntermediate(config)
+        self.output = BertOutput(config)
+
+    def forward(
+        self,
+        hidden_states,
+        attention_mask=None,
+        head_mask=None,
+        encoder_hidden_states=None,
+        encoder_attention_mask=None,
+        past_key_value=None,
+        output_attentions=False,
+        mode=None,
+    ):
+        # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
+        self_attn_past_key_value = (past_key_value[:2] if past_key_value is not None else None)
+        self_attention_outputs = self.attention(
+            hidden_states,
+            attention_mask,
+            head_mask,
+            output_attentions=output_attentions,
+            past_key_value=self_attn_past_key_value,
+        )
+        attention_output = self_attention_outputs[0]
+
+        outputs = self_attention_outputs[1:-1]
+        present_key_value = self_attention_outputs[-1]
+
+        if mode == 'multimodal':
+            assert (encoder_hidden_states is not None), 'encoder_hidden_states must be given for cross-attention layers'
+            cross_attention_outputs = self.crossattention(
+                attention_output,
+                attention_mask,
+                head_mask,
+                encoder_hidden_states,
+                encoder_attention_mask,
+                output_attentions=output_attentions,
+            )
+            attention_output = cross_attention_outputs[0]
+            outputs = (outputs + cross_attention_outputs[1:-1])  # add cross attentions if we output attention weights
+        layer_output = apply_chunking_to_forward(
+            self.feed_forward_chunk,
+            self.chunk_size_feed_forward,
+            self.seq_len_dim,
+            attention_output,
+        )
+        outputs = (layer_output, ) + outputs
+
+        outputs = outputs + (present_key_value, )
+
+        return outputs
+
+    def feed_forward_chunk(self, attention_output):
+        intermediate_output = self.intermediate(attention_output)
+        layer_output = self.output(intermediate_output, attention_output)
+        return layer_output
+
+
+class BertEncoder(nn.Module):
+
+    def __init__(self, config):
+        super().__init__()
+        self.config = config
+        self.layer = nn.ModuleList([BertLayer(config, i) for i in range(config.num_hidden_layers)])
+        self.gradient_checkpointing = False
+
+    def forward(
+        self,
+        hidden_states,
+        attention_mask=None,
+        head_mask=None,
+        encoder_hidden_states=None,
+        encoder_attention_mask=None,
+        past_key_values=None,
+        use_cache=None,
+        output_attentions=False,
+        output_hidden_states=False,
+        return_dict=True,
+        mode='multimodal',
+    ):
+        all_hidden_states = () if output_hidden_states else None
+        all_self_attentions = () if output_attentions else None
+        all_cross_attentions = (() if output_attentions and self.config.add_cross_attention else None)
+
+        next_decoder_cache = () if use_cache else None
+
+        for i in range(self.config.num_hidden_layers):
+            layer_module = self.layer[i]
+            if output_hidden_states:
+                all_hidden_states = all_hidden_states + (hidden_states, )
+
+            layer_head_mask = head_mask[i] if head_mask is not None else None
+            past_key_value = past_key_values[i] if past_key_values is not None else None
+
+            if self.gradient_checkpointing and self.training:
+
+                if use_cache:
+                    logger.warn(
+                        '`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...')
+                    use_cache = False
+
+                def create_custom_forward(module):
+
+                    def custom_forward(*inputs):
+                        return module(*inputs, past_key_value, output_attentions)
+
+                    return custom_forward
+
+                layer_outputs = torch.utils.checkpoint.checkpoint(
+                    create_custom_forward(layer_module),
+                    hidden_states,
+                    attention_mask,
+                    layer_head_mask,
+                    encoder_hidden_states,
+                    encoder_attention_mask,
+                    mode=mode,
+                )
+            else:
+                layer_outputs = layer_module(
+                    hidden_states,
+                    attention_mask,
+                    layer_head_mask,
+                    encoder_hidden_states,
+                    encoder_attention_mask,
+                    past_key_value,
+                    output_attentions,
+                    mode=mode,
+                )
+
+            hidden_states = layer_outputs[0]
+            if use_cache:
+                next_decoder_cache += (layer_outputs[-1], )
+            if output_attentions:
+                all_self_attentions = all_self_attentions + (layer_outputs[1], )
+
+        if output_hidden_states:
+            all_hidden_states = all_hidden_states + (hidden_states, )
+
+        if not return_dict:
+            return tuple(v for v in [
+                hidden_states,
+                next_decoder_cache,
+                all_hidden_states,
+                all_self_attentions,
+                all_cross_attentions,
+            ] if v is not None)
+        return BaseModelOutputWithPastAndCrossAttentions(
+            last_hidden_state=hidden_states,
+            past_key_values=next_decoder_cache,
+            hidden_states=all_hidden_states,
+            attentions=all_self_attentions,
+            cross_attentions=all_cross_attentions,
+        )
+
+
+class BertPooler(nn.Module):
+
+    def __init__(self, config):
+        super().__init__()
+        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+        self.activation = nn.Tanh()
+
+    def forward(self, hidden_states):
+        # We "pool" the model by simply taking the hidden state corresponding
+        # to the first token.
+        first_token_tensor = hidden_states[:, 0]
+        pooled_output = self.dense(first_token_tensor)
+        pooled_output = self.activation(pooled_output)
+        return pooled_output
+
+
+class BertPredictionHeadTransform(nn.Module):
+
+    def __init__(self, config):
+        super().__init__()
+        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+        if isinstance(config.hidden_act, str):
+            self.transform_act_fn = ACT2FN[config.hidden_act]
+        else:
+            self.transform_act_fn = config.hidden_act
+        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+
+    def forward(self, hidden_states):
+        hidden_states = self.dense(hidden_states)
+        hidden_states = self.transform_act_fn(hidden_states)
+        hidden_states = self.LayerNorm(hidden_states)
+        return hidden_states
+
+
+class BertLMPredictionHead(nn.Module):
+
+    def __init__(self, config):
+        super().__init__()
+        self.transform = BertPredictionHeadTransform(config)
+
+        # The output weights are the same as the input embeddings, but there is
+        # an output-only bias for each token.
+        self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+
+        self.bias = nn.Parameter(torch.zeros(config.vocab_size))
+
+        # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
+        self.decoder.bias = self.bias
+
+    def forward(self, hidden_states):
+        hidden_states = self.transform(hidden_states)
+        hidden_states = self.decoder(hidden_states)
+        return hidden_states
+
+
+class BertOnlyMLMHead(nn.Module):
+
+    def __init__(self, config):
+        super().__init__()
+        self.predictions = BertLMPredictionHead(config)
+
+    def forward(self, sequence_output):
+        prediction_scores = self.predictions(sequence_output)
+        return prediction_scores
+
+
+class BertPreTrainedModel(PreTrainedModel):
+    """
+    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+    models.
+    """
+
+    config_class = BertConfig
+    base_model_prefix = 'bert'
+    _keys_to_ignore_on_load_missing = [r'position_ids']
+
+    def _init_weights(self, module):
+        """Initialize the weights"""
+        if isinstance(module, (nn.Linear, nn.Embedding)):
+            # Slightly different from the TF version which uses truncated_normal for initialization
+            # cf https://github.com/pytorch/pytorch/pull/5617
+            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+        elif isinstance(module, nn.LayerNorm):
+            module.bias.data.zero_()
+            module.weight.data.fill_(1.0)
+        if isinstance(module, nn.Linear) and module.bias is not None:
+            module.bias.data.zero_()
+
+
+class BertModel(BertPreTrainedModel):
+    """
+    The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
+    cross-attention is added between the self-attention layers, following the architecture described in `Attention is
+    all you need <https://arxiv.org/abs/1706.03762>`__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
+    Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
+    argument and :obj:`add_cross_attention` set to :obj:`True`; an :obj:`encoder_hidden_states` is then expected as an
+    input to the forward pass.
+    """
+
+    def __init__(self, config, add_pooling_layer=True):
+        super().__init__(config)
+        self.config = config
+
+        self.embeddings = BertEmbeddings(config)
+
+        self.encoder = BertEncoder(config)
+
+        self.pooler = BertPooler(config) if add_pooling_layer else None
+
+        self.init_weights()
+
+    def get_input_embeddings(self):
+        return self.embeddings.word_embeddings
+
+    def set_input_embeddings(self, value):
+        self.embeddings.word_embeddings = value
+
+    def _prune_heads(self, heads_to_prune):
+        """
+        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
+        class PreTrainedModel
+        """
+        for layer, heads in heads_to_prune.items():
+            self.encoder.layer[layer].attention.prune_heads(heads)
+
+    def get_extended_attention_mask(
+        self,
+        attention_mask: Tensor,
+        input_shape: Tuple[int],
+        device: device,
+        is_decoder: bool,
+    ) -> Tensor:
+        """
+        Makes broadcastable attention and causal masks so that future and masked tokens are ignored.
+
+        Arguments:
+            attention_mask (:obj:`torch.Tensor`):
+                Mask with ones indicating tokens to attend to, zeros for tokens to ignore.
+            input_shape (:obj:`Tuple[int]`):
+                The shape of the input to the model.
+            device: (:obj:`torch.device`):
+                The device of the input to the model.
+
+        Returns:
+            :obj:`torch.Tensor` The extended attention mask, with a the same dtype as :obj:`attention_mask.dtype`.
+        """
+        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
+        # ourselves in which case we just need to make it broadcastable to all heads.
+        if attention_mask.dim() == 3:
+            extended_attention_mask = attention_mask[:, None, :, :]
+        elif attention_mask.dim() == 2:
+            # Provided a padding mask of dimensions [batch_size, seq_length]
+            # - if the model is a decoder, apply a causal mask in addition to the padding mask
+            # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
+            if is_decoder:
+                batch_size, seq_length = input_shape
+
+                seq_ids = torch.arange(seq_length, device=device)
+                causal_mask = (seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None])
+                # in case past_key_values are used we need to add a prefix ones mask to the causal mask
+                # causal and attention masks must have same type with pytorch version < 1.3
+                causal_mask = causal_mask.to(attention_mask.dtype)
+
+                if causal_mask.shape[1] < attention_mask.shape[1]:
+                    prefix_seq_len = attention_mask.shape[1] - causal_mask.shape[1]
+                    causal_mask = torch.cat(
+                        [
+                            torch.ones(
+                                (batch_size, seq_length, prefix_seq_len),
+                                device=device,
+                                dtype=causal_mask.dtype,
+                            ),
+                            causal_mask,
+                        ],
+                        axis=-1,
+                    )
+
+                extended_attention_mask = (causal_mask[:, None, :, :] * attention_mask[:, None, None, :])
+            else:
+                extended_attention_mask = attention_mask[:, None, None, :]
+        else:
+            raise ValueError('Wrong shape for input_ids (shape {}) or attention_mask (shape {})'.format(
+                input_shape, attention_mask.shape))
+
+        # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
+        # masked positions, this operation will create a tensor which is 0.0 for
+        # positions we want to attend and -10000.0 for masked positions.
+        # Since we are adding it to the raw scores before the softmax, this is
+        # effectively the same as removing these entirely.
+        extended_attention_mask = extended_attention_mask.to(dtype=self.dtype)  # fp16 compatibility
+        extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
+        return extended_attention_mask
+
+    def forward(
+        self,
+        input_ids=None,
+        attention_mask=None,
+        position_ids=None,
+        head_mask=None,
+        inputs_embeds=None,
+        encoder_embeds=None,
+        encoder_hidden_states=None,
+        encoder_attention_mask=None,
+        past_key_values=None,
+        use_cache=None,
+        output_attentions=None,
+        output_hidden_states=None,
+        return_dict=None,
+        is_decoder=False,
+        mode='multimodal',
+    ):
+        r"""
+        encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
+            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
+            the model is configured as a decoder.
+        encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
+            Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
+            the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
+            - 1 for tokens that are **not masked**,
+            - 0 for tokens that are **masked**.
+        past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
+            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
+            If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
+            (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
+            instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
+        use_cache (:obj:`bool`, `optional`):
+            If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
+            decoding (see :obj:`past_key_values`).
+        """
+        output_attentions = (output_attentions if output_attentions is not None else self.config.output_attentions)
+        output_hidden_states = (
+            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states)
+        return_dict = (return_dict if return_dict is not None else self.config.use_return_dict)
+
+        if is_decoder:
+            use_cache = use_cache if use_cache is not None else self.config.use_cache
+        else:
+            use_cache = False
+
+        if input_ids is not None and inputs_embeds is not None:
+            raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time')
+        elif input_ids is not None:
+            input_shape = input_ids.size()
+            batch_size, seq_length = input_shape
+            device = input_ids.device
+        elif inputs_embeds is not None:
+            input_shape = inputs_embeds.size()[:-1]
+            batch_size, seq_length = input_shape
+            device = inputs_embeds.device
+        elif encoder_embeds is not None:
+            input_shape = encoder_embeds.size()[:-1]
+            batch_size, seq_length = input_shape
+            device = encoder_embeds.device
+        else:
+            raise ValueError('You have to specify either input_ids or inputs_embeds or encoder_embeds')
+
+        # past_key_values_length
+        past_key_values_length = (past_key_values[0][0].shape[2] if past_key_values is not None else 0)
+
+        if attention_mask is None:
+            attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)
+
+        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
+        # ourselves in which case we just need to make it broadcastable to all heads.
+        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device,
+                                                                                 is_decoder)
+
+        # If a 2D or 3D attention mask is provided for the cross-attention
+        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
+        if encoder_hidden_states is not None:
+            if type(encoder_hidden_states) == list:
+                encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states[0].size()
+            else:
+                (
+                    encoder_batch_size,
+                    encoder_sequence_length,
+                    _,
+                ) = encoder_hidden_states.size()
+            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
+
+            if type(encoder_attention_mask) == list:
+                encoder_extended_attention_mask = [self.invert_attention_mask(mask) for mask in encoder_attention_mask]
+            elif encoder_attention_mask is None:
+                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
+                encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
+            else:
+                encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
+        else:
+            encoder_extended_attention_mask = None
+
+        # Prepare head mask if needed
+        # 1.0 in head_mask indicate we keep the head
+        # attention_probs has shape bsz x n_heads x N x N
+        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
+        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
+        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
+
+        if encoder_embeds is None:
+            embedding_output = self.embeddings(
+                input_ids=input_ids,
+                position_ids=position_ids,
+                inputs_embeds=inputs_embeds,
+                past_key_values_length=past_key_values_length,
+            )
+        else:
+            embedding_output = encoder_embeds
+
+        encoder_outputs = self.encoder(
+            embedding_output,
+            attention_mask=extended_attention_mask,
+            head_mask=head_mask,
+            encoder_hidden_states=encoder_hidden_states,
+            encoder_attention_mask=encoder_extended_attention_mask,
+            past_key_values=past_key_values,
+            use_cache=use_cache,
+            output_attentions=output_attentions,
+            output_hidden_states=output_hidden_states,
+            return_dict=return_dict,
+            mode=mode,
+        )
+        sequence_output = encoder_outputs[0]
+        pooled_output = (self.pooler(sequence_output) if self.pooler is not None else None)
+
+        if not return_dict:
+            return (sequence_output, pooled_output) + encoder_outputs[1:]
+
+        return BaseModelOutputWithPoolingAndCrossAttentions(
+            last_hidden_state=sequence_output,
+            pooler_output=pooled_output,
+            past_key_values=encoder_outputs.past_key_values,
+            hidden_states=encoder_outputs.hidden_states,
+            attentions=encoder_outputs.attentions,
+            cross_attentions=encoder_outputs.cross_attentions,
+        )
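For orientation, the module above appears to be the NLVR-style BERT encoder vendored from BLIP/LAVIS: when `add_cross_attention` is enabled, each layer carries twin cross-attention branches (`self0`/`self1`) that attend to two separate image feature streams, and `BertSelfOutput` either averages the two branch outputs or, from layer 6 onward, merges them through a linear layer. A minimal, hypothetical smoke test of the vendored class might look like the following; the config values (`encoder_width`, two 197x768 feature streams) are illustrative assumptions, not settings used by evalscope itself.

```python
# Hypothetical smoke test of the vendored encoder; not an evalscope API example.
import torch
from transformers.models.bert.configuration_bert import BertConfig

from evalscope.metrics.t2v_metrics.models.vqascore_models.lavis.models.blip_models.nlvr_encoder import BertModel

config = BertConfig(num_hidden_layers=2)   # small config for a quick check
config.add_cross_attention = True          # enables the twin cross-attention branches
config.encoder_width = 768                 # assumed width of the image features

model = BertModel(config, add_pooling_layer=False).eval()

input_ids = torch.randint(0, config.vocab_size, (2, 16))
image_feats = [torch.randn(2, 197, 768), torch.randn(2, 197, 768)]    # two image streams
image_atts = [torch.ones(2, 197, dtype=torch.long) for _ in range(2)]

with torch.no_grad():
    out = model(
        input_ids=input_ids,
        encoder_hidden_states=image_feats,
        encoder_attention_mask=image_atts,
        mode='multimodal',
    )
print(out.last_hidden_state.shape)  # torch.Size([2, 16, 768])
```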