evalscope 0.10.0__py3-none-any.whl → 1.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (606)
  1. evalscope/__init__.py +4 -1
  2. evalscope/api/benchmark/__init__.py +11 -0
  3. evalscope/api/benchmark/adapters/__init__.py +7 -0
  4. evalscope/api/benchmark/adapters/agent_adapter.py +8 -0
  5. evalscope/api/benchmark/adapters/default_data_adapter.py +754 -0
  6. evalscope/api/benchmark/adapters/image_edit_adapter.py +82 -0
  7. evalscope/api/benchmark/adapters/multi_choice_adapter.py +86 -0
  8. evalscope/api/benchmark/adapters/ner_adapter.py +212 -0
  9. evalscope/api/benchmark/adapters/text2image_adapter.py +157 -0
  10. evalscope/api/benchmark/adapters/vision_language_adapter.py +8 -0
  11. evalscope/api/benchmark/benchmark.py +404 -0
  12. evalscope/api/benchmark/meta.py +124 -0
  13. evalscope/api/dataset/__init__.py +2 -0
  14. evalscope/api/dataset/dataset.py +370 -0
  15. evalscope/api/dataset/loader.py +266 -0
  16. evalscope/api/dataset/utils.py +143 -0
  17. evalscope/api/evaluator/__init__.py +3 -0
  18. evalscope/api/evaluator/cache.py +382 -0
  19. evalscope/api/evaluator/evaluator.py +61 -0
  20. evalscope/api/evaluator/state.py +280 -0
  21. evalscope/api/filter/__init__.py +1 -0
  22. evalscope/api/filter/filter.py +72 -0
  23. evalscope/api/messages/__init__.py +12 -0
  24. evalscope/api/messages/chat_message.py +248 -0
  25. evalscope/api/messages/content.py +102 -0
  26. evalscope/api/messages/utils.py +35 -0
  27. evalscope/api/metric/__init__.py +2 -0
  28. evalscope/api/metric/metric.py +60 -0
  29. evalscope/api/metric/scorer.py +113 -0
  30. evalscope/api/mixin/__init__.py +2 -0
  31. evalscope/api/mixin/llm_judge_mixin.py +170 -0
  32. evalscope/api/mixin/sandbox_mixin.py +182 -0
  33. evalscope/api/model/__init__.py +12 -0
  34. evalscope/api/model/generate_config.py +161 -0
  35. evalscope/api/model/model.py +386 -0
  36. evalscope/api/model/model_output.py +285 -0
  37. evalscope/api/registry.py +182 -0
  38. evalscope/api/tool/__init__.py +3 -0
  39. evalscope/api/tool/tool_call.py +101 -0
  40. evalscope/api/tool/tool_info.py +173 -0
  41. evalscope/api/tool/utils.py +64 -0
  42. evalscope/app/__init__.py +28 -0
  43. evalscope/app/app.py +38 -0
  44. evalscope/app/arguments.py +11 -0
  45. evalscope/app/constants.py +22 -0
  46. evalscope/app/ui/__init__.py +20 -0
  47. evalscope/app/ui/app_ui.py +53 -0
  48. evalscope/app/ui/multi_model.py +353 -0
  49. evalscope/app/ui/sidebar.py +42 -0
  50. evalscope/app/ui/single_model.py +220 -0
  51. evalscope/app/ui/visualization.py +36 -0
  52. evalscope/app/utils/data_utils.py +195 -0
  53. evalscope/app/utils/env_utils.py +12 -0
  54. evalscope/app/utils/localization.py +221 -0
  55. evalscope/app/utils/text_utils.py +119 -0
  56. evalscope/app/utils/visualization.py +96 -0
  57. evalscope/arguments.py +32 -9
  58. evalscope/backend/opencompass/api_meta_template.py +2 -1
  59. evalscope/backend/opencompass/backend_manager.py +10 -7
  60. evalscope/backend/rag_eval/__init__.py +1 -1
  61. evalscope/backend/rag_eval/backend_manager.py +23 -6
  62. evalscope/backend/rag_eval/clip_benchmark/dataset_builder.py +33 -21
  63. evalscope/backend/rag_eval/clip_benchmark/task_template.py +8 -4
  64. evalscope/backend/rag_eval/cmteb/arguments.py +14 -1
  65. evalscope/backend/rag_eval/cmteb/task_template.py +19 -3
  66. evalscope/backend/rag_eval/cmteb/tasks/CustomTask.py +1 -1
  67. evalscope/backend/rag_eval/ragas/arguments.py +0 -1
  68. evalscope/backend/rag_eval/ragas/task_template.py +2 -1
  69. evalscope/backend/rag_eval/ragas/tasks/build_distribution.py +2 -1
  70. evalscope/backend/rag_eval/ragas/tasks/build_transform.py +7 -4
  71. evalscope/backend/rag_eval/ragas/tasks/testset_generation.py +9 -3
  72. evalscope/backend/rag_eval/ragas/tasks/translate_prompt.py +2 -6
  73. evalscope/backend/rag_eval/utils/embedding.py +125 -32
  74. evalscope/backend/rag_eval/utils/llm.py +16 -16
  75. evalscope/backend/vlm_eval_kit/backend_manager.py +8 -3
  76. evalscope/benchmarks/__init__.py +17 -5
  77. evalscope/benchmarks/aa_lcr/__init__.py +0 -0
  78. evalscope/benchmarks/aa_lcr/aa_lcr_adapter.py +205 -0
  79. evalscope/benchmarks/ai2d/__init__.py +0 -0
  80. evalscope/benchmarks/ai2d/ai2d_adapter.py +54 -0
  81. evalscope/benchmarks/aime/__init__.py +0 -0
  82. evalscope/benchmarks/aime/aime24_adapter.py +55 -0
  83. evalscope/benchmarks/aime/aime25_adapter.py +181 -0
  84. evalscope/benchmarks/aime/grader.py +307 -0
  85. evalscope/{metrics/math_accuracy.py → benchmarks/aime/math_normalize.py} +61 -72
  86. evalscope/benchmarks/alpaca_eval/__init__.py +0 -0
  87. evalscope/benchmarks/alpaca_eval/alpaca_eval_adapter.py +133 -0
  88. evalscope/benchmarks/amc/__init__.py +0 -0
  89. evalscope/benchmarks/amc/amc_adapter.py +51 -0
  90. evalscope/benchmarks/arc/arc_adapter.py +34 -149
  91. evalscope/benchmarks/arena_hard/__init__.py +0 -0
  92. evalscope/benchmarks/arena_hard/arena_hard_adapter.py +149 -0
  93. evalscope/benchmarks/arena_hard/utils.py +186 -0
  94. evalscope/benchmarks/bbh/bbh_adapter.py +117 -157
  95. evalscope/benchmarks/bfcl/__init__.py +0 -0
  96. evalscope/benchmarks/bfcl/v3/__init__.py +0 -0
  97. evalscope/benchmarks/bfcl/v3/bfcl_v3_adapter.py +370 -0
  98. evalscope/benchmarks/bfcl/v3/generation.py +222 -0
  99. evalscope/benchmarks/bfcl/v3/utils.py +23 -0
  100. evalscope/benchmarks/bfcl/v4/__init__.py +0 -0
  101. evalscope/benchmarks/bfcl/v4/bfcl_v4_adapter.py +229 -0
  102. evalscope/benchmarks/bfcl/v4/utils.py +410 -0
  103. evalscope/benchmarks/biomix_qa/__init__.py +0 -0
  104. evalscope/benchmarks/biomix_qa/biomix_qa_adapter.py +36 -0
  105. evalscope/benchmarks/blink/__init__.py +0 -0
  106. evalscope/benchmarks/blink/blink_adapter.py +61 -0
  107. evalscope/benchmarks/ceval/ceval_adapter.py +93 -174
  108. evalscope/benchmarks/chartqa/__init__.py +0 -0
  109. evalscope/benchmarks/chartqa/chartqa_adapter.py +80 -0
  110. evalscope/benchmarks/chartqa/utils.py +38 -0
  111. evalscope/benchmarks/chinese_simple_qa/__init__.py +0 -0
  112. evalscope/benchmarks/chinese_simple_qa/csimple_qa_adapter.py +170 -0
  113. evalscope/benchmarks/cmmlu/cmmlu_adapter.py +34 -140
  114. evalscope/benchmarks/coin_flip/__init__.py +0 -0
  115. evalscope/benchmarks/coin_flip/coin_flip_adapter.py +128 -0
  116. evalscope/benchmarks/commonsense_qa/__init__.py +0 -0
  117. evalscope/benchmarks/commonsense_qa/commonsense_qa_adapter.py +32 -0
  118. evalscope/benchmarks/competition_math/competition_math_adapter.py +64 -112
  119. evalscope/benchmarks/data_collection/__init__.py +0 -0
  120. evalscope/benchmarks/data_collection/data_collection_adapter.py +215 -0
  121. evalscope/benchmarks/docmath/__init__.py +0 -0
  122. evalscope/benchmarks/docmath/docmath_adapter.py +143 -0
  123. evalscope/benchmarks/docmath/utils.py +219 -0
  124. evalscope/benchmarks/docvqa/__init__.py +0 -0
  125. evalscope/benchmarks/docvqa/docvqa_adapter.py +67 -0
  126. evalscope/benchmarks/drivelology/__init__.py +0 -0
  127. evalscope/benchmarks/drivelology/drivelology_binary_adapter.py +170 -0
  128. evalscope/benchmarks/drivelology/drivelology_multilabel_adapter.py +254 -0
  129. evalscope/benchmarks/drivelology/drivelology_selection_adapter.py +49 -0
  130. evalscope/benchmarks/drivelology/drivelology_writing_adapter.py +218 -0
  131. evalscope/benchmarks/drop/__init__.py +0 -0
  132. evalscope/benchmarks/drop/drop_adapter.py +155 -0
  133. evalscope/benchmarks/drop/utils.py +156 -0
  134. evalscope/benchmarks/frames/__init__.py +0 -0
  135. evalscope/benchmarks/frames/frames_adapter.py +175 -0
  136. evalscope/benchmarks/frames/utils.py +37 -0
  137. evalscope/benchmarks/general_arena/__init__.py +0 -0
  138. evalscope/benchmarks/general_arena/general_arena_adapter.py +454 -0
  139. evalscope/benchmarks/general_arena/utils.py +223 -0
  140. evalscope/benchmarks/general_mcq/__init__.py +0 -0
  141. evalscope/benchmarks/general_mcq/general_mcq_adapter.py +58 -0
  142. evalscope/benchmarks/general_qa/general_qa_adapter.py +75 -107
  143. evalscope/benchmarks/gpqa/__init__.py +0 -0
  144. evalscope/benchmarks/gpqa/gpqa_adapter.py +90 -0
  145. evalscope/benchmarks/gpqa/prompt.py +88 -0
  146. evalscope/benchmarks/gsm8k/gsm8k_adapter.py +77 -144
  147. evalscope/benchmarks/hallusion_bench/__init__.py +0 -0
  148. evalscope/benchmarks/hallusion_bench/hallusion_bench_adapter.py +159 -0
  149. evalscope/benchmarks/halu_eval/__init__.py +0 -0
  150. evalscope/benchmarks/halu_eval/halu_eval_adapter.py +128 -0
  151. evalscope/benchmarks/halu_eval/halu_eval_instructions.py +84 -0
  152. evalscope/benchmarks/healthbench/__init__.py +0 -0
  153. evalscope/benchmarks/healthbench/healthbench_adapter.py +282 -0
  154. evalscope/benchmarks/healthbench/utils.py +102 -0
  155. evalscope/benchmarks/hellaswag/hellaswag_adapter.py +36 -134
  156. evalscope/benchmarks/hle/__init__.py +0 -0
  157. evalscope/benchmarks/hle/hle_adapter.py +153 -0
  158. evalscope/benchmarks/humaneval/humaneval_adapter.py +80 -88
  159. evalscope/benchmarks/humaneval/utils.py +235 -0
  160. evalscope/benchmarks/ifeval/ifeval_adapter.py +71 -45
  161. evalscope/benchmarks/ifeval/instructions.py +112 -68
  162. evalscope/benchmarks/ifeval/instructions_registry.py +1 -1
  163. evalscope/benchmarks/ifeval/instructions_util.py +2 -3
  164. evalscope/benchmarks/ifeval/utils.py +6 -7
  165. evalscope/benchmarks/image_edit/__init__.py +0 -0
  166. evalscope/benchmarks/image_edit/gedit/__init__.py +0 -0
  167. evalscope/benchmarks/image_edit/gedit/gedit_adapter.py +138 -0
  168. evalscope/benchmarks/image_edit/gedit/utils.py +372 -0
  169. evalscope/benchmarks/image_edit/gedit/vie_prompts.py +406 -0
  170. evalscope/benchmarks/infovqa/__init__.py +0 -0
  171. evalscope/benchmarks/infovqa/infovqa_adapter.py +66 -0
  172. evalscope/benchmarks/iquiz/iquiz_adapter.py +30 -58
  173. evalscope/benchmarks/live_code_bench/__init__.py +0 -0
  174. evalscope/benchmarks/live_code_bench/evaluate_utils.py +195 -0
  175. evalscope/benchmarks/live_code_bench/extract_utils.py +70 -0
  176. evalscope/benchmarks/live_code_bench/live_code_bench_adapter.py +150 -0
  177. evalscope/benchmarks/live_code_bench/load_utils.py +63 -0
  178. evalscope/benchmarks/live_code_bench/pass_k_utils.py +56 -0
  179. evalscope/benchmarks/live_code_bench/prompts.py +207 -0
  180. evalscope/benchmarks/live_code_bench/sandbox_evaluate_utils.py +220 -0
  181. evalscope/benchmarks/live_code_bench/testing_util.py +544 -0
  182. evalscope/benchmarks/logi_qa/__int__.py +0 -0
  183. evalscope/benchmarks/logi_qa/logi_qa_adapter.py +41 -0
  184. evalscope/benchmarks/maritime_bench/__init__.py +0 -0
  185. evalscope/benchmarks/maritime_bench/maritime_bench_adapter.py +56 -0
  186. evalscope/benchmarks/math_500/__init__.py +0 -0
  187. evalscope/benchmarks/math_500/math_500_adapter.py +55 -0
  188. evalscope/benchmarks/math_qa/__init__.py +0 -0
  189. evalscope/benchmarks/math_qa/math_qa_adapter.py +35 -0
  190. evalscope/benchmarks/math_verse/__init__.py +0 -0
  191. evalscope/benchmarks/math_verse/math_verse_adapter.py +105 -0
  192. evalscope/benchmarks/math_vision/__init__.py +0 -0
  193. evalscope/benchmarks/math_vision/math_vision_adapter.py +116 -0
  194. evalscope/benchmarks/math_vista/__init__.py +0 -0
  195. evalscope/benchmarks/math_vista/math_vista_adapter.py +114 -0
  196. evalscope/benchmarks/med_mcqa/__init__.py +0 -0
  197. evalscope/benchmarks/med_mcqa/med_mcqa_adapter.py +32 -0
  198. evalscope/benchmarks/minerva_math/__init__.py +0 -0
  199. evalscope/benchmarks/minerva_math/minerva_math_adapter.py +53 -0
  200. evalscope/benchmarks/mm_bench/__init__.py +0 -0
  201. evalscope/benchmarks/mm_bench/mm_bench_adapter.py +99 -0
  202. evalscope/benchmarks/mm_star/__init__.py +0 -0
  203. evalscope/benchmarks/mm_star/mm_star_adapter.py +73 -0
  204. evalscope/benchmarks/mmlu/mmlu_adapter.py +32 -210
  205. evalscope/benchmarks/mmlu_pro/mmlu_pro_adapter.py +87 -103
  206. evalscope/benchmarks/mmlu_redux/__init__.py +0 -0
  207. evalscope/benchmarks/mmlu_redux/mmlu_redux_adapter.py +139 -0
  208. evalscope/benchmarks/mmmu/__init__.py +0 -0
  209. evalscope/benchmarks/mmmu/mmmu_adapter.py +159 -0
  210. evalscope/benchmarks/mmmu_pro/__init__.py +0 -0
  211. evalscope/benchmarks/mmmu_pro/mmmu_pro_adapter.py +124 -0
  212. evalscope/benchmarks/mri_mcqa/__init__.py +0 -0
  213. evalscope/benchmarks/mri_mcqa/mri_mcqa_adapter.py +34 -0
  214. evalscope/benchmarks/multi_if/__init__.py +0 -0
  215. evalscope/benchmarks/multi_if/ifeval.py +3354 -0
  216. evalscope/benchmarks/multi_if/metrics.py +120 -0
  217. evalscope/benchmarks/multi_if/multi_if_adapter.py +161 -0
  218. evalscope/benchmarks/music_trivia/__init__.py +0 -0
  219. evalscope/benchmarks/music_trivia/music_trivia_adapter.py +36 -0
  220. evalscope/benchmarks/musr/__init__.py +0 -0
  221. evalscope/benchmarks/musr/musr_adapter.py +43 -0
  222. evalscope/benchmarks/needle_haystack/__init__.py +0 -0
  223. evalscope/benchmarks/needle_haystack/needle_haystack_adapter.py +389 -0
  224. evalscope/benchmarks/needle_haystack/utils.py +79 -0
  225. evalscope/benchmarks/ner/__init__.py +0 -0
  226. evalscope/benchmarks/ner/broad_twitter_corpus_adapter.py +52 -0
  227. evalscope/benchmarks/ner/conll2003_adapter.py +48 -0
  228. evalscope/benchmarks/ner/copious_adapter.py +85 -0
  229. evalscope/benchmarks/ner/cross_ner_adapter.py +120 -0
  230. evalscope/benchmarks/ner/cross_ner_entities/__init__.py +0 -0
  231. evalscope/benchmarks/ner/cross_ner_entities/ai.py +54 -0
  232. evalscope/benchmarks/ner/cross_ner_entities/literature.py +36 -0
  233. evalscope/benchmarks/ner/cross_ner_entities/music.py +39 -0
  234. evalscope/benchmarks/ner/cross_ner_entities/politics.py +37 -0
  235. evalscope/benchmarks/ner/cross_ner_entities/science.py +58 -0
  236. evalscope/benchmarks/ner/genia_ner_adapter.py +66 -0
  237. evalscope/benchmarks/ner/harvey_ner_adapter.py +58 -0
  238. evalscope/benchmarks/ner/mit_movie_trivia_adapter.py +74 -0
  239. evalscope/benchmarks/ner/mit_restaurant_adapter.py +66 -0
  240. evalscope/benchmarks/ner/ontonotes5_adapter.py +87 -0
  241. evalscope/benchmarks/ner/wnut2017_adapter.py +61 -0
  242. evalscope/benchmarks/ocr_bench/__init__.py +0 -0
  243. evalscope/benchmarks/ocr_bench/ocr_bench/__init__.py +0 -0
  244. evalscope/benchmarks/ocr_bench/ocr_bench/ocr_bench_adapter.py +101 -0
  245. evalscope/benchmarks/ocr_bench/ocr_bench_v2/IoUscore_metric.py +87 -0
  246. evalscope/benchmarks/ocr_bench/ocr_bench_v2/TEDS_metric.py +963 -0
  247. evalscope/benchmarks/ocr_bench/ocr_bench_v2/__init__.py +0 -0
  248. evalscope/benchmarks/ocr_bench/ocr_bench_v2/ocr_bench_v2_adapter.py +161 -0
  249. evalscope/benchmarks/ocr_bench/ocr_bench_v2/page_ocr_metric.py +50 -0
  250. evalscope/benchmarks/ocr_bench/ocr_bench_v2/parallel.py +46 -0
  251. evalscope/benchmarks/ocr_bench/ocr_bench_v2/spotting_eval/__init__.py +0 -0
  252. evalscope/benchmarks/ocr_bench/ocr_bench_v2/spotting_eval/readme.txt +26 -0
  253. evalscope/benchmarks/ocr_bench/ocr_bench_v2/spotting_eval/rrc_evaluation_funcs_1_1.py +537 -0
  254. evalscope/benchmarks/ocr_bench/ocr_bench_v2/spotting_eval/script.py +481 -0
  255. evalscope/benchmarks/ocr_bench/ocr_bench_v2/spotting_metric.py +179 -0
  256. evalscope/benchmarks/ocr_bench/ocr_bench_v2/utils.py +433 -0
  257. evalscope/benchmarks/ocr_bench/ocr_bench_v2/vqa_metric.py +254 -0
  258. evalscope/benchmarks/olympiad_bench/__init__.py +0 -0
  259. evalscope/benchmarks/olympiad_bench/olympiad_bench_adapter.py +163 -0
  260. evalscope/benchmarks/olympiad_bench/utils.py +565 -0
  261. evalscope/benchmarks/omni_bench/__init__.py +0 -0
  262. evalscope/benchmarks/omni_bench/omni_bench_adapter.py +86 -0
  263. evalscope/benchmarks/omnidoc_bench/__init__.py +0 -0
  264. evalscope/benchmarks/omnidoc_bench/end2end_eval.py +349 -0
  265. evalscope/benchmarks/omnidoc_bench/metrics.py +547 -0
  266. evalscope/benchmarks/omnidoc_bench/omnidoc_bench_adapter.py +135 -0
  267. evalscope/benchmarks/omnidoc_bench/utils.py +1937 -0
  268. evalscope/benchmarks/piqa/__init__.py +0 -0
  269. evalscope/benchmarks/piqa/piqa_adapter.py +32 -0
  270. evalscope/benchmarks/poly_math/__init__.py +0 -0
  271. evalscope/benchmarks/poly_math/poly_math_adapter.py +132 -0
  272. evalscope/benchmarks/poly_math/utils/instruction.py +105 -0
  273. evalscope/benchmarks/pope/__init__.py +0 -0
  274. evalscope/benchmarks/pope/pope_adapter.py +112 -0
  275. evalscope/benchmarks/process_bench/__init__.py +0 -0
  276. evalscope/benchmarks/process_bench/process_bench_adapter.py +171 -0
  277. evalscope/benchmarks/pumed_qa/__init__.py +0 -0
  278. evalscope/benchmarks/pumed_qa/pubmed_qa_adapter.py +175 -0
  279. evalscope/benchmarks/qasc/__init__.py +0 -0
  280. evalscope/benchmarks/qasc/qasc_adapter.py +35 -0
  281. evalscope/benchmarks/race/race_adapter.py +33 -120
  282. evalscope/benchmarks/real_world_qa/__init__.py +0 -0
  283. evalscope/benchmarks/real_world_qa/real_world_qa_adapter.py +64 -0
  284. evalscope/benchmarks/sciq/__init__.py +0 -0
  285. evalscope/benchmarks/sciq/sciq_adapter.py +36 -0
  286. evalscope/benchmarks/seed_bench_2_plus/__init__.py +0 -0
  287. evalscope/benchmarks/seed_bench_2_plus/seed_bench_2_plus_adapter.py +72 -0
  288. evalscope/benchmarks/simple_qa/__init__.py +0 -0
  289. evalscope/benchmarks/simple_qa/simple_qa_adapter.py +169 -0
  290. evalscope/benchmarks/simple_vqa/__init__.py +0 -0
  291. evalscope/benchmarks/simple_vqa/simple_vqa_adapter.py +169 -0
  292. evalscope/benchmarks/siqa/__init__.py +0 -0
  293. evalscope/benchmarks/siqa/siqa_adapter.py +39 -0
  294. evalscope/benchmarks/super_gpqa/__init__.py +0 -0
  295. evalscope/benchmarks/super_gpqa/prompt.py +88 -0
  296. evalscope/benchmarks/super_gpqa/super_gpqa_adapter.py +165 -0
  297. evalscope/benchmarks/super_gpqa/utils.py +86 -0
  298. evalscope/benchmarks/tau_bench/__init__.py +0 -0
  299. evalscope/benchmarks/tau_bench/tau2_bench/__init__.py +0 -0
  300. evalscope/benchmarks/tau_bench/tau2_bench/generation.py +158 -0
  301. evalscope/benchmarks/tau_bench/tau2_bench/tau2_bench_adapter.py +146 -0
  302. evalscope/benchmarks/tau_bench/tau_bench/__init__.py +0 -0
  303. evalscope/benchmarks/tau_bench/tau_bench/generation.py +147 -0
  304. evalscope/benchmarks/tau_bench/tau_bench/tau_bench_adapter.py +168 -0
  305. evalscope/benchmarks/text2image/__init__.py +0 -0
  306. evalscope/benchmarks/text2image/evalmuse_adapter.py +78 -0
  307. evalscope/benchmarks/text2image/genai_bench_adapter.py +53 -0
  308. evalscope/benchmarks/text2image/general_t2i_adapter.py +42 -0
  309. evalscope/benchmarks/text2image/hpdv2_adapter.py +52 -0
  310. evalscope/benchmarks/text2image/tifa_adapter.py +27 -0
  311. evalscope/benchmarks/tool_bench/__init__.py +0 -0
  312. evalscope/benchmarks/tool_bench/tool_bench_adapter.py +102 -0
  313. evalscope/benchmarks/tool_bench/utils.py +203 -0
  314. evalscope/benchmarks/trivia_qa/trivia_qa_adapter.py +56 -118
  315. evalscope/benchmarks/truthful_qa/truthful_qa_adapter.py +70 -270
  316. evalscope/benchmarks/visu_logic/__init__.py +0 -0
  317. evalscope/benchmarks/visu_logic/visu_logic_adapter.py +75 -0
  318. evalscope/benchmarks/winogrande/__init__.py +0 -0
  319. evalscope/benchmarks/winogrande/winogrande_adapter.py +34 -0
  320. evalscope/benchmarks/wmt/__init__.py +0 -0
  321. evalscope/benchmarks/wmt/wmt24_adapter.py +294 -0
  322. evalscope/benchmarks/zerobench/__init__.py +0 -0
  323. evalscope/benchmarks/zerobench/zerobench_adapter.py +64 -0
  324. evalscope/cli/cli.py +2 -0
  325. evalscope/cli/start_app.py +12 -2
  326. evalscope/cli/start_eval.py +4 -3
  327. evalscope/cli/start_perf.py +10 -2
  328. evalscope/cli/start_server.py +6 -3
  329. evalscope/collections/__init__.py +27 -3
  330. evalscope/collections/sampler.py +12 -11
  331. evalscope/collections/schema.py +13 -12
  332. evalscope/config.py +218 -147
  333. evalscope/constants.py +78 -82
  334. evalscope/evaluator/__init__.py +1 -1
  335. evalscope/evaluator/evaluator.py +334 -318
  336. evalscope/filters/__init__.py +2 -0
  337. evalscope/filters/extraction.py +126 -0
  338. evalscope/filters/selection.py +57 -0
  339. evalscope/metrics/__init__.py +59 -3
  340. evalscope/metrics/bert_score/__init__.py +0 -0
  341. evalscope/metrics/bert_score/scorer.py +338 -0
  342. evalscope/metrics/bert_score/utils.py +697 -0
  343. evalscope/metrics/bundled_rouge_score/rouge_scorer.py +20 -15
  344. evalscope/metrics/llm_judge.py +211 -0
  345. evalscope/metrics/math_parser.py +545 -0
  346. evalscope/metrics/metric.py +611 -0
  347. evalscope/metrics/metrics.py +112 -23
  348. evalscope/metrics/rouge_metric.py +11 -13
  349. evalscope/metrics/t2v_metrics/__init__.py +0 -0
  350. evalscope/metrics/t2v_metrics/clipscore.py +14 -0
  351. evalscope/metrics/t2v_metrics/constants.py +12 -0
  352. evalscope/metrics/t2v_metrics/itmscore.py +14 -0
  353. evalscope/metrics/t2v_metrics/models/__init__.py +0 -0
  354. evalscope/metrics/t2v_metrics/models/clipscore_models/__init__.py +30 -0
  355. evalscope/metrics/t2v_metrics/models/clipscore_models/build_mps_model/__init__.py +0 -0
  356. evalscope/metrics/t2v_metrics/models/clipscore_models/build_mps_model/base_model.py +6 -0
  357. evalscope/metrics/t2v_metrics/models/clipscore_models/build_mps_model/clip_model.py +134 -0
  358. evalscope/metrics/t2v_metrics/models/clipscore_models/build_mps_model/cross_modeling.py +282 -0
  359. evalscope/metrics/t2v_metrics/models/clipscore_models/clip_model.py +115 -0
  360. evalscope/metrics/t2v_metrics/models/clipscore_models/hpsv2_model.py +87 -0
  361. evalscope/metrics/t2v_metrics/models/clipscore_models/mps_model.py +86 -0
  362. evalscope/metrics/t2v_metrics/models/clipscore_models/pickscore_model.py +62 -0
  363. evalscope/metrics/t2v_metrics/models/itmscore_models/__init__.py +26 -0
  364. evalscope/metrics/t2v_metrics/models/itmscore_models/blip2_itm_model.py +85 -0
  365. evalscope/metrics/t2v_metrics/models/itmscore_models/fga_blip2_model.py +99 -0
  366. evalscope/metrics/t2v_metrics/models/itmscore_models/image_reward/ImageReward.py +176 -0
  367. evalscope/metrics/t2v_metrics/models/itmscore_models/image_reward/__init__.py +0 -0
  368. evalscope/metrics/t2v_metrics/models/itmscore_models/image_reward/blip_pretrain.py +82 -0
  369. evalscope/metrics/t2v_metrics/models/itmscore_models/image_reward_model.py +74 -0
  370. evalscope/metrics/t2v_metrics/models/model.py +45 -0
  371. evalscope/metrics/t2v_metrics/models/utils.py +25 -0
  372. evalscope/metrics/t2v_metrics/models/vqascore_models/__init__.py +22 -0
  373. evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/__init__.py +0 -0
  374. evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/model/__init__.py +1 -0
  375. evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/model/language_model/clip_t5.py +306 -0
  376. evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/model/multimodal_encoder/builder.py +12 -0
  377. evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/model/multimodal_encoder/clip_encoder.py +84 -0
  378. evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/model/multimodal_projector/builder.py +50 -0
  379. evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5_model.py +223 -0
  380. evalscope/metrics/t2v_metrics/models/vqascore_models/gpt4v_model.py +153 -0
  381. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/__init__.py +26 -0
  382. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/config.py +465 -0
  383. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/dist_utils.py +141 -0
  384. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/gradcam.py +24 -0
  385. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/logger.py +190 -0
  386. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/optims.py +100 -0
  387. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/registry.py +313 -0
  388. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/utils.py +416 -0
  389. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/__init__.py +8 -0
  390. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/vqa.py +192 -0
  391. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/vqa_eval.py +320 -0
  392. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/default.yaml +10 -0
  393. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_caption_flant5xl.yaml +42 -0
  394. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_caption_opt2.7b.yaml +42 -0
  395. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_caption_opt6.7b.yaml +42 -0
  396. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_coco.yaml +36 -0
  397. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_instruct_flant5xl.yaml +43 -0
  398. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_instruct_flant5xxl.yaml +43 -0
  399. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_instruct_vicuna13b.yaml +43 -0
  400. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml +43 -0
  401. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain.yaml +36 -0
  402. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_flant5xl.yaml +42 -0
  403. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_flant5xl_iter_80k_total_100k_no_prefix.yaml +42 -0
  404. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_flant5xl_iter_80k_total_100k_prefix.yaml +42 -0
  405. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_flant5xl_vitL.yaml +43 -0
  406. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_flant5xxl.yaml +42 -0
  407. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_opt2.7b.yaml +42 -0
  408. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_opt6.7b.yaml +42 -0
  409. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_vitL.yaml +37 -0
  410. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_vicuna13b.yaml +43 -0
  411. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_vicuna7b.yaml +43 -0
  412. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/med_config.json +21 -0
  413. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/med_config_albef.json +22 -0
  414. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/med_large_config.json +21 -0
  415. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/__init__.py +212 -0
  416. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/base_model.py +231 -0
  417. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/Qformer.py +1111 -0
  418. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/__init__.py +0 -0
  419. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2.py +211 -0
  420. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2_image_text_matching.py +109 -0
  421. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2_qformer.py +457 -0
  422. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2_t5.py +370 -0
  423. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2_t5_instruct.py +765 -0
  424. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/fga_blip2.py +274 -0
  425. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/modeling_llama.py +896 -0
  426. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/modeling_t5.py +1876 -0
  427. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/__init__.py +83 -0
  428. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip.py +58 -0
  429. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_caption.py +212 -0
  430. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_classification.py +164 -0
  431. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_feature_extractor.py +202 -0
  432. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_image_text_matching.py +187 -0
  433. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_nlvr.py +179 -0
  434. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_outputs.py +115 -0
  435. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_pretrain.py +371 -0
  436. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_vqa.py +348 -0
  437. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/nlvr_encoder.py +870 -0
  438. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/clip_vit.py +273 -0
  439. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/eva_vit.py +514 -0
  440. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/med.py +1291 -0
  441. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/vit.py +476 -0
  442. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/processors/__init__.py +35 -0
  443. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/processors/base_processor.py +27 -0
  444. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/processors/blip_processors.py +233 -0
  445. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/processors/randaugment.py +393 -0
  446. evalscope/metrics/t2v_metrics/models/vqascore_models/mm_utils.py +129 -0
  447. evalscope/metrics/t2v_metrics/models/vqascore_models/vqa_model.py +18 -0
  448. evalscope/metrics/t2v_metrics/score.py +78 -0
  449. evalscope/metrics/t2v_metrics/vqascore.py +14 -0
  450. evalscope/models/__init__.py +23 -13
  451. evalscope/models/image_edit_model.py +125 -0
  452. evalscope/models/mockllm.py +65 -0
  453. evalscope/models/model_apis.py +69 -0
  454. evalscope/models/modelscope.py +455 -0
  455. evalscope/models/openai_compatible.py +144 -0
  456. evalscope/models/text2image_model.py +124 -0
  457. evalscope/models/utils/openai.py +708 -0
  458. evalscope/perf/__init__.py +0 -1
  459. evalscope/perf/arguments.py +103 -69
  460. evalscope/perf/benchmark.py +114 -163
  461. evalscope/perf/http_client.py +59 -89
  462. evalscope/perf/main.py +91 -18
  463. evalscope/perf/plugin/__init__.py +3 -2
  464. evalscope/perf/plugin/api/__init__.py +4 -3
  465. evalscope/perf/plugin/api/base.py +27 -7
  466. evalscope/perf/plugin/api/custom_api.py +170 -57
  467. evalscope/perf/plugin/api/dashscope_api.py +4 -10
  468. evalscope/perf/plugin/api/default_api.py +214 -0
  469. evalscope/perf/plugin/api/openai_api.py +120 -41
  470. evalscope/perf/plugin/datasets/__init__.py +10 -6
  471. evalscope/perf/plugin/datasets/base.py +43 -1
  472. evalscope/perf/plugin/datasets/custom.py +22 -3
  473. evalscope/perf/plugin/datasets/flickr8k.py +5 -27
  474. evalscope/perf/plugin/datasets/kontext_bench.py +28 -0
  475. evalscope/perf/plugin/datasets/line_by_line.py +7 -3
  476. evalscope/perf/plugin/datasets/longalpaca.py +7 -3
  477. evalscope/perf/plugin/datasets/openqa.py +13 -14
  478. evalscope/perf/plugin/datasets/random_dataset.py +67 -0
  479. evalscope/perf/plugin/datasets/random_vl_dataset.py +80 -0
  480. evalscope/perf/plugin/datasets/speed_benchmark.py +11 -0
  481. evalscope/perf/plugin/registry.py +36 -16
  482. evalscope/perf/utils/analysis_result.py +24 -23
  483. evalscope/perf/utils/benchmark_util.py +95 -55
  484. evalscope/perf/utils/db_util.py +115 -78
  485. evalscope/perf/utils/local_server.py +12 -47
  486. evalscope/perf/utils/log_utils.py +63 -0
  487. evalscope/perf/utils/rich_display.py +192 -0
  488. evalscope/report/__init__.py +46 -3
  489. evalscope/report/combinator.py +143 -32
  490. evalscope/report/generator.py +74 -34
  491. evalscope/report/report.py +238 -0
  492. evalscope/run.py +71 -46
  493. evalscope/summarizer.py +5 -5
  494. evalscope/third_party/longbench_write/infer.py +1 -1
  495. evalscope/third_party/thinkbench/__init__.py +3 -0
  496. evalscope/third_party/thinkbench/eval.py +441 -0
  497. evalscope/third_party/thinkbench/infer.py +130 -0
  498. evalscope/third_party/thinkbench/resources/critique_template.txt +17 -0
  499. evalscope/third_party/thinkbench/resources/reformat_template.txt +31 -0
  500. evalscope/third_party/thinkbench/tools/__init__.py +0 -0
  501. evalscope/third_party/thinkbench/tools/llm.py +48 -0
  502. evalscope/third_party/thinkbench/tools/utils.py +13 -0
  503. evalscope/third_party/toolbench_static/llm/swift_infer.py +46 -20
  504. evalscope/third_party/toolbench_static/toolbench_static.py +2 -1
  505. evalscope/utils/__init__.py +82 -2
  506. evalscope/utils/argument_utils.py +64 -0
  507. evalscope/utils/chat_service.py +8 -6
  508. evalscope/utils/deprecation_utils.py +53 -0
  509. evalscope/utils/function_utils.py +266 -0
  510. evalscope/utils/import_utils.py +154 -0
  511. evalscope/utils/io_utils.py +336 -8
  512. evalscope/utils/json_schema.py +231 -0
  513. evalscope/utils/logger.py +121 -31
  514. evalscope/utils/model_utils.py +57 -1
  515. evalscope/utils/multi_choices.py +303 -0
  516. evalscope/utils/ner.py +377 -0
  517. evalscope/utils/url_utils.py +65 -0
  518. evalscope/version.py +2 -2
  519. evalscope-1.2.0.dist-info/METADATA +553 -0
  520. evalscope-1.2.0.dist-info/RECORD +628 -0
  521. {evalscope-0.10.0.dist-info → evalscope-1.2.0.dist-info}/WHEEL +1 -1
  522. {evalscope-0.10.0.dist-info → evalscope-1.2.0.dist-info}/top_level.txt +0 -1
  523. evalscope/backend/vlm_eval_kit/custom_dataset.py +0 -46
  524. evalscope/benchmarks/arc/ai2_arc.py +0 -151
  525. evalscope/benchmarks/benchmark.py +0 -76
  526. evalscope/benchmarks/ceval/ceval_exam.py +0 -146
  527. evalscope/benchmarks/ceval/samples.jsonl +0 -1
  528. evalscope/benchmarks/cmmlu/cmmlu.py +0 -161
  529. evalscope/benchmarks/cmmlu/samples.jsonl +0 -5
  530. evalscope/benchmarks/competition_math/competition_math.py +0 -79
  531. evalscope/benchmarks/data_adapter.py +0 -291
  532. evalscope/benchmarks/gsm8k/gsm8k.py +0 -121
  533. evalscope/benchmarks/hellaswag/hellaswag.py +0 -112
  534. evalscope/benchmarks/humaneval/humaneval.py +0 -79
  535. evalscope/benchmarks/mmlu/mmlu.py +0 -160
  536. evalscope/benchmarks/mmlu/samples.jsonl +0 -5
  537. evalscope/benchmarks/race/race.py +0 -104
  538. evalscope/benchmarks/race/samples.jsonl +0 -5
  539. evalscope/benchmarks/trivia_qa/trivia_qa.py +0 -89
  540. evalscope/benchmarks/truthful_qa/truthful_qa.py +0 -163
  541. evalscope/collections/evaluator.py +0 -198
  542. evalscope/evaluator/rating_eval.py +0 -157
  543. evalscope/evaluator/reviewer/__init__.py +0 -1
  544. evalscope/evaluator/reviewer/auto_reviewer.py +0 -391
  545. evalscope/metrics/code_metric.py +0 -98
  546. evalscope/metrics/named_metrics.py +0 -17
  547. evalscope/metrics/resources/gpt2-zhcn3-v4.bpe +0 -58485
  548. evalscope/metrics/resources/gpt2-zhcn3-v4.json +0 -1
  549. evalscope/models/base_adapter.py +0 -52
  550. evalscope/models/chat_adapter.py +0 -138
  551. evalscope/models/choice_adapter.py +0 -211
  552. evalscope/models/custom/__init__.py +0 -3
  553. evalscope/models/custom/custom_model.py +0 -53
  554. evalscope/models/custom/dummy_model.py +0 -63
  555. evalscope/models/custom_adapter.py +0 -67
  556. evalscope/models/local_model.py +0 -74
  557. evalscope/models/model.py +0 -229
  558. evalscope/models/server_adapter.py +0 -111
  559. evalscope/registry/__init__.py +0 -1
  560. evalscope/registry/config/cfg_arena.yaml +0 -77
  561. evalscope/registry/config/cfg_arena_zhihu.yaml +0 -63
  562. evalscope/registry/config/cfg_pairwise_baseline.yaml +0 -83
  563. evalscope/registry/config/cfg_single.yaml +0 -78
  564. evalscope/registry/data/prompt_template/lmsys_v2.jsonl +0 -8
  565. evalscope/registry/data/prompt_template/prompt_templates.jsonl +0 -8
  566. evalscope/registry/data/qa_browser/battle.jsonl +0 -634
  567. evalscope/registry/data/qa_browser/category_mapping.yaml +0 -10
  568. evalscope/registry/data/question.jsonl +0 -80
  569. evalscope/registry/tasks/arc.yaml +0 -28
  570. evalscope/registry/tasks/bbh.yaml +0 -26
  571. evalscope/registry/tasks/bbh_mini.yaml +0 -26
  572. evalscope/registry/tasks/ceval.yaml +0 -27
  573. evalscope/registry/tasks/ceval_mini.yaml +0 -26
  574. evalscope/registry/tasks/cmmlu.yaml +0 -27
  575. evalscope/registry/tasks/eval_qwen-7b-chat_v100.yaml +0 -28
  576. evalscope/registry/tasks/general_qa.yaml +0 -27
  577. evalscope/registry/tasks/gsm8k.yaml +0 -29
  578. evalscope/registry/tasks/mmlu.yaml +0 -29
  579. evalscope/registry/tasks/mmlu_mini.yaml +0 -27
  580. evalscope/report/app.py +0 -506
  581. evalscope/report/utils.py +0 -133
  582. evalscope/run_arena.py +0 -202
  583. evalscope/utils/arena_utils.py +0 -217
  584. evalscope/utils/completion_parsers.py +0 -82
  585. evalscope/utils/utils.py +0 -301
  586. evalscope-0.10.0.dist-info/METADATA +0 -565
  587. evalscope-0.10.0.dist-info/RECORD +0 -286
  588. tests/__init__.py +0 -1
  589. tests/cli/__init__.py +0 -1
  590. tests/cli/test_collection.py +0 -57
  591. tests/cli/test_run.py +0 -165
  592. tests/perf/__init__.py +0 -1
  593. tests/perf/test_perf.py +0 -101
  594. tests/rag/test_clip_benchmark.py +0 -85
  595. tests/rag/test_mteb.py +0 -138
  596. tests/rag/test_ragas.py +0 -120
  597. tests/swift/__init__.py +0 -1
  598. tests/swift/test_run_swift_eval.py +0 -145
  599. tests/swift/test_run_swift_vlm_eval.py +0 -127
  600. tests/swift/test_run_swift_vlm_jugde_eval.py +0 -156
  601. tests/test_run_all.py +0 -12
  602. tests/vlm/__init__.py +0 -1
  603. tests/vlm/test_vlmeval.py +0 -60
  604. {tests/rag → evalscope/api}/__init__.py +0 -0
  605. {evalscope-0.10.0.dist-info → evalscope-1.2.0.dist-info}/entry_points.txt +0 -0
  606. {evalscope-0.10.0.dist-info → evalscope-1.2.0.dist-info/licenses}/LICENSE +0 -0
evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/med.py
@@ -0,0 +1,1291 @@
+ """
+ Copyright (c) 2022, salesforce.com, inc.
+ All rights reserved.
+ SPDX-License-Identifier: BSD-3-Clause
+ For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
+
+ Based on huggingface code base
+ https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert
+ """
+
+ import math
+ import os
+ import torch
+ import torch.nn.functional as F
+ import torch.utils.checkpoint
+ import warnings
+ from dataclasses import dataclass
+ from torch import Tensor, device, nn
+ from torch.nn import CrossEntropyLoss
+ from transformers import BatchEncoding, PreTrainedTokenizer
+ from transformers.activations import ACT2FN
+ from transformers.file_utils import ModelOutput
+ from transformers.modeling_outputs import (
+     BaseModelOutputWithPastAndCrossAttentions,
+     BaseModelOutputWithPoolingAndCrossAttentions,
+     CausalLMOutputWithCrossAttentions,
+     MaskedLMOutput,
+     MultipleChoiceModelOutput,
+     NextSentencePredictorOutput,
+     QuestionAnsweringModelOutput,
+     SequenceClassifierOutput,
+     TokenClassifierOutput,
+ )
+ from transformers.modeling_utils import PreTrainedModel
+ from transformers.models.bert.configuration_bert import BertConfig
+ from transformers.pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
+ from transformers.utils import logging
+ from typing import Optional, Tuple
+
+ from ..common.utils import get_abs_path
+ from ..models.base_model import BaseEncoder
+
+ logging.set_verbosity_error()
+ logger = logging.get_logger(__name__)
+
+
+ class BertEmbeddings(nn.Module):
+     """Construct the embeddings from word and position embeddings."""
+
+     def __init__(self, config):
+         super().__init__()
+         self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
+         self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
+
+         if config.add_type_embeddings:
+             self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
+
+         # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
+         # any TensorFlow checkpoint file
+         self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+         self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+         # position_ids (1, len position emb) is contiguous in memory and exported when serialized
+         self.register_buffer('position_ids', torch.arange(config.max_position_embeddings).expand((1, -1)))
+         self.position_embedding_type = getattr(config, 'position_embedding_type', 'absolute')
+
+         self.config = config
+
+     def forward(
+         self,
+         input_ids=None,
+         token_type_ids=None,
+         position_ids=None,
+         inputs_embeds=None,
+         past_key_values_length=0,
+     ):
+         if input_ids is not None:
+             input_shape = input_ids.size()
+         else:
+             input_shape = inputs_embeds.size()[:-1]
+
+         seq_length = input_shape[1]
+
+         if position_ids is None:
+             position_ids = self.position_ids[:, past_key_values_length:seq_length + past_key_values_length]
+
+         if inputs_embeds is None:
+             inputs_embeds = self.word_embeddings(input_ids)
+
+         if token_type_ids is not None:
+             token_type_embeddings = self.token_type_embeddings(token_type_ids)
+
+             embeddings = inputs_embeds + token_type_embeddings
+         else:
+             embeddings = inputs_embeds
+
+         if self.position_embedding_type == 'absolute':
+             position_embeddings = self.position_embeddings(position_ids)
+             embeddings += position_embeddings
+         embeddings = self.LayerNorm(embeddings)
+         embeddings = self.dropout(embeddings)
+         return embeddings
+
+
+ class BertSelfAttention(nn.Module):
+
+     def __init__(self, config, is_cross_attention):
+         super().__init__()
+         self.config = config
+         if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, 'embedding_size'):
+             raise ValueError(
+                 'The hidden size (%d) is not a multiple of the number of attention '
+                 'heads (%d)' % (config.hidden_size, config.num_attention_heads)
+             )
+
+         self.num_attention_heads = config.num_attention_heads
+         self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
+         self.all_head_size = self.num_attention_heads * self.attention_head_size
+
+         self.query = nn.Linear(config.hidden_size, self.all_head_size)
+         if is_cross_attention:
+             self.key = nn.Linear(config.encoder_width, self.all_head_size)
+             self.value = nn.Linear(config.encoder_width, self.all_head_size)
+         else:
+             self.key = nn.Linear(config.hidden_size, self.all_head_size)
+             self.value = nn.Linear(config.hidden_size, self.all_head_size)
+
+         self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
+         self.position_embedding_type = getattr(config, 'position_embedding_type', 'absolute')
+         if (self.position_embedding_type == 'relative_key' or self.position_embedding_type == 'relative_key_query'):
+             self.max_position_embeddings = config.max_position_embeddings
+             self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
+         self.save_attention = False
+
+     def save_attn_gradients(self, attn_gradients):
+         self.attn_gradients = attn_gradients
+
+     def get_attn_gradients(self):
+         return self.attn_gradients
+
+     def save_attention_map(self, attention_map):
+         self.attention_map = attention_map
+
+     def get_attention_map(self):
+         return self.attention_map
+
+     def transpose_for_scores(self, x):
+         new_x_shape = x.size()[:-1] + (
+             self.num_attention_heads,
+             self.attention_head_size,
+         )
+         x = x.view(*new_x_shape)
+         return x.permute(0, 2, 1, 3)
+
+     def forward(
+         self,
+         hidden_states,
+         attention_mask=None,
+         head_mask=None,
+         encoder_hidden_states=None,
+         encoder_attention_mask=None,
+         past_key_value=None,
+         output_attentions=False,
+     ):
+         mixed_query_layer = self.query(hidden_states)
+
+         # If this is instantiated as a cross-attention module, the keys
+         # and values come from an encoder; the attention mask needs to be
+         # such that the encoder's padding tokens are not attended to.
+         is_cross_attention = encoder_hidden_states is not None
+
+         if is_cross_attention:
+             key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
+             value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
+             attention_mask = encoder_attention_mask
+         elif past_key_value is not None:
+             key_layer = self.transpose_for_scores(self.key(hidden_states))
+             value_layer = self.transpose_for_scores(self.value(hidden_states))
+             key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
+             value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
+         else:
+             key_layer = self.transpose_for_scores(self.key(hidden_states))
+             value_layer = self.transpose_for_scores(self.value(hidden_states))
+
+         query_layer = self.transpose_for_scores(mixed_query_layer)
+
+         past_key_value = (key_layer, value_layer)
+
+         # Take the dot product between "query" and "key" to get the raw attention scores.
+         attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
+
+         if (self.position_embedding_type == 'relative_key' or self.position_embedding_type == 'relative_key_query'):
+             seq_length = hidden_states.size()[1]
+             position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
+             position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
+             distance = position_ids_l - position_ids_r
+             positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
+             positional_embedding = positional_embedding.to(dtype=query_layer.dtype)  # fp16 compatibility
+
+             if self.position_embedding_type == 'relative_key':
+                 relative_position_scores = torch.einsum('bhld,lrd->bhlr', query_layer, positional_embedding)
+                 attention_scores = attention_scores + relative_position_scores
+             elif self.position_embedding_type == 'relative_key_query':
+                 relative_position_scores_query = torch.einsum('bhld,lrd->bhlr', query_layer, positional_embedding)
+                 relative_position_scores_key = torch.einsum('bhrd,lrd->bhlr', key_layer, positional_embedding)
+                 attention_scores = (attention_scores + relative_position_scores_query + relative_position_scores_key)
+
+         attention_scores = attention_scores / math.sqrt(self.attention_head_size)
+         if attention_mask is not None:
+             # Apply the attention mask is (precomputed for all layers in BertModel forward() function)
+             attention_scores = attention_scores + attention_mask
+
+         # Normalize the attention scores to probabilities.
+         attention_probs = nn.Softmax(dim=-1)(attention_scores)
+
+         if is_cross_attention and self.save_attention:
+             self.save_attention_map(attention_probs)
+             attention_probs.register_hook(self.save_attn_gradients)
+
+         # This is actually dropping out entire tokens to attend to, which might
+         # seem a bit unusual, but is taken from the original Transformer paper.
+         attention_probs_dropped = self.dropout(attention_probs)
+
+         # Mask heads if we want to
+         if head_mask is not None:
+             attention_probs_dropped = attention_probs_dropped * head_mask
+
+         context_layer = torch.matmul(attention_probs_dropped, value_layer)
+
+         context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
+         new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size, )
+         context_layer = context_layer.view(*new_context_layer_shape)
+
+         outputs = ((context_layer, attention_probs) if output_attentions else (context_layer, ))
+
+         outputs = outputs + (past_key_value, )
+         return outputs
+
+
+ class BertSelfOutput(nn.Module):
+
+     def __init__(self, config):
+         super().__init__()
+         self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+         self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+         self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+     def forward(self, hidden_states, input_tensor):
+         hidden_states = self.dense(hidden_states)
+         hidden_states = self.dropout(hidden_states)
+         hidden_states = self.LayerNorm(hidden_states + input_tensor)
+         return hidden_states
+
+
+ class BertAttention(nn.Module):
+
+     def __init__(self, config, is_cross_attention=False):
+         super().__init__()
+         self.self = BertSelfAttention(config, is_cross_attention)
+         self.output = BertSelfOutput(config)
+         self.pruned_heads = set()
+
+     def prune_heads(self, heads):
+         if len(heads) == 0:
+             return
+         heads, index = find_pruneable_heads_and_indices(
+             heads,
+             self.self.num_attention_heads,
+             self.self.attention_head_size,
+             self.pruned_heads,
+         )
+
+         # Prune linear layers
+         self.self.query = prune_linear_layer(self.self.query, index)
+         self.self.key = prune_linear_layer(self.self.key, index)
+         self.self.value = prune_linear_layer(self.self.value, index)
+         self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
+
+         # Update hyper params and store pruned heads
+         self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
+         self.self.all_head_size = (self.self.attention_head_size * self.self.num_attention_heads)
+         self.pruned_heads = self.pruned_heads.union(heads)
+
+     def forward(
+         self,
+         hidden_states,
+         attention_mask=None,
+         head_mask=None,
+         encoder_hidden_states=None,
+         encoder_attention_mask=None,
+         past_key_value=None,
+         output_attentions=False,
+     ):
+         self_outputs = self.self(
+             hidden_states,
+             attention_mask,
+             head_mask,
+             encoder_hidden_states,
+             encoder_attention_mask,
+             past_key_value,
+             output_attentions,
+         )
+         attention_output = self.output(self_outputs[0], hidden_states)
+         outputs = (attention_output, ) + self_outputs[1:]  # add attentions if we output them
+         return outputs
+
+
+ class BertIntermediate(nn.Module):
+
+     def __init__(self, config):
+         super().__init__()
+         self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
+         if isinstance(config.hidden_act, str):
+             self.intermediate_act_fn = ACT2FN[config.hidden_act]
+         else:
+             self.intermediate_act_fn = config.hidden_act
+
+     def forward(self, hidden_states):
+         hidden_states = self.dense(hidden_states)
+         hidden_states = self.intermediate_act_fn(hidden_states)
+         return hidden_states
+
+
+ class BertOutput(nn.Module):
+
+     def __init__(self, config):
+         super().__init__()
+         self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
+         self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+         self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+     def forward(self, hidden_states, input_tensor):
+         hidden_states = self.dense(hidden_states)
+         hidden_states = self.dropout(hidden_states)
+         hidden_states = self.LayerNorm(hidden_states + input_tensor)
+         return hidden_states
+
+
+ class BertLayer(nn.Module):
+
+     def __init__(self, config, layer_num):
+         super().__init__()
+         self.config = config
+         self.chunk_size_feed_forward = config.chunk_size_feed_forward
+         self.seq_len_dim = 1
+         self.attention = BertAttention(config)
+         self.layer_num = layer_num
+
+         # compatibility for ALBEF and BLIP
+         try:
+             # ALBEF & ALPRO
+             fusion_layer = self.config.fusion_layer
+             add_cross_attention = (fusion_layer <= layer_num and self.config.add_cross_attention)
+
+             self.fusion_layer = fusion_layer
+         except AttributeError:
+             # BLIP
+             self.fusion_layer = self.config.num_hidden_layers
+             add_cross_attention = self.config.add_cross_attention
+
+         # if self.config.add_cross_attention:
+         if add_cross_attention:
+             self.crossattention = BertAttention(config, is_cross_attention=self.config.add_cross_attention)
+         self.intermediate = BertIntermediate(config)
+         self.output = BertOutput(config)
+
+     def forward(
+         self,
+         hidden_states,
+         attention_mask=None,
+         head_mask=None,
+         encoder_hidden_states=None,
+         encoder_attention_mask=None,
+         past_key_value=None,
+         output_attentions=False,
+         mode=None,
+     ):
+         # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
+         self_attn_past_key_value = (past_key_value[:2] if past_key_value is not None else None)
+         self_attention_outputs = self.attention(
+             hidden_states,
+             attention_mask,
+             head_mask,
+             output_attentions=output_attentions,
+             past_key_value=self_attn_past_key_value,
+         )
+         attention_output = self_attention_outputs[0]
+
+         outputs = self_attention_outputs[1:-1]
+         present_key_value = self_attention_outputs[-1]
+
+         # TODO line 482 in albef/models/xbert.py
+         # compatibility for ALBEF and BLIP
+         if mode in ['multimodal', 'fusion'] and hasattr(self, 'crossattention'):
+             assert (encoder_hidden_states is not None), 'encoder_hidden_states must be given for cross-attention layers'
+
+             if isinstance(encoder_hidden_states, list):
+                 cross_attention_outputs = self.crossattention(
+                     attention_output,
+                     attention_mask,
+                     head_mask,
+                     encoder_hidden_states[(self.layer_num - self.fusion_layer) % len(encoder_hidden_states)],
+                     encoder_attention_mask[(self.layer_num - self.fusion_layer) % len(encoder_hidden_states)],
+                     output_attentions=output_attentions,
+                 )
+                 attention_output = cross_attention_outputs[0]
+                 outputs = outputs + cross_attention_outputs[1:-1]
+
+             else:
+                 cross_attention_outputs = self.crossattention(
+                     attention_output,
+                     attention_mask,
+                     head_mask,
+                     encoder_hidden_states,
+                     encoder_attention_mask,
+                     output_attentions=output_attentions,
+                 )
+                 attention_output = cross_attention_outputs[0]
+                 outputs = (
+                     outputs + cross_attention_outputs[1:-1]
+                 )  # add cross attentions if we output attention weights
+         layer_output = apply_chunking_to_forward(
+             self.feed_forward_chunk,
+             self.chunk_size_feed_forward,
+             self.seq_len_dim,
+             attention_output,
+         )
+         outputs = (layer_output, ) + outputs
+
+         outputs = outputs + (present_key_value, )
+
+         return outputs
+
+     def feed_forward_chunk(self, attention_output):
+         intermediate_output = self.intermediate(attention_output)
+         layer_output = self.output(intermediate_output, attention_output)
+         return layer_output
+
+
440
+ class BertEncoder(nn.Module):
441
+
442
+ def __init__(self, config):
443
+ super().__init__()
444
+ self.config = config
445
+ self.layer = nn.ModuleList([BertLayer(config, i) for i in range(config.num_hidden_layers)])
446
+ self.gradient_checkpointing = False
447
+
448
+ def forward(
449
+ self,
450
+ hidden_states,
451
+ attention_mask=None,
452
+ head_mask=None,
453
+ encoder_hidden_states=None,
454
+ encoder_attention_mask=None,
455
+ past_key_values=None,
456
+ use_cache=None,
457
+ output_attentions=False,
458
+ output_hidden_states=False,
459
+ return_dict=True,
460
+ mode='multimodal',
461
+ ):
462
+ all_hidden_states = () if output_hidden_states else None
463
+ all_self_attentions = () if output_attentions else None
464
+ all_cross_attentions = (() if output_attentions and self.config.add_cross_attention else None)
465
+
466
+ next_decoder_cache = () if use_cache else None
467
+
468
+ try:
469
+ # ALBEF
470
+ fusion_layer = self.config.fusion_layer
471
+ except AttributeError:
472
+ # BLIP
473
+ fusion_layer = self.config.num_hidden_layers
474
+
475
+ if mode == 'text':
476
+ start_layer = 0
477
+ # output_layer = self.config.fusion_layer
478
+ output_layer = fusion_layer
479
+
480
+ elif mode == 'fusion':
481
+ # start_layer = self.config.fusion_layer
482
+ start_layer = fusion_layer
483
+ output_layer = self.config.num_hidden_layers
484
+
485
+ elif mode == 'multimodal':
486
+ start_layer = 0
487
+ output_layer = self.config.num_hidden_layers
488
+
489
+ # compatibility for ALBEF and BLIP
490
+ # for i in range(self.config.num_hidden_layers):
491
+ for i in range(start_layer, output_layer):
492
+ layer_module = self.layer[i]
493
+ if output_hidden_states:
494
+ all_hidden_states = all_hidden_states + (hidden_states, )
495
+
496
+ layer_head_mask = head_mask[i] if head_mask is not None else None
497
+ past_key_value = past_key_values[i] if past_key_values is not None else None
498
+
499
+ # TODO pay attention to this.
500
+ if self.gradient_checkpointing and self.training:
501
+
502
+ if use_cache:
503
+ logger.warning(
504
+ '`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...'
505
+ )
506
+ use_cache = False
507
+
508
+ def create_custom_forward(module):
509
+
510
+ def custom_forward(*inputs):
511
+ return module(*inputs, past_key_value, output_attentions)
512
+
513
+ return custom_forward
514
+
515
+ layer_outputs = torch.utils.checkpoint.checkpoint(
516
+ create_custom_forward(layer_module),
517
+ hidden_states,
518
+ attention_mask,
519
+ layer_head_mask,
520
+ encoder_hidden_states,
521
+ encoder_attention_mask,
522
+ mode=mode,
523
+ )
524
+ else:
525
+ layer_outputs = layer_module(
526
+ hidden_states,
527
+ attention_mask,
528
+ layer_head_mask,
529
+ encoder_hidden_states,
530
+ encoder_attention_mask,
531
+ past_key_value,
532
+ output_attentions,
533
+ mode=mode,
534
+ )
535
+
536
+ hidden_states = layer_outputs[0]
537
+ if use_cache:
538
+ next_decoder_cache += (layer_outputs[-1], )
539
+ if output_attentions:
540
+ all_self_attentions = all_self_attentions + (layer_outputs[1], )
541
+
542
+ if output_hidden_states:
543
+ all_hidden_states = all_hidden_states + (hidden_states, )
544
+
545
+ if not return_dict:
546
+ return tuple(
547
+ v for v in [
548
+ hidden_states,
549
+ next_decoder_cache,
550
+ all_hidden_states,
551
+ all_self_attentions,
552
+ all_cross_attentions,
553
+ ] if v is not None
554
+ )
555
+ return BaseModelOutputWithPastAndCrossAttentions(
556
+ last_hidden_state=hidden_states,
557
+ past_key_values=next_decoder_cache,
558
+ hidden_states=all_hidden_states,
559
+ attentions=all_self_attentions,
560
+ cross_attentions=all_cross_attentions,
561
+ )
562
+
563
+
564
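
    The `mode` argument handled above selects a contiguous slice of layers: 'text' runs layers [0, fusion_layer), 'fusion' runs [fusion_layer, num_hidden_layers), and 'multimodal' runs them all. A small sketch of that selection with an illustrative 12-layer, fusion_layer=6 setup (values are hypothetical):

        def layer_range(mode, fusion_layer, num_hidden_layers):
            # mirrors the start_layer / output_layer selection in BertEncoder.forward above
            if mode == 'text':
                return 0, fusion_layer
            if mode == 'fusion':
                return fusion_layer, num_hidden_layers
            return 0, num_hidden_layers  # 'multimodal'

        assert layer_range('text', 6, 12) == (0, 6)
        assert layer_range('fusion', 6, 12) == (6, 12)
        assert layer_range('multimodal', 6, 12) == (0, 12)
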
+ class BertPooler(nn.Module):
565
+
566
+ def __init__(self, config):
567
+ super().__init__()
568
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
569
+ self.activation = nn.Tanh()
570
+
571
+ def forward(self, hidden_states):
572
+ # We "pool" the model by simply taking the hidden state corresponding
573
+ # to the first token.
574
+ first_token_tensor = hidden_states[:, 0]
575
+ pooled_output = self.dense(first_token_tensor)
576
+ pooled_output = self.activation(pooled_output)
577
+ return pooled_output
578
+
579
+
580
+ class BertPredictionHeadTransform(nn.Module):
581
+
582
+ def __init__(self, config):
583
+ super().__init__()
584
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
585
+ if isinstance(config.hidden_act, str):
586
+ self.transform_act_fn = ACT2FN[config.hidden_act]
587
+ else:
588
+ self.transform_act_fn = config.hidden_act
589
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
590
+
591
+ def forward(self, hidden_states):
592
+ hidden_states = self.dense(hidden_states)
593
+ hidden_states = self.transform_act_fn(hidden_states)
594
+ hidden_states = self.LayerNorm(hidden_states)
595
+ return hidden_states
596
+
597
+
598
+ class BertLMPredictionHead(nn.Module):
599
+
600
+ def __init__(self, config):
601
+ super().__init__()
602
+ self.transform = BertPredictionHeadTransform(config)
603
+
604
+ # The output weights are the same as the input embeddings, but there is
605
+ # an output-only bias for each token.
606
+ self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
607
+
608
+ self.bias = nn.Parameter(torch.zeros(config.vocab_size))
609
+
610
+ # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
611
+ self.decoder.bias = self.bias
612
+
613
+ def forward(self, hidden_states):
614
+ hidden_states = self.transform(hidden_states)
615
+ hidden_states = self.decoder(hidden_states)
616
+ return hidden_states
617
+
618
+
619
+ class BertOnlyMLMHead(nn.Module):
620
+
621
+ def __init__(self, config):
622
+ super().__init__()
623
+ self.predictions = BertLMPredictionHead(config)
624
+
625
+ def forward(self, sequence_output):
626
+ prediction_scores = self.predictions(sequence_output)
627
+ return prediction_scores
628
+
629
+
630
+ class BertPreTrainedModel(PreTrainedModel):
631
+ """
632
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
633
+ models.
634
+ """
635
+
636
+ config_class = BertConfig
637
+ base_model_prefix = 'bert'
638
+ _keys_to_ignore_on_load_missing = [r'position_ids']
639
+
640
+ def _init_weights(self, module):
641
+ """Initialize the weights"""
642
+ if isinstance(module, (nn.Linear, nn.Embedding)):
643
+ # Slightly different from the TF version which uses truncated_normal for initialization
644
+ # cf https://github.com/pytorch/pytorch/pull/5617
645
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
646
+ elif isinstance(module, nn.LayerNorm):
647
+ module.bias.data.zero_()
648
+ module.weight.data.fill_(1.0)
649
+ if isinstance(module, nn.Linear) and module.bias is not None:
650
+ module.bias.data.zero_()
651
+
652
+
653
+ class BertModel(BertPreTrainedModel):
654
+ """
655
+ The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
656
+ cross-attention is added between the self-attention layers, following the architecture described in `Attention is
657
+ all you need <https://arxiv.org/abs/1706.03762>`__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
658
+ Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
659
+ To behave as a decoder the model needs to be initialized with the :obj:`is_decoder` argument of the configuration set
+ to :obj:`True`. To be used in a Seq2Seq model, the model needs to be initialized with both the :obj:`is_decoder`
+ argument and :obj:`add_cross_attention` set to :obj:`True`; an :obj:`encoder_hidden_states` is then expected as an
660
+ input to the forward pass.
661
+ """
662
+
663
+ def __init__(self, config, add_pooling_layer=True):
664
+ super().__init__(config)
665
+ self.config = config
666
+
667
+ self.embeddings = BertEmbeddings(config)
668
+
669
+ self.encoder = BertEncoder(config)
670
+
671
+ self.pooler = BertPooler(config) if add_pooling_layer else None
672
+
673
+ self.init_weights()
674
+
675
+ def get_input_embeddings(self):
676
+ return self.embeddings.word_embeddings
677
+
678
+ def set_input_embeddings(self, value):
679
+ self.embeddings.word_embeddings = value
680
+
681
+ def _prune_heads(self, heads_to_prune):
682
+ """
683
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
684
+ class PreTrainedModel
685
+ """
686
+ for layer, heads in heads_to_prune.items():
687
+ self.encoder.layer[layer].attention.prune_heads(heads)
688
+
689
+ def get_extended_attention_mask(
690
+ self,
691
+ attention_mask: Tensor,
692
+ input_shape: Tuple[int],
693
+ device: device,
694
+ is_decoder: bool,
695
+ ) -> Tensor:
696
+ """
697
+ Makes broadcastable attention and causal masks so that future and masked tokens are ignored.
698
+
699
+ Arguments:
700
+ attention_mask (:obj:`torch.Tensor`):
701
+ Mask with ones indicating tokens to attend to, zeros for tokens to ignore.
702
+ input_shape (:obj:`Tuple[int]`):
703
+ The shape of the input to the model.
704
+ device (:obj:`torch.device`):
705
+ The device of the input to the model.
706
+
707
+ Returns:
708
+ :obj:`torch.Tensor` The extended attention mask, with the same dtype as :obj:`attention_mask.dtype`.
709
+ """
710
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
711
+ # ourselves in which case we just need to make it broadcastable to all heads.
712
+ if attention_mask.dim() == 3:
713
+ extended_attention_mask = attention_mask[:, None, :, :]
714
+ elif attention_mask.dim() == 2:
715
+ # Provided a padding mask of dimensions [batch_size, seq_length]
716
+ # - if the model is a decoder, apply a causal mask in addition to the padding mask
717
+ # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
718
+ if is_decoder:
719
+ batch_size, seq_length = input_shape
720
+
721
+ seq_ids = torch.arange(seq_length, device=device)
722
+ causal_mask = (seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None])
723
+ # in case past_key_values are used we need to add a prefix ones mask to the causal mask
724
+ # causal and attention masks must have same type with pytorch version < 1.3
725
+ causal_mask = causal_mask.to(attention_mask.dtype)
726
+
727
+ if causal_mask.shape[1] < attention_mask.shape[1]:
728
+ prefix_seq_len = attention_mask.shape[1] - causal_mask.shape[1]
729
+ causal_mask = torch.cat(
730
+ [
731
+ torch.ones(
732
+ (batch_size, seq_length, prefix_seq_len),
733
+ device=device,
734
+ dtype=causal_mask.dtype,
735
+ ),
736
+ causal_mask,
737
+ ],
738
+ axis=-1,
739
+ )
740
+
741
+ extended_attention_mask = (causal_mask[:, None, :, :] * attention_mask[:, None, None, :])
742
+ else:
743
+ extended_attention_mask = attention_mask[:, None, None, :]
744
+ else:
745
+ raise ValueError(
746
+ 'Wrong shape for input_ids (shape {}) or attention_mask (shape {})'.format(
747
+ input_shape, attention_mask.shape
748
+ )
749
+ )
750
+
751
+ # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
752
+ # masked positions, this operation will create a tensor which is 0.0 for
753
+ # positions we want to attend and -10000.0 for masked positions.
754
+ # Since we are adding it to the raw scores before the softmax, this is
755
+ # effectively the same as removing these entirely.
756
+ extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility
757
+ extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
758
+ return extended_attention_mask
759
+
760
+ def forward(
761
+ self,
762
+ input_ids=None,
763
+ attention_mask=None,
764
+ token_type_ids=None,
765
+ position_ids=None,
766
+ head_mask=None,
767
+ inputs_embeds=None,
768
+ encoder_embeds=None,
769
+ encoder_hidden_states=None,
770
+ encoder_attention_mask=None,
771
+ past_key_values=None,
772
+ use_cache=None,
773
+ output_attentions=None,
774
+ output_hidden_states=None,
775
+ return_dict=None,
776
+ is_decoder=False,
777
+ mode='multimodal',
778
+ ):
779
+ r"""
780
+ encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
781
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
782
+ the model is configured as a decoder.
783
+ encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
784
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
785
+ the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
786
+ - 1 for tokens that are **not masked**,
787
+ - 0 for tokens that are **masked**.
788
+ past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
789
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
790
+ If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
791
+ (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
792
+ instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
793
+ use_cache (:obj:`bool`, `optional`):
794
+ If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
795
+ decoding (see :obj:`past_key_values`).
796
+ """
797
+ output_attentions = (output_attentions if output_attentions is not None else self.config.output_attentions)
798
+ output_hidden_states = (
799
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
800
+ )
801
+ return_dict = (return_dict if return_dict is not None else self.config.use_return_dict)
802
+
803
+ if is_decoder:
804
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
805
+ else:
806
+ use_cache = False
807
+
808
+ if input_ids is not None and inputs_embeds is not None:
809
+ raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time')
810
+ elif input_ids is not None:
811
+ input_shape = input_ids.size()
812
+ batch_size, seq_length = input_shape
813
+ device = input_ids.device
814
+ elif inputs_embeds is not None:
815
+ input_shape = inputs_embeds.size()[:-1]
816
+ batch_size, seq_length = input_shape
817
+ device = inputs_embeds.device
818
+ elif encoder_embeds is not None:
819
+ input_shape = encoder_embeds.size()[:-1]
820
+ batch_size, seq_length = input_shape
821
+ device = encoder_embeds.device
822
+ else:
823
+ raise ValueError('You have to specify either input_ids or inputs_embeds or encoder_embeds')
824
+
825
+ # past_key_values_length
826
+ past_key_values_length = (past_key_values[0][0].shape[2] if past_key_values is not None else 0)
827
+
828
+ if attention_mask is None:
829
+ attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)
830
+
831
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
832
+ # ourselves in which case we just need to make it broadcastable to all heads.
833
+ extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(
834
+ attention_mask, input_shape, device, is_decoder
835
+ )
836
+
837
+ # If a 2D or 3D attention mask is provided for the cross-attention
838
+ # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
839
+ if encoder_hidden_states is not None:
840
+ if type(encoder_hidden_states) == list:
841
+ encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states[0].size()
842
+ else:
843
+ (
844
+ encoder_batch_size,
845
+ encoder_sequence_length,
846
+ _,
847
+ ) = encoder_hidden_states.size()
848
+ encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
849
+
850
+ if type(encoder_attention_mask) == list:
851
+ encoder_extended_attention_mask = [self.invert_attention_mask(mask) for mask in encoder_attention_mask]
852
+ elif encoder_attention_mask is None:
853
+ encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
854
+ encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
855
+ else:
856
+ encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
857
+ else:
858
+ encoder_extended_attention_mask = None
859
+
860
+ # Prepare head mask if needed
861
+ # 1.0 in head_mask indicate we keep the head
862
+ # attention_probs has shape bsz x n_heads x N x N
863
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
864
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
865
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
866
+
867
+ if encoder_embeds is None:
868
+ embedding_output = self.embeddings(
869
+ input_ids=input_ids,
870
+ position_ids=position_ids,
871
+ token_type_ids=token_type_ids,
872
+ inputs_embeds=inputs_embeds,
873
+ past_key_values_length=past_key_values_length,
874
+ )
875
+ else:
876
+ embedding_output = encoder_embeds
877
+
878
+ encoder_outputs = self.encoder(
879
+ embedding_output,
880
+ attention_mask=extended_attention_mask,
881
+ head_mask=head_mask,
882
+ encoder_hidden_states=encoder_hidden_states,
883
+ encoder_attention_mask=encoder_extended_attention_mask,
884
+ past_key_values=past_key_values,
885
+ use_cache=use_cache,
886
+ output_attentions=output_attentions,
887
+ output_hidden_states=output_hidden_states,
888
+ return_dict=return_dict,
889
+ mode=mode,
890
+ )
891
+ sequence_output = encoder_outputs[0]
892
+ pooled_output = (self.pooler(sequence_output) if self.pooler is not None else None)
893
+
894
+ if not return_dict:
895
+ return (sequence_output, pooled_output) + encoder_outputs[1:]
896
+
897
+ return BaseModelOutputWithPoolingAndCrossAttentions(
898
+ last_hidden_state=sequence_output,
899
+ pooler_output=pooled_output,
900
+ past_key_values=encoder_outputs.past_key_values,
901
+ hidden_states=encoder_outputs.hidden_states,
902
+ attentions=encoder_outputs.attentions,
903
+ cross_attentions=encoder_outputs.cross_attentions,
904
+ )
905
+
906
+
907
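
    For reference, the mask arithmetic in `get_extended_attention_mask` above converts a 0/1 padding mask into additive attention logits. A minimal sketch of the encoder (non-decoder) path with a toy 3-token sequence (shapes are illustrative):

        import torch

        attention_mask = torch.tensor([[1, 1, 0]])            # [batch, seq]: attend, attend, pad
        extended = attention_mask[:, None, None, :].float()   # [batch, 1, 1, seq], broadcastable over heads
        extended = (1.0 - extended) * -10000.0                # 0.0 where attended, -10000.0 where masked
        # `extended` is added to the raw attention scores before the softmax,
        # so padded positions end up with ~zero attention probability.
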
+ class BertForMaskedLM(BertPreTrainedModel):
908
+
909
+ _keys_to_ignore_on_load_unexpected = [r'pooler']
910
+ _keys_to_ignore_on_load_missing = [r'position_ids', r'predictions.decoder.bias']
911
+
912
+ def __init__(self, config):
913
+ super().__init__(config)
914
+
915
+ self.bert = BertModel(config, add_pooling_layer=False)
916
+ self.cls = BertOnlyMLMHead(config)
917
+
918
+ self.init_weights()
919
+
920
+ def get_output_embeddings(self):
921
+ return self.cls.predictions.decoder
922
+
923
+ def set_output_embeddings(self, new_embeddings):
924
+ self.cls.predictions.decoder = new_embeddings
925
+
926
+ def forward(
927
+ self,
928
+ input_ids=None,
929
+ attention_mask=None,
930
+ # token_type_ids=None,
931
+ position_ids=None,
932
+ head_mask=None,
933
+ inputs_embeds=None,
934
+ encoder_embeds=None,
935
+ encoder_hidden_states=None,
936
+ encoder_attention_mask=None,
937
+ labels=None,
938
+ output_attentions=None,
939
+ output_hidden_states=None,
940
+ return_dict=None,
941
+ is_decoder=False,
942
+ mode='multimodal',
943
+ soft_labels=None,
944
+ alpha=0,
945
+ return_logits=False,
946
+ ):
947
+ r"""
948
+ labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
949
+ Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
950
+ config.vocab_size]`` (see the ``input_ids`` docstring). Tokens with indices set to ``-100`` are ignored
951
+ (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
952
+ """
953
+
954
+ return_dict = (return_dict if return_dict is not None else self.config.use_return_dict)
955
+
956
+ outputs = self.bert(
957
+ input_ids,
958
+ attention_mask=attention_mask,
959
+ # token_type_ids=token_type_ids,
960
+ position_ids=position_ids,
961
+ head_mask=head_mask,
962
+ inputs_embeds=inputs_embeds,
963
+ encoder_embeds=encoder_embeds,
964
+ encoder_hidden_states=encoder_hidden_states,
965
+ encoder_attention_mask=encoder_attention_mask,
966
+ output_attentions=output_attentions,
967
+ output_hidden_states=output_hidden_states,
968
+ return_dict=return_dict,
969
+ is_decoder=is_decoder,
970
+ mode=mode,
971
+ )
972
+
973
+ sequence_output = outputs[0]
974
+ prediction_scores = self.cls(sequence_output)
975
+
976
+ if return_logits:
977
+ return prediction_scores
978
+
979
+ masked_lm_loss = None
980
+ if labels is not None:
981
+ loss_fct = CrossEntropyLoss() # -100 index = padding token
982
+ masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
983
+
984
+ if soft_labels is not None:
985
+ loss_distill = -torch.sum(F.log_softmax(prediction_scores, dim=-1) * soft_labels, dim=-1)
986
+ loss_distill = loss_distill[labels != -100].mean()
987
+ masked_lm_loss = (1 - alpha) * masked_lm_loss + alpha * loss_distill
988
+
989
+ if not return_dict:
990
+ output = (prediction_scores, ) + outputs[2:]
991
+ return (((masked_lm_loss, ) + output) if masked_lm_loss is not None else output)
992
+
993
+ return MaskedLMOutput(
994
+ loss=masked_lm_loss,
995
+ logits=prediction_scores,
996
+ hidden_states=outputs.hidden_states,
997
+ attentions=outputs.attentions,
998
+ )
999
+
1000
+ def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **model_kwargs):
1001
+ input_shape = input_ids.shape
1002
+ effective_batch_size = input_shape[0]
1003
+
1004
+ # add a dummy token
1005
+ assert (self.config.pad_token_id is not None), 'The PAD token should be defined for generation'
1006
+ attention_mask = torch.cat(
1007
+ [attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))],
1008
+ dim=-1,
1009
+ )
1010
+ dummy_token = torch.full(
1011
+ (effective_batch_size, 1),
1012
+ self.config.pad_token_id,
1013
+ dtype=torch.long,
1014
+ device=input_ids.device,
1015
+ )
1016
+ input_ids = torch.cat([input_ids, dummy_token], dim=1)
1017
+
1018
+ return {'input_ids': input_ids, 'attention_mask': attention_mask}
1019
+
1020
+
1021
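
    When `soft_labels` is passed, the masked-LM head above blends the hard-label cross-entropy with a distillation term computed against a teacher distribution. A minimal sketch of that interpolation (tensor shapes and values are illustrative, not from the package):

        import torch
        import torch.nn.functional as F

        def distill_mlm_loss(prediction_scores, labels, soft_labels, alpha, vocab_size):
            # hard-label masked-LM loss; -100 marks positions that carry no masked-token label
            hard = F.cross_entropy(prediction_scores.view(-1, vocab_size), labels.view(-1))
            # distillation term: cross-entropy against the teacher distribution,
            # averaged over the masked positions only
            distill = -torch.sum(F.log_softmax(prediction_scores, dim=-1) * soft_labels, dim=-1)
            distill = distill[labels != -100].mean()
            return (1 - alpha) * hard + alpha * distill

        scores = torch.randn(2, 4, 100)
        labels = torch.full((2, 4), -100)
        labels[:, 1] = 7                                      # only position 1 is a masked token
        soft = torch.softmax(torch.randn(2, 4, 100), dim=-1)
        loss = distill_mlm_loss(scores, labels, soft, alpha=0.4, vocab_size=100)
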
+ class BertLMHeadModel(BertPreTrainedModel):
1022
+
1023
+ _keys_to_ignore_on_load_unexpected = [r'pooler']
1024
+ _keys_to_ignore_on_load_missing = [r'position_ids', r'predictions.decoder.bias']
1025
+
1026
+ def __init__(self, config):
1027
+ super().__init__(config)
1028
+
1029
+ self.bert = BertModel(config, add_pooling_layer=False)
1030
+ self.cls = BertOnlyMLMHead(config)
1031
+
1032
+ self.init_weights()
1033
+
1034
+ def get_output_embeddings(self):
1035
+ return self.cls.predictions.decoder
1036
+
1037
+ def set_output_embeddings(self, new_embeddings):
1038
+ self.cls.predictions.decoder = new_embeddings
1039
+
1040
+ def forward(
1041
+ self,
1042
+ input_ids=None,
1043
+ attention_mask=None,
1044
+ position_ids=None,
1045
+ head_mask=None,
1046
+ inputs_embeds=None,
1047
+ encoder_hidden_states=None,
1048
+ encoder_attention_mask=None,
1049
+ labels=None,
1050
+ past_key_values=None,
1051
+ use_cache=None,
1052
+ output_attentions=None,
1053
+ output_hidden_states=None,
1054
+ return_dict=None,
1055
+ return_logits=False,
1056
+ is_decoder=True,
1057
+ reduction='mean',
1058
+ mode='multimodal',
1059
+ soft_labels=None,
1060
+ alpha=0,
1061
+ ):
1062
+ r"""
1063
+ encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
1064
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
1065
+ the model is configured as a decoder.
1066
+ encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
1067
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
1068
+ the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
1069
+ - 1 for tokens that are **not masked**,
1070
+ - 0 for tokens that are **masked**.
1071
+ labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
1072
+ Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
1073
+ ``[-100, 0, ..., config.vocab_size]`` (see the ``input_ids`` docstring). Tokens with indices set to ``-100`` are
1074
+ ignored (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
1075
+ past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
1076
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
1077
+ If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
1078
+ (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
1079
+ instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
1080
+ use_cache (:obj:`bool`, `optional`):
1081
+ If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
1082
+ decoding (see :obj:`past_key_values`).
1083
+ Returns:
1084
+ Example::
1085
+ >>> from transformers import BertTokenizer, BertLMHeadModel, BertConfig
1086
+ >>> import torch
1087
+ >>> tokenizer = BertTokenizer.from_pretrained('bert-base-cased')
1088
+ >>> config = BertConfig.from_pretrained("bert-base-cased")
1089
+ >>> model = BertLMHeadModel.from_pretrained('bert-base-cased', config=config)
1090
+ >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
1091
+ >>> outputs = model(**inputs)
1092
+ >>> prediction_logits = outputs.logits
1093
+ """
1094
+ return_dict = (return_dict if return_dict is not None else self.config.use_return_dict)
1095
+ if labels is not None:
1096
+ use_cache = False
1097
+
1098
+ outputs = self.bert(
1099
+ input_ids,
1100
+ attention_mask=attention_mask,
1101
+ position_ids=position_ids,
1102
+ head_mask=head_mask,
1103
+ inputs_embeds=inputs_embeds,
1104
+ encoder_hidden_states=encoder_hidden_states,
1105
+ encoder_attention_mask=encoder_attention_mask,
1106
+ past_key_values=past_key_values,
1107
+ use_cache=use_cache,
1108
+ output_attentions=output_attentions,
1109
+ output_hidden_states=output_hidden_states,
1110
+ return_dict=return_dict,
1111
+ is_decoder=is_decoder,
1112
+ mode=mode,
1113
+ )
1114
+
1115
+ sequence_output = outputs[0]
1116
+ prediction_scores = self.cls(sequence_output)
1117
+
1118
+ if return_logits:
1119
+ return prediction_scores[:, :-1, :].contiguous()
1120
+
1121
+ lm_loss = None
1122
+ if labels is not None:
1123
+ # we are doing next-token prediction; shift prediction scores and input ids by one
1124
+ shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()
1125
+ labels = labels[:, 1:].contiguous()
1126
+ loss_fct = CrossEntropyLoss(reduction=reduction, label_smoothing=0.1)
1127
+ lm_loss = loss_fct(
1128
+ shifted_prediction_scores.view(-1, self.config.vocab_size),
1129
+ labels.view(-1),
1130
+ )
1131
+ if reduction == 'none':
1132
+ lm_loss = lm_loss.view(prediction_scores.size(0), -1).sum(1)
1133
+
1134
+ if soft_labels is not None:
1135
+ loss_distill = -torch.sum(F.log_softmax(shifted_prediction_scores, dim=-1) * soft_labels, dim=-1)
1136
+ loss_distill = (loss_distill * (labels != -100)).sum(1)
1137
+ lm_loss = (1 - alpha) * lm_loss + alpha * loss_distill
1138
+
1139
+ if not return_dict:
1140
+ output = (prediction_scores, ) + outputs[2:]
1141
+ return ((lm_loss, ) + output) if lm_loss is not None else output
1142
+
1143
+ return CausalLMOutputWithCrossAttentions(
1144
+ loss=lm_loss,
1145
+ logits=prediction_scores,
1146
+ past_key_values=outputs.past_key_values,
1147
+ hidden_states=outputs.hidden_states,
1148
+ attentions=outputs.attentions,
1149
+ cross_attentions=outputs.cross_attentions,
1150
+ )
1151
+
1152
+ def prepare_inputs_for_generation(self, input_ids, past=None, attention_mask=None, **model_kwargs):
1153
+ input_shape = input_ids.shape
1154
+ # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
1155
+ if attention_mask is None:
1156
+ attention_mask = input_ids.new_ones(input_shape)
1157
+
1158
+ # cut decoder_input_ids if past is used
1159
+ if past is not None:
1160
+ input_ids = input_ids[:, -1:]
1161
+
1162
+ return {
1163
+ 'input_ids': input_ids,
1164
+ 'attention_mask': attention_mask,
1165
+ 'past_key_values': past,
1166
+ 'encoder_hidden_states': model_kwargs.get('encoder_hidden_states', None),
1167
+ 'encoder_attention_mask': model_kwargs.get('encoder_attention_mask', None),
1168
+ 'is_decoder': True,
1169
+ }
1170
+
1171
+ def _reorder_cache(self, past, beam_idx):
1172
+ reordered_past = ()
1173
+ for layer_past in past:
1174
+ reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past), )
1175
+ return reordered_past
1176
+
1177
+
1178
+ class XBertLMHeadDecoder(BertLMHeadModel):
1179
+ """
1180
+ This class decouples the decoder forward logic from the VL model.
1181
+ In this way, different VL models can share this decoder as long as
1182
+ they feed encoder_embeds as required.
1183
+ """
1184
+
1185
+ @classmethod
1186
+ def from_config(cls, cfg, from_pretrained=False):
1187
+
1188
+ med_config_path = get_abs_path(cfg.get('med_config_path'))
1189
+ med_config = BertConfig.from_json_file(med_config_path)
1190
+
1191
+ if from_pretrained:
1192
+ return cls.from_pretrained('bert-base-uncased', config=med_config)
1193
+ else:
1194
+ return cls(config=med_config)
1195
+
1196
+ def generate_from_encoder(
1197
+ self,
1198
+ tokenized_prompt,
1199
+ visual_embeds,
1200
+ sep_token_id,
1201
+ pad_token_id,
1202
+ use_nucleus_sampling=False,
1203
+ num_beams=3,
1204
+ max_length=30,
1205
+ min_length=10,
1206
+ top_p=0.9,
1207
+ repetition_penalty=1.0,
1208
+ **kwargs
1209
+ ):
1210
+
1211
+ if not use_nucleus_sampling:
1212
+ num_beams = num_beams
1213
+ visual_embeds = visual_embeds.repeat_interleave(num_beams, dim=0)
1214
+
1215
+ image_atts = torch.ones(visual_embeds.size()[:-1], dtype=torch.long).to(self.device)
1216
+
1217
+ model_kwargs = {
1218
+ 'encoder_hidden_states': visual_embeds,
1219
+ 'encoder_attention_mask': image_atts,
1220
+ }
1221
+
1222
+ if use_nucleus_sampling:
1223
+ # nucleus sampling
1224
+ outputs = self.generate(
1225
+ input_ids=tokenized_prompt.input_ids,
1226
+ max_length=max_length,
1227
+ min_length=min_length,
1228
+ do_sample=True,
1229
+ top_p=top_p,
1230
+ num_return_sequences=1,
1231
+ eos_token_id=sep_token_id,
1232
+ pad_token_id=pad_token_id,
1233
+ repetition_penalty=1.1,
1234
+ **model_kwargs
1235
+ )
1236
+ else:
1237
+ # beam search
1238
+ outputs = self.generate(
1239
+ input_ids=tokenized_prompt.input_ids,
1240
+ max_length=max_length,
1241
+ min_length=min_length,
1242
+ num_beams=num_beams,
1243
+ eos_token_id=sep_token_id,
1244
+ pad_token_id=pad_token_id,
1245
+ repetition_penalty=repetition_penalty,
1246
+ **model_kwargs
1247
+ )
1248
+
1249
+ return outputs
1250
+
1251
+
1252
+ class XBertEncoder(BertModel, BaseEncoder):
1253
+
1254
+ @classmethod
1255
+ def from_config(cls, cfg, from_pretrained=False):
1256
+
1257
+ med_config_path = get_abs_path(cfg.get('med_config_path'))
1258
+ med_config = BertConfig.from_json_file(med_config_path)
1259
+
1260
+ if from_pretrained:
1261
+ return cls.from_pretrained('bert-base-uncased', config=med_config, add_pooling_layer=False)
1262
+ else:
1263
+ return cls(config=med_config, add_pooling_layer=False)
1264
+
1265
+ def forward_automask(self, tokenized_text, visual_embeds, **kwargs):
1266
+ image_atts = torch.ones(visual_embeds.size()[:-1], dtype=torch.long).to(self.device)
1267
+
1268
+ text = tokenized_text
1269
+ text_output = super().forward(
1270
+ text.input_ids,
1271
+ attention_mask=text.attention_mask,
1272
+ encoder_hidden_states=visual_embeds,
1273
+ encoder_attention_mask=image_atts,
1274
+ return_dict=True,
1275
+ )
1276
+
1277
+ return text_output
1278
+
1279
+ def forward_text(self, tokenized_text, **kwargs):
1280
+ text = tokenized_text
1281
+ token_type_ids = kwargs.get('token_type_ids', None)
1282
+
1283
+ text_output = super().forward(
1284
+ text.input_ids,
1285
+ attention_mask=text.attention_mask,
1286
+ token_type_ids=token_type_ids,
1287
+ return_dict=True,
1288
+ mode='text',
1289
+ )
1290
+
1291
+ return text_output
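
    Taken together, a vision-language model would encode text with `XBertEncoder` and decode captions with `XBertLMHeadDecoder`. The sketch below is a hypothetical wiring only: the `cfg` dict, the `med_config_path` value, and the random `visual_embeds` standing in for a vision backbone's output (assumed to match the config's hidden size) are placeholders, not values shipped by the package, and the exact `generate` behavior depends on the pinned transformers version:

        import torch
        from transformers import BertTokenizer

        cfg = {'med_config_path': 'configs/med_config.json'}   # placeholder path to a BertConfig JSON
        tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
        text_encoder = XBertEncoder.from_config(cfg, from_pretrained=True)
        decoder = XBertLMHeadDecoder.from_config(cfg, from_pretrained=True)

        visual_embeds = torch.randn(1, 197, 768)               # dummy image features [batch, patches, hidden]

        tokens = tokenizer(['a photo of a cat'], return_tensors='pt')
        fused = text_encoder.forward_automask(tokens, visual_embeds)   # text attends to the image features

        prompt = tokenizer(['a picture of'], return_tensors='pt')
        caption_ids = decoder.generate_from_encoder(
            prompt,
            visual_embeds,
            sep_token_id=tokenizer.sep_token_id,
            pad_token_id=tokenizer.pad_token_id,
            num_beams=3,
            max_length=30,
            min_length=10,
        )
        print(tokenizer.batch_decode(caption_ids, skip_special_tokens=True))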