transformers 5.0.0__py3-none-any.whl → 5.0.0rc0__py3-none-any.whl

This diff shows the contents of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
Files changed (1606)
  1. transformers/__init__.py +36 -55
  2. transformers/activations.py +1 -1
  3. transformers/audio_utils.py +33 -32
  4. transformers/cache_utils.py +139 -32
  5. transformers/cli/chat.py +3 -3
  6. transformers/cli/serve.py +19 -49
  7. transformers/cli/transformers.py +1 -2
  8. transformers/configuration_utils.py +155 -129
  9. transformers/conversion_mapping.py +22 -158
  10. transformers/convert_slow_tokenizer.py +17 -227
  11. transformers/core_model_loading.py +185 -528
  12. transformers/data/data_collator.py +4 -12
  13. transformers/data/processors/glue.py +1 -0
  14. transformers/data/processors/utils.py +1 -0
  15. transformers/data/processors/xnli.py +1 -0
  16. transformers/dependency_versions_check.py +1 -0
  17. transformers/dependency_versions_table.py +7 -5
  18. transformers/distributed/configuration_utils.py +2 -1
  19. transformers/dynamic_module_utils.py +25 -24
  20. transformers/feature_extraction_sequence_utils.py +23 -19
  21. transformers/feature_extraction_utils.py +33 -64
  22. transformers/file_utils.py +1 -0
  23. transformers/generation/__init__.py +1 -11
  24. transformers/generation/candidate_generator.py +33 -80
  25. transformers/generation/configuration_utils.py +133 -189
  26. transformers/generation/continuous_batching/__init__.py +1 -4
  27. transformers/generation/continuous_batching/cache.py +25 -83
  28. transformers/generation/continuous_batching/cache_manager.py +45 -155
  29. transformers/generation/continuous_batching/continuous_api.py +147 -270
  30. transformers/generation/continuous_batching/requests.py +3 -51
  31. transformers/generation/continuous_batching/scheduler.py +105 -160
  32. transformers/generation/logits_process.py +128 -0
  33. transformers/generation/stopping_criteria.py +1 -1
  34. transformers/generation/streamers.py +1 -0
  35. transformers/generation/utils.py +123 -122
  36. transformers/generation/watermarking.py +6 -8
  37. transformers/hf_argparser.py +13 -9
  38. transformers/hyperparameter_search.py +2 -1
  39. transformers/image_processing_base.py +23 -12
  40. transformers/image_processing_utils.py +15 -11
  41. transformers/image_processing_utils_fast.py +75 -85
  42. transformers/image_transforms.py +42 -73
  43. transformers/image_utils.py +32 -30
  44. transformers/initialization.py +0 -37
  45. transformers/integrations/__init__.py +2 -16
  46. transformers/integrations/accelerate.py +113 -58
  47. transformers/integrations/aqlm.py +66 -36
  48. transformers/integrations/awq.py +516 -45
  49. transformers/integrations/bitnet.py +105 -47
  50. transformers/integrations/bitsandbytes.py +202 -91
  51. transformers/integrations/deepspeed.py +4 -161
  52. transformers/integrations/eetq.py +82 -84
  53. transformers/integrations/executorch.py +1 -1
  54. transformers/integrations/fbgemm_fp8.py +145 -190
  55. transformers/integrations/finegrained_fp8.py +215 -249
  56. transformers/integrations/flash_attention.py +3 -3
  57. transformers/integrations/flex_attention.py +1 -1
  58. transformers/integrations/fp_quant.py +0 -90
  59. transformers/integrations/ggml.py +2 -11
  60. transformers/integrations/higgs.py +62 -37
  61. transformers/integrations/hub_kernels.py +8 -65
  62. transformers/integrations/integration_utils.py +3 -47
  63. transformers/integrations/mistral.py +0 -12
  64. transformers/integrations/mxfp4.py +80 -33
  65. transformers/integrations/peft.py +191 -483
  66. transformers/integrations/quanto.py +56 -77
  67. transformers/integrations/spqr.py +90 -42
  68. transformers/integrations/tensor_parallel.py +221 -167
  69. transformers/integrations/torchao.py +43 -35
  70. transformers/integrations/vptq.py +59 -40
  71. transformers/kernels/__init__.py +0 -0
  72. transformers/{models/pe_audio_video/processing_pe_audio_video.py → kernels/falcon_mamba/__init__.py} +3 -12
  73. transformers/kernels/falcon_mamba/selective_scan_with_ln_interface.py +529 -0
  74. transformers/loss/loss_utils.py +0 -2
  75. transformers/masking_utils.py +55 -51
  76. transformers/model_debugging_utils.py +5 -4
  77. transformers/modelcard.py +194 -15
  78. transformers/modeling_attn_mask_utils.py +19 -19
  79. transformers/modeling_flash_attention_utils.py +27 -27
  80. transformers/modeling_gguf_pytorch_utils.py +24 -79
  81. transformers/modeling_layers.py +22 -21
  82. transformers/modeling_outputs.py +253 -242
  83. transformers/modeling_rope_utils.py +117 -138
  84. transformers/modeling_utils.py +739 -850
  85. transformers/models/__init__.py +0 -27
  86. transformers/models/afmoe/configuration_afmoe.py +33 -40
  87. transformers/models/afmoe/modeling_afmoe.py +54 -42
  88. transformers/models/afmoe/modular_afmoe.py +33 -23
  89. transformers/models/aimv2/configuration_aimv2.py +10 -2
  90. transformers/models/aimv2/modeling_aimv2.py +42 -47
  91. transformers/models/aimv2/modular_aimv2.py +19 -17
  92. transformers/models/albert/configuration_albert.py +2 -8
  93. transformers/models/albert/modeling_albert.py +69 -70
  94. transformers/models/albert/tokenization_albert.py +14 -5
  95. transformers/models/align/configuration_align.py +6 -8
  96. transformers/models/align/modeling_align.py +89 -94
  97. transformers/models/align/processing_align.py +30 -2
  98. transformers/models/altclip/configuration_altclip.py +7 -4
  99. transformers/models/altclip/modeling_altclip.py +103 -114
  100. transformers/models/altclip/processing_altclip.py +15 -2
  101. transformers/models/apertus/__init__.py +1 -0
  102. transformers/models/apertus/configuration_apertus.py +28 -23
  103. transformers/models/apertus/modeling_apertus.py +40 -39
  104. transformers/models/apertus/modular_apertus.py +38 -37
  105. transformers/models/arcee/configuration_arcee.py +30 -25
  106. transformers/models/arcee/modeling_arcee.py +39 -36
  107. transformers/models/arcee/modular_arcee.py +23 -20
  108. transformers/models/aria/configuration_aria.py +44 -31
  109. transformers/models/aria/image_processing_aria.py +27 -25
  110. transformers/models/aria/modeling_aria.py +106 -110
  111. transformers/models/aria/modular_aria.py +127 -118
  112. transformers/models/aria/processing_aria.py +35 -28
  113. transformers/models/audio_spectrogram_transformer/configuration_audio_spectrogram_transformer.py +1 -0
  114. transformers/models/audio_spectrogram_transformer/feature_extraction_audio_spectrogram_transformer.py +6 -3
  115. transformers/models/audio_spectrogram_transformer/modeling_audio_spectrogram_transformer.py +8 -6
  116. transformers/models/audioflamingo3/__init__.py +1 -0
  117. transformers/models/audioflamingo3/configuration_audioflamingo3.py +1 -0
  118. transformers/models/audioflamingo3/modeling_audioflamingo3.py +49 -58
  119. transformers/models/audioflamingo3/modular_audioflamingo3.py +43 -53
  120. transformers/models/audioflamingo3/processing_audioflamingo3.py +30 -33
  121. transformers/models/auto/auto_factory.py +7 -6
  122. transformers/models/auto/configuration_auto.py +5 -66
  123. transformers/models/auto/feature_extraction_auto.py +10 -14
  124. transformers/models/auto/image_processing_auto.py +41 -32
  125. transformers/models/auto/modeling_auto.py +188 -46
  126. transformers/models/auto/processing_auto.py +11 -24
  127. transformers/models/auto/tokenization_auto.py +588 -171
  128. transformers/models/auto/video_processing_auto.py +10 -12
  129. transformers/models/autoformer/configuration_autoformer.py +7 -4
  130. transformers/models/autoformer/modeling_autoformer.py +101 -104
  131. transformers/models/aya_vision/configuration_aya_vision.py +1 -4
  132. transformers/models/aya_vision/modeling_aya_vision.py +102 -71
  133. transformers/models/aya_vision/modular_aya_vision.py +74 -46
  134. transformers/models/aya_vision/processing_aya_vision.py +53 -25
  135. transformers/models/bamba/configuration_bamba.py +39 -34
  136. transformers/models/bamba/modeling_bamba.py +86 -82
  137. transformers/models/bamba/modular_bamba.py +72 -70
  138. transformers/models/bark/configuration_bark.py +8 -6
  139. transformers/models/bark/generation_configuration_bark.py +5 -3
  140. transformers/models/bark/modeling_bark.py +57 -54
  141. transformers/models/bark/processing_bark.py +41 -19
  142. transformers/models/bart/configuration_bart.py +6 -9
  143. transformers/models/bart/modeling_bart.py +126 -135
  144. transformers/models/barthez/tokenization_barthez.py +11 -3
  145. transformers/models/bartpho/tokenization_bartpho.py +7 -6
  146. transformers/models/beit/configuration_beit.py +11 -0
  147. transformers/models/beit/image_processing_beit.py +56 -53
  148. transformers/models/beit/image_processing_beit_fast.py +12 -10
  149. transformers/models/beit/modeling_beit.py +60 -69
  150. transformers/models/bert/configuration_bert.py +2 -12
  151. transformers/models/bert/modeling_bert.py +122 -114
  152. transformers/models/bert/tokenization_bert.py +23 -8
  153. transformers/models/bert/tokenization_bert_legacy.py +5 -3
  154. transformers/models/bert_generation/configuration_bert_generation.py +2 -17
  155. transformers/models/bert_generation/modeling_bert_generation.py +49 -49
  156. transformers/models/bert_generation/tokenization_bert_generation.py +3 -2
  157. transformers/models/bert_japanese/tokenization_bert_japanese.py +6 -5
  158. transformers/models/bertweet/tokenization_bertweet.py +3 -1
  159. transformers/models/big_bird/configuration_big_bird.py +9 -12
  160. transformers/models/big_bird/modeling_big_bird.py +109 -116
  161. transformers/models/big_bird/tokenization_big_bird.py +43 -16
  162. transformers/models/bigbird_pegasus/configuration_bigbird_pegasus.py +9 -9
  163. transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py +117 -130
  164. transformers/models/biogpt/configuration_biogpt.py +2 -8
  165. transformers/models/biogpt/modeling_biogpt.py +76 -72
  166. transformers/models/biogpt/modular_biogpt.py +66 -62
  167. transformers/models/biogpt/tokenization_biogpt.py +5 -3
  168. transformers/models/bit/configuration_bit.py +1 -0
  169. transformers/models/bit/image_processing_bit.py +24 -21
  170. transformers/models/bit/image_processing_bit_fast.py +1 -0
  171. transformers/models/bit/modeling_bit.py +12 -25
  172. transformers/models/bitnet/configuration_bitnet.py +28 -23
  173. transformers/models/bitnet/modeling_bitnet.py +39 -36
  174. transformers/models/bitnet/modular_bitnet.py +6 -4
  175. transformers/models/blenderbot/configuration_blenderbot.py +5 -8
  176. transformers/models/blenderbot/modeling_blenderbot.py +96 -77
  177. transformers/models/blenderbot/tokenization_blenderbot.py +24 -18
  178. transformers/models/blenderbot_small/configuration_blenderbot_small.py +5 -8
  179. transformers/models/blenderbot_small/modeling_blenderbot_small.py +69 -79
  180. transformers/models/blenderbot_small/tokenization_blenderbot_small.py +3 -1
  181. transformers/models/blip/configuration_blip.py +10 -9
  182. transformers/models/blip/image_processing_blip.py +20 -17
  183. transformers/models/blip/image_processing_blip_fast.py +1 -0
  184. transformers/models/blip/modeling_blip.py +108 -117
  185. transformers/models/blip/modeling_blip_text.py +65 -73
  186. transformers/models/blip/processing_blip.py +36 -5
  187. transformers/models/blip_2/configuration_blip_2.py +2 -2
  188. transformers/models/blip_2/modeling_blip_2.py +118 -146
  189. transformers/models/blip_2/processing_blip_2.py +38 -8
  190. transformers/models/bloom/configuration_bloom.py +2 -5
  191. transformers/models/bloom/modeling_bloom.py +104 -77
  192. transformers/models/blt/configuration_blt.py +86 -94
  193. transformers/models/blt/modeling_blt.py +81 -238
  194. transformers/models/blt/modular_blt.py +65 -228
  195. transformers/models/bridgetower/configuration_bridgetower.py +2 -7
  196. transformers/models/bridgetower/image_processing_bridgetower.py +35 -34
  197. transformers/models/bridgetower/image_processing_bridgetower_fast.py +16 -13
  198. transformers/models/bridgetower/modeling_bridgetower.py +119 -141
  199. transformers/models/bridgetower/processing_bridgetower.py +16 -2
  200. transformers/models/bros/configuration_bros.py +18 -24
  201. transformers/models/bros/modeling_bros.py +80 -90
  202. transformers/models/bros/processing_bros.py +12 -2
  203. transformers/models/byt5/tokenization_byt5.py +6 -4
  204. transformers/models/camembert/configuration_camembert.py +2 -8
  205. transformers/models/camembert/modeling_camembert.py +195 -196
  206. transformers/models/camembert/modular_camembert.py +54 -51
  207. transformers/models/camembert/tokenization_camembert.py +13 -6
  208. transformers/models/canine/configuration_canine.py +2 -4
  209. transformers/models/canine/modeling_canine.py +75 -84
  210. transformers/models/canine/tokenization_canine.py +1 -2
  211. transformers/models/chameleon/configuration_chameleon.py +34 -29
  212. transformers/models/chameleon/image_processing_chameleon.py +24 -21
  213. transformers/models/chameleon/image_processing_chameleon_fast.py +6 -5
  214. transformers/models/chameleon/modeling_chameleon.py +93 -142
  215. transformers/models/chameleon/processing_chameleon.py +41 -16
  216. transformers/models/chinese_clip/configuration_chinese_clip.py +8 -10
  217. transformers/models/chinese_clip/image_processing_chinese_clip.py +24 -21
  218. transformers/models/chinese_clip/image_processing_chinese_clip_fast.py +1 -0
  219. transformers/models/chinese_clip/modeling_chinese_clip.py +92 -96
  220. transformers/models/chinese_clip/processing_chinese_clip.py +15 -2
  221. transformers/models/clap/configuration_clap.py +9 -4
  222. transformers/models/clap/feature_extraction_clap.py +12 -11
  223. transformers/models/clap/modeling_clap.py +123 -136
  224. transformers/models/clap/processing_clap.py +15 -2
  225. transformers/models/clip/configuration_clip.py +2 -4
  226. transformers/models/clip/image_processing_clip.py +24 -21
  227. transformers/models/clip/image_processing_clip_fast.py +1 -9
  228. transformers/models/clip/modeling_clip.py +65 -65
  229. transformers/models/clip/processing_clip.py +14 -2
  230. transformers/models/clip/tokenization_clip.py +46 -21
  231. transformers/models/clipseg/configuration_clipseg.py +2 -4
  232. transformers/models/clipseg/modeling_clipseg.py +109 -119
  233. transformers/models/clipseg/processing_clipseg.py +42 -19
  234. transformers/models/clvp/configuration_clvp.py +5 -15
  235. transformers/models/clvp/feature_extraction_clvp.py +10 -7
  236. transformers/models/clvp/modeling_clvp.py +146 -155
  237. transformers/models/clvp/number_normalizer.py +2 -1
  238. transformers/models/clvp/processing_clvp.py +20 -3
  239. transformers/models/clvp/tokenization_clvp.py +64 -1
  240. transformers/models/code_llama/tokenization_code_llama.py +44 -18
  241. transformers/models/codegen/configuration_codegen.py +4 -4
  242. transformers/models/codegen/modeling_codegen.py +53 -63
  243. transformers/models/codegen/tokenization_codegen.py +47 -17
  244. transformers/models/cohere/configuration_cohere.py +30 -25
  245. transformers/models/cohere/modeling_cohere.py +42 -40
  246. transformers/models/cohere/modular_cohere.py +29 -26
  247. transformers/models/cohere/tokenization_cohere.py +46 -15
  248. transformers/models/cohere2/configuration_cohere2.py +32 -31
  249. transformers/models/cohere2/modeling_cohere2.py +44 -42
  250. transformers/models/cohere2/modular_cohere2.py +54 -54
  251. transformers/models/cohere2_vision/image_processing_cohere2_vision_fast.py +14 -13
  252. transformers/models/cohere2_vision/modeling_cohere2_vision.py +58 -59
  253. transformers/models/cohere2_vision/modular_cohere2_vision.py +46 -45
  254. transformers/models/cohere2_vision/processing_cohere2_vision.py +36 -6
  255. transformers/models/colpali/configuration_colpali.py +1 -0
  256. transformers/models/colpali/modeling_colpali.py +16 -14
  257. transformers/models/colpali/modular_colpali.py +51 -11
  258. transformers/models/colpali/processing_colpali.py +52 -14
  259. transformers/models/colqwen2/modeling_colqwen2.py +28 -28
  260. transformers/models/colqwen2/modular_colqwen2.py +74 -37
  261. transformers/models/colqwen2/processing_colqwen2.py +52 -16
  262. transformers/models/conditional_detr/configuration_conditional_detr.py +2 -1
  263. transformers/models/conditional_detr/image_processing_conditional_detr.py +70 -67
  264. transformers/models/conditional_detr/image_processing_conditional_detr_fast.py +36 -36
  265. transformers/models/conditional_detr/modeling_conditional_detr.py +87 -99
  266. transformers/models/conditional_detr/modular_conditional_detr.py +3 -49
  267. transformers/models/convbert/configuration_convbert.py +8 -11
  268. transformers/models/convbert/modeling_convbert.py +87 -94
  269. transformers/models/convbert/tokenization_convbert.py +1 -0
  270. transformers/models/convnext/configuration_convnext.py +1 -0
  271. transformers/models/convnext/image_processing_convnext.py +23 -20
  272. transformers/models/convnext/image_processing_convnext_fast.py +21 -16
  273. transformers/models/convnext/modeling_convnext.py +12 -9
  274. transformers/models/convnextv2/configuration_convnextv2.py +1 -0
  275. transformers/models/convnextv2/modeling_convnextv2.py +12 -9
  276. transformers/models/cpm/tokenization_cpm.py +7 -6
  277. transformers/models/cpm/tokenization_cpm_fast.py +5 -3
  278. transformers/models/cpmant/configuration_cpmant.py +1 -4
  279. transformers/models/cpmant/modeling_cpmant.py +40 -38
  280. transformers/models/cpmant/tokenization_cpmant.py +3 -1
  281. transformers/models/csm/configuration_csm.py +66 -58
  282. transformers/models/csm/generation_csm.py +35 -31
  283. transformers/models/csm/modeling_csm.py +85 -85
  284. transformers/models/csm/modular_csm.py +58 -58
  285. transformers/models/csm/processing_csm.py +68 -25
  286. transformers/models/ctrl/configuration_ctrl.py +1 -16
  287. transformers/models/ctrl/modeling_ctrl.py +44 -54
  288. transformers/models/ctrl/tokenization_ctrl.py +1 -0
  289. transformers/models/cvt/configuration_cvt.py +1 -0
  290. transformers/models/cvt/modeling_cvt.py +16 -20
  291. transformers/models/cwm/__init__.py +1 -0
  292. transformers/models/cwm/configuration_cwm.py +12 -8
  293. transformers/models/cwm/modeling_cwm.py +39 -37
  294. transformers/models/cwm/modular_cwm.py +12 -10
  295. transformers/models/d_fine/configuration_d_fine.py +5 -7
  296. transformers/models/d_fine/modeling_d_fine.py +128 -138
  297. transformers/models/d_fine/modular_d_fine.py +18 -33
  298. transformers/models/dab_detr/configuration_dab_detr.py +3 -6
  299. transformers/models/dab_detr/modeling_dab_detr.py +75 -81
  300. transformers/models/dac/configuration_dac.py +1 -0
  301. transformers/models/dac/feature_extraction_dac.py +9 -6
  302. transformers/models/dac/modeling_dac.py +26 -24
  303. transformers/models/data2vec/configuration_data2vec_audio.py +2 -4
  304. transformers/models/data2vec/configuration_data2vec_text.py +3 -11
  305. transformers/models/data2vec/configuration_data2vec_vision.py +1 -0
  306. transformers/models/data2vec/modeling_data2vec_audio.py +56 -57
  307. transformers/models/data2vec/modeling_data2vec_text.py +93 -98
  308. transformers/models/data2vec/modeling_data2vec_vision.py +45 -49
  309. transformers/models/data2vec/modular_data2vec_audio.py +1 -6
  310. transformers/models/data2vec/modular_data2vec_text.py +54 -58
  311. transformers/models/dbrx/configuration_dbrx.py +22 -36
  312. transformers/models/dbrx/modeling_dbrx.py +45 -42
  313. transformers/models/dbrx/modular_dbrx.py +33 -31
  314. transformers/models/deberta/configuration_deberta.py +1 -6
  315. transformers/models/deberta/modeling_deberta.py +60 -64
  316. transformers/models/deberta/tokenization_deberta.py +21 -9
  317. transformers/models/deberta_v2/configuration_deberta_v2.py +1 -6
  318. transformers/models/deberta_v2/modeling_deberta_v2.py +65 -71
  319. transformers/models/deberta_v2/tokenization_deberta_v2.py +29 -11
  320. transformers/models/decision_transformer/configuration_decision_transformer.py +2 -3
  321. transformers/models/decision_transformer/modeling_decision_transformer.py +56 -60
  322. transformers/models/deepseek_v2/configuration_deepseek_v2.py +44 -39
  323. transformers/models/deepseek_v2/modeling_deepseek_v2.py +43 -43
  324. transformers/models/deepseek_v2/modular_deepseek_v2.py +49 -48
  325. transformers/models/deepseek_v3/configuration_deepseek_v3.py +45 -40
  326. transformers/models/deepseek_v3/modeling_deepseek_v3.py +42 -45
  327. transformers/models/deepseek_v3/modular_deepseek_v3.py +9 -14
  328. transformers/models/deepseek_vl/configuration_deepseek_vl.py +3 -2
  329. transformers/models/deepseek_vl/image_processing_deepseek_vl.py +26 -25
  330. transformers/models/deepseek_vl/image_processing_deepseek_vl_fast.py +10 -10
  331. transformers/models/deepseek_vl/modeling_deepseek_vl.py +48 -57
  332. transformers/models/deepseek_vl/modular_deepseek_vl.py +43 -14
  333. transformers/models/deepseek_vl/processing_deepseek_vl.py +41 -10
  334. transformers/models/deepseek_vl_hybrid/configuration_deepseek_vl_hybrid.py +5 -3
  335. transformers/models/deepseek_vl_hybrid/image_processing_deepseek_vl_hybrid.py +35 -35
  336. transformers/models/deepseek_vl_hybrid/image_processing_deepseek_vl_hybrid_fast.py +24 -20
  337. transformers/models/deepseek_vl_hybrid/modeling_deepseek_vl_hybrid.py +61 -109
  338. transformers/models/deepseek_vl_hybrid/modular_deepseek_vl_hybrid.py +118 -146
  339. transformers/models/deepseek_vl_hybrid/processing_deepseek_vl_hybrid.py +44 -12
  340. transformers/models/deformable_detr/configuration_deformable_detr.py +3 -2
  341. transformers/models/deformable_detr/image_processing_deformable_detr.py +61 -59
  342. transformers/models/deformable_detr/image_processing_deformable_detr_fast.py +28 -28
  343. transformers/models/deformable_detr/modeling_deformable_detr.py +82 -88
  344. transformers/models/deformable_detr/modular_deformable_detr.py +3 -1
  345. transformers/models/deit/configuration_deit.py +1 -0
  346. transformers/models/deit/image_processing_deit.py +21 -18
  347. transformers/models/deit/image_processing_deit_fast.py +1 -0
  348. transformers/models/deit/modeling_deit.py +22 -24
  349. transformers/models/depth_anything/configuration_depth_anything.py +4 -2
  350. transformers/models/depth_anything/modeling_depth_anything.py +10 -10
  351. transformers/models/depth_pro/configuration_depth_pro.py +1 -0
  352. transformers/models/depth_pro/image_processing_depth_pro.py +23 -22
  353. transformers/models/depth_pro/image_processing_depth_pro_fast.py +10 -8
  354. transformers/models/depth_pro/modeling_depth_pro.py +27 -31
  355. transformers/models/detr/configuration_detr.py +2 -1
  356. transformers/models/detr/image_processing_detr.py +66 -64
  357. transformers/models/detr/image_processing_detr_fast.py +34 -33
  358. transformers/models/detr/modeling_detr.py +79 -95
  359. transformers/models/dia/configuration_dia.py +15 -9
  360. transformers/models/dia/feature_extraction_dia.py +9 -6
  361. transformers/models/dia/generation_dia.py +50 -48
  362. transformers/models/dia/modeling_dia.py +69 -78
  363. transformers/models/dia/modular_dia.py +56 -64
  364. transformers/models/dia/processing_dia.py +29 -39
  365. transformers/models/dia/tokenization_dia.py +6 -3
  366. transformers/models/diffllama/configuration_diffllama.py +30 -25
  367. transformers/models/diffllama/modeling_diffllama.py +49 -46
  368. transformers/models/diffllama/modular_diffllama.py +19 -17
  369. transformers/models/dinat/configuration_dinat.py +1 -0
  370. transformers/models/dinat/modeling_dinat.py +44 -47
  371. transformers/models/dinov2/configuration_dinov2.py +1 -0
  372. transformers/models/dinov2/modeling_dinov2.py +15 -15
  373. transformers/models/dinov2_with_registers/configuration_dinov2_with_registers.py +1 -1
  374. transformers/models/dinov2_with_registers/modeling_dinov2_with_registers.py +15 -16
  375. transformers/models/dinov2_with_registers/modular_dinov2_with_registers.py +9 -9
  376. transformers/models/dinov3_convnext/configuration_dinov3_convnext.py +7 -4
  377. transformers/models/dinov3_convnext/modeling_dinov3_convnext.py +6 -3
  378. transformers/models/dinov3_vit/configuration_dinov3_vit.py +8 -5
  379. transformers/models/dinov3_vit/image_processing_dinov3_vit_fast.py +9 -7
  380. transformers/models/dinov3_vit/modeling_dinov3_vit.py +18 -19
  381. transformers/models/dinov3_vit/modular_dinov3_vit.py +15 -16
  382. transformers/models/distilbert/configuration_distilbert.py +2 -8
  383. transformers/models/distilbert/modeling_distilbert.py +55 -55
  384. transformers/models/distilbert/tokenization_distilbert.py +1 -13
  385. transformers/models/doge/__init__.py +1 -0
  386. transformers/models/doge/configuration_doge.py +32 -39
  387. transformers/models/doge/modeling_doge.py +49 -45
  388. transformers/models/doge/modular_doge.py +63 -71
  389. transformers/models/donut/configuration_donut_swin.py +1 -0
  390. transformers/models/donut/image_processing_donut.py +29 -26
  391. transformers/models/donut/image_processing_donut_fast.py +15 -9
  392. transformers/models/donut/modeling_donut_swin.py +58 -62
  393. transformers/models/donut/processing_donut.py +26 -5
  394. transformers/models/dots1/configuration_dots1.py +33 -41
  395. transformers/models/dots1/modeling_dots1.py +45 -54
  396. transformers/models/dots1/modular_dots1.py +4 -5
  397. transformers/models/dpr/configuration_dpr.py +2 -19
  398. transformers/models/dpr/modeling_dpr.py +39 -42
  399. transformers/models/dpr/tokenization_dpr.py +9 -19
  400. transformers/models/dpr/tokenization_dpr_fast.py +9 -7
  401. transformers/models/dpt/configuration_dpt.py +2 -1
  402. transformers/models/dpt/image_processing_dpt.py +66 -65
  403. transformers/models/dpt/image_processing_dpt_fast.py +20 -18
  404. transformers/models/dpt/modeling_dpt.py +30 -32
  405. transformers/models/dpt/modular_dpt.py +17 -15
  406. transformers/models/edgetam/configuration_edgetam.py +3 -2
  407. transformers/models/edgetam/modeling_edgetam.py +86 -86
  408. transformers/models/edgetam/modular_edgetam.py +26 -21
  409. transformers/models/edgetam_video/__init__.py +1 -0
  410. transformers/models/edgetam_video/configuration_edgetam_video.py +1 -0
  411. transformers/models/edgetam_video/modeling_edgetam_video.py +158 -169
  412. transformers/models/edgetam_video/modular_edgetam_video.py +37 -30
  413. transformers/models/efficientloftr/configuration_efficientloftr.py +5 -4
  414. transformers/models/efficientloftr/image_processing_efficientloftr.py +16 -14
  415. transformers/models/efficientloftr/image_processing_efficientloftr_fast.py +9 -9
  416. transformers/models/efficientloftr/modeling_efficientloftr.py +38 -59
  417. transformers/models/efficientloftr/modular_efficientloftr.py +3 -1
  418. transformers/models/efficientnet/configuration_efficientnet.py +1 -0
  419. transformers/models/efficientnet/image_processing_efficientnet.py +32 -28
  420. transformers/models/efficientnet/image_processing_efficientnet_fast.py +19 -17
  421. transformers/models/efficientnet/modeling_efficientnet.py +15 -19
  422. transformers/models/electra/configuration_electra.py +3 -13
  423. transformers/models/electra/modeling_electra.py +103 -108
  424. transformers/models/emu3/configuration_emu3.py +17 -13
  425. transformers/models/emu3/image_processing_emu3.py +39 -44
  426. transformers/models/emu3/modeling_emu3.py +108 -148
  427. transformers/models/emu3/modular_emu3.py +73 -115
  428. transformers/models/emu3/processing_emu3.py +43 -18
  429. transformers/models/encodec/configuration_encodec.py +4 -2
  430. transformers/models/encodec/feature_extraction_encodec.py +13 -10
  431. transformers/models/encodec/modeling_encodec.py +29 -39
  432. transformers/models/encoder_decoder/configuration_encoder_decoder.py +2 -12
  433. transformers/models/encoder_decoder/modeling_encoder_decoder.py +43 -37
  434. transformers/models/eomt/configuration_eomt.py +1 -0
  435. transformers/models/eomt/image_processing_eomt.py +56 -66
  436. transformers/models/eomt/image_processing_eomt_fast.py +33 -76
  437. transformers/models/eomt/modeling_eomt.py +18 -23
  438. transformers/models/eomt/modular_eomt.py +13 -18
  439. transformers/models/ernie/configuration_ernie.py +3 -24
  440. transformers/models/ernie/modeling_ernie.py +132 -127
  441. transformers/models/ernie/modular_ernie.py +103 -97
  442. transformers/models/ernie4_5/configuration_ernie4_5.py +27 -23
  443. transformers/models/ernie4_5/modeling_ernie4_5.py +38 -36
  444. transformers/models/ernie4_5/modular_ernie4_5.py +4 -3
  445. transformers/models/ernie4_5_moe/configuration_ernie4_5_moe.py +36 -32
  446. transformers/models/ernie4_5_moe/modeling_ernie4_5_moe.py +55 -56
  447. transformers/models/ernie4_5_moe/modular_ernie4_5_moe.py +46 -18
  448. transformers/models/esm/configuration_esm.py +15 -11
  449. transformers/models/esm/modeling_esm.py +34 -38
  450. transformers/models/esm/modeling_esmfold.py +49 -53
  451. transformers/models/esm/openfold_utils/chunk_utils.py +6 -6
  452. transformers/models/esm/openfold_utils/loss.py +2 -1
  453. transformers/models/esm/openfold_utils/protein.py +16 -15
  454. transformers/models/esm/openfold_utils/tensor_utils.py +6 -6
  455. transformers/models/esm/tokenization_esm.py +4 -2
  456. transformers/models/evolla/configuration_evolla.py +40 -50
  457. transformers/models/evolla/modeling_evolla.py +66 -71
  458. transformers/models/evolla/modular_evolla.py +47 -53
  459. transformers/models/evolla/processing_evolla.py +35 -23
  460. transformers/models/exaone4/configuration_exaone4.py +25 -23
  461. transformers/models/exaone4/modeling_exaone4.py +38 -35
  462. transformers/models/exaone4/modular_exaone4.py +46 -44
  463. transformers/models/falcon/configuration_falcon.py +26 -31
  464. transformers/models/falcon/modeling_falcon.py +80 -82
  465. transformers/models/falcon_h1/configuration_falcon_h1.py +51 -45
  466. transformers/models/falcon_h1/modeling_falcon_h1.py +82 -85
  467. transformers/models/falcon_h1/modular_falcon_h1.py +51 -56
  468. transformers/models/falcon_mamba/configuration_falcon_mamba.py +2 -1
  469. transformers/models/falcon_mamba/modeling_falcon_mamba.py +82 -75
  470. transformers/models/falcon_mamba/modular_falcon_mamba.py +45 -28
  471. transformers/models/fastspeech2_conformer/configuration_fastspeech2_conformer.py +6 -2
  472. transformers/models/fastspeech2_conformer/modeling_fastspeech2_conformer.py +60 -76
  473. transformers/models/fastspeech2_conformer/tokenization_fastspeech2_conformer.py +3 -2
  474. transformers/models/flaubert/configuration_flaubert.py +5 -10
  475. transformers/models/flaubert/modeling_flaubert.py +143 -145
  476. transformers/models/flaubert/tokenization_flaubert.py +5 -3
  477. transformers/models/flava/configuration_flava.py +6 -5
  478. transformers/models/flava/image_processing_flava.py +67 -66
  479. transformers/models/flava/image_processing_flava_fast.py +49 -46
  480. transformers/models/flava/modeling_flava.py +136 -153
  481. transformers/models/flava/processing_flava.py +12 -2
  482. transformers/models/flex_olmo/__init__.py +1 -0
  483. transformers/models/flex_olmo/configuration_flex_olmo.py +32 -28
  484. transformers/models/flex_olmo/modeling_flex_olmo.py +47 -47
  485. transformers/models/flex_olmo/modular_flex_olmo.py +44 -40
  486. transformers/models/florence2/configuration_florence2.py +1 -0
  487. transformers/models/florence2/modeling_florence2.py +69 -111
  488. transformers/models/florence2/modular_florence2.py +101 -104
  489. transformers/models/florence2/processing_florence2.py +47 -18
  490. transformers/models/fnet/configuration_fnet.py +2 -6
  491. transformers/models/fnet/modeling_fnet.py +80 -83
  492. transformers/models/fnet/tokenization_fnet.py +1 -0
  493. transformers/models/focalnet/configuration_focalnet.py +1 -0
  494. transformers/models/focalnet/modeling_focalnet.py +45 -51
  495. transformers/models/fsmt/configuration_fsmt.py +17 -12
  496. transformers/models/fsmt/modeling_fsmt.py +48 -49
  497. transformers/models/fsmt/tokenization_fsmt.py +5 -3
  498. transformers/models/funnel/configuration_funnel.py +1 -8
  499. transformers/models/funnel/modeling_funnel.py +93 -99
  500. transformers/models/funnel/tokenization_funnel.py +27 -17
  501. transformers/models/fuyu/configuration_fuyu.py +34 -28
  502. transformers/models/fuyu/image_processing_fuyu.py +31 -29
  503. transformers/models/fuyu/image_processing_fuyu_fast.py +17 -17
  504. transformers/models/fuyu/modeling_fuyu.py +53 -53
  505. transformers/models/fuyu/processing_fuyu.py +34 -23
  506. transformers/models/gemma/configuration_gemma.py +30 -25
  507. transformers/models/gemma/modeling_gemma.py +50 -46
  508. transformers/models/gemma/modular_gemma.py +47 -42
  509. transformers/models/gemma/tokenization_gemma.py +30 -10
  510. transformers/models/gemma2/configuration_gemma2.py +35 -30
  511. transformers/models/gemma2/modeling_gemma2.py +42 -39
  512. transformers/models/gemma2/modular_gemma2.py +66 -63
  513. transformers/models/gemma3/configuration_gemma3.py +44 -44
  514. transformers/models/gemma3/image_processing_gemma3.py +31 -29
  515. transformers/models/gemma3/image_processing_gemma3_fast.py +13 -11
  516. transformers/models/gemma3/modeling_gemma3.py +207 -159
  517. transformers/models/gemma3/modular_gemma3.py +204 -153
  518. transformers/models/gemma3/processing_gemma3.py +5 -5
  519. transformers/models/gemma3n/configuration_gemma3n.py +26 -36
  520. transformers/models/gemma3n/feature_extraction_gemma3n.py +11 -9
  521. transformers/models/gemma3n/modeling_gemma3n.py +356 -222
  522. transformers/models/gemma3n/modular_gemma3n.py +207 -230
  523. transformers/models/gemma3n/processing_gemma3n.py +26 -12
  524. transformers/models/git/configuration_git.py +8 -5
  525. transformers/models/git/modeling_git.py +204 -266
  526. transformers/models/git/processing_git.py +14 -2
  527. transformers/models/glm/configuration_glm.py +28 -24
  528. transformers/models/glm/modeling_glm.py +40 -37
  529. transformers/models/glm/modular_glm.py +7 -4
  530. transformers/models/glm4/configuration_glm4.py +28 -24
  531. transformers/models/glm4/modeling_glm4.py +42 -40
  532. transformers/models/glm4/modular_glm4.py +10 -8
  533. transformers/models/glm46v/configuration_glm46v.py +1 -0
  534. transformers/models/glm46v/image_processing_glm46v.py +40 -35
  535. transformers/models/glm46v/image_processing_glm46v_fast.py +9 -9
  536. transformers/models/glm46v/modeling_glm46v.py +90 -137
  537. transformers/models/glm46v/modular_glm46v.py +3 -4
  538. transformers/models/glm46v/processing_glm46v.py +41 -7
  539. transformers/models/glm46v/video_processing_glm46v.py +11 -9
  540. transformers/models/glm4_moe/configuration_glm4_moe.py +32 -40
  541. transformers/models/glm4_moe/modeling_glm4_moe.py +42 -45
  542. transformers/models/glm4_moe/modular_glm4_moe.py +34 -42
  543. transformers/models/glm4v/configuration_glm4v.py +20 -18
  544. transformers/models/glm4v/image_processing_glm4v.py +40 -34
  545. transformers/models/glm4v/image_processing_glm4v_fast.py +9 -8
  546. transformers/models/glm4v/modeling_glm4v.py +205 -254
  547. transformers/models/glm4v/modular_glm4v.py +224 -210
  548. transformers/models/glm4v/processing_glm4v.py +41 -7
  549. transformers/models/glm4v/video_processing_glm4v.py +11 -9
  550. transformers/models/glm4v_moe/configuration_glm4v_moe.py +125 -136
  551. transformers/models/glm4v_moe/modeling_glm4v_moe.py +368 -377
  552. transformers/models/glm4v_moe/modular_glm4v_moe.py +169 -83
  553. transformers/models/glpn/configuration_glpn.py +1 -0
  554. transformers/models/glpn/image_processing_glpn.py +12 -11
  555. transformers/models/glpn/image_processing_glpn_fast.py +13 -11
  556. transformers/models/glpn/modeling_glpn.py +14 -16
  557. transformers/models/got_ocr2/configuration_got_ocr2.py +12 -4
  558. transformers/models/got_ocr2/image_processing_got_ocr2.py +24 -22
  559. transformers/models/got_ocr2/image_processing_got_ocr2_fast.py +11 -9
  560. transformers/models/got_ocr2/modeling_got_ocr2.py +80 -77
  561. transformers/models/got_ocr2/modular_got_ocr2.py +51 -54
  562. transformers/models/got_ocr2/processing_got_ocr2.py +63 -42
  563. transformers/models/gpt2/configuration_gpt2.py +2 -13
  564. transformers/models/gpt2/modeling_gpt2.py +115 -120
  565. transformers/models/gpt2/tokenization_gpt2.py +46 -15
  566. transformers/models/gpt_bigcode/configuration_gpt_bigcode.py +2 -5
  567. transformers/models/gpt_bigcode/modeling_gpt_bigcode.py +89 -79
  568. transformers/models/gpt_neo/configuration_gpt_neo.py +2 -9
  569. transformers/models/gpt_neo/modeling_gpt_neo.py +67 -83
  570. transformers/models/gpt_neox/configuration_gpt_neox.py +25 -25
  571. transformers/models/gpt_neox/modeling_gpt_neox.py +75 -76
  572. transformers/models/gpt_neox/modular_gpt_neox.py +66 -67
  573. transformers/models/gpt_neox/tokenization_gpt_neox.py +51 -9
  574. transformers/models/gpt_neox_japanese/configuration_gpt_neox_japanese.py +19 -24
  575. transformers/models/gpt_neox_japanese/modeling_gpt_neox_japanese.py +47 -46
  576. transformers/models/gpt_neox_japanese/tokenization_gpt_neox_japanese.py +3 -1
  577. transformers/models/gpt_oss/configuration_gpt_oss.py +28 -46
  578. transformers/models/gpt_oss/modeling_gpt_oss.py +121 -83
  579. transformers/models/gpt_oss/modular_gpt_oss.py +103 -64
  580. transformers/models/gpt_sw3/tokenization_gpt_sw3.py +4 -4
  581. transformers/models/gptj/configuration_gptj.py +4 -4
  582. transformers/models/gptj/modeling_gptj.py +87 -101
  583. transformers/models/granite/configuration_granite.py +33 -28
  584. transformers/models/granite/modeling_granite.py +46 -44
  585. transformers/models/granite/modular_granite.py +31 -29
  586. transformers/models/granite_speech/configuration_granite_speech.py +1 -0
  587. transformers/models/granite_speech/feature_extraction_granite_speech.py +3 -1
  588. transformers/models/granite_speech/modeling_granite_speech.py +52 -82
  589. transformers/models/granite_speech/processing_granite_speech.py +4 -11
  590. transformers/models/granitemoe/configuration_granitemoe.py +36 -31
  591. transformers/models/granitemoe/modeling_granitemoe.py +46 -41
  592. transformers/models/granitemoe/modular_granitemoe.py +27 -22
  593. transformers/models/granitemoehybrid/__init__.py +1 -0
  594. transformers/models/granitemoehybrid/configuration_granitemoehybrid.py +47 -46
  595. transformers/models/granitemoehybrid/modeling_granitemoehybrid.py +93 -97
  596. transformers/models/granitemoehybrid/modular_granitemoehybrid.py +21 -54
  597. transformers/models/granitemoeshared/configuration_granitemoeshared.py +37 -33
  598. transformers/models/granitemoeshared/modeling_granitemoeshared.py +61 -54
  599. transformers/models/granitemoeshared/modular_granitemoeshared.py +21 -19
  600. transformers/models/grounding_dino/configuration_grounding_dino.py +4 -6
  601. transformers/models/grounding_dino/image_processing_grounding_dino.py +62 -60
  602. transformers/models/grounding_dino/image_processing_grounding_dino_fast.py +29 -28
  603. transformers/models/grounding_dino/modeling_grounding_dino.py +140 -155
  604. transformers/models/grounding_dino/modular_grounding_dino.py +3 -2
  605. transformers/models/grounding_dino/processing_grounding_dino.py +38 -10
  606. transformers/models/groupvit/configuration_groupvit.py +2 -4
  607. transformers/models/groupvit/modeling_groupvit.py +93 -107
  608. transformers/models/helium/configuration_helium.py +29 -25
  609. transformers/models/helium/modeling_helium.py +40 -38
  610. transformers/models/helium/modular_helium.py +7 -3
  611. transformers/models/herbert/tokenization_herbert.py +28 -10
  612. transformers/models/hgnet_v2/configuration_hgnet_v2.py +1 -0
  613. transformers/models/hgnet_v2/modeling_hgnet_v2.py +10 -24
  614. transformers/models/hgnet_v2/modular_hgnet_v2.py +10 -24
  615. transformers/models/hiera/configuration_hiera.py +1 -0
  616. transformers/models/hiera/modeling_hiera.py +66 -72
  617. transformers/models/hubert/configuration_hubert.py +2 -4
  618. transformers/models/hubert/modeling_hubert.py +37 -42
  619. transformers/models/hubert/modular_hubert.py +11 -13
  620. transformers/models/hunyuan_v1_dense/configuration_hunyuan_v1_dense.py +31 -26
  621. transformers/models/hunyuan_v1_dense/modeling_hunyuan_v1_dense.py +38 -35
  622. transformers/models/hunyuan_v1_dense/modular_hunyuan_v1_dense.py +6 -4
  623. transformers/models/hunyuan_v1_moe/__init__.py +1 -1
  624. transformers/models/hunyuan_v1_moe/configuration_hunyuan_v1_moe.py +36 -31
  625. transformers/models/hunyuan_v1_moe/modeling_hunyuan_v1_moe.py +42 -47
  626. transformers/models/hunyuan_v1_moe/modular_hunyuan_v1_moe.py +9 -9
  627. transformers/models/ibert/configuration_ibert.py +2 -4
  628. transformers/models/ibert/modeling_ibert.py +62 -82
  629. transformers/models/ibert/quant_modules.py +1 -0
  630. transformers/models/idefics/configuration_idefics.py +8 -5
  631. transformers/models/idefics/image_processing_idefics.py +15 -13
  632. transformers/models/idefics/modeling_idefics.py +82 -75
  633. transformers/models/idefics/perceiver.py +3 -1
  634. transformers/models/idefics/processing_idefics.py +48 -32
  635. transformers/models/idefics/vision.py +25 -24
  636. transformers/models/idefics2/configuration_idefics2.py +3 -1
  637. transformers/models/idefics2/image_processing_idefics2.py +32 -31
  638. transformers/models/idefics2/image_processing_idefics2_fast.py +8 -8
  639. transformers/models/idefics2/modeling_idefics2.py +101 -127
  640. transformers/models/idefics2/processing_idefics2.py +68 -10
  641. transformers/models/idefics3/configuration_idefics3.py +4 -1
  642. transformers/models/idefics3/image_processing_idefics3.py +43 -42
  643. transformers/models/idefics3/image_processing_idefics3_fast.py +15 -40
  644. transformers/models/idefics3/modeling_idefics3.py +90 -115
  645. transformers/models/idefics3/processing_idefics3.py +69 -15
  646. transformers/models/ijepa/configuration_ijepa.py +1 -0
  647. transformers/models/ijepa/modeling_ijepa.py +11 -10
  648. transformers/models/ijepa/modular_ijepa.py +7 -5
  649. transformers/models/imagegpt/configuration_imagegpt.py +2 -9
  650. transformers/models/imagegpt/image_processing_imagegpt.py +18 -17
  651. transformers/models/imagegpt/image_processing_imagegpt_fast.py +16 -11
  652. transformers/models/imagegpt/modeling_imagegpt.py +65 -76
  653. transformers/models/informer/configuration_informer.py +9 -6
  654. transformers/models/informer/modeling_informer.py +86 -88
  655. transformers/models/informer/modular_informer.py +16 -14
  656. transformers/models/instructblip/configuration_instructblip.py +2 -2
  657. transformers/models/instructblip/modeling_instructblip.py +63 -103
  658. transformers/models/instructblip/processing_instructblip.py +36 -10
  659. transformers/models/instructblipvideo/configuration_instructblipvideo.py +2 -2
  660. transformers/models/instructblipvideo/modeling_instructblipvideo.py +139 -157
  661. transformers/models/instructblipvideo/modular_instructblipvideo.py +64 -73
  662. transformers/models/instructblipvideo/processing_instructblipvideo.py +33 -14
  663. transformers/models/instructblipvideo/video_processing_instructblipvideo.py +8 -6
  664. transformers/models/internvl/configuration_internvl.py +1 -0
  665. transformers/models/internvl/modeling_internvl.py +106 -85
  666. transformers/models/internvl/modular_internvl.py +67 -47
  667. transformers/models/internvl/processing_internvl.py +45 -12
  668. transformers/models/internvl/video_processing_internvl.py +12 -10
  669. transformers/models/jamba/configuration_jamba.py +8 -5
  670. transformers/models/jamba/modeling_jamba.py +66 -68
  671. transformers/models/jamba/modular_jamba.py +55 -54
  672. transformers/models/janus/configuration_janus.py +1 -0
  673. transformers/models/janus/image_processing_janus.py +37 -35
  674. transformers/models/janus/image_processing_janus_fast.py +20 -18
  675. transformers/models/janus/modeling_janus.py +191 -115
  676. transformers/models/janus/modular_janus.py +84 -133
  677. transformers/models/janus/processing_janus.py +43 -17
  678. transformers/models/jetmoe/configuration_jetmoe.py +26 -24
  679. transformers/models/jetmoe/modeling_jetmoe.py +46 -43
  680. transformers/models/jetmoe/modular_jetmoe.py +33 -31
  681. transformers/models/kosmos2/configuration_kosmos2.py +9 -10
  682. transformers/models/kosmos2/modeling_kosmos2.py +173 -208
  683. transformers/models/kosmos2/processing_kosmos2.py +55 -40
  684. transformers/models/kosmos2_5/__init__.py +1 -0
  685. transformers/models/kosmos2_5/configuration_kosmos2_5.py +9 -8
  686. transformers/models/kosmos2_5/image_processing_kosmos2_5.py +12 -10
  687. transformers/models/kosmos2_5/image_processing_kosmos2_5_fast.py +13 -4
  688. transformers/models/kosmos2_5/modeling_kosmos2_5.py +118 -132
  689. transformers/models/kosmos2_5/processing_kosmos2_5.py +29 -8
  690. transformers/models/kyutai_speech_to_text/configuration_kyutai_speech_to_text.py +28 -31
  691. transformers/models/kyutai_speech_to_text/feature_extraction_kyutai_speech_to_text.py +14 -12
  692. transformers/models/kyutai_speech_to_text/modeling_kyutai_speech_to_text.py +100 -110
  693. transformers/models/kyutai_speech_to_text/modular_kyutai_speech_to_text.py +22 -28
  694. transformers/models/kyutai_speech_to_text/processing_kyutai_speech_to_text.py +8 -2
  695. transformers/models/layoutlm/configuration_layoutlm.py +2 -14
  696. transformers/models/layoutlm/modeling_layoutlm.py +72 -77
  697. transformers/models/layoutlmv2/configuration_layoutlmv2.py +17 -14
  698. transformers/models/layoutlmv2/image_processing_layoutlmv2.py +21 -18
  699. transformers/models/layoutlmv2/image_processing_layoutlmv2_fast.py +9 -7
  700. transformers/models/layoutlmv2/modeling_layoutlmv2.py +50 -64
  701. transformers/models/layoutlmv2/processing_layoutlmv2.py +44 -14
  702. transformers/models/layoutlmv2/tokenization_layoutlmv2.py +126 -73
  703. transformers/models/layoutlmv3/configuration_layoutlmv3.py +19 -16
  704. transformers/models/layoutlmv3/image_processing_layoutlmv3.py +26 -24
  705. transformers/models/layoutlmv3/image_processing_layoutlmv3_fast.py +11 -9
  706. transformers/models/layoutlmv3/modeling_layoutlmv3.py +56 -82
  707. transformers/models/layoutlmv3/processing_layoutlmv3.py +46 -14
  708. transformers/models/layoutlmv3/tokenization_layoutlmv3.py +134 -74
  709. transformers/models/layoutxlm/configuration_layoutxlm.py +17 -14
  710. transformers/models/layoutxlm/modular_layoutxlm.py +1 -0
  711. transformers/models/layoutxlm/processing_layoutxlm.py +44 -14
  712. transformers/models/layoutxlm/tokenization_layoutxlm.py +113 -77
  713. transformers/models/led/configuration_led.py +12 -8
  714. transformers/models/led/modeling_led.py +266 -124
  715. transformers/models/levit/configuration_levit.py +1 -0
  716. transformers/models/levit/image_processing_levit.py +21 -19
  717. transformers/models/levit/image_processing_levit_fast.py +5 -4
  718. transformers/models/levit/modeling_levit.py +19 -38
  719. transformers/models/lfm2/configuration_lfm2.py +30 -27
  720. transformers/models/lfm2/modeling_lfm2.py +50 -47
  721. transformers/models/lfm2/modular_lfm2.py +30 -29
  722. transformers/models/lfm2_moe/__init__.py +1 -0
  723. transformers/models/lfm2_moe/configuration_lfm2_moe.py +9 -6
  724. transformers/models/lfm2_moe/modeling_lfm2_moe.py +53 -61
  725. transformers/models/lfm2_moe/modular_lfm2_moe.py +37 -13
  726. transformers/models/lfm2_vl/configuration_lfm2_vl.py +1 -4
  727. transformers/models/lfm2_vl/image_processing_lfm2_vl_fast.py +12 -41
  728. transformers/models/lfm2_vl/modeling_lfm2_vl.py +66 -84
  729. transformers/models/lfm2_vl/modular_lfm2_vl.py +56 -70
  730. transformers/models/lfm2_vl/processing_lfm2_vl.py +76 -96
  731. transformers/models/lightglue/image_processing_lightglue.py +15 -16
  732. transformers/models/lightglue/image_processing_lightglue_fast.py +9 -9
  733. transformers/models/lightglue/modeling_lightglue.py +31 -31
  734. transformers/models/lightglue/modular_lightglue.py +28 -29
  735. transformers/models/lilt/configuration_lilt.py +2 -6
  736. transformers/models/lilt/modeling_lilt.py +70 -76
  737. transformers/models/llama/configuration_llama.py +31 -26
  738. transformers/models/llama/modeling_llama.py +39 -36
  739. transformers/models/llama/tokenization_llama.py +44 -14
  740. transformers/models/llama4/configuration_llama4.py +30 -27
  741. transformers/models/llama4/image_processing_llama4_fast.py +14 -12
  742. transformers/models/llama4/modeling_llama4.py +113 -120
  743. transformers/models/llama4/processing_llama4.py +57 -33
  744. transformers/models/llava/configuration_llava.py +1 -10
  745. transformers/models/llava/image_processing_llava.py +28 -25
  746. transformers/models/llava/image_processing_llava_fast.py +11 -9
  747. transformers/models/llava/modeling_llava.py +109 -85
  748. transformers/models/llava/processing_llava.py +51 -18
  749. transformers/models/llava_next/configuration_llava_next.py +2 -2
  750. transformers/models/llava_next/image_processing_llava_next.py +45 -43
  751. transformers/models/llava_next/image_processing_llava_next_fast.py +13 -11
  752. transformers/models/llava_next/modeling_llava_next.py +107 -110
  753. transformers/models/llava_next/processing_llava_next.py +47 -18
  754. transformers/models/llava_next_video/configuration_llava_next_video.py +7 -4
  755. transformers/models/llava_next_video/modeling_llava_next_video.py +158 -175
  756. transformers/models/llava_next_video/modular_llava_next_video.py +150 -155
  757. transformers/models/llava_next_video/processing_llava_next_video.py +63 -21
  758. transformers/models/llava_next_video/video_processing_llava_next_video.py +1 -0
  759. transformers/models/llava_onevision/configuration_llava_onevision.py +7 -4
  760. transformers/models/llava_onevision/image_processing_llava_onevision.py +42 -40
  761. transformers/models/llava_onevision/image_processing_llava_onevision_fast.py +15 -14
  762. transformers/models/llava_onevision/modeling_llava_onevision.py +169 -177
  763. transformers/models/llava_onevision/modular_llava_onevision.py +156 -163
  764. transformers/models/llava_onevision/processing_llava_onevision.py +53 -21
  765. transformers/models/llava_onevision/video_processing_llava_onevision.py +1 -0
  766. transformers/models/longcat_flash/__init__.py +1 -0
  767. transformers/models/longcat_flash/configuration_longcat_flash.py +42 -37
  768. transformers/models/longcat_flash/modeling_longcat_flash.py +36 -36
  769. transformers/models/longcat_flash/modular_longcat_flash.py +21 -21
  770. transformers/models/longformer/configuration_longformer.py +5 -5
  771. transformers/models/longformer/modeling_longformer.py +101 -105
  772. transformers/models/longt5/configuration_longt5.py +7 -9
  773. transformers/models/longt5/modeling_longt5.py +49 -49
  774. transformers/models/luke/configuration_luke.py +2 -8
  775. transformers/models/luke/modeling_luke.py +181 -188
  776. transformers/models/luke/tokenization_luke.py +140 -107
  777. transformers/models/lxmert/configuration_lxmert.py +1 -16
  778. transformers/models/lxmert/modeling_lxmert.py +74 -65
  779. transformers/models/m2m_100/configuration_m2m_100.py +9 -7
  780. transformers/models/m2m_100/modeling_m2m_100.py +71 -83
  781. transformers/models/m2m_100/tokenization_m2m_100.py +8 -8
  782. transformers/models/mamba/configuration_mamba.py +2 -1
  783. transformers/models/mamba/modeling_mamba.py +66 -58
  784. transformers/models/mamba2/configuration_mamba2.py +8 -5
  785. transformers/models/mamba2/modeling_mamba2.py +69 -68
  786. transformers/models/marian/configuration_marian.py +5 -10
  787. transformers/models/marian/modeling_marian.py +87 -93
  788. transformers/models/marian/tokenization_marian.py +6 -6
  789. transformers/models/markuplm/configuration_markuplm.py +7 -4
  790. transformers/models/markuplm/feature_extraction_markuplm.py +2 -1
  791. transformers/models/markuplm/modeling_markuplm.py +70 -69
  792. transformers/models/markuplm/processing_markuplm.py +38 -31
  793. transformers/models/markuplm/tokenization_markuplm.py +136 -93
  794. transformers/models/mask2former/configuration_mask2former.py +8 -5
  795. transformers/models/mask2former/image_processing_mask2former.py +85 -84
  796. transformers/models/mask2former/image_processing_mask2former_fast.py +40 -37
  797. transformers/models/mask2former/modeling_mask2former.py +103 -118
  798. transformers/models/mask2former/modular_mask2former.py +8 -6
  799. transformers/models/maskformer/configuration_maskformer.py +9 -6
  800. transformers/models/maskformer/configuration_maskformer_swin.py +1 -0
  801. transformers/models/maskformer/image_processing_maskformer.py +85 -84
  802. transformers/models/maskformer/image_processing_maskformer_fast.py +40 -36
  803. transformers/models/maskformer/modeling_maskformer.py +65 -79
  804. transformers/models/maskformer/modeling_maskformer_swin.py +32 -36
  805. transformers/models/mbart/configuration_mbart.py +4 -9
  806. transformers/models/mbart/modeling_mbart.py +116 -131
  807. transformers/models/mbart/tokenization_mbart.py +54 -11
  808. transformers/models/mbart50/tokenization_mbart50.py +13 -8
  809. transformers/models/megatron_bert/configuration_megatron_bert.py +3 -13
  810. transformers/models/megatron_bert/modeling_megatron_bert.py +150 -148
  811. transformers/models/metaclip_2/configuration_metaclip_2.py +1 -4
  812. transformers/models/metaclip_2/modeling_metaclip_2.py +84 -91
  813. transformers/models/metaclip_2/modular_metaclip_2.py +45 -61
  814. transformers/models/mgp_str/configuration_mgp_str.py +1 -0
  815. transformers/models/mgp_str/modeling_mgp_str.py +18 -20
  816. transformers/models/mgp_str/processing_mgp_str.py +20 -3
  817. transformers/models/mgp_str/tokenization_mgp_str.py +3 -1
  818. transformers/models/mimi/configuration_mimi.py +40 -42
  819. transformers/models/mimi/modeling_mimi.py +113 -142
  820. transformers/models/minimax/__init__.py +1 -0
  821. transformers/models/minimax/configuration_minimax.py +43 -37
  822. transformers/models/minimax/modeling_minimax.py +51 -61
  823. transformers/models/minimax/modular_minimax.py +62 -68
  824. transformers/models/ministral/configuration_ministral.py +29 -25
  825. transformers/models/ministral/modeling_ministral.py +38 -36
  826. transformers/models/ministral/modular_ministral.py +37 -32
  827. transformers/models/ministral3/configuration_ministral3.py +27 -24
  828. transformers/models/ministral3/modeling_ministral3.py +37 -36
  829. transformers/models/ministral3/modular_ministral3.py +5 -4
  830. transformers/models/mistral/configuration_mistral.py +29 -24
  831. transformers/models/mistral/modeling_mistral.py +37 -36
  832. transformers/models/mistral/modular_mistral.py +12 -11
  833. transformers/models/mistral3/configuration_mistral3.py +1 -4
  834. transformers/models/mistral3/modeling_mistral3.py +86 -89
  835. transformers/models/mistral3/modular_mistral3.py +68 -69
  836. transformers/models/mixtral/configuration_mixtral.py +34 -29
  837. transformers/models/mixtral/modeling_mixtral.py +45 -50
  838. transformers/models/mixtral/modular_mixtral.py +31 -32
  839. transformers/models/mlcd/configuration_mlcd.py +1 -0
  840. transformers/models/mlcd/modeling_mlcd.py +14 -20
  841. transformers/models/mlcd/modular_mlcd.py +13 -17
  842. transformers/models/mllama/configuration_mllama.py +15 -10
  843. transformers/models/mllama/image_processing_mllama.py +25 -23
  844. transformers/models/mllama/image_processing_mllama_fast.py +11 -11
  845. transformers/models/mllama/modeling_mllama.py +94 -105
  846. transformers/models/mllama/processing_mllama.py +55 -6
  847. transformers/models/mluke/tokenization_mluke.py +107 -101
  848. transformers/models/mm_grounding_dino/configuration_mm_grounding_dino.py +3 -5
  849. transformers/models/mm_grounding_dino/modeling_mm_grounding_dino.py +140 -155
  850. transformers/models/mm_grounding_dino/modular_mm_grounding_dino.py +3 -5
  851. transformers/models/mobilebert/configuration_mobilebert.py +2 -4
  852. transformers/models/mobilebert/modeling_mobilebert.py +85 -77
  853. transformers/models/mobilebert/tokenization_mobilebert.py +1 -0
  854. transformers/models/mobilenet_v1/configuration_mobilenet_v1.py +1 -0
  855. transformers/models/mobilenet_v1/image_processing_mobilenet_v1.py +23 -20
  856. transformers/models/mobilenet_v1/image_processing_mobilenet_v1_fast.py +1 -0
  857. transformers/models/mobilenet_v1/modeling_mobilenet_v1.py +16 -15
  858. transformers/models/mobilenet_v2/configuration_mobilenet_v2.py +1 -0
  859. transformers/models/mobilenet_v2/image_processing_mobilenet_v2.py +51 -48
  860. transformers/models/mobilenet_v2/image_processing_mobilenet_v2_fast.py +15 -13
  861. transformers/models/mobilenet_v2/modeling_mobilenet_v2.py +22 -24
  862. transformers/models/mobilevit/configuration_mobilevit.py +1 -0
  863. transformers/models/mobilevit/image_processing_mobilevit.py +49 -46
  864. transformers/models/mobilevit/image_processing_mobilevit_fast.py +14 -12
  865. transformers/models/mobilevit/modeling_mobilevit.py +21 -28
  866. transformers/models/mobilevitv2/configuration_mobilevitv2.py +1 -0
  867. transformers/models/mobilevitv2/modeling_mobilevitv2.py +22 -28
  868. transformers/models/modernbert/configuration_modernbert.py +42 -44
  869. transformers/models/modernbert/modeling_modernbert.py +133 -145
  870. transformers/models/modernbert/modular_modernbert.py +170 -186
  871. transformers/models/modernbert_decoder/configuration_modernbert_decoder.py +40 -40
  872. transformers/models/modernbert_decoder/modeling_modernbert_decoder.py +57 -62
  873. transformers/models/modernbert_decoder/modular_modernbert_decoder.py +86 -94
  874. transformers/models/moonshine/configuration_moonshine.py +31 -34
  875. transformers/models/moonshine/modeling_moonshine.py +71 -71
  876. transformers/models/moonshine/modular_moonshine.py +83 -88
  877. transformers/models/moshi/configuration_moshi.py +23 -46
  878. transformers/models/moshi/modeling_moshi.py +187 -157
  879. transformers/models/mpnet/configuration_mpnet.py +2 -6
  880. transformers/models/mpnet/modeling_mpnet.py +57 -62
  881. transformers/models/mpnet/tokenization_mpnet.py +15 -4
  882. transformers/models/mpt/configuration_mpt.py +9 -5
  883. transformers/models/mpt/modeling_mpt.py +60 -60
  884. transformers/models/mra/configuration_mra.py +2 -8
  885. transformers/models/mra/modeling_mra.py +57 -64
  886. transformers/models/mt5/configuration_mt5.py +8 -10
  887. transformers/models/mt5/modeling_mt5.py +95 -87
  888. transformers/models/musicgen/configuration_musicgen.py +8 -12
  889. transformers/models/musicgen/modeling_musicgen.py +122 -118
  890. transformers/models/musicgen/processing_musicgen.py +21 -3
  891. transformers/models/musicgen_melody/configuration_musicgen_melody.py +8 -15
  892. transformers/models/musicgen_melody/feature_extraction_musicgen_melody.py +9 -8
  893. transformers/models/musicgen_melody/modeling_musicgen_melody.py +123 -117
  894. transformers/models/musicgen_melody/processing_musicgen_melody.py +22 -3
  895. transformers/models/mvp/configuration_mvp.py +5 -8
  896. transformers/models/mvp/modeling_mvp.py +123 -135
  897. transformers/models/myt5/tokenization_myt5.py +10 -8
  898. transformers/models/nanochat/configuration_nanochat.py +8 -5
  899. transformers/models/nanochat/modeling_nanochat.py +40 -37
  900. transformers/models/nanochat/modular_nanochat.py +14 -12
  901. transformers/models/nemotron/configuration_nemotron.py +30 -25
  902. transformers/models/nemotron/modeling_nemotron.py +57 -56
  903. transformers/models/nllb/tokenization_nllb.py +28 -12
  904. transformers/models/nllb_moe/configuration_nllb_moe.py +9 -7
  905. transformers/models/nllb_moe/modeling_nllb_moe.py +69 -77
  906. transformers/models/nougat/image_processing_nougat.py +32 -29
  907. transformers/models/nougat/image_processing_nougat_fast.py +14 -12
  908. transformers/models/nougat/processing_nougat.py +39 -37
  909. transformers/models/nougat/tokenization_nougat.py +73 -18
  910. transformers/models/nystromformer/configuration_nystromformer.py +2 -8
  911. transformers/models/nystromformer/modeling_nystromformer.py +63 -74
  912. transformers/models/olmo/configuration_olmo.py +28 -23
  913. transformers/models/olmo/modeling_olmo.py +39 -36
  914. transformers/models/olmo/modular_olmo.py +11 -7
  915. transformers/models/olmo2/configuration_olmo2.py +28 -23
  916. transformers/models/olmo2/modeling_olmo2.py +41 -37
  917. transformers/models/olmo2/modular_olmo2.py +32 -29
  918. transformers/models/olmo3/__init__.py +1 -0
  919. transformers/models/olmo3/configuration_olmo3.py +30 -26
  920. transformers/models/olmo3/modeling_olmo3.py +39 -36
  921. transformers/models/olmo3/modular_olmo3.py +40 -37
  922. transformers/models/olmoe/configuration_olmoe.py +33 -29
  923. transformers/models/olmoe/modeling_olmoe.py +46 -52
  924. transformers/models/olmoe/modular_olmoe.py +15 -16
  925. transformers/models/omdet_turbo/configuration_omdet_turbo.py +4 -2
  926. transformers/models/omdet_turbo/modeling_omdet_turbo.py +47 -53
  927. transformers/models/omdet_turbo/processing_omdet_turbo.py +67 -19
  928. transformers/models/oneformer/configuration_oneformer.py +8 -5
  929. transformers/models/oneformer/image_processing_oneformer.py +84 -83
  930. transformers/models/oneformer/image_processing_oneformer_fast.py +42 -41
  931. transformers/models/oneformer/modeling_oneformer.py +171 -147
  932. transformers/models/oneformer/processing_oneformer.py +43 -28
  933. transformers/models/openai/configuration_openai.py +1 -16
  934. transformers/models/openai/modeling_openai.py +51 -65
  935. transformers/models/openai/tokenization_openai.py +47 -8
  936. transformers/models/opt/configuration_opt.py +7 -6
  937. transformers/models/opt/modeling_opt.py +76 -78
  938. transformers/models/ovis2/__init__.py +1 -0
  939. transformers/models/ovis2/configuration_ovis2.py +1 -0
  940. transformers/models/ovis2/image_processing_ovis2.py +24 -22
  941. transformers/models/ovis2/image_processing_ovis2_fast.py +11 -9
  942. transformers/models/ovis2/modeling_ovis2.py +142 -111
  943. transformers/models/ovis2/modular_ovis2.py +45 -90
  944. transformers/models/ovis2/processing_ovis2.py +40 -12
  945. transformers/models/owlv2/configuration_owlv2.py +2 -4
  946. transformers/models/owlv2/image_processing_owlv2.py +21 -20
  947. transformers/models/owlv2/image_processing_owlv2_fast.py +15 -12
  948. transformers/models/owlv2/modeling_owlv2.py +117 -133
  949. transformers/models/owlv2/modular_owlv2.py +14 -11
  950. transformers/models/owlv2/processing_owlv2.py +49 -20
  951. transformers/models/owlvit/configuration_owlvit.py +2 -4
  952. transformers/models/owlvit/image_processing_owlvit.py +22 -21
  953. transformers/models/owlvit/image_processing_owlvit_fast.py +3 -2
  954. transformers/models/owlvit/modeling_owlvit.py +116 -132
  955. transformers/models/owlvit/processing_owlvit.py +48 -20
  956. transformers/models/paligemma/configuration_paligemma.py +1 -4
  957. transformers/models/paligemma/modeling_paligemma.py +93 -103
  958. transformers/models/paligemma/processing_paligemma.py +66 -13
  959. transformers/models/parakeet/configuration_parakeet.py +14 -7
  960. transformers/models/parakeet/feature_extraction_parakeet.py +12 -10
  961. transformers/models/parakeet/modeling_parakeet.py +28 -32
  962. transformers/models/parakeet/modular_parakeet.py +20 -23
  963. transformers/models/parakeet/processing_parakeet.py +5 -13
  964. transformers/models/parakeet/{tokenization_parakeet.py → tokenization_parakeet_fast.py} +7 -5
  965. transformers/models/patchtsmixer/configuration_patchtsmixer.py +8 -5
  966. transformers/models/patchtsmixer/modeling_patchtsmixer.py +62 -70
  967. transformers/models/patchtst/configuration_patchtst.py +9 -6
  968. transformers/models/patchtst/modeling_patchtst.py +80 -97
  969. transformers/models/pegasus/configuration_pegasus.py +5 -8
  970. transformers/models/pegasus/modeling_pegasus.py +66 -72
  971. transformers/models/pegasus/tokenization_pegasus.py +45 -15
  972. transformers/models/pegasus_x/configuration_pegasus_x.py +4 -5
  973. transformers/models/pegasus_x/modeling_pegasus_x.py +52 -55
  974. transformers/models/perceiver/configuration_perceiver.py +1 -0
  975. transformers/models/perceiver/image_processing_perceiver.py +25 -22
  976. transformers/models/perceiver/image_processing_perceiver_fast.py +9 -7
  977. transformers/models/perceiver/modeling_perceiver.py +146 -165
  978. transformers/models/perceiver/tokenization_perceiver.py +6 -3
  979. transformers/models/perception_lm/configuration_perception_lm.py +1 -0
  980. transformers/models/perception_lm/image_processing_perception_lm_fast.py +10 -8
  981. transformers/models/perception_lm/modeling_perception_lm.py +70 -71
  982. transformers/models/perception_lm/modular_perception_lm.py +61 -65
  983. transformers/models/perception_lm/processing_perception_lm.py +47 -13
  984. transformers/models/perception_lm/video_processing_perception_lm.py +1 -0
  985. transformers/models/persimmon/configuration_persimmon.py +28 -23
  986. transformers/models/persimmon/modeling_persimmon.py +45 -43
  987. transformers/models/phi/configuration_phi.py +28 -23
  988. transformers/models/phi/modeling_phi.py +43 -40
  989. transformers/models/phi/modular_phi.py +24 -23
  990. transformers/models/phi3/configuration_phi3.py +33 -28
  991. transformers/models/phi3/modeling_phi3.py +38 -36
  992. transformers/models/phi3/modular_phi3.py +17 -13
  993. transformers/models/phi4_multimodal/configuration_phi4_multimodal.py +33 -30
  994. transformers/models/phi4_multimodal/feature_extraction_phi4_multimodal.py +9 -7
  995. transformers/models/phi4_multimodal/image_processing_phi4_multimodal_fast.py +11 -11
  996. transformers/models/phi4_multimodal/modeling_phi4_multimodal.py +78 -95
  997. transformers/models/phi4_multimodal/modular_phi4_multimodal.py +80 -98
  998. transformers/models/phi4_multimodal/processing_phi4_multimodal.py +44 -7
  999. transformers/models/phimoe/configuration_phimoe.py +36 -31
  1000. transformers/models/phimoe/modeling_phimoe.py +45 -50
  1001. transformers/models/phimoe/modular_phimoe.py +4 -3
  1002. transformers/models/phobert/tokenization_phobert.py +6 -4
  1003. transformers/models/pix2struct/configuration_pix2struct.py +10 -12
  1004. transformers/models/pix2struct/image_processing_pix2struct.py +19 -15
  1005. transformers/models/pix2struct/image_processing_pix2struct_fast.py +15 -12
  1006. transformers/models/pix2struct/modeling_pix2struct.py +52 -58
  1007. transformers/models/pix2struct/processing_pix2struct.py +30 -5
  1008. transformers/models/pixtral/configuration_pixtral.py +14 -11
  1009. transformers/models/pixtral/image_processing_pixtral.py +28 -26
  1010. transformers/models/pixtral/image_processing_pixtral_fast.py +11 -10
  1011. transformers/models/pixtral/modeling_pixtral.py +34 -28
  1012. transformers/models/pixtral/processing_pixtral.py +53 -21
  1013. transformers/models/plbart/configuration_plbart.py +5 -8
  1014. transformers/models/plbart/modeling_plbart.py +106 -119
  1015. transformers/models/plbart/modular_plbart.py +33 -39
  1016. transformers/models/plbart/tokenization_plbart.py +7 -4
  1017. transformers/models/poolformer/configuration_poolformer.py +1 -0
  1018. transformers/models/poolformer/image_processing_poolformer.py +24 -21
  1019. transformers/models/poolformer/image_processing_poolformer_fast.py +15 -13
  1020. transformers/models/poolformer/modeling_poolformer.py +13 -23
  1021. transformers/models/pop2piano/configuration_pop2piano.py +8 -7
  1022. transformers/models/pop2piano/feature_extraction_pop2piano.py +9 -6
  1023. transformers/models/pop2piano/modeling_pop2piano.py +24 -26
  1024. transformers/models/pop2piano/processing_pop2piano.py +33 -25
  1025. transformers/models/pop2piano/tokenization_pop2piano.py +23 -15
  1026. transformers/models/prompt_depth_anything/configuration_prompt_depth_anything.py +3 -3
  1027. transformers/models/prompt_depth_anything/image_processing_prompt_depth_anything.py +28 -28
  1028. transformers/models/prompt_depth_anything/image_processing_prompt_depth_anything_fast.py +21 -20
  1029. transformers/models/prompt_depth_anything/modeling_prompt_depth_anything.py +13 -16
  1030. transformers/models/prompt_depth_anything/modular_prompt_depth_anything.py +13 -16
  1031. transformers/models/prophetnet/configuration_prophetnet.py +38 -37
  1032. transformers/models/prophetnet/modeling_prophetnet.py +131 -114
  1033. transformers/models/prophetnet/tokenization_prophetnet.py +16 -14
  1034. transformers/models/pvt/configuration_pvt.py +1 -0
  1035. transformers/models/pvt/image_processing_pvt.py +27 -24
  1036. transformers/models/pvt/image_processing_pvt_fast.py +2 -1
  1037. transformers/models/pvt/modeling_pvt.py +21 -21
  1038. transformers/models/pvt_v2/configuration_pvt_v2.py +4 -2
  1039. transformers/models/pvt_v2/modeling_pvt_v2.py +25 -28
  1040. transformers/models/qwen2/configuration_qwen2.py +25 -32
  1041. transformers/models/qwen2/modeling_qwen2.py +38 -36
  1042. transformers/models/qwen2/modular_qwen2.py +12 -11
  1043. transformers/models/qwen2/tokenization_qwen2.py +23 -12
  1044. transformers/models/qwen2_5_omni/configuration_qwen2_5_omni.py +26 -32
  1045. transformers/models/qwen2_5_omni/modeling_qwen2_5_omni.py +277 -340
  1046. transformers/models/qwen2_5_omni/modular_qwen2_5_omni.py +211 -278
  1047. transformers/models/qwen2_5_omni/processing_qwen2_5_omni.py +49 -41
  1048. transformers/models/qwen2_5_vl/configuration_qwen2_5_vl.py +35 -29
  1049. transformers/models/qwen2_5_vl/modeling_qwen2_5_vl.py +148 -203
  1050. transformers/models/qwen2_5_vl/modular_qwen2_5_vl.py +118 -93
  1051. transformers/models/qwen2_5_vl/processing_qwen2_5_vl.py +43 -7
  1052. transformers/models/qwen2_audio/configuration_qwen2_audio.py +1 -0
  1053. transformers/models/qwen2_audio/modeling_qwen2_audio.py +40 -40
  1054. transformers/models/qwen2_audio/processing_qwen2_audio.py +42 -13
  1055. transformers/models/qwen2_moe/configuration_qwen2_moe.py +35 -42
  1056. transformers/models/qwen2_moe/modeling_qwen2_moe.py +46 -51
  1057. transformers/models/qwen2_moe/modular_qwen2_moe.py +10 -7
  1058. transformers/models/qwen2_vl/configuration_qwen2_vl.py +34 -29
  1059. transformers/models/qwen2_vl/image_processing_qwen2_vl.py +42 -41
  1060. transformers/models/qwen2_vl/image_processing_qwen2_vl_fast.py +15 -12
  1061. transformers/models/qwen2_vl/modeling_qwen2_vl.py +153 -199
  1062. transformers/models/qwen2_vl/processing_qwen2_vl.py +44 -7
  1063. transformers/models/qwen2_vl/video_processing_qwen2_vl.py +18 -38
  1064. transformers/models/qwen3/configuration_qwen3.py +27 -34
  1065. transformers/models/qwen3/modeling_qwen3.py +39 -36
  1066. transformers/models/qwen3/modular_qwen3.py +6 -4
  1067. transformers/models/qwen3_moe/configuration_qwen3_moe.py +32 -39
  1068. transformers/models/qwen3_moe/modeling_qwen3_moe.py +46 -51
  1069. transformers/models/qwen3_moe/modular_qwen3_moe.py +13 -10
  1070. transformers/models/qwen3_next/configuration_qwen3_next.py +35 -45
  1071. transformers/models/qwen3_next/modeling_qwen3_next.py +51 -47
  1072. transformers/models/qwen3_next/modular_qwen3_next.py +35 -34
  1073. transformers/models/qwen3_omni_moe/configuration_qwen3_omni_moe.py +101 -135
  1074. transformers/models/qwen3_omni_moe/modeling_qwen3_omni_moe.py +252 -355
  1075. transformers/models/qwen3_omni_moe/modular_qwen3_omni_moe.py +196 -250
  1076. transformers/models/qwen3_omni_moe/processing_qwen3_omni_moe.py +48 -40
  1077. transformers/models/qwen3_vl/configuration_qwen3_vl.py +29 -27
  1078. transformers/models/qwen3_vl/modeling_qwen3_vl.py +155 -233
  1079. transformers/models/qwen3_vl/modular_qwen3_vl.py +179 -206
  1080. transformers/models/qwen3_vl/processing_qwen3_vl.py +42 -6
  1081. transformers/models/qwen3_vl/video_processing_qwen3_vl.py +12 -10
  1082. transformers/models/qwen3_vl_moe/configuration_qwen3_vl_moe.py +30 -23
  1083. transformers/models/qwen3_vl_moe/modeling_qwen3_vl_moe.py +303 -358
  1084. transformers/models/qwen3_vl_moe/modular_qwen3_vl_moe.py +124 -87
  1085. transformers/models/rag/configuration_rag.py +15 -6
  1086. transformers/models/rag/modeling_rag.py +130 -127
  1087. transformers/models/rag/retrieval_rag.py +5 -3
  1088. transformers/models/rag/tokenization_rag.py +50 -0
  1089. transformers/models/recurrent_gemma/configuration_recurrent_gemma.py +30 -29
  1090. transformers/models/recurrent_gemma/modeling_recurrent_gemma.py +42 -53
  1091. transformers/models/reformer/configuration_reformer.py +8 -7
  1092. transformers/models/reformer/modeling_reformer.py +69 -80
  1093. transformers/models/reformer/tokenization_reformer.py +31 -11
  1094. transformers/models/regnet/configuration_regnet.py +1 -0
  1095. transformers/models/regnet/modeling_regnet.py +8 -15
  1096. transformers/models/rembert/configuration_rembert.py +2 -8
  1097. transformers/models/rembert/modeling_rembert.py +111 -121
  1098. transformers/models/rembert/tokenization_rembert.py +12 -2
  1099. transformers/models/resnet/configuration_resnet.py +1 -0
  1100. transformers/models/resnet/modeling_resnet.py +13 -27
  1101. transformers/models/roberta/configuration_roberta.py +3 -11
  1102. transformers/models/roberta/modeling_roberta.py +93 -94
  1103. transformers/models/roberta/modular_roberta.py +58 -58
  1104. transformers/models/roberta/tokenization_roberta.py +29 -17
  1105. transformers/models/roberta/tokenization_roberta_old.py +4 -2
  1106. transformers/models/roberta_prelayernorm/configuration_roberta_prelayernorm.py +3 -11
  1107. transformers/models/roberta_prelayernorm/modeling_roberta_prelayernorm.py +93 -94
  1108. transformers/models/roc_bert/configuration_roc_bert.py +2 -8
  1109. transformers/models/roc_bert/modeling_roc_bert.py +121 -122
  1110. transformers/models/roc_bert/tokenization_roc_bert.py +94 -88
  1111. transformers/models/roformer/configuration_roformer.py +3 -13
  1112. transformers/models/roformer/modeling_roformer.py +81 -85
  1113. transformers/models/roformer/tokenization_roformer.py +412 -74
  1114. transformers/models/roformer/tokenization_roformer_fast.py +160 -0
  1115. transformers/models/roformer/tokenization_utils.py +1 -0
  1116. transformers/models/rt_detr/configuration_rt_detr.py +2 -1
  1117. transformers/models/rt_detr/configuration_rt_detr_resnet.py +1 -0
  1118. transformers/models/rt_detr/image_processing_rt_detr.py +55 -54
  1119. transformers/models/rt_detr/image_processing_rt_detr_fast.py +26 -26
  1120. transformers/models/rt_detr/modeling_rt_detr.py +90 -99
  1121. transformers/models/rt_detr/modeling_rt_detr_resnet.py +6 -13
  1122. transformers/models/rt_detr/modular_rt_detr.py +16 -16
  1123. transformers/models/rt_detr_v2/configuration_rt_detr_v2.py +4 -6
  1124. transformers/models/rt_detr_v2/modeling_rt_detr_v2.py +90 -101
  1125. transformers/models/rt_detr_v2/modular_rt_detr_v2.py +12 -19
  1126. transformers/models/rwkv/configuration_rwkv.py +4 -2
  1127. transformers/models/rwkv/modeling_rwkv.py +32 -31
  1128. transformers/models/sam/configuration_sam.py +1 -3
  1129. transformers/models/sam/image_processing_sam.py +60 -59
  1130. transformers/models/sam/image_processing_sam_fast.py +27 -25
  1131. transformers/models/sam/modeling_sam.py +41 -47
  1132. transformers/models/sam/processing_sam.py +27 -39
  1133. transformers/models/sam2/configuration_sam2.py +3 -2
  1134. transformers/models/sam2/image_processing_sam2_fast.py +15 -14
  1135. transformers/models/sam2/modeling_sam2.py +90 -96
  1136. transformers/models/sam2/modular_sam2.py +91 -86
  1137. transformers/models/sam2/processing_sam2.py +47 -31
  1138. transformers/models/sam2_video/configuration_sam2_video.py +1 -0
  1139. transformers/models/sam2_video/modeling_sam2_video.py +144 -151
  1140. transformers/models/sam2_video/modular_sam2_video.py +104 -101
  1141. transformers/models/sam2_video/processing_sam2_video.py +66 -49
  1142. transformers/models/sam2_video/video_processing_sam2_video.py +4 -1
  1143. transformers/models/sam3/configuration_sam3.py +2 -21
  1144. transformers/models/sam3/image_processing_sam3_fast.py +20 -17
  1145. transformers/models/sam3/modeling_sam3.py +170 -184
  1146. transformers/models/sam3/modular_sam3.py +8 -3
  1147. transformers/models/sam3/processing_sam3.py +52 -37
  1148. transformers/models/sam3_tracker/__init__.py +1 -0
  1149. transformers/models/sam3_tracker/configuration_sam3_tracker.py +3 -1
  1150. transformers/models/sam3_tracker/modeling_sam3_tracker.py +77 -82
  1151. transformers/models/sam3_tracker/modular_sam3_tracker.py +3 -8
  1152. transformers/models/sam3_tracker/processing_sam3_tracker.py +48 -31
  1153. transformers/models/sam3_tracker_video/__init__.py +1 -0
  1154. transformers/models/sam3_tracker_video/configuration_sam3_tracker_video.py +1 -25
  1155. transformers/models/sam3_tracker_video/modeling_sam3_tracker_video.py +122 -135
  1156. transformers/models/sam3_tracker_video/modular_sam3_tracker_video.py +26 -35
  1157. transformers/models/sam3_tracker_video/processing_sam3_tracker_video.py +66 -50
  1158. transformers/models/sam3_video/configuration_sam3_video.py +1 -14
  1159. transformers/models/sam3_video/modeling_sam3_video.py +34 -33
  1160. transformers/models/sam3_video/processing_sam3_video.py +46 -26
  1161. transformers/models/sam_hq/__init__.py +1 -1
  1162. transformers/models/sam_hq/configuration_sam_hq.py +1 -3
  1163. transformers/models/sam_hq/modeling_sam_hq.py +69 -74
  1164. transformers/models/sam_hq/modular_sam_hq.py +25 -23
  1165. transformers/models/sam_hq/{processing_sam_hq.py → processing_samhq.py} +29 -41
  1166. transformers/models/seamless_m4t/configuration_seamless_m4t.py +10 -8
  1167. transformers/models/seamless_m4t/feature_extraction_seamless_m4t.py +11 -8
  1168. transformers/models/seamless_m4t/modeling_seamless_m4t.py +194 -212
  1169. transformers/models/seamless_m4t/processing_seamless_m4t.py +39 -18
  1170. transformers/models/seamless_m4t/tokenization_seamless_m4t.py +77 -40
  1171. transformers/models/seamless_m4t_v2/configuration_seamless_m4t_v2.py +10 -8
  1172. transformers/models/seamless_m4t_v2/modeling_seamless_m4t_v2.py +196 -204
  1173. transformers/models/seed_oss/configuration_seed_oss.py +32 -28
  1174. transformers/models/seed_oss/modeling_seed_oss.py +35 -33
  1175. transformers/models/seed_oss/modular_seed_oss.py +4 -3
  1176. transformers/models/segformer/configuration_segformer.py +10 -0
  1177. transformers/models/segformer/image_processing_segformer.py +42 -39
  1178. transformers/models/segformer/image_processing_segformer_fast.py +12 -10
  1179. transformers/models/segformer/modeling_segformer.py +31 -34
  1180. transformers/models/segformer/modular_segformer.py +10 -8
  1181. transformers/models/seggpt/configuration_seggpt.py +1 -0
  1182. transformers/models/seggpt/image_processing_seggpt.py +41 -38
  1183. transformers/models/seggpt/modeling_seggpt.py +38 -50
  1184. transformers/models/sew/configuration_sew.py +2 -4
  1185. transformers/models/sew/modeling_sew.py +36 -38
  1186. transformers/models/sew/modular_sew.py +13 -13
  1187. transformers/models/sew_d/configuration_sew_d.py +2 -4
  1188. transformers/models/sew_d/modeling_sew_d.py +30 -31
  1189. transformers/models/shieldgemma2/configuration_shieldgemma2.py +1 -0
  1190. transformers/models/shieldgemma2/modeling_shieldgemma2.py +17 -16
  1191. transformers/models/shieldgemma2/processing_shieldgemma2.py +5 -3
  1192. transformers/models/siglip/configuration_siglip.py +2 -4
  1193. transformers/models/siglip/image_processing_siglip.py +20 -17
  1194. transformers/models/siglip/image_processing_siglip_fast.py +1 -0
  1195. transformers/models/siglip/modeling_siglip.py +75 -84
  1196. transformers/models/siglip/processing_siglip.py +14 -2
  1197. transformers/models/siglip/tokenization_siglip.py +7 -6
  1198. transformers/models/siglip2/configuration_siglip2.py +2 -5
  1199. transformers/models/siglip2/image_processing_siglip2.py +16 -15
  1200. transformers/models/siglip2/image_processing_siglip2_fast.py +7 -6
  1201. transformers/models/siglip2/modeling_siglip2.py +129 -143
  1202. transformers/models/siglip2/modular_siglip2.py +46 -47
  1203. transformers/models/siglip2/processing_siglip2.py +14 -2
  1204. transformers/models/smollm3/configuration_smollm3.py +32 -29
  1205. transformers/models/smollm3/modeling_smollm3.py +39 -36
  1206. transformers/models/smollm3/modular_smollm3.py +35 -33
  1207. transformers/models/smolvlm/configuration_smolvlm.py +4 -2
  1208. transformers/models/smolvlm/image_processing_smolvlm.py +43 -42
  1209. transformers/models/smolvlm/image_processing_smolvlm_fast.py +15 -41
  1210. transformers/models/smolvlm/modeling_smolvlm.py +94 -126
  1211. transformers/models/smolvlm/modular_smolvlm.py +39 -50
  1212. transformers/models/smolvlm/processing_smolvlm.py +83 -15
  1213. transformers/models/smolvlm/video_processing_smolvlm.py +18 -16
  1214. transformers/models/speech_encoder_decoder/configuration_speech_encoder_decoder.py +1 -0
  1215. transformers/models/speech_encoder_decoder/modeling_speech_encoder_decoder.py +27 -26
  1216. transformers/models/speech_to_text/configuration_speech_to_text.py +9 -9
  1217. transformers/models/speech_to_text/feature_extraction_speech_to_text.py +13 -10
  1218. transformers/models/speech_to_text/modeling_speech_to_text.py +54 -66
  1219. transformers/models/speech_to_text/processing_speech_to_text.py +30 -4
  1220. transformers/models/speech_to_text/tokenization_speech_to_text.py +6 -5
  1221. transformers/models/speecht5/configuration_speecht5.py +9 -7
  1222. transformers/models/speecht5/feature_extraction_speecht5.py +37 -16
  1223. transformers/models/speecht5/modeling_speecht5.py +175 -213
  1224. transformers/models/speecht5/number_normalizer.py +1 -0
  1225. transformers/models/speecht5/processing_speecht5.py +37 -3
  1226. transformers/models/speecht5/tokenization_speecht5.py +5 -4
  1227. transformers/models/splinter/configuration_splinter.py +7 -6
  1228. transformers/models/splinter/modeling_splinter.py +59 -71
  1229. transformers/models/splinter/tokenization_splinter.py +30 -9
  1230. transformers/models/squeezebert/configuration_squeezebert.py +2 -14
  1231. transformers/models/squeezebert/modeling_squeezebert.py +62 -68
  1232. transformers/models/squeezebert/tokenization_squeezebert.py +1 -0
  1233. transformers/models/stablelm/configuration_stablelm.py +29 -24
  1234. transformers/models/stablelm/modeling_stablelm.py +45 -44
  1235. transformers/models/starcoder2/configuration_starcoder2.py +27 -30
  1236. transformers/models/starcoder2/modeling_starcoder2.py +41 -39
  1237. transformers/models/starcoder2/modular_starcoder2.py +16 -14
  1238. transformers/models/superglue/configuration_superglue.py +3 -7
  1239. transformers/models/superglue/image_processing_superglue.py +15 -15
  1240. transformers/models/superglue/image_processing_superglue_fast.py +10 -9
  1241. transformers/models/superglue/modeling_superglue.py +37 -42
  1242. transformers/models/superpoint/image_processing_superpoint.py +15 -15
  1243. transformers/models/superpoint/image_processing_superpoint_fast.py +11 -8
  1244. transformers/models/superpoint/modeling_superpoint.py +16 -18
  1245. transformers/models/swiftformer/configuration_swiftformer.py +1 -0
  1246. transformers/models/swiftformer/modeling_swiftformer.py +14 -18
  1247. transformers/models/swin/configuration_swin.py +1 -0
  1248. transformers/models/swin/modeling_swin.py +86 -86
  1249. transformers/models/swin2sr/configuration_swin2sr.py +1 -0
  1250. transformers/models/swin2sr/image_processing_swin2sr.py +13 -10
  1251. transformers/models/swin2sr/image_processing_swin2sr_fast.py +8 -4
  1252. transformers/models/swin2sr/modeling_swin2sr.py +63 -81
  1253. transformers/models/swinv2/configuration_swinv2.py +1 -0
  1254. transformers/models/swinv2/modeling_swinv2.py +104 -108
  1255. transformers/models/switch_transformers/configuration_switch_transformers.py +7 -11
  1256. transformers/models/switch_transformers/modeling_switch_transformers.py +44 -37
  1257. transformers/models/switch_transformers/modular_switch_transformers.py +41 -34
  1258. transformers/models/t5/configuration_t5.py +8 -14
  1259. transformers/models/t5/modeling_t5.py +92 -88
  1260. transformers/models/t5/tokenization_t5.py +9 -3
  1261. transformers/models/t5gemma/configuration_t5gemma.py +41 -43
  1262. transformers/models/t5gemma/modeling_t5gemma.py +107 -104
  1263. transformers/models/t5gemma/modular_t5gemma.py +120 -124
  1264. transformers/models/t5gemma2/configuration_t5gemma2.py +120 -80
  1265. transformers/models/t5gemma2/modeling_t5gemma2.py +125 -141
  1266. transformers/models/t5gemma2/modular_t5gemma2.py +104 -393
  1267. transformers/models/table_transformer/configuration_table_transformer.py +2 -1
  1268. transformers/models/table_transformer/modeling_table_transformer.py +49 -51
  1269. transformers/models/tapas/configuration_tapas.py +2 -12
  1270. transformers/models/tapas/modeling_tapas.py +67 -68
  1271. transformers/models/tapas/tokenization_tapas.py +153 -115
  1272. transformers/models/textnet/configuration_textnet.py +1 -0
  1273. transformers/models/textnet/image_processing_textnet.py +25 -22
  1274. transformers/models/textnet/image_processing_textnet_fast.py +10 -8
  1275. transformers/models/textnet/modeling_textnet.py +16 -28
  1276. transformers/models/time_series_transformer/configuration_time_series_transformer.py +8 -5
  1277. transformers/models/time_series_transformer/modeling_time_series_transformer.py +81 -83
  1278. transformers/models/timesfm/configuration_timesfm.py +1 -0
  1279. transformers/models/timesfm/modeling_timesfm.py +22 -33
  1280. transformers/models/timesfm/modular_timesfm.py +21 -32
  1281. transformers/models/timesformer/configuration_timesformer.py +1 -0
  1282. transformers/models/timesformer/modeling_timesformer.py +16 -15
  1283. transformers/models/timm_backbone/configuration_timm_backbone.py +1 -0
  1284. transformers/models/timm_backbone/modeling_timm_backbone.py +15 -17
  1285. transformers/models/timm_wrapper/configuration_timm_wrapper.py +3 -5
  1286. transformers/models/timm_wrapper/image_processing_timm_wrapper.py +5 -4
  1287. transformers/models/timm_wrapper/modeling_timm_wrapper.py +29 -34
  1288. transformers/models/trocr/configuration_trocr.py +8 -11
  1289. transformers/models/trocr/modeling_trocr.py +44 -45
  1290. transformers/models/trocr/processing_trocr.py +25 -5
  1291. transformers/models/tvp/configuration_tvp.py +2 -5
  1292. transformers/models/tvp/image_processing_tvp.py +52 -50
  1293. transformers/models/tvp/image_processing_tvp_fast.py +15 -15
  1294. transformers/models/tvp/modeling_tvp.py +27 -27
  1295. transformers/models/tvp/processing_tvp.py +14 -2
  1296. transformers/models/udop/configuration_udop.py +7 -16
  1297. transformers/models/udop/modeling_udop.py +73 -71
  1298. transformers/models/udop/processing_udop.py +26 -7
  1299. transformers/models/udop/tokenization_udop.py +105 -84
  1300. transformers/models/umt5/configuration_umt5.py +7 -8
  1301. transformers/models/umt5/modeling_umt5.py +90 -94
  1302. transformers/models/unispeech/configuration_unispeech.py +2 -4
  1303. transformers/models/unispeech/modeling_unispeech.py +49 -51
  1304. transformers/models/unispeech/modular_unispeech.py +22 -22
  1305. transformers/models/unispeech_sat/configuration_unispeech_sat.py +2 -4
  1306. transformers/models/unispeech_sat/modeling_unispeech_sat.py +65 -69
  1307. transformers/models/unispeech_sat/modular_unispeech_sat.py +23 -23
  1308. transformers/models/univnet/feature_extraction_univnet.py +14 -14
  1309. transformers/models/univnet/modeling_univnet.py +8 -8
  1310. transformers/models/upernet/configuration_upernet.py +1 -0
  1311. transformers/models/upernet/modeling_upernet.py +13 -11
  1312. transformers/models/vaultgemma/__init__.py +1 -0
  1313. transformers/models/vaultgemma/configuration_vaultgemma.py +33 -29
  1314. transformers/models/vaultgemma/modeling_vaultgemma.py +41 -39
  1315. transformers/models/vaultgemma/modular_vaultgemma.py +31 -29
  1316. transformers/models/video_llama_3/configuration_video_llama_3.py +0 -4
  1317. transformers/models/video_llama_3/image_processing_video_llama_3.py +42 -43
  1318. transformers/models/video_llama_3/image_processing_video_llama_3_fast.py +14 -12
  1319. transformers/models/video_llama_3/modeling_video_llama_3.py +109 -157
  1320. transformers/models/video_llama_3/modular_video_llama_3.py +146 -155
  1321. transformers/models/video_llama_3/processing_video_llama_3.py +39 -5
  1322. transformers/models/video_llama_3/video_processing_video_llama_3.py +23 -42
  1323. transformers/models/video_llava/configuration_video_llava.py +1 -4
  1324. transformers/models/video_llava/image_processing_video_llava.py +38 -35
  1325. transformers/models/video_llava/modeling_video_llava.py +146 -146
  1326. transformers/models/video_llava/processing_video_llava.py +78 -38
  1327. transformers/models/video_llava/video_processing_video_llava.py +1 -0
  1328. transformers/models/videomae/configuration_videomae.py +1 -0
  1329. transformers/models/videomae/image_processing_videomae.py +34 -31
  1330. transformers/models/videomae/modeling_videomae.py +17 -14
  1331. transformers/models/videomae/video_processing_videomae.py +1 -0
  1332. transformers/models/vilt/configuration_vilt.py +4 -6
  1333. transformers/models/vilt/image_processing_vilt.py +30 -29
  1334. transformers/models/vilt/image_processing_vilt_fast.py +16 -15
  1335. transformers/models/vilt/modeling_vilt.py +90 -116
  1336. transformers/models/vilt/processing_vilt.py +14 -2
  1337. transformers/models/vipllava/configuration_vipllava.py +1 -4
  1338. transformers/models/vipllava/modeling_vipllava.py +70 -99
  1339. transformers/models/vipllava/modular_vipllava.py +54 -78
  1340. transformers/models/vision_encoder_decoder/configuration_vision_encoder_decoder.py +1 -0
  1341. transformers/models/vision_encoder_decoder/modeling_vision_encoder_decoder.py +27 -28
  1342. transformers/models/vision_text_dual_encoder/configuration_vision_text_dual_encoder.py +1 -0
  1343. transformers/models/vision_text_dual_encoder/modeling_vision_text_dual_encoder.py +41 -46
  1344. transformers/models/vision_text_dual_encoder/processing_vision_text_dual_encoder.py +16 -2
  1345. transformers/models/visual_bert/configuration_visual_bert.py +2 -6
  1346. transformers/models/visual_bert/modeling_visual_bert.py +92 -98
  1347. transformers/models/vit/configuration_vit.py +1 -0
  1348. transformers/models/vit/image_processing_vit.py +22 -19
  1349. transformers/models/vit/image_processing_vit_fast.py +1 -0
  1350. transformers/models/vit/modeling_vit.py +17 -17
  1351. transformers/models/vit_mae/configuration_vit_mae.py +1 -0
  1352. transformers/models/vit_mae/modeling_vit_mae.py +27 -29
  1353. transformers/models/vit_msn/configuration_vit_msn.py +1 -0
  1354. transformers/models/vit_msn/modeling_vit_msn.py +16 -18
  1355. transformers/models/vitdet/configuration_vitdet.py +1 -0
  1356. transformers/models/vitdet/modeling_vitdet.py +14 -14
  1357. transformers/models/vitmatte/configuration_vitmatte.py +5 -2
  1358. transformers/models/vitmatte/image_processing_vitmatte.py +18 -15
  1359. transformers/models/vitmatte/image_processing_vitmatte_fast.py +18 -16
  1360. transformers/models/vitmatte/modeling_vitmatte.py +11 -14
  1361. transformers/models/vitpose/configuration_vitpose.py +7 -4
  1362. transformers/models/vitpose/image_processing_vitpose.py +25 -24
  1363. transformers/models/vitpose/image_processing_vitpose_fast.py +11 -9
  1364. transformers/models/vitpose/modeling_vitpose.py +14 -14
  1365. transformers/models/vitpose_backbone/configuration_vitpose_backbone.py +1 -0
  1366. transformers/models/vitpose_backbone/modeling_vitpose_backbone.py +10 -8
  1367. transformers/models/vits/configuration_vits.py +1 -4
  1368. transformers/models/vits/modeling_vits.py +42 -44
  1369. transformers/models/vits/tokenization_vits.py +4 -3
  1370. transformers/models/vivit/configuration_vivit.py +1 -0
  1371. transformers/models/vivit/image_processing_vivit.py +39 -36
  1372. transformers/models/vivit/modeling_vivit.py +8 -6
  1373. transformers/models/vjepa2/__init__.py +1 -0
  1374. transformers/models/vjepa2/configuration_vjepa2.py +1 -0
  1375. transformers/models/vjepa2/modeling_vjepa2.py +32 -31
  1376. transformers/models/vjepa2/video_processing_vjepa2.py +1 -0
  1377. transformers/models/voxtral/__init__.py +1 -0
  1378. transformers/models/voxtral/configuration_voxtral.py +2 -0
  1379. transformers/models/voxtral/modeling_voxtral.py +47 -40
  1380. transformers/models/voxtral/modular_voxtral.py +40 -37
  1381. transformers/models/voxtral/processing_voxtral.py +48 -25
  1382. transformers/models/wav2vec2/configuration_wav2vec2.py +2 -4
  1383. transformers/models/wav2vec2/feature_extraction_wav2vec2.py +10 -7
  1384. transformers/models/wav2vec2/modeling_wav2vec2.py +121 -73
  1385. transformers/models/wav2vec2/processing_wav2vec2.py +35 -6
  1386. transformers/models/wav2vec2/tokenization_wav2vec2.py +332 -20
  1387. transformers/models/wav2vec2_bert/configuration_wav2vec2_bert.py +2 -4
  1388. transformers/models/wav2vec2_bert/modeling_wav2vec2_bert.py +62 -70
  1389. transformers/models/wav2vec2_bert/modular_wav2vec2_bert.py +48 -57
  1390. transformers/models/wav2vec2_bert/processing_wav2vec2_bert.py +35 -6
  1391. transformers/models/wav2vec2_conformer/configuration_wav2vec2_conformer.py +2 -4
  1392. transformers/models/wav2vec2_conformer/modeling_wav2vec2_conformer.py +77 -90
  1393. transformers/models/wav2vec2_conformer/modular_wav2vec2_conformer.py +30 -37
  1394. transformers/models/wav2vec2_phoneme/tokenization_wav2vec2_phoneme.py +17 -16
  1395. transformers/models/wav2vec2_with_lm/processing_wav2vec2_with_lm.py +55 -36
  1396. transformers/models/wavlm/configuration_wavlm.py +2 -4
  1397. transformers/models/wavlm/modeling_wavlm.py +48 -50
  1398. transformers/models/wavlm/modular_wavlm.py +5 -4
  1399. transformers/models/whisper/configuration_whisper.py +5 -6
  1400. transformers/models/whisper/english_normalizer.py +4 -3
  1401. transformers/models/whisper/feature_extraction_whisper.py +24 -9
  1402. transformers/models/whisper/generation_whisper.py +48 -26
  1403. transformers/models/whisper/modeling_whisper.py +73 -79
  1404. transformers/models/whisper/processing_whisper.py +20 -3
  1405. transformers/models/whisper/tokenization_whisper.py +43 -11
  1406. transformers/models/x_clip/configuration_x_clip.py +2 -4
  1407. transformers/models/x_clip/modeling_x_clip.py +93 -96
  1408. transformers/models/x_clip/processing_x_clip.py +14 -2
  1409. transformers/models/xcodec/configuration_xcodec.py +6 -4
  1410. transformers/models/xcodec/modeling_xcodec.py +17 -20
  1411. transformers/models/xglm/configuration_xglm.py +8 -9
  1412. transformers/models/xglm/modeling_xglm.py +55 -60
  1413. transformers/models/xglm/tokenization_xglm.py +11 -3
  1414. transformers/models/xlm/configuration_xlm.py +8 -10
  1415. transformers/models/xlm/modeling_xlm.py +144 -144
  1416. transformers/models/xlm/tokenization_xlm.py +5 -3
  1417. transformers/models/xlm_roberta/configuration_xlm_roberta.py +3 -11
  1418. transformers/models/xlm_roberta/modeling_xlm_roberta.py +194 -195
  1419. transformers/models/xlm_roberta/modular_xlm_roberta.py +53 -50
  1420. transformers/models/xlm_roberta/tokenization_xlm_roberta.py +18 -8
  1421. transformers/models/xlm_roberta_xl/configuration_xlm_roberta_xl.py +2 -10
  1422. transformers/models/xlm_roberta_xl/modeling_xlm_roberta_xl.py +93 -94
  1423. transformers/models/xlm_roberta_xl/modular_xlm_roberta_xl.py +70 -67
  1424. transformers/models/xlnet/configuration_xlnet.py +12 -3
  1425. transformers/models/xlnet/modeling_xlnet.py +163 -152
  1426. transformers/models/xlnet/tokenization_xlnet.py +9 -2
  1427. transformers/models/xlstm/configuration_xlstm.py +12 -8
  1428. transformers/models/xlstm/modeling_xlstm.py +65 -62
  1429. transformers/models/xmod/configuration_xmod.py +3 -11
  1430. transformers/models/xmod/modeling_xmod.py +110 -108
  1431. transformers/models/yolos/configuration_yolos.py +1 -0
  1432. transformers/models/yolos/image_processing_yolos.py +62 -60
  1433. transformers/models/yolos/image_processing_yolos_fast.py +45 -42
  1434. transformers/models/yolos/modeling_yolos.py +16 -16
  1435. transformers/models/yolos/modular_yolos.py +19 -17
  1436. transformers/models/yoso/configuration_yoso.py +2 -8
  1437. transformers/models/yoso/modeling_yoso.py +63 -70
  1438. transformers/models/zamba/configuration_zamba.py +8 -5
  1439. transformers/models/zamba/modeling_zamba.py +78 -81
  1440. transformers/models/zamba2/configuration_zamba2.py +50 -44
  1441. transformers/models/zamba2/modeling_zamba2.py +97 -97
  1442. transformers/models/zamba2/modular_zamba2.py +48 -46
  1443. transformers/models/zoedepth/configuration_zoedepth.py +2 -1
  1444. transformers/models/zoedepth/image_processing_zoedepth.py +29 -28
  1445. transformers/models/zoedepth/image_processing_zoedepth_fast.py +24 -21
  1446. transformers/models/zoedepth/modeling_zoedepth.py +18 -26
  1447. transformers/pipelines/__init__.py +114 -57
  1448. transformers/pipelines/any_to_any.py +22 -14
  1449. transformers/pipelines/audio_utils.py +2 -1
  1450. transformers/pipelines/automatic_speech_recognition.py +12 -20
  1451. transformers/pipelines/base.py +27 -15
  1452. transformers/{models/pe_audio/processing_pe_audio.py → pipelines/deprecated/__init__.py} +3 -10
  1453. transformers/pipelines/deprecated/text2text_generation.py +408 -0
  1454. transformers/pipelines/document_question_answering.py +2 -4
  1455. transformers/pipelines/image_text_to_text.py +1 -0
  1456. transformers/pipelines/image_to_text.py +229 -0
  1457. transformers/pipelines/question_answering.py +44 -5
  1458. transformers/pipelines/text_classification.py +14 -1
  1459. transformers/pipelines/text_generation.py +1 -1
  1460. transformers/pipelines/text_to_audio.py +2 -2
  1461. transformers/pipelines/token_classification.py +22 -1
  1462. transformers/pipelines/video_classification.py +9 -1
  1463. transformers/pipelines/zero_shot_audio_classification.py +1 -0
  1464. transformers/pipelines/zero_shot_classification.py +6 -0
  1465. transformers/pipelines/zero_shot_image_classification.py +7 -0
  1466. transformers/processing_utils.py +145 -230
  1467. transformers/quantizers/auto.py +4 -2
  1468. transformers/quantizers/base.py +173 -53
  1469. transformers/quantizers/quantizer_aqlm.py +23 -2
  1470. transformers/quantizers/quantizer_auto_round.py +12 -2
  1471. transformers/quantizers/quantizer_awq.py +89 -20
  1472. transformers/quantizers/quantizer_bitnet.py +14 -4
  1473. transformers/quantizers/quantizer_bnb_4bit.py +155 -18
  1474. transformers/quantizers/quantizer_bnb_8bit.py +110 -24
  1475. transformers/quantizers/quantizer_compressed_tensors.py +9 -2
  1476. transformers/quantizers/quantizer_eetq.py +74 -16
  1477. transformers/quantizers/quantizer_fbgemm_fp8.py +138 -38
  1478. transformers/quantizers/quantizer_finegrained_fp8.py +113 -26
  1479. transformers/quantizers/quantizer_fp_quant.py +82 -52
  1480. transformers/quantizers/quantizer_gptq.py +28 -8
  1481. transformers/quantizers/quantizer_higgs.py +60 -42
  1482. transformers/quantizers/quantizer_hqq.py +153 -144
  1483. transformers/quantizers/quantizer_mxfp4.py +194 -14
  1484. transformers/quantizers/quantizer_quanto.py +79 -35
  1485. transformers/quantizers/quantizer_quark.py +18 -36
  1486. transformers/quantizers/quantizer_spqr.py +12 -4
  1487. transformers/quantizers/quantizer_torchao.py +325 -50
  1488. transformers/quantizers/quantizer_vptq.py +27 -4
  1489. transformers/quantizers/quantizers_utils.py +0 -20
  1490. transformers/safetensors_conversion.py +3 -9
  1491. transformers/testing_utils.py +82 -326
  1492. transformers/tokenization_mistral_common.py +903 -568
  1493. transformers/tokenization_utils_base.py +340 -220
  1494. transformers/tokenization_utils_sentencepiece.py +6 -5
  1495. transformers/tokenization_utils_tokenizers.py +113 -226
  1496. transformers/trainer.py +53 -60
  1497. transformers/trainer_callback.py +0 -8
  1498. transformers/trainer_seq2seq.py +1 -5
  1499. transformers/trainer_utils.py +1 -1
  1500. transformers/training_args.py +41 -77
  1501. transformers/utils/__init__.py +4 -8
  1502. transformers/utils/attention_visualizer.py +5 -5
  1503. transformers/utils/auto_docstring.py +37 -599
  1504. transformers/utils/doc.py +36 -4
  1505. transformers/utils/dummy_pt_objects.py +42 -0
  1506. transformers/utils/generic.py +28 -111
  1507. transformers/utils/hub.py +15 -5
  1508. transformers/utils/import_utils.py +32 -165
  1509. transformers/utils/kernel_config.py +19 -74
  1510. transformers/utils/loading_report.py +15 -25
  1511. transformers/utils/quantization_config.py +241 -72
  1512. transformers/video_processing_utils.py +39 -41
  1513. transformers/video_utils.py +22 -18
  1514. {transformers-5.0.0.dist-info → transformers-5.0.0rc0.dist-info}/METADATA +236 -284
  1515. transformers-5.0.0rc0.dist-info/RECORD +1987 -0
  1516. {transformers-5.0.0.dist-info → transformers-5.0.0rc0.dist-info}/WHEEL +1 -1
  1517. transformers/integrations/moe.py +0 -360
  1518. transformers/integrations/quark.py +0 -53
  1519. transformers/loss/loss_lw_detr.py +0 -356
  1520. transformers/models/ernie4_5_vl_moe/__init__.py +0 -31
  1521. transformers/models/ernie4_5_vl_moe/configuration_ernie4_5_vl_moe.py +0 -340
  1522. transformers/models/ernie4_5_vl_moe/image_processing_ernie4_5_vl_moe.py +0 -455
  1523. transformers/models/ernie4_5_vl_moe/image_processing_ernie4_5_vl_moe_fast.py +0 -231
  1524. transformers/models/ernie4_5_vl_moe/modeling_ernie4_5_vl_moe.py +0 -1936
  1525. transformers/models/ernie4_5_vl_moe/modular_ernie4_5_vl_moe.py +0 -1925
  1526. transformers/models/ernie4_5_vl_moe/processing_ernie4_5_vl_moe.py +0 -249
  1527. transformers/models/ernie4_5_vl_moe/video_processing_ernie4_5_vl_moe.py +0 -593
  1528. transformers/models/fast_vlm/__init__.py +0 -27
  1529. transformers/models/fast_vlm/configuration_fast_vlm.py +0 -137
  1530. transformers/models/fast_vlm/modeling_fast_vlm.py +0 -432
  1531. transformers/models/fast_vlm/modular_fast_vlm.py +0 -373
  1532. transformers/models/glm4_moe_lite/__init__.py +0 -28
  1533. transformers/models/glm4_moe_lite/configuration_glm4_moe_lite.py +0 -233
  1534. transformers/models/glm4_moe_lite/modeling_glm4_moe_lite.py +0 -740
  1535. transformers/models/glm4_moe_lite/modular_glm4_moe_lite.py +0 -302
  1536. transformers/models/glm_image/__init__.py +0 -31
  1537. transformers/models/glm_image/configuration_glm_image.py +0 -351
  1538. transformers/models/glm_image/image_processing_glm_image.py +0 -503
  1539. transformers/models/glm_image/image_processing_glm_image_fast.py +0 -294
  1540. transformers/models/glm_image/modeling_glm_image.py +0 -1642
  1541. transformers/models/glm_image/modular_glm_image.py +0 -1531
  1542. transformers/models/glm_image/processing_glm_image.py +0 -217
  1543. transformers/models/glmasr/__init__.py +0 -29
  1544. transformers/models/glmasr/configuration_glmasr.py +0 -196
  1545. transformers/models/glmasr/modeling_glmasr.py +0 -517
  1546. transformers/models/glmasr/modular_glmasr.py +0 -443
  1547. transformers/models/glmasr/processing_glmasr.py +0 -331
  1548. transformers/models/jais2/__init__.py +0 -27
  1549. transformers/models/jais2/configuration_jais2.py +0 -148
  1550. transformers/models/jais2/modeling_jais2.py +0 -484
  1551. transformers/models/jais2/modular_jais2.py +0 -194
  1552. transformers/models/lasr/__init__.py +0 -29
  1553. transformers/models/lasr/configuration_lasr.py +0 -244
  1554. transformers/models/lasr/feature_extraction_lasr.py +0 -275
  1555. transformers/models/lasr/modeling_lasr.py +0 -727
  1556. transformers/models/lasr/modular_lasr.py +0 -574
  1557. transformers/models/lasr/processing_lasr.py +0 -100
  1558. transformers/models/lasr/tokenization_lasr.py +0 -184
  1559. transformers/models/lighton_ocr/__init__.py +0 -28
  1560. transformers/models/lighton_ocr/configuration_lighton_ocr.py +0 -128
  1561. transformers/models/lighton_ocr/modeling_lighton_ocr.py +0 -463
  1562. transformers/models/lighton_ocr/modular_lighton_ocr.py +0 -404
  1563. transformers/models/lighton_ocr/processing_lighton_ocr.py +0 -229
  1564. transformers/models/lw_detr/__init__.py +0 -27
  1565. transformers/models/lw_detr/configuration_lw_detr.py +0 -374
  1566. transformers/models/lw_detr/modeling_lw_detr.py +0 -1702
  1567. transformers/models/lw_detr/modular_lw_detr.py +0 -1615
  1568. transformers/models/minimax_m2/__init__.py +0 -28
  1569. transformers/models/minimax_m2/configuration_minimax_m2.py +0 -188
  1570. transformers/models/minimax_m2/modeling_minimax_m2.py +0 -704
  1571. transformers/models/minimax_m2/modular_minimax_m2.py +0 -346
  1572. transformers/models/paddleocr_vl/__init__.py +0 -31
  1573. transformers/models/paddleocr_vl/configuration_paddleocr_vl.py +0 -335
  1574. transformers/models/paddleocr_vl/image_processing_paddleocr_vl.py +0 -503
  1575. transformers/models/paddleocr_vl/image_processing_paddleocr_vl_fast.py +0 -209
  1576. transformers/models/paddleocr_vl/modeling_paddleocr_vl.py +0 -1683
  1577. transformers/models/paddleocr_vl/modular_paddleocr_vl.py +0 -1380
  1578. transformers/models/paddleocr_vl/processing_paddleocr_vl.py +0 -133
  1579. transformers/models/pe_audio/__init__.py +0 -29
  1580. transformers/models/pe_audio/configuration_pe_audio.py +0 -204
  1581. transformers/models/pe_audio/feature_extraction_pe_audio.py +0 -160
  1582. transformers/models/pe_audio/modeling_pe_audio.py +0 -819
  1583. transformers/models/pe_audio/modular_pe_audio.py +0 -298
  1584. transformers/models/pe_audio_video/__init__.py +0 -28
  1585. transformers/models/pe_audio_video/configuration_pe_audio_video.py +0 -223
  1586. transformers/models/pe_audio_video/modeling_pe_audio_video.py +0 -971
  1587. transformers/models/pe_audio_video/modular_pe_audio_video.py +0 -763
  1588. transformers/models/pe_video/__init__.py +0 -29
  1589. transformers/models/pe_video/configuration_pe_video.py +0 -209
  1590. transformers/models/pe_video/modeling_pe_video.py +0 -647
  1591. transformers/models/pe_video/modular_pe_video.py +0 -231
  1592. transformers/models/pe_video/processing_pe_video.py +0 -10
  1593. transformers/models/pe_video/video_processing_pe_video.py +0 -64
  1594. transformers/models/pixio/__init__.py +0 -29
  1595. transformers/models/pixio/configuration_pixio.py +0 -150
  1596. transformers/models/pixio/modeling_pixio.py +0 -507
  1597. transformers/models/pixio/modular_pixio.py +0 -403
  1598. transformers/models/solar_open/__init__.py +0 -27
  1599. transformers/models/solar_open/configuration_solar_open.py +0 -184
  1600. transformers/models/solar_open/modeling_solar_open.py +0 -642
  1601. transformers/models/solar_open/modular_solar_open.py +0 -224
  1602. transformers/trainer_jit_checkpoint.py +0 -125
  1603. transformers-5.0.0.dist-info/RECORD +0 -2068
  1604. {transformers-5.0.0.dist-info/licenses → transformers-5.0.0rc0.dist-info}/LICENSE +0 -0
  1605. {transformers-5.0.0.dist-info → transformers-5.0.0rc0.dist-info}/entry_points.txt +0 -0
  1606. {transformers-5.0.0.dist-info → transformers-5.0.0rc0.dist-info}/top_level.txt +0 -0
transformers/models/ernie4_5_vl_moe/modular_ernie4_5_vl_moe.py +0 -1925
@@ -1,1925 +0,0 @@
1
- # Copyright 2025 Baidu and HuggingFace Inc. team. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
- """PyTorch Ernie4.5-VL model."""
15
-
16
- import itertools
17
- from collections.abc import Callable
18
- from typing import Optional
19
-
20
- import numpy as np
21
- import torch
22
- import torch.nn as nn
23
- import torch.nn.functional as F
24
-
25
- from ... import initialization as init
26
- from ...cache_utils import Cache, DynamicCache
27
- from ...configuration_utils import PreTrainedConfig, layer_type_validation
28
- from ...generation import GenerationMixin
29
- from ...image_processing_utils import BaseImageProcessor, BatchFeature
30
- from ...image_processing_utils_fast import (
31
- group_images_by_shape,
32
- reorder_images,
33
- )
34
- from ...image_transforms import convert_to_rgb, resize, to_channel_dimension_format
35
- from ...image_utils import (
36
- OPENAI_CLIP_MEAN,
37
- OPENAI_CLIP_STD,
38
- ChannelDimension,
39
- ImageInput,
40
- PILImageResampling,
41
- SizeDict,
42
- get_image_size,
43
- infer_channel_dimension_format,
44
- is_scaled_image,
45
- make_list_of_images,
46
- to_numpy_array,
47
- )
48
- from ...masking_utils import create_causal_mask
49
- from ...modeling_flash_attention_utils import FlashAttentionKwargs
50
- from ...modeling_layers import GradientCheckpointingLayer
51
- from ...modeling_outputs import BaseModelOutputWithPooling, MoeCausalLMOutputWithPast, MoeModelOutputWithPast
52
- from ...modeling_rope_utils import dynamic_rope_update
53
- from ...modeling_utils import PreTrainedModel
54
- from ...processing_utils import Unpack
55
- from ...utils import (
56
- TensorType,
57
- TransformersKwargs,
58
- auto_docstring,
59
- can_return_tuple,
60
- is_torchdynamo_compiling,
61
- logging,
62
- )
63
- from ...utils.generic import OutputRecorder, check_model_inputs, maybe_autocast
64
- from ..ernie4_5_moe.configuration_ernie4_5_moe import Ernie4_5_MoeConfig
65
- from ..ernie4_5_moe.modeling_ernie4_5_moe import (
66
- Ernie4_5_MoeAttention,
67
- Ernie4_5_MoeExperts,
68
- Ernie4_5_MoeMLP,
69
- Ernie4_5_MoeModel,
70
- Ernie4_5_MoeRMSNorm,
71
- Ernie4_5_MoeStatics,
72
- Ernie4_5_MoeTopKRouter,
73
- )
74
- from ..glm4v.image_processing_glm4v import Glm4vImageProcessor, Glm4vImageProcessorKwargs
75
- from ..glm4v.image_processing_glm4v_fast import Glm4vImageProcessorFast
76
- from ..glm4v.modeling_glm4v import Glm4vForConditionalGeneration
77
- from ..mixtral.modeling_mixtral import load_balancing_loss_func
78
- from ..qwen2_5_vl.modeling_qwen2_5_vl import (
79
- Qwen2_5_VisionPatchEmbed,
80
- Qwen2_5_VisionRotaryEmbedding,
81
- Qwen2_5_VLModel,
82
- Qwen2_5_VLPreTrainedModel,
83
- Qwen2_5_VLVisionAttention,
84
- Qwen2_5_VLVisionBlock,
85
- )
86
- from ..qwen2_vl.configuration_qwen2_vl import Qwen2VLVisionConfig
87
- from ..qwen2_vl.image_processing_qwen2_vl import smart_resize
88
- from ..qwen2_vl.modeling_qwen2_vl import Qwen2VisionTransformerPretrainedModel, VisionMlp
89
-
90
-
91
- logger = logging.get_logger(__name__)
92
-
93
-
94
- class Ernie4_5_VL_MoeVisionConfig(Qwen2VLVisionConfig):
95
- r"""
96
- This is the configuration class to store the configuration of the [`Ernie4_5_VL_MoeVisionTransformerPretrainedModel`].
97
- It is used to instantiate the vision models portion of the complete Ernie4.5-VL Moe model according to the specified
98
- arguments, defining the model architecture.
99
-
100
- Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
101
- documentation from [`PretrainedConfig`] for more information.
102
-
103
- Args:
104
- depth (`int`, *optional*, defaults to 32):
105
- Number of layers (depth) in the model.
106
- hidden_size (`int`, *optional*, defaults to 1280):
107
- Dimensionality of the encoder layers and the pooler layer.
108
- hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`):
109
- The non-linear activation function (function or string) in the encoder and pooler.
110
- intermediate_size (`int`, *optional*, defaults to 5120):
111
- Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
112
- num_heads (`int`, *optional*, defaults to 16):
113
- Number of attention heads for each attention layer in the Transformer encoder.
114
- in_channels (`int`, *optional*, defaults to 3):
115
- The number of input channels.
116
- patch_size (`int`, *optional*, defaults to 14):
117
- The size (resolution) of each patch.
118
- spatial_merge_size (`int`, *optional*, defaults to 2):
119
- The size used for merging spatial dimensions.
120
- temporal_merge_size (`int`, *optional*, defaults to 2):
121
- The size used for merge along the temporal dimension.
122
- rms_norm_eps (`float`, *optional*, defaults to 1e-06):
123
- The epsilon used by the rms normalization layers.
124
- initializer_range (`float`, *optional*, defaults to 0.02):
125
- The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
126
- """
127
-
128
- model_type = "ernie4_5_vl_moe_vision"
129
-
130
- base_model_tp_plan = {
131
- "blocks.*.attn.qkv": "colwise",
132
- "blocks.*.attn.proj": "rowwise",
133
- "blocks.*.mlp.fc1": "colwise",
134
- "blocks.*.mlp.fc2": "rowwise",
135
- }
136
-
137
- def __init__(
138
- self,
139
- depth=32,
140
- hidden_size=1280,
141
- hidden_act="quick_gelu",
142
- intermediate_size=4 * 1280,
143
- num_heads=16,
144
- in_channels=3,
145
- patch_size=14,
146
- spatial_merge_size=2,
147
- temporal_merge_size=2,
148
- rms_norm_eps=1e-6,
149
- initializer_range=0.02,
150
- **kwargs,
151
- ):
152
- super().__init__(
153
- depth=depth,
154
- hidden_size=hidden_size,
155
- hidden_act=hidden_act,
156
- intermediate_size=intermediate_size,
157
- num_heads=num_heads,
158
- in_channels=in_channels,
159
- patch_size=patch_size,
160
- spatial_merge_size=spatial_merge_size,
161
- temporal_merge_size=temporal_merge_size,
162
- rms_norm_eps=rms_norm_eps,
163
- initializer_range=initializer_range,
164
- **kwargs,
165
- )
166
-
167
- del self.embed_dim # noqa: F821
168
- del self.mlp_ratio # noqa: F821
169
- del self.temporal_patch_size # noqa: F821
170
-
171
- self.intermediate_size = intermediate_size
172
- self.temporal_merge_size = temporal_merge_size
173
- self.rms_norm_eps = rms_norm_eps
174
-
175
-
176
- class Ernie4_5_VL_MoeTextConfig(Ernie4_5_MoeConfig, PreTrainedConfig):
177
- r"""
178
- This is the configuration class to store the configuration of an [`Ernie4_5_VL_MoeTextModel`]. It is used to instantiate
179
- the text model portion of the complete Ernie4.5-VL Moe model according to the specified arguments, defining the model architecture.
180
-
181
- Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
182
- documentation from [`PretrainedConfig`] for more information.
183
-
184
- Args:
185
- vocab_size (`int`, *optional*, defaults to 103424):
186
- Vocabulary size of the Ernie 4.5 VL model. Defines the number of different tokens that can be represented by the
187
- `input_ids` passed when calling [`Ernie4_5_VL_MoeTextModel`]
188
- hidden_size (`int`, *optional*, defaults to 2560):
189
- Dimension of the hidden representations.
190
- intermediate_size (`int`, *optional*, defaults to 12288):
191
- Dimension of the MLP representations.
192
- num_hidden_layers (`int`, *optional*, defaults to 28):
193
- Number of hidden layers in the Transformer encoder.
194
- num_attention_heads (`int`, *optional*, defaults to 20):
195
- Number of attention heads for each attention layer in the Transformer encoder.
196
- num_key_value_heads (`int`, *optional*, defaults to 4):
197
- This is the number of key_value heads that should be used to implement Grouped Query Attention. If
198
- `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
199
- `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
200
- converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
201
- by meanpooling all the original heads within that group. For more details, check out [this
202
- paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to `4`.
203
- hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
204
- The non-linear activation function (function or string) in the decoder.
205
- max_position_embeddings (`int`, *optional*, defaults to 131072):
206
- The maximum sequence length that this model might ever be used with.
207
- initializer_range (`float`, *optional*, defaults to 0.02):
208
- The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
209
- rms_norm_eps (`float`, *optional*, defaults to 1e-05):
210
- The epsilon used by the rms normalization layers.
211
- use_cache (`bool`, *optional*, defaults to `True`):
212
- Whether or not the model should return the last key/values attentions (not used by all models). Only
213
- relevant if `config.is_decoder=True`.
214
- use_bias (`bool`, *optional*, defaults to `False`):
215
- Whether to use a bias in any of the projections, e.g. the MLP and attention projections.
216
- rope_parameters (`RopeParameters`, *optional*):
217
- Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
218
- a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
219
- with longer `max_position_embeddings`.
220
- mlp_layer_types (`list`, *optional*):
221
- MLP (Moe vs Dense) pattern for each layer.
222
- moe_intermediate_size (`list[int]`, *optional*, defaults to `[1536, 512]`):
223
- Intermediate size of the routed experts; differs between text (first) and image (second) experts.
224
- moe_k (`int`, *optional*, defaults to 6):
225
- Number of selected experts.
226
- moe_num_experts (`int`, *optional*, defaults to 64):
227
- Number of routed experts.
228
- moe_num_shared_experts (`int`, *optional*, defaults to 2):
229
- The number of experts that are shared for all MoE forwards.
230
- moe_norm_min (`float`, *optional*, defaults to 1e-12):
231
- Minimum division value during routing normalization.
232
- output_router_logits (`bool`, *optional*, defaults to `False`):
233
- Whether or not the router logits should be returned by the model. Enabling this will also
234
- allow the model to output the auxiliary loss, including load balancing loss and router z-loss.
235
- router_aux_loss_coef (`float`, *optional*, defaults to 0.001):
236
- The aux loss factor for the total loss.
237
- pad_token_id (`int`, *optional*):
238
- Padding token id.
239
- eos_token_id (`int`, *optional*):
240
- End of stream token id.
241
- bos_token_id (`int`, *optional*):
242
- Beginning of stream token id.
243
- """
244
-
245
- model_type = "ernie4_5_vl_moe_text"
246
- base_config_key = "text_config"
247
-
248
- base_model_tp_plan = {
249
- "layers.*.self_attn.q_proj": "colwise",
250
- "layers.*.self_attn.k_proj": "colwise",
251
- "layers.*.self_attn.v_proj": "colwise",
252
- "layers.*.self_attn.o_proj": "rowwise",
253
- "layers.*.mlp.shared_experts.gate_proj": "colwise",
254
- "layers.*.mlp.shared_experts.up_proj": "colwise",
255
- "layers.*.mlp.shared_experts.down_proj": "rowwise",
256
- "layers.*.mlp.gate_proj": "colwise",
257
- "layers.*.mlp.up_proj": "colwise",
258
- "layers.*.mlp.down_proj": "rowwise",
259
- }
260
-
261
- def __init__(
262
- self,
263
- vocab_size=103424,
264
- hidden_size=2560,
265
- intermediate_size=12288,
266
- num_hidden_layers=28,
267
- num_attention_heads=20,
268
- num_key_value_heads=4,
269
- hidden_act="silu",
270
- max_position_embeddings=131072,
271
- initializer_range=0.02,
272
- rms_norm_eps=1e-5,
273
- use_cache=True,
274
- use_bias=False,
275
- rope_parameters=None,
276
- mlp_layer_types=None,
277
- moe_intermediate_size=None,
278
- moe_k=6,
279
- moe_num_experts=64,
280
- moe_num_shared_experts=2,
281
- moe_norm_min=1e-12,
282
- output_router_logits=False,
283
- router_aux_loss_coef=0.001,
284
- pad_token_id=None,
285
- eos_token_id=None,
286
- bos_token_id=None,
287
- **kwargs,
288
- ):
289
- self.vocab_size = vocab_size
290
- self.hidden_size = hidden_size
291
- self.intermediate_size = intermediate_size
292
- self.num_hidden_layers = num_hidden_layers
293
- self.num_attention_heads = num_attention_heads
294
- self.num_key_value_heads = num_key_value_heads
295
- self.hidden_act = hidden_act
296
- self.max_position_embeddings = max_position_embeddings
297
- self.initializer_range = initializer_range
298
- self.rms_norm_eps = rms_norm_eps
299
- self.use_cache = use_cache
300
- self.use_bias = use_bias
301
- self.rope_parameters = rope_parameters
302
-
303
- # Default to MoE from the second layer onward
304
- self.mlp_layer_types = mlp_layer_types
305
- if self.mlp_layer_types is None:
306
- self.mlp_layer_types = ["dense"] + ["sparse"] * (self.num_hidden_layers - 1)
307
- layer_type_validation(self.mlp_layer_types, self.num_hidden_layers, attention=False)
308
-
309
- self.moe_intermediate_size = moe_intermediate_size
310
- if self.moe_intermediate_size is None:
311
- self.moe_intermediate_size = [1536, 512]
312
- self.moe_k = moe_k
313
- self.moe_num_experts = moe_num_experts
314
- self.moe_num_shared_experts = moe_num_shared_experts
315
- self.moe_norm_min = moe_norm_min
316
- self.output_router_logits = output_router_logits
317
- self.router_aux_loss_coef = router_aux_loss_coef
318
- self.pad_token_id = pad_token_id
319
- self.eos_token_id = eos_token_id
320
- self.bos_token_id = bos_token_id
321
-
322
- PreTrainedConfig.__init__(self, ignore_keys_at_rope_validation={"mrope_section"}, **kwargs)
323
-
324
-
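To make the default MoE layout above concrete, here is a minimal sketch using the documented default of 28 hidden layers (an illustration only, not code from the file above):

```python
# Default MLP pattern when `mlp_layer_types` is not given: the first layer is
# dense, every following layer is sparse (MoE).
num_hidden_layers = 28
mlp_layer_types = ["dense"] + ["sparse"] * (num_hidden_layers - 1)
assert len(mlp_layer_types) == num_hidden_layers
assert mlp_layer_types[0] == "dense" and set(mlp_layer_types[1:]) == {"sparse"}
```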
325
- class Ernie4_5_VL_MoeConfig(PreTrainedConfig):
326
- r"""
327
- This is the configuration class to store the configuration of an [`Ernie4_5_VL_MoeModel`]. It is used to instantiate an
328
- Ernie4.5-VL MoE model according to the specified arguments, defining the model architecture. Instantiating a configuration
329
- with the defaults will yield a similar configuration to that of
330
- Ernie 4.5 VL 28B A3B [baidu/ERNIE-4.5-VL-28B-A3B-PT](https://huggingface.co/baidu/ERNIE-4.5-VL-28B-A3B-PT).
331
-
332
- Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
333
- documentation from [`PretrainedConfig`] for more information.
334
-
335
- Args:
336
- text_config (`Union[PreTrainedConfig, dict]`, *optional*, defaults to `Ernie4_5_VL_MoeTextConfig`):
337
- The config object or dictionary of the text backbone.
338
- vision_config (`Union[PreTrainedConfig, dict]`, *optional*, defaults to `Ernie4_5_VL_MoeVisionConfig`):
339
- The config object or dictionary of the vision backbone.
340
- image_start_token_id (`int`, *optional*, defaults to 101304):
341
- The image token index to encode the start of image.
342
- image_end_token_id (`int`, *optional*, defaults to 101305):
343
- The image token index to encode the end of image.
344
- image_token_id (`int`, *optional*, defaults to 100295):
345
- The image token index to encode the image prompt.
346
- video_start_token_id (`int`, *optional*, defaults to 101306):
347
- The video token index to encode the start of video.
348
- video_end_token_id (`int`, *optional*, defaults to 101307):
349
- The video token index to encode the end of video.
350
- video_token_id (`int`, *optional*, defaults to 103367):
351
- The video token index to encode the video prompt.
352
- tie_word_embeddings (`bool`, *optional*, defaults to `True`):
353
- Whether the model's input and output word embeddings should be tied.
354
-
355
- ```python
356
- >>> from transformers import Ernie4_5_VL_MoeForConditionalGeneration, Ernie4_5_VL_MoeConfig
357
-
358
- >>> # Initializing an Ernie4_5_VL_Moe style configuration
359
- >>> configuration = Ernie4_5_VL_MoeConfig()
360
-
361
- >>> # Initializing a model from the Ernie 4.5 VL 28B A3B configuration
362
- >>> model = Ernie4_5_VL_MoeForConditionalGeneration(configuration)
363
-
364
- >>> # Accessing the model configuration
365
- >>> configuration = model.config
366
- ```"""
367
-
368
- model_type = "ernie4_5_vl_moe"
369
- sub_configs = {"vision_config": Ernie4_5_VL_MoeVisionConfig, "text_config": Ernie4_5_VL_MoeTextConfig}
370
- keys_to_ignore_at_inference = ["past_key_values"]
371
-
372
- def __init__(
373
- self,
374
- text_config=None,
375
- vision_config=None,
376
- image_start_token_id=101304,
377
- image_end_token_id=101305,
378
- image_token_id=100295,
379
- video_start_token_id=101306,
380
- video_end_token_id=101307,
381
- video_token_id=103367,
382
- tie_word_embeddings=True,
383
- **kwargs,
384
- ):
385
- if isinstance(vision_config, dict):
386
- self.vision_config = self.sub_configs["vision_config"](**vision_config)
387
- elif isinstance(vision_config, Ernie4_5_VL_MoeVisionConfig):
388
- self.vision_config = vision_config
389
- elif vision_config is None:
390
- self.vision_config = self.sub_configs["vision_config"]()
391
-
392
- if isinstance(text_config, dict):
393
- self.text_config = self.sub_configs["text_config"](**text_config)
394
- elif isinstance(text_config, Ernie4_5_VL_MoeTextConfig):
395
- self.text_config = text_config
396
- elif text_config is None:
397
- self.text_config = self.sub_configs["text_config"](**kwargs)
398
-
399
- self.image_start_token_id = image_start_token_id
400
- self.image_end_token_id = image_end_token_id
401
- self.image_token_id = image_token_id
402
- self.video_start_token_id = video_start_token_id
403
- self.video_end_token_id = video_end_token_id
404
- self.video_token_id = video_token_id
405
- self.tie_word_embeddings = tie_word_embeddings
406
-
407
- super().__init__(**kwargs)
408
-
409
-
410
- class Ernie4_5_VL_MoeTextRotaryEmbedding(nn.Module):
411
- inv_freq: torch.Tensor # fix linting for `register_buffer`
412
-
413
- def __init__(self, config, device=None):
414
- super().__init__()
415
- self.max_seq_len_cached = config.max_position_embeddings
416
- self.original_max_seq_len = config.max_position_embeddings
417
-
418
- self.config = config
419
-
420
- self.rope_type = self.config.rope_parameters["rope_type"]
421
- rope_init_fn: Callable = self.compute_default_rope_parameters
422
- if self.rope_type != "default":
423
- raise ValueError(f"Ernie 4.5 VL requires the `default` rope type, but found {self.rope_type} instead.")
424
- inv_freq, self.attention_scaling = rope_init_fn(self.config, device)
425
-
426
- self.register_buffer("inv_freq", inv_freq, persistent=False)
427
- self.original_inv_freq = inv_freq
428
-
429
- self.mrope_section = config.rope_parameters.get("mrope_section", [22, 22, 20])
430
-
431
- @staticmethod
432
- def compute_default_rope_parameters(
433
- config: Ernie4_5_VL_MoeTextConfig | None = None,
434
- device: Optional["torch.device"] = None,
435
- seq_len: int | None = None,
436
- ) -> tuple["torch.Tensor", float]:
437
- """
438
- Computes the inverse frequencies according to the original RoPE implementation
439
- Args:
440
- config ([`~transformers.PreTrainedConfig`]):
441
- The model configuration.
442
- device (`torch.device`):
443
- The device to use for initialization of the inverse frequencies.
444
- seq_len (`int`, *optional*):
445
- The current sequence length. Unused for this type of RoPE.
446
- Returns:
447
- Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the
448
- post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE).
449
- """
450
- base = config.rope_parameters["rope_theta"]
451
- dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads
452
-
453
- attention_factor = 1.0 # Unused in this type of RoPE
454
-
455
- # Compute the inverse frequencies
456
- inv_freq = 1.0 / (
457
- base ** (torch.arange(0, dim, 2, dtype=torch.int64).to(device=device, dtype=torch.float) / dim)
458
- )
459
-
460
- # Specific to Ernie: we pre-rotate on the height/width dims
461
- mrope_section = config.rope_parameters.get("mrope_section", [22, 22, 20])
462
- hw_dim = mrope_section[0] + mrope_section[1]
463
- t_dim = mrope_section[2]
464
-
465
- inv_freq_3d = torch.empty_like(inv_freq)
466
- # (Pre-)Rotate to avoid another rotation during the forward
467
- inv_freq_3d[:hw_dim] = torch.cat([inv_freq[:-t_dim][0::2], inv_freq[:-t_dim][1::2]])
468
- inv_freq_3d[-t_dim:] = inv_freq[-t_dim:]
469
-
470
- return inv_freq_3d, attention_factor
471
-
472
- @torch.no_grad()
473
- @dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope)
474
- def forward(self, x, position_ids):
475
- inv_freq_expanded = (
476
- self.inv_freq[None, None, :, None].float().expand(3, position_ids.shape[1], -1, 1).to(x.device)
477
- )
478
- position_ids_expanded = position_ids[:, :, None, :].float() # shape (3, bs, 1, positions)
479
-
480
- device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
481
- with maybe_autocast(device_type=device_type, enabled=False): # Force float32
482
- freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(2, 3)
483
- cos = freqs.cos() * self.attention_scaling
484
- sin = freqs.sin() * self.attention_scaling
485
-
486
- sin = self.recomposition_to_3d(sin)
487
- cos = self.recomposition_to_3d(cos)
488
-
489
- return cos, sin
490
-
491
- def recomposition_to_3d(self, freq):
492
- freq_h, freq_w, freq_t = (m[(i + 1) % 3] for i, m in enumerate(freq.split([*self.mrope_section], dim=-1)))
493
- freq_hw = torch.stack([freq_h, freq_w], dim=-1).flatten(-2)
494
- freq_hwt = torch.cat([freq_hw, freq_t], dim=-1)
495
- return freq_hwt.repeat_interleave(2, dim=-1)
496
-
497
-
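For reference, a standalone sketch of the pre-rotation performed in `compute_default_rope_parameters` above, assuming the documented defaults (hidden size 2560 and 20 heads, hence a head dim of 128) and `mrope_section=[22, 22, 20]`; `base` is a stand-in value, not read from a real config:

```python
import torch

# Illustrative pre-rotation of the inverse frequencies: the 44 height/width
# frequencies are regrouped into even-indexed then odd-indexed halves, while
# the 20 temporal frequencies keep their original order.
dim = 128                      # head dim = hidden_size // num_attention_heads
mrope_section = [22, 22, 20]
base = 10000.0                 # stand-in rope_theta

inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2, dtype=torch.float) / dim))  # (64,)
hw_dim, t_dim = mrope_section[0] + mrope_section[1], mrope_section[2]

inv_freq_3d = torch.empty_like(inv_freq)
inv_freq_3d[:hw_dim] = torch.cat([inv_freq[:-t_dim][0::2], inv_freq[:-t_dim][1::2]])
inv_freq_3d[-t_dim:] = inv_freq[-t_dim:]
assert inv_freq_3d.shape == inv_freq.shape  # still 64 frequencies, just reordered
```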
498
- def rotate_half_text(x):
499
- """Rotates half the hidden dims of the input."""
500
- x1 = x[..., 0::2]
501
- x2 = x[..., 1::2]
502
- return torch.stack((-x2, x1), dim=-1).flatten(-2)
503
-
504
-
505
- def apply_rotary_pos_emb(q, k, cos, sin, unsqueeze_dim=1):
506
- """Applies Rotary Position Embedding to the query and key tensors.
507
-
508
- Args:
509
- q (`torch.Tensor`): The query tensor.
510
- k (`torch.Tensor`): The key tensor.
511
- cos (`torch.Tensor`): The cosine part of the rotary embedding.
512
- sin (`torch.Tensor`): The sine part of the rotary embedding.
513
- unsqueeze_dim (`int`, *optional*, defaults to 1):
514
- The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
515
- sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
516
- that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
517
- k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
518
- cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
519
- the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
520
- Returns:
521
- `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
522
- """
523
- original_dtype = q.dtype
524
-
525
- cos = cos.unsqueeze(unsqueeze_dim)
526
- sin = sin.unsqueeze(unsqueeze_dim)
527
-
528
- q_embed = (q.float() * cos) + (rotate_half_text(q).float() * sin)
529
- k_embed = (k.float() * cos) + (rotate_half_text(k).float() * sin)
530
-
531
- return q_embed.to(original_dtype), k_embed.to(original_dtype)
532
-
533
-
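A quick toy check of the interleaved `rotate_half_text` above (not part of the original file): adjacent pairs (x0, x1) map to (-x1, x0), matching the `repeat_interleave(2)` layout produced by the rotary embedding.

```python
import torch

x = torch.tensor([1.0, 2.0, 3.0, 4.0])
x1, x2 = x[0::2], x[1::2]                      # evens: [1, 3], odds: [2, 4]
rotated = torch.stack((-x2, x1), dim=-1).flatten(-2)
print(rotated)                                  # tensor([-2., 1., -4., 3.])
```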
534
- class Ernie4_5_VL_MoeTextAttention(Ernie4_5_MoeAttention):
535
- pass
536
-
537
-
538
- class Ernie4_5_VL_MoeRMSNorm(Ernie4_5_MoeRMSNorm):
539
- pass
540
-
541
-
542
- class Ernie4_5_VL_MoeMLP(Ernie4_5_MoeMLP):
543
- pass
544
-
545
-
546
- class Ernie4_5_VL_MoeMoeStatics(Ernie4_5_MoeStatics):
547
- pass
548
-
549
-
550
- class Ernie4_5_VL_MoeMoeTopKRouter(Ernie4_5_MoeTopKRouter):
551
- def __init__(self, config):
552
- super().__init__(config)
553
- self.moe_statics = Ernie4_5_VL_MoeMoeStatics(config)
554
-
555
-
556
- class Ernie4_5_VL_MoeMoeExperts(Ernie4_5_MoeExperts):
557
- def __init__(self, config, intermediate_size=None):
558
- super().__init__(config)
559
- self.intermediate_dim = config.moe_intermediate_size if intermediate_size is None else intermediate_size
560
-
561
-
562
- class Ernie4_5_VL_MoeSparseMoeBlock(nn.Module):
563
- def __init__(self, config, intermediate_size):
564
- super().__init__()
565
- self.hidden_dim = config.hidden_size
566
- self.num_experts = config.moe_num_experts
567
- self.top_k = config.moe_k
568
- self.gate = Ernie4_5_VL_MoeMoeTopKRouter(config)
569
- self.experts = Ernie4_5_VL_MoeMoeExperts(config, intermediate_size)
570
-
571
- def forward(
572
- self,
573
- hidden_states: torch.Tensor,
574
- ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
575
- hidden_states = hidden_states.view(-1, self.hidden_dim)
576
-
577
- router_logits, top_k_index, top_k_weights = self.gate(hidden_states)
578
- final_hidden_states = self.experts(hidden_states, top_k_index, top_k_weights)
579
-
580
- # MoE results are returned flattened to ease the modality-isolated assignment of results
581
- return final_hidden_states.flatten(), router_logits.flatten()
582
-
583
-
584
- class Ernie4_5_VL_MoeMoeBlock(nn.Module):
585
- """
586
- Similar to `Ernie4_5_Moe`, but with modality-isolated experts:
587
- - A set of text experts that are only run on text tokens
588
- - A set of vision experts that are only run on vision (image/video) tokens
589
-
590
- This modality isolation is unique to the Ernie 4.5 VL Moe models.
591
- """
592
-
593
- def __init__(self, config):
594
- super().__init__()
595
- self.num_experts = config.moe_num_experts
596
-
597
- self.text_moe = Ernie4_5_VL_MoeSparseMoeBlock(config, intermediate_size=config.moe_intermediate_size[0])
598
- self.vision_moe = Ernie4_5_VL_MoeSparseMoeBlock(config, intermediate_size=config.moe_intermediate_size[1])
599
-
600
- self.shared_experts = None
601
- if config.moe_num_shared_experts > 0:
602
- self.shared_experts = Ernie4_5_VL_MoeMLP(
603
- config, config.moe_intermediate_size[0] * config.moe_num_shared_experts
604
- )
605
-
606
- def forward(
607
- self,
608
- hidden_states: torch.Tensor,
609
- moe_mm_token_type_ids: torch.IntTensor | None = None,
610
- ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
611
- batch_size, sequence_length, hidden_dim = hidden_states.shape
612
-
613
- # (Optional) shared experts
614
- if self.shared_experts is not None:
615
- shared_output = self.shared_experts(hidden_states)
616
-
617
- if moe_mm_token_type_ids is not None and moe_mm_token_type_ids.any():
618
- final_hidden_states = torch.zeros_like(hidden_states)
619
- router_logits = torch.zeros(
620
- size=(batch_size * sequence_length, self.num_experts),
621
- device=final_hidden_states.device,
622
- dtype=torch.float,
623
- )
624
-
625
- # True (1 or 2) == vision, False (0) == text tokens
626
- moe_mm_token_type_ids = moe_mm_token_type_ids.bool()
627
- token_type_ids_router = moe_mm_token_type_ids.reshape(-1)[:, None].expand(-1, self.num_experts)
628
- token_type_ids_states = moe_mm_token_type_ids[..., None].expand(-1, -1, hidden_dim)
629
-
630
- # Run moe on each modality and assign their results to the original token positions
631
- final_hidden_states[~token_type_ids_states], router_logits[~token_type_ids_router] = self.text_moe(
632
- hidden_states[~token_type_ids_states]
633
- )
634
- final_hidden_states[token_type_ids_states], router_logits[token_type_ids_router] = self.vision_moe(
635
- hidden_states[token_type_ids_states]
636
- )
637
- else:
638
- final_hidden_states, router_logits = self.text_moe(hidden_states)
639
- final_hidden_states = final_hidden_states.reshape(batch_size, sequence_length, hidden_dim)
640
- router_logits = router_logits.reshape(-1, self.num_experts)
641
-
642
- # Add (optional) shared experts to the result
643
- if self.shared_experts is not None:
644
- final_hidden_states = final_hidden_states + shared_output
645
-
646
- return final_hidden_states, router_logits
647
-
648
-
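A rough sketch of the modality isolation described in the `Ernie4_5_VL_MoeMoeBlock` docstring, using toy shapes and stand-in expert functions instead of the real routed experts (the actual module additionally expands the mask to the hidden dimension and tracks router logits):

```python
import torch

# 0 = text, 1 = image, 2 = video; any non-zero token is handled by the vision MoE.
hidden_states = torch.randn(1, 6, 4)
moe_mm_token_type_ids = torch.tensor([[0, 0, 1, 1, 2, 0]])

vision_mask = moe_mm_token_type_ids.bool()
text_moe = lambda x: x * 2.0       # stand-in for the text expert block
vision_moe = lambda x: x * 3.0     # stand-in for the vision expert block

out = torch.zeros_like(hidden_states)
out[~vision_mask] = text_moe(hidden_states[~vision_mask])   # text tokens only
out[vision_mask] = vision_moe(hidden_states[vision_mask])   # image/video tokens only
```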
649
- class Ernie4_5_VL_MoeDecoderLayer(GradientCheckpointingLayer):
650
- def __init__(self, config, layer_idx):
651
- super().__init__()
652
- self.hidden_size = config.hidden_size
653
-
654
- self.self_attn = Ernie4_5_VL_MoeTextAttention(config, layer_idx)
655
-
656
- if config.mlp_layer_types[layer_idx] == "sparse":
657
- self.mlp = Ernie4_5_VL_MoeMoeBlock(config)
658
- else:
659
- self.mlp = Ernie4_5_VL_MoeMLP(config)
660
-
661
- self.input_layernorm = Ernie4_5_VL_MoeRMSNorm(config.hidden_size, config.rms_norm_eps)
662
- self.post_attention_layernorm = Ernie4_5_VL_MoeRMSNorm(config.hidden_size, config.rms_norm_eps)
663
-
664
- def forward(
665
- self,
666
- hidden_states: torch.Tensor,
667
- position_embeddings: tuple[torch.Tensor, torch.Tensor],
668
- attention_mask: torch.Tensor | None = None,
669
- position_ids: torch.Tensor | None = None,
670
- moe_mm_token_type_ids: torch.IntTensor | None = None,
671
- past_key_values: Cache | None = None,
672
- cache_position: torch.LongTensor | None = None,
673
- **kwargs: Unpack[FlashAttentionKwargs],
674
- ) -> tuple[torch.Tensor, tuple[torch.Tensor, torch.Tensor] | None]:
675
- residual = hidden_states
676
-
677
- hidden_states = self.input_layernorm(hidden_states)
678
-
679
- # Self Attention
680
- hidden_states, _ = self.self_attn(
681
- hidden_states=hidden_states,
682
- position_embeddings=position_embeddings,
683
- attention_mask=attention_mask,
684
- position_ids=position_ids,
685
- past_key_values=past_key_values,
686
- cache_position=cache_position,
687
- **kwargs,
688
- )
689
- hidden_states = hidden_states + residual
690
-
691
- # Fully Connected
692
- residual = hidden_states
693
- hidden_states = self.post_attention_layernorm(hidden_states)
694
- if isinstance(self.mlp, Ernie4_5_VL_MoeMoeBlock):
695
- hidden_states, _ = self.mlp(hidden_states, moe_mm_token_type_ids)
696
- else:
697
- hidden_states = self.mlp(hidden_states)
698
- hidden_states = hidden_states + residual
699
-
700
- return hidden_states
701
-
702
-
703
- class Ernie4_5_VL_MoeVisionAttention(Qwen2_5_VLVisionAttention):
704
- pass
705
-
706
-
707
- class Ernie4_5_VL_MoeVisionBlock(Qwen2_5_VLVisionBlock):
708
- def __init__(self, config) -> None:
709
- super().__init__(config, None)
710
-
711
- self.norm1 = nn.LayerNorm(config.hidden_size, config.rms_norm_eps)
712
- self.norm2 = nn.LayerNorm(config.hidden_size, config.rms_norm_eps)
713
- self.mlp = Ernie4_5VLVisionMLP(
714
- dim=config.hidden_size,
715
- hidden_dim=config.intermediate_size,
716
- hidden_act=config.hidden_act,
717
- )
718
-
719
-
720
- class Ernie4_5_VL_MoePreTrainedModel(Qwen2_5_VLPreTrainedModel):
721
- _can_compile_fullgraph = False
722
-
723
- _can_record_outputs = {
724
- "router_logits": OutputRecorder(Ernie4_5_VL_MoeMoeBlock, index=1),
725
- "hidden_states": Ernie4_5_VL_MoeDecoderLayer,
726
- "attentions": Ernie4_5_VL_MoeTextAttention,
727
- }
728
- _keep_in_fp32_modules_strict = ["gate.weight", "moe_statics"]
729
-
730
- def _init_weights(self, module):
731
- PreTrainedModel._init_weights(self, module)
732
- if isinstance(module, Ernie4_5_VL_MoeMoeTopKRouter):
733
- init.zeros_(module.moe_statics.e_score_correction_bias)
734
- init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
735
- elif isinstance(module, Ernie4_5_VL_MoeMoeExperts):
736
- init.normal_(module.gate_up_proj, mean=0.0, std=self.config.initializer_range)
737
- init.normal_(module.down_proj, mean=0.0, std=self.config.initializer_range)
738
- elif isinstance(module, Ernie4_5_VL_MoeVisionRotaryEmbedding):
739
- inv_freq = 1.0 / (module.theta ** (torch.arange(0, module.dim, 2, dtype=torch.float) / module.dim))
740
- init.copy_(module.inv_freq, inv_freq)
741
-
742
-
743
- class Ernie4_5_VL_MoeTextModel(Ernie4_5_MoeModel):
744
- config: Ernie4_5_VL_MoeTextConfig
745
-
746
- def __init__(self, config: Ernie4_5_VL_MoeTextConfig):
747
- super().__init__(config)
748
- self.rotary_emb = Ernie4_5_VL_MoeTextRotaryEmbedding(config=config)
749
-
750
- @check_model_inputs
751
- @auto_docstring
752
- def forward(
753
- self,
754
- input_ids: torch.LongTensor | None = None,
755
- attention_mask: torch.Tensor | None = None,
756
- position_ids: torch.LongTensor | None = None,
757
- moe_mm_token_type_ids: torch.IntTensor | None = None,
758
- past_key_values: Cache | None = None,
759
- inputs_embeds: torch.FloatTensor | None = None,
760
- use_cache: bool | None = None,
761
- cache_position: torch.LongTensor | None = None,
762
- **kwargs: Unpack[FlashAttentionKwargs],
763
- ) -> MoeModelOutputWithPast:
764
- r"""
765
- moe_mm_token_type_ids (`torch.IntTensor` of shape `(batch_size, sequence_length)`, *optional*):
766
- The same as `mm_token_type_ids` while additionally considering start/end image/video tokens as respective vision tokens.
767
- """
768
- if (input_ids is None) ^ (inputs_embeds is not None):
769
- raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
770
-
771
- if use_cache and past_key_values is None:
772
- past_key_values = DynamicCache(config=self.config)
773
-
774
- if inputs_embeds is None:
775
- inputs_embeds = self.embed_tokens(input_ids)
776
-
777
- if cache_position is None:
778
- past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
779
- cache_position = torch.arange(
780
- past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
781
- )
782
-
783
- # the hard-coded `3` is for temporal, height and width.
784
- if position_ids is None:
785
- position_ids = cache_position.view(1, 1, -1).expand(3, inputs_embeds.shape[0], -1)
786
- elif position_ids.ndim == 2:
787
- position_ids = position_ids[None, ...].expand(3, position_ids.shape[0], -1)
788
-
789
- # NOTE: we need to pass text position ids for packing. Ernie 4.5 VL uses 3D positions
790
- # where each dim indicates visual spatial positions for temporal/height/width grids.
791
- # There is only one scenario when FA2-like packed masking might be activated.
792
- # 1. User specifically passed packed `position_ids` and no attention mask.
793
- # In this case we expect the user to create correct position ids for all 3 grids
794
- # and prepend text-only position ids to it. The final tensor will be [4, bs, seq-len]
795
- if position_ids.ndim == 3 and position_ids.shape[0] == 4:
796
- text_position_ids = position_ids[0]
797
- position_ids = position_ids[1:]
798
- else:
799
- # If inputs are not packed (usual 3D positions), do not prepare mask from position_ids
800
- text_position_ids = None
801
-
802
- attention_mask = create_causal_mask(
803
- config=self.config,
804
- input_embeds=inputs_embeds,
805
- attention_mask=attention_mask,
806
- cache_position=cache_position,
807
- past_key_values=past_key_values,
808
- position_ids=text_position_ids,
809
- )
810
-
811
- hidden_states = inputs_embeds
812
-
813
- # create position embeddings to be shared across the decoder layers
814
- position_embeddings = self.rotary_emb(hidden_states, position_ids)
815
-
816
- for decoder_layer in self.layers[: self.config.num_hidden_layers]:
817
- hidden_states = decoder_layer(
818
- hidden_states,
819
- position_embeddings=position_embeddings,
820
- attention_mask=attention_mask,
821
- position_ids=position_ids,
822
- moe_mm_token_type_ids=moe_mm_token_type_ids,
823
- past_key_values=past_key_values,
824
- cache_position=cache_position,
825
- **kwargs,
826
- )
827
-
828
- hidden_states = self.norm(hidden_states)
829
-
830
- return MoeModelOutputWithPast(
831
- last_hidden_state=hidden_states,
832
- past_key_values=past_key_values,
833
- )
834
-
835
-
836
- class Ernie4_5VLVisionMLP(VisionMlp):
837
- pass
838
-
839
-
840
- class Ernie4_5_VL_MoePatchEmbed(Qwen2_5_VisionPatchEmbed):
841
- def __init__(
842
- self,
843
- patch_size: int = 14,
844
- in_channels: int = 3,
845
- embed_dim: int = 1152,
846
- ) -> None:
847
- super().__init__(patch_size, in_channels, embed_dim)
848
-
849
- del self.temporal_patch_size
850
- del kernel_size # noqa: F821
851
- self.proj = nn.Linear(in_channels * patch_size * patch_size, embed_dim, bias=False)
852
-
853
- def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
854
- target_dtype = self.proj.weight.dtype
855
- return self.proj(hidden_states.to(target_dtype))
856
-
857
-
858
- class Ernie4_5_VL_MoeVisionRotaryEmbedding(Qwen2_5_VisionRotaryEmbedding):
859
- pass
860
-
861
-
862
- class Ernie4_5_VL_MoeVisionTransformerPretrainedModel(Qwen2VisionTransformerPretrainedModel):
863
- _can_record_outputs = {
864
- "router_logits": OutputRecorder(Ernie4_5_VL_MoeMoeBlock, index=1),
865
- "hidden_states": Ernie4_5_VL_MoeVisionBlock,
866
- "attentions": Ernie4_5_VL_MoeVisionAttention,
867
- }
868
-
869
- def __init__(self, config) -> None:
870
- super().__init__(config)
871
-
872
- del self.merger
873
-
874
- self.patch_embed = Ernie4_5_VL_MoePatchEmbed(
875
- patch_size=config.patch_size,
876
- in_channels=config.in_channels,
877
- embed_dim=config.hidden_size,
878
- )
879
-
880
- head_dim = config.hidden_size // config.num_heads
881
- self.rotary_pos_emb = Ernie4_5_VL_MoeVisionRotaryEmbedding(head_dim // 2)
882
-
883
- self.ln = nn.LayerNorm(config.hidden_size, eps=config.rms_norm_eps)
884
-
885
- def get_dtype(self):
886
- raise AttributeError("Ernie 4.5 VL Moe does not need this!")
887
-
888
- def get_device(self):
889
- raise AttributeError("Ernie 4.5 VL Moe does not need this!")
890
-
891
- @check_model_inputs
892
- def forward(
893
- self, hidden_states: torch.Tensor, grid_thw: torch.Tensor, **kwargs: Unpack[TransformersKwargs]
894
- ) -> tuple | BaseModelOutputWithPooling:
895
- hidden_states = self.patch_embed(hidden_states)
896
- rotary_pos_emb = self.rot_pos_emb(grid_thw)
897
- emb = torch.cat((rotary_pos_emb, rotary_pos_emb), dim=-1)
898
- position_embeddings = (emb.cos(), emb.sin())
899
-
900
- cu_seqlens = torch.repeat_interleave(grid_thw[:, 1] * grid_thw[:, 2], grid_thw[:, 0]).cumsum(
901
- dim=0,
902
- # Select dtype based on the following factors:
903
- # - FA2 requires that cu_seqlens_q must have dtype int32
904
- # - torch.onnx.export requires that cu_seqlens_q must have same dtype as grid_thw
905
- # See https://github.com/huggingface/transformers/pull/34852 for more information
906
- dtype=grid_thw.dtype if torch.jit.is_tracing() else torch.int32,
907
- )
908
- cu_seqlens = F.pad(cu_seqlens, (1, 0), value=0)
909
-
910
- for block in self.blocks:
911
- hidden_states = block(
912
- hidden_states,
913
- cu_seqlens=cu_seqlens,
914
- position_embeddings=position_embeddings,
915
- **kwargs,
916
- )
917
- hidden_states = self.ln(hidden_states)
918
- return BaseModelOutputWithPooling(last_hidden_state=hidden_states)
919
-
920
-
921
- class Ernie4_5_VL_MoeVisionMLP(nn.Module):
922
- def __init__(self, config, in_dim, out_dim):
923
- super().__init__()
924
-
925
- self.fc1 = nn.Linear(in_dim, out_dim)
926
- self.act_fn = nn.GELU()
927
- self.fc2 = nn.Linear(out_dim, out_dim)
928
- self.ln = nn.LayerNorm(out_dim, eps=config.vision_config.rms_norm_eps)
929
-
930
- def forward(self, hidden_states):
931
- hidden_states = self.fc1(hidden_states)
932
- hidden_states = self.act_fn(hidden_states)
933
- hidden_states = self.fc2(hidden_states)
934
- hidden_states = self.ln(hidden_states)
935
- return hidden_states
936
-
937
-
938
- class Ernie4_5_VL_MoeVariableResolutionResamplerModel(nn.Module):
939
- def __init__(self, config: Ernie4_5_VL_MoeConfig):
940
- super().__init__()
941
- self.config = config
942
-
943
- self.in_dim = config.vision_config.hidden_size
944
- self.out_dim = config.text_config.hidden_size
945
- self.spatial_merge_size = config.vision_config.spatial_merge_size
946
- self.temporal_merge_size = config.vision_config.temporal_merge_size
947
-
948
- # compress 2d conv(picture) to 1d
949
- self.spatial_dim = self.in_dim * self.spatial_merge_size**2
950
- # compress 3d conv(video) to 1d
951
- self.temporal_dim = self.in_dim * self.spatial_merge_size**2 * self.temporal_merge_size
952
-
953
- self.spatial_linear = Ernie4_5_VL_MoeVisionMLP(config, self.spatial_dim, self.spatial_dim)
954
- self.temporal_linear = Ernie4_5_VL_MoeVisionMLP(config, self.temporal_dim, self.spatial_dim)
955
-
956
- self.mlp = nn.Linear(self.spatial_dim, self.out_dim)
957
- self.after_norm = Ernie4_5_VL_MoeRMSNorm(self.out_dim, config.text_config.rms_norm_eps)
958
-
959
- def _temporal_slicing(self, hidden_states, grid_thw):
960
- """
961
- Slices along the temporal dimension in even/odd patterns (usually if we have a video input)
962
- or duplicates along temporal dimension (usually if we have an image input).
963
-
964
- Example:
965
- Video input with temporal pattern of [1, -1, 2, -2, 3, -3]
966
- > Even input [1, 2, 3], odd input [-1, -2, -3]
967
- > Reordered via slices to [1, 2, 3, -1, -2, -3]
968
- Image input with temporal pattern [1]
969
- > Duplicate input [1], [1]
970
- > Reordered to [1, 1]
971
-
972
- NOTE: This is hard-coded for `temporal_merge_size == 2` and won't work otherwise.
973
- """
974
- # Calculating offsets on spatial dim (based on flattened tensors)
975
- grid_t, grid_hw = grid_thw[:, 0], grid_thw[:, 1:]
976
- grid_hw_after_conv = grid_hw.prod(-1) // (self.spatial_merge_size**2)
977
-
978
- # Calculating offsets on batch dim (based on flattened tensors)
979
- tokens_per_img_or_vid = (grid_thw.prod(-1) // (self.spatial_merge_size**2)).flatten()
980
- batch_offsets = torch.empty(tokens_per_img_or_vid.size(), dtype=tokens_per_img_or_vid.dtype)
981
- batch_offsets[0] = 0
982
- batch_offsets[1:] = tokens_per_img_or_vid.cumsum(dim=0)[:-1]
983
-
984
- first_slice_offsets = []
985
- second_slice_offsets = []
986
- for temporal_size, spatial_size, batch_offset in zip(grid_t, grid_hw_after_conv, batch_offsets):
987
- # Depending on temporal, we may interleave:
988
- # - Images have temporal == 1 --> same offsets (duplicate "frame" image)
989
- # - Videos have temporal > 1 --> different offsets (even, odd)
990
- first_offset_range = range(0, temporal_size, 2)
991
- second_offset_range = range(1 if temporal_size > 1 else 0, temporal_size, 2)
992
-
993
- for temporal_offset_even, temporal_offset_odd in zip(first_offset_range, second_offset_range):
994
- first_slice_offsets.append(
995
- torch.arange(
996
- batch_offset + (temporal_offset_even) * spatial_size,
997
- batch_offset + (temporal_offset_even + 1) * spatial_size,
998
- )
999
- )
1000
- second_slice_offsets.append(
1001
- torch.arange(
1002
- batch_offset + (temporal_offset_odd) * spatial_size,
1003
- batch_offset + (temporal_offset_odd + 1) * spatial_size,
1004
- )
1005
- )
1006
-
1007
- # Input: [1, -1, 2, -2, 3, -3] or [1]
1008
- # Indices: [0, 2, 4] (even) or [0] (duplicate)
1009
- first_slice_offsets = torch.cat(first_slice_offsets, dim=-1).to(hidden_states.device)
1010
- # Indices: [1, 3, 5] (odd) or [0] (duplicate)
1011
- second_slice_offsets = torch.cat(second_slice_offsets, dim=-1).to(hidden_states.device)
1012
-
1013
- # Output: [1, 2, 3, -1, -2, -3] or [1, 1]
1014
- return torch.concat(
1015
- [
1016
- torch.index_select(hidden_states, dim=0, index=first_slice_offsets),
1017
- torch.index_select(hidden_states, dim=0, index=second_slice_offsets),
1018
- ],
1019
- dim=-1,
1020
- )
1021
-
1022
- def forward(self, hidden_states, grid_thw):
1023
- # image spatial
1024
- # reshape imitates convolution via linear projection
1025
- hidden_states = hidden_states.reshape([-1, hidden_states.shape[-1] * (self.spatial_merge_size**2)])
1026
- hidden_states = self.spatial_linear(hidden_states)
1027
-
1028
- # video temporal
1029
- hidden_states = self._temporal_slicing(hidden_states, grid_thw)
1030
- hidden_states = self.temporal_linear(hidden_states)
1031
-
1032
- # final mlp
1033
- hidden_states = self.mlp(hidden_states)
1034
- hidden_states = self.after_norm(hidden_states)
1035
-
1036
- return hidden_states
1037
-
1038
-
1039
- class Ernie4_5_VL_MoeModel(Qwen2_5_VLModel):
1040
- _checkpoint_conversion_mapping = {"^norm": "language_model.norm"}
1041
-
1042
- def __init__(self, config: Ernie4_5_VL_MoeConfig):
1043
- super().__init__(config)
1044
-
1045
- del self.visual
1046
- self.vision_tower = Ernie4_5_VL_MoeVisionTransformerPretrainedModel._from_config(config.vision_config)
1047
- self.resampler_model = Ernie4_5_VL_MoeVariableResolutionResamplerModel(config)
1048
-
1049
- # TODO: Should be moved to generation loop instead in the future
1050
- # Relevant PR(s): https://github.com/huggingface/transformers/pull/42088
1051
- def get_position_ids(
1052
- self,
1053
- input_ids: torch.LongTensor = None,
1054
- attention_mask: torch.Tensor | None = None,
1055
- past_key_values: Cache | None = None,
1056
- inputs_embeds: torch.FloatTensor | None = None,
1057
- image_grid_thw: torch.LongTensor | None = None,
1058
- video_grid_thw: torch.LongTensor | None = None,
1059
- cache_position: torch.LongTensor | None = None,
1060
- mm_token_type_ids: torch.IntTensor | None = None,
1061
- ):
1062
- """
1063
- Calculates the 3D position ids with a custom caching mechanism:
1064
- - First forward calculates the initial positions and the respective
1065
- deltas (offset) for subsequent positions. See `get_rope_index` for
1066
- more details.
1067
- - Second forward and onward (generation): uses the cache position combined with the
1068
- cached deltas to determine the current position.
1069
-
1070
- NOTE: We assume that the position ids are `None` and recalculate them here in any case.
1071
- """
1072
- # Calculate RoPE index once per generation in the pre-fill stage only.
1073
- # When compiling, we can't check tensor values, thus we check only the input length
1074
- # It is safe to assume that `length!=1` means we're in pre-fill because compiled
1075
- # models currently cannot do assisted decoding
1076
- prefill_compiled_stage = is_torchdynamo_compiling() and (
1077
- (input_ids is not None and input_ids.shape[1] != 1)
1078
- or (inputs_embeds is not None and inputs_embeds.shape[1] != 1)
1079
- )
1080
- prefill_noncompiled_stage = not is_torchdynamo_compiling() and (
1081
- (cache_position is not None and cache_position[0] == 0)
1082
- or (past_key_values is None or past_key_values.get_seq_length() == 0)
1083
- )
1084
- if (prefill_compiled_stage or prefill_noncompiled_stage) or self.rope_deltas is None:
1085
- position_ids, rope_deltas = self.get_rope_index(
1086
- input_ids,
1087
- image_grid_thw,
1088
- video_grid_thw,
1089
- attention_mask=attention_mask,
1090
- mm_token_type_ids=mm_token_type_ids,
1091
- )
1092
- self.rope_deltas = rope_deltas
1093
- # then use the prev pre-calculated rope-deltas to get the correct position ids
1094
- else:
1095
- if input_ids is not None:
1096
- batch_size, seq_length, device = input_ids.shape[0], 1, input_ids.device
1097
- elif inputs_embeds is not None:
1098
- batch_size, seq_length, device = inputs_embeds.shape[0], 1, inputs_embeds.device
1099
- else:
1100
- raise ValueError(
1101
- "Cannot calculate position ids without any input to the model. "
1102
- "Need either `input_ids` or `inputs_embeds`!"
1103
- )
1104
-
1105
- delta = (cache_position[0] + self.rope_deltas).to(device) if cache_position is not None else 0
1106
- position_ids = torch.arange(seq_length, device=device)
1107
- position_ids = position_ids.view(1, -1).expand(batch_size, -1)
1108
- if cache_position is not None: # otherwise `deltas` is an int `0`
1109
- delta = delta.repeat_interleave(batch_size // delta.shape[0], dim=0)
1110
- position_ids = position_ids.add(delta)
1111
- position_ids = position_ids.unsqueeze(0).expand(3, -1, -1)
1112
-
1113
- return position_ids
1114
-
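A condensed sketch of the decode-time branch above: once `rope_deltas` has been cached during prefill, each new token's 3D position is just the scalar cache position shifted by that delta (toy values below, not taken from a real run):

```python
import torch

cache_position = torch.tensor([17])    # index of the token currently being decoded
rope_deltas = torch.tensor([[-9]])     # cached at prefill, shape (batch_size, 1)

next_pos = cache_position[0] + rope_deltas              # tensor([[8]])
position_ids = next_pos.unsqueeze(0).expand(3, -1, -1)  # shape (3, batch_size, 1)
```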
1115
- def get_rope_index(
1116
- self,
1117
- input_ids: torch.LongTensor | None = None,
1118
- image_grid_thw: torch.LongTensor | None = None,
1119
- video_grid_thw: torch.LongTensor | None = None,
1120
- attention_mask: torch.Tensor | None = None,
1121
- mm_token_type_ids: torch.IntTensor | None = None,
1122
- ) -> tuple[torch.Tensor, torch.Tensor]:
1123
- """
1124
- Calculate the 3D rope index based on image and video's temporal, height and width in LLM.
1125
-
1126
- Explanation:
1127
- Each embedding sequence contains vision embedding and text embedding or just contains text embedding.
1128
-
1129
- For pure text embedding sequence, the rotary position embedding has no difference with modern LLMs.
1130
- Examples:
1131
- input_ids: [T T T T T], here T is for text.
1132
- temporal position_ids: [0, 1, 2, 3, 4]
1133
- height position_ids: [0, 1, 2, 3, 4]
1134
- width position_ids: [0, 1, 2, 3, 4]
1135
-
1136
- For vision and text embedding sequence, we calculate 3D rotary position embedding for vision part
1137
- and 1D rotary position embedding for text part.
1138
- Examples:
1139
- Temporal (Time): 3 patches, representing different segments of the video in time.
1140
- Height: 2 patches, dividing each frame vertically.
1141
- Width: 2 patches, dividing each frame horizontally.
1142
- We also have some important parameters:
1143
- fps (Frames Per Second): The video's frame rate, set to 1. This means one frame is processed each second.
1144
- tokens_per_second: This is a crucial parameter. It dictates how many "time-steps" or "temporal tokens" are conceptually packed into a one-second interval of the video. In this case, we have 25 tokens per second. So each second of the video will be represented with 25 separate time points. It essentially defines the temporal granularity.
1145
- temporal_patch_size: The number of frames that compose one temporal patch. Here, it's 2 frames.
1146
- interval: The step size for the temporal position IDs, calculated as tokens_per_second * temporal_patch_size / fps. In this case, 25 * 2 / 1 = 50. This means that each temporal patch will have a difference of 50 in the temporal position IDs.
1147
- input_ids: [V V V V V V V V V V V V T T T T T], here V is for vision.
1148
- vision temporal position_ids: [0, 0, 0, 0, 50, 50, 50, 50, 100, 100, 100, 100]
1149
- vision height position_ids: [0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1]
1150
- vision width position_ids: [0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
1151
- text temporal position_ids: [101, 102, 103, 104, 105]
1152
- text height position_ids: [101, 102, 103, 104, 105]
1153
- text width position_ids: [101, 102, 103, 104, 105]
1154
- Here we calculate the text start position_ids as the max vision position_ids plus 1.
1155
-
1156
- Args:
1157
- input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
1158
- Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
1159
- it.
1160
- image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
1161
- The temporal, height and width of feature shape of each image in LLM.
1162
- video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*):
1163
- The temporal, height and width of feature shape of each video in LLM.
1164
- attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1165
- Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
1166
-
1167
- - 1 for tokens that are **not masked**,
1168
- - 0 for tokens that are **masked**.
1169
- mm_token_type_ids (`torch.IntTensor` of shape `(batch_size, sequence_length)`, *optional*):
1170
- Token type ids matching each modality to a different value in the input sequence, i.e. text (0), image (1), video (2).
1171
-
1172
- Returns:
1173
- position_ids (`torch.LongTensor` of shape `(3, batch_size, sequence_length)`)
1174
- mrope_position_deltas (`torch.Tensor` of shape `(batch_size)`)
1175
- """
1176
-
1177
- temporal_merge_size = self.config.vision_config.temporal_merge_size
1178
- spatial_merge_size = self.config.vision_config.spatial_merge_size
1179
-
1180
- mrope_position_deltas = []
1181
- if input_ids is not None and (image_grid_thw is not None or video_grid_thw is not None):
1182
- total_input_ids = input_ids
1183
- if attention_mask is None:
1184
- attention_mask = torch.ones_like(total_input_ids)
1185
- position_ids = torch.ones(
1186
- 3,
1187
- input_ids.shape[0],
1188
- input_ids.shape[1],
1189
- dtype=input_ids.dtype,
1190
- device=input_ids.device,
1191
- )
1192
- image_index, video_index = 0, 0
1193
- attention_mask = attention_mask.to(total_input_ids.device)
1194
- for i, input_ids in enumerate(total_input_ids):
1195
- # If we don't have `mm_token_type_ids`, then we have text tokens only (== 0)
1196
- if mm_token_type_ids is None:
1197
- input_token_type = torch.zeros_like(input_ids)[attention_mask[i] == 1].tolist()
1198
- else:
1199
- input_token_type = mm_token_type_ids[i, attention_mask[i] == 1].tolist()
1200
-
1201
- input_type_group = []
1202
- for key, group in itertools.groupby(enumerate(input_token_type), lambda x: x[1]):
1203
- group = list(group)
1204
- start_index = group[0][0]
1205
- end_index = group[-1][0] + 1
1206
- input_type_group.append((key, start_index, end_index))
1207
-
1208
- llm_pos_ids_list = []
1209
- for modality_type, start_idx, end_idx in input_type_group:
1210
- st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0
1211
-
1212
- # text == 0
1213
- if modality_type == 0:
1214
- text_len = end_idx - start_idx
1215
- llm_pos_ids_list.append(torch.arange(text_len).view(1, -1).expand(3, -1) + st_idx)
1216
-
1217
- # image == 1, video == 2
1218
- else:
1219
- grid_thw = image_grid_thw if modality_type == 1 else video_grid_thw
1220
- mm_index = image_index if modality_type == 1 else video_index
1221
- t_merge_size = 1 if modality_type == 1 else temporal_merge_size
1222
-
1223
- t, h, w = (
1224
- grid_thw[mm_index][0],
1225
- grid_thw[mm_index][1],
1226
- grid_thw[mm_index][2],
1227
- )
1228
- llm_grid_t, llm_grid_h, llm_grid_w = (
1229
- t.item() // t_merge_size,
1230
- h.item() // spatial_merge_size,
1231
- w.item() // spatial_merge_size,
1232
- )
1233
-
1234
- for t_idx in range(llm_grid_t):
1235
- t_index = torch.tensor(t_idx).view(-1, 1).expand(-1, llm_grid_h * llm_grid_w).flatten()
1236
- h_index = torch.arange(llm_grid_h).view(1, -1, 1).expand(1, -1, llm_grid_w).flatten()
1237
- w_index = torch.arange(llm_grid_w).view(1, 1, -1).expand(1, llm_grid_h, -1).flatten()
1238
- llm_pos_ids_list.append(torch.stack([t_index, h_index, w_index]) + st_idx)
1239
-
1240
- if modality_type == 1:
1241
- image_index += 1
1242
- else:
1243
- video_index += 1
1244
-
1245
- llm_positions = torch.cat(llm_pos_ids_list, dim=1).reshape(3, -1)
1246
- position_ids[..., i, attention_mask[i] == 1] = llm_positions.to(position_ids.device)
1247
- mrope_position_deltas.append(llm_positions.max() + 1 - len(total_input_ids[i]))
1248
- mrope_position_deltas = torch.tensor(mrope_position_deltas, device=input_ids.device).unsqueeze(1)
1249
- return position_ids, mrope_position_deltas
1250
- else:
1251
- if attention_mask is not None:
1252
- position_ids = attention_mask.long().cumsum(-1) - 1
1253
- position_ids.masked_fill_(attention_mask == 0, 1)
1254
- position_ids = position_ids.unsqueeze(0).expand(3, -1, -1).to(attention_mask.device)
1255
- max_position_ids = position_ids.max(0, keepdim=False)[0].max(-1, keepdim=True)[0]
1256
- mrope_position_deltas = max_position_ids + 1 - attention_mask.shape[-1]
1257
- else:
1258
- position_ids = (
1259
- torch.arange(input_ids.shape[1], device=input_ids.device)
1260
- .view(1, 1, -1)
1261
- .expand(3, input_ids.shape[0], -1)
1262
- )
1263
- mrope_position_deltas = torch.zeros(
1264
- [input_ids.shape[0], 1],
1265
- device=input_ids.device,
1266
- dtype=input_ids.dtype,
1267
- )
1268
-
1269
- return position_ids, mrope_position_deltas
1270
-
1271
- @can_return_tuple
1272
- @auto_docstring
1273
- def get_video_features(
1274
- self,
1275
- pixel_values_videos: torch.FloatTensor,
1276
- video_grid_thw: torch.LongTensor | None = None,
1277
- **kwargs: Unpack[TransformersKwargs],
1278
- ) -> tuple | BaseModelOutputWithPooling:
1279
- video_outputs = self.vision_tower(pixel_values_videos, video_grid_thw, return_dict=True, **kwargs)
1280
- video_embeds = self.resampler_model(video_outputs.last_hidden_state, video_grid_thw)
1281
- split_sizes = (
1282
- video_grid_thw.prod(-1)
1283
- // self.vision_tower.spatial_merge_size**2
1284
- // self.resampler_model.temporal_merge_size
1285
- ).tolist()
1286
- video_embeds = torch.split(video_embeds, split_sizes)
1287
- video_outputs.pooler_output = video_embeds
1288
- return video_outputs
1289
-
1290
- @can_return_tuple
1291
- @auto_docstring
1292
- def get_image_features(
1293
- self,
1294
- pixel_values: torch.FloatTensor,
1295
- image_grid_thw: torch.LongTensor | None = None,
1296
- **kwargs: Unpack[TransformersKwargs],
1297
- ) -> tuple | BaseModelOutputWithPooling:
1298
- image_outputs = self.vision_tower(pixel_values, image_grid_thw, return_dict=True, **kwargs)
1299
- image_embeds = self.resampler_model(image_outputs.last_hidden_state, image_grid_thw)
1300
- split_sizes = (image_grid_thw.prod(-1) // self.vision_tower.spatial_merge_size**2).tolist()
1301
- image_embeds = torch.split(image_embeds, split_sizes)
1302
- image_outputs.pooler_output = image_embeds
1303
- return image_outputs
1304
-
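Back-of-the-envelope token counts matching the `split_sizes` computations in `get_video_features` and `get_image_features` above, assuming the default `spatial_merge_size=2` and `temporal_merge_size=2` (toy grids only):

```python
spatial_merge_size, temporal_merge_size = 2, 2

# Toy video grid: 4 temporal x 16 x 16 spatial patches.
t, h, w = 4, 16, 16
video_tokens = (t * h * w) // spatial_merge_size**2 // temporal_merge_size  # 128

# Toy image grid: images keep t=1; the single frame is duplicated before the
# temporal merge, so only the spatial merge reduces the token count.
t, h, w = 1, 16, 16
image_tokens = (t * h * w) // spatial_merge_size**2  # 64
```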
1305
- @auto_docstring
1306
- @can_return_tuple
1307
- def forward(
1308
- self,
1309
- input_ids: torch.LongTensor = None,
1310
- attention_mask: torch.Tensor | None = None,
1311
- position_ids: torch.LongTensor | None = None,
1312
- mm_token_type_ids: torch.IntTensor | None = None,
1313
- moe_mm_token_type_ids: torch.IntTensor | None = None,
1314
- past_key_values: Cache | None = None,
1315
- inputs_embeds: torch.FloatTensor | None = None,
1316
- use_cache: bool | None = None,
1317
- pixel_values: torch.Tensor | None = None,
1318
- pixel_values_videos: torch.FloatTensor | None = None,
1319
- image_grid_thw: torch.LongTensor | None = None,
1320
- video_grid_thw: torch.LongTensor | None = None,
1321
- rope_deltas: torch.LongTensor | None = None,
1322
- cache_position: torch.LongTensor | None = None,
1323
- **kwargs: Unpack[TransformersKwargs],
1324
- ) -> tuple | MoeModelOutputWithPast:
1325
- r"""
1326
- mm_token_type_ids (`torch.IntTensor` of shape `(batch_size, sequence_length)`, *optional*):
1327
- Token type ids matching each modality to a different value in the input sequence, i.e. text (0), image (1), video (2).
1328
- moe_mm_token_type_ids (`torch.IntTensor` of shape `(batch_size, sequence_length)`, *optional*):
1329
- The same as `mm_token_type_ids` while additionally considering start/end image/video tokens as respective vision tokens.
1330
- image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
1331
- The temporal, height and width of feature shape of each image in LLM.
1332
- video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*):
1333
- The temporal, height and width of feature shape of each video in LLM.
1334
- rope_deltas (`torch.LongTensor` of shape `(batch_size, )`, *optional*):
1335
- The rope index difference between sequence length and multimodal rope.
1336
- """
1337
- if inputs_embeds is None:
1338
- inputs_embeds = self.get_input_embeddings()(input_ids)
1339
-
1340
- if pixel_values is not None:
1341
- image_embeds = self.get_image_features(pixel_values, image_grid_thw, return_dict=True).pooler_output
1342
- image_embeds = torch.cat(image_embeds, dim=0).to(inputs_embeds.device, inputs_embeds.dtype)
1343
- image_mask, _ = self.get_placeholder_mask(
1344
- input_ids, inputs_embeds=inputs_embeds, image_features=image_embeds
1345
- )
1346
- inputs_embeds = inputs_embeds.masked_scatter(image_mask, image_embeds)
1347
-
1348
- if pixel_values_videos is not None:
1349
- video_embeds = self.get_video_features(pixel_values_videos, video_grid_thw, return_dict=True).pooler_output
1350
- video_embeds = torch.cat(video_embeds, dim=0).to(inputs_embeds.device, inputs_embeds.dtype)
1351
- _, video_mask = self.get_placeholder_mask(
1352
- input_ids, inputs_embeds=inputs_embeds, video_features=video_embeds
1353
- )
1354
- inputs_embeds = inputs_embeds.masked_scatter(video_mask, video_embeds)
1355
-
1356
- if position_ids is None:
1357
- position_ids = self.get_position_ids(
1358
- input_ids=input_ids,
1359
- attention_mask=attention_mask,
1360
- past_key_values=past_key_values,
1361
- inputs_embeds=inputs_embeds,
1362
- image_grid_thw=image_grid_thw,
1363
- video_grid_thw=video_grid_thw,
1364
- cache_position=cache_position,
1365
- mm_token_type_ids=mm_token_type_ids,
1366
- )
1367
-
1368
- outputs = self.language_model(
1369
- input_ids=None,
1370
- position_ids=position_ids,
1371
- moe_mm_token_type_ids=moe_mm_token_type_ids,
1372
- attention_mask=attention_mask,
1373
- use_cache=use_cache,
1374
- past_key_values=past_key_values,
1375
- inputs_embeds=inputs_embeds,
1376
- return_dict=True,
1377
- cache_position=cache_position,
1378
- **kwargs,
1379
- )
1380
-
1381
- return MoeModelOutputWithPast(
1382
- last_hidden_state=outputs.last_hidden_state,
1383
- past_key_values=outputs.past_key_values,
1384
- hidden_states=outputs.hidden_states,
1385
- attentions=outputs.attentions,
1386
- router_logits=outputs.router_logits,
1387
- )
1388
-
1389
-
1390
- class Ernie4_5_VL_MoeForConditionalGeneration(Glm4vForConditionalGeneration, GenerationMixin):
- _checkpoint_conversion_mapping = {"^model.norm": "model.language_model.norm"}
-
- def __init__(self, config):
- super().__init__(config)
-
- self.router_aux_loss_coef = config.text_config.router_aux_loss_coef
- self.num_experts = config.text_config.moe_num_experts
- self.num_experts_per_tok = config.text_config.moe_k
-
- @auto_docstring
- def get_video_features(self, **super_kwargs):
- return super().get_video_features(**super_kwargs)
-
- @auto_docstring
- def get_image_features(self, **super_kwargs):
- return super().get_image_features(**super_kwargs)
-
- def prepare_inputs_for_generation(
- self,
- input_ids,
- inputs_embeds=None,
- attention_mask=None,
- cache_position=None,
- past_key_values=None,
- image_grid_thw=None,
- video_grid_thw=None,
- use_cache=True,
- is_first_iteration=False,
- # Intentionally ignore position ids to force custom cache logic
- position_ids=None,
- **kwargs,
- ):
- model_inputs = super().prepare_inputs_for_generation(
- input_ids,
- inputs_embeds=inputs_embeds,
- attention_mask=attention_mask,
- cache_position=cache_position,
- past_key_values=past_key_values,
- image_grid_thw=image_grid_thw,
- video_grid_thw=video_grid_thw,
- use_cache=use_cache,
- is_first_iteration=is_first_iteration,
- **kwargs,
- )
-
- # Using our own caching with rope delta
- model_inputs["position_ids"] = self.model.get_position_ids(
- input_ids=model_inputs.get("input_ids"),
- attention_mask=model_inputs.get("attention_mask"),
- past_key_values=model_inputs.get("past_key_values"),
- inputs_embeds=model_inputs.get("inputs_embeds"),
- image_grid_thw=model_inputs.get("image_grid_thw"),
- video_grid_thw=model_inputs.get("video_grid_thw"),
- cache_position=model_inputs.get("cache_position"),
- mm_token_type_ids=model_inputs.get("mm_token_type_ids"),
- )
-
- if not is_first_iteration and use_cache:
- model_inputs["pixel_values"] = None
- model_inputs["pixel_values_videos"] = None
- model_inputs["mm_token_type_ids"] = None
- model_inputs["moe_mm_token_type_ids"] = None
-
- return model_inputs
-
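`prepare_inputs_for_generation` recomputes position ids on every step because the multimodal rope assigns vision tokens positions that do not advance one-per-token. The toy illustration below sketches the `rope_deltas` bookkeeping the docstrings refer to, in the style of Qwen2-VL-derived models; the numbers and the exact formula are illustrative assumptions, not taken from this implementation:

    import torch

    prompt_len = 10                      # text + vision placeholder tokens in the prompt
    max_mm_position = 6                  # largest rope index actually assigned during prefill
    rope_delta = max_mm_position + 1 - prompt_len    # vision tokens share positions, so this is negative

    cache_position = torch.tensor([10])  # index of the first decoded token
    decode_position = cache_position + rope_delta
    print(decode_position)               # tensor([7]): decoding continues from the multimodal positions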
- @auto_docstring
- @can_return_tuple
- def forward(
- self,
- input_ids: torch.LongTensor = None,
- attention_mask: torch.Tensor | None = None,
- position_ids: torch.LongTensor | None = None,
- mm_token_type_ids: torch.IntTensor | None = None,
- moe_mm_token_type_ids: torch.IntTensor | None = None,
- past_key_values: Cache | None = None,
- inputs_embeds: torch.FloatTensor | None = None,
- labels: torch.LongTensor | None = None,
- use_cache: bool | None = None,
- output_router_logits: bool | None = None,
- pixel_values: torch.Tensor | None = None,
- pixel_values_videos: torch.FloatTensor | None = None,
- image_grid_thw: torch.LongTensor | None = None,
- video_grid_thw: torch.LongTensor | None = None,
- rope_deltas: torch.LongTensor | None = None,
- cache_position: torch.LongTensor | None = None,
- logits_to_keep: int | torch.Tensor = 0,
- **kwargs: Unpack[TransformersKwargs],
- ) -> tuple | MoeCausalLMOutputWithPast:
- r"""
- mm_token_type_ids (`torch.IntTensor` of shape `(batch_size, sequence_length)`, *optional*):
- Token type ids matching each modality to a different value in the input sequence, i.e. text (0), image (1), video (2).
- moe_mm_token_type_ids (`torch.IntTensor` of shape `(batch_size, sequence_length)`, *optional*):
- The same as `mm_token_type_ids` while additionally considering start/end image/video tokens as respective vision tokens.
- labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
- Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
- config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
- (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
- image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
- The temporal, height and width of the feature shape of each image in the LLM.
- video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*):
- The temporal, height and width of the feature shape of each video in the LLM.
- rope_deltas (`torch.LongTensor` of shape `(batch_size, )`, *optional*):
- The rope index difference between sequence length and multimodal rope.
- """
- output_router_logits = (
- output_router_logits if output_router_logits is not None else self.config.text_config.output_router_logits
- )
-
- outputs = self.model(
- input_ids=input_ids,
- attention_mask=attention_mask,
- position_ids=position_ids,
- mm_token_type_ids=mm_token_type_ids,
- moe_mm_token_type_ids=moe_mm_token_type_ids,
- past_key_values=past_key_values,
- inputs_embeds=inputs_embeds,
- use_cache=use_cache,
- output_router_logits=output_router_logits,
- return_dict=True,
- pixel_values=pixel_values,
- pixel_values_videos=pixel_values_videos,
- image_grid_thw=image_grid_thw,
- video_grid_thw=video_grid_thw,
- rope_deltas=rope_deltas,
- cache_position=cache_position,
- **kwargs,
- )
-
- hidden_states = outputs.last_hidden_state
- # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
- slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
- logits = self.lm_head(hidden_states[:, slice_indices, :])
-
- loss = None
- if labels is not None:
- loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.text_config.vocab_size)
-
- aux_loss = None
- if output_router_logits:
- aux_loss = load_balancing_loss_func(
- outputs.router_logits,
- self.num_experts,
- self.num_experts_per_tok,
- attention_mask,
- )
- if labels is not None:
- loss += self.router_aux_loss_coef * aux_loss.to(loss.device)  # make sure the aux loss is on the same device as the loss
-
- return MoeCausalLMOutputWithPast(
- loss=loss,
- aux_loss=aux_loss,
- logits=logits,
- past_key_values=outputs.past_key_values,
- hidden_states=outputs.hidden_states,
- attentions=outputs.attentions,
- router_logits=outputs.router_logits,
- )
-
-
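When `output_router_logits` is enabled, the language-modeling loss above is combined with the MoE load-balancing auxiliary loss scaled by `router_aux_loss_coef`. A small numeric sketch of that combination; the values and the coefficient are made up, and the real `aux_loss` comes from `load_balancing_loss_func`:

    import torch

    lm_loss = torch.tensor(2.30)          # cross-entropy on the labels
    aux_loss = torch.tensor(0.12)         # load-balancing penalty over the router logits
    router_aux_loss_coef = 0.001          # read from config.text_config in __init__ above

    total_loss = lm_loss + router_aux_loss_coef * aux_loss
    print(total_loss)                     # tensor(2.3001)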
- class Ernie4_5_VL_MoeImageProcessorKwargs(Glm4vImageProcessorKwargs):
- r"""
- patch_size (`int`, *optional*, defaults to 14):
- The spatial patch size of the vision encoder.
- temporal_patch_size (`int`, *optional*):
- The temporal patch size of the vision encoder. Unused in the image processor, only used for videos.
- merge_size (`int`, *optional*, defaults to 2):
- The merge size from the vision encoder to the LLM encoder.
- """
-
-
- class Ernie4_5_VL_MoeImageProcessor(Glm4vImageProcessor):
- r"""
- Constructs an Ernie 4.5 VL image processor that dynamically resizes images based on the original images.
-
- Args:
- do_resize (`bool`, *optional*, defaults to `True`):
- Whether to resize the image's (height, width) dimensions.
- size (`dict[str, int]`, *optional*, defaults to `{"shortest_edge": 56 * 56, "longest_edge": 28 * 28 * 6177}`):
- Size of the image after resizing. `shortest_edge` and `longest_edge` keys must be present.
- resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
- Resampling filter to use when resizing the image.
- do_rescale (`bool`, *optional*, defaults to `True`):
- Whether to rescale the image by the specified scale `rescale_factor`.
- rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
- Scale factor to use if rescaling the image.
- do_normalize (`bool`, *optional*, defaults to `True`):
- Whether to normalize the image.
- image_mean (`float` or `list[float]`, *optional*, defaults to `[0.48145466, 0.4578275, 0.40821073]`):
- Mean to use if normalizing the image. This is a float or list of floats for each channel in the image.
- image_std (`float` or `list[float]`, *optional*, defaults to `[0.26862954, 0.26130258, 0.27577711]`):
- Standard deviation to use if normalizing the image. This is a float or list of floats for each channel
- in the image.
- do_convert_rgb (`bool`, *optional*, defaults to `True`):
- Whether to convert the image to RGB.
- patch_size (`int`, *optional*, defaults to 14):
- The spatial patch size of the vision encoder.
- temporal_patch_size (`int`, *optional*):
- The temporal patch size of the vision encoder. Unused in the image processor, only used for videos.
- merge_size (`int`, *optional*, defaults to 2):
- The merge size from the vision encoder to the LLM encoder.
- """
-
- def __init__(
- self,
- do_resize: bool = True,
- size: dict[str, int] | None = None,
- resample: PILImageResampling = PILImageResampling.BICUBIC,
- do_rescale: bool = True,
- rescale_factor: int | float = 1 / 255,
- do_normalize: bool = True,
- image_mean: float | list[float] | None = None,
- image_std: float | list[float] | None = None,
- do_convert_rgb: bool = True,
- patch_size: int = 14,
- temporal_patch_size: int | None = None,
- merge_size: int = 2,
- **kwargs,
- ) -> None:
- BaseImageProcessor.__init__(self, **kwargs)
- if size is not None:
- if "shortest_edge" not in size or "longest_edge" not in size:
- raise ValueError("size must contain 'shortest_edge' and 'longest_edge' keys.")
- size = {"shortest_edge": size["shortest_edge"], "longest_edge": size["longest_edge"]}
- else:
- size = {"shortest_edge": 56 * 56, "longest_edge": 6177 * 28 * 28}
- self.size = size
-
- self.do_resize = do_resize
- self.resample = resample
- self.do_rescale = do_rescale
- self.rescale_factor = rescale_factor
- self.do_normalize = do_normalize
- self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
- self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
-
- self.patch_size = patch_size
- self.temporal_patch_size = temporal_patch_size
- self.merge_size = merge_size
- self.do_convert_rgb = do_convert_rgb
-
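Note that the `size` dict built in `__init__` stores pixel budgets rather than edge lengths: `shortest_edge` is the minimum total number of pixels an image is resized up to, and `longest_edge` the maximum it is resized down to. With the defaults above:

    min_pixels = 56 * 56             # 3,136 pixels
    max_pixels = 6177 * 28 * 28      # 4,842,768 pixels
    print(min_pixels, max_pixels)    # 3136 4842768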
- def _preprocess(
- self,
- images: ImageInput,
- do_resize: bool | None = None,
- size: dict[str, int] | None = None,
- resample: PILImageResampling = None,
- do_rescale: bool | None = None,
- rescale_factor: float | None = None,
- do_normalize: bool | None = None,
- image_mean: float | list[float] | None = None,
- image_std: float | list[float] | None = None,
- patch_size: int | None = None,
- temporal_patch_size: int | None = None,
- merge_size: int | None = None,
- do_convert_rgb: bool | None = None,
- data_format: ChannelDimension | None = ChannelDimension.FIRST,
- input_data_format: str | ChannelDimension | None = None,
- ):
- """
- Preprocess an image or batch of images. Copy of the `preprocess` method from `CLIPImageProcessor`.
-
- Args:
- images (`ImageInput`):
- Image or batch of images to preprocess. Expects pixel values ranging from 0 to 255. If pixel values range from 0 to 1, set `do_rescale=False`.
- vision_info (`list[Dict]`, *optional*):
- Optional list of dictionaries containing additional information about vision inputs.
- do_resize (`bool`, *optional*, defaults to `self.do_resize`):
- Whether to resize the image.
- size (`dict[str, int]`, *optional*, defaults to `self.size`):
- Size of the image after resizing. `shortest_edge` and `longest_edge` keys must be present.
- resample (`PILImageResampling`, *optional*, defaults to `self.resample`):
- Resampling filter to use if resizing the image. This can be one of the `PILImageResampling` enums.
- do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
- Whether to rescale the image.
- rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
- Scale factor to use if rescaling the image.
- do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
- Whether to normalize the image.
- image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
- Mean to use if normalizing the image. Can be a float or a list of floats corresponding to the number of channels in the image.
- image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
- Standard deviation to use if normalizing the image. Can be a float or a list of floats corresponding to the number of channels in the image.
- patch_size (`int`, *optional*, defaults to `self.patch_size`):
- The spatial patch size of the vision encoder.
- temporal_patch_size (`int`, *optional*):
- The temporal patch size of the vision encoder. Unused in the image processor, only used for videos.
- merge_size (`int`, *optional*, defaults to `self.merge_size`):
- The merge size from the vision encoder to the LLM encoder.
- do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
- Whether to convert the image to RGB.
- data_format (`ChannelDimension`, *optional*, defaults to `ChannelDimension.FIRST`):
- The channel dimension format for the output image. Can be one of:
- - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- - Unset: Use the channel dimension format of the input image.
- input_data_format (`ChannelDimension` or `str`, *optional*):
- The channel dimension format for the input image. Can be one of:
- - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
- """
- images = make_list_of_images(images)
-
- if do_convert_rgb:
- images = [convert_to_rgb(image) for image in images]
-
- # All transformations expect numpy arrays.
- images = [to_numpy_array(image) for image in images]
-
- if do_rescale and is_scaled_image(images[0]):
- logger.warning_once(
- "It looks like you are trying to rescale already rescaled images. If the input"
- " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
- )
- if input_data_format is None:
- # We assume that all images have the same channel dimension format.
- input_data_format = infer_channel_dimension_format(images[0])
-
- height, width = get_image_size(images[0], channel_dim=input_data_format)
- resized_height, resized_width = height, width
- processed_images = []
- for image in images:
- if do_resize:
- resized_height, resized_width = smart_resize(
- height,
- width,
- factor=patch_size * merge_size,
- min_pixels=size["shortest_edge"],
- max_pixels=size["longest_edge"],
- )
- image = resize(
- image, size=(resized_height, resized_width), resample=resample, input_data_format=input_data_format
- )
-
- if do_rescale:
- image = self.rescale(image, scale=rescale_factor, input_data_format=input_data_format)
-
- if do_normalize:
- image = self.normalize(
- image=image, mean=image_mean, std=image_std, input_data_format=input_data_format
- )
-
- image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
- processed_images.append(image)
-
- patches = np.array(processed_images)
- if data_format == ChannelDimension.LAST:
- patches = patches.transpose([0, 3, 1, 2])
-
- # Main difference to Qwen2 VL - no temporal patches
- channel = patches.shape[1]
- grid_t = patches.shape[0]
- grid_h, grid_w = resized_height // patch_size, resized_width // patch_size
- patches = patches.reshape(
- [
- grid_t,
- channel,
- grid_h // merge_size,
- merge_size,
- patch_size,
- grid_w // merge_size,
- merge_size,
- patch_size,
- ]
- )
- # [grid_t, grid_h/merge, grid_w/merge, merge, merge, channel, patch, patch]
- patches = patches.transpose([0, 2, 5, 3, 6, 1, 4, 7])
- flatten_patches = patches.reshape(grid_t * grid_h * grid_w, channel * patch_size * patch_size)
-
- return flatten_patches, (grid_t, grid_h, grid_w)
-
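The reshape/transpose at the end of `_preprocess` turns each resized image into rows of flattened patches. A self-contained shape check with toy numbers (one RGB image resized to 56x84, `patch_size=14`, `merge_size=2`; values are illustrative only):

    import numpy as np

    patch_size, merge_size = 14, 2
    grid_t, channel = 1, 3
    resized_height, resized_width = 56, 84
    grid_h, grid_w = resized_height // patch_size, resized_width // patch_size   # 4, 6

    patches = np.zeros((grid_t, channel, resized_height, resized_width))
    patches = patches.reshape(
        grid_t, channel, grid_h // merge_size, merge_size, patch_size,
        grid_w // merge_size, merge_size, patch_size,
    )
    patches = patches.transpose([0, 2, 5, 3, 6, 1, 4, 7])
    flatten_patches = patches.reshape(grid_t * grid_h * grid_w, channel * patch_size * patch_size)
    print(flatten_patches.shape)    # (24, 588): 24 patches, each holding 3 * 14 * 14 values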
- def get_number_of_image_patches(self, height: int, width: int, images_kwargs=None):
- """
- A utility that returns the number of image patches for a given image size.
-
- Args:
- height (`int`):
- Height of the input image.
- width (`int`):
- Width of the input image.
- images_kwargs (`dict`, *optional*):
- Any kwargs to override defaults of the image processor.
- Returns:
- `int`: Number of image patches per image.
- """
- min_pixels = self.size["shortest_edge"]
- max_pixels = self.size["longest_edge"]
- patch_size = (images_kwargs or {}).get("patch_size", self.patch_size)
- merge_size = (images_kwargs or {}).get("merge_size", self.merge_size)
-
- factor = patch_size * merge_size
- resized_height, resized_width = smart_resize(
- height, width, factor, min_pixels=min_pixels, max_pixels=max_pixels
- )
- grid_h, grid_w = resized_height // patch_size, resized_width // patch_size
- return grid_h * grid_w
-
-
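A worked example of the arithmetic in `get_number_of_image_patches`, assuming an input whose sides are already multiples of `patch_size * merge_size` so that `smart_resize` leaves them unchanged; the reduction to placeholder tokens by the merge factor is an assumption based on the 2x2 spatial merge performed downstream:

    patch_size, merge_size = 14, 2
    height, width = 896, 560                                       # both multiples of 28

    grid_h, grid_w = height // patch_size, width // patch_size     # 64, 40
    num_patches = grid_h * grid_w                                   # 2560 vision patches
    num_placeholders = num_patches // (merge_size ** 2)             # 640 tokens after the 2x2 merge
    print(num_patches, num_placeholders)                            # 2560 640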
- class Ernie4_5_VL_MoeImageProcessorFast(Glm4vImageProcessorFast):
- size = {"shortest_edge": 56 * 56, "longest_edge": 28 * 28 * 6177}
- temporal_patch_size = None # Unused
-
- def _preprocess(
- self,
- images: list["torch.Tensor"],
- do_resize: bool,
- size: SizeDict,
- interpolation: Optional["F.InterpolationMode"],
- do_rescale: bool,
- rescale_factor: float,
- do_normalize: bool,
- image_mean: float | list[float] | None,
- image_std: float | list[float] | None,
- patch_size: int,
- merge_size: int,
- disable_grouping: bool | None,
- return_tensors: str | TensorType | None,
- **kwargs,
- ):
- # Group images by size for batched resizing
- grouped_images, grouped_images_index = group_images_by_shape(images, disable_grouping=disable_grouping)
- resized_images_grouped = {}
- for shape, stacked_images in grouped_images.items():
- height, width = stacked_images.shape[-2:]
- if do_resize:
- resized_height, resized_width = smart_resize(
- height,
- width,
- factor=patch_size * merge_size,
- min_pixels=size["shortest_edge"],
- max_pixels=size["longest_edge"],
- )
- stacked_images = self.resize(
- image=stacked_images,
- size=SizeDict(height=resized_height, width=resized_width),
- interpolation=interpolation,
- )
- resized_images_grouped[shape] = stacked_images
- resized_images = reorder_images(resized_images_grouped, grouped_images_index)
-
- # Group images by size for further processing
- # Needed in case do_resize is False, or resize returns images with different sizes
- grouped_images, grouped_images_index = group_images_by_shape(resized_images, disable_grouping=disable_grouping)
- processed_images_grouped = {}
- processed_grids = {}
- for shape, stacked_images in grouped_images.items():
- resized_height, resized_width = stacked_images.shape[-2:]
- # Fused rescale and normalize
- patches = self.rescale_and_normalize(
- stacked_images, do_rescale, rescale_factor, do_normalize, image_mean, image_std
- )
- if patches.ndim == 4:
- # add a temporal dimension if we have images
- patches = patches.unsqueeze(1)
-
- # Main difference to Qwen2 VL - no temporal patches
- batch_size, grid_t, channel = patches.shape[:3]
- grid_h, grid_w = resized_height // patch_size, resized_width // patch_size
-
- patches = patches.view(
- batch_size,
- grid_t,
- channel,
- grid_h // merge_size,
- merge_size,
- patch_size,
- grid_w // merge_size,
- merge_size,
- patch_size,
- )
- # Reorder dimensions to group grid and patch information for subsequent flattening.
- # [batch, grid_t, grid_h/merge, grid_w/merge, merge, merge, channel, patch, patch]
- patches = patches.permute(0, 1, 3, 6, 4, 7, 2, 5, 8)
-
- flatten_patches = patches.reshape(
- batch_size,
- grid_t * grid_h * grid_w,
- channel * patch_size * patch_size,
- )
-
- processed_images_grouped[shape] = flatten_patches
- processed_grids[shape] = [[grid_t, grid_h, grid_w]] * batch_size
-
- processed_images = reorder_images(processed_images_grouped, grouped_images_index)
- processed_grids = reorder_images(processed_grids, grouped_images_index)
- pixel_values = torch.cat(processed_images, dim=0)
- image_grid_thw = torch.tensor(processed_grids)
-
- return BatchFeature(
- data={"pixel_values": pixel_values, "image_grid_thw": image_grid_thw}, tensor_type=return_tensors
- )
-
- def get_number_of_image_patches(self, height: int, width: int, images_kwargs=None):
- """
- A utility that returns the number of image patches for a given image size.
-
- Note: Do not remove this method! It is used by vLLM to infer the number of patches and placeholders
- without an image input.
-
- Args:
- height (`int`):
- Height of the input image.
- width (`int`):
- Width of the input image.
- images_kwargs (`dict`, *optional*):
- Any kwargs to override defaults of the image processor.
- Returns:
- `int`: Number of image patches per image.
- """
- min_pixels = self.size["shortest_edge"]
- max_pixels = self.size["longest_edge"]
- patch_size = (images_kwargs or {}).get("patch_size", self.patch_size)
- merge_size = (images_kwargs or {}).get("merge_size", self.merge_size)
-
- factor = patch_size * merge_size
- resized_height, resized_width = smart_resize(
- height, width, factor, min_pixels=min_pixels, max_pixels=max_pixels
- )
- grid_h, grid_w = resized_height // patch_size, resized_width // patch_size
- return grid_h * grid_w
-
-
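For reference, a hedged sketch of the output layout produced by the fast processor for two images, assuming patches from all images are concatenated along the first dimension as in `_preprocess` above; the grid values are invented:

    import torch

    image_grid_thw = torch.tensor([[1, 64, 40], [1, 32, 28]])   # (num_images, 3): t, h, w per image
    patches_per_image = image_grid_thw.prod(dim=-1)               # tensor([2560, 896])
    total_patches = int(patches_per_image.sum())                   # 3456 rows in pixel_values

    channel, patch_size = 3, 14
    pixel_values_shape = (total_patches, channel * patch_size * patch_size)
    print(patches_per_image.tolist(), pixel_values_shape)          # [2560, 896] (3456, 588)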
- __all__ = [
- "Ernie4_5_VL_MoeConfig",
- "Ernie4_5_VL_MoeTextConfig",
- "Ernie4_5_VL_MoeVisionConfig",
- "Ernie4_5_VL_MoePreTrainedModel",
- "Ernie4_5_VL_MoeForConditionalGeneration",
- "Ernie4_5_VL_MoeModel",
- "Ernie4_5_VL_MoeTextModel",
- "Ernie4_5_VL_MoeVisionTransformerPretrainedModel",
- "Ernie4_5_VL_MoeVariableResolutionResamplerModel",
- "Ernie4_5_VL_MoeImageProcessor",
- "Ernie4_5_VL_MoeImageProcessorFast",
- ]