transformers-5.0.0rc2-py3-none-any.whl → transformers-5.0.0rc3-py3-none-any.whl

This diff compares two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only and reflects the file-level changes between the two versions.
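For readers who want to reproduce a file-level summary like the one below, here is a minimal sketch (not the tool that generated this page) that compares two locally downloaded wheels with the standard library and prints per-file `+added -removed` line counts. The wheel paths are placeholders for whatever files you have on disk.

```python
# Sketch: approximate a "files changed" listing between two wheels.
# Assumes both wheels have already been downloaded locally.
import difflib
import zipfile

OLD = "transformers-5.0.0rc2-py3-none-any.whl"  # assumed local path
NEW = "transformers-5.0.0rc3-py3-none-any.whl"  # assumed local path

def read_py_members(path):
    # A wheel is a zip archive; collect the text of every .py member.
    with zipfile.ZipFile(path) as zf:
        return {name: zf.read(name) for name in zf.namelist() if name.endswith(".py")}

old, new = read_py_members(OLD), read_py_members(NEW)

for name in sorted(old.keys() | new.keys()):
    before = old.get(name, b"").decode("utf-8", errors="replace").splitlines()
    after = new.get(name, b"").decode("utf-8", errors="replace").splitlines()
    if before == after:
        continue
    diff = list(difflib.unified_diff(before, after, lineterm=""))
    added = sum(1 for line in diff if line.startswith("+") and not line.startswith("+++"))
    removed = sum(1 for line in diff if line.startswith("-") and not line.startswith("---"))
    print(f"{name} +{added} -{removed}")
```

The counts produced this way may differ slightly from the listing below, since registry diff tools can use their own diff granularity and may include non-Python files.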
Files changed (1537)
  1. transformers/__init__.py +9 -28
  2. transformers/audio_utils.py +32 -32
  3. transformers/cache_utils.py +15 -124
  4. transformers/cli/chat.py +3 -3
  5. transformers/cli/serve.py +2 -2
  6. transformers/cli/transformers.py +2 -1
  7. transformers/configuration_utils.py +31 -33
  8. transformers/conversion_mapping.py +5 -1
  9. transformers/convert_slow_tokenizer.py +3 -8
  10. transformers/core_model_loading.py +14 -15
  11. transformers/data/processors/glue.py +0 -1
  12. transformers/data/processors/utils.py +0 -1
  13. transformers/data/processors/xnli.py +0 -1
  14. transformers/dependency_versions_table.py +4 -4
  15. transformers/distributed/configuration_utils.py +1 -2
  16. transformers/dynamic_module_utils.py +23 -23
  17. transformers/feature_extraction_sequence_utils.py +19 -23
  18. transformers/feature_extraction_utils.py +14 -14
  19. transformers/generation/candidate_generator.py +1 -2
  20. transformers/generation/configuration_utils.py +54 -39
  21. transformers/generation/continuous_batching/__init__.py +0 -1
  22. transformers/generation/continuous_batching/cache.py +34 -6
  23. transformers/generation/continuous_batching/cache_manager.py +25 -12
  24. transformers/generation/continuous_batching/continuous_api.py +54 -23
  25. transformers/generation/continuous_batching/requests.py +25 -4
  26. transformers/generation/continuous_batching/scheduler.py +117 -49
  27. transformers/generation/logits_process.py +0 -128
  28. transformers/generation/streamers.py +0 -1
  29. transformers/generation/utils.py +16 -26
  30. transformers/generation/watermarking.py +2 -3
  31. transformers/hf_argparser.py +9 -13
  32. transformers/hyperparameter_search.py +1 -2
  33. transformers/image_processing_base.py +9 -9
  34. transformers/image_processing_utils.py +11 -12
  35. transformers/image_processing_utils_fast.py +53 -53
  36. transformers/image_transforms.py +29 -29
  37. transformers/image_utils.py +30 -32
  38. transformers/integrations/awq.py +1 -3
  39. transformers/integrations/deepspeed.py +1 -1
  40. transformers/integrations/eetq.py +0 -1
  41. transformers/integrations/fbgemm_fp8.py +1 -2
  42. transformers/integrations/finegrained_fp8.py +8 -7
  43. transformers/integrations/flash_attention.py +1 -1
  44. transformers/integrations/flex_attention.py +1 -1
  45. transformers/integrations/fp_quant.py +4 -6
  46. transformers/integrations/ggml.py +0 -1
  47. transformers/integrations/integration_utils.py +2 -3
  48. transformers/integrations/mxfp4.py +5 -6
  49. transformers/integrations/quark.py +2 -4
  50. transformers/integrations/torchao.py +4 -6
  51. transformers/loss/loss_lw_detr.py +356 -0
  52. transformers/loss/loss_utils.py +2 -0
  53. transformers/masking_utils.py +47 -51
  54. transformers/model_debugging_utils.py +4 -5
  55. transformers/modelcard.py +14 -192
  56. transformers/modeling_attn_mask_utils.py +19 -19
  57. transformers/modeling_flash_attention_utils.py +27 -27
  58. transformers/modeling_gguf_pytorch_utils.py +5 -5
  59. transformers/modeling_layers.py +21 -22
  60. transformers/modeling_outputs.py +242 -253
  61. transformers/modeling_rope_utils.py +32 -32
  62. transformers/modeling_utils.py +67 -90
  63. transformers/models/__init__.py +4 -0
  64. transformers/models/afmoe/configuration_afmoe.py +26 -29
  65. transformers/models/afmoe/modeling_afmoe.py +30 -33
  66. transformers/models/afmoe/modular_afmoe.py +16 -18
  67. transformers/models/aimv2/configuration_aimv2.py +2 -5
  68. transformers/models/aimv2/modeling_aimv2.py +20 -21
  69. transformers/models/aimv2/modular_aimv2.py +7 -9
  70. transformers/models/albert/configuration_albert.py +0 -1
  71. transformers/models/albert/modeling_albert.py +67 -69
  72. transformers/models/albert/tokenization_albert.py +1 -4
  73. transformers/models/align/configuration_align.py +0 -1
  74. transformers/models/align/modeling_align.py +61 -62
  75. transformers/models/align/processing_align.py +2 -30
  76. transformers/models/altclip/configuration_altclip.py +0 -1
  77. transformers/models/altclip/modeling_altclip.py +76 -77
  78. transformers/models/altclip/processing_altclip.py +2 -15
  79. transformers/models/apertus/__init__.py +0 -1
  80. transformers/models/apertus/configuration_apertus.py +18 -21
  81. transformers/models/apertus/modeling_apertus.py +31 -34
  82. transformers/models/apertus/modular_apertus.py +28 -30
  83. transformers/models/arcee/configuration_arcee.py +20 -23
  84. transformers/models/arcee/modeling_arcee.py +31 -34
  85. transformers/models/arcee/modular_arcee.py +20 -23
  86. transformers/models/aria/configuration_aria.py +20 -23
  87. transformers/models/aria/image_processing_aria.py +25 -27
  88. transformers/models/aria/modeling_aria.py +63 -66
  89. transformers/models/aria/modular_aria.py +78 -85
  90. transformers/models/aria/processing_aria.py +28 -35
  91. transformers/models/audio_spectrogram_transformer/configuration_audio_spectrogram_transformer.py +0 -1
  92. transformers/models/audio_spectrogram_transformer/feature_extraction_audio_spectrogram_transformer.py +3 -6
  93. transformers/models/audio_spectrogram_transformer/modeling_audio_spectrogram_transformer.py +6 -8
  94. transformers/models/audioflamingo3/__init__.py +0 -1
  95. transformers/models/audioflamingo3/configuration_audioflamingo3.py +0 -1
  96. transformers/models/audioflamingo3/modeling_audioflamingo3.py +22 -23
  97. transformers/models/audioflamingo3/modular_audioflamingo3.py +12 -17
  98. transformers/models/audioflamingo3/processing_audioflamingo3.py +6 -8
  99. transformers/models/auto/auto_factory.py +4 -5
  100. transformers/models/auto/configuration_auto.py +26 -5
  101. transformers/models/auto/feature_extraction_auto.py +5 -7
  102. transformers/models/auto/image_processing_auto.py +13 -26
  103. transformers/models/auto/modeling_auto.py +18 -199
  104. transformers/models/auto/processing_auto.py +2 -1
  105. transformers/models/auto/tokenization_auto.py +21 -22
  106. transformers/models/auto/video_processing_auto.py +7 -8
  107. transformers/models/autoformer/configuration_autoformer.py +4 -7
  108. transformers/models/autoformer/modeling_autoformer.py +98 -100
  109. transformers/models/aya_vision/configuration_aya_vision.py +0 -1
  110. transformers/models/aya_vision/modeling_aya_vision.py +35 -37
  111. transformers/models/aya_vision/modular_aya_vision.py +26 -29
  112. transformers/models/aya_vision/processing_aya_vision.py +25 -53
  113. transformers/models/bamba/configuration_bamba.py +29 -32
  114. transformers/models/bamba/modeling_bamba.py +60 -64
  115. transformers/models/bamba/modular_bamba.py +51 -55
  116. transformers/models/bark/configuration_bark.py +4 -7
  117. transformers/models/bark/generation_configuration_bark.py +3 -5
  118. transformers/models/bark/modeling_bark.py +40 -55
  119. transformers/models/bark/processing_bark.py +19 -41
  120. transformers/models/bart/configuration_bart.py +0 -1
  121. transformers/models/bart/modeling_bart.py +115 -117
  122. transformers/models/barthez/tokenization_barthez.py +1 -4
  123. transformers/models/bartpho/tokenization_bartpho.py +6 -7
  124. transformers/models/beit/configuration_beit.py +0 -11
  125. transformers/models/beit/image_processing_beit.py +53 -56
  126. transformers/models/beit/image_processing_beit_fast.py +8 -9
  127. transformers/models/beit/modeling_beit.py +51 -53
  128. transformers/models/bert/configuration_bert.py +0 -1
  129. transformers/models/bert/modeling_bert.py +111 -122
  130. transformers/models/bert/tokenization_bert.py +2 -4
  131. transformers/models/bert/tokenization_bert_legacy.py +3 -5
  132. transformers/models/bert_generation/configuration_bert_generation.py +0 -1
  133. transformers/models/bert_generation/modeling_bert_generation.py +47 -49
  134. transformers/models/bert_generation/tokenization_bert_generation.py +2 -3
  135. transformers/models/bert_japanese/tokenization_bert_japanese.py +5 -6
  136. transformers/models/bertweet/tokenization_bertweet.py +1 -3
  137. transformers/models/big_bird/configuration_big_bird.py +0 -1
  138. transformers/models/big_bird/modeling_big_bird.py +107 -109
  139. transformers/models/big_bird/tokenization_big_bird.py +1 -4
  140. transformers/models/bigbird_pegasus/configuration_bigbird_pegasus.py +0 -1
  141. transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py +109 -111
  142. transformers/models/biogpt/configuration_biogpt.py +0 -1
  143. transformers/models/biogpt/modeling_biogpt.py +69 -71
  144. transformers/models/biogpt/modular_biogpt.py +59 -61
  145. transformers/models/biogpt/tokenization_biogpt.py +3 -5
  146. transformers/models/bit/configuration_bit.py +0 -1
  147. transformers/models/bit/image_processing_bit.py +21 -24
  148. transformers/models/bit/image_processing_bit_fast.py +0 -1
  149. transformers/models/bit/modeling_bit.py +9 -11
  150. transformers/models/bitnet/configuration_bitnet.py +18 -21
  151. transformers/models/bitnet/modeling_bitnet.py +31 -34
  152. transformers/models/bitnet/modular_bitnet.py +4 -6
  153. transformers/models/blenderbot/configuration_blenderbot.py +0 -1
  154. transformers/models/blenderbot/modeling_blenderbot.py +64 -95
  155. transformers/models/blenderbot/tokenization_blenderbot.py +0 -1
  156. transformers/models/blenderbot_small/configuration_blenderbot_small.py +0 -1
  157. transformers/models/blenderbot_small/modeling_blenderbot_small.py +66 -68
  158. transformers/models/blenderbot_small/tokenization_blenderbot_small.py +1 -3
  159. transformers/models/blip/configuration_blip.py +0 -1
  160. transformers/models/blip/image_processing_blip.py +17 -20
  161. transformers/models/blip/image_processing_blip_fast.py +0 -1
  162. transformers/models/blip/modeling_blip.py +60 -71
  163. transformers/models/blip/modeling_blip_text.py +63 -65
  164. transformers/models/blip/processing_blip.py +5 -36
  165. transformers/models/blip_2/configuration_blip_2.py +0 -1
  166. transformers/models/blip_2/modeling_blip_2.py +70 -71
  167. transformers/models/blip_2/processing_blip_2.py +8 -38
  168. transformers/models/bloom/configuration_bloom.py +0 -1
  169. transformers/models/bloom/modeling_bloom.py +58 -59
  170. transformers/models/blt/configuration_blt.py +71 -74
  171. transformers/models/blt/modeling_blt.py +73 -76
  172. transformers/models/blt/modular_blt.py +57 -59
  173. transformers/models/bridgetower/configuration_bridgetower.py +0 -1
  174. transformers/models/bridgetower/image_processing_bridgetower.py +34 -35
  175. transformers/models/bridgetower/image_processing_bridgetower_fast.py +7 -8
  176. transformers/models/bridgetower/modeling_bridgetower.py +107 -109
  177. transformers/models/bridgetower/processing_bridgetower.py +2 -16
  178. transformers/models/bros/configuration_bros.py +0 -1
  179. transformers/models/bros/modeling_bros.py +78 -80
  180. transformers/models/bros/processing_bros.py +2 -12
  181. transformers/models/byt5/tokenization_byt5.py +4 -6
  182. transformers/models/camembert/configuration_camembert.py +0 -1
  183. transformers/models/camembert/modeling_camembert.py +91 -93
  184. transformers/models/camembert/modular_camembert.py +51 -54
  185. transformers/models/camembert/tokenization_camembert.py +1 -4
  186. transformers/models/canine/configuration_canine.py +0 -1
  187. transformers/models/canine/modeling_canine.py +73 -75
  188. transformers/models/canine/tokenization_canine.py +0 -1
  189. transformers/models/chameleon/configuration_chameleon.py +24 -27
  190. transformers/models/chameleon/image_processing_chameleon.py +21 -24
  191. transformers/models/chameleon/image_processing_chameleon_fast.py +0 -1
  192. transformers/models/chameleon/modeling_chameleon.py +53 -56
  193. transformers/models/chameleon/processing_chameleon.py +16 -41
  194. transformers/models/chinese_clip/configuration_chinese_clip.py +0 -1
  195. transformers/models/chinese_clip/image_processing_chinese_clip.py +21 -24
  196. transformers/models/chinese_clip/image_processing_chinese_clip_fast.py +0 -1
  197. transformers/models/chinese_clip/modeling_chinese_clip.py +65 -66
  198. transformers/models/chinese_clip/processing_chinese_clip.py +2 -15
  199. transformers/models/clap/configuration_clap.py +0 -1
  200. transformers/models/clap/feature_extraction_clap.py +9 -10
  201. transformers/models/clap/modeling_clap.py +88 -89
  202. transformers/models/clap/processing_clap.py +2 -15
  203. transformers/models/clip/configuration_clip.py +0 -1
  204. transformers/models/clip/image_processing_clip.py +21 -24
  205. transformers/models/clip/image_processing_clip_fast.py +0 -1
  206. transformers/models/clip/modeling_clip.py +45 -46
  207. transformers/models/clip/processing_clip.py +2 -14
  208. transformers/models/clip/tokenization_clip.py +2 -5
  209. transformers/models/clipseg/configuration_clipseg.py +0 -1
  210. transformers/models/clipseg/modeling_clipseg.py +86 -87
  211. transformers/models/clipseg/processing_clipseg.py +8 -39
  212. transformers/models/clvp/configuration_clvp.py +1 -3
  213. transformers/models/clvp/feature_extraction_clvp.py +7 -10
  214. transformers/models/clvp/modeling_clvp.py +119 -115
  215. transformers/models/clvp/number_normalizer.py +1 -2
  216. transformers/models/clvp/processing_clvp.py +3 -20
  217. transformers/models/clvp/tokenization_clvp.py +0 -1
  218. transformers/models/code_llama/tokenization_code_llama.py +3 -6
  219. transformers/models/codegen/configuration_codegen.py +0 -1
  220. transformers/models/codegen/modeling_codegen.py +48 -48
  221. transformers/models/codegen/tokenization_codegen.py +5 -6
  222. transformers/models/cohere/configuration_cohere.py +20 -23
  223. transformers/models/cohere/modeling_cohere.py +35 -38
  224. transformers/models/cohere/modular_cohere.py +24 -28
  225. transformers/models/cohere/tokenization_cohere.py +5 -6
  226. transformers/models/cohere2/configuration_cohere2.py +21 -24
  227. transformers/models/cohere2/modeling_cohere2.py +34 -37
  228. transformers/models/cohere2/modular_cohere2.py +39 -41
  229. transformers/models/cohere2_vision/image_processing_cohere2_vision_fast.py +6 -7
  230. transformers/models/cohere2_vision/modeling_cohere2_vision.py +28 -30
  231. transformers/models/cohere2_vision/modular_cohere2_vision.py +21 -23
  232. transformers/models/cohere2_vision/processing_cohere2_vision.py +6 -36
  233. transformers/models/colpali/configuration_colpali.py +0 -1
  234. transformers/models/colpali/modeling_colpali.py +14 -16
  235. transformers/models/colpali/modular_colpali.py +11 -51
  236. transformers/models/colpali/processing_colpali.py +14 -52
  237. transformers/models/colqwen2/modeling_colqwen2.py +20 -22
  238. transformers/models/colqwen2/modular_colqwen2.py +29 -68
  239. transformers/models/colqwen2/processing_colqwen2.py +16 -52
  240. transformers/models/conditional_detr/configuration_conditional_detr.py +0 -1
  241. transformers/models/conditional_detr/image_processing_conditional_detr.py +64 -66
  242. transformers/models/conditional_detr/image_processing_conditional_detr_fast.py +22 -22
  243. transformers/models/conditional_detr/modeling_conditional_detr.py +78 -80
  244. transformers/models/conditional_detr/modular_conditional_detr.py +1 -3
  245. transformers/models/convbert/configuration_convbert.py +0 -1
  246. transformers/models/convbert/modeling_convbert.py +85 -87
  247. transformers/models/convbert/tokenization_convbert.py +0 -1
  248. transformers/models/convnext/configuration_convnext.py +0 -1
  249. transformers/models/convnext/image_processing_convnext.py +18 -21
  250. transformers/models/convnext/image_processing_convnext_fast.py +5 -6
  251. transformers/models/convnext/modeling_convnext.py +5 -8
  252. transformers/models/convnextv2/configuration_convnextv2.py +0 -1
  253. transformers/models/convnextv2/modeling_convnextv2.py +5 -8
  254. transformers/models/cpm/tokenization_cpm.py +6 -7
  255. transformers/models/cpm/tokenization_cpm_fast.py +3 -5
  256. transformers/models/cpmant/configuration_cpmant.py +0 -1
  257. transformers/models/cpmant/modeling_cpmant.py +38 -40
  258. transformers/models/cpmant/tokenization_cpmant.py +1 -3
  259. transformers/models/csm/configuration_csm.py +49 -51
  260. transformers/models/csm/generation_csm.py +13 -14
  261. transformers/models/csm/modeling_csm.py +78 -81
  262. transformers/models/csm/modular_csm.py +56 -58
  263. transformers/models/csm/processing_csm.py +25 -68
  264. transformers/models/ctrl/configuration_ctrl.py +0 -1
  265. transformers/models/ctrl/modeling_ctrl.py +38 -41
  266. transformers/models/ctrl/tokenization_ctrl.py +0 -1
  267. transformers/models/cvt/configuration_cvt.py +0 -1
  268. transformers/models/cvt/modeling_cvt.py +13 -15
  269. transformers/models/cwm/__init__.py +0 -1
  270. transformers/models/cwm/configuration_cwm.py +3 -5
  271. transformers/models/cwm/modeling_cwm.py +32 -34
  272. transformers/models/cwm/modular_cwm.py +10 -12
  273. transformers/models/d_fine/configuration_d_fine.py +0 -1
  274. transformers/models/d_fine/modeling_d_fine.py +81 -82
  275. transformers/models/d_fine/modular_d_fine.py +8 -9
  276. transformers/models/dab_detr/configuration_dab_detr.py +0 -1
  277. transformers/models/dab_detr/modeling_dab_detr.py +68 -70
  278. transformers/models/dac/configuration_dac.py +0 -1
  279. transformers/models/dac/feature_extraction_dac.py +6 -9
  280. transformers/models/dac/modeling_dac.py +21 -23
  281. transformers/models/data2vec/configuration_data2vec_audio.py +0 -1
  282. transformers/models/data2vec/configuration_data2vec_text.py +0 -1
  283. transformers/models/data2vec/configuration_data2vec_vision.py +0 -1
  284. transformers/models/data2vec/modeling_data2vec_audio.py +52 -56
  285. transformers/models/data2vec/modeling_data2vec_text.py +91 -93
  286. transformers/models/data2vec/modeling_data2vec_vision.py +41 -42
  287. transformers/models/data2vec/modular_data2vec_audio.py +6 -1
  288. transformers/models/data2vec/modular_data2vec_text.py +51 -54
  289. transformers/models/dbrx/configuration_dbrx.py +18 -19
  290. transformers/models/dbrx/modeling_dbrx.py +39 -42
  291. transformers/models/dbrx/modular_dbrx.py +31 -33
  292. transformers/models/deberta/configuration_deberta.py +0 -1
  293. transformers/models/deberta/modeling_deberta.py +57 -60
  294. transformers/models/deberta/tokenization_deberta.py +2 -5
  295. transformers/models/deberta_v2/configuration_deberta_v2.py +0 -1
  296. transformers/models/deberta_v2/modeling_deberta_v2.py +63 -65
  297. transformers/models/deberta_v2/tokenization_deberta_v2.py +1 -4
  298. transformers/models/decision_transformer/configuration_decision_transformer.py +0 -1
  299. transformers/models/decision_transformer/modeling_decision_transformer.py +48 -50
  300. transformers/models/deepseek_v2/configuration_deepseek_v2.py +34 -37
  301. transformers/models/deepseek_v2/modeling_deepseek_v2.py +32 -33
  302. transformers/models/deepseek_v2/modular_deepseek_v2.py +40 -42
  303. transformers/models/deepseek_v3/configuration_deepseek_v3.py +35 -38
  304. transformers/models/deepseek_v3/modeling_deepseek_v3.py +31 -33
  305. transformers/models/deepseek_v3/modular_deepseek_v3.py +4 -5
  306. transformers/models/deepseek_vl/configuration_deepseek_vl.py +2 -3
  307. transformers/models/deepseek_vl/image_processing_deepseek_vl.py +25 -26
  308. transformers/models/deepseek_vl/image_processing_deepseek_vl_fast.py +7 -6
  309. transformers/models/deepseek_vl/modeling_deepseek_vl.py +31 -31
  310. transformers/models/deepseek_vl/modular_deepseek_vl.py +11 -43
  311. transformers/models/deepseek_vl/processing_deepseek_vl.py +10 -41
  312. transformers/models/deepseek_vl_hybrid/configuration_deepseek_vl_hybrid.py +3 -5
  313. transformers/models/deepseek_vl_hybrid/image_processing_deepseek_vl_hybrid.py +35 -35
  314. transformers/models/deepseek_vl_hybrid/image_processing_deepseek_vl_hybrid_fast.py +16 -16
  315. transformers/models/deepseek_vl_hybrid/modeling_deepseek_vl_hybrid.py +33 -33
  316. transformers/models/deepseek_vl_hybrid/modular_deepseek_vl_hybrid.py +71 -90
  317. transformers/models/deepseek_vl_hybrid/processing_deepseek_vl_hybrid.py +12 -44
  318. transformers/models/deformable_detr/configuration_deformable_detr.py +0 -1
  319. transformers/models/deformable_detr/image_processing_deformable_detr.py +59 -61
  320. transformers/models/deformable_detr/image_processing_deformable_detr_fast.py +17 -17
  321. transformers/models/deformable_detr/modeling_deformable_detr.py +66 -67
  322. transformers/models/deformable_detr/modular_deformable_detr.py +1 -3
  323. transformers/models/deit/configuration_deit.py +0 -1
  324. transformers/models/deit/image_processing_deit.py +18 -21
  325. transformers/models/deit/image_processing_deit_fast.py +0 -1
  326. transformers/models/deit/modeling_deit.py +16 -18
  327. transformers/models/depth_anything/configuration_depth_anything.py +0 -1
  328. transformers/models/depth_anything/modeling_depth_anything.py +5 -8
  329. transformers/models/depth_pro/configuration_depth_pro.py +0 -1
  330. transformers/models/depth_pro/image_processing_depth_pro.py +22 -23
  331. transformers/models/depth_pro/image_processing_depth_pro_fast.py +6 -7
  332. transformers/models/depth_pro/modeling_depth_pro.py +21 -23
  333. transformers/models/detr/configuration_detr.py +0 -1
  334. transformers/models/detr/image_processing_detr.py +64 -66
  335. transformers/models/detr/image_processing_detr_fast.py +22 -23
  336. transformers/models/detr/modeling_detr.py +70 -72
  337. transformers/models/dia/configuration_dia.py +5 -8
  338. transformers/models/dia/feature_extraction_dia.py +6 -9
  339. transformers/models/dia/generation_dia.py +40 -36
  340. transformers/models/dia/modeling_dia.py +61 -64
  341. transformers/models/dia/modular_dia.py +52 -54
  342. transformers/models/dia/processing_dia.py +39 -29
  343. transformers/models/dia/tokenization_dia.py +3 -6
  344. transformers/models/diffllama/configuration_diffllama.py +20 -23
  345. transformers/models/diffllama/modeling_diffllama.py +42 -45
  346. transformers/models/diffllama/modular_diffllama.py +16 -18
  347. transformers/models/dinat/configuration_dinat.py +0 -1
  348. transformers/models/dinat/modeling_dinat.py +40 -42
  349. transformers/models/dinov2/configuration_dinov2.py +0 -1
  350. transformers/models/dinov2/modeling_dinov2.py +11 -13
  351. transformers/models/dinov2_with_registers/configuration_dinov2_with_registers.py +1 -1
  352. transformers/models/dinov2_with_registers/modeling_dinov2_with_registers.py +12 -13
  353. transformers/models/dinov2_with_registers/modular_dinov2_with_registers.py +5 -7
  354. transformers/models/dinov3_convnext/configuration_dinov3_convnext.py +4 -7
  355. transformers/models/dinov3_convnext/modeling_dinov3_convnext.py +3 -6
  356. transformers/models/dinov3_vit/configuration_dinov3_vit.py +5 -8
  357. transformers/models/dinov3_vit/image_processing_dinov3_vit_fast.py +5 -6
  358. transformers/models/dinov3_vit/modeling_dinov3_vit.py +14 -16
  359. transformers/models/dinov3_vit/modular_dinov3_vit.py +11 -13
  360. transformers/models/distilbert/configuration_distilbert.py +0 -1
  361. transformers/models/distilbert/modeling_distilbert.py +44 -46
  362. transformers/models/distilbert/tokenization_distilbert.py +0 -1
  363. transformers/models/doge/__init__.py +0 -1
  364. transformers/models/doge/configuration_doge.py +25 -28
  365. transformers/models/doge/modeling_doge.py +42 -45
  366. transformers/models/doge/modular_doge.py +57 -58
  367. transformers/models/donut/configuration_donut_swin.py +0 -1
  368. transformers/models/donut/image_processing_donut.py +26 -29
  369. transformers/models/donut/image_processing_donut_fast.py +5 -10
  370. transformers/models/donut/modeling_donut_swin.py +44 -46
  371. transformers/models/donut/processing_donut.py +5 -26
  372. transformers/models/dots1/configuration_dots1.py +27 -29
  373. transformers/models/dots1/modeling_dots1.py +31 -34
  374. transformers/models/dots1/modular_dots1.py +0 -1
  375. transformers/models/dpr/configuration_dpr.py +0 -1
  376. transformers/models/dpr/modeling_dpr.py +37 -39
  377. transformers/models/dpr/tokenization_dpr.py +7 -9
  378. transformers/models/dpr/tokenization_dpr_fast.py +7 -9
  379. transformers/models/dpt/configuration_dpt.py +0 -1
  380. transformers/models/dpt/image_processing_dpt.py +65 -66
  381. transformers/models/dpt/image_processing_dpt_fast.py +13 -14
  382. transformers/models/dpt/modeling_dpt.py +19 -21
  383. transformers/models/dpt/modular_dpt.py +10 -11
  384. transformers/models/edgetam/configuration_edgetam.py +0 -1
  385. transformers/models/edgetam/modeling_edgetam.py +39 -41
  386. transformers/models/edgetam/modular_edgetam.py +2 -6
  387. transformers/models/edgetam_video/__init__.py +0 -1
  388. transformers/models/edgetam_video/configuration_edgetam_video.py +0 -1
  389. transformers/models/edgetam_video/modeling_edgetam_video.py +76 -77
  390. transformers/models/edgetam_video/modular_edgetam_video.py +16 -18
  391. transformers/models/efficientloftr/configuration_efficientloftr.py +4 -5
  392. transformers/models/efficientloftr/image_processing_efficientloftr.py +14 -16
  393. transformers/models/efficientloftr/image_processing_efficientloftr_fast.py +4 -4
  394. transformers/models/efficientloftr/modeling_efficientloftr.py +27 -29
  395. transformers/models/efficientloftr/modular_efficientloftr.py +1 -3
  396. transformers/models/efficientnet/configuration_efficientnet.py +0 -1
  397. transformers/models/efficientnet/image_processing_efficientnet.py +23 -26
  398. transformers/models/efficientnet/image_processing_efficientnet_fast.py +14 -15
  399. transformers/models/efficientnet/modeling_efficientnet.py +12 -14
  400. transformers/models/electra/configuration_electra.py +0 -1
  401. transformers/models/electra/modeling_electra.py +101 -103
  402. transformers/models/emu3/configuration_emu3.py +5 -7
  403. transformers/models/emu3/image_processing_emu3.py +44 -39
  404. transformers/models/emu3/modeling_emu3.py +59 -62
  405. transformers/models/emu3/modular_emu3.py +32 -34
  406. transformers/models/emu3/processing_emu3.py +18 -43
  407. transformers/models/encodec/configuration_encodec.py +2 -4
  408. transformers/models/encodec/feature_extraction_encodec.py +10 -13
  409. transformers/models/encodec/modeling_encodec.py +25 -29
  410. transformers/models/encoder_decoder/configuration_encoder_decoder.py +0 -1
  411. transformers/models/encoder_decoder/modeling_encoder_decoder.py +17 -19
  412. transformers/models/eomt/configuration_eomt.py +0 -1
  413. transformers/models/eomt/image_processing_eomt.py +53 -55
  414. transformers/models/eomt/image_processing_eomt_fast.py +15 -16
  415. transformers/models/eomt/modeling_eomt.py +16 -18
  416. transformers/models/eomt/modular_eomt.py +11 -13
  417. transformers/models/ernie/configuration_ernie.py +0 -1
  418. transformers/models/ernie/modeling_ernie.py +121 -132
  419. transformers/models/ernie/modular_ernie.py +91 -103
  420. transformers/models/ernie4_5/configuration_ernie4_5.py +18 -20
  421. transformers/models/ernie4_5/modeling_ernie4_5.py +31 -33
  422. transformers/models/ernie4_5/modular_ernie4_5.py +1 -3
  423. transformers/models/ernie4_5_moe/configuration_ernie4_5_moe.py +27 -29
  424. transformers/models/ernie4_5_moe/modeling_ernie4_5_moe.py +36 -38
  425. transformers/models/ernie4_5_moe/modular_ernie4_5_moe.py +7 -9
  426. transformers/models/ernie4_5_vl_moe/configuration_ernie4_5_vl_moe.py +0 -1
  427. transformers/models/ernie4_5_vl_moe/image_processing_ernie4_5_vl_moe.py +34 -35
  428. transformers/models/ernie4_5_vl_moe/image_processing_ernie4_5_vl_moe_fast.py +6 -7
  429. transformers/models/ernie4_5_vl_moe/modeling_ernie4_5_vl_moe.py +84 -87
  430. transformers/models/ernie4_5_vl_moe/modular_ernie4_5_vl_moe.py +86 -89
  431. transformers/models/ernie4_5_vl_moe/processing_ernie4_5_vl_moe.py +3 -5
  432. transformers/models/ernie4_5_vl_moe/video_processing_ernie4_5_vl_moe.py +17 -18
  433. transformers/models/esm/configuration_esm.py +2 -4
  434. transformers/models/esm/modeling_esm.py +32 -34
  435. transformers/models/esm/modeling_esmfold.py +42 -44
  436. transformers/models/esm/openfold_utils/chunk_utils.py +6 -6
  437. transformers/models/esm/openfold_utils/loss.py +1 -2
  438. transformers/models/esm/openfold_utils/protein.py +13 -13
  439. transformers/models/esm/openfold_utils/tensor_utils.py +6 -6
  440. transformers/models/esm/tokenization_esm.py +2 -4
  441. transformers/models/evolla/configuration_evolla.py +29 -32
  442. transformers/models/evolla/modeling_evolla.py +58 -61
  443. transformers/models/evolla/modular_evolla.py +45 -47
  444. transformers/models/evolla/processing_evolla.py +23 -35
  445. transformers/models/exaone4/configuration_exaone4.py +19 -22
  446. transformers/models/exaone4/modeling_exaone4.py +32 -35
  447. transformers/models/exaone4/modular_exaone4.py +40 -42
  448. transformers/models/falcon/configuration_falcon.py +22 -25
  449. transformers/models/falcon/modeling_falcon.py +73 -76
  450. transformers/models/falcon_h1/configuration_falcon_h1.py +40 -43
  451. transformers/models/falcon_h1/modeling_falcon_h1.py +52 -55
  452. transformers/models/falcon_h1/modular_falcon_h1.py +47 -48
  453. transformers/models/falcon_mamba/configuration_falcon_mamba.py +0 -1
  454. transformers/models/falcon_mamba/modeling_falcon_mamba.py +46 -47
  455. transformers/models/falcon_mamba/modular_falcon_mamba.py +10 -13
  456. transformers/models/fast_vlm/configuration_fast_vlm.py +1 -0
  457. transformers/models/fast_vlm/modeling_fast_vlm.py +36 -36
  458. transformers/models/fast_vlm/modular_fast_vlm.py +2 -3
  459. transformers/models/fastspeech2_conformer/configuration_fastspeech2_conformer.py +2 -5
  460. transformers/models/fastspeech2_conformer/modeling_fastspeech2_conformer.py +45 -47
  461. transformers/models/fastspeech2_conformer/tokenization_fastspeech2_conformer.py +1 -3
  462. transformers/models/flaubert/configuration_flaubert.py +0 -1
  463. transformers/models/flaubert/modeling_flaubert.py +124 -128
  464. transformers/models/flaubert/tokenization_flaubert.py +3 -5
  465. transformers/models/flava/configuration_flava.py +5 -6
  466. transformers/models/flava/image_processing_flava.py +66 -67
  467. transformers/models/flava/image_processing_flava_fast.py +42 -43
  468. transformers/models/flava/modeling_flava.py +108 -107
  469. transformers/models/flava/processing_flava.py +2 -12
  470. transformers/models/flex_olmo/__init__.py +0 -1
  471. transformers/models/flex_olmo/configuration_flex_olmo.py +23 -25
  472. transformers/models/flex_olmo/modeling_flex_olmo.py +37 -39
  473. transformers/models/flex_olmo/modular_flex_olmo.py +35 -37
  474. transformers/models/florence2/configuration_florence2.py +0 -1
  475. transformers/models/florence2/modeling_florence2.py +39 -40
  476. transformers/models/florence2/modular_florence2.py +52 -81
  477. transformers/models/florence2/processing_florence2.py +18 -47
  478. transformers/models/fnet/configuration_fnet.py +0 -1
  479. transformers/models/fnet/modeling_fnet.py +69 -80
  480. transformers/models/fnet/tokenization_fnet.py +0 -1
  481. transformers/models/focalnet/configuration_focalnet.py +0 -1
  482. transformers/models/focalnet/modeling_focalnet.py +39 -41
  483. transformers/models/fsmt/configuration_fsmt.py +0 -1
  484. transformers/models/fsmt/modeling_fsmt.py +47 -48
  485. transformers/models/fsmt/tokenization_fsmt.py +3 -5
  486. transformers/models/funnel/configuration_funnel.py +0 -1
  487. transformers/models/funnel/modeling_funnel.py +91 -93
  488. transformers/models/funnel/tokenization_funnel.py +2 -5
  489. transformers/models/fuyu/configuration_fuyu.py +23 -26
  490. transformers/models/fuyu/image_processing_fuyu.py +29 -31
  491. transformers/models/fuyu/image_processing_fuyu_fast.py +12 -13
  492. transformers/models/fuyu/modeling_fuyu.py +26 -29
  493. transformers/models/fuyu/processing_fuyu.py +9 -36
  494. transformers/models/gemma/configuration_gemma.py +20 -23
  495. transformers/models/gemma/modeling_gemma.py +32 -34
  496. transformers/models/gemma/modular_gemma.py +28 -29
  497. transformers/models/gemma/tokenization_gemma.py +3 -6
  498. transformers/models/gemma2/configuration_gemma2.py +25 -28
  499. transformers/models/gemma2/modeling_gemma2.py +34 -37
  500. transformers/models/gemma2/modular_gemma2.py +55 -57
  501. transformers/models/gemma3/configuration_gemma3.py +28 -29
  502. transformers/models/gemma3/image_processing_gemma3.py +29 -31
  503. transformers/models/gemma3/image_processing_gemma3_fast.py +9 -10
  504. transformers/models/gemma3/modeling_gemma3.py +86 -89
  505. transformers/models/gemma3/modular_gemma3.py +85 -86
  506. transformers/models/gemma3/processing_gemma3.py +5 -5
  507. transformers/models/gemma3n/configuration_gemma3n.py +9 -10
  508. transformers/models/gemma3n/feature_extraction_gemma3n.py +9 -11
  509. transformers/models/gemma3n/modeling_gemma3n.py +80 -89
  510. transformers/models/gemma3n/modular_gemma3n.py +66 -75
  511. transformers/models/gemma3n/processing_gemma3n.py +12 -26
  512. transformers/models/git/configuration_git.py +0 -1
  513. transformers/models/git/modeling_git.py +84 -86
  514. transformers/models/git/processing_git.py +2 -14
  515. transformers/models/glm/configuration_glm.py +19 -21
  516. transformers/models/glm/modeling_glm.py +32 -35
  517. transformers/models/glm/modular_glm.py +4 -7
  518. transformers/models/glm4/configuration_glm4.py +19 -21
  519. transformers/models/glm4/modeling_glm4.py +35 -37
  520. transformers/models/glm4/modular_glm4.py +8 -10
  521. transformers/models/glm46v/configuration_glm46v.py +0 -1
  522. transformers/models/glm46v/image_processing_glm46v.py +35 -36
  523. transformers/models/glm46v/image_processing_glm46v_fast.py +7 -7
  524. transformers/models/glm46v/modeling_glm46v.py +51 -51
  525. transformers/models/glm46v/modular_glm46v.py +1 -3
  526. transformers/models/glm46v/processing_glm46v.py +7 -41
  527. transformers/models/glm46v/video_processing_glm46v.py +9 -11
  528. transformers/models/glm4_moe/configuration_glm4_moe.py +25 -28
  529. transformers/models/glm4_moe/modeling_glm4_moe.py +32 -35
  530. transformers/models/glm4_moe/modular_glm4_moe.py +26 -29
  531. transformers/models/glm4_moe_lite/__init__.py +28 -0
  532. transformers/models/glm4_moe_lite/configuration_glm4_moe_lite.py +235 -0
  533. transformers/models/glm4_moe_lite/modeling_glm4_moe_lite.py +740 -0
  534. transformers/models/glm4_moe_lite/modular_glm4_moe_lite.py +304 -0
  535. transformers/models/glm4v/configuration_glm4v.py +14 -17
  536. transformers/models/glm4v/image_processing_glm4v.py +34 -36
  537. transformers/models/glm4v/image_processing_glm4v_fast.py +6 -7
  538. transformers/models/glm4v/modeling_glm4v.py +133 -151
  539. transformers/models/glm4v/modular_glm4v.py +131 -182
  540. transformers/models/glm4v/processing_glm4v.py +7 -41
  541. transformers/models/glm4v/video_processing_glm4v.py +9 -11
  542. transformers/models/glm4v_moe/configuration_glm4v_moe.py +119 -122
  543. transformers/models/glm4v_moe/modeling_glm4v_moe.py +237 -297
  544. transformers/models/glm4v_moe/modular_glm4v_moe.py +54 -163
  545. transformers/models/glm_image/__init__.py +31 -0
  546. transformers/models/glm_image/configuration_glm_image.py +352 -0
  547. transformers/models/glm_image/image_processing_glm_image.py +503 -0
  548. transformers/models/glm_image/image_processing_glm_image_fast.py +296 -0
  549. transformers/models/glm_image/modeling_glm_image.py +1590 -0
  550. transformers/models/glm_image/modular_glm_image.py +1480 -0
  551. transformers/models/glm_image/processing_glm_image.py +217 -0
  552. transformers/models/glmasr/__init__.py +0 -1
  553. transformers/models/glmasr/configuration_glmasr.py +0 -1
  554. transformers/models/glmasr/modeling_glmasr.py +17 -18
  555. transformers/models/glmasr/modular_glmasr.py +16 -18
  556. transformers/models/glmasr/processing_glmasr.py +7 -8
  557. transformers/models/glpn/configuration_glpn.py +0 -1
  558. transformers/models/glpn/image_processing_glpn.py +11 -12
  559. transformers/models/glpn/image_processing_glpn_fast.py +8 -9
  560. transformers/models/glpn/modeling_glpn.py +10 -12
  561. transformers/models/got_ocr2/configuration_got_ocr2.py +5 -8
  562. transformers/models/got_ocr2/image_processing_got_ocr2.py +22 -24
  563. transformers/models/got_ocr2/image_processing_got_ocr2_fast.py +6 -7
  564. transformers/models/got_ocr2/modeling_got_ocr2.py +40 -42
  565. transformers/models/got_ocr2/modular_got_ocr2.py +31 -34
  566. transformers/models/got_ocr2/processing_got_ocr2.py +42 -63
  567. transformers/models/gpt2/configuration_gpt2.py +0 -1
  568. transformers/models/gpt2/modeling_gpt2.py +106 -108
  569. transformers/models/gpt2/tokenization_gpt2.py +6 -9
  570. transformers/models/gpt_bigcode/configuration_gpt_bigcode.py +0 -1
  571. transformers/models/gpt_bigcode/modeling_gpt_bigcode.py +73 -80
  572. transformers/models/gpt_neo/configuration_gpt_neo.py +0 -1
  573. transformers/models/gpt_neo/modeling_gpt_neo.py +63 -64
  574. transformers/models/gpt_neox/configuration_gpt_neox.py +19 -22
  575. transformers/models/gpt_neox/modeling_gpt_neox.py +70 -72
  576. transformers/models/gpt_neox/modular_gpt_neox.py +64 -66
  577. transformers/models/gpt_neox/tokenization_gpt_neox.py +2 -5
  578. transformers/models/gpt_neox_japanese/configuration_gpt_neox_japanese.py +15 -18
  579. transformers/models/gpt_neox_japanese/modeling_gpt_neox_japanese.py +41 -44
  580. transformers/models/gpt_neox_japanese/tokenization_gpt_neox_japanese.py +1 -3
  581. transformers/models/gpt_oss/configuration_gpt_oss.py +21 -24
  582. transformers/models/gpt_oss/modeling_gpt_oss.py +34 -35
  583. transformers/models/gpt_oss/modular_gpt_oss.py +17 -19
  584. transformers/models/gpt_sw3/tokenization_gpt_sw3.py +4 -4
  585. transformers/models/gptj/configuration_gptj.py +0 -1
  586. transformers/models/gptj/modeling_gptj.py +82 -81
  587. transformers/models/granite/configuration_granite.py +23 -26
  588. transformers/models/granite/modeling_granite.py +39 -41
  589. transformers/models/granite/modular_granite.py +29 -31
  590. transformers/models/granite_speech/configuration_granite_speech.py +0 -1
  591. transformers/models/granite_speech/feature_extraction_granite_speech.py +1 -3
  592. transformers/models/granite_speech/modeling_granite_speech.py +21 -23
  593. transformers/models/granite_speech/processing_granite_speech.py +11 -4
  594. transformers/models/granitemoe/configuration_granitemoe.py +26 -29
  595. transformers/models/granitemoe/modeling_granitemoe.py +35 -37
  596. transformers/models/granitemoe/modular_granitemoe.py +21 -23
  597. transformers/models/granitemoehybrid/__init__.py +0 -1
  598. transformers/models/granitemoehybrid/configuration_granitemoehybrid.py +38 -41
  599. transformers/models/granitemoehybrid/modeling_granitemoehybrid.py +60 -64
  600. transformers/models/granitemoehybrid/modular_granitemoehybrid.py +18 -20
  601. transformers/models/granitemoeshared/configuration_granitemoeshared.py +27 -30
  602. transformers/models/granitemoeshared/modeling_granitemoeshared.py +48 -52
  603. transformers/models/granitemoeshared/modular_granitemoeshared.py +19 -21
  604. transformers/models/grounding_dino/configuration_grounding_dino.py +0 -1
  605. transformers/models/grounding_dino/image_processing_grounding_dino.py +60 -62
  606. transformers/models/grounding_dino/image_processing_grounding_dino_fast.py +17 -18
  607. transformers/models/grounding_dino/modeling_grounding_dino.py +94 -96
  608. transformers/models/grounding_dino/modular_grounding_dino.py +2 -3
  609. transformers/models/grounding_dino/processing_grounding_dino.py +10 -38
  610. transformers/models/groupvit/configuration_groupvit.py +0 -1
  611. transformers/models/groupvit/modeling_groupvit.py +69 -70
  612. transformers/models/helium/configuration_helium.py +20 -22
  613. transformers/models/helium/modeling_helium.py +33 -36
  614. transformers/models/helium/modular_helium.py +3 -7
  615. transformers/models/herbert/tokenization_herbert.py +4 -6
  616. transformers/models/hgnet_v2/configuration_hgnet_v2.py +0 -1
  617. transformers/models/hgnet_v2/modeling_hgnet_v2.py +6 -9
  618. transformers/models/hgnet_v2/modular_hgnet_v2.py +6 -9
  619. transformers/models/hiera/configuration_hiera.py +0 -1
  620. transformers/models/hiera/modeling_hiera.py +60 -62
  621. transformers/models/hubert/configuration_hubert.py +0 -1
  622. transformers/models/hubert/modeling_hubert.py +35 -37
  623. transformers/models/hubert/modular_hubert.py +8 -11
  624. transformers/models/hunyuan_v1_dense/configuration_hunyuan_v1_dense.py +21 -24
  625. transformers/models/hunyuan_v1_dense/modeling_hunyuan_v1_dense.py +30 -33
  626. transformers/models/hunyuan_v1_dense/modular_hunyuan_v1_dense.py +3 -5
  627. transformers/models/hunyuan_v1_moe/configuration_hunyuan_v1_moe.py +25 -28
  628. transformers/models/hunyuan_v1_moe/modeling_hunyuan_v1_moe.py +32 -35
  629. transformers/models/hunyuan_v1_moe/modular_hunyuan_v1_moe.py +5 -7
  630. transformers/models/ibert/configuration_ibert.py +0 -1
  631. transformers/models/ibert/modeling_ibert.py +60 -62
  632. transformers/models/ibert/quant_modules.py +0 -1
  633. transformers/models/idefics/configuration_idefics.py +0 -1
  634. transformers/models/idefics/image_processing_idefics.py +13 -15
  635. transformers/models/idefics/modeling_idefics.py +60 -61
  636. transformers/models/idefics/perceiver.py +1 -3
  637. transformers/models/idefics/processing_idefics.py +32 -48
  638. transformers/models/idefics/vision.py +22 -24
  639. transformers/models/idefics2/configuration_idefics2.py +0 -1
  640. transformers/models/idefics2/image_processing_idefics2.py +31 -32
  641. transformers/models/idefics2/image_processing_idefics2_fast.py +7 -8
  642. transformers/models/idefics2/modeling_idefics2.py +56 -58
  643. transformers/models/idefics2/processing_idefics2.py +10 -68
  644. transformers/models/idefics3/configuration_idefics3.py +0 -1
  645. transformers/models/idefics3/image_processing_idefics3.py +42 -43
  646. transformers/models/idefics3/image_processing_idefics3_fast.py +11 -12
  647. transformers/models/idefics3/modeling_idefics3.py +52 -54
  648. transformers/models/idefics3/processing_idefics3.py +15 -69
  649. transformers/models/ijepa/configuration_ijepa.py +0 -1
  650. transformers/models/ijepa/modeling_ijepa.py +10 -11
  651. transformers/models/ijepa/modular_ijepa.py +5 -7
  652. transformers/models/imagegpt/configuration_imagegpt.py +0 -1
  653. transformers/models/imagegpt/image_processing_imagegpt.py +17 -18
  654. transformers/models/imagegpt/image_processing_imagegpt_fast.py +8 -9
  655. transformers/models/imagegpt/modeling_imagegpt.py +57 -58
  656. transformers/models/informer/configuration_informer.py +6 -9
  657. transformers/models/informer/modeling_informer.py +84 -86
  658. transformers/models/informer/modular_informer.py +13 -16
  659. transformers/models/instructblip/configuration_instructblip.py +0 -1
  660. transformers/models/instructblip/modeling_instructblip.py +43 -44
  661. transformers/models/instructblip/processing_instructblip.py +10 -36
  662. transformers/models/instructblipvideo/configuration_instructblipvideo.py +0 -1
  663. transformers/models/instructblipvideo/modeling_instructblipvideo.py +55 -55
  664. transformers/models/instructblipvideo/modular_instructblipvideo.py +34 -36
  665. transformers/models/instructblipvideo/processing_instructblipvideo.py +14 -33
  666. transformers/models/instructblipvideo/video_processing_instructblipvideo.py +4 -5
  667. transformers/models/internvl/configuration_internvl.py +0 -1
  668. transformers/models/internvl/modeling_internvl.py +41 -43
  669. transformers/models/internvl/modular_internvl.py +19 -21
  670. transformers/models/internvl/processing_internvl.py +12 -45
  671. transformers/models/internvl/video_processing_internvl.py +8 -9
  672. transformers/models/jais2/configuration_jais2.py +20 -22
  673. transformers/models/jais2/modeling_jais2.py +32 -34
  674. transformers/models/jais2/modular_jais2.py +20 -22
  675. transformers/models/jamba/configuration_jamba.py +0 -1
  676. transformers/models/jamba/modeling_jamba.py +43 -46
  677. transformers/models/jamba/modular_jamba.py +37 -38
  678. transformers/models/janus/configuration_janus.py +0 -1
  679. transformers/models/janus/image_processing_janus.py +35 -37
  680. transformers/models/janus/image_processing_janus_fast.py +12 -13
  681. transformers/models/janus/modeling_janus.py +41 -43
  682. transformers/models/janus/modular_janus.py +60 -63
  683. transformers/models/janus/processing_janus.py +17 -43
  684. transformers/models/jetmoe/configuration_jetmoe.py +20 -23
  685. transformers/models/jetmoe/modeling_jetmoe.py +39 -42
  686. transformers/models/jetmoe/modular_jetmoe.py +30 -33
  687. transformers/models/kosmos2/configuration_kosmos2.py +0 -1
  688. transformers/models/kosmos2/modeling_kosmos2.py +145 -146
  689. transformers/models/kosmos2/processing_kosmos2.py +40 -55
  690. transformers/models/kosmos2_5/__init__.py +0 -1
  691. transformers/models/kosmos2_5/configuration_kosmos2_5.py +0 -1
  692. transformers/models/kosmos2_5/image_processing_kosmos2_5.py +10 -12
  693. transformers/models/kosmos2_5/image_processing_kosmos2_5_fast.py +2 -11
  694. transformers/models/kosmos2_5/modeling_kosmos2_5.py +108 -109
  695. transformers/models/kosmos2_5/processing_kosmos2_5.py +8 -29
  696. transformers/models/kyutai_speech_to_text/configuration_kyutai_speech_to_text.py +23 -25
  697. transformers/models/kyutai_speech_to_text/feature_extraction_kyutai_speech_to_text.py +12 -14
  698. transformers/models/kyutai_speech_to_text/modeling_kyutai_speech_to_text.py +59 -66
  699. transformers/models/kyutai_speech_to_text/modular_kyutai_speech_to_text.py +19 -21
  700. transformers/models/kyutai_speech_to_text/processing_kyutai_speech_to_text.py +2 -8
  701. transformers/models/lasr/configuration_lasr.py +1 -3
  702. transformers/models/lasr/feature_extraction_lasr.py +10 -12
  703. transformers/models/lasr/modeling_lasr.py +18 -21
  704. transformers/models/lasr/modular_lasr.py +8 -10
  705. transformers/models/lasr/processing_lasr.py +12 -6
  706. transformers/models/lasr/tokenization_lasr.py +2 -4
  707. transformers/models/layoutlm/configuration_layoutlm.py +0 -1
  708. transformers/models/layoutlm/modeling_layoutlm.py +67 -69
  709. transformers/models/layoutlmv2/configuration_layoutlmv2.py +0 -1
  710. transformers/models/layoutlmv2/image_processing_layoutlmv2.py +18 -21
  711. transformers/models/layoutlmv2/image_processing_layoutlmv2_fast.py +5 -6
  712. transformers/models/layoutlmv2/modeling_layoutlmv2.py +48 -50
  713. transformers/models/layoutlmv2/processing_layoutlmv2.py +14 -44
  714. transformers/models/layoutlmv2/tokenization_layoutlmv2.py +63 -74
  715. transformers/models/layoutlmv3/configuration_layoutlmv3.py +0 -1
  716. transformers/models/layoutlmv3/image_processing_layoutlmv3.py +24 -26
  717. transformers/models/layoutlmv3/image_processing_layoutlmv3_fast.py +7 -8
  718. transformers/models/layoutlmv3/modeling_layoutlmv3.py +49 -51
  719. transformers/models/layoutlmv3/processing_layoutlmv3.py +14 -46
  720. transformers/models/layoutlmv3/tokenization_layoutlmv3.py +64 -75
  721. transformers/models/layoutxlm/configuration_layoutxlm.py +0 -1
  722. transformers/models/layoutxlm/modular_layoutxlm.py +0 -1
  723. transformers/models/layoutxlm/processing_layoutxlm.py +14 -44
  724. transformers/models/layoutxlm/tokenization_layoutxlm.py +65 -76
  725. transformers/models/led/configuration_led.py +1 -4
  726. transformers/models/led/modeling_led.py +113 -267
  727. transformers/models/levit/configuration_levit.py +0 -1
  728. transformers/models/levit/image_processing_levit.py +19 -21
  729. transformers/models/levit/image_processing_levit_fast.py +0 -1
  730. transformers/models/levit/modeling_levit.py +17 -19
  731. transformers/models/lfm2/configuration_lfm2.py +22 -23
  732. transformers/models/lfm2/modeling_lfm2.py +42 -44
  733. transformers/models/lfm2/modular_lfm2.py +29 -29
  734. transformers/models/lfm2_moe/__init__.py +0 -1
  735. transformers/models/lfm2_moe/configuration_lfm2_moe.py +1 -2
  736. transformers/models/lfm2_moe/modeling_lfm2_moe.py +44 -45
  737. transformers/models/lfm2_moe/modular_lfm2_moe.py +8 -9
  738. transformers/models/lfm2_vl/configuration_lfm2_vl.py +0 -1
  739. transformers/models/lfm2_vl/image_processing_lfm2_vl_fast.py +34 -5
  740. transformers/models/lfm2_vl/modeling_lfm2_vl.py +31 -33
  741. transformers/models/lfm2_vl/modular_lfm2_vl.py +24 -27
  742. transformers/models/lfm2_vl/processing_lfm2_vl.py +14 -34
  743. transformers/models/lightglue/image_processing_lightglue.py +16 -15
  744. transformers/models/lightglue/image_processing_lightglue_fast.py +4 -4
  745. transformers/models/lightglue/modeling_lightglue.py +28 -30
  746. transformers/models/lightglue/modular_lightglue.py +28 -28
  747. transformers/models/lighton_ocr/__init__.py +28 -0
  748. transformers/models/lighton_ocr/configuration_lighton_ocr.py +128 -0
  749. transformers/models/lighton_ocr/modeling_lighton_ocr.py +460 -0
  750. transformers/models/lighton_ocr/modular_lighton_ocr.py +403 -0
  751. transformers/models/lighton_ocr/processing_lighton_ocr.py +229 -0
  752. transformers/models/lilt/configuration_lilt.py +0 -1
  753. transformers/models/lilt/modeling_lilt.py +53 -55
  754. transformers/models/llama/configuration_llama.py +21 -24
  755. transformers/models/llama/modeling_llama.py +31 -34
  756. transformers/models/llama/tokenization_llama.py +2 -4
  757. transformers/models/llama4/configuration_llama4.py +20 -22
  758. transformers/models/llama4/image_processing_llama4_fast.py +8 -9
  759. transformers/models/llama4/modeling_llama4.py +70 -71
  760. transformers/models/llama4/processing_llama4.py +33 -57
  761. transformers/models/llava/configuration_llava.py +0 -1
  762. transformers/models/llava/image_processing_llava.py +25 -28
  763. transformers/models/llava/image_processing_llava_fast.py +6 -7
  764. transformers/models/llava/modeling_llava.py +35 -37
  765. transformers/models/llava/processing_llava.py +18 -51
  766. transformers/models/llava_next/configuration_llava_next.py +0 -1
  767. transformers/models/llava_next/image_processing_llava_next.py +43 -45
  768. transformers/models/llava_next/image_processing_llava_next_fast.py +5 -6
  769. transformers/models/llava_next/modeling_llava_next.py +42 -44
  770. transformers/models/llava_next/processing_llava_next.py +18 -47
  771. transformers/models/llava_next_video/configuration_llava_next_video.py +0 -1
  772. transformers/models/llava_next_video/modeling_llava_next_video.py +53 -55
  773. transformers/models/llava_next_video/modular_llava_next_video.py +44 -46
  774. transformers/models/llava_next_video/processing_llava_next_video.py +21 -63
  775. transformers/models/llava_next_video/video_processing_llava_next_video.py +0 -1
  776. transformers/models/llava_onevision/configuration_llava_onevision.py +0 -1
  777. transformers/models/llava_onevision/image_processing_llava_onevision.py +40 -42
  778. transformers/models/llava_onevision/image_processing_llava_onevision_fast.py +6 -7
  779. transformers/models/llava_onevision/modeling_llava_onevision.py +60 -62
  780. transformers/models/llava_onevision/modular_llava_onevision.py +51 -52
  781. transformers/models/llava_onevision/processing_llava_onevision.py +21 -53
  782. transformers/models/llava_onevision/video_processing_llava_onevision.py +0 -1
  783. transformers/models/longcat_flash/__init__.py +0 -1
  784. transformers/models/longcat_flash/configuration_longcat_flash.py +32 -35
  785. transformers/models/longcat_flash/modeling_longcat_flash.py +30 -31
  786. transformers/models/longcat_flash/modular_longcat_flash.py +17 -19
  787. transformers/models/longformer/configuration_longformer.py +1 -4
  788. transformers/models/longformer/modeling_longformer.py +99 -101
  789. transformers/models/longt5/configuration_longt5.py +0 -1
  790. transformers/models/longt5/modeling_longt5.py +43 -44
  791. transformers/models/luke/configuration_luke.py +0 -1
  792. transformers/models/luke/modeling_luke.py +179 -181
  793. transformers/models/luke/tokenization_luke.py +99 -105
  794. transformers/models/lw_detr/__init__.py +27 -0
  795. transformers/models/lw_detr/configuration_lw_detr.py +374 -0
  796. transformers/models/lw_detr/modeling_lw_detr.py +1698 -0
  797. transformers/models/lw_detr/modular_lw_detr.py +1611 -0
  798. transformers/models/lxmert/configuration_lxmert.py +0 -1
  799. transformers/models/lxmert/modeling_lxmert.py +63 -74
  800. transformers/models/m2m_100/configuration_m2m_100.py +0 -1
  801. transformers/models/m2m_100/modeling_m2m_100.py +69 -71
  802. transformers/models/m2m_100/tokenization_m2m_100.py +8 -8
  803. transformers/models/mamba/configuration_mamba.py +0 -1
  804. transformers/models/mamba/modeling_mamba.py +43 -44
  805. transformers/models/mamba2/configuration_mamba2.py +0 -1
  806. transformers/models/mamba2/modeling_mamba2.py +44 -46
  807. transformers/models/marian/configuration_marian.py +0 -1
  808. transformers/models/marian/modeling_marian.py +84 -86
  809. transformers/models/marian/tokenization_marian.py +6 -6
  810. transformers/models/markuplm/configuration_markuplm.py +0 -1
  811. transformers/models/markuplm/feature_extraction_markuplm.py +1 -2
  812. transformers/models/markuplm/modeling_markuplm.py +60 -62
  813. transformers/models/markuplm/processing_markuplm.py +31 -38
  814. transformers/models/markuplm/tokenization_markuplm.py +67 -77
  815. transformers/models/mask2former/configuration_mask2former.py +4 -7
  816. transformers/models/mask2former/image_processing_mask2former.py +84 -85
  817. transformers/models/mask2former/image_processing_mask2former_fast.py +29 -29
  818. transformers/models/mask2former/modeling_mask2former.py +90 -92
  819. transformers/models/mask2former/modular_mask2former.py +6 -8
  820. transformers/models/maskformer/configuration_maskformer.py +5 -8
  821. transformers/models/maskformer/configuration_maskformer_swin.py +0 -1
  822. transformers/models/maskformer/image_processing_maskformer.py +84 -85
  823. transformers/models/maskformer/image_processing_maskformer_fast.py +28 -29
  824. transformers/models/maskformer/modeling_maskformer.py +56 -58
  825. transformers/models/maskformer/modeling_maskformer_swin.py +18 -20
  826. transformers/models/mbart/configuration_mbart.py +0 -1
  827. transformers/models/mbart/modeling_mbart.py +111 -113
  828. transformers/models/mbart/tokenization_mbart.py +2 -4
  829. transformers/models/mbart50/tokenization_mbart50.py +3 -5
  830. transformers/models/megatron_bert/configuration_megatron_bert.py +0 -1
  831. transformers/models/megatron_bert/modeling_megatron_bert.py +139 -150
  832. transformers/models/metaclip_2/modeling_metaclip_2.py +46 -46
  833. transformers/models/metaclip_2/modular_metaclip_2.py +19 -21
  834. transformers/models/mgp_str/configuration_mgp_str.py +0 -1
  835. transformers/models/mgp_str/modeling_mgp_str.py +14 -16
  836. transformers/models/mgp_str/processing_mgp_str.py +3 -20
  837. transformers/models/mgp_str/tokenization_mgp_str.py +1 -3
  838. transformers/models/mimi/configuration_mimi.py +38 -40
  839. transformers/models/mimi/modeling_mimi.py +76 -79
  840. transformers/models/minimax/__init__.py +0 -1
  841. transformers/models/minimax/configuration_minimax.py +32 -36
  842. transformers/models/minimax/modeling_minimax.py +41 -44
  843. transformers/models/minimax/modular_minimax.py +50 -53
  844. transformers/models/minimax_m2/__init__.py +28 -0
  845. transformers/models/minimax_m2/configuration_minimax_m2.py +211 -0
  846. transformers/models/minimax_m2/modeling_minimax_m2.py +704 -0
  847. transformers/models/minimax_m2/modular_minimax_m2.py +369 -0
  848. transformers/models/ministral/configuration_ministral.py +20 -22
  849. transformers/models/ministral/modeling_ministral.py +31 -33
  850. transformers/models/ministral/modular_ministral.py +27 -29
  851. transformers/models/ministral3/configuration_ministral3.py +19 -22
  852. transformers/models/ministral3/modeling_ministral3.py +31 -33
  853. transformers/models/ministral3/modular_ministral3.py +4 -5
  854. transformers/models/mistral/configuration_mistral.py +19 -22
  855. transformers/models/mistral/modeling_mistral.py +31 -33
  856. transformers/models/mistral/modular_mistral.py +11 -12
  857. transformers/models/mistral3/configuration_mistral3.py +0 -1
  858. transformers/models/mistral3/modeling_mistral3.py +43 -42
  859. transformers/models/mistral3/modular_mistral3.py +35 -35
  860. transformers/models/mixtral/configuration_mixtral.py +24 -27
  861. transformers/models/mixtral/modeling_mixtral.py +35 -38
  862. transformers/models/mixtral/modular_mixtral.py +26 -29
  863. transformers/models/mlcd/configuration_mlcd.py +0 -1
  864. transformers/models/mlcd/modeling_mlcd.py +10 -12
  865. transformers/models/mlcd/modular_mlcd.py +9 -11
  866. transformers/models/mllama/configuration_mllama.py +5 -8
  867. transformers/models/mllama/image_processing_mllama.py +23 -25
  868. transformers/models/mllama/image_processing_mllama_fast.py +5 -6
  869. transformers/models/mllama/modeling_mllama.py +81 -84
  870. transformers/models/mllama/processing_mllama.py +6 -55
  871. transformers/models/mluke/tokenization_mluke.py +97 -103
  872. transformers/models/mm_grounding_dino/configuration_mm_grounding_dino.py +0 -1
  873. transformers/models/mm_grounding_dino/modeling_mm_grounding_dino.py +94 -96
  874. transformers/models/mm_grounding_dino/modular_mm_grounding_dino.py +0 -1
  875. transformers/models/mobilebert/configuration_mobilebert.py +0 -1
  876. transformers/models/mobilebert/modeling_mobilebert.py +75 -85
  877. transformers/models/mobilebert/tokenization_mobilebert.py +0 -1
  878. transformers/models/mobilenet_v1/configuration_mobilenet_v1.py +0 -1
  879. transformers/models/mobilenet_v1/image_processing_mobilenet_v1.py +20 -23
  880. transformers/models/mobilenet_v1/image_processing_mobilenet_v1_fast.py +0 -1
  881. transformers/models/mobilenet_v1/modeling_mobilenet_v1.py +13 -16
  882. transformers/models/mobilenet_v2/configuration_mobilenet_v2.py +0 -1
  883. transformers/models/mobilenet_v2/image_processing_mobilenet_v2.py +48 -51
  884. transformers/models/mobilenet_v2/image_processing_mobilenet_v2_fast.py +10 -11
  885. transformers/models/mobilenet_v2/modeling_mobilenet_v2.py +17 -20
  886. transformers/models/mobilevit/configuration_mobilevit.py +0 -1
  887. transformers/models/mobilevit/image_processing_mobilevit.py +41 -44
  888. transformers/models/mobilevit/image_processing_mobilevit_fast.py +8 -9
  889. transformers/models/mobilevit/modeling_mobilevit.py +17 -19
  890. transformers/models/mobilevitv2/configuration_mobilevitv2.py +0 -1
  891. transformers/models/mobilevitv2/modeling_mobilevitv2.py +17 -20
  892. transformers/models/modernbert/configuration_modernbert.py +34 -34
  893. transformers/models/modernbert/modeling_modernbert.py +123 -125
  894. transformers/models/modernbert/modular_modernbert.py +155 -155
  895. transformers/models/modernbert_decoder/configuration_modernbert_decoder.py +30 -32
  896. transformers/models/modernbert_decoder/modeling_modernbert_decoder.py +45 -47
  897. transformers/models/modernbert_decoder/modular_modernbert_decoder.py +69 -70
  898. transformers/models/moonshine/configuration_moonshine.py +22 -24
  899. transformers/models/moonshine/modeling_moonshine.py +63 -65
  900. transformers/models/moonshine/modular_moonshine.py +72 -73
  901. transformers/models/moshi/configuration_moshi.py +18 -21
  902. transformers/models/moshi/modeling_moshi.py +130 -133
  903. transformers/models/mpnet/configuration_mpnet.py +0 -1
  904. transformers/models/mpnet/modeling_mpnet.py +55 -57
  905. transformers/models/mpnet/tokenization_mpnet.py +1 -4
  906. transformers/models/mpt/configuration_mpt.py +1 -9
  907. transformers/models/mpt/modeling_mpt.py +58 -60
  908. transformers/models/mra/configuration_mra.py +0 -1
  909. transformers/models/mra/modeling_mra.py +54 -56
  910. transformers/models/mt5/configuration_mt5.py +0 -1
  911. transformers/models/mt5/modeling_mt5.py +75 -77
  912. transformers/models/musicgen/configuration_musicgen.py +0 -1
  913. transformers/models/musicgen/modeling_musicgen.py +108 -111
  914. transformers/models/musicgen/processing_musicgen.py +3 -21
  915. transformers/models/musicgen_melody/configuration_musicgen_melody.py +0 -1
  916. transformers/models/musicgen_melody/feature_extraction_musicgen_melody.py +8 -9
  917. transformers/models/musicgen_melody/modeling_musicgen_melody.py +106 -109
  918. transformers/models/musicgen_melody/processing_musicgen_melody.py +3 -22
  919. transformers/models/mvp/configuration_mvp.py +0 -1
  920. transformers/models/mvp/modeling_mvp.py +115 -119
  921. transformers/models/myt5/tokenization_myt5.py +8 -10
  922. transformers/models/nanochat/configuration_nanochat.py +0 -1
  923. transformers/models/nanochat/modeling_nanochat.py +32 -35
  924. transformers/models/nanochat/modular_nanochat.py +12 -14
  925. transformers/models/nemotron/configuration_nemotron.py +20 -23
  926. transformers/models/nemotron/modeling_nemotron.py +49 -52
  927. transformers/models/nllb/tokenization_nllb.py +7 -9
  928. transformers/models/nllb_moe/configuration_nllb_moe.py +0 -1
  929. transformers/models/nllb_moe/modeling_nllb_moe.py +67 -69
  930. transformers/models/nougat/image_processing_nougat.py +29 -32
  931. transformers/models/nougat/image_processing_nougat_fast.py +4 -5
  932. transformers/models/nougat/processing_nougat.py +37 -39
  933. transformers/models/nougat/tokenization_nougat.py +5 -7
  934. transformers/models/nystromformer/configuration_nystromformer.py +0 -1
  935. transformers/models/nystromformer/modeling_nystromformer.py +61 -63
  936. transformers/models/olmo/configuration_olmo.py +18 -21
  937. transformers/models/olmo/modeling_olmo.py +31 -34
  938. transformers/models/olmo/modular_olmo.py +5 -9
  939. transformers/models/olmo2/configuration_olmo2.py +18 -21
  940. transformers/models/olmo2/modeling_olmo2.py +32 -35
  941. transformers/models/olmo2/modular_olmo2.py +29 -31
  942. transformers/models/olmo3/__init__.py +0 -1
  943. transformers/models/olmo3/configuration_olmo3.py +20 -23
  944. transformers/models/olmo3/modeling_olmo3.py +31 -34
  945. transformers/models/olmo3/modular_olmo3.py +31 -33
  946. transformers/models/olmoe/configuration_olmoe.py +24 -26
  947. transformers/models/olmoe/modeling_olmoe.py +37 -39
  948. transformers/models/olmoe/modular_olmoe.py +12 -13
  949. transformers/models/omdet_turbo/configuration_omdet_turbo.py +0 -1
  950. transformers/models/omdet_turbo/modeling_omdet_turbo.py +38 -40
  951. transformers/models/omdet_turbo/processing_omdet_turbo.py +19 -67
  952. transformers/models/oneformer/configuration_oneformer.py +4 -7
  953. transformers/models/oneformer/image_processing_oneformer.py +83 -84
  954. transformers/models/oneformer/image_processing_oneformer_fast.py +33 -34
  955. transformers/models/oneformer/modeling_oneformer.py +123 -124
  956. transformers/models/oneformer/processing_oneformer.py +28 -43
  957. transformers/models/openai/configuration_openai.py +0 -1
  958. transformers/models/openai/modeling_openai.py +50 -51
  959. transformers/models/openai/tokenization_openai.py +2 -5
  960. transformers/models/opt/configuration_opt.py +0 -1
  961. transformers/models/opt/modeling_opt.py +74 -75
  962. transformers/models/ovis2/__init__.py +0 -1
  963. transformers/models/ovis2/configuration_ovis2.py +0 -1
  964. transformers/models/ovis2/image_processing_ovis2.py +22 -24
  965. transformers/models/ovis2/image_processing_ovis2_fast.py +6 -7
  966. transformers/models/ovis2/modeling_ovis2.py +43 -45
  967. transformers/models/ovis2/modular_ovis2.py +30 -32
  968. transformers/models/ovis2/processing_ovis2.py +12 -40
  969. transformers/models/owlv2/configuration_owlv2.py +0 -1
  970. transformers/models/owlv2/image_processing_owlv2.py +20 -21
  971. transformers/models/owlv2/image_processing_owlv2_fast.py +7 -8
  972. transformers/models/owlv2/modeling_owlv2.py +82 -87
  973. transformers/models/owlv2/modular_owlv2.py +6 -7
  974. transformers/models/owlv2/processing_owlv2.py +20 -49
  975. transformers/models/owlvit/configuration_owlvit.py +0 -1
  976. transformers/models/owlvit/image_processing_owlvit.py +21 -22
  977. transformers/models/owlvit/image_processing_owlvit_fast.py +2 -3
  978. transformers/models/owlvit/modeling_owlvit.py +81 -86
  979. transformers/models/owlvit/processing_owlvit.py +20 -48
  980. transformers/models/paddleocr_vl/__init__.py +0 -1
  981. transformers/models/paddleocr_vl/configuration_paddleocr_vl.py +19 -19
  982. transformers/models/paddleocr_vl/image_processing_paddleocr_vl.py +34 -35
  983. transformers/models/paddleocr_vl/image_processing_paddleocr_vl_fast.py +12 -12
  984. transformers/models/paddleocr_vl/modeling_paddleocr_vl.py +76 -76
  985. transformers/models/paddleocr_vl/modular_paddleocr_vl.py +68 -68
  986. transformers/models/paddleocr_vl/processing_paddleocr_vl.py +1 -3
  987. transformers/models/paligemma/configuration_paligemma.py +0 -1
  988. transformers/models/paligemma/modeling_paligemma.py +51 -53
  989. transformers/models/paligemma/processing_paligemma.py +13 -66
  990. transformers/models/parakeet/configuration_parakeet.py +1 -4
  991. transformers/models/parakeet/feature_extraction_parakeet.py +10 -12
  992. transformers/models/parakeet/modeling_parakeet.py +18 -22
  993. transformers/models/parakeet/modular_parakeet.py +16 -18
  994. transformers/models/parakeet/processing_parakeet.py +12 -5
  995. transformers/models/parakeet/tokenization_parakeet.py +2 -4
  996. transformers/models/patchtsmixer/configuration_patchtsmixer.py +5 -8
  997. transformers/models/patchtsmixer/modeling_patchtsmixer.py +60 -62
  998. transformers/models/patchtst/configuration_patchtst.py +6 -9
  999. transformers/models/patchtst/modeling_patchtst.py +72 -74
  1000. transformers/models/pe_audio/__init__.py +0 -1
  1001. transformers/models/pe_audio/configuration_pe_audio.py +14 -16
  1002. transformers/models/pe_audio/feature_extraction_pe_audio.py +6 -8
  1003. transformers/models/pe_audio/modeling_pe_audio.py +26 -27
  1004. transformers/models/pe_audio/modular_pe_audio.py +16 -17
  1005. transformers/models/pe_audio/processing_pe_audio.py +0 -1
  1006. transformers/models/pe_audio_video/__init__.py +0 -1
  1007. transformers/models/pe_audio_video/configuration_pe_audio_video.py +15 -17
  1008. transformers/models/pe_audio_video/modeling_pe_audio_video.py +60 -61
  1009. transformers/models/pe_audio_video/modular_pe_audio_video.py +52 -53
  1010. transformers/models/pe_audio_video/processing_pe_audio_video.py +0 -1
  1011. transformers/models/pe_video/__init__.py +0 -1
  1012. transformers/models/pe_video/configuration_pe_video.py +14 -16
  1013. transformers/models/pe_video/modeling_pe_video.py +21 -22
  1014. transformers/models/pe_video/modular_pe_video.py +11 -12
  1015. transformers/models/pe_video/video_processing_pe_video.py +2 -4
  1016. transformers/models/pegasus/configuration_pegasus.py +0 -1
  1017. transformers/models/pegasus/modeling_pegasus.py +63 -65
  1018. transformers/models/pegasus/tokenization_pegasus.py +1 -4
  1019. transformers/models/pegasus_x/configuration_pegasus_x.py +0 -1
  1020. transformers/models/pegasus_x/modeling_pegasus_x.py +50 -52
  1021. transformers/models/perceiver/configuration_perceiver.py +0 -1
  1022. transformers/models/perceiver/image_processing_perceiver.py +22 -25
  1023. transformers/models/perceiver/image_processing_perceiver_fast.py +5 -6
  1024. transformers/models/perceiver/modeling_perceiver.py +135 -136
  1025. transformers/models/perceiver/tokenization_perceiver.py +3 -6
  1026. transformers/models/perception_lm/configuration_perception_lm.py +0 -1
  1027. transformers/models/perception_lm/image_processing_perception_lm_fast.py +8 -9
  1028. transformers/models/perception_lm/modeling_perception_lm.py +38 -40
  1029. transformers/models/perception_lm/modular_perception_lm.py +31 -33
  1030. transformers/models/perception_lm/processing_perception_lm.py +13 -47
  1031. transformers/models/perception_lm/video_processing_perception_lm.py +0 -1
  1032. transformers/models/persimmon/configuration_persimmon.py +18 -21
  1033. transformers/models/persimmon/modeling_persimmon.py +39 -42
  1034. transformers/models/phi/configuration_phi.py +19 -22
  1035. transformers/models/phi/modeling_phi.py +35 -37
  1036. transformers/models/phi/modular_phi.py +23 -23
  1037. transformers/models/phi3/configuration_phi3.py +23 -26
  1038. transformers/models/phi3/modeling_phi3.py +33 -36
  1039. transformers/models/phi3/modular_phi3.py +13 -17
  1040. transformers/models/phi4_multimodal/configuration_phi4_multimodal.py +25 -26
  1041. transformers/models/phi4_multimodal/feature_extraction_phi4_multimodal.py +7 -9
  1042. transformers/models/phi4_multimodal/image_processing_phi4_multimodal_fast.py +7 -7
  1043. transformers/models/phi4_multimodal/modeling_phi4_multimodal.py +54 -56
  1044. transformers/models/phi4_multimodal/modular_phi4_multimodal.py +59 -60
  1045. transformers/models/phi4_multimodal/processing_phi4_multimodal.py +7 -42
  1046. transformers/models/phimoe/configuration_phimoe.py +26 -29
  1047. transformers/models/phimoe/modeling_phimoe.py +35 -38
  1048. transformers/models/phimoe/modular_phimoe.py +0 -1
  1049. transformers/models/phobert/tokenization_phobert.py +4 -6
  1050. transformers/models/pix2struct/configuration_pix2struct.py +0 -1
  1051. transformers/models/pix2struct/image_processing_pix2struct.py +15 -19
  1052. transformers/models/pix2struct/image_processing_pix2struct_fast.py +7 -10
  1053. transformers/models/pix2struct/modeling_pix2struct.py +42 -45
  1054. transformers/models/pix2struct/processing_pix2struct.py +5 -26
  1055. transformers/models/pixio/__init__.py +0 -1
  1056. transformers/models/pixio/configuration_pixio.py +0 -1
  1057. transformers/models/pixio/modeling_pixio.py +7 -9
  1058. transformers/models/pixio/modular_pixio.py +3 -6
  1059. transformers/models/pixtral/configuration_pixtral.py +11 -14
  1060. transformers/models/pixtral/image_processing_pixtral.py +26 -28
  1061. transformers/models/pixtral/image_processing_pixtral_fast.py +5 -6
  1062. transformers/models/pixtral/modeling_pixtral.py +22 -25
  1063. transformers/models/pixtral/processing_pixtral.py +18 -52
  1064. transformers/models/plbart/configuration_plbart.py +0 -1
  1065. transformers/models/plbart/modeling_plbart.py +100 -102
  1066. transformers/models/plbart/modular_plbart.py +30 -32
  1067. transformers/models/plbart/tokenization_plbart.py +4 -5
  1068. transformers/models/poolformer/configuration_poolformer.py +0 -1
  1069. transformers/models/poolformer/image_processing_poolformer.py +21 -24
  1070. transformers/models/poolformer/image_processing_poolformer_fast.py +6 -7
  1071. transformers/models/poolformer/modeling_poolformer.py +10 -12
  1072. transformers/models/pop2piano/configuration_pop2piano.py +0 -1
  1073. transformers/models/pop2piano/feature_extraction_pop2piano.py +6 -9
  1074. transformers/models/pop2piano/modeling_pop2piano.py +22 -23
  1075. transformers/models/pop2piano/processing_pop2piano.py +25 -33
  1076. transformers/models/pop2piano/tokenization_pop2piano.py +15 -23
  1077. transformers/models/prompt_depth_anything/configuration_prompt_depth_anything.py +1 -0
  1078. transformers/models/prompt_depth_anything/image_processing_prompt_depth_anything.py +28 -28
  1079. transformers/models/prompt_depth_anything/image_processing_prompt_depth_anything_fast.py +14 -15
  1080. transformers/models/prompt_depth_anything/modeling_prompt_depth_anything.py +9 -10
  1081. transformers/models/prompt_depth_anything/modular_prompt_depth_anything.py +9 -10
  1082. transformers/models/prophetnet/configuration_prophetnet.py +26 -28
  1083. transformers/models/prophetnet/modeling_prophetnet.py +109 -130
  1084. transformers/models/prophetnet/tokenization_prophetnet.py +14 -16
  1085. transformers/models/pvt/configuration_pvt.py +0 -1
  1086. transformers/models/pvt/image_processing_pvt.py +17 -20
  1087. transformers/models/pvt/image_processing_pvt_fast.py +0 -1
  1088. transformers/models/pvt/modeling_pvt.py +19 -21
  1089. transformers/models/pvt_v2/configuration_pvt_v2.py +2 -4
  1090. transformers/models/pvt_v2/modeling_pvt_v2.py +21 -23
  1091. transformers/models/qwen2/configuration_qwen2.py +18 -21
  1092. transformers/models/qwen2/modeling_qwen2.py +31 -33
  1093. transformers/models/qwen2/modular_qwen2.py +11 -12
  1094. transformers/models/qwen2/tokenization_qwen2.py +2 -5
  1095. transformers/models/qwen2_5_omni/configuration_qwen2_5_omni.py +20 -23
  1096. transformers/models/qwen2_5_omni/modeling_qwen2_5_omni.py +135 -128
  1097. transformers/models/qwen2_5_omni/modular_qwen2_5_omni.py +116 -109
  1098. transformers/models/qwen2_5_omni/processing_qwen2_5_omni.py +41 -49
  1099. transformers/models/qwen2_5_vl/configuration_qwen2_5_vl.py +22 -25
  1100. transformers/models/qwen2_5_vl/modeling_qwen2_5_vl.py +94 -96
  1101. transformers/models/qwen2_5_vl/modular_qwen2_5_vl.py +46 -85
  1102. transformers/models/qwen2_5_vl/processing_qwen2_5_vl.py +7 -43
  1103. transformers/models/qwen2_audio/configuration_qwen2_audio.py +0 -1
  1104. transformers/models/qwen2_audio/modeling_qwen2_audio.py +27 -29
  1105. transformers/models/qwen2_audio/processing_qwen2_audio.py +13 -42
  1106. transformers/models/qwen2_moe/configuration_qwen2_moe.py +28 -31
  1107. transformers/models/qwen2_moe/modeling_qwen2_moe.py +36 -39
  1108. transformers/models/qwen2_moe/modular_qwen2_moe.py +7 -10
  1109. transformers/models/qwen2_vl/configuration_qwen2_vl.py +22 -24
  1110. transformers/models/qwen2_vl/image_processing_qwen2_vl.py +38 -40
  1111. transformers/models/qwen2_vl/image_processing_qwen2_vl_fast.py +8 -9
  1112. transformers/models/qwen2_vl/modeling_qwen2_vl.py +91 -92
  1113. transformers/models/qwen2_vl/processing_qwen2_vl.py +7 -44
  1114. transformers/models/qwen2_vl/video_processing_qwen2_vl.py +35 -13
  1115. transformers/models/qwen3/configuration_qwen3.py +20 -23
  1116. transformers/models/qwen3/modeling_qwen3.py +31 -34
  1117. transformers/models/qwen3/modular_qwen3.py +4 -6
  1118. transformers/models/qwen3_moe/configuration_qwen3_moe.py +25 -28
  1119. transformers/models/qwen3_moe/modeling_qwen3_moe.py +36 -39
  1120. transformers/models/qwen3_moe/modular_qwen3_moe.py +10 -13
  1121. transformers/models/qwen3_next/configuration_qwen3_next.py +31 -34
  1122. transformers/models/qwen3_next/modeling_qwen3_next.py +39 -42
  1123. transformers/models/qwen3_next/modular_qwen3_next.py +33 -34
  1124. transformers/models/qwen3_omni_moe/configuration_qwen3_omni_moe.py +85 -88
  1125. transformers/models/qwen3_omni_moe/modeling_qwen3_omni_moe.py +107 -110
  1126. transformers/models/qwen3_omni_moe/modular_qwen3_omni_moe.py +122 -148
  1127. transformers/models/qwen3_omni_moe/processing_qwen3_omni_moe.py +40 -48
  1128. transformers/models/qwen3_vl/configuration_qwen3_vl.py +16 -19
  1129. transformers/models/qwen3_vl/modeling_qwen3_vl.py +74 -77
  1130. transformers/models/qwen3_vl/modular_qwen3_vl.py +68 -105
  1131. transformers/models/qwen3_vl/processing_qwen3_vl.py +6 -42
  1132. transformers/models/qwen3_vl/video_processing_qwen3_vl.py +10 -12
  1133. transformers/models/qwen3_vl_moe/configuration_qwen3_vl_moe.py +21 -25
  1134. transformers/models/qwen3_vl_moe/modeling_qwen3_vl_moe.py +80 -83
  1135. transformers/models/qwen3_vl_moe/modular_qwen3_vl_moe.py +33 -36
  1136. transformers/models/rag/configuration_rag.py +0 -1
  1137. transformers/models/rag/modeling_rag.py +116 -118
  1138. transformers/models/rag/retrieval_rag.py +2 -4
  1139. transformers/models/rag/tokenization_rag.py +0 -50
  1140. transformers/models/recurrent_gemma/configuration_recurrent_gemma.py +21 -24
  1141. transformers/models/recurrent_gemma/modeling_recurrent_gemma.py +31 -34
  1142. transformers/models/reformer/configuration_reformer.py +0 -1
  1143. transformers/models/reformer/modeling_reformer.py +67 -68
  1144. transformers/models/reformer/tokenization_reformer.py +3 -6
  1145. transformers/models/regnet/configuration_regnet.py +0 -1
  1146. transformers/models/regnet/modeling_regnet.py +7 -9
  1147. transformers/models/rembert/configuration_rembert.py +0 -1
  1148. transformers/models/rembert/modeling_rembert.py +108 -110
  1149. transformers/models/rembert/tokenization_rembert.py +1 -4
  1150. transformers/models/resnet/configuration_resnet.py +0 -1
  1151. transformers/models/resnet/modeling_resnet.py +8 -10
  1152. transformers/models/roberta/configuration_roberta.py +0 -1
  1153. transformers/models/roberta/modeling_roberta.py +91 -93
  1154. transformers/models/roberta/modular_roberta.py +55 -58
  1155. transformers/models/roberta/tokenization_roberta.py +2 -5
  1156. transformers/models/roberta/tokenization_roberta_old.py +2 -4
  1157. transformers/models/roberta_prelayernorm/configuration_roberta_prelayernorm.py +0 -1
  1158. transformers/models/roberta_prelayernorm/modeling_roberta_prelayernorm.py +91 -93
  1159. transformers/models/roc_bert/configuration_roc_bert.py +0 -1
  1160. transformers/models/roc_bert/modeling_roc_bert.py +119 -121
  1161. transformers/models/roc_bert/tokenization_roc_bert.py +88 -94
  1162. transformers/models/roformer/configuration_roformer.py +0 -1
  1163. transformers/models/roformer/modeling_roformer.py +79 -81
  1164. transformers/models/roformer/tokenization_roformer.py +3 -6
  1165. transformers/models/roformer/tokenization_utils.py +0 -1
  1166. transformers/models/rt_detr/configuration_rt_detr.py +0 -1
  1167. transformers/models/rt_detr/configuration_rt_detr_resnet.py +0 -1
  1168. transformers/models/rt_detr/image_processing_rt_detr.py +54 -55
  1169. transformers/models/rt_detr/image_processing_rt_detr_fast.py +15 -15
  1170. transformers/models/rt_detr/modeling_rt_detr.py +80 -82
  1171. transformers/models/rt_detr/modeling_rt_detr_resnet.py +2 -4
  1172. transformers/models/rt_detr/modular_rt_detr.py +14 -14
  1173. transformers/models/rt_detr_v2/configuration_rt_detr_v2.py +0 -1
  1174. transformers/models/rt_detr_v2/modeling_rt_detr_v2.py +79 -81
  1175. transformers/models/rt_detr_v2/modular_rt_detr_v2.py +2 -4
  1176. transformers/models/rwkv/configuration_rwkv.py +0 -1
  1177. transformers/models/rwkv/modeling_rwkv.py +29 -31
  1178. transformers/models/sam/configuration_sam.py +0 -1
  1179. transformers/models/sam/image_processing_sam.py +59 -60
  1180. transformers/models/sam/image_processing_sam_fast.py +21 -22
  1181. transformers/models/sam/modeling_sam.py +33 -35
  1182. transformers/models/sam/processing_sam.py +39 -27
  1183. transformers/models/sam2/configuration_sam2.py +0 -1
  1184. transformers/models/sam2/image_processing_sam2_fast.py +14 -15
  1185. transformers/models/sam2/modeling_sam2.py +45 -47
  1186. transformers/models/sam2/modular_sam2.py +43 -44
  1187. transformers/models/sam2/processing_sam2.py +31 -47
  1188. transformers/models/sam2_video/configuration_sam2_video.py +0 -1
  1189. transformers/models/sam2_video/modeling_sam2_video.py +69 -70
  1190. transformers/models/sam2_video/modular_sam2_video.py +60 -79
  1191. transformers/models/sam2_video/processing_sam2_video.py +49 -66
  1192. transformers/models/sam2_video/video_processing_sam2_video.py +1 -4
  1193. transformers/models/sam3/configuration_sam3.py +0 -1
  1194. transformers/models/sam3/image_processing_sam3_fast.py +17 -20
  1195. transformers/models/sam3/modeling_sam3.py +54 -56
  1196. transformers/models/sam3/modular_sam3.py +3 -8
  1197. transformers/models/sam3/processing_sam3.py +29 -48
  1198. transformers/models/sam3_tracker/__init__.py +0 -1
  1199. transformers/models/sam3_tracker/configuration_sam3_tracker.py +0 -1
  1200. transformers/models/sam3_tracker/modeling_sam3_tracker.py +34 -36
  1201. transformers/models/sam3_tracker/modular_sam3_tracker.py +0 -1
  1202. transformers/models/sam3_tracker/processing_sam3_tracker.py +31 -47
  1203. transformers/models/sam3_tracker_video/__init__.py +0 -1
  1204. transformers/models/sam3_tracker_video/configuration_sam3_tracker_video.py +0 -1
  1205. transformers/models/sam3_tracker_video/modeling_sam3_tracker_video.py +70 -70
  1206. transformers/models/sam3_tracker_video/modular_sam3_tracker_video.py +2 -4
  1207. transformers/models/sam3_tracker_video/processing_sam3_tracker_video.py +50 -66
  1208. transformers/models/sam3_video/configuration_sam3_video.py +0 -1
  1209. transformers/models/sam3_video/modeling_sam3_video.py +29 -31
  1210. transformers/models/sam3_video/processing_sam3_video.py +25 -45
  1211. transformers/models/sam_hq/__init__.py +1 -1
  1212. transformers/models/sam_hq/configuration_sam_hq.py +0 -1
  1213. transformers/models/sam_hq/modeling_sam_hq.py +39 -41
  1214. transformers/models/sam_hq/modular_sam_hq.py +17 -19
  1215. transformers/models/sam_hq/{processing_samhq.py → processing_sam_hq.py} +39 -28
  1216. transformers/models/seamless_m4t/configuration_seamless_m4t.py +0 -1
  1217. transformers/models/seamless_m4t/feature_extraction_seamless_m4t.py +8 -11
  1218. transformers/models/seamless_m4t/modeling_seamless_m4t.py +180 -182
  1219. transformers/models/seamless_m4t/processing_seamless_m4t.py +18 -39
  1220. transformers/models/seamless_m4t/tokenization_seamless_m4t.py +15 -20
  1221. transformers/models/seamless_m4t_v2/configuration_seamless_m4t_v2.py +0 -1
  1222. transformers/models/seamless_m4t_v2/modeling_seamless_m4t_v2.py +193 -195
  1223. transformers/models/seed_oss/configuration_seed_oss.py +23 -25
  1224. transformers/models/seed_oss/modeling_seed_oss.py +30 -32
  1225. transformers/models/seed_oss/modular_seed_oss.py +3 -4
  1226. transformers/models/segformer/configuration_segformer.py +0 -10
  1227. transformers/models/segformer/image_processing_segformer.py +39 -42
  1228. transformers/models/segformer/image_processing_segformer_fast.py +7 -8
  1229. transformers/models/segformer/modeling_segformer.py +24 -26
  1230. transformers/models/segformer/modular_segformer.py +5 -6
  1231. transformers/models/seggpt/configuration_seggpt.py +0 -1
  1232. transformers/models/seggpt/image_processing_seggpt.py +38 -41
  1233. transformers/models/seggpt/modeling_seggpt.py +28 -30
  1234. transformers/models/sew/configuration_sew.py +0 -1
  1235. transformers/models/sew/modeling_sew.py +33 -35
  1236. transformers/models/sew/modular_sew.py +10 -12
  1237. transformers/models/sew_d/configuration_sew_d.py +0 -1
  1238. transformers/models/sew_d/modeling_sew_d.py +28 -30
  1239. transformers/models/shieldgemma2/configuration_shieldgemma2.py +0 -1
  1240. transformers/models/shieldgemma2/modeling_shieldgemma2.py +15 -17
  1241. transformers/models/shieldgemma2/processing_shieldgemma2.py +3 -5
  1242. transformers/models/siglip/configuration_siglip.py +0 -1
  1243. transformers/models/siglip/image_processing_siglip.py +17 -20
  1244. transformers/models/siglip/image_processing_siglip_fast.py +0 -1
  1245. transformers/models/siglip/modeling_siglip.py +38 -39
  1246. transformers/models/siglip/processing_siglip.py +2 -14
  1247. transformers/models/siglip/tokenization_siglip.py +6 -7
  1248. transformers/models/siglip2/configuration_siglip2.py +1 -1
  1249. transformers/models/siglip2/image_processing_siglip2.py +15 -16
  1250. transformers/models/siglip2/image_processing_siglip2_fast.py +4 -5
  1251. transformers/models/siglip2/modeling_siglip2.py +54 -54
  1252. transformers/models/siglip2/modular_siglip2.py +23 -25
  1253. transformers/models/siglip2/processing_siglip2.py +2 -14
  1254. transformers/models/smollm3/configuration_smollm3.py +23 -26
  1255. transformers/models/smollm3/modeling_smollm3.py +31 -34
  1256. transformers/models/smollm3/modular_smollm3.py +27 -29
  1257. transformers/models/smolvlm/configuration_smolvlm.py +1 -1
  1258. transformers/models/smolvlm/image_processing_smolvlm.py +42 -43
  1259. transformers/models/smolvlm/image_processing_smolvlm_fast.py +12 -12
  1260. transformers/models/smolvlm/modeling_smolvlm.py +51 -52
  1261. transformers/models/smolvlm/modular_smolvlm.py +15 -17
  1262. transformers/models/smolvlm/processing_smolvlm.py +15 -76
  1263. transformers/models/smolvlm/video_processing_smolvlm.py +7 -8
  1264. transformers/models/speech_encoder_decoder/configuration_speech_encoder_decoder.py +0 -1
  1265. transformers/models/speech_encoder_decoder/modeling_speech_encoder_decoder.py +20 -23
  1266. transformers/models/speech_to_text/configuration_speech_to_text.py +0 -1
  1267. transformers/models/speech_to_text/feature_extraction_speech_to_text.py +10 -13
  1268. transformers/models/speech_to_text/modeling_speech_to_text.py +52 -54
  1269. transformers/models/speech_to_text/processing_speech_to_text.py +4 -30
  1270. transformers/models/speech_to_text/tokenization_speech_to_text.py +5 -6
  1271. transformers/models/speecht5/configuration_speecht5.py +0 -1
  1272. transformers/models/speecht5/feature_extraction_speecht5.py +16 -37
  1273. transformers/models/speecht5/modeling_speecht5.py +172 -174
  1274. transformers/models/speecht5/number_normalizer.py +0 -1
  1275. transformers/models/speecht5/processing_speecht5.py +3 -37
  1276. transformers/models/speecht5/tokenization_speecht5.py +4 -5
  1277. transformers/models/splinter/configuration_splinter.py +0 -1
  1278. transformers/models/splinter/modeling_splinter.py +54 -56
  1279. transformers/models/splinter/tokenization_splinter.py +2 -4
  1280. transformers/models/squeezebert/configuration_squeezebert.py +0 -1
  1281. transformers/models/squeezebert/modeling_squeezebert.py +60 -62
  1282. transformers/models/squeezebert/tokenization_squeezebert.py +0 -1
  1283. transformers/models/stablelm/configuration_stablelm.py +20 -23
  1284. transformers/models/stablelm/modeling_stablelm.py +39 -42
  1285. transformers/models/starcoder2/configuration_starcoder2.py +19 -22
  1286. transformers/models/starcoder2/modeling_starcoder2.py +33 -36
  1287. transformers/models/starcoder2/modular_starcoder2.py +13 -15
  1288. transformers/models/superglue/configuration_superglue.py +3 -3
  1289. transformers/models/superglue/image_processing_superglue.py +15 -15
  1290. transformers/models/superglue/image_processing_superglue_fast.py +4 -5
  1291. transformers/models/superglue/modeling_superglue.py +32 -33
  1292. transformers/models/superpoint/image_processing_superpoint.py +15 -15
  1293. transformers/models/superpoint/image_processing_superpoint_fast.py +4 -5
  1294. transformers/models/superpoint/modeling_superpoint.py +13 -14
  1295. transformers/models/swiftformer/configuration_swiftformer.py +0 -1
  1296. transformers/models/swiftformer/modeling_swiftformer.py +12 -14
  1297. transformers/models/swin/configuration_swin.py +0 -1
  1298. transformers/models/swin/modeling_swin.py +58 -70
  1299. transformers/models/swin2sr/configuration_swin2sr.py +0 -1
  1300. transformers/models/swin2sr/image_processing_swin2sr.py +10 -13
  1301. transformers/models/swin2sr/image_processing_swin2sr_fast.py +2 -5
  1302. transformers/models/swin2sr/modeling_swin2sr.py +26 -28
  1303. transformers/models/swinv2/configuration_swinv2.py +0 -1
  1304. transformers/models/swinv2/modeling_swinv2.py +55 -67
  1305. transformers/models/switch_transformers/configuration_switch_transformers.py +0 -1
  1306. transformers/models/switch_transformers/modeling_switch_transformers.py +32 -33
  1307. transformers/models/switch_transformers/modular_switch_transformers.py +29 -30
  1308. transformers/models/t5/configuration_t5.py +0 -1
  1309. transformers/models/t5/modeling_t5.py +75 -77
  1310. transformers/models/t5/tokenization_t5.py +1 -3
  1311. transformers/models/t5gemma/configuration_t5gemma.py +33 -34
  1312. transformers/models/t5gemma/modeling_t5gemma.py +96 -99
  1313. transformers/models/t5gemma/modular_t5gemma.py +117 -118
  1314. transformers/models/t5gemma2/configuration_t5gemma2.py +53 -54
  1315. transformers/models/t5gemma2/modeling_t5gemma2.py +96 -99
  1316. transformers/models/t5gemma2/modular_t5gemma2.py +134 -135
  1317. transformers/models/table_transformer/configuration_table_transformer.py +0 -1
  1318. transformers/models/table_transformer/modeling_table_transformer.py +46 -48
  1319. transformers/models/tapas/configuration_tapas.py +0 -1
  1320. transformers/models/tapas/modeling_tapas.py +64 -66
  1321. transformers/models/tapas/tokenization_tapas.py +115 -153
  1322. transformers/models/textnet/configuration_textnet.py +0 -1
  1323. transformers/models/textnet/image_processing_textnet.py +22 -25
  1324. transformers/models/textnet/image_processing_textnet_fast.py +5 -6
  1325. transformers/models/textnet/modeling_textnet.py +13 -14
  1326. transformers/models/time_series_transformer/configuration_time_series_transformer.py +5 -8
  1327. transformers/models/time_series_transformer/modeling_time_series_transformer.py +79 -81
  1328. transformers/models/timesfm/configuration_timesfm.py +0 -1
  1329. transformers/models/timesfm/modeling_timesfm.py +17 -19
  1330. transformers/models/timesfm/modular_timesfm.py +16 -18
  1331. transformers/models/timesformer/configuration_timesformer.py +0 -1
  1332. transformers/models/timesformer/modeling_timesformer.py +13 -16
  1333. transformers/models/timm_backbone/configuration_timm_backbone.py +0 -1
  1334. transformers/models/timm_backbone/modeling_timm_backbone.py +4 -6
  1335. transformers/models/timm_wrapper/configuration_timm_wrapper.py +2 -3
  1336. transformers/models/timm_wrapper/image_processing_timm_wrapper.py +4 -5
  1337. transformers/models/timm_wrapper/modeling_timm_wrapper.py +13 -15
  1338. transformers/models/trocr/configuration_trocr.py +0 -1
  1339. transformers/models/trocr/modeling_trocr.py +38 -40
  1340. transformers/models/trocr/processing_trocr.py +5 -25
  1341. transformers/models/tvp/configuration_tvp.py +0 -1
  1342. transformers/models/tvp/image_processing_tvp.py +50 -52
  1343. transformers/models/tvp/image_processing_tvp_fast.py +9 -10
  1344. transformers/models/tvp/modeling_tvp.py +25 -27
  1345. transformers/models/tvp/processing_tvp.py +2 -14
  1346. transformers/models/udop/configuration_udop.py +0 -1
  1347. transformers/models/udop/modeling_udop.py +63 -66
  1348. transformers/models/udop/processing_udop.py +7 -26
  1349. transformers/models/udop/tokenization_udop.py +80 -93
  1350. transformers/models/umt5/configuration_umt5.py +0 -1
  1351. transformers/models/umt5/modeling_umt5.py +80 -81
  1352. transformers/models/unispeech/configuration_unispeech.py +0 -1
  1353. transformers/models/unispeech/modeling_unispeech.py +47 -49
  1354. transformers/models/unispeech/modular_unispeech.py +20 -22
  1355. transformers/models/unispeech_sat/configuration_unispeech_sat.py +0 -1
  1356. transformers/models/unispeech_sat/modeling_unispeech_sat.py +63 -65
  1357. transformers/models/unispeech_sat/modular_unispeech_sat.py +21 -23
  1358. transformers/models/univnet/feature_extraction_univnet.py +14 -14
  1359. transformers/models/univnet/modeling_univnet.py +7 -8
  1360. transformers/models/upernet/configuration_upernet.py +0 -1
  1361. transformers/models/upernet/modeling_upernet.py +10 -13
  1362. transformers/models/vaultgemma/__init__.py +0 -1
  1363. transformers/models/vaultgemma/configuration_vaultgemma.py +24 -26
  1364. transformers/models/vaultgemma/modeling_vaultgemma.py +34 -36
  1365. transformers/models/vaultgemma/modular_vaultgemma.py +29 -31
  1366. transformers/models/video_llama_3/image_processing_video_llama_3.py +40 -40
  1367. transformers/models/video_llama_3/image_processing_video_llama_3_fast.py +8 -8
  1368. transformers/models/video_llama_3/modeling_video_llama_3.py +66 -66
  1369. transformers/models/video_llama_3/modular_video_llama_3.py +101 -112
  1370. transformers/models/video_llama_3/processing_video_llama_3.py +5 -39
  1371. transformers/models/video_llama_3/video_processing_video_llama_3.py +18 -18
  1372. transformers/models/video_llava/configuration_video_llava.py +0 -1
  1373. transformers/models/video_llava/image_processing_video_llava.py +35 -38
  1374. transformers/models/video_llava/modeling_video_llava.py +52 -54
  1375. transformers/models/video_llava/processing_video_llava.py +38 -78
  1376. transformers/models/video_llava/video_processing_video_llava.py +0 -1
  1377. transformers/models/videomae/configuration_videomae.py +0 -1
  1378. transformers/models/videomae/image_processing_videomae.py +31 -34
  1379. transformers/models/videomae/modeling_videomae.py +13 -15
  1380. transformers/models/videomae/video_processing_videomae.py +0 -1
  1381. transformers/models/vilt/configuration_vilt.py +0 -1
  1382. transformers/models/vilt/image_processing_vilt.py +29 -30
  1383. transformers/models/vilt/image_processing_vilt_fast.py +9 -10
  1384. transformers/models/vilt/modeling_vilt.py +76 -78
  1385. transformers/models/vilt/processing_vilt.py +2 -14
  1386. transformers/models/vipllava/configuration_vipllava.py +0 -1
  1387. transformers/models/vipllava/modeling_vipllava.py +38 -39
  1388. transformers/models/vipllava/modular_vipllava.py +30 -32
  1389. transformers/models/vision_encoder_decoder/configuration_vision_encoder_decoder.py +0 -1
  1390. transformers/models/vision_encoder_decoder/modeling_vision_encoder_decoder.py +18 -21
  1391. transformers/models/vision_text_dual_encoder/configuration_vision_text_dual_encoder.py +0 -1
  1392. transformers/models/vision_text_dual_encoder/modeling_vision_text_dual_encoder.py +18 -21
  1393. transformers/models/vision_text_dual_encoder/processing_vision_text_dual_encoder.py +2 -16
  1394. transformers/models/visual_bert/configuration_visual_bert.py +0 -1
  1395. transformers/models/visual_bert/modeling_visual_bert.py +90 -92
  1396. transformers/models/vit/configuration_vit.py +0 -1
  1397. transformers/models/vit/image_processing_vit.py +19 -22
  1398. transformers/models/vit/image_processing_vit_fast.py +0 -1
  1399. transformers/models/vit/modeling_vit.py +13 -15
  1400. transformers/models/vit_mae/configuration_vit_mae.py +0 -1
  1401. transformers/models/vit_mae/modeling_vit_mae.py +21 -23
  1402. transformers/models/vit_msn/configuration_vit_msn.py +0 -1
  1403. transformers/models/vit_msn/modeling_vit_msn.py +10 -12
  1404. transformers/models/vitdet/configuration_vitdet.py +0 -1
  1405. transformers/models/vitdet/modeling_vitdet.py +12 -14
  1406. transformers/models/vitmatte/configuration_vitmatte.py +1 -4
  1407. transformers/models/vitmatte/image_processing_vitmatte.py +15 -18
  1408. transformers/models/vitmatte/image_processing_vitmatte_fast.py +14 -15
  1409. transformers/models/vitmatte/modeling_vitmatte.py +9 -11
  1410. transformers/models/vitpose/configuration_vitpose.py +3 -6
  1411. transformers/models/vitpose/image_processing_vitpose.py +24 -25
  1412. transformers/models/vitpose/image_processing_vitpose_fast.py +9 -10
  1413. transformers/models/vitpose/modeling_vitpose.py +10 -12
  1414. transformers/models/vitpose_backbone/configuration_vitpose_backbone.py +0 -1
  1415. transformers/models/vitpose_backbone/modeling_vitpose_backbone.py +8 -10
  1416. transformers/models/vits/configuration_vits.py +0 -1
  1417. transformers/models/vits/modeling_vits.py +34 -35
  1418. transformers/models/vits/tokenization_vits.py +3 -4
  1419. transformers/models/vivit/configuration_vivit.py +0 -1
  1420. transformers/models/vivit/image_processing_vivit.py +36 -39
  1421. transformers/models/vivit/modeling_vivit.py +5 -7
  1422. transformers/models/vjepa2/__init__.py +0 -1
  1423. transformers/models/vjepa2/configuration_vjepa2.py +0 -1
  1424. transformers/models/vjepa2/modeling_vjepa2.py +30 -32
  1425. transformers/models/vjepa2/video_processing_vjepa2.py +0 -1
  1426. transformers/models/voxtral/__init__.py +0 -1
  1427. transformers/models/voxtral/configuration_voxtral.py +0 -1
  1428. transformers/models/voxtral/modeling_voxtral.py +17 -25
  1429. transformers/models/voxtral/modular_voxtral.py +10 -19
  1430. transformers/models/voxtral/processing_voxtral.py +25 -48
  1431. transformers/models/wav2vec2/configuration_wav2vec2.py +0 -1
  1432. transformers/models/wav2vec2/feature_extraction_wav2vec2.py +7 -10
  1433. transformers/models/wav2vec2/modeling_wav2vec2.py +67 -122
  1434. transformers/models/wav2vec2/processing_wav2vec2.py +6 -35
  1435. transformers/models/wav2vec2/tokenization_wav2vec2.py +20 -332
  1436. transformers/models/wav2vec2_bert/configuration_wav2vec2_bert.py +0 -1
  1437. transformers/models/wav2vec2_bert/modeling_wav2vec2_bert.py +49 -52
  1438. transformers/models/wav2vec2_bert/modular_wav2vec2_bert.py +45 -48
  1439. transformers/models/wav2vec2_bert/processing_wav2vec2_bert.py +6 -35
  1440. transformers/models/wav2vec2_conformer/configuration_wav2vec2_conformer.py +0 -1
  1441. transformers/models/wav2vec2_conformer/modeling_wav2vec2_conformer.py +62 -65
  1442. transformers/models/wav2vec2_conformer/modular_wav2vec2_conformer.py +15 -18
  1443. transformers/models/wav2vec2_phoneme/tokenization_wav2vec2_phoneme.py +16 -17
  1444. transformers/models/wav2vec2_with_lm/processing_wav2vec2_with_lm.py +36 -55
  1445. transformers/models/wavlm/configuration_wavlm.py +0 -1
  1446. transformers/models/wavlm/modeling_wavlm.py +45 -48
  1447. transformers/models/wavlm/modular_wavlm.py +4 -5
  1448. transformers/models/whisper/configuration_whisper.py +0 -1
  1449. transformers/models/whisper/english_normalizer.py +3 -4
  1450. transformers/models/whisper/feature_extraction_whisper.py +9 -24
  1451. transformers/models/whisper/generation_whisper.py +26 -48
  1452. transformers/models/whisper/modeling_whisper.py +68 -70
  1453. transformers/models/whisper/processing_whisper.py +3 -20
  1454. transformers/models/whisper/tokenization_whisper.py +9 -30
  1455. transformers/models/x_clip/configuration_x_clip.py +0 -1
  1456. transformers/models/x_clip/modeling_x_clip.py +68 -69
  1457. transformers/models/x_clip/processing_x_clip.py +2 -14
  1458. transformers/models/xcodec/configuration_xcodec.py +4 -6
  1459. transformers/models/xcodec/modeling_xcodec.py +15 -17
  1460. transformers/models/xglm/configuration_xglm.py +0 -1
  1461. transformers/models/xglm/modeling_xglm.py +49 -55
  1462. transformers/models/xglm/tokenization_xglm.py +1 -4
  1463. transformers/models/xlm/configuration_xlm.py +0 -1
  1464. transformers/models/xlm/modeling_xlm.py +126 -130
  1465. transformers/models/xlm/tokenization_xlm.py +3 -5
  1466. transformers/models/xlm_roberta/configuration_xlm_roberta.py +0 -1
  1467. transformers/models/xlm_roberta/modeling_xlm_roberta.py +90 -92
  1468. transformers/models/xlm_roberta/modular_xlm_roberta.py +50 -53
  1469. transformers/models/xlm_roberta/tokenization_xlm_roberta.py +1 -4
  1470. transformers/models/xlm_roberta_xl/configuration_xlm_roberta_xl.py +0 -1
  1471. transformers/models/xlm_roberta_xl/modeling_xlm_roberta_xl.py +91 -93
  1472. transformers/models/xlm_roberta_xl/modular_xlm_roberta_xl.py +67 -70
  1473. transformers/models/xlnet/configuration_xlnet.py +0 -11
  1474. transformers/models/xlnet/modeling_xlnet.py +149 -162
  1475. transformers/models/xlnet/tokenization_xlnet.py +1 -4
  1476. transformers/models/xlstm/configuration_xlstm.py +3 -5
  1477. transformers/models/xlstm/modeling_xlstm.py +62 -65
  1478. transformers/models/xmod/configuration_xmod.py +0 -1
  1479. transformers/models/xmod/modeling_xmod.py +98 -100
  1480. transformers/models/yolos/configuration_yolos.py +0 -1
  1481. transformers/models/yolos/image_processing_yolos.py +60 -62
  1482. transformers/models/yolos/image_processing_yolos_fast.py +18 -18
  1483. transformers/models/yolos/modeling_yolos.py +12 -14
  1484. transformers/models/yolos/modular_yolos.py +2 -4
  1485. transformers/models/yoso/configuration_yoso.py +0 -1
  1486. transformers/models/yoso/modeling_yoso.py +60 -62
  1487. transformers/models/zamba/configuration_zamba.py +0 -1
  1488. transformers/models/zamba/modeling_zamba.py +68 -69
  1489. transformers/models/zamba2/configuration_zamba2.py +36 -37
  1490. transformers/models/zamba2/modeling_zamba2.py +84 -87
  1491. transformers/models/zamba2/modular_zamba2.py +43 -45
  1492. transformers/models/zoedepth/configuration_zoedepth.py +0 -1
  1493. transformers/models/zoedepth/image_processing_zoedepth.py +28 -29
  1494. transformers/models/zoedepth/image_processing_zoedepth_fast.py +11 -12
  1495. transformers/models/zoedepth/modeling_zoedepth.py +14 -16
  1496. transformers/pipelines/__init__.py +50 -49
  1497. transformers/pipelines/any_to_any.py +14 -22
  1498. transformers/pipelines/audio_utils.py +1 -2
  1499. transformers/pipelines/base.py +12 -16
  1500. transformers/pipelines/deprecated/__init__.py +0 -1
  1501. transformers/pipelines/image_text_to_text.py +0 -1
  1502. transformers/pipelines/image_to_text.py +4 -44
  1503. transformers/pipelines/question_answering.py +4 -43
  1504. transformers/pipelines/text_classification.py +1 -14
  1505. transformers/pipelines/token_classification.py +1 -22
  1506. transformers/pipelines/video_classification.py +1 -9
  1507. transformers/pipelines/zero_shot_audio_classification.py +0 -1
  1508. transformers/pipelines/zero_shot_classification.py +0 -6
  1509. transformers/pipelines/zero_shot_image_classification.py +0 -7
  1510. transformers/processing_utils.py +95 -95
  1511. transformers/quantizers/base.py +10 -0
  1512. transformers/quantizers/quantizer_quark.py +0 -1
  1513. transformers/quantizers/quantizer_torchao.py +3 -3
  1514. transformers/testing_utils.py +3 -37
  1515. transformers/tokenization_mistral_common.py +554 -903
  1516. transformers/tokenization_utils_base.py +109 -122
  1517. transformers/tokenization_utils_sentencepiece.py +5 -6
  1518. transformers/tokenization_utils_tokenizers.py +5 -5
  1519. transformers/trainer.py +6 -9
  1520. transformers/trainer_jit_checkpoint.py +1 -2
  1521. transformers/training_args.py +3 -3
  1522. transformers/utils/attention_visualizer.py +1 -1
  1523. transformers/utils/auto_docstring.py +564 -12
  1524. transformers/utils/doc.py +1 -1
  1525. transformers/utils/dummy_pt_objects.py +0 -42
  1526. transformers/utils/generic.py +1 -1
  1527. transformers/utils/loading_report.py +3 -3
  1528. transformers/utils/quantization_config.py +8 -10
  1529. transformers/video_processing_utils.py +19 -20
  1530. transformers/video_utils.py +18 -22
  1531. {transformers-5.0.0rc2.dist-info → transformers-5.0.0rc3.dist-info}/METADATA +19 -19
  1532. transformers-5.0.0rc3.dist-info/RECORD +2067 -0
  1533. transformers-5.0.0rc2.dist-info/RECORD +0 -2042
  1534. {transformers-5.0.0rc2.dist-info → transformers-5.0.0rc3.dist-info}/WHEEL +0 -0
  1535. {transformers-5.0.0rc2.dist-info → transformers-5.0.0rc3.dist-info}/entry_points.txt +0 -0
  1536. {transformers-5.0.0rc2.dist-info → transformers-5.0.0rc3.dist-info}/licenses/LICENSE +0 -0
  1537. {transformers-5.0.0rc2.dist-info → transformers-5.0.0rc3.dist-info}/top_level.txt +0 -0
@@ -1,4 +1,3 @@
-# coding=utf-8
 # Copyright 2025 The HuggingFace Inc. team
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -120,7 +119,7 @@ class RequestState:

     # Required fields
     request_id: str
-    initial_tokens: list[int]  # Initial prompt tokens
+    initial_tokens: list[int]  # Initial prompt tokens  # TODO: rename this as prefill tokens
     # Optional fields
     record_timestamps: bool = False  # Whether to record timestamps for the generated tokens
     num_children: int = 0  # Number of children requests
@@ -138,6 +137,8 @@ class RequestState:
     error: str | None = None  # Error message if the request failed
     lifespan: tuple[float, float] = (-1, -1)  # (time request was no longer pending, time request finished)
     _timestamps: list[float] = field(default_factory=list)  # Timestamps of the generated tokens
+    _true_initial_tokens: int = 0  # The true number of initial tokens, useful when soft resetting requests
+    # TODO: remove the attribute above to _num_initial_tokens once initial_tokens is renamed

     @property
     def status(self) -> RequestStatus:
@@ -221,6 +222,9 @@ class RequestState:

     def to_generation_output(self):
         """Convert the request state to a GenerationOutput object."""
+        if self._true_initial_tokens:
+            self.generated_tokens = self.initial_tokens[self._true_initial_tokens :] + self.generated_tokens
+            self.initial_tokens = self.initial_tokens[: self._true_initial_tokens]
         return GenerationOutput(
             request_id=self.request_id,
             prompt_ids=self.initial_tokens,
@@ -243,14 +247,31 @@ class RequestState:
             generated_tokens=self.generated_tokens[:],
             allocated_blocks=self.allocated_blocks,
             position_offset=self.position_offset,
-            status=self.status,
+            _status=self.status,
             max_new_tokens=self.max_new_tokens,
             eos_token_id=self.eos_token_id,
             streaming=self.streaming,
             created_time=t,
             lifespan=(t, -1),
-            timestamps=None if self.timestamps is None else self.timestamps[:],
+            _timestamps=None if self.timestamps is None else self.timestamps[:],
             error=self.error,
             record_timestamps=self.record_timestamps,
         )
         return new_request
+
+    def create_equivalent_initial_request(self) -> "RequestState":
+        """Creates an equivalent new request by removing the generated tokens and adding them to the initial prompt. The
+        created request has THE SAME request_id. Notably, we can retrieve the original request from the created one with
+        the _true_initial_tokens attribute."""
+        new_state = RequestState(
+            request_id=self.request_id,
+            initial_tokens=self.initial_tokens + self.generated_tokens,
+            num_children=self.num_children,
+            record_timestamps=self.record_timestamps,
+            tokens_to_process=self.initial_tokens + self.generated_tokens,
+            max_new_tokens=self.max_new_tokens - len(self.generated_tokens),
+            eos_token_id=self.eos_token_id,
+            streaming=self.streaming,
+        )
+        new_state._true_initial_tokens = self._true_initial_tokens + len(self.initial_tokens)
+        return new_state
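Note on the RequestState hunks above: the new _true_initial_tokens field and create_equivalent_initial_request() implement a soft reset, where generated tokens are folded back into the prompt so the request can be re-queued, and to_generation_output() later splits prompt and generations apart again. The snippet below is a minimal illustrative sketch of that round trip, not the library code: PromptState, soft_reset and split_back are made-up stand-ins, and the real class keeps slightly different cumulative bookkeeping.

from dataclasses import dataclass, field


@dataclass
class PromptState:
    initial_tokens: list[int]  # prompt, plus any re-queued generations after a soft reset
    generated_tokens: list[int] = field(default_factory=list)
    true_initial_tokens: int = 0  # length of the original prompt; 0 if the request was never soft-reset


def soft_reset(state: PromptState) -> PromptState:
    # Fold the generated tokens back into the prompt, remembering the original prompt length.
    return PromptState(
        initial_tokens=state.initial_tokens + state.generated_tokens,
        true_initial_tokens=state.true_initial_tokens or len(state.initial_tokens),
    )


def split_back(state: PromptState) -> tuple[list[int], list[int]]:
    # Recover (prompt, generations) no matter how many soft resets happened in between.
    if state.true_initial_tokens:
        prompt = state.initial_tokens[: state.true_initial_tokens]
        generated = state.initial_tokens[state.true_initial_tokens :] + state.generated_tokens
        return prompt, generated
    return state.initial_tokens, state.generated_tokens


original = PromptState(initial_tokens=[1, 2, 3], generated_tokens=[4, 5])
requeued = soft_reset(original)      # prompt becomes [1, 2, 3, 4, 5], true_initial_tokens == 3
requeued.generated_tokens = [6, 7]   # tokens produced after the reset
assert split_back(requeued) == ([1, 2, 3], [4, 5, 6, 7])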
@@ -1,4 +1,3 @@
-# coding=utf-8
 # Copyright 2025 The HuggingFace Inc. team
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -18,7 +17,7 @@ from collections import deque

 from ...utils.metrics import attach_tracer, traced
 from .cache import PagedAttentionCache
-from .requests import RequestState, RequestStatus
+from .requests import RequestState, RequestStatus, logger


 class Scheduler(ABC):
@@ -37,6 +36,10 @@ class Scheduler(ABC):
         self._cancellation_lock = threading.Lock()
         self._requests_to_cancel: set[str] = set()
         self._requests_to_fork: list[RequestState] = []
+        # This state is used to avoid infinite loops when offloading requests
+        self.block_new_requests = False
+        # This is to compute the cache used by a new request being scheduled
+        self.cache_budget_module = None if cache.num_full_attention_groups else cache.config.sliding_window

     @traced
     def add_waiting_request(self, state: RequestState):
@@ -52,10 +55,11 @@ class Scheduler(ABC):
             self.waiting_requests_order.append(state.request_id)

     @abstractmethod
-    def schedule_batch(self, token_budget: int) -> list[RequestState]:
-        """Schedules requests for the next batch based on available token budget. This method selects which requests
-        should be processed in the current batch, considering the token budget and the scheduler's prioritization rules.
-        The token_budget is the maximum number of tokens that can be processed in this batch."""
+    def schedule_batch(self, token_budget: int, cache_budget: int) -> list[RequestState]:
+        """Schedules requests for the next batch based on available token and cache budgets. This method selects which
+        requests should be processed in the current batch, considering the budgets and the scheduler's prioritization
+        rules. The token_budget is the maximum number of tokens that can be processed in a batch, and the cache_budget
+        is the maximum number of KV cache entries that can be read in a batch."""

     @traced
     def has_pending_requests(self) -> bool:
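Note on the schedule_batch signature change above: schedulers now receive both a token budget (new tokens computed per forward pass) and a cache budget (KV-cache entries read per batch). A toy greedy loop illustrating that contract is sketched below; ToyRequest, new_tokens, cached_tokens and toy_schedule are made-up names for illustration only, and the actual FIFO logic appears in the hunks further down.

from dataclasses import dataclass


@dataclass
class ToyRequest:
    request_id: str
    new_tokens: int     # tokens this request would add to the forward pass (a prefill chunk, or 1 when decoding)
    cached_tokens: int  # KV-cache entries already stored that attention would read back for this request


def toy_schedule(candidates: list[ToyRequest], token_budget: int, cache_budget: int) -> list[str]:
    scheduled: list[str] = []
    for req in candidates:
        if req.new_tokens > token_budget or req.cached_tokens > cache_budget:
            continue  # the real scheduler may also break, defer, or chunk the prefill instead of skipping
        scheduled.append(req.request_id)
        token_budget -= req.new_tokens
        cache_budget -= req.cached_tokens
    return scheduled


reqs = [ToyRequest("a", 128, 0), ToyRequest("b", 1, 1024), ToyRequest("c", 512, 0)]
assert toy_schedule(reqs, token_budget=256, cache_budget=2048) == ["a", "b"]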
@@ -63,14 +67,13 @@ class Scheduler(ABC):
         return len(self.active_requests) or len(self.waiting_requests)

     @traced
-    def finish_request(self, request_id: str, evict_from_cache: bool = True):
+    def finish_request(self, request_id: str, evict_from_cache: bool = True) -> None:
         """Completes processing of a request and optionally frees its allocated cache blocks. This method is called
         when a request has finished generation or encountered an error.
         """
         if evict_from_cache:
             self.cache.free_blocks(request_id)
-        if request_id in self.active_requests:
-            del self.active_requests[request_id]
+        self.active_requests.pop(request_id, None)

     @traced
     def get_active_request_static_outputs(self, request_id: str) -> list[int]:
@@ -90,10 +93,8 @@ class Scheduler(ABC):
         """Remove all cancelled requests from active and waiting queues."""
         with self._cancellation_lock:
             for request_id in self._requests_to_cancel:
-                if request_id in self.active_requests:
-                    del self.active_requests[request_id]
-                if request_id in self.waiting_requests:
-                    del self.waiting_requests[request_id]
+                self.active_requests.pop(request_id, None)
+                self.waiting_requests.pop(request_id, None)
                 if request_id in self.waiting_requests_order:
                     self.waiting_requests_order.remove(request_id)
                 self.cache.free_blocks(request_id)
@@ -107,7 +108,7 @@ class Scheduler(ABC):
107
108
  )
108
109
 
109
110
  @traced
110
- def _allocate_blocks_if_needed(self, state: RequestState) -> bool:
111
+ def _allocate_blocks_if_needed(self, state: RequestState, len_next_tokens: int) -> bool:
111
112
  """Allocate additional cache blocks for a request if the currently allocated blocks are insufficient to
112
113
  accommodate the next tokens. It calculates how many blocks are needed based on the request's current
113
114
  cache occupancy and the number of tokens to be processed. The allocation itself is done by the CacheAllocator
@@ -116,20 +117,16 @@ class Scheduler(ABC):
116
117
  # 1. we check that the occupancy is less than the requested length
117
118
  # 2. we allocate enough blocks to cover the requested length
118
119
  current_len = state.current_len()
119
- len_next_tokens = len(state.tokens_to_process)
120
120
  occupancy = state.allocated_blocks * self.cache.block_size - current_len
121
121
  if occupancy < len_next_tokens or state.allocated_blocks == 0:
122
122
  blocks_needed = ((len_next_tokens - occupancy + 1) // self.cache.block_size) + 1
123
- allocated = self.cache.allocate_blocks(blocks_needed, state)
123
+ allocated = self.cache.allocate_blocks(blocks_needed, state.request_id, state.allocated_blocks)
124
124
  if allocated is None:
125
125
  return False
126
126
  state.allocated_blocks += allocated
127
127
  return True
128
128
 
129
- @traced(span_name="prepare_request")
130
- def _prepare_request_for_processing(
131
- self, state: RequestState, token_budget: int, request_ids_to_remove_from_waiting: set[str]
132
- ) -> None:
129
+ def _infer_request_tokens(self, state: RequestState, request_ids_to_remove_from_waiting: set[str]) -> list[int]:
133
130
  """Prepares a request for processing in the current batch. If prefix sharing is enabled, and the request was
134
131
  pending, this is where we look for a prefix match and split the request if found."""
135
132
  # If prefix sharing is enabled, we look for a prefix match and split the request if found
@@ -139,6 +136,8 @@ class Scheduler(ABC):
139
136
  self.active_requests[state.request_id] = state
140
137
  request_ids_to_remove_from_waiting.add(state.request_id)
141
138
  state.status = RequestStatus.SPLIT_PENDING_REMAINDER
139
+ # We keep track of the number of allocated blocks to avoid double allocation
140
+ state.allocated_blocks += prefill_length // self.cache.block_size
142
141
  # Even if we match the whole request, we keep at least 1 token to start decoding
143
142
  prefill_length = min(prefill_length, len(state.tokens_to_process) - 1)
144
143
  state.remaining_prefill_tokens = state.tokens_to_process[prefill_length:]
@@ -151,8 +150,19 @@ class Scheduler(ABC):
  # Otherwise, the tokens to process are the prompt ids, which are the full prompt or the last predicted tokens
  else:
  request_tokens = state.tokens_to_process
-
- # If the request has one or more children we make sure not to prefill it entrirely
+ return request_tokens
+
+ def _schedule_request(
+ self,
+ state: RequestState,
+ request_tokens: list[int],
+ token_budget: int,
+ request_ids_to_remove_from_waiting: set[str],
+ ) -> None:
+ """Schedules a request for the current batch, updating the request's status according to the token budget left.
+ If the request has children (for parallel decoding), it ensures at least one token remains before the request is
+ forked."""
+ # If the request has one or more children we make sure not to prefill it entirely
  if state.num_children > 0 and token_budget >= len(request_tokens) - 1:
  token_budget = len(request_tokens) - 1
  self._requests_to_fork.append(state)
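Editor's note: `_schedule_request` caps the budget whenever a request has children, so at least one prompt token is left unprocessed before the request is forked for parallel decoding. A minimal sketch of the capping rule with hypothetical numbers:

```python
request_tokens = list(range(10))   # 10 prompt tokens still to prefill
token_budget = 64                  # plenty of budget this step
num_children = 2                   # request will be forked for parallel decoding

if num_children > 0 and token_budget >= len(request_tokens) - 1:
    token_budget = len(request_tokens) - 1

print(token_budget)  # 9: one token is held back so the forked children have something left to decode from
```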
@@ -196,7 +206,7 @@ class FIFOScheduler(Scheduler):
  self.safety_margin = safety_margin
 
  @traced
- def schedule_batch(self, token_budget: int) -> list[RequestState]:
+ def schedule_batch(self, token_budget: int, cache_budget: int) -> list[RequestState] | None:
  priority_states: list[RequestState] = []
  second_priority_states: list[RequestState] = []
  scheduled_requests = []
@@ -208,33 +218,60 @@ class FIFOScheduler(Scheduler):
  second_priority_states.append(state)
 
  # Add waiting requests to second priority
- for req_id in self.waiting_requests_order:
- second_priority_states.append(self.waiting_requests[req_id])
+ if not self.block_new_requests:
+ for req_id in self.waiting_requests_order:
+ second_priority_states.append(self.waiting_requests[req_id])
 
  candidates = priority_states + second_priority_states
  request_ids_to_remove_from_waiting = set()
  safety_margins = self.safety_margin * self.cache.num_blocks
 
+ one_allocation_failed = False
+
  for state in candidates:
  # If we are out the safety margin, we only accept decoding requests or the first prefill request
  num_free_blocks = self.cache.get_num_free_blocks()
  outside_safety_margin = num_free_blocks < safety_margins
  if outside_safety_margin and scheduled_requests and state.status != RequestStatus.DECODING:
+ logger.info(
+ f"Outside safety margin, breaking out of scheduling loop. {num_free_blocks = } {safety_margins = }"
+ )
  break
 
- self._prepare_request_for_processing(state, token_budget, request_ids_to_remove_from_waiting)
- request_len = len(state.tokens_to_process)
- # If we can't allocate blocks, do not schedule the request and break if the cache is full
- if not self._allocate_blocks_if_needed(state):
- if self.cache.get_num_free_blocks() == 0:
+ # Check cache budget
+ cache_needed = state.current_len()
+ cache_needed = (
+ cache_needed if self.cache_budget_module is None else cache_needed % self.cache_budget_module
+ )
+ if cache_budget < cache_needed:
+ continue
+
+ # Infer the tokens that will be present in the batch if token budget is enough
+ request_tokens = self._infer_request_tokens(state, request_ids_to_remove_from_waiting)
+ # Account for token budget
+ request_len = min(len(request_tokens), token_budget)
+ # Check there will be enough cache for the new tokens
+ allocation_successful = self._allocate_blocks_if_needed(state, request_len)
+
+ # If the allocation would not be successful, we move on to the next request
+ if not allocation_successful:
+ one_allocation_failed = True
+ # If we have reached a request that was waiting, all subsequent requests are also waiting, and will need
+ # allocation as well. So if there is no more free blocks, we can safely break out of the loop.
+ if num_free_blocks == 0 and state.request_id in self.waiting_requests:
+ logger.info(f"Breaking mid-loop for request {state.request_id} because the cache is full")
  break
  continue
 
- # Add the request to the scheduled requests
+ # If this point is reached, it means we can safely schedule the request
+ self._schedule_request(state, request_tokens, token_budget, request_ids_to_remove_from_waiting)
+ request_len = len(state.tokens_to_process) # it may change after scheduling
  scheduled_requests.append(state)
 
- # Update the token budget
+ # Update the token and cache budgets
  token_budget -= request_len
+ cache_budget -= cache_needed
+
  # If using prefix sharing, we make note of the blocks that will be computed in the forward pass
  if self.cache.allow_block_sharing:
  tokens_in_current_block = state.current_len() % self.cache.block_size
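Editor's note: the loop now tracks two budgets: the existing token budget and a new cache budget, where a request's cache cost is its current length, optionally reduced modulo `cache_budget_module`. A condensed, self-contained sketch of one scheduling decision; it mirrors the control flow above but uses plain dicts instead of the real `RequestState` and cache objects:

```python
def schedule_one(req: dict, token_budget: int, cache_budget: int,
                 cache_budget_module: int | None) -> tuple[int, int] | None:
    """Return (token_cost, cache_cost) for scheduling `req`, or None to skip it this step."""
    cache_needed = req["current_len"]
    if cache_budget_module is not None:
        cache_needed %= cache_budget_module   # only count the part that actually occupies cache
    if cache_budget < cache_needed:
        return None                           # not enough cache budget left in this batch
    token_cost = min(len(req["tokens_to_process"]), token_budget)
    return token_cost, cache_needed

req = {"current_len": 300, "tokens_to_process": list(range(128))}
print(schedule_one(req, token_budget=256, cache_budget=512, cache_budget_module=256))  # (128, 44)
```

If the request is accepted, both budgets are decremented by the returned costs, exactly as `token_budget -= request_len` and `cache_budget -= cache_needed` do in the hunk above.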
@@ -248,18 +285,24 @@ class FIFOScheduler(Scheduler):
  if was_waiting:
  request_ids_to_remove_from_waiting.add(req_id)
 
- # Early exit of the loop if we have no token budget left
- if token_budget == 0:
+ # Early exit of the loop if we have no budget left
+ if token_budget == 0 or cache_budget == 0:
  break
 
+ # We remove waiting requests before checking requests were scheduled, because there might have been prefill matches
  self.waiting_requests_order = deque(
  [req_id for req_id in self.waiting_requests_order if req_id not in request_ids_to_remove_from_waiting]
  )
 
+ # If no requests were scheduled and the cache is full, we signal it by returning None
+ if not scheduled_requests and one_allocation_failed:
+ return None
+
  return scheduled_requests
 
 
  # FIXME: prioritize adding from waiting reqs before scheduling `RequestStatus.DECODING` when cache space allows it
+ # TODO: further consolidate the code by making more of it common. The reference Scheduler is FIFO, not this one.
  @attach_tracer()
  class PrefillFirstScheduler(Scheduler):
  """Scheduler that prioritizes split prefill requests over decoding requests. This scheduler ensures that split
@@ -267,7 +310,7 @@ class PrefillFirstScheduler(Scheduler):
  decoding requests."""
 
  @traced
- def schedule_batch(self, token_budget: int) -> list[RequestState]:
+ def schedule_batch(self, token_budget: int, cache_budget: int) -> list[RequestState] | None:
  priority_states: list[RequestState] = []
  second_priority_states: list[RequestState] = []
  scheduled_requests = []
@@ -280,27 +323,47 @@ class PrefillFirstScheduler(Scheduler):
  second_priority_states.append(state)
 
  # Add waiting requests to second priority
- for req_id in self.waiting_requests_order:
- second_priority_states.append(self.waiting_requests[req_id])
+ if not self.block_new_requests:
+ for req_id in self.waiting_requests_order:
+ second_priority_states.append(self.waiting_requests[req_id])
 
  candidates = priority_states + second_priority_states
-
  request_ids_to_remove_from_waiting = set()
+ one_allocation_failed = False
 
  for state in candidates:
- self._prepare_request_for_processing(state, token_budget, request_ids_to_remove_from_waiting)
- request_len = len(state.tokens_to_process)
- # If we can't allocate blocks, do not schedule the request and break if the cache is full
- if not self._allocate_blocks_if_needed(state):
- if self.cache.get_num_free_blocks() == 0:
+ # Check cache budget
+ cache_needed = state.current_len()
+ cache_needed = (
+ cache_needed if self.cache_budget_module is None else cache_needed % self.cache_budget_module
+ )
+ if cache_budget < cache_needed:
+ continue
+
+ # Infer the tokens that will be present in the batch if token budget is enough
+ request_tokens = self._infer_request_tokens(state, request_ids_to_remove_from_waiting)
+ # Account for token budget
+ request_len = min(len(request_tokens), token_budget)
+ # Check there will be enough cache for the new tokens
+ allocation_successful = self._allocate_blocks_if_needed(state, request_len)
+
+ # If the allocation would not be successful, we move on to the next request
+ if not allocation_successful:
+ one_allocation_failed = True
+ # If the request was waiting, all requests afterwards will need allocation, so we break if the cache is full
+ if state.request_id in self.waiting_requests and self.cache.get_num_free_blocks() == 0:
  break
  continue
 
- # Add the request to the scheduled requests
+ # If this point is reached, it means we can safely schedule the request
+ self._schedule_request(state, request_tokens, token_budget, request_ids_to_remove_from_waiting)
+ request_len = len(state.tokens_to_process) # it may change after scheduling
  scheduled_requests.append(state)
 
- # Update the token budget
+ # Update the token and cache budgets
  token_budget -= request_len
+ cache_budget -= cache_needed
+
  # If using prefix sharing, we make note of the blocks that will be computed in the forward pass
  if self.cache.allow_block_sharing:
  tokens_in_current_block = state.current_len() % self.cache.block_size
@@ -310,18 +373,23 @@ class PrefillFirstScheduler(Scheduler):
 
  # Remove the request from the waiting queue and mark it as removed
  req_id = state.request_id
- if req_id in self.waiting_requests:
- del self.waiting_requests[req_id]
+ was_waiting = self.waiting_requests.pop(req_id, None) is not None
+ if was_waiting:
  request_ids_to_remove_from_waiting.add(req_id)
 
- # Early exit of the loop if we have no token budget left
- if token_budget == 0:
+ # Early exit of the loop if we have no budget left
+ if token_budget == 0 or cache_budget == 0:
  break
 
+ # We remove waiting requests before checking requests were scheduled, because there might have been prefill matches
  self.waiting_requests_order = deque(
  [req_id for req_id in self.waiting_requests_order if req_id not in request_ids_to_remove_from_waiting]
  )
 
+ # If no requests were scheduled and the cache is full, we signal it by returning None
+ if not scheduled_requests and one_allocation_failed:
+ return None
+
  return scheduled_requests
 
 
@@ -1,4 +1,3 @@
- # coding=utf-8
  # Copyright 2024 The HuggingFace Inc. team and Google DeepMind.
  #
  # Licensed under the Apache License, Version 2.0 (the "License");
@@ -1543,133 +1542,6 @@ class PrefixConstrainedLogitsProcessor(LogitsProcessor):
  return scores_processed
 
 
- class HammingDiversityLogitsProcessor(LogitsProcessor):
- r"""
- [`LogitsProcessor`] that enforces diverse beam search.
- Note that this logits processor is only effective for [`PreTrainedModel.group_beam_search`]. See [Diverse Beam
- Search: Decoding Diverse Solutions from Neural Sequence Models](https://huggingface.co/papers/1610.02424) for more
- details.
- Traditional beam search often generates very similar sequences across different beams.
- `HammingDiversityLogitsProcessor` addresses this by penalizing beams that generate tokens already chosen by other
- beams in the same time step.
- Args:
- diversity_penalty (`float`):
- This value is subtracted from a beam's score if it generates a token same as any beam from other group at a
- particular time. A higher `diversity_penalty` will enforce greater diversity among the beams. Adjusting
- this value can help strike a balance between diversity and natural likelihood.
- num_beams (`int`):
- Number of beams for beam search. 1 means no beam search.
- num_beam_groups (`int`):
- Number of groups to divide `num_beams` into in order to ensure diversity among different groups of beams.
- [this paper](https://huggingface.co/papers/1610.02424) for more details.
- Examples:
- ```python
- >>> from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
- >>> import torch
- >>> # Initialize the model and tokenizer
- >>> tokenizer = AutoTokenizer.from_pretrained("google-t5/t5-base")
- >>> model = AutoModelForSeq2SeqLM.from_pretrained("google-t5/t5-base")
- >>> # A long text about the solar system
- >>> text = (
- ... "The Solar System is a gravitationally bound system comprising the Sun and the objects that orbit it, "
- ... "either directly or indirectly. Of the objects that orbit the Sun directly, the largest are the eight "
- ... "planets, with the remainder being smaller objects, such as the five dwarf planets and small Solar System "
- ... "bodies. The Solar System formed 4.6 billion years ago from the gravitational collapse of a giant "
- ... "interstellar molecular cloud."
- ... )
- >>> inputs = tokenizer("summarize: " + text, return_tensors="pt")
- >>> # Generate diverse summary
- >>> outputs_diverse = model.generate(
- ... **inputs,
- ... num_beam_groups=2,
- ... diversity_penalty=10.0,
- ... max_length=100,
- ... num_beams=4,
- ... num_return_sequences=2,
- ... )
- >>> summaries_diverse = tokenizer.batch_decode(outputs_diverse, skip_special_tokens=True)
- >>> # Generate non-diverse summary
- >>> outputs_non_diverse = model.generate(
- ... **inputs,
- ... max_length=100,
- ... num_beams=4,
- ... num_return_sequences=2,
- ... )
- >>> summary_non_diverse = tokenizer.batch_decode(outputs_non_diverse, skip_special_tokens=True)
- >>> # With `diversity_penalty`, the resulting beams are much more diverse
- >>> print(summary_non_diverse)
- ['the solar system formed 4.6 billion years ago from the collapse of a giant interstellar molecular cloud. of the objects that orbit the Sun directly, the largest are the eight planets.',
- 'the Solar System formed 4.6 billion years ago from the collapse of a giant interstellar molecular cloud. of the objects that orbit the Sun directly, the largest are the eight planets.']
- >>> print(summaries_diverse)
- ['the solar system formed 4.6 billion years ago from the collapse of a giant interstellar molecular cloud. of the objects that orbit the Sun directly, the largest are the eight planets.',
- 'the solar system formed 4.6 billion years ago from the collapse of a giant interstellar molecular cloud. of the objects that orbit the Sun directly, the largest are the eight planets. the rest of the objects are smaller objects, such as the five dwarf planets and small solar system bodies.']
- ```
- """
-
- def __init__(self, diversity_penalty: float, num_beams: int, num_beam_groups: int):
- logger.warning_once(
- "`HammingDiversityLogitsProcessor` is deprecated and will be removed in v4.62.0, as constrained beam search has been moved to the Hub: https://hf.co/transformers-community/constrained-beam-search."
- )
- if not isinstance(diversity_penalty, float) or (not diversity_penalty > 0.0):
- raise ValueError("`diversity_penalty` should be a float strictly larger than 0.")
- self._diversity_penalty = diversity_penalty
- if not isinstance(num_beams, int) or num_beams < 2:
- raise ValueError("`num_beams` should be an integer strictly larger than 1.")
- self._num_beams = num_beams
- if not isinstance(num_beam_groups, int) or num_beam_groups < 2:
- raise ValueError("`num_beam_groups` should be an integer strictly larger than 1.")
- if num_beam_groups > num_beams:
- raise ValueError("`beam_groups` has to be smaller or equal to `num_beams`.")
- self._num_sub_beams = num_beams // num_beam_groups
-
- def __call__(
- self,
- input_ids: torch.LongTensor,
- scores: torch.FloatTensor,
- current_tokens: torch.LongTensor,
- beam_group_idx: int,
- ) -> torch.FloatTensor:
- r"""
- Args:
- input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
- Indices of input sequence tokens in the vocabulary. [What are input IDs?](../glossary#input-ids)
- scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
- Prediction scores of a language modeling head. These can be logits for each vocabulary when not using
- beam search or log softmax for each vocabulary token when using beam search
- current_tokens (`torch.LongTensor` of shape `(batch_size)`):
- Indices of input sequence tokens in the vocabulary, corresponding to the tokens selected by the other
- beam groups in the current generation step.
- beam_group_idx (`int`):
- The index of the beam group currently being processed.
- Return:
- `torch.FloatTensor` of shape `(batch_size, config.vocab_size)`:
- The processed prediction scores.
- """
- # hamming diversity: penalise using same token in current group which was used in previous groups at
- # the same time step
- batch_size = current_tokens.shape[0] // self._num_beams
- group_start_idx = beam_group_idx * self._num_sub_beams
- group_end_idx = min(group_start_idx + self._num_sub_beams, self._num_beams)
- group_size = group_end_idx - group_start_idx
- vocab_size = scores.shape[-1]
-
- if group_start_idx == 0:
- return scores
-
- scores_processed = scores.clone()
- for batch_idx in range(batch_size):
- # predicted tokens of last time step of previous groups
- previous_group_tokens = current_tokens[
- batch_idx * self._num_beams : batch_idx * self._num_beams + group_start_idx
- ]
- token_frequency = torch.bincount(previous_group_tokens, minlength=vocab_size).to(scores.device)
- scores_processed[batch_idx * group_size : (batch_idx + 1) * group_size] -= (
- self._diversity_penalty * token_frequency
- )
-
- return scores_processed
-
-
  class ForcedBOSTokenLogitsProcessor(LogitsProcessor):
  r"""
  [`LogitsProcessor`] that enforces the specified token as the first generated token. Used with encoder-decoder
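Editor's note: for reference, the removed `HammingDiversityLogitsProcessor` penalized each beam group by the token counts already picked by earlier groups at the same step; its core is a `bincount` subtraction. A condensed restatement of that penalty, detached from the class (shapes assumed as in the removed docstring):

```python
import torch

def hamming_diversity_penalty(scores: torch.Tensor, previous_group_tokens: torch.Tensor,
                              diversity_penalty: float) -> torch.Tensor:
    """Subtract `diversity_penalty` once for every earlier-group beam that already chose a token this step."""
    vocab_size = scores.shape[-1]
    token_frequency = torch.bincount(previous_group_tokens, minlength=vocab_size).to(scores.device)
    return scores - diversity_penalty * token_frequency

scores = torch.zeros(2, 8)             # 2 beams in the current group, vocabulary of 8
prev = torch.tensor([3, 3, 5])         # tokens picked by earlier groups at this step
print(hamming_diversity_penalty(scores, prev, diversity_penalty=1.0)[0])
# tensor([ 0.,  0.,  0., -2.,  0., -1.,  0.,  0.])
```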
@@ -1,4 +1,3 @@
- # coding=utf-8
  # Copyright 2023 The HuggingFace Inc. team.
  #
  # Licensed under the Apache License, Version 2.0 (the "License");
@@ -1,4 +1,3 @@
- # coding=utf-8
  # Copyright 2020 The Google AI Language Team Authors, Facebook AI Research authors and The HuggingFace Inc. team.
  # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
  #
@@ -21,7 +20,7 @@ import warnings
  from collections.abc import Callable
  from contextlib import contextmanager
  from dataclasses import dataclass
- from typing import TYPE_CHECKING, Any, Optional, Union
+ from typing import TYPE_CHECKING, Any, Optional
 
  import torch
  import torch.distributed as dist
@@ -332,9 +331,9 @@ class GenerateBeamEncoderDecoderOutput(ModelOutput):
 
 
  # Typing shortcuts
- GenerateNonBeamOutput = Union[GenerateDecoderOnlyOutput, GenerateEncoderDecoderOutput]
- GenerateBeamOutput = Union[GenerateBeamDecoderOnlyOutput, GenerateBeamEncoderDecoderOutput]
- GenerateOutput = Union[GenerateNonBeamOutput, GenerateBeamOutput]
+ GenerateNonBeamOutput = GenerateDecoderOnlyOutput | GenerateEncoderDecoderOutput
+ GenerateBeamOutput = GenerateBeamDecoderOnlyOutput | GenerateBeamEncoderDecoderOutput
+ GenerateOutput = GenerateNonBeamOutput | GenerateBeamOutput
 
 
  class GenerationMixin(ContinuousMixin):
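Editor's note: the typing aliases above move from `typing.Union` to PEP 604 unions, which are equivalent at runtime on Python 3.10+ and make the `Union` import unnecessary. A tiny illustration:

```python
from typing import Union

IntOrStr = Union[int, str]       # old spelling
IntOrStrNew = int | str          # PEP 604 spelling, Python 3.10+

print(IntOrStrNew == IntOrStr)        # True: both describe the same union of types
print(isinstance("x", IntOrStrNew))   # True: the | form can be used directly in isinstance checks
```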
@@ -1792,26 +1791,15 @@ class GenerationMixin(ContinuousMixin):
  generation_config = copy.deepcopy(generation_config)
 
  # First set values from the loaded `self.generation_config`, then set default values (BC)
- # Do not update any values that aren't `None`, i.e. if set by users explicitly and passed
- # to `generate()`. Thus the `defaults_only=True` is used
+ #
+ # Only update values that are `None`, i.e. these values were not explicitly set by users to `generate()`,
+ # or values that are not present in the current config, i.e. custom entries that were set via `**kwargs`.
+ # Thus we use the specific kwargs `defaults_only=True` (`None` values only) and `allow_custom_entries=True`
+ # (custom entries are carried over).
  global_defaults = self.generation_config._get_default_generation_params()
- generation_config.update(**self.generation_config.to_dict(), defaults_only=True)
+ generation_config.update(**self.generation_config.to_dict(), defaults_only=True, allow_custom_entries=True)
  generation_config.update(**global_defaults, defaults_only=True)
 
- # Due to some values being boolean and not `None`, we need additional logic to overwrite
- # them explicitly (`defaults_only=False`) on the condition that it's only a previous
- # default value
- default_generation_config = GenerationConfig()
- generation_config.update(
- **{
- k: v
- for k, v in self.generation_config.to_dict().items()
- if isinstance(v, bool)
- and hasattr(default_generation_config, k)
- and getattr(generation_config, k, None) == getattr(default_generation_config, k)
- }
- )
-
  # Finally, if there are any kwargs, update config with it -> highest priority at the end
  model_kwargs = generation_config.update(**kwargs)
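Editor's note: the simplification relies on `GenerationConfig.update` accepting `defaults_only=True` (only fill attributes that are currently `None`) together with the new `allow_custom_entries=True` (carry over keys the target config does not know about), which makes the old boolean-specific backfill unnecessary. A rough sketch of those merge semantics on a plain dict, not the real `GenerationConfig` API:

```python
def update(config: dict, updates: dict, defaults_only: bool = False, allow_custom_entries: bool = False) -> dict:
    """Toy model of the merge semantics described in the hunk above."""
    for key, value in updates.items():
        if key not in config:
            if allow_custom_entries:
                config[key] = value          # unknown key: carried over as a custom entry
        elif not defaults_only or config[key] is None:
            config[key] = value              # fill unset values; overwrite everything only if defaults_only=False
    return config

user_config = {"max_new_tokens": None, "do_sample": True}
model_defaults = {"max_new_tokens": 20, "do_sample": False, "my_custom_flag": 1}
print(update(user_config, model_defaults, defaults_only=True, allow_custom_entries=True))
# {'max_new_tokens': 20, 'do_sample': True, 'my_custom_flag': 1}
```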
 
@@ -1914,6 +1902,7 @@ class GenerationMixin(ContinuousMixin):
  # NOTE: remove xlnet/reformer when the models are deprecated, non-standard model architecture/cache name
  return not cls._is_stateful and all(
  special_model_name not in cls.__name__.lower()
+ or "minimaxm2" in cls.__name__.lower() # name clash between minimax and minimax m2
  for special_model_name in [
  "reformer",
  "minimax",
@@ -2429,7 +2418,7 @@ class GenerationMixin(ContinuousMixin):
  raise NotImplementedError(
  f"assistant_model is not supported for continuous batching. Got {assistant_model = }"
  )
- if streamer is not None: # TODO: actualy this could be supported
+ if streamer is not None: # TODO: actually this could be supported
  raise NotImplementedError(f"streaming is not supported for continuous batching. Got {streamer = }")
  if negative_prompt_ids is not None:
  raise NotImplementedError(
@@ -2485,9 +2474,11 @@ class GenerationMixin(ContinuousMixin):
  generation_config, model_kwargs = self._prepare_generation_config(generation_config, **kwargs)
 
  generation_mode = generation_config.get_generation_mode(assistant_model)
+ deprecated_mode_repo = self._get_deprecated_gen_repo(generation_mode, trust_remote_code, custom_generate)
+
  if isinstance(custom_generate, Callable):
  decoding_method = custom_generate
- else:
+ elif deprecated_mode_repo is None:
  # type() required to access the unbound class-level method
  decoding_method = getattr(type(self), GENERATION_MODES_MAPPING[generation_mode])
 
@@ -2498,7 +2489,7 @@ class GenerationMixin(ContinuousMixin):
  # NOTE: This must come after initializing generation_config, since we need it to determine if this is a deprecated mode.
  # It must also be before any preparation steps, since Hub repos expect to be loaded before preparation steps.
  # TODO joao, manuel: remove this in v4.62.0
- if deprecated_mode_repo := self._get_deprecated_gen_repo(generation_mode, trust_remote_code, custom_generate):
+ if deprecated_mode_repo is not None:
  return GenerationMixin.generate(
  self,
  inputs=inputs,
@@ -3875,7 +3866,6 @@ class GenerationMixin(ContinuousMixin):
  model_kwargs["cache_position"] = torch.arange(
  past_length, current_length, dtype=torch.long, device=input_chunk.device
  )
- model_kwargs["position_ids"] = model_kwargs["cache_position"].unsqueeze(0)
  model_inputs = self.prepare_inputs_for_generation(input_chunk, **model_kwargs)
 
  outputs = model_forward(**model_inputs, return_dict=True)
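Editor's note: dropping the explicit `position_ids` assignment leaves it to `prepare_inputs_for_generation` to derive positions from `cache_position`, which is exactly what the removed line computed by hand for chunked prefill. A minimal sketch of that derivation (illustrative only; the real method also handles padding and model-specific cases):

```python
import torch

past_length, current_length = 32, 40          # one chunked-prefill window
cache_position = torch.arange(past_length, current_length, dtype=torch.long)
position_ids = cache_position.unsqueeze(0)    # add the batch dimension, as the removed line did

print(cache_position.shape, position_ids.shape)  # torch.Size([8]) torch.Size([1, 8])
```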