xinference 1.10.0__py3-none-any.whl → 1.11.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This release has been flagged as potentially problematic; see the registry page for details.

Files changed (328)
  1. xinference/_version.py +3 -3
  2. xinference/api/restful_api.py +473 -31
  3. xinference/client/restful/async_restful_client.py +178 -8
  4. xinference/client/restful/restful_client.py +151 -3
  5. xinference/core/supervisor.py +99 -53
  6. xinference/core/worker.py +10 -0
  7. xinference/deploy/cmdline.py +15 -0
  8. xinference/model/audio/core.py +21 -6
  9. xinference/model/audio/indextts2.py +166 -0
  10. xinference/model/audio/model_spec.json +58 -21
  11. xinference/model/image/model_spec.json +159 -90
  12. xinference/model/image/stable_diffusion/core.py +13 -4
  13. xinference/model/llm/__init__.py +6 -2
  14. xinference/model/llm/llm_family.json +1299 -174
  15. xinference/model/llm/mlx/distributed_models/core.py +41 -0
  16. xinference/model/llm/mlx/distributed_models/qwen2.py +1 -2
  17. xinference/model/llm/sglang/core.py +44 -11
  18. xinference/model/llm/tool_parsers/deepseek_r1_tool_parser.py +94 -32
  19. xinference/model/llm/tool_parsers/qwen_tool_parser.py +29 -4
  20. xinference/model/llm/transformers/chatglm.py +3 -0
  21. xinference/model/llm/transformers/core.py +129 -36
  22. xinference/model/llm/transformers/multimodal/minicpmv45.py +340 -0
  23. xinference/model/llm/transformers/multimodal/qwen2_vl.py +34 -8
  24. xinference/model/llm/transformers/utils.py +23 -0
  25. xinference/model/llm/utils.py +48 -32
  26. xinference/model/llm/vllm/core.py +207 -72
  27. xinference/model/utils.py +74 -31
  28. xinference/thirdparty/audiotools/__init__.py +10 -0
  29. xinference/thirdparty/audiotools/core/__init__.py +4 -0
  30. xinference/thirdparty/audiotools/core/audio_signal.py +1682 -0
  31. xinference/thirdparty/audiotools/core/display.py +194 -0
  32. xinference/thirdparty/audiotools/core/dsp.py +390 -0
  33. xinference/thirdparty/audiotools/core/effects.py +647 -0
  34. xinference/thirdparty/audiotools/core/ffmpeg.py +211 -0
  35. xinference/thirdparty/audiotools/core/loudness.py +320 -0
  36. xinference/thirdparty/audiotools/core/playback.py +252 -0
  37. xinference/thirdparty/audiotools/core/templates/__init__.py +0 -0
  38. xinference/thirdparty/audiotools/core/templates/headers.html +322 -0
  39. xinference/thirdparty/audiotools/core/templates/pandoc.css +407 -0
  40. xinference/thirdparty/audiotools/core/templates/widget.html +52 -0
  41. xinference/thirdparty/audiotools/core/util.py +671 -0
  42. xinference/thirdparty/audiotools/core/whisper.py +97 -0
  43. xinference/thirdparty/audiotools/data/__init__.py +3 -0
  44. xinference/thirdparty/audiotools/data/datasets.py +517 -0
  45. xinference/thirdparty/audiotools/data/preprocess.py +81 -0
  46. xinference/thirdparty/audiotools/data/transforms.py +1592 -0
  47. xinference/thirdparty/audiotools/metrics/__init__.py +6 -0
  48. xinference/thirdparty/audiotools/metrics/distance.py +131 -0
  49. xinference/thirdparty/audiotools/metrics/quality.py +159 -0
  50. xinference/thirdparty/audiotools/metrics/spectral.py +247 -0
  51. xinference/thirdparty/audiotools/ml/__init__.py +5 -0
  52. xinference/thirdparty/audiotools/ml/accelerator.py +184 -0
  53. xinference/thirdparty/audiotools/ml/decorators.py +440 -0
  54. xinference/thirdparty/audiotools/ml/experiment.py +90 -0
  55. xinference/thirdparty/audiotools/ml/layers/__init__.py +2 -0
  56. xinference/thirdparty/audiotools/ml/layers/base.py +328 -0
  57. xinference/thirdparty/audiotools/ml/layers/spectral_gate.py +127 -0
  58. xinference/thirdparty/audiotools/post.py +140 -0
  59. xinference/thirdparty/audiotools/preference.py +600 -0
  60. xinference/thirdparty/fish_speech/fish_speech/text/chn_text_norm/text.py +1 -1
  61. xinference/thirdparty/indextts/BigVGAN/ECAPA_TDNN.py +656 -0
  62. xinference/thirdparty/indextts/BigVGAN/__init__.py +0 -0
  63. xinference/thirdparty/indextts/BigVGAN/activations.py +122 -0
  64. xinference/thirdparty/indextts/BigVGAN/alias_free_activation/__init__.py +0 -0
  65. xinference/thirdparty/indextts/BigVGAN/alias_free_activation/cuda/.gitignore +1 -0
  66. xinference/thirdparty/indextts/BigVGAN/alias_free_activation/cuda/__init__.py +0 -0
  67. xinference/thirdparty/indextts/BigVGAN/alias_free_activation/cuda/activation1d.py +76 -0
  68. xinference/thirdparty/indextts/BigVGAN/alias_free_activation/cuda/anti_alias_activation.cpp +23 -0
  69. xinference/thirdparty/indextts/BigVGAN/alias_free_activation/cuda/anti_alias_activation_cuda.cu +256 -0
  70. xinference/thirdparty/indextts/BigVGAN/alias_free_activation/cuda/compat.h +29 -0
  71. xinference/thirdparty/indextts/BigVGAN/alias_free_activation/cuda/load.py +121 -0
  72. xinference/thirdparty/indextts/BigVGAN/alias_free_activation/cuda/type_shim.h +92 -0
  73. xinference/thirdparty/indextts/BigVGAN/alias_free_activation/torch/__init__.py +6 -0
  74. xinference/thirdparty/indextts/BigVGAN/alias_free_activation/torch/act.py +31 -0
  75. xinference/thirdparty/indextts/BigVGAN/alias_free_activation/torch/filter.py +102 -0
  76. xinference/thirdparty/indextts/BigVGAN/alias_free_activation/torch/resample.py +58 -0
  77. xinference/thirdparty/indextts/BigVGAN/alias_free_torch/__init__.py +6 -0
  78. xinference/thirdparty/indextts/BigVGAN/alias_free_torch/act.py +29 -0
  79. xinference/thirdparty/indextts/BigVGAN/alias_free_torch/filter.py +96 -0
  80. xinference/thirdparty/indextts/BigVGAN/alias_free_torch/resample.py +49 -0
  81. xinference/thirdparty/indextts/BigVGAN/bigvgan.py +534 -0
  82. xinference/thirdparty/indextts/BigVGAN/models.py +451 -0
  83. xinference/thirdparty/indextts/BigVGAN/nnet/CNN.py +546 -0
  84. xinference/thirdparty/indextts/BigVGAN/nnet/__init__.py +0 -0
  85. xinference/thirdparty/indextts/BigVGAN/nnet/linear.py +89 -0
  86. xinference/thirdparty/indextts/BigVGAN/nnet/normalization.py +670 -0
  87. xinference/thirdparty/indextts/BigVGAN/utils.py +101 -0
  88. xinference/thirdparty/indextts/__init__.py +0 -0
  89. xinference/thirdparty/indextts/cli.py +65 -0
  90. xinference/thirdparty/indextts/gpt/__init__.py +0 -0
  91. xinference/thirdparty/indextts/gpt/conformer/__init__.py +0 -0
  92. xinference/thirdparty/indextts/gpt/conformer/attention.py +312 -0
  93. xinference/thirdparty/indextts/gpt/conformer/embedding.py +163 -0
  94. xinference/thirdparty/indextts/gpt/conformer/subsampling.py +348 -0
  95. xinference/thirdparty/indextts/gpt/conformer_encoder.py +520 -0
  96. xinference/thirdparty/indextts/gpt/model.py +713 -0
  97. xinference/thirdparty/indextts/gpt/model_v2.py +747 -0
  98. xinference/thirdparty/indextts/gpt/perceiver.py +317 -0
  99. xinference/thirdparty/indextts/gpt/transformers_beam_search.py +1013 -0
  100. xinference/thirdparty/indextts/gpt/transformers_generation_utils.py +4747 -0
  101. xinference/thirdparty/indextts/gpt/transformers_gpt2.py +1878 -0
  102. xinference/thirdparty/indextts/gpt/transformers_modeling_utils.py +5525 -0
  103. xinference/thirdparty/indextts/infer.py +690 -0
  104. xinference/thirdparty/indextts/infer_v2.py +739 -0
  105. xinference/thirdparty/indextts/s2mel/dac/__init__.py +16 -0
  106. xinference/thirdparty/indextts/s2mel/dac/__main__.py +36 -0
  107. xinference/thirdparty/indextts/s2mel/dac/model/__init__.py +4 -0
  108. xinference/thirdparty/indextts/s2mel/dac/model/base.py +294 -0
  109. xinference/thirdparty/indextts/s2mel/dac/model/dac.py +400 -0
  110. xinference/thirdparty/indextts/s2mel/dac/model/discriminator.py +228 -0
  111. xinference/thirdparty/indextts/s2mel/dac/model/encodec.py +320 -0
  112. xinference/thirdparty/indextts/s2mel/dac/nn/__init__.py +3 -0
  113. xinference/thirdparty/indextts/s2mel/dac/nn/layers.py +33 -0
  114. xinference/thirdparty/indextts/s2mel/dac/nn/loss.py +368 -0
  115. xinference/thirdparty/indextts/s2mel/dac/nn/quantize.py +339 -0
  116. xinference/thirdparty/indextts/s2mel/dac/utils/__init__.py +123 -0
  117. xinference/thirdparty/indextts/s2mel/dac/utils/decode.py +95 -0
  118. xinference/thirdparty/indextts/s2mel/dac/utils/encode.py +94 -0
  119. xinference/thirdparty/indextts/s2mel/hf_utils.py +12 -0
  120. xinference/thirdparty/indextts/s2mel/modules/alias_free_torch/__init__.py +5 -0
  121. xinference/thirdparty/indextts/s2mel/modules/alias_free_torch/act.py +29 -0
  122. xinference/thirdparty/indextts/s2mel/modules/alias_free_torch/filter.py +96 -0
  123. xinference/thirdparty/indextts/s2mel/modules/alias_free_torch/resample.py +57 -0
  124. xinference/thirdparty/indextts/s2mel/modules/audio.py +82 -0
  125. xinference/thirdparty/indextts/s2mel/modules/bigvgan/activations.py +120 -0
  126. xinference/thirdparty/indextts/s2mel/modules/bigvgan/alias_free_activation/cuda/__init__.py +0 -0
  127. xinference/thirdparty/indextts/s2mel/modules/bigvgan/alias_free_activation/cuda/activation1d.py +77 -0
  128. xinference/thirdparty/indextts/s2mel/modules/bigvgan/alias_free_activation/cuda/anti_alias_activation.cpp +23 -0
  129. xinference/thirdparty/indextts/s2mel/modules/bigvgan/alias_free_activation/cuda/anti_alias_activation_cuda.cu +246 -0
  130. xinference/thirdparty/indextts/s2mel/modules/bigvgan/alias_free_activation/cuda/compat.h +29 -0
  131. xinference/thirdparty/indextts/s2mel/modules/bigvgan/alias_free_activation/cuda/load.py +86 -0
  132. xinference/thirdparty/indextts/s2mel/modules/bigvgan/alias_free_activation/cuda/type_shim.h +92 -0
  133. xinference/thirdparty/indextts/s2mel/modules/bigvgan/alias_free_activation/torch/__init__.py +6 -0
  134. xinference/thirdparty/indextts/s2mel/modules/bigvgan/alias_free_activation/torch/act.py +30 -0
  135. xinference/thirdparty/indextts/s2mel/modules/bigvgan/alias_free_activation/torch/filter.py +101 -0
  136. xinference/thirdparty/indextts/s2mel/modules/bigvgan/alias_free_activation/torch/resample.py +58 -0
  137. xinference/thirdparty/indextts/s2mel/modules/bigvgan/bigvgan.py +492 -0
  138. xinference/thirdparty/indextts/s2mel/modules/bigvgan/config.json +63 -0
  139. xinference/thirdparty/indextts/s2mel/modules/bigvgan/env.py +18 -0
  140. xinference/thirdparty/indextts/s2mel/modules/bigvgan/meldataset.py +354 -0
  141. xinference/thirdparty/indextts/s2mel/modules/bigvgan/utils.py +99 -0
  142. xinference/thirdparty/indextts/s2mel/modules/campplus/DTDNN.py +115 -0
  143. xinference/thirdparty/indextts/s2mel/modules/campplus/classifier.py +70 -0
  144. xinference/thirdparty/indextts/s2mel/modules/campplus/layers.py +253 -0
  145. xinference/thirdparty/indextts/s2mel/modules/commons.py +632 -0
  146. xinference/thirdparty/indextts/s2mel/modules/diffusion_transformer.py +257 -0
  147. xinference/thirdparty/indextts/s2mel/modules/encodec.py +292 -0
  148. xinference/thirdparty/indextts/s2mel/modules/flow_matching.py +171 -0
  149. xinference/thirdparty/indextts/s2mel/modules/gpt_fast/generate.py +436 -0
  150. xinference/thirdparty/indextts/s2mel/modules/gpt_fast/model.py +360 -0
  151. xinference/thirdparty/indextts/s2mel/modules/gpt_fast/quantize.py +622 -0
  152. xinference/thirdparty/indextts/s2mel/modules/hifigan/f0_predictor.py +55 -0
  153. xinference/thirdparty/indextts/s2mel/modules/hifigan/generator.py +454 -0
  154. xinference/thirdparty/indextts/s2mel/modules/layers.py +354 -0
  155. xinference/thirdparty/indextts/s2mel/modules/length_regulator.py +141 -0
  156. xinference/thirdparty/indextts/s2mel/modules/openvoice/__init__.py +0 -0
  157. xinference/thirdparty/indextts/s2mel/modules/openvoice/api.py +186 -0
  158. xinference/thirdparty/indextts/s2mel/modules/openvoice/attentions.py +465 -0
  159. xinference/thirdparty/indextts/s2mel/modules/openvoice/checkpoints_v2/converter/config.json +57 -0
  160. xinference/thirdparty/indextts/s2mel/modules/openvoice/commons.py +160 -0
  161. xinference/thirdparty/indextts/s2mel/modules/openvoice/mel_processing.py +183 -0
  162. xinference/thirdparty/indextts/s2mel/modules/openvoice/models.py +499 -0
  163. xinference/thirdparty/indextts/s2mel/modules/openvoice/modules.py +598 -0
  164. xinference/thirdparty/indextts/s2mel/modules/openvoice/openvoice_app.py +275 -0
  165. xinference/thirdparty/indextts/s2mel/modules/openvoice/se_extractor.py +153 -0
  166. xinference/thirdparty/indextts/s2mel/modules/openvoice/transforms.py +209 -0
  167. xinference/thirdparty/indextts/s2mel/modules/openvoice/utils.py +194 -0
  168. xinference/thirdparty/indextts/s2mel/modules/quantize.py +229 -0
  169. xinference/thirdparty/indextts/s2mel/modules/rmvpe.py +631 -0
  170. xinference/thirdparty/indextts/s2mel/modules/vocos/__init__.py +4 -0
  171. xinference/thirdparty/indextts/s2mel/modules/vocos/heads.py +164 -0
  172. xinference/thirdparty/indextts/s2mel/modules/vocos/helpers.py +71 -0
  173. xinference/thirdparty/indextts/s2mel/modules/vocos/loss.py +114 -0
  174. xinference/thirdparty/indextts/s2mel/modules/vocos/models.py +118 -0
  175. xinference/thirdparty/indextts/s2mel/modules/vocos/modules.py +213 -0
  176. xinference/thirdparty/indextts/s2mel/modules/vocos/pretrained.py +51 -0
  177. xinference/thirdparty/indextts/s2mel/modules/vocos/spectral_ops.py +192 -0
  178. xinference/thirdparty/indextts/s2mel/modules/wavenet.py +174 -0
  179. xinference/thirdparty/indextts/s2mel/optimizers.py +96 -0
  180. xinference/thirdparty/indextts/s2mel/wav2vecbert_extract.py +148 -0
  181. xinference/thirdparty/indextts/utils/__init__.py +0 -0
  182. xinference/thirdparty/indextts/utils/arch_util.py +120 -0
  183. xinference/thirdparty/indextts/utils/checkpoint.py +34 -0
  184. xinference/thirdparty/indextts/utils/common.py +121 -0
  185. xinference/thirdparty/indextts/utils/feature_extractors.py +50 -0
  186. xinference/thirdparty/indextts/utils/front.py +536 -0
  187. xinference/thirdparty/indextts/utils/maskgct/models/codec/__init__.py +0 -0
  188. xinference/thirdparty/indextts/utils/maskgct/models/codec/amphion_codec/codec.py +427 -0
  189. xinference/thirdparty/indextts/utils/maskgct/models/codec/amphion_codec/quantize/__init__.py +11 -0
  190. xinference/thirdparty/indextts/utils/maskgct/models/codec/amphion_codec/quantize/factorized_vector_quantize.py +150 -0
  191. xinference/thirdparty/indextts/utils/maskgct/models/codec/amphion_codec/quantize/lookup_free_quantize.py +77 -0
  192. xinference/thirdparty/indextts/utils/maskgct/models/codec/amphion_codec/quantize/residual_vq.py +177 -0
  193. xinference/thirdparty/indextts/utils/maskgct/models/codec/amphion_codec/quantize/vector_quantize.py +401 -0
  194. xinference/thirdparty/indextts/utils/maskgct/models/codec/amphion_codec/vocos.py +881 -0
  195. xinference/thirdparty/indextts/utils/maskgct/models/codec/codec_dataset.py +264 -0
  196. xinference/thirdparty/indextts/utils/maskgct/models/codec/codec_inference.py +515 -0
  197. xinference/thirdparty/indextts/utils/maskgct/models/codec/codec_sampler.py +126 -0
  198. xinference/thirdparty/indextts/utils/maskgct/models/codec/codec_trainer.py +166 -0
  199. xinference/thirdparty/indextts/utils/maskgct/models/codec/facodec/__init__.py +0 -0
  200. xinference/thirdparty/indextts/utils/maskgct/models/codec/facodec/alias_free_torch/__init__.py +5 -0
  201. xinference/thirdparty/indextts/utils/maskgct/models/codec/facodec/alias_free_torch/act.py +29 -0
  202. xinference/thirdparty/indextts/utils/maskgct/models/codec/facodec/alias_free_torch/filter.py +96 -0
  203. xinference/thirdparty/indextts/utils/maskgct/models/codec/facodec/alias_free_torch/resample.py +57 -0
  204. xinference/thirdparty/indextts/utils/maskgct/models/codec/facodec/facodec_dataset.py +98 -0
  205. xinference/thirdparty/indextts/utils/maskgct/models/codec/facodec/facodec_inference.py +137 -0
  206. xinference/thirdparty/indextts/utils/maskgct/models/codec/facodec/facodec_trainer.py +776 -0
  207. xinference/thirdparty/indextts/utils/maskgct/models/codec/facodec/modules/JDC/__init__.py +1 -0
  208. xinference/thirdparty/indextts/utils/maskgct/models/codec/facodec/modules/JDC/bst.t7 +0 -0
  209. xinference/thirdparty/indextts/utils/maskgct/models/codec/facodec/modules/JDC/model.py +219 -0
  210. xinference/thirdparty/indextts/utils/maskgct/models/codec/facodec/modules/attentions.py +437 -0
  211. xinference/thirdparty/indextts/utils/maskgct/models/codec/facodec/modules/commons.py +331 -0
  212. xinference/thirdparty/indextts/utils/maskgct/models/codec/facodec/modules/gradient_reversal.py +35 -0
  213. xinference/thirdparty/indextts/utils/maskgct/models/codec/facodec/modules/layers.py +460 -0
  214. xinference/thirdparty/indextts/utils/maskgct/models/codec/facodec/modules/quantize.py +741 -0
  215. xinference/thirdparty/indextts/utils/maskgct/models/codec/facodec/modules/style_encoder.py +110 -0
  216. xinference/thirdparty/indextts/utils/maskgct/models/codec/facodec/modules/wavenet.py +224 -0
  217. xinference/thirdparty/indextts/utils/maskgct/models/codec/facodec/optimizer.py +104 -0
  218. xinference/thirdparty/indextts/utils/maskgct/models/codec/kmeans/repcodec_model.py +210 -0
  219. xinference/thirdparty/indextts/utils/maskgct/models/codec/kmeans/vocos.py +850 -0
  220. xinference/thirdparty/indextts/utils/maskgct/models/codec/melvqgan/melspec.py +108 -0
  221. xinference/thirdparty/indextts/utils/maskgct/models/codec/ns3_codec/README.md +216 -0
  222. xinference/thirdparty/indextts/utils/maskgct/models/codec/ns3_codec/__init__.py +6 -0
  223. xinference/thirdparty/indextts/utils/maskgct/models/codec/ns3_codec/alias_free_torch/__init__.py +5 -0
  224. xinference/thirdparty/indextts/utils/maskgct/models/codec/ns3_codec/alias_free_torch/act.py +29 -0
  225. xinference/thirdparty/indextts/utils/maskgct/models/codec/ns3_codec/alias_free_torch/filter.py +96 -0
  226. xinference/thirdparty/indextts/utils/maskgct/models/codec/ns3_codec/alias_free_torch/resample.py +57 -0
  227. xinference/thirdparty/indextts/utils/maskgct/models/codec/ns3_codec/facodec.py +1222 -0
  228. xinference/thirdparty/indextts/utils/maskgct/models/codec/ns3_codec/gradient_reversal.py +35 -0
  229. xinference/thirdparty/indextts/utils/maskgct/models/codec/ns3_codec/melspec.py +102 -0
  230. xinference/thirdparty/indextts/utils/maskgct/models/codec/ns3_codec/quantize/__init__.py +7 -0
  231. xinference/thirdparty/indextts/utils/maskgct/models/codec/ns3_codec/quantize/fvq.py +116 -0
  232. xinference/thirdparty/indextts/utils/maskgct/models/codec/ns3_codec/quantize/rvq.py +87 -0
  233. xinference/thirdparty/indextts/utils/maskgct/models/codec/ns3_codec/transformer.py +234 -0
  234. xinference/thirdparty/indextts/utils/maskgct/models/codec/speechtokenizer/model.py +184 -0
  235. xinference/thirdparty/indextts/utils/maskgct/models/codec/speechtokenizer/modules/__init__.py +27 -0
  236. xinference/thirdparty/indextts/utils/maskgct/models/codec/speechtokenizer/modules/conv.py +346 -0
  237. xinference/thirdparty/indextts/utils/maskgct/models/codec/speechtokenizer/modules/lstm.py +46 -0
  238. xinference/thirdparty/indextts/utils/maskgct/models/codec/speechtokenizer/modules/norm.py +37 -0
  239. xinference/thirdparty/indextts/utils/maskgct/models/codec/speechtokenizer/modules/quantization/__init__.py +14 -0
  240. xinference/thirdparty/indextts/utils/maskgct/models/codec/speechtokenizer/modules/quantization/ac.py +317 -0
  241. xinference/thirdparty/indextts/utils/maskgct/models/codec/speechtokenizer/modules/quantization/core_vq.py +388 -0
  242. xinference/thirdparty/indextts/utils/maskgct/models/codec/speechtokenizer/modules/quantization/distrib.py +135 -0
  243. xinference/thirdparty/indextts/utils/maskgct/models/codec/speechtokenizer/modules/quantization/vq.py +125 -0
  244. xinference/thirdparty/indextts/utils/maskgct/models/codec/speechtokenizer/modules/seanet.py +414 -0
  245. xinference/thirdparty/indextts/utils/maskgct/models/codec/vevo/vevo_repcodec.py +592 -0
  246. xinference/thirdparty/indextts/utils/maskgct/models/tts/maskgct/ckpt/wav2vec2bert_stats.pt +0 -0
  247. xinference/thirdparty/indextts/utils/maskgct/models/tts/maskgct/llama_nar.py +650 -0
  248. xinference/thirdparty/indextts/utils/maskgct/models/tts/maskgct/maskgct_s2a.py +503 -0
  249. xinference/thirdparty/indextts/utils/maskgct_utils.py +259 -0
  250. xinference/thirdparty/indextts/utils/text_utils.py +41 -0
  251. xinference/thirdparty/indextts/utils/typical_sampling.py +30 -0
  252. xinference/thirdparty/indextts/utils/utils.py +93 -0
  253. xinference/thirdparty/indextts/utils/webui_utils.py +42 -0
  254. xinference/thirdparty/indextts/utils/xtransformers.py +1247 -0
  255. xinference/thirdparty/indextts/vqvae/__init__.py +0 -0
  256. xinference/thirdparty/indextts/vqvae/xtts_dvae.py +395 -0
  257. xinference/thirdparty/melo/text/chinese_mix.py +2 -2
  258. xinference/types.py +9 -0
  259. xinference/ui/gradio/media_interface.py +66 -8
  260. xinference/ui/web/ui/build/asset-manifest.json +6 -6
  261. xinference/ui/web/ui/build/index.html +1 -1
  262. xinference/ui/web/ui/build/static/css/main.5ea97072.css +2 -0
  263. xinference/ui/web/ui/build/static/css/main.5ea97072.css.map +1 -0
  264. xinference/ui/web/ui/build/static/js/main.45e78536.js +3 -0
  265. xinference/ui/web/ui/build/static/js/{main.1086c759.js.LICENSE.txt → main.45e78536.js.LICENSE.txt} +0 -7
  266. xinference/ui/web/ui/build/static/js/main.45e78536.js.map +1 -0
  267. xinference/ui/web/ui/node_modules/.cache/babel-loader/089c38df5f52348d212ed868dda5c518a42e0c2762caed4175487c0405830c35.json +1 -0
  268. xinference/ui/web/ui/node_modules/.cache/babel-loader/2b6e3a5b6eb2c5c5f2d007e68cd46c372721cd52bf63508adcdb21ecf79241d8.json +1 -0
  269. xinference/ui/web/ui/node_modules/.cache/babel-loader/2d887825fd07a56f872eda4420da25fba0b5b62a23bdcc6c6da1a5281887f618.json +1 -0
  270. xinference/ui/web/ui/node_modules/.cache/babel-loader/4001f9c3e64e73a4f2158826650c174a59d5e3f89ddecddf17cbb6bb688cc4ca.json +1 -0
  271. xinference/ui/web/ui/node_modules/.cache/babel-loader/4a7018a69e6b7f90fc313248c2aa86f2a8f1eb1db120df586047a8023549b44b.json +1 -0
  272. xinference/ui/web/ui/node_modules/.cache/babel-loader/64b12aaa1c1d1bf53820ada8a63769067c0ccc5aab46b32348eb1917ae7f2a11.json +1 -0
  273. xinference/ui/web/ui/node_modules/.cache/babel-loader/7275b67c78ec76ce38a686bb8a576d8c9cecf54e1573614c84859d538efb9be5.json +1 -0
  274. xinference/ui/web/ui/node_modules/.cache/babel-loader/a68b6ee3b31eadc051fb95ce8f8ccb9c2e8b52c60f290dbab545a1917e065282.json +1 -0
  275. xinference/ui/web/ui/node_modules/.cache/babel-loader/ae8771cc37693feb160fa8727231312a0c54ef2d1d1ca893be568cd70016ca7e.json +1 -0
  276. xinference/ui/web/ui/node_modules/.cache/babel-loader/bb4e8722d2d41d87f1fce3661bc8937bffe9448e231fc5f0462630849e851592.json +1 -0
  277. xinference/ui/web/ui/node_modules/.cache/babel-loader/be6aada1ee4adc2bbf65dbe56d17db32bb3b5478be05d6b527805a8ba6cfb2b9.json +1 -0
  278. xinference/ui/web/ui/node_modules/.cache/babel-loader/de91c352653c233cf0cb6674e6e04049a44fd0e1156560de65d5c4620521391e.json +1 -0
  279. xinference/ui/web/ui/node_modules/.cache/babel-loader/e85f7002fc325c83b9c9cd8a1619e5b3ebc701d30e811afc284b88e6ae710cb5.json +1 -0
  280. xinference/ui/web/ui/node_modules/.cache/babel-loader/e8b603c78944bf3d213639078bfe155ff5c0dfa4048a93cbb967cad6a4eb4ff3.json +1 -0
  281. xinference/ui/web/ui/node_modules/.cache/babel-loader/ea2a26361204e70cf1018d6990fb6354bed82b3ac69690391e0f100385e7abb7.json +1 -0
  282. xinference/ui/web/ui/node_modules/.cache/babel-loader/f05535160a508b2a312de546a6de234776c613db276479ea4253c0b1bdeeb7d6.json +1 -0
  283. xinference/ui/web/ui/node_modules/.cache/babel-loader/f09ba9e11106bd59a0de10cc85c55084097729dcab575f43dfcf07375961ed87.json +1 -0
  284. xinference/ui/web/ui/node_modules/.package-lock.json +0 -33
  285. xinference/ui/web/ui/package-lock.json +0 -34
  286. xinference/ui/web/ui/package.json +0 -1
  287. xinference/ui/web/ui/src/locales/en.json +9 -3
  288. xinference/ui/web/ui/src/locales/ja.json +9 -3
  289. xinference/ui/web/ui/src/locales/ko.json +9 -3
  290. xinference/ui/web/ui/src/locales/zh.json +9 -3
  291. {xinference-1.10.0.dist-info → xinference-1.11.0.dist-info}/METADATA +24 -6
  292. {xinference-1.10.0.dist-info → xinference-1.11.0.dist-info}/RECORD +296 -77
  293. xinference/ui/web/ui/build/static/css/main.013f296b.css +0 -2
  294. xinference/ui/web/ui/build/static/css/main.013f296b.css.map +0 -1
  295. xinference/ui/web/ui/build/static/js/main.1086c759.js +0 -3
  296. xinference/ui/web/ui/build/static/js/main.1086c759.js.map +0 -1
  297. xinference/ui/web/ui/node_modules/.cache/babel-loader/0b0f77000cc1b482ca091cfbcae511dfe02f08916971645fad21d0b1234d04a2.json +0 -1
  298. xinference/ui/web/ui/node_modules/.cache/babel-loader/1c5f8ff423a7c9202bea60b15680f04b1e9964b445b0da3f86c6ff70cf24e797.json +0 -1
  299. xinference/ui/web/ui/node_modules/.cache/babel-loader/44ce7993e344980e3ed4f13e8f69237d4a5dfc60e37ca6b54f51f8ee1357bd67.json +0 -1
  300. xinference/ui/web/ui/node_modules/.cache/babel-loader/4aec1cc414ac3ebb3481d3d915e4db597d9127de813291346eacb8554ab170d4.json +0 -1
  301. xinference/ui/web/ui/node_modules/.cache/babel-loader/644cfec52f3c57a6e222ce60f112237a1efefe9835efd9aad857a685f53d8eed.json +0 -1
  302. xinference/ui/web/ui/node_modules/.cache/babel-loader/663436f72af53fe0d72394f56d003fa4e0bba489e5bb4e483fd34b00f84637f7.json +0 -1
  303. xinference/ui/web/ui/node_modules/.cache/babel-loader/69db82ca9bfe27fe417cc6cf2b1716b09be9c6f0cd198530f12bfc60e801bbcf.json +0 -1
  304. xinference/ui/web/ui/node_modules/.cache/babel-loader/85087e27618d740c236bf159f30e0219db443ab55f0997388eed5fde6f9e90cc.json +0 -1
  305. xinference/ui/web/ui/node_modules/.cache/babel-loader/88b07838348864aa86c672be3bbca1e9f58f6f3a2881b32070ec27f4e7b449d1.json +0 -1
  306. xinference/ui/web/ui/node_modules/.cache/babel-loader/8b8cd408ccfbe115acef27ccfa5b233da8597131a2a5712add13e1e4d5d4504b.json +0 -1
  307. xinference/ui/web/ui/node_modules/.cache/babel-loader/a23824fe746b9c6ca5eee9159b5764d1ff1653c1d856288c0f75c742bbb0023b.json +0 -1
  308. xinference/ui/web/ui/node_modules/.cache/babel-loader/a3eb18af328280b139693c9092dff2a0ef8c9a967e6c8956ceee0996611f1984.json +0 -1
  309. xinference/ui/web/ui/node_modules/.cache/babel-loader/bc1aacc65a102db325ca61bcd2f681e1ae22c36a1f1d98a6ff5e4ad49dc7544f.json +0 -1
  310. xinference/ui/web/ui/node_modules/.cache/babel-loader/c682fd521747c19dae437d83ce3235a306ce6b68e24a117bc57c27ebb8d1f1ca.json +0 -1
  311. xinference/ui/web/ui/node_modules/.cache/babel-loader/d5c224be7081f18cba1678b7874a9782eba895df004874ff8f243f94ba79942a.json +0 -1
  312. xinference/ui/web/ui/node_modules/.cache/babel-loader/f7f18bfb539b036a6a342176dd98a85df5057a884a8da978d679f2a0264883d0.json +0 -1
  313. xinference/ui/web/ui/node_modules/clipboard/.babelrc.json +0 -11
  314. xinference/ui/web/ui/node_modules/clipboard/.eslintrc.json +0 -24
  315. xinference/ui/web/ui/node_modules/clipboard/.prettierrc.json +0 -9
  316. xinference/ui/web/ui/node_modules/clipboard/bower.json +0 -18
  317. xinference/ui/web/ui/node_modules/clipboard/composer.json +0 -25
  318. xinference/ui/web/ui/node_modules/clipboard/package.json +0 -63
  319. xinference/ui/web/ui/node_modules/delegate/package.json +0 -31
  320. xinference/ui/web/ui/node_modules/good-listener/bower.json +0 -11
  321. xinference/ui/web/ui/node_modules/good-listener/package.json +0 -35
  322. xinference/ui/web/ui/node_modules/select/bower.json +0 -13
  323. xinference/ui/web/ui/node_modules/select/package.json +0 -29
  324. xinference/ui/web/ui/node_modules/tiny-emitter/package.json +0 -53
  325. {xinference-1.10.0.dist-info → xinference-1.11.0.dist-info}/WHEEL +0 -0
  326. {xinference-1.10.0.dist-info → xinference-1.11.0.dist-info}/entry_points.txt +0 -0
  327. {xinference-1.10.0.dist-info → xinference-1.11.0.dist-info}/licenses/LICENSE +0 -0
  328. {xinference-1.10.0.dist-info → xinference-1.11.0.dist-info}/top_level.txt +0 -0
xinference/thirdparty/indextts/gpt/transformers_generation_utils.py (new file)
@@ -0,0 +1,4747 @@
1
+ # coding=utf-8
2
+ # Copyright 2020 The Google AI Language Team Authors, Facebook AI Research authors and The HuggingFace Inc. team.
3
+ # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ import copy
17
+ import inspect
18
+ import warnings
19
+ from dataclasses import dataclass
20
+ from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union
21
+
22
+ import numpy as np
23
+ import torch
24
+ import torch.distributed as dist
25
+ from torch import nn
26
+ from torch.nn import functional as F
27
+
28
+ from transformers.cache_utils import (
29
+ Cache,
30
+ DynamicCache,
31
+ EncoderDecoderCache,
32
+ OffloadedCache,
33
+ QuantizedCacheConfig,
34
+ StaticCache,
35
+ )
36
+ from transformers.configuration_utils import PretrainedConfig
37
+ from transformers.integrations.deepspeed import is_deepspeed_zero3_enabled
38
+ from transformers.integrations.fsdp import is_fsdp_managed_module
39
+ from transformers.modeling_outputs import CausalLMOutputWithPast, Seq2SeqLMOutput
40
+ from transformers.pytorch_utils import isin_mps_friendly
41
+ from transformers.tokenization_utils import ExtensionsTrie
42
+ from transformers.utils import (
43
+ ModelOutput,
44
+ is_accelerate_available,
45
+ is_hqq_available,
46
+ is_optimum_quanto_available,
47
+ # is_quanto_available,
48
+ is_torchdynamo_compiling,
49
+ logging,
50
+ )
51
+ from transformers.generation.beam_constraints import DisjunctiveConstraint, PhrasalConstraint
52
+ from transformers.generation.beam_search import BeamScorer, BeamSearchScorer, ConstrainedBeamSearchScorer
53
+ from transformers.generation.candidate_generator import (
54
+ AssistedCandidateGenerator,
55
+ AssistedCandidateGeneratorDifferentTokenizers,
56
+ CandidateGenerator,
57
+ PromptLookupCandidateGenerator,
58
+ _crop_past_key_values,
59
+ _prepare_attention_mask,
60
+ _prepare_token_type_ids,
61
+ )
62
+ from transformers.generation.configuration_utils import (
63
+ NEED_SETUP_CACHE_CLASSES_MAPPING,
64
+ QUANT_BACKEND_CLASSES_MAPPING,
65
+ GenerationConfig,
66
+ GenerationMode,
67
+ )
68
+ from transformers.generation.logits_process import (
69
+ EncoderNoRepeatNGramLogitsProcessor,
70
+ EncoderRepetitionPenaltyLogitsProcessor,
71
+ EpsilonLogitsWarper,
72
+ EtaLogitsWarper,
73
+ ExponentialDecayLengthPenalty,
74
+ ForcedBOSTokenLogitsProcessor,
75
+ ForcedEOSTokenLogitsProcessor,
76
+ HammingDiversityLogitsProcessor,
77
+ InfNanRemoveLogitsProcessor,
78
+ LogitNormalization,
79
+ LogitsProcessorList,
80
+ MinLengthLogitsProcessor,
81
+ MinNewTokensLengthLogitsProcessor,
82
+ MinPLogitsWarper,
83
+ NoBadWordsLogitsProcessor,
84
+ NoRepeatNGramLogitsProcessor,
85
+ PrefixConstrainedLogitsProcessor,
86
+ RepetitionPenaltyLogitsProcessor,
87
+ SequenceBiasLogitsProcessor,
88
+ SuppressTokensAtBeginLogitsProcessor,
89
+ SuppressTokensLogitsProcessor,
90
+ TemperatureLogitsWarper,
91
+ TopKLogitsWarper,
92
+ TopPLogitsWarper,
93
+ TypicalLogitsWarper,
94
+ UnbatchedClassifierFreeGuidanceLogitsProcessor,
95
+ )
96
+ from transformers.generation.stopping_criteria import (
97
+ ConfidenceCriteria,
98
+ EosTokenCriteria,
99
+ MaxLengthCriteria,
100
+ MaxTimeCriteria,
101
+ StoppingCriteria,
102
+ StoppingCriteriaList,
103
+ StopStringCriteria,
104
+ )
105
+
106
+
107
+ if TYPE_CHECKING:
108
+ from transformers.modeling_utils import PreTrainedModel
109
+ from transformers.tokenization_utils_base import PreTrainedTokenizerBase
110
+ from transformers.generation.streamers import BaseStreamer
111
+
112
+ logger = logging.get_logger(__name__)
113
+
114
+ if is_accelerate_available():
115
+ from accelerate.hooks import AlignDevicesHook, add_hook_to_module
116
+
117
+
118
+ @dataclass
119
+ class GenerateDecoderOnlyOutput(ModelOutput):
120
+ """
121
+ Outputs of decoder-only generation models, when using non-beam methods.
122
+
123
+ Args:
124
+ sequences (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
125
+ The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter
126
+ if all batches finished early due to the `eos_token_id`.
127
+ scores (`tuple(torch.FloatTensor)` *optional*, returned when `output_scores=True`):
128
+ Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax)
129
+ at each generation step. Tuple of `torch.FloatTensor` with up to `max_new_tokens` elements (one element for
130
+ each generated token), with each tensor of shape `(batch_size, config.vocab_size)`.
131
+ logits (`tuple(torch.FloatTensor)` *optional*, returned when `output_logits=True`):
132
+ Unprocessed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax)
133
+ at each generation step. Tuple of `torch.FloatTensor` with up to `max_new_tokens` elements (one element for
134
+ each generated token), with each tensor of shape `(batch_size, config.vocab_size)`.
135
+ attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True`):
136
+ Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
137
+ `torch.FloatTensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`.
138
+ hidden_states (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_hidden_states=True`):
139
+ Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
140
+ `torch.FloatTensor` of shape `(batch_size, generated_length, hidden_size)`.
141
+ past_key_values (`tuple(tuple(torch.FloatTensor)))`, *optional*, returned when `use_cache=True`):
142
+ Returns the model cache, used to speed up decoding. Different models have a different cache format, check
143
+ the model's documentation. Usually, a [`~cache_utils.Cache`] instance.
144
+ """
145
+
146
+ sequences: torch.LongTensor = None
147
+ scores: Optional[Tuple[torch.FloatTensor]] = None
148
+ logits: Optional[Tuple[torch.FloatTensor]] = None
149
+ attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
150
+ hidden_states: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
151
+ past_key_values: Optional[Tuple[Tuple[Tuple[torch.FloatTensor]]]] = None
152
+
153
+
154
+ @dataclass
155
+ class GenerateEncoderDecoderOutput(ModelOutput):
156
+ """
157
+ Outputs of encoder-decoder generation models, when using non-beam methods.
158
+
159
+ Args:
160
+ sequences (`torch.LongTensor` of shape `(batch_size*num_return_sequences, sequence_length)`):
161
+ The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter
162
+ if all batches finished early due to the `eos_token_id`.
163
+ scores (`tuple(torch.FloatTensor)` *optional*, returned when `output_scores=True`):
164
+ Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax)
165
+ at each generation step. Tuple of `torch.FloatTensor` with up to `max_new_tokens` elements (one element for
166
+ each generated token), with each tensor of shape `(batch_size, config.vocab_size)`.
167
+ logits (`tuple(torch.FloatTensor)` *optional*, returned when `output_logits=True`):
168
+ Unprocessed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax)
169
+ at each generation step. Tuple of `torch.FloatTensor` with up to `max_new_tokens` elements (one element for
170
+ each generated token), with each tensor of shape `(batch_size, config.vocab_size)`.
171
+ encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True`):
172
+ Tuple of `torch.FloatTensor` (one for each layer of the decoder) of shape `(batch_size, num_heads,
173
+ sequence_length, sequence_length)`.
174
+ encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True`):
175
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
176
+ shape `(batch_size, sequence_length, hidden_size)`.
177
+ decoder_attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True`):
178
+ Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
179
+ `torch.FloatTensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`.
180
+ cross_attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True`):
181
+ Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
182
+ `torch.FloatTensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`.
183
+ decoder_hidden_states (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_hidden_states=True`):
184
+ Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
185
+ `torch.FloatTensor` of shape `(batch_size, generated_length, hidden_size)`.
186
+ past_key_values (`tuple(tuple(torch.FloatTensor)))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
187
+ Returns the model cache, used to speed up decoding. Different models have a different cache format, check
188
+ the model's documentation. Usually, a [`~cache_utils.Cache`] instance.
189
+ """
190
+
191
+ sequences: torch.LongTensor = None
192
+ scores: Optional[Tuple[torch.FloatTensor]] = None
193
+ logits: Optional[Tuple[torch.FloatTensor]] = None
194
+ encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
195
+ encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
196
+ decoder_attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
197
+ cross_attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
198
+ decoder_hidden_states: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
199
+ past_key_values: Optional[Tuple[Tuple[Tuple[torch.FloatTensor]]]] = None
200
+
201
+
202
+ @dataclass
203
+ class GenerateBeamDecoderOnlyOutput(ModelOutput):
204
+ """
205
+ Outputs of decoder-only generation models, when using beam methods.
206
+
207
+ Args:
208
+ sequences (`torch.LongTensor` of shape `(batch_size*num_return_sequences, sequence_length)`):
209
+ The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter
210
+ if all batches finished early due to the `eos_token_id`.
211
+ sequences_scores (`torch.FloatTensor` of shape `(batch_size*num_return_sequences)`, *optional*, returned when `output_scores=True`):
212
+ Final beam scores of the generated `sequences`.
213
+ scores (`tuple(torch.FloatTensor)` *optional*, returned when `output_scores=True`):
214
+ Beam transition scores for each vocabulary token at each generation step. Beam transition scores consisting
215
+ of log probabilities of tokens conditioned on log softmax of previously generated tokens in this beam.
216
+ Tuple of `torch.FloatTensor` with up to `max_new_tokens` elements (one element for each generated token),
217
+ with each tensor of shape `(batch_size*num_beams, config.vocab_size)`.
218
+ logits (`tuple(torch.FloatTensor)` *optional*, returned when `output_logits=True`):
219
+ Unprocessed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax)
220
+ at each generation step. Tuple of `torch.FloatTensor` with up to `max_new_tokens` elements (one element for
221
+ each generated token), with each tensor of shape `(batch_size, config.vocab_size)`.
222
+ beam_indices (`torch.LongTensor`, *optional*, returned when `output_scores=True`):
223
+ Beam indices of generated token id at each generation step. `torch.LongTensor` of shape
224
+ `(batch_size*num_return_sequences, sequence_length)`.
225
+ attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True`):
226
+ Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
227
+ `torch.FloatTensor` of shape `(batch_size*num_beams, num_heads, generated_length, sequence_length)`.
228
+ hidden_states (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_hidden_states=True`):
229
+ Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
230
+ `torch.FloatTensor` of shape `(batch_size*num_beams*num_return_sequences, generated_length, hidden_size)`.
231
+ past_key_values (`tuple(tuple(torch.FloatTensor)))`, *optional*, returned when `use_cache=True`):
232
+ Returns the model cache, used to speed up decoding. Different models have a different cache format, check
233
+ the model's documentation. Usually, a [`~cache_utils.Cache`] instance.
234
+ """
235
+
236
+ sequences: torch.LongTensor = None
237
+ sequences_scores: Optional[torch.FloatTensor] = None
238
+ scores: Optional[Tuple[torch.FloatTensor]] = None
239
+ logits: Optional[Tuple[torch.FloatTensor]] = None
240
+ beam_indices: Optional[torch.LongTensor] = None
241
+ attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
242
+ hidden_states: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
243
+ past_key_values: Optional[Tuple[Tuple[Tuple[torch.FloatTensor]]]] = None
244
+
245
+
246
+ @dataclass
247
+ class GenerateBeamEncoderDecoderOutput(ModelOutput):
248
+ """
249
+ Outputs of encoder-decoder generation models, when using beam methods.
250
+
251
+ Args:
252
+ sequences (`torch.LongTensor` of shape `(batch_size*num_return_sequences, sequence_length)`):
253
+ The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter
254
+ if all batches finished early due to the `eos_token_id`.
255
+ sequences_scores (`torch.FloatTensor` of shape `(batch_size*num_return_sequences)`, *optional*, returned when `output_scores=True`):
256
+ Final beam scores of the generated `sequences`.
257
+ scores (`tuple(torch.FloatTensor)` *optional*, returned when `output_scores=True`):
258
+ Beam transition scores for each vocabulary token at each generation step. Beam transition scores consisting
259
+ of log probabilities of tokens conditioned on log softmax of previously generated tokens in this beam.
260
+ Tuple of `torch.FloatTensor` with up to `max_new_tokens` elements (one element for each generated token),
261
+ with each tensor of shape `(batch_size*num_beams, config.vocab_size)`.
262
+ logits (`tuple(torch.FloatTensor)` *optional*, returned when `output_logits=True`):
263
+ Unprocessed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax)
264
+ at each generation step. Tuple of `torch.FloatTensor` with up to `max_new_tokens` elements (one element for
265
+ each generated token), with each tensor of shape `(batch_size, config.vocab_size)`.
266
+ beam_indices (`torch.LongTensor`, *optional*, returned when `output_scores=True`):
267
+ Beam indices of generated token id at each generation step. `torch.LongTensor` of shape
268
+ `(batch_size*num_return_sequences, sequence_length)`.
269
+ encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True`):
270
+ Tuple of `torch.FloatTensor` (one for each layer of the decoder) of shape `(batch_size, num_heads,
271
+ sequence_length, sequence_length)`.
272
+ encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True`):
273
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
274
+ shape `(batch_size*num_beams*num_return_sequences, sequence_length, hidden_size)`.
275
+ decoder_attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True`):
276
+ Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
277
+ `torch.FloatTensor` of shape `(batch_size*num_beams*num_return_sequences, num_heads, generated_length,
278
+ sequence_length)`.
279
+ cross_attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True`):
280
+ Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
281
+ `torch.FloatTensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`.
282
+ decoder_hidden_states (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_hidden_states=True`):
283
+ Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
284
+ `torch.FloatTensor` of shape `(batch_size*num_beams*num_return_sequences, generated_length, hidden_size)`.
285
+ past_key_values (`tuple(tuple(torch.FloatTensor)))`, *optional*, returned when `use_cache=True`):
286
+ Returns the model cache, used to speed up decoding. Different models have a different cache format, check
287
+ the model's documentation. Usually, a [`~cache_utils.Cache`] instance.
288
+ """
289
+
290
+ sequences: torch.LongTensor = None
291
+ sequences_scores: Optional[torch.FloatTensor] = None
292
+ scores: Optional[Tuple[torch.FloatTensor]] = None
293
+ logits: Optional[Tuple[torch.FloatTensor]] = None
294
+ beam_indices: Optional[torch.LongTensor] = None
295
+ encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
296
+ encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
297
+ decoder_attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
298
+ cross_attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
299
+ decoder_hidden_states: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
300
+ past_key_values: Optional[Tuple[Tuple[Tuple[torch.FloatTensor]]]] = None
301
+
302
+
303
+ # TODO (joao): remove the equivalent classes and typing shortcuts below in v5
304
+ # Equivalent classes (kept for retrocompatibility purposes)
305
+ GreedySearchDecoderOnlyOutput = GenerateDecoderOnlyOutput
306
+ ContrastiveSearchDecoderOnlyOutput = GenerateDecoderOnlyOutput
307
+ SampleDecoderOnlyOutput = GenerateDecoderOnlyOutput
308
+
309
+ ContrastiveSearchEncoderDecoderOutput = GenerateEncoderDecoderOutput
310
+ GreedySearchEncoderDecoderOutput = GenerateEncoderDecoderOutput
311
+ SampleEncoderDecoderOutput = GenerateEncoderDecoderOutput
312
+
313
+ BeamSearchDecoderOnlyOutput = GenerateBeamDecoderOnlyOutput
314
+ BeamSampleDecoderOnlyOutput = GenerateBeamDecoderOnlyOutput
315
+
316
+ BeamSearchEncoderDecoderOutput = GenerateBeamEncoderDecoderOutput
317
+ BeamSampleEncoderDecoderOutput = GenerateBeamEncoderDecoderOutput
318
+
319
+ GreedySearchOutput = Union[GreedySearchEncoderDecoderOutput, GreedySearchDecoderOnlyOutput]
320
+ SampleOutput = Union[SampleEncoderDecoderOutput, SampleDecoderOnlyOutput]
321
+ BeamSearchOutput = Union[BeamSearchEncoderDecoderOutput, BeamSearchDecoderOnlyOutput]
322
+ BeamSampleOutput = Union[BeamSampleEncoderDecoderOutput, BeamSampleDecoderOnlyOutput]
323
+ ContrastiveSearchOutput = Union[ContrastiveSearchEncoderDecoderOutput, ContrastiveSearchDecoderOnlyOutput]
324
+
325
+ # Typing shortcuts
326
+ GenerateNonBeamOutput = Union[GenerateDecoderOnlyOutput, GenerateEncoderDecoderOutput]
327
+ GenerateBeamOutput = Union[GenerateBeamDecoderOnlyOutput, GenerateBeamEncoderDecoderOutput]
328
+ GenerateOutput = Union[GenerateNonBeamOutput, GenerateBeamOutput]
329
+
330
+
331
+ class GenerationMixin:
332
+ """
333
+ A class containing all functions for auto-regressive text generation, to be used as a mixin in [`PreTrainedModel`].
334
+
335
+ The class exposes [`~generation.GenerationMixin.generate`], which can be used for:
336
+ - *greedy decoding* if `num_beams=1` and `do_sample=False`
337
+ - *contrastive search* if `penalty_alpha>0` and `top_k>1`
338
+ - *multinomial sampling* if `num_beams=1` and `do_sample=True`
339
+ - *beam-search decoding* if `num_beams>1` and `do_sample=False`
340
+ - *beam-search multinomial sampling* if `num_beams>1` and `do_sample=True`
341
+ - *diverse beam-search decoding* if `num_beams>1` and `num_beam_groups>1`
342
+ - *constrained beam-search decoding* if `constraints!=None` or `force_words_ids!=None`
343
+ - *assisted decoding* if `assistant_model` or `prompt_lookup_num_tokens` is passed to `.generate()`
344
+
345
+ To learn more about decoding strategies refer to the [text generation strategies guide](../generation_strategies).
346
+ """
347
+
348
+ def prepare_inputs_for_generation(
349
+ self,
350
+ input_ids: torch.LongTensor,
351
+ past_key_values: Optional[Cache] = None,
352
+ attention_mask: Optional[torch.LongTensor] = None,
353
+ inputs_embeds: Optional[torch.FloatTensor] = None,
354
+ cache_position: Optional[torch.LongTensor] = None,
355
+ **kwargs,
356
+ ):
357
+ """
358
+ Prepare the model inputs for generation. In includes operations like computing the 4D attention mask or
359
+ slicing inputs given the existing cache.
360
+
361
+ See the forward pass in the model documentation for expected arguments (different models might have different
362
+ requirements for e.g. `past_key_values`). This function should work as is for most LLMs.
363
+ """
364
+
365
+ # 1. Handle BC:
366
+ model_inputs = {}
367
+ # - some models don't have `Cache` support (which implies they don't expect `cache_position` in `forward`)
368
+ if self._supports_cache_class:
369
+ model_inputs["cache_position"] = cache_position
370
+ # - `cache_position` was not a mandatory input in `prepare_inputs_for_generation` for those models, and this
371
+ # function may be called outside of `generate`. Handle most use cases by creating `cache_position` on the fly
372
+ # (this alternative is not as robust as calling `generate` and letting it create `cache_position`)
373
+ elif cache_position is None:
374
+ past_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
375
+ cache_position = torch.arange(past_length, input_ids.shape[1], dtype=torch.long, device=input_ids.device)
376
+
377
+ # 2. Generic cache-dependent input preparation
378
+ # If we have cache: let's slice `input_ids` through `cache_position`, to keep only the unprocessed tokens
379
+ # Exception 1: when passing input_embeds, input_ids may be missing entries
380
+ # Exception 2: some generation methods do special slicing of input_ids, so we don't need to do it here
381
+ # Exception 3: with synced GPUs cache_position may go out of bounds, but we only want dummy token in that case
382
+ if past_key_values is not None:
383
+ model_inputs["past_key_values"] = past_key_values
384
+ if inputs_embeds is not None or cache_position[-1] >= input_ids.shape[1]: # Exception 1 or Exception 3
385
+ input_ids = input_ids[:, -cache_position.shape[0] :]
386
+ elif input_ids.shape[1] != cache_position.shape[0]: # Default case (the "else", a no op, is Exception 2)
387
+ input_ids = input_ids[:, cache_position]
388
+
389
+ # 3. Prepare base model inputs
390
+ input_ids_key = "decoder_input_ids" if self.config.is_encoder_decoder else "input_ids"
391
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
392
+ if not self.config.is_encoder_decoder:
393
+ if inputs_embeds is not None and cache_position[0] == 0:
394
+ model_inputs[input_ids_key] = None
395
+ model_inputs["inputs_embeds"] = inputs_embeds
396
+ else:
397
+ # `clone` calls in this function ensure a consistent stride. See #32227
398
+ model_inputs[input_ids_key] = input_ids.clone(memory_format=torch.contiguous_format)
399
+ model_inputs["inputs_embeds"] = None
400
+ else:
401
+ model_inputs[input_ids_key] = input_ids.clone(memory_format=torch.contiguous_format)
402
+
403
+ # 4. Create missing `position_ids` on the fly
404
+ if (
405
+ attention_mask is not None
406
+ and kwargs.get("position_ids") is None
407
+ and "position_ids" in set(inspect.signature(self.forward).parameters.keys())
408
+ ):
409
+ position_ids = attention_mask.long().cumsum(-1) - 1
410
+ position_ids.masked_fill_(attention_mask == 0, 1)
411
+ kwargs["position_ids"] = position_ids # placed in kwargs for further processing (see below)
412
+
413
+ # 5. Slice model inputs if it's an input that should have the same length as `input_ids`
414
+ for model_input_name in ["position_ids", "token_type_ids"]:
415
+ model_input = kwargs.get(model_input_name)
416
+ if model_input is not None:
417
+ if past_key_values:
418
+ model_input = model_input[:, -input_ids.shape[1] :]
419
+ model_input = model_input.clone(memory_format=torch.contiguous_format)
420
+ model_inputs[model_input_name] = model_input
421
+
422
+ # 6. Create 4D attention mask is we are using a `StaticCache` (important for performant compiled forward pass)
423
+ if isinstance(past_key_values, StaticCache) and attention_mask.ndim == 2:
424
+ if model_inputs["inputs_embeds"] is not None:
425
+ batch_size, sequence_length, _ = model_inputs["inputs_embeds"].shape
426
+ device = model_inputs["inputs_embeds"].device
427
+ else:
428
+ batch_size, sequence_length = model_inputs[input_ids_key].shape
429
+ device = model_inputs[input_ids_key].device
430
+
431
+ # Create the causal mask with fixed shape in advance, to reduce recompilations. If the function to create
432
+ # the 4D causal mask exists, it should be present in the base model (XXXModel class).
433
+ base_model = getattr(self, self.base_model_prefix, None)
434
+ if base_model is None:
435
+ causal_mask_creation_function = getattr(
436
+ self, "_prepare_4d_causal_attention_mask_with_cache_position", None
437
+ )
438
+ else:
439
+ causal_mask_creation_function = getattr(
440
+ base_model, "_prepare_4d_causal_attention_mask_with_cache_position", None
441
+ )
442
+ if causal_mask_creation_function is None:
443
+ logger.warning_once(
444
+ f"{self.__class__.__name__} has no `_prepare_4d_causal_attention_mask_with_cache_position` method "
445
+ "defined in its base modeling class. Compiled forward passes will be sub-optimal. If you're "
446
+ "writing code, see Llama for an example implementation. If you're a user, please report this "
447
+ "issue on GitHub."
448
+ )
449
+ else:
450
+ attention_mask = causal_mask_creation_function(
451
+ attention_mask,
452
+ sequence_length=sequence_length,
453
+ target_length=past_key_values.get_max_cache_shape(),
454
+ dtype=self.dtype,
455
+ device=device,
456
+ cache_position=cache_position,
457
+ batch_size=batch_size,
458
+ config=self.config,
459
+ past_key_values=past_key_values,
460
+ )
461
+ if attention_mask is not None:
462
+ model_inputs["attention_mask"] = attention_mask
463
+
464
+ # 7. Forward ALL kwargs that are uninitialized (e.g. `use_cache`).
465
+ for key, value in kwargs.items():
466
+ if key not in model_inputs:
467
+ model_inputs[key] = value
468
+
469
+ # 8. Remove unexpected `generate` inputs (TODO @joao: fix trainer and examples)
470
+ model_inputs.pop("labels", None)
471
+ return model_inputs
472
+
473
+ def _prepare_model_inputs(
474
+ self,
475
+ inputs: Optional[torch.Tensor] = None,
476
+ bos_token_id: Optional[torch.Tensor] = None,
477
+ model_kwargs: Optional[Dict[str, torch.Tensor]] = None,
478
+ ) -> Tuple[torch.Tensor, Optional[str], Dict[str, torch.Tensor]]:
479
+ """
480
+ This function extracts the model-specific `inputs` for generation.
481
+ """
482
+ # 1. retrieve all kwargs that are non-None or non-model input related.
483
+ # some encoder-decoder models have different names for model and encoder
484
+ if (
485
+ self.config.is_encoder_decoder
486
+ and hasattr(self, "encoder")
487
+ and self.encoder.main_input_name != self.main_input_name
488
+ ):
489
+ input_name = self.encoder.main_input_name
490
+ else:
491
+ input_name = self.main_input_name
492
+
493
+ model_kwargs = {k: v for k, v in model_kwargs.items() if v is not None or k != input_name}
494
+
495
+ # 2. check whether model_input_name is passed as kwarg
496
+ # if yes and `inputs` is None use kwarg inputs
497
+ inputs_kwarg = model_kwargs.pop(input_name, None)
498
+ if inputs_kwarg is not None and inputs is not None:
499
+ raise ValueError(
500
+ f"`inputs`: {inputs}` were passed alongside {input_name} which is not allowed. "
501
+ f"Make sure to either pass {inputs} or {input_name}=..."
502
+ )
503
+ elif inputs_kwarg is not None:
504
+ inputs = inputs_kwarg
505
+
506
+ # 3. In the presence of `inputs_embeds` for text models:
507
+ # - decoder-only models should complain if the user attempts to pass `inputs_embeds`, but the model
508
+ # doesn't have its forwarding implemented. `inputs_embeds` is kept in `model_kwargs` and can coexist with
509
+ # input_ids (`inputs_embeds` will be used in the 1st generation step, as opposed to `input_ids`)
510
+ # - encoder-decoder models should complain if the user attempts to pass `inputs_embeds` and `input_ids`, and
511
+ # pull the former to inputs. It will be used in place of `input_ids` to get the encoder hidden states.
512
+ if input_name == "input_ids" and "inputs_embeds" in model_kwargs:
513
+ if not self.config.is_encoder_decoder:
514
+ has_inputs_embeds_forwarding = "inputs_embeds" in set(
515
+ inspect.signature(self.prepare_inputs_for_generation).parameters.keys()
516
+ )
517
+ if not has_inputs_embeds_forwarding:
518
+ raise ValueError(
519
+ f"You passed `inputs_embeds` to `.generate()`, but the model class {self.__class__.__name__} "
520
+ "doesn't have its forwarding implemented. See the GPT2 implementation for an example "
521
+ "(https://github.com/huggingface/transformers/pull/21405), and feel free to open a PR with it!"
522
+ )
523
+ # In this case, `input_ids` is moved to the `model_kwargs`, so a few automations (like the creation of
524
+ # the attention mask) can rely on the actual model input.
525
+ model_kwargs["input_ids"] = self._maybe_initialize_input_ids_for_generation(
526
+ inputs, bos_token_id, model_kwargs=model_kwargs
527
+ )
528
+ else:
529
+ if inputs is not None:
530
+ raise ValueError("You passed `inputs_embeds` and `input_ids` to `.generate()`. Please pick one.")
531
+ inputs, input_name = model_kwargs["inputs_embeds"], "inputs_embeds"
532
+
533
+ # 4. if `inputs` is still None, try to create `input_ids` from BOS token
534
+ inputs = self._maybe_initialize_input_ids_for_generation(inputs, bos_token_id, model_kwargs)
535
+ return inputs, input_name, model_kwargs
536
+
537
+ def _maybe_initialize_input_ids_for_generation(
538
+ self,
539
+ inputs: Optional[torch.Tensor] = None,
540
+ bos_token_id: Optional[torch.Tensor] = None,
541
+ model_kwargs: Optional[Dict[str, torch.Tensor]] = None,
542
+ ) -> torch.LongTensor:
543
+ """Initializes input ids for generation, if necessary."""
544
+ if inputs is not None:
545
+ return inputs
546
+
547
+ encoder_outputs = model_kwargs.get("encoder_outputs")
548
+ if self.config.is_encoder_decoder and encoder_outputs is not None:
549
+ # make dummy input_ids with value -100, as a sanity check ensuring that they won't be used for encoding
550
+ shape = encoder_outputs.last_hidden_state.size()[:-1]
551
+ return torch.ones(shape, dtype=torch.long, device=self.device) * -100
552
+
553
+ # If there is some tensor in `model_kwargs`, we can infer the batch size from it. This is helpful with
554
+ # soft-prompting or in multimodal implementations built on top of decoder-only language models.
555
+ batch_size = 1
556
+ for value in model_kwargs.values():
557
+ if isinstance(value, torch.Tensor):
558
+ batch_size = value.shape[0]
559
+ break
560
+
561
+ if "inputs_embeds" in model_kwargs:
562
+ return torch.ones((batch_size, 0), dtype=torch.long, device=self.device)
563
+
564
+ if bos_token_id is None:
565
+ raise ValueError("`bos_token_id` has to be defined when no `input_ids` are provided.")
566
+
567
+ return torch.ones((batch_size, 1), dtype=torch.long, device=self.device) * bos_token_id
568
+
569
+ def _prepare_attention_mask_for_generation(
570
+ self,
571
+ inputs: torch.Tensor,
572
+ pad_token_id: Optional[torch.Tensor],
573
+ eos_token_id: Optional[torch.Tensor],
574
+ ) -> torch.LongTensor:
575
+ # No information for attention mask inference -> return default attention mask
576
+ default_attention_mask = torch.ones(inputs.shape[:2], dtype=torch.long, device=inputs.device)
577
+ if pad_token_id is None:
578
+ return default_attention_mask
579
+
580
+ is_input_ids = len(inputs.shape) == 2 and inputs.dtype in [torch.int, torch.long]
581
+ if not is_input_ids:
582
+ return default_attention_mask
583
+
584
+ is_pad_token_in_inputs = (pad_token_id is not None) and (
585
+ isin_mps_friendly(elements=inputs, test_elements=pad_token_id).any()
586
+ )
587
+ is_pad_token_not_equal_to_eos_token_id = (eos_token_id is None) or ~(
588
+ isin_mps_friendly(elements=eos_token_id, test_elements=pad_token_id).any()
589
+ )
590
+ can_infer_attention_mask = is_pad_token_in_inputs * is_pad_token_not_equal_to_eos_token_id
591
+ attention_mask_from_padding = inputs.ne(pad_token_id).long()
592
+
593
+ attention_mask = (
594
+ attention_mask_from_padding * can_infer_attention_mask + default_attention_mask * ~can_infer_attention_mask
595
+ )
596
+ return attention_mask
597
+
598
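A toy illustration of the inference rule above: the mask can only be derived from padding when a pad token is present in the inputs and is distinct from the EOS token; otherwise the all-ones default is returned.

```python
# Padding-based attention mask inference with assumed toy values.
import torch

pad_token_id = 0
input_ids = torch.tensor([[5, 7, 9], [0, 5, 7]])  # second row is left-padded

attention_mask = input_ids.ne(pad_token_id).long()
# tensor([[1, 1, 1],
#         [0, 1, 1]])
```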
+ def _prepare_encoder_decoder_kwargs_for_generation(
599
+ self,
600
+ inputs_tensor: torch.Tensor,
601
+ model_kwargs,
602
+ model_input_name: Optional[str],
603
+ generation_config: GenerationConfig,
604
+ ) -> Dict[str, Any]:
605
+ # 1. get encoder
606
+ encoder = self.get_encoder()
607
+ # Compatibility with Accelerate big model inference: we need the encoder to output its results on the same device
608
+ # as the inputs.
609
+ if hasattr(self, "hf_device_map"):
610
+ if hasattr(encoder, "_hf_hook"):
611
+ encoder._hf_hook.io_same_device = True
612
+ else:
613
+ add_hook_to_module(encoder, AlignDevicesHook(io_same_device=True))
614
+
615
+ # 2. Prepare encoder args and encoder kwargs from model kwargs and generation config.
616
+ irrelevant_prefix = ["decoder_", "cross_attn", "use_cache"]
617
+ encoder_kwargs = {
618
+ argument: value
619
+ for argument, value in model_kwargs.items()
620
+ if not any(argument.startswith(p) for p in irrelevant_prefix)
621
+ }
622
+ encoder_signature = set(inspect.signature(encoder.forward).parameters)
623
+ encoder_accepts_wildcard = "kwargs" in encoder_signature or "model_kwargs" in encoder_signature
624
+ if not encoder_accepts_wildcard:
625
+ encoder_kwargs = {
626
+ argument: value for argument, value in encoder_kwargs.items() if argument in encoder_signature
627
+ }
628
+ encoder_kwargs["output_attentions"] = generation_config.output_attentions
629
+ encoder_kwargs["output_hidden_states"] = generation_config.output_hidden_states
630
+
631
+ # 3. make sure that encoder returns `ModelOutput`
632
+ model_input_name = model_input_name if model_input_name is not None else self.main_input_name
633
+ encoder_kwargs["return_dict"] = True
634
+ encoder_kwargs[model_input_name] = inputs_tensor
635
+ model_kwargs["encoder_outputs"]: ModelOutput = encoder(**encoder_kwargs) # type: ignore
636
+
637
+ return model_kwargs
638
+
639
+ def _prepare_decoder_input_ids_for_generation(
640
+ self,
641
+ batch_size: int,
642
+ model_input_name: str,
643
+ model_kwargs: Dict[str, torch.Tensor],
644
+ decoder_start_token_id: torch.Tensor,
645
+ device: torch.device = None,
646
+ ) -> Tuple[torch.LongTensor, Dict[str, torch.Tensor]]:
647
+ """Prepares `decoder_input_ids` for generation with encoder-decoder models"""
648
+ # 1. Check whether the user has defined `decoder_input_ids` manually. To facilitate in terms of input naming,
649
+ # we also allow the user to pass it under `input_ids`, if the encoder does not use it as the main input.
650
+ if model_kwargs is not None and "decoder_input_ids" in model_kwargs:
651
+ decoder_input_ids = model_kwargs.pop("decoder_input_ids")
652
+ elif "input_ids" in model_kwargs and model_input_name != "input_ids":
653
+ decoder_input_ids = model_kwargs.pop("input_ids")
654
+ else:
655
+ decoder_input_ids = None
656
+
657
+ # 2. `decoder_start_token_id` must have shape (batch_size, 1)
658
+ if device is None:
659
+ device = self.device
660
+ if decoder_start_token_id.ndim == 1:
661
+ if decoder_start_token_id.shape[0] != batch_size:
662
+ raise ValueError(
663
+ f"`decoder_start_token_id` expected to have length {batch_size} but got {decoder_start_token_id.shape[0]}"
664
+ )
665
+ decoder_start_token_id = decoder_start_token_id.view(-1, 1)
666
+ else:
667
+ decoder_start_token_id = (
668
+ torch.ones((batch_size, 1), dtype=torch.long, device=device) * decoder_start_token_id
669
+ )
670
+
671
+ # 3. Encoder-decoder models expect the `decoder_input_ids` to start with a special token. Let's ensure that.
672
+ # no user input -> use decoder_start_token_id as decoder_input_ids
673
+ if decoder_input_ids is None:
674
+ decoder_input_ids = decoder_start_token_id
675
+ # exception: Donut checkpoints have task-specific decoder starts and don't expect a BOS token. Note that the
676
+ # original checkpoints can't be detected through `self.__class__.__name__.lower()`, needing custom logic.
677
+ # See: https://github.com/huggingface/transformers/pull/31470
678
+ elif "donut" in self.__class__.__name__.lower() or (
679
+ self.config.model_type == "vision-encoder-decoder" and "donut" in self.config.encoder.model_type.lower()
680
+ ):
681
+ pass
682
+ elif self.config.model_type in ["whisper"]:
683
+ pass
684
+ # user input but doesn't start with decoder_start_token_id -> prepend decoder_start_token_id (and adjust
685
+ # decoder_attention_mask if provided)
686
+ elif (decoder_input_ids[:, 0] != decoder_start_token_id[:, 0]).all().item():
687
+ decoder_input_ids = torch.cat([decoder_start_token_id, decoder_input_ids], dim=-1)
688
+ if "decoder_attention_mask" in model_kwargs:
689
+ decoder_attention_mask = model_kwargs["decoder_attention_mask"]
690
+ decoder_attention_mask = torch.cat(
691
+ (torch.ones_like(decoder_attention_mask)[:, :1], decoder_attention_mask),
692
+ dim=-1,
693
+ )
694
+ model_kwargs["decoder_attention_mask"] = decoder_attention_mask
695
+
696
+ return decoder_input_ids, model_kwargs
697
+
698
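A toy illustration of step 3, using plain tensors (no model needed): user-supplied `decoder_input_ids` that do not already start with the start token get it prepended.

```python
# Prepending `decoder_start_token_id` to user-provided decoder inputs (toy values).
import torch

batch_size = 2
decoder_start_token_id = torch.full((batch_size, 1), 2, dtype=torch.long)  # e.g. <s> = 2
decoder_input_ids = torch.tensor([[17, 42], [99, 3]])

if (decoder_input_ids[:, 0] != decoder_start_token_id[:, 0]).all().item():
    decoder_input_ids = torch.cat([decoder_start_token_id, decoder_input_ids], dim=-1)
# tensor([[ 2, 17, 42],
#         [ 2, 99,  3]])
```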
+ @staticmethod
699
+ def _expand_inputs_for_generation(
700
+ expand_size: int = 1,
701
+ is_encoder_decoder: bool = False,
702
+ input_ids: Optional[torch.LongTensor] = None,
703
+ **model_kwargs,
704
+ ) -> Tuple[torch.LongTensor, Dict[str, Any]]:
705
+ """Expands tensors from [batch_size, ...] to [batch_size * expand_size, ...]"""
706
+ # Do not call torch.repeat_interleave if expand_size is 1 because it clones
707
+ # the input tensor and thus requires more memory although no change is applied
708
+ if expand_size == 1:
709
+ return input_ids, model_kwargs
710
+
711
+ def _expand_dict_for_generation(dict_to_expand):
712
+ for key in dict_to_expand:
713
+ if (
714
+ key != "cache_position"
715
+ and dict_to_expand[key] is not None
716
+ and isinstance(dict_to_expand[key], torch.Tensor)
717
+ ):
718
+ dict_to_expand[key] = dict_to_expand[key].repeat_interleave(expand_size, dim=0)
719
+ return dict_to_expand
720
+
721
+ if input_ids is not None:
722
+ input_ids = input_ids.repeat_interleave(expand_size, dim=0)
723
+
724
+ model_kwargs = _expand_dict_for_generation(model_kwargs)
725
+
726
+ if is_encoder_decoder:
727
+ if model_kwargs.get("encoder_outputs") is None:
728
+ raise ValueError("If `is_encoder_decoder` is True, make sure that `encoder_outputs` is defined.")
729
+ model_kwargs["encoder_outputs"] = _expand_dict_for_generation(model_kwargs["encoder_outputs"])
730
+
731
+ return input_ids, model_kwargs
732
+
733
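A toy illustration of the expansion (used e.g. for `num_beams` or `num_return_sequences`): `repeat_interleave` keeps the copies of each sample adjacent, which is the layout beam search expects.

```python
# Expanding [batch_size, ...] to [batch_size * expand_size, ...] (toy values).
import torch

input_ids = torch.tensor([[1, 2, 3], [4, 5, 6]])
expand_size = 2
expanded = input_ids.repeat_interleave(expand_size, dim=0)
# tensor([[1, 2, 3],
#         [1, 2, 3],
#         [4, 5, 6],
#         [4, 5, 6]])
```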
+ def _extract_past_from_model_output(self, outputs: ModelOutput):
734
+ past_key_values = None
735
+ cache_name = "past_key_values"
736
+ if "past_key_values" in outputs:
737
+ past_key_values = outputs.past_key_values
738
+ elif "mems" in outputs:
739
+ past_key_values = outputs.mems
740
+ elif "past_buckets_states" in outputs:
741
+ past_key_values = outputs.past_buckets_states
742
+ elif "cache_params" in outputs:
743
+ past_key_values = outputs.cache_params
744
+ cache_name = "cache_params"
745
+
746
+ return cache_name, past_key_values
747
+
748
+ def _update_model_kwargs_for_generation(
749
+ self,
750
+ outputs: ModelOutput,
751
+ model_kwargs: Dict[str, Any],
752
+ is_encoder_decoder: bool = False,
753
+ num_new_tokens: int = 1,
754
+ ) -> Dict[str, Any]:
755
+ # update past_key_values keeping its naming used in model code
756
+ cache_name, cache = self._extract_past_from_model_output(outputs)
757
+ model_kwargs[cache_name] = cache
758
+ if getattr(outputs, "state", None) is not None:
759
+ model_kwargs["state"] = outputs.state
760
+
761
+ # update token_type_ids with last value
762
+ if "token_type_ids" in model_kwargs:
763
+ token_type_ids = model_kwargs["token_type_ids"]
764
+ model_kwargs["token_type_ids"] = torch.cat([token_type_ids, token_type_ids[:, -1].unsqueeze(-1)], dim=-1)
765
+
766
+ if not is_encoder_decoder:
767
+ # update attention mask
768
+ if "attention_mask" in model_kwargs:
769
+ attention_mask = model_kwargs["attention_mask"]
770
+ model_kwargs["attention_mask"] = torch.cat(
771
+ [attention_mask, attention_mask.new_ones((attention_mask.shape[0], 1))], dim=-1
772
+ )
773
+ else:
774
+ # update decoder attention mask
775
+ if "decoder_attention_mask" in model_kwargs:
776
+ decoder_attention_mask = model_kwargs["decoder_attention_mask"]
777
+ model_kwargs["decoder_attention_mask"] = torch.cat(
778
+ [decoder_attention_mask, decoder_attention_mask.new_ones((decoder_attention_mask.shape[0], 1))],
779
+ dim=-1,
780
+ )
781
+
782
+ if model_kwargs.get("use_cache", True):
783
+ model_kwargs["cache_position"] = model_kwargs["cache_position"][-1:] + num_new_tokens
784
+ else:
785
+ past_positions = model_kwargs.pop("cache_position")
786
+ new_positions = torch.arange(
787
+ past_positions[-1] + 1, past_positions[-1] + num_new_tokens + 1, dtype=past_positions.dtype
788
+ ).to(past_positions.device)
789
+ model_kwargs["cache_position"] = torch.cat((past_positions, new_positions))
790
+ return model_kwargs
791
+
792
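A toy illustration of how `cache_position` is advanced by the code above, with and without a KV cache:

```python
# How cache_position evolves between decoding steps (toy values).
import torch

cache_position = torch.arange(6)  # prefill over a 6-token prompt: [0..5]
num_new_tokens = 1

# With a cache, only the position of the newly generated token is needed:
next_position = cache_position[-1:] + num_new_tokens  # tensor([6])

# Without a cache, the full position history is kept and extended:
new_positions = torch.arange(
    cache_position[-1] + 1, cache_position[-1] + num_new_tokens + 1, dtype=cache_position.dtype
)
full_positions = torch.cat((cache_position, new_positions))  # tensor([0, 1, 2, 3, 4, 5, 6])
```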
+ def _reorder_cache(self, past_key_values, beam_idx):
793
+ raise NotImplementedError(
794
+ f"Make sure that a `_reorder_cache` function is correctly implemented in {self.__class__.__module__} to"
795
+ f" enable beam search for {self.__class__}"
796
+ )
797
+
798
+ def _get_candidate_generator(
799
+ self,
800
+ generation_config: GenerationConfig,
801
+ input_ids: torch.LongTensor,
802
+ inputs_tensor: torch.Tensor,
803
+ assistant_model: "PreTrainedModel",
804
+ logits_processor: LogitsProcessorList,
805
+ target_tokenizer: "PreTrainedTokenizerBase",
806
+ assistant_tokenizer: "PreTrainedTokenizerBase",
807
+ model_kwargs: Dict,
808
+ ) -> CandidateGenerator:
809
+ """
810
+ Returns the candidate generator to be used in `assisted_generation`
811
+ """
812
+ different_tokenizers = all(v is not None for v in (assistant_model, target_tokenizer, assistant_tokenizer))
813
+
814
+ if generation_config.prompt_lookup_num_tokens is not None:
815
+ candidate_generator = PromptLookupCandidateGenerator(
816
+ eos_token_id=generation_config._eos_token_tensor,
817
+ num_output_tokens=generation_config.prompt_lookup_num_tokens,
818
+ max_matching_ngram_size=generation_config.max_matching_ngram_size,
819
+ max_length=generation_config.max_length,
820
+ )
821
+ elif different_tokenizers:
822
+ candidate_generator = AssistedCandidateGeneratorDifferentTokenizers(
823
+ input_ids=input_ids,
824
+ assistant_model=assistant_model,
825
+ generation_config=generation_config,
826
+ model_kwargs=model_kwargs,
827
+ inputs_tensor=inputs_tensor,
828
+ logits_processor=logits_processor,
829
+ target_tokenizer=target_tokenizer,
830
+ assistant_tokenizer=assistant_tokenizer,
831
+ )
832
+ else:
833
+ candidate_generator = AssistedCandidateGenerator(
834
+ input_ids=input_ids,
835
+ assistant_model=assistant_model,
836
+ generation_config=generation_config,
837
+ model_kwargs=model_kwargs,
838
+ inputs_tensor=inputs_tensor,
839
+ logits_processor=logits_processor,
840
+ )
841
+ return candidate_generator
842
+
843
+ def _get_logits_processor(
844
+ self,
845
+ generation_config: GenerationConfig,
846
+ input_ids_seq_length: int,
847
+ encoder_input_ids: torch.LongTensor,
848
+ prefix_allowed_tokens_fn: Callable[[int, torch.Tensor], List[int]],
849
+ logits_processor: Optional[LogitsProcessorList],
850
+ device: str = None,
851
+ model_kwargs: Optional[Dict[str, Any]] = None,
852
+ negative_prompt_ids: Optional[torch.Tensor] = None,
853
+ negative_prompt_attention_mask: Optional[torch.Tensor] = None,
854
+ ) -> LogitsProcessorList:
855
+ """
856
+ This method returns a [`LogitsProcessorList`] object that contains all relevant [`LogitsProcessor`]
857
+ instances used to modify the scores of the language model head.
858
+ """
859
+ # instantiate processors list
860
+ processors = LogitsProcessorList()
861
+
862
+ if generation_config.guidance_scale is not None and generation_config.guidance_scale != 1:
863
+ processors.append(
864
+ UnbatchedClassifierFreeGuidanceLogitsProcessor(
865
+ generation_config.guidance_scale,
866
+ self,
867
+ unconditional_ids=negative_prompt_ids,
868
+ unconditional_attention_mask=negative_prompt_attention_mask,
869
+ use_cache=generation_config.use_cache,
870
+ )
871
+ )
872
+ if generation_config.sequence_bias is not None:
873
+ processors.append(SequenceBiasLogitsProcessor(sequence_bias=generation_config.sequence_bias))
874
+
875
+ if generation_config.diversity_penalty is not None and generation_config.diversity_penalty > 0.0:
876
+ processors.append(
877
+ HammingDiversityLogitsProcessor(
878
+ diversity_penalty=generation_config.diversity_penalty,
879
+ num_beams=generation_config.num_beams,
880
+ num_beam_groups=generation_config.num_beam_groups,
881
+ )
882
+ )
883
+ if (
884
+ generation_config.encoder_repetition_penalty is not None
885
+ and generation_config.encoder_repetition_penalty != 1.0
886
+ ):
887
+ if len(encoder_input_ids.shape) == 2:
888
+ processors.append(
889
+ EncoderRepetitionPenaltyLogitsProcessor(
890
+ penalty=generation_config.encoder_repetition_penalty,
891
+ encoder_input_ids=encoder_input_ids,
892
+ )
893
+ )
894
+ else:
895
+ warnings.warn(
896
+ "Passing `encoder_repetition_penalty` requires some form of `input_ids` to be passed to "
897
+ "`generate`, ignoring the argument.",
898
+ UserWarning,
899
+ )
900
+ if generation_config.repetition_penalty is not None and generation_config.repetition_penalty != 1.0:
901
+ processors.append(RepetitionPenaltyLogitsProcessor(penalty=generation_config.repetition_penalty))
902
+ if generation_config.no_repeat_ngram_size is not None and generation_config.no_repeat_ngram_size > 0:
903
+ processors.append(NoRepeatNGramLogitsProcessor(generation_config.no_repeat_ngram_size))
904
+ if (
905
+ generation_config.encoder_no_repeat_ngram_size is not None
906
+ and generation_config.encoder_no_repeat_ngram_size > 0
907
+ ):
908
+ if len(encoder_input_ids.shape) == 2:
909
+ processors.append(
910
+ EncoderNoRepeatNGramLogitsProcessor(
911
+ generation_config.encoder_no_repeat_ngram_size,
912
+ encoder_input_ids,
913
+ )
914
+ )
915
+ else:
916
+ warnings.warn(
917
+ "Passing `encoder_no_repeat_ngram_size` requires some form of `input_ids` to be passed to "
918
+ "`generate`, ignoring the argument.",
919
+ UserWarning,
920
+ )
921
+ if generation_config.bad_words_ids is not None:
922
+ processors.append(
923
+ NoBadWordsLogitsProcessor(
924
+ generation_config.bad_words_ids,
925
+ generation_config._eos_token_tensor,
926
+ )
927
+ )
928
+ if (
929
+ generation_config.min_length is not None
930
+ and generation_config._eos_token_tensor is not None
931
+ and generation_config.min_length > 0
932
+ ):
933
+ processors.append(
934
+ MinLengthLogitsProcessor(
935
+ generation_config.min_length,
936
+ generation_config._eos_token_tensor,
937
+ device=device,
938
+ )
939
+ )
940
+ if (
941
+ generation_config.min_new_tokens is not None
942
+ and generation_config._eos_token_tensor is not None
943
+ and generation_config.min_new_tokens > 0
944
+ ):
945
+ processors.append(
946
+ MinNewTokensLengthLogitsProcessor(
947
+ input_ids_seq_length,
948
+ generation_config.min_new_tokens,
949
+ generation_config._eos_token_tensor,
950
+ device=device,
951
+ )
952
+ )
953
+ if prefix_allowed_tokens_fn is not None:
954
+ processors.append(
955
+ PrefixConstrainedLogitsProcessor(
956
+ prefix_allowed_tokens_fn,
957
+ generation_config.num_beams // generation_config.num_beam_groups,
958
+ )
959
+ )
960
+ if generation_config.forced_bos_token_id is not None:
961
+ processors.append(
962
+ ForcedBOSTokenLogitsProcessor(
963
+ generation_config.forced_bos_token_id,
964
+ )
965
+ )
966
+ if generation_config.forced_eos_token_id is not None:
967
+ processors.append(
968
+ ForcedEOSTokenLogitsProcessor(
969
+ generation_config.max_length,
970
+ generation_config.forced_eos_token_id,
971
+ device=device,
972
+ )
973
+ )
974
+ if generation_config.remove_invalid_values is True:
975
+ processors.append(InfNanRemoveLogitsProcessor())
976
+ if generation_config.exponential_decay_length_penalty is not None:
977
+ processors.append(
978
+ ExponentialDecayLengthPenalty(
979
+ generation_config.exponential_decay_length_penalty,
980
+ generation_config._eos_token_tensor,
981
+ input_ids_seq_length,
982
+ )
983
+ )
984
+ if generation_config.suppress_tokens is not None:
985
+ processors.append(
986
+ SuppressTokensLogitsProcessor(
987
+ generation_config.suppress_tokens,
988
+ device=device,
989
+ )
990
+ )
991
+ if generation_config.begin_suppress_tokens is not None:
992
+ begin_index = input_ids_seq_length
993
+ begin_index = (
994
+ begin_index
995
+ if (input_ids_seq_length > 1 or generation_config.forced_bos_token_id is None)
996
+ else begin_index + 1
997
+ )
998
+ processors.append(
999
+ SuppressTokensAtBeginLogitsProcessor(
1000
+ generation_config.begin_suppress_tokens,
1001
+ begin_index,
1002
+ device=device,
1003
+ )
1004
+ )
1005
+ if generation_config.forced_decoder_ids is not None:
1006
+ # TODO (sanchit): move this exception to GenerationConfig.validate() when TF & FLAX are aligned with PT
1007
+ raise ValueError(
1008
+ "You have explicitly specified `forced_decoder_ids`. Please remove the `forced_decoder_ids` argument "
1009
+ "in favour of `input_ids` or `decoder_input_ids` respectively.",
1010
+ )
1011
+ if generation_config.watermarking_config is not None:
1012
+ processors.append(
1013
+ generation_config.watermarking_config.construct_processor(self.config.vocab_size, device)
1014
+ )
1015
+
1016
+ # TODO (joao): find a strategy to specify the order of the processors
1017
+ processors = self._merge_criteria_processor_list(processors, logits_processor)
1018
+
1019
+ # Processors previously known as `LogitsWarpers`, only applied with sampling strategies
1020
+ if generation_config.do_sample:
1021
+ # In beam methods, we need to keep at least one non-eos token to explore continuations that might have a
1022
+ # better score (i.e. keep len(list(generation_config._eos_token_tensor)) + 1)
1023
+ if generation_config.num_beams > 1:
1024
+ if isinstance(generation_config._eos_token_tensor, list):
1025
+ min_tokens_to_keep = len(generation_config._eos_token_tensor) + 1
1026
+ elif isinstance(generation_config._eos_token_tensor, torch.Tensor):
1027
+ min_tokens_to_keep = generation_config._eos_token_tensor.shape[0] + 1
1028
+ else:
1029
+ min_tokens_to_keep = 2
1030
+ else:
1031
+ min_tokens_to_keep = 1
1032
+
1033
+ # the following idea is largely copied from this PR: https://github.com/huggingface/transformers/pull/5420/files
1034
+ # all samplers can be found in `generation_utils_samplers.py`
1035
+ if generation_config.temperature is not None and generation_config.temperature != 1.0:
1036
+ processors.append(TemperatureLogitsWarper(generation_config.temperature))
1037
+ if generation_config.top_k is not None and generation_config.top_k != 0:
1038
+ processors.append(
1039
+ TopKLogitsWarper(top_k=generation_config.top_k, min_tokens_to_keep=min_tokens_to_keep)
1040
+ )
1041
+ if generation_config.top_p is not None and generation_config.top_p < 1.0:
1042
+ processors.append(
1043
+ TopPLogitsWarper(top_p=generation_config.top_p, min_tokens_to_keep=min_tokens_to_keep)
1044
+ )
1045
+ if generation_config.min_p is not None:
1046
+ # Applied after temperature scaling (see https://github.com/ggerganov/llama.cpp/pull/3841#issuecomment-2073826084)
1047
+ processors.append(
1048
+ MinPLogitsWarper(min_p=generation_config.min_p, min_tokens_to_keep=min_tokens_to_keep)
1049
+ )
1050
+ if generation_config.typical_p is not None and generation_config.typical_p < 1.0:
1051
+ processors.append(
1052
+ TypicalLogitsWarper(mass=generation_config.typical_p, min_tokens_to_keep=min_tokens_to_keep)
1053
+ )
1054
+ if generation_config.epsilon_cutoff is not None and 0.0 < generation_config.epsilon_cutoff < 1.0:
1055
+ processors.append(
1056
+ EpsilonLogitsWarper(
1057
+ epsilon=generation_config.epsilon_cutoff, min_tokens_to_keep=min_tokens_to_keep
1058
+ )
1059
+ )
1060
+ if generation_config.eta_cutoff is not None and 0.0 < generation_config.eta_cutoff < 1.0:
1061
+ processors.append(
1062
+ EtaLogitsWarper(
1063
+ epsilon=generation_config.eta_cutoff, min_tokens_to_keep=min_tokens_to_keep, device=device
1064
+ )
1065
+ )
1066
+
1067
+ # `LogitNormalization` should always be the last logit processor, when present
1068
+ if generation_config.renormalize_logits is True:
1069
+ processors.append(LogitNormalization())
1070
+ return processors
1071
+
1072
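A rough usage sketch of how user-supplied processors interact with this builder (model and prompt are placeholders). Processors passed via `logits_processor` are merged with the ones derived from the generation config by `_merge_criteria_processor_list` below; supplying a processor of a type that the config already created raises a `ValueError`.

```python
# Passing a custom logits processor to `generate` (illustrative sketch).
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    LogitsProcessorList,
    RepetitionPenaltyLogitsProcessor,
)

tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2")
inputs = tokenizer("Today is", return_tensors="pt")

# Merged with the config-derived processors; duplicating a type that the config
# would also create (e.g. passing repetition_penalty=1.3 as well) raises.
custom = LogitsProcessorList([RepetitionPenaltyLogitsProcessor(penalty=1.3)])
out = model.generate(**inputs, max_new_tokens=10, logits_processor=custom)
```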
+ def _get_stopping_criteria(
1073
+ self,
1074
+ generation_config: GenerationConfig,
1075
+ stopping_criteria: Optional[StoppingCriteriaList],
1076
+ tokenizer: Optional["PreTrainedTokenizerBase"] = None,
1077
+ **kwargs,
1078
+ ) -> StoppingCriteriaList:
1079
+ criteria = StoppingCriteriaList()
1080
+ if generation_config.max_length is not None:
1081
+ max_position_embeddings = getattr(self.config, "max_position_embeddings", None)
1082
+ criteria.append(
1083
+ MaxLengthCriteria(
1084
+ max_length=generation_config.max_length,
1085
+ max_position_embeddings=max_position_embeddings,
1086
+ )
1087
+ )
1088
+ if generation_config.max_time is not None:
1089
+ criteria.append(MaxTimeCriteria(max_time=generation_config.max_time))
1090
+ if generation_config.stop_strings is not None:
1091
+ if tokenizer is None:
1092
+ raise ValueError(
1093
+ "There are one or more stop strings, either in the arguments to `generate` or in the "
1094
+ "model's generation config, but we could not locate a tokenizer. When generating with "
1095
+ "stop strings, you must pass the model's tokenizer to the `tokenizer` argument of `generate`."
1096
+ )
1097
+ criteria.append(StopStringCriteria(stop_strings=generation_config.stop_strings, tokenizer=tokenizer))
1098
+ if generation_config._eos_token_tensor is not None:
1099
+ criteria.append(EosTokenCriteria(eos_token_id=generation_config._eos_token_tensor))
1100
+ if (
1101
+ generation_config.is_assistant
1102
+ and generation_config.assistant_confidence_threshold is not None
1103
+ and generation_config.assistant_confidence_threshold > 0
1104
+ ):
1105
+ criteria.append(
1106
+ ConfidenceCriteria(assistant_confidence_threshold=generation_config.assistant_confidence_threshold)
1107
+ )
1108
+ criteria = self._merge_criteria_processor_list(criteria, stopping_criteria)
1109
+ return criteria
1110
+
1111
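A rough usage sketch of the criteria assembled above, driven entirely from `generate` arguments (model and prompt are placeholders); note that `stop_strings` requires passing the tokenizer.

```python
# Stopping criteria via generate kwargs (illustrative sketch).
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2")
inputs = tokenizer("Today is", return_tensors="pt")

out = model.generate(
    **inputs,
    max_new_tokens=50,
    max_time=2.0,            # -> MaxTimeCriteria
    stop_strings=["\n\n"],   # -> StopStringCriteria (needs the tokenizer below)
    tokenizer=tokenizer,
)
```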
+ def _merge_criteria_processor_list(
1112
+ self,
1113
+ default_list: Union[LogitsProcessorList, StoppingCriteriaList],
1114
+ custom_list: Union[LogitsProcessorList, StoppingCriteriaList],
1115
+ ) -> Union[LogitsProcessorList, StoppingCriteriaList]:
1116
+ if len(custom_list) == 0:
1117
+ return default_list
1118
+ for default in default_list:
1119
+ for custom in custom_list:
1120
+ if type(custom) is type(default):
1121
+ object_type = "stopping criteria" if isinstance(custom, StoppingCriteria) else "logits processor"
1122
+ raise ValueError(
1123
+ f"A custom {object_type} of type {type(custom)} with values {custom} has been passed to"
1124
+ f" `.generate()`, but it has already been created with the values {default}. {default} has been"
1125
+ " created by passing the corresponding arguments to generate or by the model's config default"
1126
+ f" values. If you just want to change the default values of {object_type} consider passing"
1127
+ f" them as arguments to `.generate()` instead of using a custom {object_type}."
1128
+ )
1129
+ default_list.extend(custom_list)
1130
+ return default_list
1131
+
1132
+ def compute_transition_scores(
1133
+ self,
1134
+ sequences: torch.Tensor,
1135
+ scores: Tuple[torch.Tensor],
1136
+ beam_indices: Optional[torch.Tensor] = None,
1137
+ normalize_logits: bool = False,
1138
+ ) -> torch.Tensor:
1139
+ """
1140
+ Computes the transition scores of sequences given the generation scores (and beam indices, if beam search was
1141
+ used). This is a convenient method to quickly obtain the scores of the selected tokens at generation time.
1142
+
1143
+ Parameters:
1144
+ sequences (`torch.LongTensor`):
1145
+ The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or
1146
+ shorter if all batches finished early due to the `eos_token_id`.
1147
+ scores (`tuple(torch.FloatTensor)`):
1148
+ Transition scores for each vocabulary token at each generation step. Beam transition scores consisting
1149
+ of log probabilities of tokens conditioned on log softmax of previously generated tokens in this beam.
1150
+ Tuple of `torch.FloatTensor` with up to `max_new_tokens` elements (one element for each generated token),
1151
+ with each tensor of shape `(batch_size*num_beams, config.vocab_size)`.
1152
+ beam_indices (`torch.LongTensor`, *optional*):
1153
+ Beam indices of generated token id at each generation step. `torch.LongTensor` of shape
1154
+ `(batch_size*num_return_sequences, sequence_length)`. Only required if a `num_beams>1` at
1155
+ generate-time.
1156
+ normalize_logits (`bool`, *optional*, defaults to `False`):
1157
+ Whether to normalize the logits (which, for legacy reasons, may be unnormalized).
1158
+
1159
+ Return:
1160
+ `torch.Tensor`: A `torch.Tensor` of shape `(batch_size*num_return_sequences, sequence_length)` containing
1161
+ the transition scores (logits)
1162
+
1163
+ Examples:
1164
+
1165
+ ```python
1166
+ >>> from transformers import GPT2Tokenizer, AutoModelForCausalLM
1167
+ >>> import numpy as np
1168
+
1169
+ >>> tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
1170
+ >>> model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2")
1171
+ >>> tokenizer.pad_token_id = tokenizer.eos_token_id
1172
+ >>> inputs = tokenizer(["Today is"], return_tensors="pt")
1173
+
1174
+ >>> # Example 1: Print the scores for each token generated with Greedy Search
1175
+ >>> outputs = model.generate(**inputs, max_new_tokens=5, return_dict_in_generate=True, output_scores=True)
1176
+ >>> transition_scores = model.compute_transition_scores(
1177
+ ... outputs.sequences, outputs.scores, normalize_logits=True
1178
+ ... )
1179
+ >>> # input_length is the length of the input prompt for decoder-only models, like the GPT family, and 1 for
1180
+ >>> # encoder-decoder models, like BART or T5.
1181
+ >>> input_length = 1 if model.config.is_encoder_decoder else inputs.input_ids.shape[1]
1182
+ >>> generated_tokens = outputs.sequences[:, input_length:]
1183
+ >>> for tok, score in zip(generated_tokens[0], transition_scores[0]):
1184
+ ... # | token | token string | log probability | probability
1185
+ ... print(f"| {tok:5d} | {tokenizer.decode(tok):8s} | {score.numpy():.3f} | {np.exp(score.numpy()):.2%}")
1186
+ | 262 | the | -1.414 | 24.33%
1187
+ | 1110 | day | -2.609 | 7.36%
1188
+ | 618 | when | -2.010 | 13.40%
1189
+ | 356 | we | -1.859 | 15.58%
1190
+ | 460 | can | -2.508 | 8.14%
1191
+
1192
+ >>> # Example 2: Reconstruct the sequence scores from Beam Search
1193
+ >>> outputs = model.generate(
1194
+ ... **inputs,
1195
+ ... max_new_tokens=5,
1196
+ ... num_beams=4,
1197
+ ... num_return_sequences=4,
1198
+ ... return_dict_in_generate=True,
1199
+ ... output_scores=True,
1200
+ ... )
1201
+ >>> transition_scores = model.compute_transition_scores(
1202
+ ... outputs.sequences, outputs.scores, outputs.beam_indices, normalize_logits=False
1203
+ ... )
1204
+ >>> # If you sum the generated tokens' scores and apply the length penalty, you'll get the sequence scores.
1205
+ >>> # Tip 1: recomputing the scores is only guaranteed to match with `normalize_logits=False`. Depending on the
1206
+ >>> # use case, you might want to recompute it with `normalize_logits=True`.
1207
+ >>> # Tip 2: the output length does NOT include the input length
1208
+ >>> output_length = np.sum(transition_scores.numpy() < 0, axis=1)
1209
+ >>> length_penalty = model.generation_config.length_penalty
1210
+ >>> reconstructed_scores = transition_scores.sum(axis=1) / (output_length**length_penalty)
1211
+ >>> print(np.allclose(outputs.sequences_scores, reconstructed_scores))
1212
+ True
1213
+ ```"""
1214
+ # 1. In the absence of `beam_indices`, we can assume that we come from e.g. greedy search, which is equivalent
1215
+ # to a beam search approach where the first (and only) beam is always selected
1216
+ if beam_indices is None:
1217
+ beam_indices = torch.arange(scores[0].shape[0]).view(-1, 1).to(sequences.device)
1218
+ beam_indices = beam_indices.expand(-1, len(scores))
1219
+
1220
+ # 2. reshape scores as [batch_size*vocab_size, # generation steps] with # generation steps being
1221
+ # seq_len - input_length
1222
+ scores = torch.stack(scores).reshape(len(scores), -1).transpose(0, 1)
1223
+
1224
+ # 3. Optionally normalize the logits (across the vocab dimension)
1225
+ if normalize_logits:
1226
+ scores = scores.reshape(-1, self.config.vocab_size, scores.shape[-1])
1227
+ scores = torch.nn.functional.log_softmax(scores, dim=1)
1228
+ scores = scores.reshape(-1, scores.shape[-1])
1229
+
1230
+ # 4. cut beam_indices to longest beam length
1231
+ beam_indices_mask = beam_indices < 0
1232
+ max_beam_length = (1 - beam_indices_mask.long()).sum(-1).max()
1233
+ beam_indices = beam_indices.clone()[:, :max_beam_length]
1234
+ beam_indices_mask = beam_indices_mask[:, :max_beam_length]
1235
+
1236
+ # 5. Set indices of beams that finished early to 0; such indices will be masked correctly afterwards
1237
+ beam_indices[beam_indices_mask] = 0
1238
+
1239
+ # 6. multiply beam_indices with vocab size to gather correctly from scores
1240
+ beam_sequence_indices = beam_indices * self.config.vocab_size
1241
+
1242
+ # 7. Define which indices contributed to scores
1243
+ cut_idx = sequences.shape[-1] - max_beam_length
1244
+ indices = sequences[:, cut_idx:] + beam_sequence_indices
1245
+
1246
+ # 8. Compute scores
1247
+ transition_scores = scores.gather(0, indices)
1248
+
1249
+ # 9. Mask out transition_scores of beams that stopped early
1250
+ transition_scores[beam_indices_mask] = 0
1251
+
1252
+ return transition_scores
1253
+
1254
+ def _validate_model_class(self):
1255
+ """
1256
+ Confirms that the model class is compatible with generation. If not, raises an exception that points to the
1257
+ right class to use.
1258
+ """
1259
+ # TODO(joao): remove this function in v4.50, i.e. when we remove the inheritance of `GenerationMixin` from
1260
+ # `PreTrainedModel`. With that inheritance removed, all model classes inheriting from `GenerationMixin` can
1261
+ # safely call `GenerationMixin.generate`
1262
+ if not is_torchdynamo_compiling() and not self.can_generate():
1263
+ terminations_with_generation_support = [
1264
+ "ForCausalLM",
1265
+ "ForConditionalGeneration",
1266
+ "ForSpeechSeq2Seq",
1267
+ "ForVision2Seq",
1268
+ ]
1269
+ raise TypeError(
1270
+ f"The current model class ({self.__class__.__name__}) is not compatible with `.generate()`, as "
1271
+ "it doesn't have a language model head. Classes that support generation often end in one of these "
1272
+ f"names: {terminations_with_generation_support}."
1273
+ )
1274
+
1275
+ def _validate_assistant(self, assistant_model, tokenizer, assistant_tokenizer):
1276
+ if assistant_model is None:
1277
+ return
1278
+
1279
+ if self.config.is_encoder_decoder and not assistant_model.config.is_encoder_decoder:
1280
+ attributes_to_check = ["encoder_attention_heads", "encoder_ffn_dim", "encoder_layers"]
1281
+ attributes_to_check = [attr for attr in dir(assistant_model.config) if attr in attributes_to_check]
1282
+ are_equal = all(
1283
+ getattr(self.config, attr) == getattr(assistant_model.config, attr) for attr in attributes_to_check
1284
+ )
1285
+ if not are_equal:
1286
+ raise ValueError(
1287
+ "The main model and the assistant don't have compatible encoder-dependent input shapes. "
1288
+ "Ensure you load the assistant with the correct encoder-decoder class, e.g. `AutoModelForSpeechSeq2Seq` for Whisper."
1289
+ )
1290
+
1291
+ doc_reference = (
1292
+ "(see https://huggingface.co/docs/transformers/en/generation_strategies#universal-assisted-decoding)"
1293
+ )
1294
+ if self.config.get_text_config().vocab_size == assistant_model.config.get_text_config().vocab_size:
1295
+ if assistant_tokenizer is not None:
1296
+ raise ValueError(
1297
+ f"`assistant_tokenizer` is not required when the main and assistant models use the same tokenizer. Please omit `assistant_tokenizer` from `generate()` {doc_reference}."
1298
+ )
1299
+ else:
1300
+ if tokenizer is None or assistant_tokenizer is None:
1301
+ raise ValueError(
1302
+ f"The main and assistant models have different tokenizers. Please provide `tokenizer` and `assistant_tokenizer` to `generate()` {doc_reference}."
1303
+ )
1304
+
1305
+ def _validate_model_kwargs(self, model_kwargs: Dict[str, Any]):
1306
+ """Validates model kwargs for generation. Generate argument typos will also be caught here."""
1307
+ # If a `Cache` instance is passed, checks whether the model is compatible with it
1308
+ if isinstance(model_kwargs.get("past_key_values", None), Cache) and not self._supports_cache_class:
1309
+ raise ValueError(
1310
+ f"{self.__class__.__name__} does not support an instance of `Cache` as `past_key_values`. Please "
1311
+ "check the model documentation for supported cache formats."
1312
+ )
1313
+
1314
+ # Excludes arguments that are handled before calling any model function
1315
+ if self.config.is_encoder_decoder:
1316
+ for key in ["decoder_input_ids"]:
1317
+ model_kwargs.pop(key, None)
1318
+
1319
+ unused_model_args = []
1320
+ model_args = set(inspect.signature(self.prepare_inputs_for_generation).parameters)
1321
+ # `kwargs`/`model_kwargs` is often used to handle optional forward pass inputs like `attention_mask`. If
1322
+ # `prepare_inputs_for_generation` doesn't accept them, then a stricter check can be made ;)
1323
+ if "kwargs" in model_args or "model_kwargs" in model_args:
1324
+ model_args |= set(inspect.signature(self.forward).parameters)
1325
+
1326
+ # Encoder-Decoder models may also need Encoder arguments from `model_kwargs`
1327
+ if self.config.is_encoder_decoder:
1328
+ base_model = getattr(self, self.base_model_prefix, None)
1329
+
1330
+ # allow encoder kwargs
1331
+ encoder = getattr(self, "encoder", None)
1332
+ # `MusicgenForConditionalGeneration` has `text_encoder` and `audio_encoder`.
1333
+ # Also, it has `base_model_prefix = "encoder_decoder"` but there is no `self.encoder_decoder`
1334
+ # TODO: A better way to handle this.
1335
+ if encoder is None and base_model is not None:
1336
+ encoder = getattr(base_model, "encoder", None)
1337
+
1338
+ if encoder is not None:
1339
+ encoder_model_args = set(inspect.signature(encoder.forward).parameters)
1340
+ model_args |= encoder_model_args
1341
+
1342
+ # allow decoder kwargs
1343
+ decoder = getattr(self, "decoder", None)
1344
+ if decoder is None and base_model is not None:
1345
+ decoder = getattr(base_model, "decoder", None)
1346
+
1347
+ if decoder is not None:
1348
+ decoder_model_args = set(inspect.signature(decoder.forward).parameters)
1349
+ model_args |= {f"decoder_{x}" for x in decoder_model_args}
1350
+
1351
+ # allow assistant_encoder_outputs to be passed if we're doing assisted generating
1352
+ if "assistant_encoder_outputs" in model_kwargs:
1353
+ model_args |= {"assistant_encoder_outputs"}
1354
+
1355
+ for key, value in model_kwargs.items():
1356
+ if value is not None and key not in model_args:
1357
+ unused_model_args.append(key)
1358
+
1359
+ if unused_model_args:
1360
+ raise ValueError(
1361
+ f"The following `model_kwargs` are not used by the model: {unused_model_args} (note: typos in the"
1362
+ " generate arguments will also show up in this list)"
1363
+ )
1364
+
1365
+ def _validate_generated_length(self, generation_config, input_ids_length, has_default_max_length):
1366
+ """Performs validation related to the resulting generated length"""
1367
+
1368
+ # Can't throw warnings/exceptions during compilation
1369
+ if is_torchdynamo_compiling():
1370
+ return
1371
+
1372
+ # 1. Max length warnings related to poor parameterization
1373
+ if has_default_max_length and generation_config.max_new_tokens is None and generation_config.max_length == 20:
1374
+ # 20 is the default max_length of the generation config
1375
+ warnings.warn(
1376
+ f"Using the model-agnostic default `max_length` (={generation_config.max_length}) to control the "
1377
+ "generation length. We recommend setting `max_new_tokens` to control the maximum length of the "
1378
+ "generation.",
1379
+ UserWarning,
1380
+ )
1381
+ if input_ids_length >= generation_config.max_length:
1382
+ input_ids_string = "decoder_input_ids" if self.config.is_encoder_decoder else "input_ids"
1383
+ raise ValueError(
1384
+ f"Input length of {input_ids_string} is {input_ids_length}, but `max_length` is set to"
1385
+ f" {generation_config.max_length}. This can lead to unexpected behavior. You should consider"
1386
+ " increasing `max_length` or, better yet, setting `max_new_tokens`."
1387
+ )
1388
+
1389
+ # 2. Min length warnings due to unfeasible parameter combinations
1390
+ min_length_error_suffix = (
1391
+ " Generation will stop at the defined maximum length. You should decrease the minimum length and/or "
1392
+ "increase the maximum length."
1393
+ )
1394
+ if has_default_max_length:
1395
+ min_length_error_suffix += (
1396
+ f" Note that `max_length` is set to {generation_config.max_length}, its default value."
1397
+ )
1398
+ if generation_config.min_length is not None and generation_config.min_length > generation_config.max_length:
1399
+ warnings.warn(
1400
+ f"Unfeasible length constraints: `min_length` ({generation_config.min_length}) is larger than"
1401
+ f" the maximum possible length ({generation_config.max_length})." + min_length_error_suffix,
1402
+ UserWarning,
1403
+ )
1404
+ if generation_config.min_new_tokens is not None:
1405
+ min_length = generation_config.min_new_tokens + input_ids_length
1406
+ if min_length > generation_config.max_length:
1407
+ warnings.warn(
1408
+ f"Unfeasible length constraints: `min_new_tokens` ({generation_config.min_new_tokens}), when "
1409
+ f"added to the prompt length ({input_ids_length}), is larger than"
1410
+ f" the maximum possible length ({generation_config.max_length})." + min_length_error_suffix,
1411
+ UserWarning,
1412
+ )
1413
+
1414
+ def _prepare_generated_length(
1415
+ self,
1416
+ generation_config,
1417
+ has_default_max_length,
1418
+ has_default_min_length,
1419
+ model_input_name,
1420
+ input_ids_length,
1421
+ inputs_tensor,
1422
+ ):
1423
+ """Prepares max and min length in generation configs to avoid clashes between similar attributes"""
1424
+
1425
+ if generation_config.max_new_tokens is not None:
1426
+ if not has_default_max_length and generation_config.max_length is not None:
1427
+ logger.warning(
1428
+ f"Both `max_new_tokens` (={generation_config.max_new_tokens}) and `max_length`(="
1429
+ f"{generation_config.max_length}) seem to have been set. `max_new_tokens` will take precedence. "
1430
+ "Please refer to the documentation for more information. "
1431
+ "(https://huggingface.co/docs/transformers/main/en/main_classes/text_generation)"
1432
+ )
1433
+ generation_config.max_length = generation_config.max_new_tokens + input_ids_length
1434
+
1435
+ # if both `inputs_embeds` and `input_ids` are passed, we do not correct the length
1436
+ # otherwise we need total length [inputs-embeds-len + new-tokens-len] to not go beyond indicated `max_length``
1437
+ elif (
1438
+ model_input_name == "inputs_embeds"
1439
+ and input_ids_length != inputs_tensor.shape[1]
1440
+ and not self.config.is_encoder_decoder
1441
+ ):
1442
+ generation_config.max_length -= inputs_tensor.shape[1]
1443
+
1444
+ # same for min length
1445
+ if generation_config.min_new_tokens is not None:
1446
+ if not has_default_min_length:
1447
+ logger.warning(
1448
+ f"Both `min_new_tokens` (={generation_config.min_new_tokens}) and `min_length`(="
1449
+ f"{generation_config.min_length}) seem to have been set. `min_new_tokens` will take precedence. "
1450
+ "Please refer to the documentation for more information. "
1451
+ "(https://huggingface.co/docs/transformers/main/en/main_classes/text_generation)"
1452
+ )
1453
+ generation_config.min_length = generation_config.min_new_tokens + input_ids_length
1454
+
1455
+ elif (
1456
+ model_input_name == "inputs_embeds"
1457
+ and input_ids_length != inputs_tensor.shape[1]
1458
+ and not self.config.is_encoder_decoder
1459
+ ):
1460
+ generation_config.min_length = max(generation_config.min_length - inputs_tensor.shape[1], 0)
1461
+
1462
+ return generation_config
1463
+
1464
+ def _prepare_generation_config(
1465
+ self, generation_config: Optional[GenerationConfig], **kwargs: Dict
1466
+ ) -> Tuple[GenerationConfig, Dict]:
1467
+ """
1468
+ Prepares the base generation config, then applies any generation configuration options from kwargs. This
1469
+ function handles retrocompatibility with respect to configuration files.
1470
+ """
1471
+ # TODO joao: when we can detect `fullgraph=True` in `torch.compile` (https://github.com/pytorch/pytorch/pull/120400)
1472
+ # replace `is_torchdynamo_compiling` by the corresponding check. As it is, we are being too restrictive with
1473
+ # the parameterization in `fullgraph=False` so as to enable `fullgraph=True`.
1474
+
1475
+ # priority: `generation_config` argument > `model.generation_config` (the default generation config)
1476
+ using_model_generation_config = False
1477
+ if generation_config is None:
1478
+ # legacy: users may modify the model configuration to control generation. To trigger this legacy behavior,
1479
+ # the following conditions must be met
1480
+ # 1) the generation config must have been created from the model config (`_from_model_config` field);
1481
+ # 2) the generation config must have seen no modification since its creation (the hash is the same);
1482
+ # 3) there are non-default generation parameters in the model config.
1483
+ # 4) the user must have set new generation parameters in the model config.
1484
+ # NOTE: `torch.compile` can't compile `hash`, this legacy support is disabled with compilation.
1485
+ if (
1486
+ not is_torchdynamo_compiling()
1487
+ and self.generation_config._from_model_config # 1)
1488
+ and self.generation_config._original_object_hash == hash(self.generation_config) # 2)
1489
+ and len(self.config._get_non_default_generation_parameters()) > 0 # 3)
1490
+ ):
1491
+ new_generation_config = GenerationConfig.from_model_config(self.config)
1492
+ if new_generation_config != self.generation_config: # 4)
1493
+ warnings.warn(
1494
+ "You have modified the pretrained model configuration to control generation. This is a"
1495
+ " deprecated strategy to control generation and will be removed in v5."
1496
+ " Please use and modify the model generation configuration (see"
1497
+ " https://huggingface.co/docs/transformers/generation_strategies#default-text-generation-configuration )",
1498
+ UserWarning,
1499
+ )
1500
+ self.generation_config = new_generation_config
1501
+
1502
+ generation_config = self.generation_config
1503
+ using_model_generation_config = True
1504
+
1505
+ # `torch.compile` can't compile `copy.deepcopy`, arguments in `kwargs` that are part of `generation_config`
1506
+ # will mutate the object with `.update`. As such, passing these arguments through `kwargs` is disabled -- an
1507
+ # exception will be raised in `_validate_model_kwargs`
1508
+ if not is_torchdynamo_compiling():
1509
+ generation_config = copy.deepcopy(generation_config)
1510
+ model_kwargs = generation_config.update(**kwargs)
1511
+ # If `generation_config` is provided, let's fallback ALL special tokens to the default values for the model
1512
+ if not using_model_generation_config:
1513
+ if generation_config.bos_token_id is None:
1514
+ generation_config.bos_token_id = self.generation_config.bos_token_id
1515
+ if generation_config.eos_token_id is None:
1516
+ generation_config.eos_token_id = self.generation_config.eos_token_id
1517
+ if generation_config.pad_token_id is None:
1518
+ generation_config.pad_token_id = self.generation_config.pad_token_id
1519
+ if generation_config.decoder_start_token_id is None:
1520
+ generation_config.decoder_start_token_id = self.generation_config.decoder_start_token_id
1521
+ else:
1522
+ model_kwargs = kwargs
1523
+
1524
+ return generation_config, model_kwargs
1525
+
1526
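A small sketch of the precedence handled here, assuming recent `transformers` behavior: an explicit `GenerationConfig` wins over `model.generation_config`, and ad-hoc kwargs are folded in via `GenerationConfig.update`, which hands back whatever it did not consume (those leftovers are then treated as model kwargs).

```python
# GenerationConfig precedence and kwarg splitting (illustrative sketch).
from transformers import GenerationConfig

generation_config = GenerationConfig(max_new_tokens=32, do_sample=True, top_p=0.9)

# `update` consumes known generation options and returns everything else;
# `attention_mask` is not a generation option, so it is handed back as a model kwarg.
leftover = generation_config.update(temperature=0.7, attention_mask=None)
assert generation_config.temperature == 0.7
# leftover -> {"attention_mask": None}
```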
+ def _get_initial_cache_position(self, input_ids, model_kwargs):
1527
+ """Calculates `cache_position` for the pre-fill stage based on `input_ids` and optionally past length"""
1528
+ # `torch.compile`-friendly `torch.arange` from a shape -- the lines below are equivalent to `torch.arange`
1529
+ if "inputs_embeds" in model_kwargs and not self.config.is_encoder_decoder:
1530
+ cache_position = torch.ones_like(model_kwargs["inputs_embeds"][0, :, 0], dtype=torch.int64).cumsum(0) - 1
1531
+ elif "decoder_inputs_embeds" in model_kwargs and self.config.is_encoder_decoder:
1532
+ cache_position = (
1533
+ torch.ones_like(model_kwargs["decoder_inputs_embeds"][0, :, 0], dtype=torch.int64).cumsum(0) - 1
1534
+ )
1535
+ else:
1536
+ cache_position = torch.ones_like(input_ids[0, :], dtype=torch.int64).cumsum(0) - 1
1537
+
1538
+ past_length = 0
1539
+ if model_kwargs.get("past_key_values") is not None:
1540
+ cache = model_kwargs["past_key_values"]
1541
+ past_length = 0
1542
+ if not isinstance(cache, Cache):
1543
+ past_length = cache[0][0].shape[2]
1544
+ elif hasattr(cache, "get_seq_length") and cache.get_seq_length() is not None:
1545
+ past_length = cache.get_seq_length()
1546
+
1547
+ # TODO(joao): this is not torch.compile-friendly, find a work-around. If the cache is not empty,
1548
+ # end-to-end compilation will yield bad results because `cache_position` will be incorrect.
1549
+ if not is_torchdynamo_compiling():
1550
+ cache_position = cache_position[past_length:]
1551
+
1552
+ model_kwargs["cache_position"] = cache_position
1553
+ return model_kwargs
1554
+
1555
+ def _get_cache(
1556
+ self, cache_implementation: str, batch_size: int, max_cache_len: int, device: torch.device, model_kwargs
1557
+ ) -> Cache:
1558
+ """
1559
+ Sets a cache for `generate` that will persist across calls. A new cache will only be initialized if a
1560
+ new `generate` call requires a larger cache or uses a different batch size.
1561
+
1562
+ Returns the resulting cache object.
1563
+ """
1564
+ cache_cls: Cache = NEED_SETUP_CACHE_CLASSES_MAPPING[cache_implementation]
1565
+ requires_cross_attention_cache = (
1566
+ self.config.is_encoder_decoder or model_kwargs.get("encoder_outputs") is not None
1567
+ )
1568
+
1569
+ if hasattr(self, "_cache"):
1570
+ cache_to_check = self._cache.self_attention_cache if requires_cross_attention_cache else self._cache
1571
+
1572
+ if cache_implementation == "sliding_window":
1573
+ max_cache_len = min(self.config.sliding_window, max_cache_len)
1574
+
1575
+ need_new_cache = (
1576
+ not hasattr(self, "_cache")
1577
+ or (not isinstance(cache_to_check, cache_cls))
1578
+ or cache_to_check.batch_size != batch_size
1579
+ )
1580
+ if cache_implementation != "mamba":
1581
+ need_new_cache = need_new_cache or cache_to_check.max_cache_len < max_cache_len
1582
+
1583
+ if requires_cross_attention_cache and hasattr(self, "_cache"):
1584
+ need_new_cache = (
1585
+ need_new_cache
1586
+ or self._cache.cross_attention_cache.max_cache_len != model_kwargs["encoder_outputs"][0].shape[1]
1587
+ )
1588
+
1589
+ if need_new_cache:
1590
+ if hasattr(self.config, "_pre_quantization_dtype"):
1591
+ cache_dtype = self.config._pre_quantization_dtype
1592
+ else:
1593
+ if not is_torchdynamo_compiling():
1594
+ cache_dtype = self.dtype
1595
+ else:
1596
+ # NOTE: self.dtype is not compatible with torch.compile, as it calls `self.parameters()`.
1597
+ # Workaround: trust the lm_head, whose attribute name is somewhat consistent across generative
1598
+ # models. May cause troubles with non-text modalities.
1599
+ cache_dtype = self.get_output_embeddings().weight.dtype
1600
+
1601
+ def get_layer_device_map(execution_device_map: Optional[dict] = None):
1602
+ if execution_device_map is None:
1603
+ return None
1604
+ elif len(execution_device_map) == 1 and "" in execution_device_map:
1605
+ return {idx: execution_device_map[""] for idx in range(self.config.num_hidden_layers)}
1606
+ layer_device_map = {}
1607
+ for layer in execution_device_map:
1608
+ for idx in range(self.config.num_hidden_layers):
1609
+ if f".{idx}." in f"{layer}.":
1610
+ layer_device_map[idx] = execution_device_map[layer]
1611
+ break
1612
+ for idx in range(self.config.num_hidden_layers):
1613
+ if idx not in layer_device_map:
1614
+ raise RuntimeError(f"layer {idx} has not been mapped to a device.")
1615
+ return layer_device_map
1616
+
1617
+ execution_device_map = None
1618
+ # Taken from dispatch_model from accelerate.
1619
+ # This is needed here if we don't want to make changes in accelerate in order to save execution_device
1620
+ # For offloaded case, we need to get the execution device, not just the device where it is offloaded
1621
+ if hasattr(self, "hf_device_map"):
1622
+ main_device = [d for d in self.hf_device_map.values() if d not in ["cpu", "disk"]][0]
1623
+ execution_device_map = {
1624
+ name: main_device if device in ["cpu", "disk"] else device
1625
+ for name, device in self.hf_device_map.items()
1626
+ }
1627
+ layer_device_map = get_layer_device_map(execution_device_map)
1628
+
1629
+ cache_kwargs = {
1630
+ "config": self.config.get_text_config(),
1631
+ "batch_size": batch_size,
1632
+ "max_cache_len": max_cache_len,
1633
+ "device": device,
1634
+ "dtype": cache_dtype,
1635
+ "layer_device_map": layer_device_map,
1636
+ }
1637
+ self._cache = cache_cls(**cache_kwargs)
1638
+ if requires_cross_attention_cache:
1639
+ encoder_kwargs = cache_kwargs.copy()
1640
+ encoder_kwargs["max_cache_len"] = model_kwargs["encoder_outputs"][0].shape[1]
1641
+ self._cache = EncoderDecoderCache(self._cache, cache_cls(**encoder_kwargs))
1642
+ else:
1643
+ self._cache.reset()
1644
+ return self._cache
1645
+
1646
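A rough usage sketch: the static cache is requested through the generation config (or a `generate` kwarg) and is then reused, or re-allocated, across calls by the helper above. The model id below is only an example; any model class with static-cache support works the same way.

```python
# Requesting a static KV cache from `generate` (illustrative sketch).
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "meta-llama/Llama-3.2-1B"  # example id; substitute any static-cache-capable model
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)
inputs = tokenizer("Today is", return_tensors="pt")

out = model.generate(
    **inputs,
    max_new_tokens=32,
    cache_implementation="static",  # looked up in NEED_SETUP_CACHE_CLASSES_MAPPING
)
```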
+ def _supports_default_dynamic_cache(self) -> bool:
1647
+ """
1648
+ Return `True` if current model can use a `DynamicCache` instance when initializing the `past_key_values`.
1649
+ This is mostly the same as the `_supports_cache_class` attribute, but adds an exception for the `Jamba` model, which
1650
+ uses its own `HybridMambaAttentionDynamicCache` and does not need to initialize the Cache in advance in
1651
+ order to save memory (because no back and forth `to_legacy_cache` and `from_legacy_cache` will be performed
1652
+ for `HybridMambaAttentionDynamicCache`).
1653
+ """
1654
+ return (
1655
+ self._supports_cache_class
1656
+ and "jamba" not in self.__class__.__name__.lower()
1657
+ and "zamba" not in self.__class__.__name__.lower()
1658
+ )
1659
+
1660
+ def _prepare_cache_for_generation(
1661
+ self,
1662
+ generation_config: GenerationConfig,
1663
+ model_kwargs: Dict,
1664
+ assistant_model: "PreTrainedModel",
1665
+ batch_size: int,
1666
+ max_cache_length: int,
1667
+ device: torch.device,
1668
+ ) -> None:
1669
+ """
1670
+ Prepares the cache for generation (if applicable), given `generate`'s parameterization. If a cache is
1671
+ instantiated, writes it to `model_kwargs`, under the name expected by the model.
1672
+ """
1673
+
1674
+ cache_name = "past_key_values" if "mamba" not in self.__class__.__name__.lower() else "cache_params"
1675
+ requires_cross_attention_cache = (
1676
+ self.config.is_encoder_decoder or model_kwargs.get("encoder_outputs") is not None
1677
+ )
1678
+
1679
+ # Quick escape route 1: if the user specifies a cache, we only need to:
1680
+ # a) check for conflicting `generate` arguments
1681
+ # b) convert to the new cache format (if the user passes a legacy cache and model supports it)
1682
+ user_defined_cache = model_kwargs.get(cache_name)
1683
+ if user_defined_cache is not None:
1684
+ if generation_config.cache_implementation is not None:
1685
+ raise ValueError(
1686
+ f"Passing both `cache_implementation` (used to initialize certain caches) and `{cache_name}` (a "
1687
+ "Cache object) is unsupported. Please use only one of the two."
1688
+ )
1689
+ if isinstance(user_defined_cache, tuple) and self._supports_default_dynamic_cache():
1690
+ model_kwargs[cache_name] = (
1691
+ DynamicCache.from_legacy_cache(user_defined_cache)
1692
+ if not requires_cross_attention_cache
1693
+ else EncoderDecoderCache.from_legacy_cache(user_defined_cache)
1694
+ )
1695
+ return
1696
+
1697
+ # Quick escape route 2: if the user specifies no cache is to be used. (conflicting arguments are handled in
1698
+ # `generation_config.validate()`)
1699
+ if generation_config.use_cache is False:
1700
+ return
1701
+
1702
+ # Quick escape route 3: model that only supports legacy caches = nothing to prepare
1703
+ if not self._supports_default_dynamic_cache():
1704
+ if generation_config.cache_implementation is not None:
1705
+ warnings.warn(
1706
+ "This model does not support `Cache` instances, it only supports the legacy cache format (tuple "
1707
+ f"of tuples). `cache_implementation` (set to {generation_config.cache_implementation}) will be "
1708
+ "ignored.",
1709
+ UserWarning,
1710
+ )
1711
+ return
1712
+
1713
+ # Otherwise we NEED to prepare a cache, based on `generation_config.cache_implementation`
1714
+
1715
+ # TODO(joao): support static caches in assisted generation. assisted generation needs to roll back caches,
1716
+ # which is only supported in dynamic caches atm
1717
+ if assistant_model is not None and generation_config.cache_implementation is not None:
1718
+ logger.warning_once(
1719
+ "An assistant model is provided, using a dynamic cache instead of a cache of type="
1720
+ f"'{generation_config.cache_implementation}'."
1721
+ )
1722
+ generation_config.cache_implementation = None
1723
+
1724
+ if generation_config.cache_implementation is not None:
1725
+ if generation_config.cache_implementation in NEED_SETUP_CACHE_CLASSES_MAPPING:
1726
+ if generation_config.cache_implementation == "static" and not self._supports_static_cache:
1727
+ raise ValueError(
1728
+ "This model does not support `cache_implementation='static'`. Please check the following "
1729
+ "issue: https://github.com/huggingface/transformers/issues/28981"
1730
+ )
1731
+ model_kwargs[cache_name] = self._get_cache(
1732
+ cache_implementation=generation_config.cache_implementation,
1733
+ batch_size=max(generation_config.num_beams, generation_config.num_return_sequences) * batch_size,
1734
+ max_cache_len=max_cache_length,
1735
+ device=device,
1736
+ model_kwargs=model_kwargs,
1737
+ )
1738
+ elif generation_config.cache_implementation == "quantized":
1739
+ if not self._supports_quantized_cache:
1740
+ raise ValueError(
1741
+ "This model does not support the quantized cache. If you want your model to support quantized "
1742
+ "cache, please open an issue and tag @zucchini-nlp."
1743
+ )
1744
+
1745
+ cache_config = (
1746
+ generation_config.cache_config
1747
+ if generation_config.cache_config is not None
1748
+ else QuantizedCacheConfig()
1749
+ )
1750
+ cache_class = QUANT_BACKEND_CLASSES_MAPPING[cache_config.backend]
1751
+
1752
+ # if cache_config.backend == "quanto" and not (is_optimum_quanto_available() or is_quanto_available()):
1753
+ if cache_config.backend == "quanto" and not is_optimum_quanto_available():
1754
+ raise ImportError(
1755
+ "You need to install optimum-quanto in order to use KV cache quantization with optimum-quanto backend. "
1756
+ "Please install it via with `pip install optimum-quanto`"
1757
+ )
1758
+ elif cache_config.backend == "HQQ" and not is_hqq_available():
1759
+ raise ImportError(
1760
+ "You need to install `HQQ` in order to use KV cache quantization with HQQ backend. "
1761
+ "Please install it via with `pip install hqq`"
1762
+ )
1763
+
1764
+ model_kwargs[cache_name] = cache_class(cache_config)
1765
+ elif generation_config.cache_implementation == "offloaded":
1766
+ model_kwargs[cache_name] = OffloadedCache()
1767
+
1768
+ # Use DynamicCache() instance by default. This will avoid back and forth from legacy format that
1769
+ # keeps copying the cache thus using much more memory
1770
+ else:
1771
+ model_kwargs[cache_name] = (
1772
+ DynamicCache()
1773
+ if not requires_cross_attention_cache
1774
+ else EncoderDecoderCache(DynamicCache(), DynamicCache())
1775
+ )
1776
+
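From the caller's side, the three paths handled above roughly correspond to the calls below. This is a hedged usage sketch, not part of the vendored code: the model id is a placeholder, and `cache_implementation="static"` only works for architectures whose `_supports_static_cache` is True.

from transformers import AutoModelForCausalLM, AutoTokenizer, DynamicCache

model_id = "your-org/your-model"  # placeholder
tok = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)
inputs = tok("Hello", return_tensors="pt")

# Default path: a DynamicCache is created internally.
model.generate(**inputs, max_new_tokens=8)

# Explicit implementation: `_get_cache` above builds e.g. a static cache.
model.generate(**inputs, max_new_tokens=8, cache_implementation="static")

# User-supplied cache object: taken as-is ("quick escape route 1").
model.generate(**inputs, max_new_tokens=8, past_key_values=DynamicCache())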
1777
+ def _supports_num_logits_to_keep(self) -> bool:
1778
+ """
1779
+ Return True if the current model supports the keyword argument `num_logits_to_keep` in forward()
1780
+ to save memory. Checking it in this way allows to avoid using a new model attribute.
1781
+ """
1782
+ return "num_logits_to_keep" in set(inspect.signature(self.forward).parameters.keys())
1783
+
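The check above is ordinary signature introspection; the toy module below (a hypothetical model, not a real HF class) shows the same test standalone.

import inspect
import torch.nn as nn

class TinyLM(nn.Module):  # hypothetical model exposing the optional kwarg
    def forward(self, input_ids, attention_mask=None, num_logits_to_keep=0):
        return input_ids

supports_it = "num_logits_to_keep" in set(inspect.signature(TinyLM.forward).parameters.keys())
print(supports_it)  # True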
1784
+ def _prepare_special_tokens(
1785
+ self,
1786
+ generation_config: GenerationConfig,
1787
+ kwargs_has_attention_mask: Optional[bool] = None,
1788
+ device: Optional[Union[torch.device, str]] = None,
1789
+ ):
1790
+ """
1791
+ Prepares the special tokens for generation, overwriting the generation config with their processed versions
1792
+ converted to tensor.
1793
+
1794
+ Note that `generation_config` is changed in place and stops being serializable after this method is called.
1795
+ That is no problem if called within `generate` (`generation_config` is a local copy that doesn't leave the
1796
+ function). However, if called outside `generate`, consider creating a copy of `generation_config` first.
1797
+ """
1798
+
1799
+ # Convert special tokens to tensors
1800
+ def _tensor_or_none(token, device=None):
1801
+ if token is None:
1802
+ return token
1803
+
1804
+ device = device if device is not None else self.device
1805
+ if isinstance(token, torch.Tensor):
1806
+ return token.to(device)
1807
+ return torch.tensor(token, device=device, dtype=torch.long)
1808
+
1809
+ bos_token_tensor = _tensor_or_none(generation_config.bos_token_id, device=device)
1810
+ eos_token_tensor = _tensor_or_none(generation_config.eos_token_id, device=device)
1811
+ pad_token_tensor = _tensor_or_none(generation_config.pad_token_id, device=device)
1812
+ decoder_start_token_tensor = _tensor_or_none(generation_config.decoder_start_token_id, device=device)
1813
+
1814
+ # for BC we also try to get `decoder_start_token_id` or `bos_token_id` (#30892)
1815
+ if self.config.is_encoder_decoder:
1816
+ decoder_start_token_tensor = (
1817
+ decoder_start_token_tensor if decoder_start_token_tensor is not None else bos_token_tensor
1818
+ )
1819
+
1820
+ # We can have more than one eos token. Always treat it as a 1D tensor (when it exists).
1821
+ if eos_token_tensor is not None and eos_token_tensor.ndim == 0:
1822
+ eos_token_tensor = eos_token_tensor.unsqueeze(0)
1823
+
1824
+ # Set pad token if unset (and there are conditions to do so)
1825
+ if pad_token_tensor is None and eos_token_tensor is not None:
1826
+ if not is_torchdynamo_compiling():
1827
+ if kwargs_has_attention_mask is not None and not kwargs_has_attention_mask:
1828
+ logger.warning(
1829
+ "The attention mask and the pad token id were not set. As a consequence, you may observe "
1830
+ "unexpected behavior. Please pass your input's `attention_mask` to obtain reliable results."
1831
+ )
1832
+ logger.warning(f"Setting `pad_token_id` to `eos_token_id`:{pad_token_tensor} for open-end generation.")
1833
+ pad_token_tensor = eos_token_tensor[0]
1834
+
1835
+ # Sanity checks/warnings
1836
+ if self.config.is_encoder_decoder and decoder_start_token_tensor is None:
1837
+ raise ValueError(
1838
+ "`decoder_start_token_id` or `bos_token_id` has to be defined for encoder-decoder generation."
1839
+ )
1840
+ if not is_torchdynamo_compiling(): # Checks that depend on tensor-dependent control flow
1841
+ if (
1842
+ eos_token_tensor is not None
1843
+ and isin_mps_friendly(elements=eos_token_tensor, test_elements=pad_token_tensor).any()
1844
+ ):
1845
+ if kwargs_has_attention_mask is not None and not kwargs_has_attention_mask:
1846
+ logger.warning_once(
1847
+ "The attention mask is not set and cannot be inferred from input because pad token is same as "
1848
+ "eos token. As a consequence, you may observe unexpected behavior. Please pass your input's "
1849
+ "`attention_mask` to obtain reliable results."
1850
+ )
1851
+ if eos_token_tensor is not None and (
1852
+ torch.is_floating_point(eos_token_tensor) or (eos_token_tensor < 0).any()
1853
+ ):
1854
+ logger.warning(
1855
+ f"`eos_token_id` should consist of positive integers, but is {eos_token_tensor}. Your generation "
1856
+ "will not stop until the maximum length is reached. Depending on other flags, it may even crash."
1857
+ )
1858
+
1859
+ # Update generation config with the updated special tokens tensors
1860
+ # NOTE: this must be written into a different attribute name than the one holding the original special tokens
1861
+ # (in their non-tensor form), in order to enable end-to-end compilation. See
1862
+ # https://pytorch.org/docs/stable/torch.compiler_cudagraph_trees.html#limitations
1863
+ generation_config._bos_token_tensor = bos_token_tensor
1864
+ generation_config._eos_token_tensor = eos_token_tensor
1865
+ generation_config._pad_token_tensor = pad_token_tensor
1866
+ generation_config._decoder_start_token_tensor = decoder_start_token_tensor
1867
+
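The tensorization and pad-token defaulting above can be reproduced with toy token ids. A minimal sketch, assuming made-up ids; it mirrors the `_tensor_or_none` helper and the "pad defaults to the first EOS" rule.

import torch

def tensor_or_none(token, device="cpu"):
    if token is None:
        return None
    if isinstance(token, torch.Tensor):
        return token.to(device)
    return torch.tensor(token, device=device, dtype=torch.long)

eos_token_tensor = tensor_or_none([2, 32000])  # several EOS ids -> 1D tensor
pad_token_tensor = tensor_or_none(None)        # pad token unset

if eos_token_tensor is not None and eos_token_tensor.ndim == 0:
    eos_token_tensor = eos_token_tensor.unsqueeze(0)
if pad_token_tensor is None and eos_token_tensor is not None:
    pad_token_tensor = eos_token_tensor[0]     # pad defaults to the first EOS id

print(pad_token_tensor.item())  # 2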
1868
+ @torch.no_grad()
1869
+ def generate(
1870
+ self,
1871
+ inputs: Optional[torch.Tensor] = None,
1872
+ generation_config: Optional[GenerationConfig] = None,
1873
+ logits_processor: Optional[LogitsProcessorList] = None,
1874
+ stopping_criteria: Optional[StoppingCriteriaList] = None,
1875
+ prefix_allowed_tokens_fn: Optional[Callable[[int, torch.Tensor], List[int]]] = None,
1876
+ synced_gpus: Optional[bool] = None,
1877
+ assistant_model: Optional["PreTrainedModel"] = None,
1878
+ streamer: Optional["BaseStreamer"] = None,
1879
+ negative_prompt_ids: Optional[torch.Tensor] = None,
1880
+ negative_prompt_attention_mask: Optional[torch.Tensor] = None,
1881
+ **kwargs,
1882
+ ) -> Union[GenerateOutput, torch.LongTensor]:
1883
+ r"""
1884
+
1885
+ Generates sequences of token ids for models with a language modeling head.
1886
+
1887
+ <Tip warning={true}>
1888
+
1889
+ Most generation-controlling parameters are set in `generation_config` which, if not passed, will be set to the
1890
+ model's default generation configuration. You can override any `generation_config` by passing the corresponding
1891
+ parameters to generate(), e.g. `.generate(inputs, num_beams=4, do_sample=True)`.
1892
+
1893
+ For an overview of generation strategies and code examples, check out the [following
1894
+ guide](../generation_strategies).
1895
+
1896
+ </Tip>
1897
+
1898
+ Parameters:
1899
+ inputs (`torch.Tensor` of varying shape depending on the modality, *optional*):
1900
+ The sequence used as a prompt for the generation or as model inputs to the encoder. If `None` the
1901
+ method initializes it with `bos_token_id` and a batch size of 1. For decoder-only models `inputs`
1902
+ should be in the format of `input_ids`. For encoder-decoder models *inputs* can represent any of
1903
+ `input_ids`, `input_values`, `input_features`, or `pixel_values`.
1904
+ generation_config ([`~generation.GenerationConfig`], *optional*):
1905
+ The generation configuration to be used as base parametrization for the generation call. `**kwargs`
1906
+ passed to generate matching the attributes of `generation_config` will override them. If
1907
+ `generation_config` is not provided, the default will be used, which has the following loading
1908
+ priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model
1909
+ configuration. Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s
1910
+ default values, whose documentation should be checked to parameterize generation.
1911
+ logits_processor (`LogitsProcessorList`, *optional*):
1912
+ Custom logits processors that complement the default logits processors built from arguments and
1913
+ generation config. If a logit processor is passed that is already created with the arguments or a
1914
+ generation config an error is thrown. This feature is intended for advanced users.
1915
+ stopping_criteria (`StoppingCriteriaList`, *optional*):
1916
+ Custom stopping criteria that complement the default stopping criteria built from arguments and a
1917
+ generation config. If a stopping criteria is passed that is already created with the arguments or a
1918
+ generation config an error is thrown. If your stopping criteria depends on the `scores` input, make
1919
+ sure you pass `return_dict_in_generate=True, output_scores=True` to `generate`. This feature is
1920
+ intended for advanced users.
1921
+ prefix_allowed_tokens_fn (`Callable[[int, torch.Tensor], List[int]]`, *optional*):
1922
+ If provided, this function constrains the beam search to allowed tokens only at each step. If not
1923
+ provided no constraint is applied. This function takes 2 arguments: the batch ID `batch_id` and
1924
+ `input_ids`. It has to return a list with the allowed tokens for the next generation step conditioned
1925
+ on the batch ID `batch_id` and the previously generated tokens `input_ids`. This argument is useful
1926
+ for constrained generation conditioned on the prefix, as described in [Autoregressive Entity
1927
+ Retrieval](https://arxiv.org/abs/2010.00904).
1928
+ synced_gpus (`bool`, *optional*):
1929
+ Whether to continue running the while loop until max_length. Unless overridden, this flag will be set
1930
+ to `True` if using `FullyShardedDataParallel` or DeepSpeed ZeRO Stage 3 with multiple GPUs to avoid
1931
+ deadlocking if one GPU finishes generating before other GPUs. Otherwise, defaults to `False`.
1932
+ assistant_model (`PreTrainedModel`, *optional*):
1933
+ An assistant model that can be used to accelerate generation. The assistant model must have the exact
1934
+ same tokenizer. The acceleration is achieved when forecasting candidate tokens with the assistant model
1935
+ is much faster than running generation with the model you're calling generate from. As such, the
1936
+ assistant model should be much smaller.
1937
+ streamer (`BaseStreamer`, *optional*):
1938
+ Streamer object that will be used to stream the generated sequences. Generated tokens are passed
1939
+ through `streamer.put(token_ids)` and the streamer is responsible for any further processing.
1940
+ negative_prompt_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1941
+ The negative prompt needed for some processors such as CFG. The batch size must match the input batch
1942
+ size. This is an experimental feature, subject to breaking API changes in future versions.
1943
+ negative_prompt_attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1944
+ Attention_mask for `negative_prompt_ids`.
1945
+ kwargs (`Dict[str, Any]`, *optional*):
1946
+ Ad hoc parametrization of `generation_config` and/or additional model-specific kwargs that will be
1947
+ forwarded to the `forward` function of the model. If the model is an encoder-decoder model, encoder
1948
+ specific kwargs should not be prefixed and decoder specific kwargs should be prefixed with *decoder_*.
1949
+
1950
+ Return:
1951
+ [`~utils.ModelOutput`] or `torch.LongTensor`: A [`~utils.ModelOutput`] (if `return_dict_in_generate=True`
1952
+ or when `config.return_dict_in_generate=True`) or a `torch.LongTensor`.
1953
+
1954
+ If the model is *not* an encoder-decoder model (`model.config.is_encoder_decoder=False`), the possible
1955
+ [`~utils.ModelOutput`] types are:
1956
+
1957
+ - [`~generation.GenerateDecoderOnlyOutput`],
1958
+ - [`~generation.GenerateBeamDecoderOnlyOutput`]
1959
+
1960
+ If the model is an encoder-decoder model (`model.config.is_encoder_decoder=True`), the possible
1961
+ [`~utils.ModelOutput`] types are:
1962
+
1963
+ - [`~generation.GenerateEncoderDecoderOutput`],
1964
+ - [`~generation.GenerateBeamEncoderDecoderOutput`]
1965
+ """
1966
+
1967
+ # 1. Handle `generation_config` and kwargs that might update it, and validate the `.generate()` call
1968
+ self._validate_model_class()
1969
+ tokenizer = kwargs.pop("tokenizer", None) # Pull this out first, we only use it for stopping criteria
1970
+ assistant_tokenizer = kwargs.pop("assistant_tokenizer", None) # only used for assisted generation
1971
+
1972
+ generation_config, model_kwargs = self._prepare_generation_config(generation_config, **kwargs)
1973
+ self._validate_model_kwargs(model_kwargs.copy())
1974
+ self._validate_assistant(assistant_model, tokenizer, assistant_tokenizer)
1975
+
1976
+ # 2. Set generation parameters if not already defined
1977
+ if synced_gpus is None:
1978
+ synced_gpus = (is_deepspeed_zero3_enabled() or is_fsdp_managed_module(self)) and dist.get_world_size() > 1
1979
+
1980
+ logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList()
1981
+ stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList()
1982
+
1983
+ accepts_attention_mask = "attention_mask" in set(inspect.signature(self.forward).parameters.keys())
1984
+ requires_attention_mask = "encoder_outputs" not in model_kwargs
1985
+ kwargs_has_attention_mask = model_kwargs.get("attention_mask", None) is not None
1986
+
1987
+ # 3. Define model inputs
1988
+ inputs_tensor, model_input_name, model_kwargs = self._prepare_model_inputs(
1989
+ inputs, generation_config.bos_token_id, model_kwargs
1990
+ )
1991
+ batch_size = inputs_tensor.shape[0]
1992
+
1993
+ device = inputs_tensor.device
1994
+ self._prepare_special_tokens(generation_config, kwargs_has_attention_mask, device=device)
1995
+
1996
+ # decoder-only models must use left-padding for batched generation.
1997
+ if not self.config.is_encoder_decoder and not is_torchdynamo_compiling():
1998
+ # If `input_ids` was given, check if the last id in any sequence is `pad_token_id`
1999
+ # Note: If using `inputs_embeds`, this check does not work, because we want to be more hands-off.
2000
+ if (
2001
+ generation_config._pad_token_tensor is not None
2002
+ and batch_size > 1
2003
+ and len(inputs_tensor.shape) == 2
2004
+ and torch.sum(inputs_tensor[:, -1] == generation_config._pad_token_tensor) > 0
2005
+ ):
2006
+ logger.warning(
2007
+ "A decoder-only architecture is being used, but right-padding was detected! For correct "
2008
+ "generation results, please set `padding_side='left'` when initializing the tokenizer."
2009
+ )
2010
+
2011
+ # 4. Define other model kwargs
2012
+ # decoder-only models with inputs_embeds forwarding must use caching (otherwise we can't detect whether we are
2013
+ # generating the first new token or not, and we only want to use the embeddings for the first new token)
2014
+ if not self.config.is_encoder_decoder and model_input_name == "inputs_embeds":
2015
+ generation_config.use_cache = True
2016
+
2017
+ if not kwargs_has_attention_mask and requires_attention_mask and accepts_attention_mask:
2018
+ model_kwargs["attention_mask"] = self._prepare_attention_mask_for_generation(
2019
+ inputs_tensor, generation_config._pad_token_tensor, generation_config._eos_token_tensor
2020
+ )
2021
+ elif kwargs_has_attention_mask:
2022
+ # TODO (joao): generalize this check with other types of inputs
2023
+ if model_input_name == "input_ids" and len(model_kwargs["attention_mask"].shape) > 2:
2024
+ raise ValueError("`attention_mask` passed to `generate` must be 2D.")
2025
+
2026
+ if self.config.is_encoder_decoder and "encoder_outputs" not in model_kwargs:
2027
+ # if model is encoder decoder encoder_outputs are created and added to `model_kwargs`
2028
+ model_kwargs = self._prepare_encoder_decoder_kwargs_for_generation(
2029
+ inputs_tensor, model_kwargs, model_input_name, generation_config
2030
+ )
2031
+
2032
+ # 5. Prepare `input_ids` which will be used for auto-regressive generation
2033
+ if self.config.is_encoder_decoder:
2034
+ input_ids, model_kwargs = self._prepare_decoder_input_ids_for_generation(
2035
+ batch_size=batch_size,
2036
+ model_input_name=model_input_name,
2037
+ model_kwargs=model_kwargs,
2038
+ decoder_start_token_id=generation_config._decoder_start_token_tensor,
2039
+ device=inputs_tensor.device,
2040
+ )
2041
+ else:
2042
+ input_ids = inputs_tensor if model_input_name == "input_ids" else model_kwargs.pop("input_ids")
2043
+
2044
+ if generation_config.token_healing:
2045
+ input_ids = self.heal_tokens(input_ids, tokenizer)
2046
+
2047
+ if streamer is not None:
2048
+ streamer.put(input_ids.cpu())
2049
+
2050
+ # 6. Prepare `max_length` depending on other stopping criteria.
2051
+ input_ids_length = input_ids.shape[-1]
2052
+ has_default_max_length = kwargs.get("max_length") is None and generation_config.max_length is not None
2053
+ has_default_min_length = kwargs.get("min_length") is None and generation_config.min_length is not None
2054
+ generation_config = self._prepare_generated_length(
2055
+ generation_config=generation_config,
2056
+ has_default_max_length=has_default_max_length,
2057
+ has_default_min_length=has_default_min_length,
2058
+ model_input_name=model_input_name,
2059
+ inputs_tensor=inputs_tensor,
2060
+ input_ids_length=input_ids_length,
2061
+ )
2062
+
2063
+ # If the model supports `num_logits_to_keep` in forward(), set it to 1 to avoid computing the whole
2064
+ # logit matrix. This can save a lot of memory during the first forward pass. Note that assisted decoding
2065
+ # dynamically overrides this value as it can need more than the last token logits
2066
+ if self._supports_num_logits_to_keep() and "num_logits_to_keep" not in model_kwargs:
2067
+ model_kwargs["num_logits_to_keep"] = 1
2068
+
2069
+ self._validate_generated_length(generation_config, input_ids_length, has_default_max_length)
2070
+
2071
+ # 7. Prepare the cache.
2072
+ # - `model_kwargs` may be updated in place with a cache as defined by the parameters in `generation_config`.
2073
+ # - different models have a different cache name expected by the model (default = "past_key_values")
2074
+ # - `max_length`, prepared above, is used to determine the maximum cache length
2075
+ # TODO (joao): remove `user_defined_cache` after v4.47 (remove default conversion to legacy format)
2076
+ cache_name = "past_key_values" if "mamba" not in self.__class__.__name__.lower() else "cache_params"
2077
+ user_defined_cache = model_kwargs.get(cache_name)
2078
+ max_cache_length = generation_config.max_length
2079
+ if (
2080
+ inputs_tensor.shape[1] != input_ids_length
2081
+ and model_input_name == "inputs_embeds"
2082
+ and not self.config.is_encoder_decoder
2083
+ ):
2084
+ max_cache_length += inputs_tensor.shape[1]
2085
+ self._prepare_cache_for_generation(
2086
+ generation_config, model_kwargs, assistant_model, batch_size, max_cache_length, device
2087
+ )
2088
+
2089
+ # 8. determine generation mode
2090
+ generation_mode = generation_config.get_generation_mode(assistant_model)
2091
+
2092
+ if streamer is not None and (generation_config.num_beams > 1):
2093
+ raise ValueError(
2094
+ "`streamer` cannot be used with beam search (yet!). Make sure that `num_beams` is set to 1."
2095
+ )
2096
+
2097
+ if not is_torchdynamo_compiling() and self.device.type != input_ids.device.type:
2098
+ warnings.warn(
2099
+ "You are calling .generate() with the `input_ids` being on a device type different"
2100
+ f" than your model's device. `input_ids` is on {input_ids.device.type}, whereas the model"
2101
+ f" is on {self.device.type}. You may experience unexpected behaviors or slower generation."
2102
+ " Please make sure that you have put `input_ids` to the"
2103
+ f" correct device by calling for example input_ids = input_ids.to('{self.device.type}') before"
2104
+ " running `.generate()`.",
2105
+ UserWarning,
2106
+ )
2107
+
2108
+ # 9. prepare logits processors and stopping criteria
2109
+ prepared_logits_processor = self._get_logits_processor(
2110
+ generation_config=generation_config,
2111
+ input_ids_seq_length=input_ids_length,
2112
+ encoder_input_ids=inputs_tensor,
2113
+ prefix_allowed_tokens_fn=prefix_allowed_tokens_fn,
2114
+ logits_processor=logits_processor,
2115
+ device=inputs_tensor.device,
2116
+ model_kwargs=model_kwargs,
2117
+ negative_prompt_ids=negative_prompt_ids,
2118
+ negative_prompt_attention_mask=negative_prompt_attention_mask,
2119
+ )
2120
+ prepared_stopping_criteria = self._get_stopping_criteria(
2121
+ generation_config=generation_config, stopping_criteria=stopping_criteria, tokenizer=tokenizer, **kwargs
2122
+ )
2123
+
2124
+ # Set model_kwargs `use_cache` so we can use it later in forward runs
2125
+ model_kwargs["use_cache"] = generation_config.use_cache
2126
+
2127
+ # 10. go into different generation modes
2128
+ if generation_mode == GenerationMode.ASSISTED_GENERATION:
2129
+ if generation_config.num_return_sequences > 1:
2130
+ raise ValueError(
2131
+ "num_return_sequences has to be 1 when doing assisted generate, "
2132
+ f"but is {generation_config.num_return_sequences}."
2133
+ )
2134
+ if batch_size > 1:
2135
+ raise ValueError("assisted generate is only supported for batch_size = 1")
2136
+ if not model_kwargs["use_cache"]:
2137
+ raise ValueError("assisted generate requires `use_cache=True`")
2138
+ if generation_config.cache_implementation in ["static", "hybrid", "sliding_window"]:
2139
+ raise ValueError("assisted generate is not supported with Static cache classes`")
2140
+ if self._is_stateful:
2141
+ # In assisted generation we need the ability to confirm whether the model would pick certain tokens,
2142
+ # which is not possible with stateful models (they can't reset to a previous subset of generated text)
2143
+ raise ValueError(
2144
+ f"assisted generation is not supported with stateful models, such as {self.__class__.__name__}"
2145
+ )
2146
+
2147
+ # 11. Get the candidate generator, given the parameterization
2148
+ candidate_generator = self._get_candidate_generator(
2149
+ generation_config=generation_config,
2150
+ input_ids=input_ids,
2151
+ inputs_tensor=inputs_tensor,
2152
+ assistant_model=assistant_model,
2153
+ logits_processor=logits_processor,
2154
+ target_tokenizer=tokenizer,
2155
+ assistant_tokenizer=assistant_tokenizer,
2156
+ model_kwargs=model_kwargs,
2157
+ )
2158
+
2159
+ # 12. run assisted generate
2160
+ result = self._assisted_decoding(
2161
+ input_ids,
2162
+ candidate_generator=candidate_generator,
2163
+ logits_processor=prepared_logits_processor,
2164
+ stopping_criteria=prepared_stopping_criteria,
2165
+ generation_config=generation_config,
2166
+ synced_gpus=synced_gpus,
2167
+ streamer=streamer,
2168
+ **model_kwargs,
2169
+ )
2170
+ elif generation_mode == GenerationMode.DOLA_GENERATION:
2171
+ if self._is_stateful:
2172
+ # DoLa decoding was not designed for stateful models, and would require some changes
2173
+ raise ValueError(
2174
+ f"dola decoding is not supported with stateful models, such as {self.__class__.__name__}"
2175
+ )
2176
+ result = self._dola_decoding(
2177
+ input_ids,
2178
+ dola_layers=generation_config.dola_layers,
2179
+ logits_processor=prepared_logits_processor,
2180
+ stopping_criteria=prepared_stopping_criteria,
2181
+ generation_config=generation_config,
2182
+ synced_gpus=synced_gpus,
2183
+ streamer=streamer,
2184
+ **model_kwargs,
2185
+ )
2186
+
2187
+ elif generation_mode == GenerationMode.CONTRASTIVE_SEARCH:
2188
+ if not model_kwargs["use_cache"]:
2189
+ raise ValueError("Contrastive search requires `use_cache=True`")
2190
+ if self._is_stateful:
2191
+ # Just like assisted generation, we need to be able to rollback to a previous state (see comment above)
2192
+ raise ValueError(
2193
+ f"contrastive search is not supported with stateful models, such as {self.__class__.__name__}"
2194
+ )
2195
+
2196
+ result = self._contrastive_search(
2197
+ input_ids,
2198
+ logits_processor=prepared_logits_processor,
2199
+ stopping_criteria=prepared_stopping_criteria,
2200
+ generation_config=generation_config,
2201
+ synced_gpus=synced_gpus,
2202
+ streamer=streamer,
2203
+ **model_kwargs,
2204
+ )
2205
+
2206
+ elif generation_mode in (GenerationMode.SAMPLE, GenerationMode.GREEDY_SEARCH):
2207
+ # 11. expand input_ids with `num_return_sequences` additional sequences per batch
2208
+ input_ids, model_kwargs = self._expand_inputs_for_generation(
2209
+ input_ids=input_ids,
2210
+ expand_size=generation_config.num_return_sequences,
2211
+ is_encoder_decoder=self.config.is_encoder_decoder,
2212
+ **model_kwargs,
2213
+ )
2214
+
2215
+ # 12. run sample (it degenerates to greedy search when `generation_config.do_sample=False`)
2216
+ result = self._sample(
2217
+ input_ids,
2218
+ logits_processor=prepared_logits_processor,
2219
+ stopping_criteria=prepared_stopping_criteria,
2220
+ generation_config=generation_config,
2221
+ synced_gpus=synced_gpus,
2222
+ streamer=streamer,
2223
+ **model_kwargs,
2224
+ )
2225
+
2226
+ elif generation_mode in (GenerationMode.BEAM_SAMPLE, GenerationMode.BEAM_SEARCH):
2227
+ # 11. prepare beam search scorer
2228
+ beam_scorer = BeamSearchScorer(
2229
+ batch_size=batch_size,
2230
+ num_beams=generation_config.num_beams,
2231
+ device=inputs_tensor.device,
2232
+ length_penalty=generation_config.length_penalty,
2233
+ do_early_stopping=generation_config.early_stopping,
2234
+ num_beam_hyps_to_keep=generation_config.num_return_sequences,
2235
+ max_length=generation_config.max_length,
2236
+ )
2237
+
2238
+ # 12. interleave input_ids with `num_beams` additional sequences per batch
2239
+ input_ids, model_kwargs = self._expand_inputs_for_generation(
2240
+ input_ids=input_ids,
2241
+ expand_size=generation_config.num_beams,
2242
+ is_encoder_decoder=self.config.is_encoder_decoder,
2243
+ **model_kwargs,
2244
+ )
2245
+
2246
+ # 13. run beam sample
2247
+ result = self._beam_search(
2248
+ input_ids,
2249
+ beam_scorer,
2250
+ logits_processor=prepared_logits_processor,
2251
+ stopping_criteria=prepared_stopping_criteria,
2252
+ generation_config=generation_config,
2253
+ synced_gpus=synced_gpus,
2254
+ **model_kwargs,
2255
+ )
2256
+
2257
+ elif generation_mode == GenerationMode.GROUP_BEAM_SEARCH:
2258
+ # 11. prepare beam search scorer
2259
+ beam_scorer = BeamSearchScorer(
2260
+ batch_size=batch_size,
2261
+ num_beams=generation_config.num_beams,
2262
+ device=inputs_tensor.device,
2263
+ length_penalty=generation_config.length_penalty,
2264
+ do_early_stopping=generation_config.early_stopping,
2265
+ num_beam_hyps_to_keep=generation_config.num_return_sequences,
2266
+ num_beam_groups=generation_config.num_beam_groups,
2267
+ max_length=generation_config.max_length,
2268
+ )
2269
+ # 12. interleave input_ids with `num_beams` additional sequences per batch
2270
+ input_ids, model_kwargs = self._expand_inputs_for_generation(
2271
+ input_ids=input_ids,
2272
+ expand_size=generation_config.num_beams,
2273
+ is_encoder_decoder=self.config.is_encoder_decoder,
2274
+ **model_kwargs,
2275
+ )
2276
+ # 13. run beam search
2277
+ result = self._group_beam_search(
2278
+ input_ids,
2279
+ beam_scorer,
2280
+ logits_processor=prepared_logits_processor,
2281
+ stopping_criteria=prepared_stopping_criteria,
2282
+ generation_config=generation_config,
2283
+ synced_gpus=synced_gpus,
2284
+ **model_kwargs,
2285
+ )
2286
+
2287
+ elif generation_mode == GenerationMode.CONSTRAINED_BEAM_SEARCH:
2288
+ final_constraints = []
2289
+ if generation_config.constraints is not None:
2290
+ final_constraints = generation_config.constraints
2291
+
2292
+ if generation_config.force_words_ids is not None:
2293
+
2294
+ def typeerror():
2295
+ raise ValueError(
2296
+ "`force_words_ids` has to either be a `List[List[List[int]]]` or `List[List[int]]` "
2297
+ f"of positive integers, but is {generation_config.force_words_ids}."
2298
+ )
2299
+
2300
+ if (
2301
+ not isinstance(generation_config.force_words_ids, list)
2302
+ or len(generation_config.force_words_ids) == 0
2303
+ ):
2304
+ typeerror()
2305
+
2306
+ for word_ids in generation_config.force_words_ids:
2307
+ if isinstance(word_ids[0], list):
2308
+ if not isinstance(word_ids, list) or len(word_ids) == 0:
2309
+ typeerror()
2310
+ if any(not isinstance(token_ids, list) for token_ids in word_ids):
2311
+ typeerror()
2312
+ if any(
2313
+ any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids)
2314
+ for token_ids in word_ids
2315
+ ):
2316
+ typeerror()
2317
+
2318
+ constraint = DisjunctiveConstraint(word_ids)
2319
+ else:
2320
+ if not isinstance(word_ids, list) or len(word_ids) == 0:
2321
+ typeerror()
2322
+ if any((not isinstance(token_id, int) or token_id < 0) for token_id in word_ids):
2323
+ typeerror()
2324
+
2325
+ constraint = PhrasalConstraint(word_ids)
2326
+ final_constraints.append(constraint)
2327
+
2328
+ # 11. prepare beam search scorer
2329
+ constrained_beam_scorer = ConstrainedBeamSearchScorer(
2330
+ constraints=final_constraints,
2331
+ batch_size=batch_size,
2332
+ num_beams=generation_config.num_beams,
2333
+ device=inputs_tensor.device,
2334
+ length_penalty=generation_config.length_penalty,
2335
+ do_early_stopping=generation_config.early_stopping,
2336
+ num_beam_hyps_to_keep=generation_config.num_return_sequences,
2337
+ max_length=generation_config.max_length,
2338
+ )
2339
+ # 12. interleave input_ids with `num_beams` additional sequences per batch
2340
+ input_ids, model_kwargs = self._expand_inputs_for_generation(
2341
+ input_ids=input_ids,
2342
+ expand_size=generation_config.num_beams,
2343
+ is_encoder_decoder=self.config.is_encoder_decoder,
2344
+ **model_kwargs,
2345
+ )
2346
+ # 13. run beam search
2347
+ result = self._constrained_beam_search(
2348
+ input_ids,
2349
+ constrained_beam_scorer=constrained_beam_scorer,
2350
+ logits_processor=prepared_logits_processor,
2351
+ stopping_criteria=prepared_stopping_criteria,
2352
+ generation_config=generation_config,
2353
+ synced_gpus=synced_gpus,
2354
+ **model_kwargs,
2355
+ )
2356
+
2357
+ # Convert to legacy cache format if requested
2358
+ if (
2359
+ generation_config.return_legacy_cache is not False # Should check for `True` after v4.47
2360
+ and not is_torchdynamo_compiling()
2361
+ and hasattr(result, "past_key_values")
2362
+ and hasattr(result.past_key_values, "to_legacy_cache")
2363
+ and result.past_key_values.to_legacy_cache is not None
2364
+ ):
2365
+ # handle BC (convert by default if the user hasn't passed a cache AND the cache is of the default type)
2366
+ should_convert_cache = generation_config.return_legacy_cache
2367
+ is_user_defined_cache = user_defined_cache is not None
2368
+ is_default_cache_type = (
2369
+ type(result.past_key_values) == DynamicCache # noqa E721
2370
+ or (
2371
+ isinstance(result.past_key_values, EncoderDecoderCache)
2372
+ and type(result.past_key_values.self_attention_cache) == DynamicCache # noqa E721
2373
+ and type(result.past_key_values.cross_attention_cache) == DynamicCache # noqa E721
2374
+ )
2375
+ )
2376
+ if not is_user_defined_cache and is_default_cache_type:
2377
+ logger.warning_once(
2378
+ "From v4.47 onwards, when a model cache is to be returned, `generate` will return a `Cache` "
2379
+ "instance instead by default (as opposed to the legacy tuple of tuples format). If you want to "
2380
+ "keep returning the legacy format, please set `return_legacy_cache=True`."
2381
+ )
2382
+ should_convert_cache = True
2383
+ if should_convert_cache:
2384
+ result.past_key_values = result.past_key_values.to_legacy_cache()
2385
+ return result
2386
+
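The generation-mode dispatch above is driven entirely by the caller's arguments. A hedged sketch of that mapping (placeholder model ids, small `max_new_tokens`); illustrative, not exhaustive.

from transformers import AutoModelForCausalLM, AutoTokenizer

tok = AutoTokenizer.from_pretrained("your-org/base-model")            # placeholder
model = AutoModelForCausalLM.from_pretrained("your-org/base-model")   # placeholder
inputs = tok("The capital of France is", return_tensors="pt")

model.generate(**inputs, max_new_tokens=8, do_sample=False)             # GREEDY_SEARCH
model.generate(**inputs, max_new_tokens=8, do_sample=True, top_p=0.9)   # SAMPLE
model.generate(**inputs, max_new_tokens=8, num_beams=4)                 # BEAM_SEARCH
model.generate(**inputs, max_new_tokens=8, penalty_alpha=0.6, top_k=4)  # CONTRASTIVE_SEARCH
model.generate(**inputs, max_new_tokens=8, dola_layers="high")          # DOLA_GENERATION

assistant = AutoModelForCausalLM.from_pretrained("your-org/tiny-model")  # placeholder
model.generate(**inputs, max_new_tokens=8, assistant_model=assistant)    # ASSISTED_GENERATION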
2387
+ def _has_unfinished_sequences(
2388
+ self,
2389
+ this_peer_finished: bool,
2390
+ synced_gpus: bool,
2391
+ device: torch.device,
2392
+ cur_len: Optional[int] = None,
2393
+ max_length: Optional[int] = None,
2394
+ ) -> bool:
2395
+ """
2396
+ Returns whether there are still unfinished sequences in the device. The existence of unfinished sequences is
2397
+ fed through `this_peer_finished`. ZeRO stage 3-friendly.
2398
+ """
2399
+ # torch.compile does not support data-dependent control flow. This is a workaround to allow torch.compile,
2400
+ # although we lose the ability to stop when all sequences return an EOS token (and other stopping criteria)
2401
+ # TODO (joao): remove this when torch's support for control flow is not experimental (https://pytorch.org/docs/stable/generated/torch.cond.html)
2402
+ if is_torchdynamo_compiling():
2403
+ return cur_len < max_length
2404
+ else:
2405
+ if synced_gpus:
2406
+ # Under synced_gpus the `forward` call must continue until all gpus complete their sequence.
2407
+ # The following logic allows an early break if all peers finished generating their sequence
2408
+ this_peer_finished_flag = torch.tensor(0.0 if this_peer_finished else 1.0).to(device)
2409
+ # send 0.0 if we finished, 1.0 otherwise
2410
+ dist.all_reduce(this_peer_finished_flag, op=dist.ReduceOp.SUM)
2411
+ # did all peers finish? the reduced sum will be 0.0 then
2412
+ if this_peer_finished_flag.item() == 0.0:
2413
+ return False
2414
+ elif this_peer_finished:
2415
+ return False
2416
+ return True
2417
+
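The synced-GPUs early break above reduces one scalar flag per rank; a total of 0.0 means every peer has finished. A toy illustration without a process group, where the `peer_flags` list stands in for `dist.all_reduce` across hypothetical ranks.

peer_flags = [0.0, 0.0, 1.0]   # two ranks finished, one still generating
reduced = sum(peer_flags)      # what dist.all_reduce(..., op=SUM) would yield on every rank
keep_looping = reduced != 0.0
print(keep_looping)            # True: at least one peer still has unfinished sequences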
2418
+ def heal_tokens(
2419
+ self, input_ids: torch.LongTensor, tokenizer: Optional["PreTrainedTokenizerBase"] = None
2420
+ ) -> torch.LongTensor:
2421
+ r"""
2422
+ Applies token healing: replaces the tail token of each input sequence with a better-matching extension.
2423
+ Parameters:
2424
+ input_ids (`torch.LongTensor`): The sequence used as a prompt for the generation.
2425
+ tokenizer (`PreTrainedTokenizerBase`, *optional*): The tokenizer used to decode the input ids.
2426
+ Return:
2427
+ `torch.LongTensor` where each sequence has its tail token replaced with its appropriate extension.
2428
+ """
2429
+ if tokenizer is None:
2430
+ raise ValueError(
2431
+ " When generating with token healing, you must pass the model's tokenizer to the `tokenizer` "
2432
+ "argument of `generate`."
2433
+ )
2434
+
2435
+ bos_token_id, pad_token_id = tokenizer.bos_token_id, tokenizer.pad_token_id
2436
+ vocab_trie = ExtensionsTrie(tokenizer.get_vocab())
2437
+ generation_config = GenerationConfig(max_new_tokens=1, pad_token_id=pad_token_id)
2438
+
2439
+ # assumption: leading/trailing whitespace is not meaningful, so the prompts are
2440
+ # stripped before re-tokenizing to desensitize generation to whitespace artefacts
2441
+ prompts = [p.strip() for p in tokenizer.batch_decode(input_ids, skip_special_tokens=True)]
2442
+ input_ids = tokenizer(
2443
+ prompts,
2444
+ return_tensors="pt",
2445
+ padding=True,
2446
+ ).input_ids.to(input_ids.device)
2447
+
2448
+ # replace bos with pad to not condition healing on it
2449
+ input_ids = torch.where(input_ids == bos_token_id, pad_token_id, input_ids)
2450
+
2451
+ """
2452
+ the code below assumes `input_ids` is not empty,
2453
+ so check that it contains elements first
2454
+ """
2455
+ if input_ids.numel() == 0:
2456
+ return input_ids
2457
+
2458
+ tail_ids = input_ids[:, -1].tolist()
2459
+
2460
+ space_tok = tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids(" "))[0]
2461
+ # tail tokens are used for a prefix search, thus, whitespaces are replaced with
2462
+ # their tokenization (e.g. 'Ġ') to enable search for tokens prefixed with a whitespace
2463
+ tail_toks = (tokenizer.decode(t).replace(" ", space_tok) for t in tail_ids)
2464
+
2465
+ for batch_idx, (tail_id, tail_tok) in enumerate(zip(tail_ids, tail_toks)):
2466
+ batch_ids = input_ids[batch_idx]
2467
+ if torch.all(batch_ids == pad_token_id).item():
2468
+ continue # skip empty sequences (all pad ids)
2469
+
2470
+ # apply bias for alternatives (extensions) to the tail token
2471
+ """
2472
+ each `seq_bias` key has to be a tuple of ints, so use the
2473
+ tokenizer to convert the token strings to ids
2474
+ """
2475
+ seq_bias = {
2476
+ (tokenizer.convert_tokens_to_ids(alt_tok),): 10.0 for alt_tok in vocab_trie.extensions(prefix=tail_tok)
2477
+ }
2478
+
2479
+ if len(seq_bias) == 1:
2480
+ continue # skip if there are no token alternatives to heal with
2481
+
2482
+ # slightly favor original token to limit aggressive healing e.g. 'http' -> 'https'
2483
+ seq_bias[(tail_id,)] += 1.0
2484
+ generation_config.update(sequence_bias=seq_bias)
2485
+
2486
+ trimmed_ids = batch_ids[:-1]
2487
+
2488
+ """
2489
+ the code below assumes `trimmed_ids` is not empty,
2490
+ so check its element count first
2491
+ """
2492
+ if trimmed_ids.numel() == 0:
2493
+ continue
2494
+
2495
+ # if the prompt is a single (non-pad) token, regenerate from bos
2496
+ if len(batch_ids[batch_ids != pad_token_id]) == 1:
2497
+ trimmed_ids[-1] = bos_token_id
2498
+
2499
+ input_ids[batch_idx] = self.generate(trimmed_ids.unsqueeze(0), generation_config=generation_config)
2500
+
2501
+ return input_ids
2502
+
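The heart of token healing above is biasing vocabulary tokens that extend the trimmed tail token. A sketch with a toy vocabulary, using a plain prefix scan in place of the internal `ExtensionsTrie`.

toy_vocab = {"http": 11, "https": 12, "httpx": 13, "hello": 14}
tail_tok, tail_id = "http", 11

# Every vocab token extending the tail token gets a positive sequence bias.
seq_bias = {(tok_id,): 10.0 for tok, tok_id in toy_vocab.items() if tok.startswith(tail_tok)}
seq_bias[(tail_id,)] += 1.0  # slightly favor the original token, as above

print(seq_bias)  # {(11,): 11.0, (12,): 10.0, (13,): 10.0}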
2503
+ def _dola_decoding(
2504
+ self,
2505
+ input_ids: torch.LongTensor,
2506
+ dola_layers: Union[str, List[int]],
2507
+ logits_processor: LogitsProcessorList,
2508
+ stopping_criteria: StoppingCriteriaList,
2509
+ generation_config: GenerationConfig,
2510
+ synced_gpus: bool,
2511
+ streamer: "BaseStreamer",
2512
+ **model_kwargs,
2513
+ ) -> Union[GenerateNonBeamOutput, torch.LongTensor]:
2514
+ r"""
2515
+ Generates sequences of token ids for models with a language modeling head using **dola decoding** and can be
2516
+ used for decoder-only text models.
2517
+ The method is based on the paper "DoLa: Decoding by Contrasting Layers Improves Factuality in Large Language
2518
+ Models" (https://arxiv.org/abs/2309.03883) in ICLR 2024.
2519
+
2520
+ Parameters:
2521
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
2522
+ The sequence used as a prompt for the generation.
2523
+ dola_layers (`Union[str, List[int]]`):
2524
+ The candidate layers used in contrasting layers of DoLa. It can be either 1) 'low' or 'high', which
2525
+ means the lower part or higher part of the model layers, respectively, or 2) a list of layer indices
2526
+ to be used for candidate layers. The 0-th layer is the word embedding layer of the model.
2527
+ logits_processor (`LogitsProcessorList`):
2528
+ An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsProcessor`]
2529
+ used to modify the prediction scores of the language modeling head applied at each generation step.
2530
+ stopping_criteria (`StoppingCriteriaList`, *optional*):
2531
+ An instance of [`StoppingCriteriaList`]. List of instances of class derived from [`StoppingCriteria`]
2532
+ used to tell if the generation loop should stop.
2533
+ generation_config ([`~generation.GenerationConfig`]):
2534
+ The generation configuration to be used as parametrization of the decoding method.
2535
+ synced_gpus (`bool`):
2536
+ Whether to continue running the while loop until max_length (needed to avoid deadlocking with
2537
+ `FullyShardedDataParallel` and DeepSpeed ZeRO Stage 3).
2538
+ streamer (`BaseStreamer`, *optional*):
2539
+ Streamer object that will be used to stream the generated sequences. Generated tokens are passed
2540
+ through `streamer.put(token_ids)` and the streamer is responsible for any further processing.
2541
+ model_kwargs:
2542
+ Additional model specific keyword arguments will be forwarded to the `forward` function of the model.
2543
+ If model is an encoder-decoder model the kwargs should include `encoder_outputs`.
2544
+
2545
+ Return:
2546
+ [`~generation.GenerateDecoderOnlyOutput`], [`~generation.GenerateEncoderDecoderOutput`]
2547
+ or `torch.LongTensor`: A `torch.LongTensor` containing the generated tokens (default behaviour) or a
2548
+ [`~generation.GenerateDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and
2549
+ `return_dict_in_generate=True` or a [`~generation.GenerateEncoderDecoderOutput`] if
2550
+ `model.config.is_encoder_decoder=True`.
2551
+ """
2552
+
2553
+ if self.config.is_encoder_decoder:
2554
+ raise ValueError("DoLa decoding is only available for decoder-only models.")
2555
+ # init values
2556
+
2557
+ pad_token_id = generation_config._pad_token_tensor
2558
+ output_attentions = generation_config.output_attentions
2559
+ output_hidden_states = generation_config.output_hidden_states
2560
+ output_scores = generation_config.output_scores
2561
+ output_logits = generation_config.output_logits
2562
+ return_dict_in_generate = generation_config.return_dict_in_generate
2563
+ has_eos_stopping_criteria = any(hasattr(criteria, "eos_token_id") for criteria in stopping_criteria)
2564
+ do_sample = generation_config.do_sample
2565
+
2566
+ # init attention / hidden states / scores tuples
2567
+ scores = () if (return_dict_in_generate and output_scores) else None
2568
+ raw_logits = () if (return_dict_in_generate and output_logits) else None
2569
+ decoder_attentions = () if (return_dict_in_generate and output_attentions) else None
2570
+ cross_attentions = () if (return_dict_in_generate and output_attentions) else None
2571
+ decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None
2572
+
2573
+ # keep track of which sequences are already finished
2574
+ batch_size = input_ids.shape[0]
2575
+ unfinished_sequences = torch.ones(batch_size, dtype=torch.long, device=input_ids.device)
2576
+ model_kwargs = self._get_initial_cache_position(input_ids, model_kwargs)
2577
+
2578
+ this_peer_finished = False
2579
+
2580
+ # prepare layers for DoLa decoding
2581
+ final_layer = self.config.get_text_config().num_hidden_layers
2582
+ # if the model has tied word embeddings, we skip the word embeddings (0-th) layer and start from the 2nd layer,
2583
+ # as the early exit from word embeddings will become an identity function
2584
+ # if the model is really shallow (<=2 layers), we use the 1st layer if it's not the final layer and the 0-th
2585
+ # layer otherwise. Notice that DoLa does not help shallow models much.
2586
+ if not self.config.tie_word_embeddings:
2587
+ start_layer = 0
2588
+ elif final_layer > 2:
2589
+ start_layer = 2
2590
+ elif final_layer == 2:
2591
+ start_layer = 1
2592
+ else:
2593
+ start_layer = 0
2594
+
2595
+ # For `N`-layer models with `N <= 40` layers, the layers of `range(0, N // 2, 2)` and `range(N // 2, N, 2)`
2596
+ # are used for `'low'` and `'high'` layers, respectively.
2597
+ # For models with `N > 40` layers, the layers of `range(0, 20, 2)` and `range(N - 20, N, 2)` are used for
2598
+ # `'low'` and `'high'` layers, respectively.
2599
+ if isinstance(dola_layers, str) and dola_layers == "low":
2600
+ if start_layer == final_layer // 2:
2601
+ candidate_premature_layers = [start_layer]
2602
+ else:
2603
+ candidate_premature_layers = (
2604
+ list(range(start_layer, final_layer // 2, 2))
2605
+ if final_layer <= 40
2606
+ else list(range(start_layer, 20, 2))
2607
+ )
2608
+ elif isinstance(dola_layers, str) and dola_layers == "high":
2609
+ candidate_premature_layers = (
2610
+ list(range(final_layer // 2, final_layer, 2))
2611
+ if final_layer <= 40
2612
+ else list(range(final_layer - 20, final_layer, 2))
2613
+ )
2614
+ # Set the `dola_layers` to a list of integers for layer indices to contrast manually specified layers.
2615
+ elif isinstance(dola_layers, list):
2616
+ candidate_premature_layers = [i for i in dola_layers if i < final_layer]
2617
+ else:
2618
+ raise ValueError("dola_layers must be either 'low', 'high' or a list of integers.")
2619
+
2620
+ lm_head = self.get_output_embeddings()
2621
+ if lm_head is None:
2622
+ raise ValueError("DoLa is not supported for models that don't have output embeddings.")
2623
+
2624
+ while self._has_unfinished_sequences(this_peer_finished, synced_gpus, device=input_ids.device):
2625
+ # prepare model inputs
2626
+ model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs)
2627
+
2628
+ # forward pass to get next token
2629
+ outputs = self(
2630
+ **model_inputs,
2631
+ return_dict=True,
2632
+ output_attentions=output_attentions,
2633
+ output_hidden_states=True,
2634
+ )
2635
+
2636
+ # .float() is needed to retain precision for later logits manipulations
2637
+ final_layer_next_token_logits = outputs.logits[:, -1, :].detach().clone().float()
2638
+ final_logits = outputs.logits[:, -1, :].float()
2639
+ candidate_premature_logits = {}
2640
+ for candidate_premature_layer in candidate_premature_layers:
2641
+ candidate_premature_logits[candidate_premature_layer] = lm_head(
2642
+ outputs.hidden_states[candidate_premature_layer][:, -1, :]
2643
+ ).to(final_logits.device)
2644
+
2645
+ # synced_gpus: don't waste resources running the code we don't need; kwargs must be updated before skipping
2646
+ model_kwargs = self._update_model_kwargs_for_generation(
2647
+ outputs,
2648
+ model_kwargs,
2649
+ is_encoder_decoder=self.config.is_encoder_decoder,
2650
+ )
2651
+ if synced_gpus and this_peer_finished:
2652
+ continue
2653
+
2654
+ next_token_logits = _dola_select_contrast(
2655
+ candidate_premature_layers, candidate_premature_logits, final_logits
2656
+ )
2657
+ next_token_logits = next_token_logits.to(input_ids.device)
2658
+ # pre-process distribution
2659
+ next_token_scores = logits_processor(input_ids, next_token_logits)
2660
+
2661
+ # Store scores, attentions and hidden_states when required
2662
+ if return_dict_in_generate:
2663
+ if output_scores:
2664
+ scores += (next_token_scores,)
2665
+ if output_logits:
2666
+ raw_logits += (final_layer_next_token_logits,)
2667
+ if output_attentions:
2668
+ decoder_attentions += (
2669
+ (outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,)
2670
+ )
2671
+ if self.config.is_encoder_decoder:
2672
+ cross_attentions += (outputs.cross_attentions,)
2673
+
2674
+ if output_hidden_states:
2675
+ decoder_hidden_states += (
2676
+ (outputs.decoder_hidden_states,)
2677
+ if self.config.is_encoder_decoder
2678
+ else (outputs.hidden_states,)
2679
+ )
2680
+
2681
+ if do_sample: # sample
2682
+ probs = nn.functional.softmax(next_token_scores, dim=-1)
2683
+ next_tokens = torch.multinomial(probs, num_samples=1).squeeze(1)
2684
+ else: # argmax
2685
+ next_tokens = torch.argmax(next_token_scores, dim=-1)
2686
+
2687
+ # finished sentences should have their next token be a padding token
2688
+ if has_eos_stopping_criteria:
2689
+ next_tokens = next_tokens * unfinished_sequences + pad_token_id * (1 - unfinished_sequences)
2690
+
2691
+ # update generated ids, model inputs, and length for next step
2692
+ input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1)
2693
+ if streamer is not None:
2694
+ streamer.put(next_tokens.cpu())
2695
+
2696
+ # stop when each sentence is finished
2697
+ unfinished_sequences = unfinished_sequences & ~stopping_criteria(input_ids, scores)
2698
+ this_peer_finished = unfinished_sequences.max() == 0
2699
+
2700
+ if streamer is not None:
2701
+ streamer.end()
2702
+
2703
+ if return_dict_in_generate:
2704
+ return GenerateDecoderOnlyOutput(
2705
+ sequences=input_ids,
2706
+ scores=scores,
2707
+ logits=raw_logits,
2708
+ attentions=decoder_attentions,
2709
+ hidden_states=decoder_hidden_states,
2710
+ past_key_values=model_kwargs.get("past_key_values"),
2711
+ )
2712
+ else:
2713
+ return input_ids
2714
+
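The DoLa candidate-layer schedule above can be computed standalone. A sketch assuming a hypothetical 32-layer model with tied word embeddings (so `start_layer = 2`).

final_layer, start_layer = 32, 2   # hypothetical depth; tied embeddings skip layers 0-1

low_layers = (
    list(range(start_layer, final_layer // 2, 2))
    if final_layer <= 40
    else list(range(start_layer, 20, 2))
)
high_layers = (
    list(range(final_layer // 2, final_layer, 2))
    if final_layer <= 40
    else list(range(final_layer - 20, final_layer, 2))
)
print(low_layers)   # [2, 4, 6, 8, 10, 12, 14]
print(high_layers)  # [16, 18, 20, 22, 24, 26, 28, 30]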
2715
+ @torch.no_grad()
2716
+ def _contrastive_search(
2717
+ self,
2718
+ input_ids: torch.LongTensor,
2719
+ logits_processor: LogitsProcessorList,
2720
+ stopping_criteria: StoppingCriteriaList,
2721
+ generation_config: GenerationConfig,
2722
+ synced_gpus: bool,
2723
+ streamer: Optional["BaseStreamer"],
2724
+ **model_kwargs,
2725
+ ) -> Union[GenerateNonBeamOutput, torch.LongTensor]:
2726
+ r"""
2727
+ Generates sequences of token ids for models with a language modeling head using **contrastive search** and can
2728
+ be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models.
2729
+
2730
+ Parameters:
2731
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
2732
+ The sequence used as a prompt for the generation.
2733
+ logits_processor (`LogitsProcessorList`):
2734
+ An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsProcessor`]
2735
+ used to modify the prediction scores of the language modeling head applied at each generation step.
2736
+ stopping_criteria (`StoppingCriteriaList`):
2737
+ An instance of [`StoppingCriteriaList`]. List of instances of class derived from [`StoppingCriteria`]
2738
+ used to tell if the generation loop should stop.
2739
+ generation_config ([`~generation.GenerationConfig`]):
2740
+ The generation configuration to be used as parametrization of the decoding method.
2741
+ synced_gpus (`bool`):
2742
+ Whether to continue running the while loop until max_length (needed to avoid deadlocking with
2743
+ `FullyShardedDataParallel` and DeepSpeed ZeRO Stage 3).
2744
+ streamer (`BaseStreamer`, *optional*):
2745
+ Streamer object that will be used to stream the generated sequences. Generated tokens are passed
2746
+ through `streamer.put(token_ids)` and the streamer is responsible for any further processing.
2747
+ model_kwargs:
2748
+ Additional model specific keyword arguments will be forwarded to the `forward` function of the model.
2749
+ If model is an encoder-decoder model the kwargs should include `encoder_outputs`.
2750
+
2751
+ Return:
2752
+ [`~generation.GenerateDecoderOnlyOutput`], [`~generation.GenerateEncoderDecoderOutput`]
2753
+ or `torch.LongTensor`: A `torch.LongTensor` containing the generated tokens (default behaviour) or a
2754
+ [`~generation.GenerateDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and
2755
+ `return_dict_in_generate=True` or a [`~generation.GenerateEncoderDecoderOutput`] if
2756
+ `model.config.is_encoder_decoder=True`.
2757
+ """
2758
+ # init values
2759
+ has_eos_stopping_criteria = any(hasattr(criteria, "eos_token_id") for criteria in stopping_criteria)
2760
+ top_k = generation_config.top_k
2761
+ penalty_alpha = generation_config.penalty_alpha
2762
+ pad_token_id = generation_config._pad_token_tensor
2763
+ output_attentions = generation_config.output_attentions
2764
+ output_hidden_states = generation_config.output_hidden_states
2765
+ output_scores = generation_config.output_scores
2766
+ output_logits = generation_config.output_logits
2767
+ return_dict_in_generate = generation_config.return_dict_in_generate
2768
+ sequential = generation_config.low_memory
2769
+
2770
+ # init attention / hidden states / scores tuples
2771
+ raw_logits = () if (return_dict_in_generate and output_logits) else None
2772
+ scores = () if (return_dict_in_generate and output_scores) else None
2773
+ decoder_attentions = () if (return_dict_in_generate and output_attentions) else None
2774
+ cross_attentions = () if (return_dict_in_generate and output_attentions) else None
2775
+ decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None
2776
+
2777
+ # if model is an encoder-decoder, retrieve encoder attention weights and hidden states
2778
+ if return_dict_in_generate and self.config.is_encoder_decoder:
2779
+ encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None
2780
+ encoder_hidden_states = (
2781
+ model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None
2782
+ )
2783
+
2784
+ # keep track of which sequences are already finished
2785
+ batch_size = input_ids.shape[0]
2786
+ unfinished_sequences = torch.ones(batch_size, dtype=torch.long, device=input_ids.device)
2787
+ model_kwargs = self._get_initial_cache_position(input_ids, model_kwargs)
2788
+
2789
+ # Create cosine_matrix_mask based on the attention_mask
2790
+ cosine_matrix_mask = torch.ones_like(input_ids, dtype=torch.long)
2791
+ if self.config.is_encoder_decoder:
2792
+ if "decoder_attention_mask" in model_kwargs and model_kwargs["decoder_attention_mask"] is not None:
2793
+ cosine_matrix_mask = model_kwargs["decoder_attention_mask"]
2794
+ else:
2795
+ cosine_matrix_mask = model_kwargs["attention_mask"]
2796
+ cosine_matrix_mask = cosine_matrix_mask.repeat_interleave(top_k, dim=0)
2797
+
2798
+ this_peer_finished = False
2799
+
2800
+ while self._has_unfinished_sequences(this_peer_finished, synced_gpus, device=input_ids.device):
2801
+ # if the first step in the loop, encode all the prefix and obtain: (1) past_key_values;
2802
+ # (2) last_hidden_states; (3) logit_for_next_step; (4) update model kwargs for the next step
2803
+ if model_kwargs.get("past_key_values") is None or (
2804
+ isinstance(model_kwargs["past_key_values"], (Cache, EncoderDecoderCache))
2805
+ and model_kwargs["past_key_values"].get_seq_length() == 0
2806
+ ):
2807
+ # prepare inputs
2808
+ model_kwargs["use_cache"] = True
2809
+ model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs)
2810
+
2811
+ # encode the given prefix and prepare model inputs; encoder-decoder model process the prefix and save
2812
+ # the `encoder_outputs`
2813
+ outputs = self(
2814
+ **model_inputs, return_dict=True, output_hidden_states=True, output_attentions=output_attentions
2815
+ )
2816
+
2817
+ # last decoder hidden states will be used to compute the degeneration penalty (cosine similarity with
2818
+ # previous tokens)
2819
+ if self.config.is_encoder_decoder:
2820
+ last_hidden_states = outputs.decoder_hidden_states[-1]
2821
+ else:
2822
+ last_hidden_states = outputs.hidden_states[-1]
2823
+
2824
+ # next logit for contrastive search to select top-k candidate tokens
2825
+ # Clone is needed to avoid keeping a hanging ref to outputs.logits which may be very large for this first iteration
2826
+ # (the clone itself is always small)
2827
+ # .float() is needed to retain precision for later logits manipulations
2828
+ logit_for_next_step = outputs.logits[:, -1, :].clone().float()
2829
+ logit_for_next_step = logit_for_next_step.to(input_ids.device)
2830
+
2831
+ model_kwargs = self._update_model_kwargs_for_generation(
2832
+ outputs,
2833
+ model_kwargs,
2834
+ is_encoder_decoder=self.config.is_encoder_decoder,
2835
+ )
2836
+
2837
+ if not sequential:
2838
+ # Expands model inputs top_k times, for batched forward passes (akin to beam search).
2839
+ _, model_kwargs = self._expand_inputs_for_generation(
2840
+ expand_size=top_k, is_encoder_decoder=self.config.is_encoder_decoder, **model_kwargs
2841
+ )
2842
+
2843
+ past_key_values = model_kwargs.get("past_key_values")
2844
+ if past_key_values is None:
2845
+ raise ValueError(
2846
+ f"{self.__class__.__name__} does not support caching and therefore **can't** be used "
2847
+ "for contrastive search."
2848
+ )
2849
+ elif (
2850
+ not isinstance(past_key_values[0], (tuple, torch.Tensor))
2851
+ or past_key_values[0][0].shape[0] != batch_size
2852
+ ):
2853
+ raise ValueError(
2854
+ f"{self.__class__.__name__} does not have a standard cache format and therefore **can't** be "
2855
+ "used for contrastive search without further modifications."
2856
+ )
2857
+
2858
+ # contrastive_search main logic start:
2859
+ # contrastive search decoding consists of two steps: (1) candidate tokens recall; (2) candidate re-rank by
2860
+ # degeneration penalty
2861
+ processed_logit_for_next_step = logits_processor(input_ids, logit_for_next_step)
2862
+ next_probs = nn.functional.softmax(processed_logit_for_next_step, dim=-1)
2863
+
2864
+ top_k_probs, top_k_ids = torch.topk(next_probs, dim=-1, k=top_k)
2865
+
2866
+ # Store scores, attentions and hidden_states when required
2867
+ if return_dict_in_generate:
2868
+ if output_logits:
2869
+ raw_logits += (logit_for_next_step,)
2870
+ if output_scores:
2871
+ scores += (processed_logit_for_next_step,)
2872
+ if output_attentions:
2873
+ decoder_attentions += (
2874
+ (outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,)
2875
+ )
2876
+ if self.config.is_encoder_decoder:
2877
+ cross_attentions += (outputs.cross_attentions,)
2878
+
2879
+ if output_hidden_states:
2880
+ decoder_hidden_states += (
2881
+ (outputs.decoder_hidden_states,)
2882
+ if self.config.is_encoder_decoder
2883
+ else (outputs.hidden_states,)
2884
+ )
2885
+
2886
+ # This is needed to properly delete outputs.logits which may be very large for this first iteration
2887
+ # Otherwise a reference to outputs.logits is kept all along until after the next call to self.forward()
2888
+ del outputs
2889
+
2890
+ if not sequential:
2891
+ # Replicates the new past_key_values to match the `top_k` candidates
2892
+ past = model_kwargs["past_key_values"]
2893
+ # If it is a dynamic cache, modify it in-place layer after layer to save memory
2894
+ if isinstance(past, DynamicCache) or (
2895
+ isinstance(past, EncoderDecoderCache) and isinstance(past.self_attention_cache, DynamicCache)
2896
+ ):
2897
+ past.batch_repeat_interleave(top_k)
2898
+ else:
2899
+ new_key_values = []
2900
+ for layer in past:
2901
+ items = []
2902
+ # item is either the key or the value matrix
2903
+ for item in layer:
2904
+ items.append(item.repeat_interleave(top_k, dim=0))
2905
+ new_key_values.append(tuple(items))
2906
+
2907
+ past = tuple(new_key_values)
2908
+
2909
+ model_kwargs["past_key_values"] = past
2910
+
2911
+ if sequential:
2912
+ all_outputs = []
2913
+ for i in range(top_k):
2914
+ # compute the candidate tokens by the language model and collect their hidden_states
2915
+ next_model_inputs = self.prepare_inputs_for_generation(top_k_ids[:, i].view(-1, 1), **model_kwargs)
2916
+
2917
+ outputs = self(
2918
+ **next_model_inputs,
2919
+ return_dict=True,
2920
+ output_hidden_states=True,
2921
+ output_attentions=output_attentions,
2922
+ )
2923
+ if isinstance(outputs["past_key_values"], DynamicCache) or (
2924
+ isinstance(outputs["past_key_values"], EncoderDecoderCache)
2925
+ and isinstance(outputs["past_key_values"].self_attention_cache, DynamicCache)
2926
+ ):
2927
+ # Remove past K-V from output since we don't need to stack later
2928
+ outputs["past_key_values"] = None
2929
+ # Remove last token from past K-V since we don't want to append it at this point
2930
+ model_kwargs["past_key_values"].crop(-1)
2931
+
2932
+ all_outputs.append(outputs)
2933
+ outputs = stack_model_outputs(all_outputs, self.config.get_text_config())
2934
+
2935
+ else:
2936
+ # compute the candidate tokens by the language model and collect their hidden_states
2937
+ # assembles top_k_ids into batch of size k
2938
+ next_model_inputs = self.prepare_inputs_for_generation(top_k_ids.view(-1, 1), **model_kwargs)
2939
+
2940
+ outputs = self(
2941
+ **next_model_inputs,
2942
+ return_dict=True,
2943
+ output_hidden_states=True,
2944
+ output_attentions=output_attentions,
2945
+ )
2946
+
2947
+ # This is essential to avoid keeping a last reference to the big past K-V, which would double the necessary memory
2948
+ # in the next loop
2949
+ del next_model_inputs
2950
+
2951
+ # name is different for encoder-decoder and decoder-only models
2952
+ if self.config.is_encoder_decoder:
2953
+ next_hidden = outputs.decoder_hidden_states[-1]
2954
+ full_hidden_states = outputs.decoder_hidden_states
2955
+ else:
2956
+ next_hidden = outputs.hidden_states[-1]
2957
+ full_hidden_states = outputs.hidden_states
2958
+
2959
+ # .float() is needed to retain precision for later logits manipulations
2960
+ logits = outputs.logits[:, -1, :].float()
2961
+ context_hidden = last_hidden_states.repeat_interleave(top_k, dim=0)
2962
+
2963
+ # compute the degeneration penalty and re-rank the candidates based on the degeneration penalty and the
2964
+ # model confidence. Keeping `selected_idx` on CPU enables multi-device contrastive search and doesn't
2965
+ # introduce (noticeable) slowdowns on single-device runs.
2966
+ selected_idx = _ranking_fast(
2967
+ context_hidden, next_hidden, top_k_probs, cosine_matrix_mask, penalty_alpha, top_k
2968
+ )
2969
+ cosine_matrix_mask = torch.cat(
2970
+ [cosine_matrix_mask, cosine_matrix_mask.new_ones((cosine_matrix_mask.shape[0], 1))], dim=-1
2971
+ )
2972
+ selected_idx = selected_idx.to("cpu")
2973
+
2974
+ # This will be used instead of the previous inefficient torch.stack(torch.split())
2975
+ augmented_idx = torch.tensor([x + i * top_k for i, x in enumerate(selected_idx)])
2976
+
2977
+ # prepare for the next step: (1) next token_id; (2) past_key_values; (3) last_hidden_states for computing
2978
+ # the degeneration penalty; (4) logits for selecting next top-k candidates; (5) selected tokens scores
2979
+ # (model confidence minus degeneration penalty); (6) decoder hidden_states
2980
+ next_tokens = top_k_ids[range(len(top_k_ids)), selected_idx]
2981
+ next_hidden = torch.stack(torch.split(next_hidden.squeeze(dim=1), top_k))
2982
+ next_hidden = next_hidden[range(batch_size), selected_idx, :]
2983
+ last_hidden_states = torch.cat([last_hidden_states, next_hidden.unsqueeze(1)], dim=1)
2984
+
2985
+ next_decoder_hidden_states = ()
2986
+ for layer in full_hidden_states:
2987
+ layer = torch.stack(torch.split(layer, top_k))[range(batch_size), selected_idx, :]
2988
+ next_decoder_hidden_states += (layer,)
2989
+
2990
+ # generate past_key_values cache of only the selected token
2991
+ if sequential:
2992
+ next_model_input = self.prepare_inputs_for_generation(
2993
+ top_k_ids[:, selected_idx].view(-1, 1), **model_kwargs
2994
+ )
2995
+
2996
+ selected_outputs = self(
2997
+ **next_model_input,
2998
+ return_dict=True,
2999
+ output_hidden_states=False,
3000
+ output_attentions=False,
3001
+ )
3002
+ next_past_key_values = selected_outputs["past_key_values"]
3003
+
3004
+ else:
3005
+ _, next_past_key_values = self._extract_past_from_model_output(outputs)
3006
+ # Do it in-place layer per layer to save memory
3007
+ if isinstance(next_past_key_values, DynamicCache) or (
3008
+ isinstance(next_past_key_values, EncoderDecoderCache)
3009
+ and isinstance(next_past_key_values.self_attention_cache, DynamicCache)
3010
+ ):
3011
+ next_past_key_values.batch_select_indices(augmented_idx)
3012
+ else:
3013
+ new_key_values = []
3014
+ for layer in next_past_key_values:
3015
+ items = []
3016
+ # item is either the key or the value matrix
3017
+ for item in layer:
3018
+ items.append(item[augmented_idx, ...])
3019
+ new_key_values.append(tuple(items))
3020
+
3021
+ next_past_key_values = tuple(new_key_values)
3022
+
3023
+ logit_for_next_step = torch.stack(torch.split(logits, top_k))[range(batch_size), selected_idx, :]
3024
+ logit_for_next_step = logit_for_next_step.to(input_ids.device)
3025
+
3026
+ # Rebuilds the relevant parts of the model output for the selected token, for use in the next iteration
3027
+ if self.config.is_encoder_decoder:
3028
+ next_step_cross_attentions = ()
3029
+ next_step_decoder_attentions = ()
3030
+ if output_attentions:
3031
+ for layer in outputs.cross_attentions:
3032
+ layer = torch.stack(torch.split(layer, top_k, dim=0))[range(batch_size), selected_idx, ...]
3033
+ next_step_cross_attentions += (layer,)
3034
+ for layer in outputs.decoder_attentions:
3035
+ layer = torch.stack(torch.split(layer, top_k, dim=0))[range(batch_size), selected_idx, ...]
3036
+ next_step_decoder_attentions += (layer,)
3037
+ outputs = Seq2SeqLMOutput(
3038
+ past_key_values=next_past_key_values,
3039
+ decoder_hidden_states=next_decoder_hidden_states,
3040
+ decoder_attentions=next_step_decoder_attentions or None,
3041
+ cross_attentions=next_step_cross_attentions or None,
3042
+ )
3043
+ else:
3044
+ next_step_attentions = ()
3045
+ if output_attentions:
3046
+ for layer in outputs.attentions:
3047
+ layer = torch.stack(torch.split(layer, top_k, dim=0))[range(batch_size), selected_idx, ...]
3048
+ next_step_attentions += (layer,)
3049
+ outputs = CausalLMOutputWithPast(
3050
+ past_key_values=next_past_key_values,
3051
+ hidden_states=next_decoder_hidden_states,
3052
+ attentions=next_step_attentions or None,
3053
+ )
3054
+ # contrastive_search main logic end
3055
+
3056
+ # synced_gpus: don't waste resources running the code we don't need; kwargs must be updated before skipping
3057
+ model_kwargs = self._update_model_kwargs_for_generation(
3058
+ outputs,
3059
+ model_kwargs,
3060
+ is_encoder_decoder=self.config.is_encoder_decoder,
3061
+ )
3062
+ if synced_gpus and this_peer_finished:
3063
+ continue
3064
+
3065
+ # finished sentences should have their next token be a padding token
3066
+ if has_eos_stopping_criteria:
3067
+ next_tokens = next_tokens * unfinished_sequences + pad_token_id * (1 - unfinished_sequences)
3068
+
3069
+ # update generated ids, model inputs, and length for next step
3070
+ input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1)
3071
+ if streamer is not None:
3072
+ streamer.put(next_tokens.cpu())
3073
+
3074
+ # stop when each sentence is finished
3075
+ unfinished_sequences = unfinished_sequences & ~stopping_criteria(input_ids, scores)
3076
+ this_peer_finished = unfinished_sequences.max() == 0
3077
+
3078
+ if streamer is not None:
3079
+ streamer.end()
3080
+
3081
+ if return_dict_in_generate:
3082
+ # Contrastive search works by looking one token ahead, so we need to exclude that token from
3083
+ # `past_key_values` to be consistent with the other decoding methods
3084
+ if model_kwargs.get("past_key_values") is not None:
3085
+ if isinstance(model_kwargs["past_key_values"], DynamicCache) or (
3086
+ isinstance(model_kwargs["past_key_values"], EncoderDecoderCache)
3087
+ and isinstance(model_kwargs["past_key_values"].self_attention_cache, DynamicCache)
3088
+ ):
3089
+ model_kwargs["past_key_values"].crop(-1)
3090
+ else:
3091
+ past_key_values = []
3092
+ for layer in model_kwargs["past_key_values"]:
3093
+ layer_past_key_values = []
3094
+ for item in layer:
3095
+ layer_past_key_values.append(item[..., :-1, :])
3096
+ past_key_values.append(tuple(layer_past_key_values))
3097
+ model_kwargs["past_key_values"] = tuple(past_key_values)
3098
+
3099
+ if self.config.is_encoder_decoder:
3100
+ return GenerateEncoderDecoderOutput(
3101
+ sequences=input_ids,
3102
+ scores=scores,
3103
+ logits=raw_logits,
3104
+ encoder_attentions=encoder_attentions,
3105
+ encoder_hidden_states=encoder_hidden_states,
3106
+ decoder_attentions=decoder_attentions,
3107
+ cross_attentions=cross_attentions,
3108
+ decoder_hidden_states=decoder_hidden_states,
3109
+ past_key_values=model_kwargs.get("past_key_values"),
3110
+ )
3111
+ else:
3112
+ return GenerateDecoderOnlyOutput(
3113
+ sequences=input_ids,
3114
+ scores=scores,
3115
+ logits=raw_logits,
3116
+ attentions=decoder_attentions,
3117
+ hidden_states=decoder_hidden_states,
3118
+ past_key_values=model_kwargs.get("past_key_values"),
3119
+ )
3120
+ else:
3121
+ return input_ids
3122
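A minimal usage sketch, assuming the standard `generate()` API and a placeholder checkpoint: setting `penalty_alpha` together with `top_k` is what routes `generate()` into the contrastive-search loop above.

from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")  # placeholder checkpoint
model = AutoModelForCausalLM.from_pretrained("gpt2")
inputs = tokenizer("Xinference serves local models", return_tensors="pt")

# penalty_alpha > 0 combined with top_k > 1 selects contrastive search; the
# degeneration penalty then re-ranks the top-k candidate tokens at each step.
output_ids = model.generate(**inputs, penalty_alpha=0.6, top_k=4, max_new_tokens=32)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))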
+
3123
+ def _sample(
3124
+ self,
3125
+ input_ids: torch.LongTensor,
3126
+ logits_processor: LogitsProcessorList,
3127
+ stopping_criteria: StoppingCriteriaList,
3128
+ generation_config: GenerationConfig,
3129
+ synced_gpus: bool,
3130
+ streamer: Optional["BaseStreamer"],
3131
+ **model_kwargs,
3132
+ ) -> Union[GenerateNonBeamOutput, torch.LongTensor]:
3133
+ r"""
3134
+ Generates sequences of token ids for models with a language modeling head using **multinomial sampling** and
3135
+ can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models.
3136
+
3137
+ Parameters:
3138
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
3139
+ The sequence used as a prompt for the generation.
3140
+ logits_processor (`LogitsProcessorList`):
3141
+ An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsProcessor`]
3142
+ used to modify the prediction scores of the language modeling head applied at each generation step.
3143
+ stopping_criteria (`StoppingCriteriaList`):
3144
+ An instance of [`StoppingCriteriaList`]. List of instances of class derived from [`StoppingCriteria`]
3145
+ used to tell if the generation loop should stop.
3146
+ generation_config ([`~generation.GenerationConfig`]):
3147
+ The generation configuration to be used as parametrization of the decoding method.
3148
+ synced_gpus (`bool`):
3149
+ Whether to continue running the while loop until max_length (needed to avoid deadlocking with
3150
+ `FullyShardedDataParallel` and DeepSpeed ZeRO Stage 3).
3151
+ streamer (`BaseStreamer`, *optional*):
3152
+ Streamer object that will be used to stream the generated sequences. Generated tokens are passed
3153
+ through `streamer.put(token_ids)` and the streamer is responsible for any further processing.
3154
+ model_kwargs:
3155
+ Additional model specific kwargs will be forwarded to the `forward` function of the model. If model is
3156
+ an encoder-decoder model the kwargs should include `encoder_outputs`.
3157
+
3158
+ Return:
3159
+ [`~generation.GenerateDecoderOnlyOutput`], [`~generation.GenerateEncoderDecoderOutput`] or `torch.LongTensor`:
3160
+ A `torch.LongTensor` containing the generated tokens (default behaviour) or a
3161
+ [`~generation.GenerateDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and
3162
+ `return_dict_in_generate=True` or a [`~generation.GenerateEncoderDecoderOutput`] if
3163
+ `model.config.is_encoder_decoder=True`.
3164
+ """
3165
+ # init values
3166
+ pad_token_id = generation_config._pad_token_tensor
3167
+ output_attentions = generation_config.output_attentions
3168
+ output_hidden_states = generation_config.output_hidden_states
3169
+ output_scores = generation_config.output_scores
3170
+ output_logits = generation_config.output_logits
3171
+ return_dict_in_generate = generation_config.return_dict_in_generate
3172
+ max_length = generation_config.max_length
3173
+ has_eos_stopping_criteria = any(hasattr(criteria, "eos_token_id") for criteria in stopping_criteria)
3174
+ do_sample = generation_config.do_sample
3175
+
3176
+ # init attention / hidden states / scores tuples
3177
+ scores = () if (return_dict_in_generate and output_scores) else None
3178
+ raw_logits = () if (return_dict_in_generate and output_logits) else None
3179
+ decoder_attentions = () if (return_dict_in_generate and output_attentions) else None
3180
+ cross_attentions = () if (return_dict_in_generate and output_attentions) else None
3181
+ decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None
3182
+
3183
+ # if model is an encoder-decoder, retrieve encoder attention weights and hidden states
3184
+ if return_dict_in_generate and self.config.is_encoder_decoder:
3185
+ encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None
3186
+ encoder_hidden_states = (
3187
+ model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None
3188
+ )
3189
+
3190
+ # keep track of which sequences are already finished
3191
+ batch_size, cur_len = input_ids.shape
3192
+ this_peer_finished = False
3193
+ unfinished_sequences = torch.ones(batch_size, dtype=torch.long, device=input_ids.device)
3194
+ model_kwargs = self._get_initial_cache_position(input_ids, model_kwargs)
3195
+
3196
+ while self._has_unfinished_sequences(
3197
+ this_peer_finished, synced_gpus, device=input_ids.device, cur_len=cur_len, max_length=max_length
3198
+ ):
3199
+ # prepare model inputs
3200
+ model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs)
3201
+
3202
+ # prepare variable output controls (note: some models won't accept all output controls)
3203
+ model_inputs.update({"output_attentions": output_attentions} if output_attentions else {})
3204
+ model_inputs.update({"output_hidden_states": output_hidden_states} if output_hidden_states else {})
3205
+
3206
+ # forward pass to get next token
3207
+ outputs = self(**model_inputs, return_dict=True)
3208
+
3209
+ # synced_gpus: don't waste resources running the code we don't need; kwargs must be updated before skipping
3210
+ model_kwargs = self._update_model_kwargs_for_generation(
3211
+ outputs,
3212
+ model_kwargs,
3213
+ is_encoder_decoder=self.config.is_encoder_decoder,
3214
+ )
3215
+ if synced_gpus and this_peer_finished:
3216
+ continue
3217
+
3218
+ # Clone is needed to avoid keeping a hanging ref to outputs.logits which may be very large for first iteration
3219
+ # (the clone itself is always small)
3220
+ next_token_logits = outputs.logits.clone()[:, -1, :].float()
3221
+ next_token_logits = next_token_logits.to(input_ids.device)
3222
+
3223
+ # pre-process distribution
3224
+ next_token_scores = logits_processor(input_ids, next_token_logits)
3225
+
3226
+ # Store scores, attentions and hidden_states when required
3227
+ if return_dict_in_generate:
3228
+ if output_scores:
3229
+ scores += (next_token_scores,)
3230
+ if output_logits:
3231
+ raw_logits += (next_token_logits,)
3232
+ if output_attentions:
3233
+ decoder_attentions += (
3234
+ (outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,)
3235
+ )
3236
+ if self.config.is_encoder_decoder:
3237
+ cross_attentions += (outputs.cross_attentions,)
3238
+
3239
+ if output_hidden_states:
3240
+ decoder_hidden_states += (
3241
+ (outputs.decoder_hidden_states,)
3242
+ if self.config.is_encoder_decoder
3243
+ else (outputs.hidden_states,)
3244
+ )
3245
+
3246
+ # token selection
3247
+ if do_sample:
3248
+ probs = nn.functional.softmax(next_token_scores, dim=-1)
3249
+ # TODO (joao): this OP throws "skipping cudagraphs due to ['incompatible ops']", find solution
3250
+ next_tokens = torch.multinomial(probs, num_samples=1).squeeze(1)
3251
+ else:
3252
+ next_tokens = torch.argmax(next_token_scores, dim=-1)
3253
+
3254
+ # finished sentences should have their next token be a padding token
3255
+ if has_eos_stopping_criteria:
3256
+ next_tokens = next_tokens * unfinished_sequences + pad_token_id * (1 - unfinished_sequences)
3257
+
3258
+ # update generated ids, model inputs, and length for next step
3259
+ input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1)
3260
+ if streamer is not None:
3261
+ streamer.put(next_tokens.cpu())
3262
+
3263
+ unfinished_sequences = unfinished_sequences & ~stopping_criteria(input_ids, scores)
3264
+ this_peer_finished = unfinished_sequences.max() == 0
3265
+ cur_len += 1
3266
+
3267
+ # This is needed to properly delete outputs.logits which may be very large for first iteration
3268
+ # Otherwise a reference to outputs is kept which keeps the logits alive in the next iteration
3269
+ del outputs
3270
+
3271
+ if streamer is not None:
3272
+ streamer.end()
3273
+
3274
+ if return_dict_in_generate:
3275
+ if self.config.is_encoder_decoder:
3276
+ return GenerateEncoderDecoderOutput(
3277
+ sequences=input_ids,
3278
+ scores=scores,
3279
+ logits=raw_logits,
3280
+ encoder_attentions=encoder_attentions,
3281
+ encoder_hidden_states=encoder_hidden_states,
3282
+ decoder_attentions=decoder_attentions,
3283
+ cross_attentions=cross_attentions,
3284
+ decoder_hidden_states=decoder_hidden_states,
3285
+ past_key_values=model_kwargs.get("past_key_values"),
3286
+ )
3287
+ else:
3288
+ return GenerateDecoderOnlyOutput(
3289
+ sequences=input_ids,
3290
+ scores=scores,
3291
+ logits=raw_logits,
3292
+ attentions=decoder_attentions,
3293
+ hidden_states=decoder_hidden_states,
3294
+ past_key_values=model_kwargs.get("past_key_values"),
3295
+ )
3296
+ else:
3297
+ return input_ids
3298
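A minimal usage sketch, assuming the standard `generate()` API and a placeholder checkpoint: `do_sample=True` routes `generate()` into the multinomial-sampling loop above, and `return_dict_in_generate=True` exposes the tuples it accumulates.

from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")  # placeholder checkpoint
model = AutoModelForCausalLM.from_pretrained("gpt2")
inputs = tokenizer("Xinference serves local models", return_tensors="pt")

result = model.generate(
    **inputs,
    do_sample=True,               # multinomial sampling instead of greedy argmax
    temperature=0.8,
    top_p=0.95,
    max_new_tokens=16,
    return_dict_in_generate=True,
    output_scores=True,           # fills the per-step `scores` tuple
)
print(result.sequences.shape)     # (batch, prompt length + generated tokens)
print(len(result.scores))         # one processed-logits tensor per generated token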
+
3299
+ def _temporary_reorder_cache(self, past_key_values, beam_idx):
3300
+ """
3301
+ Temporary function to handle the different types of cache reordering processes while we roll out `Cache`.
3302
+
3303
+ TODO: standardize cache formats and make all models compatible with `Cache`. It would remove the need
3304
+ for this function, with `Cache.reorder_cache` being the sole remaining code path
3305
+ """
3306
+ model_class = self.__class__.__name__.lower()
3307
+ # Exception 1: code path for models using the legacy cache format
3308
+ if isinstance(past_key_values, (tuple, list)):
3309
+ past_key_values = self._reorder_cache(past_key_values, beam_idx)
3310
+ # Exception 2: models with different cache formats. These are limited to `DynamicCache` until their
3311
+ # cache format is standardized, to avoid adding complexity to the codebase.
3312
+ elif "gptbigcode" in model_class:
3313
+ if not isinstance(past_key_values, (DynamicCache, EncoderDecoderCache)):
3314
+ raise ValueError(
3315
+ f"Using an unsupported cache format with {model_class}. Currently, it only supports the "
3316
+ "legacy tuple format or `DynamicCache`"
3317
+ )
3318
+ past_key_values = self._reorder_cache(past_key_values, beam_idx)
3319
+ past_key_values = DynamicCache.from_legacy_cache(past_key_values)
3320
+ # Standard code path: use the `Cache.reorder_cache`
3321
+ else:
3322
+ past_key_values.reorder_cache(beam_idx)
3323
+ return past_key_values
3324
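A toy sketch of the reordering this helper performs on a legacy tuple-format cache (shapes invented for illustration); `Cache` subclasses achieve the same effect via `reorder_cache`.

import torch

# One decoder layer of a legacy cache: (key, value), each shaped
# (batch_size * num_beams, num_heads, seq_len, head_dim).
key = torch.randn(4, 8, 5, 64)
value = torch.randn(4, 8, 5, 64)
legacy_cache = ((key, value),)

beam_idx = torch.tensor([1, 0, 3, 2])  # beam each slot should copy its state from

reordered = tuple(
    tuple(state.index_select(0, beam_idx) for state in layer)
    for layer in legacy_cache
)
assert reordered[0][0].shape == key.shape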
+
3325
+ def _beam_search(
3326
+ self,
3327
+ input_ids: torch.LongTensor,
3328
+ beam_scorer: BeamScorer,
3329
+ logits_processor: LogitsProcessorList,
3330
+ stopping_criteria: StoppingCriteriaList,
3331
+ generation_config: GenerationConfig,
3332
+ synced_gpus: bool,
3333
+ **model_kwargs,
3334
+ ) -> Union[GenerateBeamOutput, torch.LongTensor]:
3335
+ r"""
3336
+ Generates sequences of token ids for models with a language modeling head using **beam search decoding** and
3337
+ can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models.
3338
+
3339
+ Parameters:
3340
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
3341
+ The sequence used as a prompt for the generation.
3342
+ beam_scorer (`BeamScorer`):
3343
+ A derived instance of [`BeamScorer`] that defines how beam hypotheses are constructed, stored and
3344
+ sorted during generation. For more information, the documentation of [`BeamScorer`] should be read.
3345
+ logits_processor (`LogitsProcessorList`):
3346
+ An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsProcessor`]
3347
+ used to modify the prediction scores of the language modeling head applied at each generation step.
3348
+ stopping_criteria (`StoppingCriteriaList`):
3349
+ An instance of [`StoppingCriteriaList`]. List of instances of class derived from [`StoppingCriteria`]
3350
+ used to tell if the generation loop should stop.
3351
+ generation_config ([`~generation.GenerationConfig`]):
3352
+ The generation configuration to be used as parametrization of the decoding method.
3353
+ synced_gpus (`bool`):
3354
+ Whether to continue running the while loop until max_length (needed to avoid deadlocking with
3355
+ `FullyShardedDataParallel` and DeepSpeed ZeRO Stage 3).
3356
+ model_kwargs:
3357
+ Additional model specific kwargs will be forwarded to the `forward` function of the model. If model is
3358
+ an encoder-decoder model the kwargs should include `encoder_outputs`.
3359
+
3360
+ Return:
3361
+ [`~generation.GenerateBeamDecoderOnlyOutput`], [`~generation.GenerateBeamEncoderDecoderOutput`] or
3362
+ `torch.LongTensor`: A `torch.LongTensor` containing the generated tokens (default behaviour) or a
3363
+ [`~generation.GenerateBeamDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and
3364
+ `return_dict_in_generate=True` or a [`~generation.GenerateBeamEncoderDecoderOutput`] if
3365
+ `model.config.is_encoder_decoder=True`.
3366
+ """
3367
+ # init values
3368
+ pad_token_id = generation_config._pad_token_tensor
3369
+ eos_token_id = generation_config._eos_token_tensor
3370
+ output_attentions = generation_config.output_attentions
3371
+ output_hidden_states = generation_config.output_hidden_states
3372
+ output_scores = generation_config.output_scores
3373
+ output_logits = generation_config.output_logits
3374
+ return_dict_in_generate = generation_config.return_dict_in_generate
3375
+ sequential = generation_config.low_memory
3376
+ do_sample = generation_config.do_sample
3377
+
3378
+ batch_size = len(beam_scorer._beam_hyps)
3379
+ num_beams = beam_scorer.num_beams
3380
+
3381
+ batch_beam_size, cur_len = input_ids.shape
3382
+ model_kwargs = self._get_initial_cache_position(input_ids, model_kwargs)
3383
+
3384
+ if num_beams * batch_size != batch_beam_size:
3385
+ raise ValueError(
3386
+ f"Batch dimension of `input_ids` should be {num_beams * batch_size}, but is {batch_beam_size}."
3387
+ )
3388
+
3389
+ # init attention / hidden states / scores tuples
3390
+ scores = () if (return_dict_in_generate and output_scores) else None
3391
+ raw_logits = () if (return_dict_in_generate and output_logits) else None
3392
+ beam_indices = (
3393
+ tuple(() for _ in range(batch_beam_size)) if (return_dict_in_generate and output_scores) else None
3394
+ )
3395
+ decoder_attentions = () if (return_dict_in_generate and output_attentions) else None
3396
+ cross_attentions = () if (return_dict_in_generate and output_attentions) else None
3397
+ decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None
3398
+
3399
+ # if model is an encoder-decoder, retrieve encoder attention weights and hidden states
3400
+ if return_dict_in_generate and self.config.is_encoder_decoder:
3401
+ encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None
3402
+ encoder_hidden_states = (
3403
+ model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None
3404
+ )
3405
+
3406
+ # initialise score of first beam with 0 and the rest with -1e9. This makes sure that only tokens
3407
+ # of the first beam are considered to avoid sampling the exact same tokens across all beams.
3408
+ beam_scores = torch.zeros((batch_size, num_beams), dtype=torch.float, device=input_ids.device)
3409
+ beam_scores[:, 1:] = -1e9
3410
+ beam_scores = beam_scores.view((batch_size * num_beams,))
3411
+
3412
+ this_peer_finished = False
3413
+
3414
+ decoder_prompt_len = input_ids.shape[-1] # record the prompt length of decoder
3415
+
3416
+ while self._has_unfinished_sequences(this_peer_finished, synced_gpus, device=input_ids.device):
3417
+ model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs)
3420
+
3421
+ # prepare variable output controls (note: some models won't accept all output controls)
3422
+ model_inputs.update({"output_attentions": output_attentions} if output_attentions else {})
3423
+ model_inputs.update({"output_hidden_states": output_hidden_states} if output_hidden_states else {})
3424
+
3425
+ # if sequential is True, split the input to batches of batch_size and run sequentially
3426
+ if sequential:
3427
+ if any(
3428
+ model_name in self.__class__.__name__.lower()
3429
+ for model_name in [
3430
+ "fsmt",
3431
+ "reformer",
3432
+ "ctrl",
3433
+ "gpt_bigcode",
3434
+ "transo_xl",
3435
+ "xlnet",
3436
+ "cpm",
3437
+ "jamba",
3438
+ ]
3439
+ ):
3440
+ raise RuntimeError(
3441
+ f"Currently generation for {self.__class__.__name__} is not supported "
3442
+ f"for `low_memory beam_search`. Please open an issue on GitHub if you need this feature."
3443
+ )
3444
+
3445
+ inputs_per_sub_batches = _split_model_inputs(
3446
+ model_inputs,
3447
+ split_size=batch_size,
3448
+ full_batch_size=batch_beam_size,
3449
+ config=self.config.get_text_config(),
3450
+ )
3451
+ outputs_per_sub_batch = [
3452
+ self(**inputs_per_sub_batch, return_dict=True) for inputs_per_sub_batch in inputs_per_sub_batches
3453
+ ]
3454
+
3455
+ outputs = stack_model_outputs(outputs_per_sub_batch, self.config.get_text_config())
3456
+
3457
+ else: # Unchanged original behavior
3458
+ outputs = self(**model_inputs, return_dict=True)
3459
+
3460
+ # synced_gpus: don't waste resources running the code we don't need; kwargs must be updated before skipping
3461
+ model_kwargs = self._update_model_kwargs_for_generation(
3462
+ outputs,
3463
+ model_kwargs,
3464
+ is_encoder_decoder=self.config.is_encoder_decoder,
3465
+ )
3466
+ if synced_gpus and this_peer_finished:
3467
+ cur_len = cur_len + 1
3468
+ continue
3469
+
3470
+ # Clone is needed to avoid keeping a hanging ref to outputs.logits which may be very large for first iteration
3471
+ # (the clone itself is always small)
3472
+ # .float() is needed to retain precision for later logits manipulations
3473
+ next_token_logits = outputs.logits[:, -1, :].clone().float()
3474
+ next_token_logits = next_token_logits.to(input_ids.device)
3475
+ next_token_scores = nn.functional.log_softmax(
3476
+ next_token_logits, dim=-1
3477
+ ) # (batch_size * num_beams, vocab_size)
3478
+
3479
+ next_token_scores_processed = logits_processor(input_ids, next_token_scores)
3480
+ next_token_scores = next_token_scores_processed + beam_scores[:, None].expand_as(
3481
+ next_token_scores_processed
3482
+ )
3483
+
3484
+ # Store scores, attentions and hidden_states when required
3485
+ if return_dict_in_generate:
3486
+ if output_scores:
3487
+ scores += (next_token_scores_processed,)
3488
+ if output_logits:
3489
+ raw_logits += (next_token_logits,)
3490
+ if output_attentions:
3491
+ decoder_attentions += (
3492
+ (outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,)
3493
+ )
3494
+ if self.config.is_encoder_decoder:
3495
+ cross_attentions += (outputs.cross_attentions,)
3496
+ if output_hidden_states:
3497
+ decoder_hidden_states += (
3498
+ (outputs.decoder_hidden_states,)
3499
+ if self.config.is_encoder_decoder
3500
+ else (outputs.hidden_states,)
3501
+ )
3502
+
3503
+ # reshape for beam search
3504
+ vocab_size = next_token_scores.shape[-1]
3505
+ next_token_scores = next_token_scores.view(batch_size, num_beams * vocab_size)
3506
+
3507
+ # Beam token selection: pick 1 + eos_token_id.shape[0] next tokens for each beam so we have at least 1
3508
+ # non eos token per beam.
3509
+ n_eos_tokens = eos_token_id.shape[0] if eos_token_id is not None else 0
3510
+ n_tokens_to_keep = max(2, 1 + n_eos_tokens) * num_beams
3511
+ if do_sample:
3512
+ probs = nn.functional.softmax(next_token_scores, dim=-1)
3515
+ next_tokens = torch.multinomial(probs, num_samples=n_tokens_to_keep)
3516
+ next_token_scores = torch.gather(next_token_scores, -1, next_tokens)
3517
+ next_token_scores, _indices = torch.sort(next_token_scores, descending=True, dim=1)
3518
+ next_tokens = torch.gather(next_tokens, -1, _indices)
3519
+ else:
3522
+ next_token_scores, next_tokens = torch.topk(
3523
+ next_token_scores, n_tokens_to_keep, dim=1, largest=True, sorted=True
3524
+ )
3525
+
3526
+ next_indices = torch.div(next_tokens, vocab_size, rounding_mode="floor")
3527
+ next_tokens = next_tokens % vocab_size
3528
+
3529
+ # stateless
3530
+ beam_outputs = beam_scorer.process(
3531
+ input_ids,
3532
+ next_token_scores,
3533
+ next_tokens,
3534
+ next_indices,
3535
+ pad_token_id=pad_token_id,
3536
+ eos_token_id=eos_token_id,
3537
+ beam_indices=beam_indices,
3538
+ decoder_prompt_len=decoder_prompt_len,
3539
+ )
3540
+
3541
+ beam_scores = beam_outputs["next_beam_scores"]
3542
+ beam_next_tokens = beam_outputs["next_beam_tokens"]
3543
+ beam_idx = beam_outputs["next_beam_indices"]
3544
+
3545
+ input_ids = torch.cat([input_ids[beam_idx, :], beam_next_tokens.unsqueeze(-1)], dim=-1)
3546
+
3547
+ # This is needed to properly delete outputs.logits which may be very large for first iteration
3548
+ # Otherwise a reference to outputs is kept which keeps the logits alive in the next iteration
3549
+ # IMPORTANT: Note that this should appear BEFORE the call to _reorder_cache() to save the maximum memory
3550
+ # (that way the memory peak does not include outputs.logits)
3551
+ del outputs
3552
+
3553
+ if model_kwargs.get("past_key_values", None) is not None:
3554
+ model_kwargs["past_key_values"] = self._temporary_reorder_cache(
3555
+ model_kwargs["past_key_values"], beam_idx
3556
+ )
3557
+
3558
+ if return_dict_in_generate and output_scores:
3559
+ beam_indices = tuple((beam_indices[beam_idx[i]] + (beam_idx[i],) for i in range(len(beam_indices))))
3560
+
3561
+ # increase cur_len
3562
+ cur_len = cur_len + 1
3563
+
3564
+ if beam_scorer.is_done or all(stopping_criteria(input_ids, scores)):
3565
+ this_peer_finished = True
3566
+
3567
+ sequence_outputs = beam_scorer.finalize(
3568
+ input_ids,
3569
+ beam_scores,
3570
+ next_tokens,
3571
+ next_indices,
3572
+ pad_token_id=pad_token_id,
3573
+ eos_token_id=eos_token_id,
3574
+ max_length=stopping_criteria.max_length,
3575
+ beam_indices=beam_indices,
3576
+ decoder_prompt_len=decoder_prompt_len,
3577
+ )
3578
+
3579
+ if return_dict_in_generate:
3580
+ if not output_scores:
3581
+ sequence_outputs["sequence_scores"] = None
3582
+
3583
+ if self.config.is_encoder_decoder:
3584
+ return GenerateBeamEncoderDecoderOutput(
3585
+ sequences=sequence_outputs["sequences"],
3586
+ sequences_scores=sequence_outputs["sequence_scores"],
3587
+ scores=scores,
3588
+ logits=raw_logits,
3589
+ beam_indices=sequence_outputs["beam_indices"],
3590
+ encoder_attentions=encoder_attentions,
3591
+ encoder_hidden_states=encoder_hidden_states,
3592
+ decoder_attentions=decoder_attentions,
3593
+ cross_attentions=cross_attentions,
3594
+ decoder_hidden_states=decoder_hidden_states,
3595
+ past_key_values=model_kwargs.get("past_key_values"),
3596
+ )
3597
+ else:
3598
+ return GenerateBeamDecoderOnlyOutput(
3599
+ sequences=sequence_outputs["sequences"],
3600
+ sequences_scores=sequence_outputs["sequence_scores"],
3601
+ scores=scores,
3602
+ logits=raw_logits,
3603
+ beam_indices=sequence_outputs["beam_indices"],
3604
+ attentions=decoder_attentions,
3605
+ hidden_states=decoder_hidden_states,
3606
+ past_key_values=model_kwargs.get("past_key_values"),
3607
+ )
3608
+ else:
3609
+ return sequence_outputs["sequences"]
3610
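A minimal usage sketch, assuming the standard `generate()` API and a placeholder seq2seq checkpoint: `num_beams > 1` (without sampling or beam groups) selects the beam-search loop above, and `low_memory=True` maps to its `sequential` sub-batching branch.

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("t5-small")  # placeholder checkpoint
model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")
inputs = tokenizer("translate English to German: The cache is reordered per beam.",
                   return_tensors="pt")

output_ids = model.generate(
    **inputs,
    num_beams=4,              # > 1 enables beam search
    num_return_sequences=2,   # must not exceed num_beams
    early_stopping=True,
    low_memory=True,          # runs the beams in sub-batches (the `sequential` branch)
    max_new_tokens=32,
)
print(tokenizer.batch_decode(output_ids, skip_special_tokens=True))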
+
3611
+ def _group_beam_search(
3612
+ self,
3613
+ input_ids: torch.LongTensor,
3614
+ beam_scorer: BeamScorer,
3615
+ logits_processor: LogitsProcessorList,
3616
+ stopping_criteria: StoppingCriteriaList,
3617
+ generation_config: GenerationConfig,
3618
+ synced_gpus: bool,
3619
+ **model_kwargs,
3620
+ ):
3621
+ r"""
3622
+ Generates sequences of token ids for models with a language modeling head using **diverse beam search
3623
+ decoding** and can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models.
3624
+
3625
+ Parameters:
3626
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
3627
+ The sequence used as a prompt for the generation.
3628
+ beam_scorer (`BeamScorer`):
3629
+ A derived instance of [`BeamScorer`] that defines how beam hypotheses are constructed, stored and
3630
+ sorted during generation. For more information, the documentation of [`BeamScorer`] should be read.
3631
+ logits_processor (`LogitsProcessorList`):
3632
+ An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsProcessor`]
3633
+ used to modify the prediction scores of the language modeling head applied at each generation step.
3634
+ stopping_criteria (`StoppingCriteriaList`):
3635
+ An instance of [`StoppingCriteriaList`]. List of instances of class derived from [`StoppingCriteria`]
3636
+ used to tell if the generation loop should stop.
3637
+ generation_config ([`~generation.GenerationConfig`]):
3638
+ The generation configuration to be used as parametrization of the decoding method.
3639
+ synced_gpus (`bool`):
3640
+ Whether to continue running the while loop until max_length (needed to avoid deadlocking with
3641
+ `FullyShardedDataParallel` and DeepSpeed ZeRO Stage 3).
3642
+ model_kwargs:
3643
+ Additional model specific kwargs that will be forwarded to the `forward` function of the model. If
3644
+ model is an encoder-decoder model the kwargs should include `encoder_outputs`.
3645
+
3646
+ Return:
3647
+ [`~generation.GenerateBeamDecoderOnlyOutput`], [`~generation.GenerateBeamEncoderDecoderOutput`] or
3648
+ `torch.LongTensor`: A `torch.LongTensor` containing the generated tokens (default behaviour) or a
3649
+ [`~generation.GenerateBeamDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and
3650
+ `return_dict_in_generate=True` or a [`~generation.GenerateBeamEncoderDecoderOutput`] if
3651
+ `model.config.is_encoder_decoder=True`.
3652
+ """
3653
+ # init values
3654
+ pad_token_id = generation_config._pad_token_tensor
3655
+ eos_token_id = generation_config._eos_token_tensor
3656
+ output_attentions = generation_config.output_attentions
3657
+ output_hidden_states = generation_config.output_hidden_states
3658
+ output_scores = generation_config.output_scores
3659
+ output_logits = generation_config.output_logits
3660
+ return_dict_in_generate = generation_config.return_dict_in_generate
3661
+
3662
+ num_beams = beam_scorer.num_beams
3663
+ num_beam_groups = beam_scorer.num_beam_groups
3664
+ num_sub_beams = num_beams // num_beam_groups
3665
+ batch_size = len(beam_scorer._beam_hyps) // num_beam_groups
3666
+ device = input_ids.device
3667
+
3668
+ batch_beam_size, cur_len = input_ids.shape
3669
+ model_kwargs = self._get_initial_cache_position(input_ids, model_kwargs)
3670
+
3671
+ if return_dict_in_generate and output_scores:
3672
+ beam_indices = [tuple(() for _ in range(num_sub_beams * batch_size)) for _ in range(num_beam_groups)]
3673
+ else:
3674
+ beam_indices = None
3675
+
3676
+ if num_beams * batch_size != batch_beam_size:
3677
+ raise ValueError(
3678
+ f"Batch dimension of `input_ids` should be {num_beams * batch_size}, but is {batch_beam_size}."
3679
+ )
3680
+
3681
+ # init attention / hidden states / scores tuples
3682
+ scores = () if (return_dict_in_generate and output_scores) else None
3683
+ raw_logits = () if (return_dict_in_generate and output_logits) else None
3684
+ decoder_attentions = () if (return_dict_in_generate and output_attentions) else None
3685
+ cross_attentions = () if (return_dict_in_generate and output_attentions) else None
3686
+ decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None
3687
+
3688
+ # if model is an encoder-decoder, retrieve encoder attention weights and hidden states
3689
+ if return_dict_in_generate and self.config.is_encoder_decoder:
3690
+ encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None
3691
+ encoder_hidden_states = (
3692
+ model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None
3693
+ )
3694
+
3695
+ # initialise score of first beam of each group with 0 and the rest with -1e9. This ensures that the beams in
3696
+ # the same group don't produce same tokens every time.
3697
+ beam_scores = torch.full((batch_size, num_beams), -1e9, dtype=torch.float, device=device)
3698
+ beam_scores[:, ::num_sub_beams] = 0
3699
+ beam_scores = beam_scores.view((batch_size * num_beams,))
3700
+
3701
+ this_peer_finished = False
3702
+
3703
+ decoder_prompt_len = input_ids.shape[-1] # record the prompt length of decoder
3704
+ while self._has_unfinished_sequences(this_peer_finished, synced_gpus, device=input_ids.device):
3705
+ # predicted tokens in cur_len step
3706
+ current_tokens = torch.zeros(batch_size * num_beams, dtype=input_ids.dtype, device=device)
3707
+
3708
+ # indices which will form the beams in the next time step
3709
+ reordering_indices = torch.zeros(batch_size * num_beams, dtype=torch.long, device=device)
3710
+
3711
+ # do one decoder step on all beams of all sentences in batch
3712
+ model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs)
3713
+
3714
+ # prepare variable output controls (note: some models won't accept all output controls)
3715
+ model_inputs.update({"output_attentions": output_attentions} if output_attentions else {})
3716
+ model_inputs.update({"output_hidden_states": output_hidden_states} if output_hidden_states else {})
3717
+
3718
+ outputs = self(**model_inputs, return_dict=True)
3719
+
3720
+ # synced_gpus: don't waste resources running the code we don't need; kwargs must be updated before skipping
3721
+ model_kwargs = self._update_model_kwargs_for_generation(
3722
+ outputs,
3723
+ model_kwargs,
3724
+ is_encoder_decoder=self.config.is_encoder_decoder,
3725
+ )
3726
+ if synced_gpus and this_peer_finished:
3727
+ cur_len = cur_len + 1
3728
+ continue
3729
+
3730
+ if output_scores:
3731
+ processed_score = torch.zeros_like(outputs.logits[:, -1, :])
3732
+ if output_logits:
3733
+ # Clone is needed to avoid keeping a hanging ref to outputs.logits which may be very large for first iteration
3734
+ # (the clone itself is always small)
3735
+ raw_logit_score = outputs.logits[:, -1, :].clone()
3736
+ raw_logit_score = raw_logit_score.to(input_ids.device)
3737
+
3738
+ for beam_group_idx in range(num_beam_groups):
3739
+ group_start_idx = beam_group_idx * num_sub_beams
3740
+ group_end_idx = min(group_start_idx + num_sub_beams, num_beams)
3741
+ group_size = group_end_idx - group_start_idx
3742
+
3743
+ # indices of beams of current group among all sentences in batch
3744
+ batch_group_indices = []
3745
+
3746
+ for batch_idx in range(batch_size):
3747
+ batch_group_indices.extend(
3748
+ [batch_idx * num_beams + idx for idx in range(group_start_idx, group_end_idx)]
3749
+ )
3750
+ group_input_ids = input_ids[batch_group_indices]
3751
+
3752
+ # select outputs of beams of current group only
3753
+ # No need to clone() the logits here as they will not retain outputs.logits at the end of the loop
3754
+ # .float() is needed to retain precision for later logits manipulations
3755
+ next_token_logits = outputs.logits[batch_group_indices, -1, :].float()
3756
+ next_token_logits = next_token_logits.to(input_ids.device)
3757
+
3758
+ next_token_scores = nn.functional.log_softmax(
3759
+ next_token_logits, dim=-1
3760
+ ) # (batch_size * group_size, vocab_size)
3761
+ vocab_size = next_token_scores.shape[-1]
3762
+
3763
+ next_token_scores_processed = logits_processor(
3764
+ group_input_ids, next_token_scores, current_tokens=current_tokens, beam_group_idx=beam_group_idx
3765
+ )
3766
+ next_token_scores = next_token_scores_processed + beam_scores[batch_group_indices].unsqueeze(-1)
3767
+ next_token_scores = next_token_scores.expand_as(next_token_scores_processed)
3768
+
3769
+ if output_scores:
3770
+ processed_score[batch_group_indices] = next_token_scores_processed
3771
+
3772
+ # reshape for beam search
3773
+ next_token_scores = next_token_scores.view(batch_size, group_size * vocab_size)
3774
+
3775
+ # Sample 1 + len(eos_token_id) next tokens for each beam so we have at least 1 non eos token per beam.
3776
+ n_eos_tokens = eos_token_id.shape[0] if eos_token_id is not None else 0
3777
+ next_token_scores, next_tokens = torch.topk(
3778
+ next_token_scores, max(2, 1 + n_eos_tokens) * group_size, dim=1, largest=True, sorted=True
3779
+ )
3780
+
3781
+ next_indices = torch.div(next_tokens, vocab_size, rounding_mode="floor")
3782
+ next_tokens = next_tokens % vocab_size
3783
+
3784
+ # stateless
3785
+ process_beam_indices = sum(beam_indices, ()) if beam_indices is not None else None
3786
+ beam_outputs = beam_scorer.process(
3787
+ group_input_ids,
3788
+ next_token_scores,
3789
+ next_tokens,
3790
+ next_indices,
3791
+ pad_token_id=pad_token_id,
3792
+ eos_token_id=eos_token_id,
3793
+ beam_indices=process_beam_indices,
3794
+ group_index=beam_group_idx,
3795
+ decoder_prompt_len=decoder_prompt_len,
3796
+ )
3797
+ beam_scores[batch_group_indices] = beam_outputs["next_beam_scores"]
3798
+ beam_next_tokens = beam_outputs["next_beam_tokens"]
3799
+ beam_idx = beam_outputs["next_beam_indices"]
3800
+
3801
+ if return_dict_in_generate and output_scores:
3802
+ beam_indices[beam_group_idx] = tuple(
3803
+ beam_indices[beam_group_idx][beam_idx[i]] + (beam_idx[i],) for i in range(len(beam_indices[0]))
3804
+ )
3805
+
3806
+ input_ids[batch_group_indices] = group_input_ids[beam_idx]
3807
+ group_input_ids = torch.cat([group_input_ids[beam_idx, :], beam_next_tokens.unsqueeze(-1)], dim=-1)
3808
+ current_tokens[batch_group_indices] = group_input_ids[:, -1]
3809
+
3810
+ # (beam_idx // group_size) -> batch_idx
3811
+ # (beam_idx % group_size) -> offset of idx inside the group
3812
+ reordering_indices[batch_group_indices] = (
3813
+ num_beams * torch.div(beam_idx, group_size, rounding_mode="floor")
3814
+ + group_start_idx
3815
+ + (beam_idx % group_size)
3816
+ )
3817
+
3818
+ # Store scores, attentions and hidden_states when required
3819
+ if return_dict_in_generate:
3820
+ if output_scores:
3821
+ scores += (processed_score,)
3822
+ if output_logits:
3823
+ raw_logits += (raw_logit_score,)
3824
+ if output_attentions:
3825
+ decoder_attentions += (
3826
+ (outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,)
3827
+ )
3828
+ if self.config.is_encoder_decoder:
3829
+ cross_attentions += (outputs.cross_attentions,)
3830
+
3831
+ if output_hidden_states:
3832
+ decoder_hidden_states += (
3833
+ (outputs.decoder_hidden_states,)
3834
+ if self.config.is_encoder_decoder
3835
+ else (outputs.hidden_states,)
3836
+ )
3837
+
3838
+ input_ids = torch.cat([input_ids, current_tokens.unsqueeze(-1)], dim=-1)
3839
+
3840
+ # This is needed to properly delete outputs.logits which may be very large for first iteration
3841
+ # Otherwise a reference to outputs is kept which keeps the logits alive in the next iteration
3842
+ # IMPORTANT: Note that this should appear BEFORE the call to _reorder_cache() to save the maximum memory
3843
+ # (that way the memory peak does not include outputs.logits)
3844
+ del outputs
3845
+
3846
+ if model_kwargs.get("past_key_values", None) is not None:
3847
+ model_kwargs["past_key_values"] = self._temporary_reorder_cache(
3848
+ model_kwargs["past_key_values"], reordering_indices
3849
+ )
3850
+
3851
+ # increase cur_len
3852
+ cur_len = cur_len + 1
3853
+
3854
+ if beam_scorer.is_done or all(stopping_criteria(input_ids, scores)):
3855
+ this_peer_finished = True
3856
+
3857
+ final_beam_indices = sum(beam_indices, ()) if beam_indices is not None else None
3858
+ sequence_outputs = beam_scorer.finalize(
3859
+ input_ids,
3860
+ beam_scores,
3861
+ next_tokens,
3862
+ next_indices,
3863
+ pad_token_id=pad_token_id,
3864
+ eos_token_id=eos_token_id,
3865
+ max_length=stopping_criteria.max_length,
3866
+ beam_indices=final_beam_indices,
3867
+ decoder_prompt_len=decoder_prompt_len,
3868
+ )
3869
+
3870
+ if return_dict_in_generate:
3871
+ if not output_scores:
3872
+ sequence_outputs["sequence_scores"] = None
3873
+
3874
+ if self.config.is_encoder_decoder:
3875
+ return GenerateBeamEncoderDecoderOutput(
3876
+ sequences=sequence_outputs["sequences"],
3877
+ sequences_scores=sequence_outputs["sequence_scores"],
3878
+ scores=scores,
3879
+ logits=raw_logits,
3880
+ beam_indices=sequence_outputs["beam_indices"],
3881
+ encoder_attentions=encoder_attentions,
3882
+ encoder_hidden_states=encoder_hidden_states,
3883
+ decoder_attentions=decoder_attentions,
3884
+ cross_attentions=cross_attentions,
3885
+ decoder_hidden_states=decoder_hidden_states,
3886
+ past_key_values=model_kwargs.get("past_key_values"),
3887
+ )
3888
+ else:
3889
+ return GenerateBeamDecoderOnlyOutput(
3890
+ sequences=sequence_outputs["sequences"],
3891
+ sequences_scores=sequence_outputs["sequence_scores"],
3892
+ scores=scores,
3893
+ logits=raw_logits,
3894
+ beam_indices=sequence_outputs["beam_indices"],
3895
+ attentions=decoder_attentions,
3896
+ hidden_states=decoder_hidden_states,
3897
+ past_key_values=model_kwargs.get("past_key_values"),
3898
+ )
3899
+ else:
3900
+ return sequence_outputs["sequences"]
3901
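A minimal usage sketch, assuming the standard `generate()` API and a placeholder seq2seq checkpoint: `num_beam_groups > 1` plus a `diversity_penalty` selects the diverse (group) beam-search loop above; sampling is not supported on this path.

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("t5-small")  # placeholder checkpoint
model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")
inputs = tokenizer("summarize: Group beam search penalises tokens reused across groups.",
                   return_tensors="pt")

output_ids = model.generate(
    **inputs,
    num_beams=6,              # must be divisible by num_beam_groups
    num_beam_groups=3,        # > 1 enables diverse beam search
    diversity_penalty=1.0,    # discourages groups from picking the same tokens
    do_sample=False,
    num_return_sequences=3,
    max_new_tokens=32,
)
print(tokenizer.batch_decode(output_ids, skip_special_tokens=True))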
+
3902
+ def _constrained_beam_search(
3903
+ self,
3904
+ input_ids: torch.LongTensor,
3905
+ constrained_beam_scorer: ConstrainedBeamSearchScorer,
3906
+ logits_processor: LogitsProcessorList,
3907
+ stopping_criteria: StoppingCriteriaList,
3908
+ generation_config: GenerationConfig,
3909
+ synced_gpus: bool,
3910
+ **model_kwargs,
3911
+ ) -> Union[GenerateBeamOutput, torch.LongTensor]:
3912
+ r"""
3913
+ Generates sequences of token ids for models with a language modeling head using **constrained beam search
3914
+ decoding** and can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models.
3915
+
3916
+ Parameters:
3917
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
3918
+ The sequence used as a prompt for the generation.
3919
+ constrained_beam_scorer (`ConstrainedBeamSearchScorer`):
3920
+ A derived instance of [`BeamScorer`] that defines how beam hypotheses are constructed, stored and
3921
+ sorted during generation, while satisfying a list of positive constraints. For more information, the
3922
+ documentation of [`ConstrainedBeamSearchScorer`] should be read.
3923
+ logits_processor (`LogitsProcessorList`):
3924
+ An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsProcessor`]
3925
+ used to modify the prediction scores of the language modeling head applied at each generation step.
3926
+ stopping_criteria (`StoppingCriteriaList`):
3927
+ An instance of [`StoppingCriteriaList`]. List of instances of class derived from [`StoppingCriteria`]
3928
+ used to tell if the generation loop should stop.
3929
+ generation_config ([`~generation.GenerationConfig`]):
3930
+ The generation configuration to be used as parametrization of the decoding method.
3931
+ synced_gpus (`bool`):
3932
+ Whether to continue running the while loop until max_length (needed to avoid deadlocking with
3933
+ `FullyShardedDataParallel` and DeepSpeed ZeRO Stage 3).
3934
+ model_kwargs:
3935
+ Additional model specific kwargs will be forwarded to the `forward` function of the model. If model is
3936
+ an encoder-decoder model the kwargs should include `encoder_outputs`.
3937
+
3938
+ Return:
3939
+ [`~generation.GenerateBeamDecoderOnlyOutput`], [`~generation.GenerateBeamEncoderDecoderOutput`] or
3940
+ `torch.LongTensor`: A `torch.LongTensor` containing the generated tokens (default behaviour) or a
3941
+ [`~generation.GenerateBeamDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and
3942
+ `return_dict_in_generate=True` or a [`~generation.GenerateBeamEncoderDecoderOutput`] if
3943
+ `model.config.is_encoder_decoder=True`.
3944
+ """
3945
+ # init values
3946
+ pad_token_id = generation_config._pad_token_tensor
3947
+ eos_token_id = generation_config._eos_token_tensor
3948
+ output_attentions = generation_config.output_attentions
3949
+ output_hidden_states = generation_config.output_hidden_states
3950
+ output_scores = generation_config.output_scores
3951
+ output_logits = generation_config.output_logits
3952
+ return_dict_in_generate = generation_config.return_dict_in_generate
3953
+
3954
+ batch_size = len(constrained_beam_scorer._beam_hyps)
3955
+ num_beams = constrained_beam_scorer.num_beams
3956
+
3957
+ batch_beam_size, cur_len = input_ids.shape
3958
+ model_kwargs = self._get_initial_cache_position(input_ids, model_kwargs)
+
+ if num_beams * batch_size != batch_beam_size:
+ raise ValueError(
+ f"Batch dimension of `input_ids` should be {num_beams * batch_size}, but is {batch_beam_size}."
+ )
+
+ # init attention / hidden states / scores tuples
+ scores = () if (return_dict_in_generate and output_scores) else None
+ raw_logits = () if (return_dict_in_generate and output_logits) else None
+ beam_indices = (
+ tuple(() for _ in range(batch_beam_size)) if (return_dict_in_generate and output_scores) else None
+ )
+ decoder_attentions = () if (return_dict_in_generate and output_attentions) else None
+ cross_attentions = () if (return_dict_in_generate and output_attentions) else None
+ decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None
+
+ # if model is an encoder-decoder, retrieve encoder attention weights and hidden states
+ if return_dict_in_generate and self.config.is_encoder_decoder:
+ encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None
+ encoder_hidden_states = (
+ model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None
+ )
+
+ # initialise score of first beam with 0 and the rest with -1e9. This makes sure that only tokens
+ # of the first beam are considered to avoid sampling the exact same tokens across all beams.
+ beam_scores = torch.zeros((batch_size, num_beams), dtype=torch.float, device=input_ids.device)
+ beam_scores[:, 1:] = -1e9
+ beam_scores = beam_scores.view((batch_size * num_beams,))
+
+ this_peer_finished = False
+
+ decoder_prompt_len = input_ids.shape[-1] # record the prompt length of decoder
+ while self._has_unfinished_sequences(this_peer_finished, synced_gpus, device=input_ids.device):
+ model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs)
+
+ # prepare variable output controls (note: some models won't accept all output controls)
+ model_inputs.update({"output_attentions": output_attentions} if output_attentions else {})
+ model_inputs.update({"output_hidden_states": output_hidden_states} if output_hidden_states else {})
+
+ outputs = self(**model_inputs, return_dict=True)
+
+ # synced_gpus: don't waste resources running the code we don't need; kwargs must be updated before skipping
+ model_kwargs = self._update_model_kwargs_for_generation(
+ outputs,
+ model_kwargs,
+ is_encoder_decoder=self.config.is_encoder_decoder,
+ )
+ if synced_gpus and this_peer_finished:
+ cur_len = cur_len + 1
+ continue
+
+ # Clone is needed to avoid keeping a hanging ref to outputs.logits which may be very large for first iteration
+ # (the clone itself is always small)
+ # .float() is needed to retain precision for later logits manipulations
+ next_token_logits = outputs.logits[:, -1, :].clone().float()
+ next_token_logits = next_token_logits.to(input_ids.device)
+ next_token_scores = nn.functional.log_softmax(
+ next_token_logits, dim=-1
+ ) # (batch_size * num_beams, vocab_size)
+
+ next_token_scores_processed = logits_processor(input_ids, next_token_scores)
+
+ next_token_scores = next_token_scores_processed + beam_scores[:, None].expand_as(
+ next_token_scores_processed
+ )
+
+ scores_for_all_vocab = next_token_scores.clone()
+
+ # Store scores, attentions and hidden_states when required
+ if return_dict_in_generate:
+ if output_scores:
+ scores += (next_token_scores,)
+ if output_logits:
+ raw_logits += (next_token_logits,)
+ if output_attentions:
+ decoder_attentions += (
+ (outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,)
+ )
+ if self.config.is_encoder_decoder:
+ cross_attentions += (outputs.cross_attentions,)
+
+ if output_hidden_states:
+ decoder_hidden_states += (
+ (outputs.decoder_hidden_states,)
+ if self.config.is_encoder_decoder
+ else (outputs.hidden_states,)
+ )
+
+ # reshape for beam search
+ vocab_size = next_token_scores.shape[-1]
+ next_token_scores = next_token_scores.view(batch_size, num_beams * vocab_size)
+
+ # Sample 1 + len(eos_token_id) next tokens for each beam so we have at least 1 non eos token per beam.
+ n_eos_tokens = eos_token_id.shape[0] if eos_token_id is not None else 0
+ next_token_scores, next_tokens = torch.topk(
+ next_token_scores, max(2, 1 + n_eos_tokens) * num_beams, dim=1, largest=True, sorted=True
+ )
+
+ next_indices = (next_tokens / vocab_size).long()
+ next_tokens = next_tokens % vocab_size
+
+ # stateless
+ beam_outputs = constrained_beam_scorer.process(
+ input_ids,
+ next_token_scores,
+ next_tokens,
+ next_indices,
+ scores_for_all_vocab,
+ pad_token_id=pad_token_id,
+ eos_token_id=eos_token_id,
+ beam_indices=beam_indices,
+ decoder_prompt_len=decoder_prompt_len,
+ )
+ beam_scores = beam_outputs["next_beam_scores"]
+ beam_next_tokens = beam_outputs["next_beam_tokens"]
+ beam_idx = beam_outputs["next_beam_indices"]
+
+ input_ids = torch.cat([input_ids[beam_idx, :], beam_next_tokens.unsqueeze(-1)], dim=-1)
+
+ # This is needed to properly delete outputs.logits which may be very large for first iteration
+ # Otherwise a reference to outputs is kept which keeps the logits alive in the next iteration
+ # IMPORTANT: Note that this should appear BEFORE the call to _reorder_cache() to save the maximum memory
+ # (that way the memory peak does not include outputs.logits)
+ del outputs
+
+ if model_kwargs.get("past_key_values", None) is not None:
+ model_kwargs["past_key_values"] = self._temporary_reorder_cache(
+ model_kwargs["past_key_values"], beam_idx
+ )
+
+ if return_dict_in_generate and output_scores:
+ beam_indices = tuple((beam_indices[beam_idx[i]] + (beam_idx[i],) for i in range(len(beam_indices))))
+
+ # increase cur_len
+ cur_len = cur_len + 1
+
+ if constrained_beam_scorer.is_done or all(stopping_criteria(input_ids, scores)):
+ this_peer_finished = True
+
+ sequence_outputs = constrained_beam_scorer.finalize(
+ input_ids,
+ beam_scores,
+ next_tokens,
+ next_indices,
+ pad_token_id=pad_token_id,
+ eos_token_id=eos_token_id,
+ max_length=stopping_criteria.max_length,
+ beam_indices=beam_indices,
+ decoder_prompt_len=decoder_prompt_len,
+ )
+
+ if return_dict_in_generate:
+ if not output_scores:
+ sequence_outputs["sequence_scores"] = None
+ if self.config.is_encoder_decoder:
+ return GenerateBeamEncoderDecoderOutput(
+ sequences=sequence_outputs["sequences"],
+ sequences_scores=sequence_outputs["sequence_scores"],
+ scores=scores,
+ logits=raw_logits,
+ beam_indices=sequence_outputs["beam_indices"],
+ encoder_attentions=encoder_attentions,
+ encoder_hidden_states=encoder_hidden_states,
+ decoder_attentions=decoder_attentions,
+ cross_attentions=cross_attentions,
+ decoder_hidden_states=decoder_hidden_states,
+ past_key_values=model_kwargs.get("past_key_values"),
+ )
+ else:
+ return GenerateBeamDecoderOnlyOutput(
+ sequences=sequence_outputs["sequences"],
+ sequences_scores=sequence_outputs["sequence_scores"],
+ scores=scores,
+ logits=raw_logits,
+ beam_indices=sequence_outputs["beam_indices"],
+ attentions=decoder_attentions,
+ hidden_states=decoder_hidden_states,
+ past_key_values=model_kwargs.get("past_key_values"),
+ )
+ else:
+ return sequence_outputs["sequences"]
+
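The top-k above runs over beams flattened into a single row per batch element; the quotient/remainder pair then recovers which beam each candidate extends and which vocabulary token it adds. A minimal standalone sketch with toy sizes, not part of the vendored file:

import torch

# Toy sizes; the real call works on (batch_size, num_beams * vocab_size) scores.
batch_size, num_beams, vocab_size = 1, 3, 5
scores = torch.randn(batch_size * num_beams, vocab_size)
flat_scores = scores.view(batch_size, num_beams * vocab_size)
top_scores, top_ids = torch.topk(flat_scores, k=2 * num_beams, dim=1)
beam_index = (top_ids / vocab_size).long()  # which beam each candidate came from
token_id = top_ids % vocab_size             # which vocabulary token it is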
+ def _assisted_decoding(
+ self,
+ input_ids: torch.LongTensor,
+ candidate_generator: CandidateGenerator,
+ logits_processor: LogitsProcessorList,
+ stopping_criteria: StoppingCriteriaList,
+ generation_config: GenerationConfig,
+ synced_gpus: bool,
+ streamer: Optional["BaseStreamer"],
+ **model_kwargs,
+ ) -> Union[GenerateNonBeamOutput, torch.LongTensor]:
+ r"""
+ Generates sequences of token ids for models with a language modeling head using **greedy decoding** or
+ **sample** (depending on `do_sample`), assisted by candidate sequences. Assisted generation is an example of a
+ candidate decoding strategy. Can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text
+ models.
+
+ Parameters:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ The sequence used as a prompt for the generation.
+ candidate_generator (`CandidateGenerator`):
+ A derived instance of [`CandidateGenerator`] that defines how candidate sequences are generated. For
+ more information, the documentation of [`CandidateGenerator`] should be read.
+ logits_processor (`LogitsProcessorList`):
+ An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsProcessor`]
+ used to modify the prediction scores of the language modeling head applied at each generation step.
+ stopping_criteria (`StoppingCriteriaList`):
+ An instance of [`StoppingCriteriaList`]. List of instances of class derived from [`StoppingCriteria`]
+ used to tell if the generation loop should stop.
+ generation_config ([`~generation.GenerationConfig`]):
+ The generation configuration to be used as parametrization of the decoding method.
+ synced_gpus (`bool`):
+ Whether to continue running the while loop until max_length (needed to avoid deadlocking with
+ `FullyShardedDataParallel` and DeepSpeed ZeRO Stage 3).
+ streamer (`BaseStreamer`, *optional*):
+ Streamer object that will be used to stream the generated sequences. Generated tokens are passed
+ through `streamer.put(token_ids)` and the streamer is responsible for any further processing.
+ model_kwargs:
+ Additional model specific keyword arguments will be forwarded to the `forward` function of the model.
+ If model is an encoder-decoder model the kwargs should include `encoder_outputs`.
+
+ Return:
+ [`~generation.GenerateDecoderOnlyOutput`], [`~generation.GenerateEncoderDecoderOutput`] or
+ `torch.LongTensor`: A `torch.LongTensor` containing the generated tokens (default behaviour) or a
+ [`~generation.GenerateDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and
+ `return_dict_in_generate=True` or a [`~generation.GenerateEncoderDecoderOutput`] if
+ `model.config.is_encoder_decoder=True`.
+ """
+ # init values
+ do_sample = generation_config.do_sample
+ output_attentions = generation_config.output_attentions
+ output_hidden_states = generation_config.output_hidden_states
+ output_scores = generation_config.output_scores
+ output_logits = generation_config.output_logits
+ return_dict_in_generate = generation_config.return_dict_in_generate
+
+ # init attention / hidden states / scores tuples
+ scores = () if (return_dict_in_generate and output_scores) else None
+ raw_logits = () if (return_dict_in_generate and output_logits) else None
+ decoder_attentions = () if (return_dict_in_generate and output_attentions) else None
+ cross_attentions = () if (return_dict_in_generate and output_attentions) else None
+ decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None
+
+ # if model is an encoder-decoder, retrieve encoder attention weights and hidden states
+ if return_dict_in_generate and self.config.is_encoder_decoder:
+ encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None
+ encoder_hidden_states = (
+ model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None
+ )
+
+ # keep track of which sequences are already finished
+ batch_size = input_ids.shape[0]
+ unfinished_sequences = torch.ones(batch_size, dtype=torch.long, device=input_ids.device)
+ model_kwargs = self._get_initial_cache_position(input_ids, model_kwargs)
+
+ this_peer_finished = False
+ is_first_iteration = True # to preserve the same API in the output as other generation methods
+ while self._has_unfinished_sequences(this_peer_finished, synced_gpus, device=input_ids.device):
+ cur_len = input_ids.shape[-1]
+
+ # 1. Fetch candidate sequences from a `CandidateGenerator`
+ candidate_input_ids, candidate_logits = candidate_generator.get_candidates(input_ids)
+
+ if candidate_logits is not None:
+ candidate_logits = candidate_logits.to(self.device)
+
+ candidate_length = candidate_input_ids.shape[1] - input_ids.shape[1]
+ is_done_candidate = stopping_criteria(candidate_input_ids, None)
+
+ # 2. Use the original model to obtain the next token logits given the candidate sequence. We obtain
+ # `candidate_length + 1` relevant logits from this process: in the event that all candidates are correct,
+ # we use this forward pass to also pick the subsequent logits in the original model.
+
+ # 2.1. Prepare the model inputs
+ candidate_kwargs = copy.copy(model_kwargs)
+ candidate_kwargs = _prepare_attention_mask(
+ candidate_kwargs, candidate_input_ids.shape[1], self.config.is_encoder_decoder
+ )
+ candidate_kwargs = _prepare_token_type_ids(candidate_kwargs, candidate_input_ids.shape[1])
+ if "cache_position" in candidate_kwargs:
+ candidate_kwargs["cache_position"] = torch.cat(
+ (
+ candidate_kwargs["cache_position"],
+ torch.arange(cur_len, cur_len + candidate_length, device=input_ids.device, dtype=torch.long),
+ ),
+ dim=0,
+ )
+
+ model_inputs = self.prepare_inputs_for_generation(candidate_input_ids, **candidate_kwargs)
+ if "num_logits_to_keep" in model_inputs:
+ model_inputs["num_logits_to_keep"] = candidate_length + 1
+
+ # 2.2. Run a forward pass on the candidate sequence
+ # prepare variable output controls (note: some models won't accept all output controls)
+ model_inputs.update({"output_attentions": output_attentions} if output_attentions else {})
+ model_inputs.update({"output_hidden_states": output_hidden_states} if output_hidden_states else {})
+
+ outputs = self(**model_inputs)
+
+ # 2.3. Process the new logits
+ # .float() is needed to retain precision for later logits manipulations
+ new_logits = outputs.logits[:, -candidate_length - 1 :].float() # excludes the input prompt if present
+ new_logits = new_logits.to(input_ids.device)
+ next_token_logits = new_logits.clone()
+ if len(logits_processor) > 0:
+ for i in range(candidate_length + 1):
+ new_logits[:, i, :] = logits_processor(candidate_input_ids[:, : cur_len + i], new_logits[:, i, :])
+
+ # 3. Select the accepted tokens. There are two possible cases:
+ # Case 1: `do_sample=True` and we have logits for the candidates (originally from speculative decoding)
+ # 👉 Apply algorithm 1 from the speculative decoding paper (https://arxiv.org/pdf/2211.17192.pdf).
+ if do_sample and candidate_logits is not None:
+ valid_tokens, n_matches = _speculative_sampling(
+ candidate_input_ids,
+ candidate_logits,
+ candidate_length,
+ new_logits,
+ is_done_candidate,
+ )
+
+ # Case 2: all other cases (originally from assisted generation) 👉 Compare the tokens selected from the
+ # original model logits with the candidate tokens. We can keep the candidate tokens until the first
+ # mismatch, or until the max length is reached.
+ else:
+ if do_sample:
+ probs = new_logits.softmax(dim=-1)
+ selected_tokens = torch.multinomial(probs[0, :, :], num_samples=1).squeeze(1)[None, :]
+ else:
+ selected_tokens = new_logits.argmax(dim=-1)
+
+ candidate_new_tokens = candidate_input_ids[:, cur_len:]
+ n_matches = ((~(candidate_new_tokens == selected_tokens[:, :-1])).cumsum(dim=-1) < 1).sum()
+
+ # Ensure we don't generate beyond max_len or an EOS token
+ if is_done_candidate and n_matches == candidate_length:
+ n_matches -= 1
+ valid_tokens = selected_tokens[:, : n_matches + 1]
+
+ # 4. Update variables according to the number of matching assistant tokens. Remember: the token generated
+ # by the model after the last candidate match is also valid, as it is generated from a correct sequence.
+ # Because of this last token, assisted generation search reduces to a normal greedy search/sample if there
+ # is no match.
+
+ # 4.1. Get the valid continuation, after the matching tokens
+ input_ids = torch.cat((input_ids, valid_tokens), dim=-1)
+ if streamer is not None:
+ streamer.put(valid_tokens.cpu())
+ new_cur_len = input_ids.shape[-1]
+
+ # 4.2. Discard past key values relative to unused assistant tokens
+ new_cache_size = new_cur_len - 1
+ outputs.past_key_values = _crop_past_key_values(self, outputs.past_key_values, new_cache_size)
+
+ # 5. Update the candidate generation strategy if needed
+ candidate_generator.update_candidate_strategy(input_ids, new_logits, n_matches)
+
+ # synced_gpus: don't waste resources running the code we don't need; kwargs must be updated before skipping
+ model_kwargs = self._update_model_kwargs_for_generation(
+ outputs,
+ model_kwargs,
+ is_encoder_decoder=self.config.is_encoder_decoder,
+ num_new_tokens=n_matches + 1,
+ )
+ if synced_gpus and this_peer_finished:
+ continue
+
+ # Store scores, attentions and hidden_states when required
+ # Assistant: modified to append one tuple element per token, as in the other generation methods.
+ if return_dict_in_generate:
+ newly_added_length = n_matches + 1
+ if output_scores:
+ scores += tuple(new_logits[:, i, :] for i in range(newly_added_length))
+ if output_logits:
+ raw_logits += tuple(next_token_logits[:, i, :] for i in range(newly_added_length))
+
+ newly_added_length = new_cur_len if is_first_iteration else newly_added_length
+ if output_attentions:
+ if self.config.is_encoder_decoder:
+ cross_attentions = _split_model_outputs(
+ cross_attentions, outputs.cross_attentions, cur_len, newly_added_length
+ )
+ decoder_attentions = _split_model_outputs(
+ decoder_attentions,
+ outputs.decoder_attentions,
+ cur_len,
+ newly_added_length,
+ is_decoder_attention=True,
+ )
+ # some (V)LLMs have hard requirement on SDPA and thus never return attn
+ elif outputs.attentions[0] is not None:
+ decoder_attentions = _split_model_outputs(
+ decoder_attentions,
+ outputs.attentions,
+ cur_len,
+ newly_added_length,
+ is_decoder_attention=True,
+ )
+ if output_hidden_states:
+ if self.config.is_encoder_decoder:
+ decoder_hidden_states = _split_model_outputs(
+ decoder_hidden_states, outputs.decoder_hidden_states, cur_len, newly_added_length
+ )
+ else:
+ decoder_hidden_states = _split_model_outputs(
+ decoder_hidden_states, outputs.hidden_states, cur_len, newly_added_length
+ )
+
+ unfinished_sequences = unfinished_sequences & ~stopping_criteria(input_ids, scores)
+ this_peer_finished = unfinished_sequences.max() == 0
+ is_first_iteration = False
+
+ if streamer is not None:
+ streamer.end()
+
+ if (
+ hasattr(candidate_generator, "assistant_model")
+ and candidate_generator.assistant_model.generation_config.num_assistant_tokens_schedule == "heuristic"
+ ):
+ candidate_generator.assistant_model.generation_config.num_assistant_tokens = (
+ candidate_generator.num_assistant_tokens
+ )
+ if return_dict_in_generate:
+ if self.config.is_encoder_decoder:
+ return GenerateEncoderDecoderOutput(
+ sequences=input_ids,
+ scores=scores,
+ logits=raw_logits,
+ encoder_attentions=encoder_attentions,
+ encoder_hidden_states=encoder_hidden_states,
+ decoder_attentions=decoder_attentions,
+ cross_attentions=cross_attentions,
+ decoder_hidden_states=decoder_hidden_states,
+ past_key_values=model_kwargs.get("past_key_values"),
+ )
+ else:
+ return GenerateDecoderOnlyOutput(
+ sequences=input_ids,
+ scores=scores,
+ logits=raw_logits,
+ attentions=decoder_attentions,
+ hidden_states=decoder_hidden_states,
+ past_key_values=model_kwargs.get("past_key_values"),
+ )
+ else:
+ return input_ids
+
+
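The `n_matches` expression in the loop above counts how many candidate tokens agree with the target model's own picks before the first mismatch. A toy, self-contained illustration with made-up token values:

import torch

candidate_new_tokens = torch.tensor([[5, 7, 9, 2]])   # tokens proposed by the assistant
selected_tokens = torch.tensor([[5, 7, 4, 2, 8]])     # target model picks, including one bonus token
n_matches = ((~(candidate_new_tokens == selected_tokens[:, :-1])).cumsum(dim=-1) < 1).sum()
assert int(n_matches) == 2  # the first two candidates are accepted; decoding resumes at the mismatch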
+ def _speculative_sampling(
+ candidate_input_ids,
+ candidate_logits,
+ candidate_length,
+ new_logits,
+ is_done_candidate,
+ ):
+ """
+ Applies sampling as in the speculative decoding paper (https://arxiv.org/pdf/2211.17192.pdf, algorithm 1). Returns
+ the selected tokens, as well as the number of candidate matches.
+
+ NOTE: Unless otherwise stated, the variable names match those in the paper.
+ """
+ new_candidate_input_ids = candidate_input_ids[:, -candidate_length:]
+ # Gets the probabilities from the logits. q_i and p_i denote the assistant and model probabilities of the tokens
+ # selected by the assistant, respectively.
+ q = candidate_logits.softmax(dim=-1)
+ q_i = q[:, torch.arange(candidate_length), new_candidate_input_ids].squeeze(0, 1)
+ p = new_logits.softmax(dim=-1)
+ p_i = p[:, torch.arange(candidate_length), new_candidate_input_ids].squeeze(0, 1)
+ probability_ratio = p_i / q_i
+
+ # When probability_ratio > 1 (i.e. q_i(x) < p_i(x), or "assistant probability of the candidate token is smaller
+ # than the model probability for the same token"), keep the token. Otherwise reject with p = 1 - probability_ratio
+ # (= keep with p = probability_ratio). Keep all the tokens until the first rejection
+ r_i = torch.rand_like(probability_ratio)
+ is_accepted = r_i <= probability_ratio
+ n_matches = ((~is_accepted).cumsum(dim=-1) < 1).sum() # this is `n` in algorithm 1
+
+ # Ensure we don't generate beyond max_len or an EOS token (not in algorithm 1, but needed for correct behavior)
+ if is_done_candidate and n_matches == candidate_length:
+ # Output length is assumed to be `n_matches + 1`. Since we won't generate another token with the target model
+ # due to acceptance on EOS we fix `n_matches`
+ n_matches -= 1
+ valid_tokens = new_candidate_input_ids[:, : n_matches + 1]
+ else:
+ # Next token selection: if there is a rejection, adjust the distribution from the main model before sampling.
+ gamma = candidate_logits.shape[1]
+ p_n_plus_1 = p[:, n_matches, :]
+ if n_matches < gamma:
+ q_n_plus_1 = q[:, n_matches, :]
+ p_prime = torch.clamp((p_n_plus_1 - q_n_plus_1), min=0)
+ p_prime.div_(p_prime.sum())
+ else:
+ p_prime = p_n_plus_1
+ t = torch.multinomial(p_prime, num_samples=1).squeeze(1)[None, :]
+
+ # The selected tokens include the matches (if any) plus the next sampled tokens
+ if n_matches > 0:
+ valid_tokens = torch.cat((new_candidate_input_ids[:, :n_matches], t), dim=-1)
+ else:
+ valid_tokens = t
+
+ return valid_tokens, n_matches
+
+
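A compact numeric sketch of the accept/reject rule the function above implements: the candidate token is kept with probability min(1, p/q), and on rejection the next token is drawn from the normalized residual max(p - q, 0). The distributions below are toy values, not model outputs:

import torch

q = torch.tensor([0.70, 0.10, 0.10, 0.10])  # assistant distribution at one position
p = torch.tensor([0.40, 0.30, 0.20, 0.10])  # target model distribution at the same position
token = 0                                   # token the assistant proposed
ratio = p[token] / q[token]                 # ~0.57, the acceptance probability
if torch.rand(()) > ratio:                  # rejected: resample from the adjusted distribution
    residual = torch.clamp(p - q, min=0)
    residual = residual / residual.sum()
    token = int(torch.multinomial(residual, num_samples=1))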
+ def _split_model_outputs(outputs, new_outputs, cur_len, added_len, is_decoder_attention=False):
+ """
+ Given the (decoder/cross attentions)/(decoder hidden states) for multiple generated tokens, splits it into a tuple
+ where each member corresponds to a single generated token.
+ """
+ # Retrocompatibility: in our generation functions, the first iteration includes the attention/hidden states for the
+ # prompt.
+ if len(outputs) == 0:
+ new_tuple = ()
+ for layer in new_outputs:
+ last_dim_size = cur_len if is_decoder_attention else layer.shape[-1]
+ new_tuple += (layer[..., :cur_len, :last_dim_size],)
+ outputs += (new_tuple,)
+ # The first iteration contains the prompt + 1 generated token, let's update the length variables accordingly
+ cur_len += 1
+ added_len -= cur_len
+
+ for i in range(added_len):
+ new_tuple = ()
+ for layer in new_outputs:
+ last_dim_size = cur_len + i if is_decoder_attention else layer.shape[-1]
+ new_tuple += (layer[..., i : i + 1, :last_dim_size],)
+ outputs += (new_tuple,)
+ return outputs
+
+
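A toy sketch of what the helper above produces for decoder attentions: a tensor covering several newly generated tokens is sliced into one tuple entry per token, each trimmed to the number of positions that token attends to. Shapes are illustrative only and skip the first-iteration prompt handling:

import torch

added_len, cur_len = 3, 5
# [batch, heads, new_tokens, total_len]: attentions returned for the newly generated tokens
layer_attn = torch.randn(1, 4, added_len, cur_len + added_len)
per_token = tuple(layer_attn[..., i : i + 1, : cur_len + i] for i in range(added_len))
assert per_token[0].shape[-2:] == (1, cur_len)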
+ def _ranking_fast(
+ context_hidden: torch.FloatTensor,
+ next_hidden: torch.FloatTensor,
+ next_top_k_probs: torch.FloatTensor,
+ cosine_matrix_mask: torch.LongTensor,
+ alpha: float,
+ beam_width: int,
+ ) -> torch.FloatTensor:
+ """
+ Reranks the top_k candidates based on a degeneration penalty (cosine similarity with previous tokens), as described
+ in the paper "A Contrastive Framework for Neural Text Generation". Returns the index of the best candidate for each
+ row in the batch.
+ """
+ norm_context_hidden = context_hidden / context_hidden.norm(dim=2, keepdim=True)
+ norm_next_hidden = next_hidden / next_hidden.norm(dim=2, keepdim=True)
+ cosine_matrix = torch.matmul(norm_context_hidden, norm_next_hidden.transpose(1, 2)).squeeze(-1) # [B*K, S]
+
+ # Penalize cosine_matrix based on the cosine_matrix_mask (ignore padding positions)
+ # Using a large negative value for masked positions
+ cosine_matrix_mask = cosine_matrix_mask.to(dtype=cosine_matrix.dtype)
+ cosine_matrix_mask = (1 - cosine_matrix_mask) * torch.finfo(cosine_matrix.dtype).min
+ cosine_matrix = cosine_matrix + cosine_matrix_mask
+
+ degeneration_penalty, _ = torch.max(cosine_matrix, dim=-1) # [B*K]
+ next_top_k_probs = next_top_k_probs.view(-1) # [B*K]
+ contrastive_score = (1.0 - alpha) * next_top_k_probs - alpha * degeneration_penalty
+ contrastive_score = torch.stack(torch.split(contrastive_score, beam_width)) # [B, K]
+ _, selected_idx = contrastive_score.max(dim=-1) # [B]
+ return selected_idx
+
+
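The reranking above balances model confidence against a degeneration penalty (maximum cosine similarity to the prefix hidden states). A standalone sketch with toy shapes, a single batch row (so B*K equals K), and the padding mask omitted:

import torch

alpha, beam_width, hidden = 0.6, 2, 8
context_hidden = torch.randn(beam_width, 4, hidden)   # [K, S, D] hidden states of the prefix
next_hidden = torch.randn(beam_width, 1, hidden)      # [K, 1, D] hidden state of each candidate
next_top_k_probs = torch.tensor([0.55, 0.45])         # [K] model probability of each candidate

norm_ctx = context_hidden / context_hidden.norm(dim=2, keepdim=True)
norm_next = next_hidden / next_hidden.norm(dim=2, keepdim=True)
cosine = torch.matmul(norm_ctx, norm_next.transpose(1, 2)).squeeze(-1)  # [K, S]
degeneration_penalty, _ = cosine.max(dim=-1)                            # [K]
score = (1.0 - alpha) * next_top_k_probs - alpha * degeneration_penalty
selected = int(score.argmax())  # index of the candidate that is confident yet not repetitive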
+ def _split(data, full_batch_size: int, num_hidden_layers: int, split_size: int = None):
+ """
+ Takes care of three cases:
+ 1. data is a tensor: e.g. last_hidden_state, pooler_output etc. split them on the batch_size dim
+ 2. data is a tuple: e.g. hidden_states, attentions etc. Keep the tuple as it is and split each tensor in it and
+ return a list of tuples
+ 3. data is a tuple of tuples, e.g. past_key_values. Keep the tuple as it is and split each tuple in it and
+ return a list of tuples of tuples
+ (see documentation of ModelOutput)
+ """
+ if data is None:
+ return [None] * (full_batch_size // split_size)
+ if isinstance(data, torch.Tensor):
+ return [data[i : i + split_size] for i in range(0, full_batch_size, split_size)]
+ # New cache format
+ elif isinstance(data, DynamicCache) or (
+ isinstance(data, EncoderDecoderCache) and isinstance(data.self_attention_cache, DynamicCache)
+ ):
+ return data.batch_split(full_batch_size, split_size, num_hidden_layers)
+ elif isinstance(data, tuple):
+ # If the elements of the tuple are also tuples (e.g., past_key_values in our earlier example)
+ if isinstance(data[0], tuple):
+ return [
+ tuple(tuple(tensor[i : i + split_size] for tensor in inner_tuple) for inner_tuple in data)
+ for i in range(0, full_batch_size, split_size)
+ ]
+
+ else:
+ return [
+ tuple(sub_tensor[i : i + split_size] for sub_tensor in data)
+ for i in range(0, full_batch_size, split_size)
+ ]
+ else:
+ raise TypeError(f"Unexpected attribute type: {type(data)}")
+
+
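A toy sketch of case 3 above (legacy tuple-of-tuples past_key_values): every inner tensor is sliced along the batch dimension while the nesting is preserved. The shapes here are made up:

import torch

full_batch_size, split_size, layers = 4, 2, 2
past = tuple(
    (torch.randn(full_batch_size, 2, 3), torch.randn(full_batch_size, 2, 3)) for _ in range(layers)
)
splits = [
    tuple(tuple(t[i : i + split_size] for t in layer) for layer in past)
    for i in range(0, full_batch_size, split_size)
]
assert len(splits) == 2 and splits[0][0][0].shape[0] == split_size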
+ def _split_model_inputs(
+ model_input: Union[ModelOutput, Dict], split_size: int, full_batch_size: int, config: PretrainedConfig
+ ) -> List[Union[ModelOutput, Dict]]:
+ """
+ Split a ModelOutput object (or its subclasses) or Dict into a list of same-class objects based on a specified split
+ size. The input object is dict when it was prepared for forward pass and ModelOutput when it was returned from
+ previous forward pass.
+ """
+ # Edge case: if model_input is None, return a list of Nones
+ # this happens with Whisper where encoder_outputs is None
+ if model_input is None:
+ return [model_input] * (full_batch_size // split_size)
+ # Infer the class from the object
+ model_output_cls = type(model_input)
+ if (full_batch_size % split_size) != 0:
+ raise ValueError("`full_batch_size` must be divisible by `split_size`")
+
+ if split_size > full_batch_size:
+ raise ValueError("`split_size` must be smaller or equal to `full_batch_size`")
+
+ # Helper function to split tensors or tuples of tensors
+
+ # Find all the dataclass fields (e.g., last_hidden_state, pooler_output etc.) and split them
+ keys = (
+ model_input.__dataclass_fields__.keys() if hasattr(model_input, "__dataclass_fields__") else model_input.keys()
+ )
+ # We only keep keys that are in the model_input
+ keys = [k for k in keys if k in model_input]
+ # Here we can have four types of values: tensors, tuples of tensors and booleans, and encoder_outputs which is a
+ # ModelOutput object.
+ # bool should not be split but replicated for each split
+ bool_keys = [k for k in keys if isinstance(model_input[k], bool) or k == "cache_position"]
+ keys_to_ignore = ["cache_position", "encoder_outputs", "num_logits_to_keep"]
+ non_bool_keys = [k for k in keys if not isinstance(model_input[k], bool) and k not in keys_to_ignore]
+
+ num_hidden_layers = config.get_text_config().num_hidden_layers
+
+ # we split the tensors and tuples of tensors
+ data_split_list = [
+ {k: _split(model_input[k], full_batch_size, num_hidden_layers, split_size)[i] for k in non_bool_keys}
+ for i in range(full_batch_size // split_size)
+ ]
+ # bool values are the same and replicated for each split
+ bool_data = {k: model_input[k] for k in bool_keys}
+ # encoder_outputs is a ModelOutput object and should be split by its own
+ if "encoder_outputs" in model_input:
+ encoder_outputs_split = _split_model_inputs(
+ model_input["encoder_outputs"], split_size, full_batch_size, config.get_text_config()
+ )
+ data_split_list = [
+ {**data_split, "encoder_outputs": encoder_outputs_split[i]} for i, data_split in enumerate(data_split_list)
+ ]
+ # num_logits_to_keep should be replicated for each split, similar to bool values
+ if "num_logits_to_keep" in model_input:
+ data_split_list = [
+ {**data_split, "num_logits_to_keep": model_input["num_logits_to_keep"]} for data_split in data_split_list
+ ]
+
+ # Convert each dictionary in the list to an object of the inferred class
+ split_model_inputs: List[Union[ModelOutput, Dict]] = [
+ model_output_cls(**data_split, **bool_data) for data_split in data_split_list
+ ]
+
+ return split_model_inputs
+
+
+ def stack_model_outputs(model_outputs: List[ModelOutput], config: PretrainedConfig) -> ModelOutput:
+ """
+ Stack a list of ModelOutput objects (or its subclasses) along the batch_size dimension. The function infers the
+ specific ModelOutput subclass from the list provided.
+ """
+ if not model_outputs:
+ raise ValueError("Input list is empty.")
+
+ # Infer the class from the first object in the list
+ model_output_cls = type(model_outputs[0])
+ num_hidden_layers = config.get_text_config().num_hidden_layers
+
+ # Ensure all objects are of the same type
+ if not all(isinstance(obj, model_output_cls) for obj in model_outputs):
+ raise ValueError("All elements in the list should be of the same type.")
+
+ # Helper function to concat tensors or tuples of tensors
+ def _concat(data):
+ """
+ Reverse of `_split` function above.
+ """
+ if any(data is None for data in data):
+ return None
+ if isinstance(data[0], torch.Tensor):
+ return torch.cat(data, dim=0)
+ # New cache format
+ elif isinstance(data[0], DynamicCache):
+ return DynamicCache.from_batch_splits(data, num_hidden_layers=num_hidden_layers)
+ elif isinstance(data[0], EncoderDecoderCache):
+ return EncoderDecoderCache.from_batch_splits(data, num_hidden_layers=num_hidden_layers)
+ elif isinstance(data[0], tuple):
+ # If the elements of the tuple are also tuples (e.g., past_key_values in our earlier example)
+ if isinstance(data[0][0], tuple):
+ return tuple(
+ tuple(torch.cat([attr[i][j] for attr in data], dim=0) for j in range(len(data[0][0])))
+ for i in range(len(data[0]))
+ )
+ else:
+ return tuple(torch.cat([attr[i] for attr in data], dim=0) for i in range(len(data[0])))
+ elif isinstance(data[0], (int, float)):
+ # If the elements are integers or floats, return a tensor
+ return torch.tensor(data)
+ else:
+ raise TypeError(f"Unexpected attribute type: {type(data[0])}")
+
+ # Use a dictionary comprehension to gather attributes from all objects and concatenate them
+ concatenated_data = {
+ k: _concat([getattr(model_output, k) for model_output in model_outputs])
+ for k in model_output_cls.__dataclass_fields__.keys()
+ }
+
+ # Return a new object of the inferred class with the concatenated attributes
+ return model_output_cls(**concatenated_data)
+
+
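Together, the two helpers above split a batched model input into per-chunk pieces (replicating booleans) and later concatenate the per-chunk outputs back along the batch dimension. A minimal dict-only roundtrip sketch of that idea, without the cache and ModelOutput special cases:

import torch

batch = {"input_ids": torch.arange(8).reshape(4, 2), "use_cache": True}
split_size, full_batch_size = 2, 4
splits = [
    {"input_ids": batch["input_ids"][i : i + split_size], "use_cache": batch["use_cache"]}
    for i in range(0, full_batch_size, split_size)
]
restored = torch.cat([s["input_ids"] for s in splits], dim=0)
assert torch.equal(restored, batch["input_ids"])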
+ def _relative_top_filter(
+ scores: torch.FloatTensor,
+ baseline_scores: torch.FloatTensor,
+ relative_top: float = 0.1,
+ filter_value: float = -float("Inf"),
+ base_filter_value=-1e-3,
+ min_tokens_to_keep: int = 1,
+ ) -> torch.FloatTensor:
+ """
+ Reference: https://github.com/XiangLi1999/ContrastiveDecoding/blob/170e9142e92159c1237d731e240f5eb14aabf428/transformers/src/transformers/generation_logits_process.py#L235
+ Apply filtering to only keep tokens with a probability above a certain threshold. The threshold is defined as `relative_top` * max probability in the distribution.
+ """
+ scores_normalized = scores.log_softmax(dim=-1)
+ baseline_scores_normalized = baseline_scores.log_softmax(dim=-1)
+ sorted_logits, sorted_indices = torch.sort(scores_normalized, descending=True)
+ min_thresh = sorted_logits[..., min_tokens_to_keep - 1]
+ probs_max = torch.max(scores_normalized, dim=-1).values
+ probs_thresh = probs_max + np.log(relative_top)
+ probs_thresh = torch.min(min_thresh, probs_thresh)
+ probs_thresh = probs_thresh.unsqueeze(-1)
+ baseline_scores_normalized[scores_normalized < probs_thresh] = base_filter_value
+ scores_normalized[scores_normalized < probs_thresh] = filter_value
+ return scores_normalized, baseline_scores_normalized
+
+
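The filter above keeps only tokens whose log-probability is within log(relative_top) of the best token, while never dropping the top min_tokens_to_keep entries. A small numeric sketch of the threshold computation with toy logits:

import numpy as np
import torch

relative_top, min_tokens_to_keep = 0.1, 1
logprobs = torch.tensor([[2.0, 1.0, 0.0, -3.0]]).log_softmax(dim=-1)
sorted_logprobs, _ = torch.sort(logprobs, descending=True)
min_thresh = sorted_logprobs[..., min_tokens_to_keep - 1]
thresh = torch.min(min_thresh, logprobs.max(dim=-1).values + np.log(relative_top)).unsqueeze(-1)
kept = logprobs >= thresh  # tokens surviving the filter; the rest would receive filter_value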
+ def _dola_select_contrast(
+ candidate_premature_layers: List[int],
+ candidate_premature_logits: Dict[int, torch.FloatTensor],
+ final_logits: torch.FloatTensor,
+ ) -> torch.FloatTensor:
+ if len(candidate_premature_layers) == 1:
+ base_logits = candidate_premature_logits[candidate_premature_layers[0]]
+ final_logits, base_logits = _relative_top_filter(final_logits, base_logits)
+ logits = final_logits - base_logits
+ return logits
+
+ # 1. Stacking all premature_layers into a new dimension
+ stacked_premature_layers = torch.stack([candidate_premature_logits[i] for i in candidate_premature_layers], dim=0)
+
+ # 2. Calculate the softmax values for mature_layer and all premature_layers
+ # shape: (batch_size, vocab_size)
+ softmax_mature_layer = F.softmax(final_logits, dim=-1)
+ # shape: (num_premature_layers, batch_size, vocab_size)
+ softmax_premature_layers = F.softmax(stacked_premature_layers, dim=-1)
+
+ # 3. Calculate the average distribution
+ # shape: (num_premature_layers, batch_size, vocab_size)
+ avg_dist = 0.5 * (softmax_mature_layer[None, :, :] + softmax_premature_layers)
+
+ # 4. Calculate log-softmax for the KL divergence
+ # shape: (batch_size, vocab_size)
+ log_softmax_mature_layer = F.log_softmax(final_logits, dim=-1)
+ # shape: (num_premature_layers, batch_size, vocab_size)
+ log_softmax_premature_layers = F.log_softmax(stacked_premature_layers, dim=-1)
+
+ # 5. Calculate the KL divergences and then the JS divergences
+ # shape: (num_premature_layers, batch_size)
+ kl1 = F.kl_div(log_softmax_mature_layer[None, :, :], avg_dist, reduction="none").mean(-1)
+ # shape: (num_premature_layers, batch_size)
+ kl2 = F.kl_div(log_softmax_premature_layers, avg_dist, reduction="none").mean(-1)
+ js_divs = 0.5 * (kl1 + kl2) # shape: (num_premature_layers, batch_size)
+
+ # 6. Reduce the batchmean
+ js_divs = js_divs.mean(-1) # shape: (num_premature_layers,)
+ premature_layer = candidate_premature_layers[int(js_divs.argmax().cpu().item())]
+
+ base_logits = candidate_premature_logits[premature_layer]
+ final_logits, base_logits = _relative_top_filter(final_logits, base_logits)
+ logits = final_logits - base_logits
+ return logits
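The layer selection above maximizes a Jensen-Shannon divergence between the final-layer distribution and each premature layer's distribution. A standalone sketch for a single premature layer with toy logits (the vendored code averages over the vocabulary dimension instead of summing, which only rescales the scores):

import torch
import torch.nn.functional as F

final_logits = torch.tensor([[3.0, 0.5, 0.1]])      # mature (final) layer, shape (batch, vocab)
premature_logits = torch.tensor([[1.0, 1.0, 1.0]])  # one candidate premature layer
p = F.softmax(final_logits, dim=-1)
q = F.softmax(premature_logits, dim=-1)
m = 0.5 * (p + q)
kl_p_m = F.kl_div(m.log(), p, reduction="none").sum(-1)  # KL(p || m)
kl_q_m = F.kl_div(m.log(), q, reduction="none").sum(-1)  # KL(q || m)
js = 0.5 * (kl_p_m + kl_q_m)  # a larger value marks a more contrastive premature layer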