xinference 1.1.0__py3-none-any.whl → 1.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
- xinference/_compat.py +2 -0
- xinference/_version.py +3 -3
- xinference/api/restful_api.py +72 -66
- xinference/core/model.py +78 -25
- xinference/core/supervisor.py +81 -10
- xinference/core/utils.py +12 -8
- xinference/core/worker.py +32 -0
- xinference/model/audio/core.py +5 -0
- xinference/model/audio/cosyvoice.py +25 -3
- xinference/model/audio/f5tts.py +15 -10
- xinference/model/audio/f5tts_mlx.py +260 -0
- xinference/model/audio/fish_speech.py +35 -111
- xinference/model/audio/model_spec.json +19 -3
- xinference/model/audio/model_spec_modelscope.json +9 -0
- xinference/model/audio/utils.py +32 -0
- xinference/model/image/core.py +69 -1
- xinference/model/image/model_spec.json +145 -4
- xinference/model/image/model_spec_modelscope.json +150 -4
- xinference/model/image/stable_diffusion/core.py +45 -13
- xinference/model/llm/__init__.py +2 -0
- xinference/model/llm/llm_family.json +143 -0
- xinference/model/llm/llm_family.py +15 -36
- xinference/model/llm/llm_family_modelscope.json +148 -0
- xinference/model/llm/mlx/core.py +37 -32
- xinference/model/llm/transformers/cogagent.py +272 -0
- xinference/model/llm/transformers/core.py +2 -0
- xinference/model/llm/transformers/qwen2_vl.py +12 -1
- xinference/model/llm/utils.py +28 -3
- xinference/model/llm/vllm/core.py +48 -9
- xinference/model/llm/vllm/xavier/__init__.py +13 -0
- xinference/model/llm/vllm/xavier/allocator.py +74 -0
- xinference/model/llm/vllm/xavier/block.py +112 -0
- xinference/model/llm/vllm/xavier/block_manager.py +71 -0
- xinference/model/llm/vllm/xavier/block_tracker.py +116 -0
- xinference/model/llm/vllm/xavier/engine.py +247 -0
- xinference/model/llm/vllm/xavier/executor.py +132 -0
- xinference/model/llm/vllm/xavier/scheduler.py +422 -0
- xinference/model/llm/vllm/xavier/test/__init__.py +13 -0
- xinference/model/llm/vllm/xavier/test/test_xavier.py +122 -0
- xinference/model/llm/vllm/xavier/transfer.py +298 -0
- xinference/model/video/diffusers.py +14 -0
- xinference/model/video/model_spec.json +15 -0
- xinference/model/video/model_spec_modelscope.json +16 -0
- xinference/thirdparty/cosyvoice/bin/average_model.py +92 -0
- xinference/thirdparty/cosyvoice/bin/export_jit.py +12 -2
- xinference/thirdparty/cosyvoice/bin/export_onnx.py +112 -0
- xinference/thirdparty/cosyvoice/bin/export_trt.sh +9 -0
- xinference/thirdparty/cosyvoice/bin/inference.py +5 -7
- xinference/thirdparty/cosyvoice/bin/train.py +42 -8
- xinference/thirdparty/cosyvoice/cli/cosyvoice.py +96 -25
- xinference/thirdparty/cosyvoice/cli/frontend.py +77 -30
- xinference/thirdparty/cosyvoice/cli/model.py +330 -80
- xinference/thirdparty/cosyvoice/dataset/dataset.py +6 -2
- xinference/thirdparty/cosyvoice/dataset/processor.py +76 -14
- xinference/thirdparty/cosyvoice/flow/decoder.py +92 -13
- xinference/thirdparty/cosyvoice/flow/flow.py +99 -9
- xinference/thirdparty/cosyvoice/flow/flow_matching.py +110 -13
- xinference/thirdparty/cosyvoice/flow/length_regulator.py +5 -4
- xinference/thirdparty/cosyvoice/hifigan/discriminator.py +140 -0
- xinference/thirdparty/cosyvoice/hifigan/generator.py +58 -42
- xinference/thirdparty/cosyvoice/hifigan/hifigan.py +67 -0
- xinference/thirdparty/cosyvoice/llm/llm.py +139 -6
- xinference/thirdparty/cosyvoice/tokenizer/assets/multilingual_zh_ja_yue_char_del.tiktoken +58836 -0
- xinference/thirdparty/cosyvoice/tokenizer/tokenizer.py +279 -0
- xinference/thirdparty/cosyvoice/transformer/embedding.py +2 -2
- xinference/thirdparty/cosyvoice/transformer/encoder_layer.py +7 -7
- xinference/thirdparty/cosyvoice/transformer/upsample_encoder.py +318 -0
- xinference/thirdparty/cosyvoice/utils/common.py +28 -1
- xinference/thirdparty/cosyvoice/utils/executor.py +69 -7
- xinference/thirdparty/cosyvoice/utils/file_utils.py +2 -12
- xinference/thirdparty/cosyvoice/utils/frontend_utils.py +9 -5
- xinference/thirdparty/cosyvoice/utils/losses.py +20 -0
- xinference/thirdparty/cosyvoice/utils/scheduler.py +1 -2
- xinference/thirdparty/cosyvoice/utils/train_utils.py +101 -45
- xinference/thirdparty/fish_speech/fish_speech/conversation.py +94 -83
- xinference/thirdparty/fish_speech/fish_speech/models/text2semantic/llama.py +63 -20
- xinference/thirdparty/fish_speech/fish_speech/text/clean.py +1 -26
- xinference/thirdparty/fish_speech/fish_speech/text/spliter.py +1 -1
- xinference/thirdparty/fish_speech/fish_speech/tokenizer.py +152 -0
- xinference/thirdparty/fish_speech/fish_speech/train.py +2 -2
- xinference/thirdparty/fish_speech/fish_speech/webui/manage.py +1 -1
- xinference/thirdparty/fish_speech/tools/{post_api.py → api_client.py} +7 -13
- xinference/thirdparty/fish_speech/tools/api_server.py +98 -0
- xinference/thirdparty/fish_speech/tools/download_models.py +5 -5
- xinference/thirdparty/fish_speech/tools/fish_e2e.py +2 -2
- xinference/thirdparty/fish_speech/tools/inference_engine/__init__.py +192 -0
- xinference/thirdparty/fish_speech/tools/inference_engine/reference_loader.py +125 -0
- xinference/thirdparty/fish_speech/tools/inference_engine/utils.py +39 -0
- xinference/thirdparty/fish_speech/tools/inference_engine/vq_manager.py +57 -0
- xinference/thirdparty/fish_speech/tools/llama/eval_in_context.py +2 -2
- xinference/thirdparty/fish_speech/tools/llama/generate.py +117 -89
- xinference/thirdparty/fish_speech/tools/run_webui.py +104 -0
- xinference/thirdparty/fish_speech/tools/schema.py +11 -28
- xinference/thirdparty/fish_speech/tools/server/agent/__init__.py +57 -0
- xinference/thirdparty/fish_speech/tools/server/agent/generate.py +119 -0
- xinference/thirdparty/fish_speech/tools/server/agent/generation_utils.py +122 -0
- xinference/thirdparty/fish_speech/tools/server/agent/pre_generation_utils.py +72 -0
- xinference/thirdparty/fish_speech/tools/server/api_utils.py +75 -0
- xinference/thirdparty/fish_speech/tools/server/exception_handler.py +27 -0
- xinference/thirdparty/fish_speech/tools/server/inference.py +45 -0
- xinference/thirdparty/fish_speech/tools/server/model_manager.py +122 -0
- xinference/thirdparty/fish_speech/tools/server/model_utils.py +129 -0
- xinference/thirdparty/fish_speech/tools/server/views.py +246 -0
- xinference/thirdparty/fish_speech/tools/webui/__init__.py +173 -0
- xinference/thirdparty/fish_speech/tools/webui/inference.py +91 -0
- xinference/thirdparty/fish_speech/tools/webui/variables.py +14 -0
- xinference/thirdparty/matcha/utils/utils.py +2 -2
- xinference/types.py +13 -0
- xinference/web/ui/build/asset-manifest.json +6 -6
- xinference/web/ui/build/index.html +1 -1
- xinference/web/ui/build/static/css/main.51a587ff.css +2 -0
- xinference/web/ui/build/static/css/main.51a587ff.css.map +1 -0
- xinference/web/ui/build/static/js/main.1eb206d1.js +3 -0
- xinference/web/ui/build/static/js/main.1eb206d1.js.map +1 -0
- xinference/web/ui/node_modules/.cache/babel-loader/03c4052f1b91f6ba0c5389bdcf49c43319b4076c08e4b8585dab312538ae290a.json +1 -0
- xinference/web/ui/node_modules/.cache/babel-loader/1786b83003b8e9605a0f5f855a185d4d16e38fc893dfb326a2a9cca206b4240a.json +1 -0
- xinference/web/ui/node_modules/.cache/babel-loader/17cbc181dd674b9150b80c73ed6a82656de0082d857f6e5f66d9716129ac0b38.json +1 -0
- xinference/web/ui/node_modules/.cache/babel-loader/185ceb8872d562e032b47e79df6a45670e06345b8ed70aad1a131e0476783c5c.json +1 -0
- xinference/web/ui/node_modules/.cache/babel-loader/2213d49de260e1f67c888081b18f120f5225462b829ae57c9e05a05cec83689d.json +1 -0
- xinference/web/ui/node_modules/.cache/babel-loader/26b8c9f34b0bed789b3a833767672e39302d1e0c09b4276f4d58d1df7b6bd93b.json +1 -0
- xinference/web/ui/node_modules/.cache/babel-loader/2b484da66c724d0d56a40849c109327408796a668b1381511b6e9e03baa48658.json +1 -0
- xinference/web/ui/node_modules/.cache/babel-loader/2cbbbce9b84df73330d4c42b82436ed881b3847628f2fbc346aa62e2859fd88c.json +1 -0
- xinference/web/ui/node_modules/.cache/babel-loader/2ec9b14431ed33ce6901bf9f27007be4e6e472709c99d6e22b50ce528e4b78ee.json +1 -0
- xinference/web/ui/node_modules/.cache/babel-loader/3b966db018f96be4a055d6ca205f0990d4d0b370e2980c17d8bca2c9a021819c.json +1 -0
- xinference/web/ui/node_modules/.cache/babel-loader/3eefb411b24c2b3ce053570ef50daccf154022f0e168be5ed0fec21394baf9f4.json +1 -0
- xinference/web/ui/node_modules/.cache/babel-loader/522b229e3cac219123f0d69673f5570e191c2d2a505dc65b312d336eae2279c0.json +1 -0
- xinference/web/ui/node_modules/.cache/babel-loader/52e45f17ba300580ea3fcc9f9228ccba194bb092b76f25e9255af311f8b05aab.json +1 -0
- xinference/web/ui/node_modules/.cache/babel-loader/5a0bc4631f936459afc1a3b1d3ec2420118b1f00e11f60ccac3e08088f3f27a8.json +1 -0
- xinference/web/ui/node_modules/.cache/babel-loader/611fa2c6c53b66039991d06dfb0473b5ab37fc63b4564e0f6e1718523768a045.json +1 -0
- xinference/web/ui/node_modules/.cache/babel-loader/6329bc76c406fe5eb305412383fbde5950f847bb5e43261f73f37622c365acb4.json +1 -0
- xinference/web/ui/node_modules/.cache/babel-loader/63c8e07687ea53a4f8a910ee5e42e0eb26cd1acbfbe820f3e3248a786ee51401.json +1 -0
- xinference/web/ui/node_modules/.cache/babel-loader/69b2d5001684174ec9da57e07914eed3eac4960018bceb6cbfa801d861301d7c.json +1 -0
- xinference/web/ui/node_modules/.cache/babel-loader/710c1acda69e561e30a933b98c6a56d50197868b15c21e2aad55ab6d46649eb6.json +1 -0
- xinference/web/ui/node_modules/.cache/babel-loader/720deca1fce5a1dc5056048fa8258fd138a82ea855f350b6613f104a73fb761f.json +1 -0
- xinference/web/ui/node_modules/.cache/babel-loader/76a23b92d26a499c57e61eea2b895fbc9771bd0849a72e66f8e633192017978b.json +1 -0
- xinference/web/ui/node_modules/.cache/babel-loader/858063f23b34dfe600254eb5afd85518b0002ec4b30b7386616c45600826e3b2.json +1 -0
- xinference/web/ui/node_modules/.cache/babel-loader/920b82c1c89124cf217109eeedbfcd3aae3b917be50c9dfb6bbb4ce26bdfd2e7.json +1 -0
- xinference/web/ui/node_modules/.cache/babel-loader/94d8b7aeb0076f2ce07db598cea0e87b13bc8d5614eb530b8d6e696c2daf6f88.json +1 -0
- xinference/web/ui/node_modules/.cache/babel-loader/9e917fe7022d01b2ccbe5cc0ce73d70bb72bee584ff293bad71bdff6695dee28.json +1 -0
- xinference/web/ui/node_modules/.cache/babel-loader/9f28fdb8399f1d0474f0aca86f1658dc94f5bf0c90f6146352de150692de8862.json +1 -0
- xinference/web/ui/node_modules/.cache/babel-loader/a0dfafa06b2bb7cba8cad41c482503f61944f759f4318139362602ef5cc47ccb.json +1 -0
- xinference/web/ui/node_modules/.cache/babel-loader/afb8084f539534cd594755ea2205ecd5bd1f62dddcfdf75a2eace59a28131278.json +1 -0
- xinference/web/ui/node_modules/.cache/babel-loader/b57b1438b77294c1f3f6cfce12ac487d8106c6f016975ba0aec94d98997e2e1e.json +1 -0
- xinference/web/ui/node_modules/.cache/babel-loader/b9917b0bf8e4d55ccbac1c334aa04d6ff3c5b6ed9e5d38b9ea2c687fa7d3f5a9.json +1 -0
- xinference/web/ui/node_modules/.cache/babel-loader/bbcc94b0149963d1d6f267ee1f4f03d3925b758392ce2f516c3fe8af0e0169fc.json +1 -0
- xinference/web/ui/node_modules/.cache/babel-loader/bdee44abeadc4abc17d41c52eb49c6e19a4b1a267b6e16876ce91bdeeebfc52d.json +1 -0
- xinference/web/ui/node_modules/.cache/babel-loader/beb112b70f4a56db95920a9e20efb6c97c37b68450716730217a9ee1a9ae92be.json +1 -0
- xinference/web/ui/node_modules/.cache/babel-loader/c88db97be0cdf440193b3995996e83510a04cb00048135485fc0e26d197e80b5.json +1 -0
- xinference/web/ui/node_modules/.cache/babel-loader/d49e5314d34310a62d01a03067ce1bec5da00abce84c5196aa9c6842fa79a430.json +1 -0
- xinference/web/ui/node_modules/.cache/babel-loader/d7664d18c4ddbad9c3a6a31b91f7c00fb0dde804608674a9860ee50f33e54708.json +1 -0
- xinference/web/ui/node_modules/.cache/babel-loader/d9072c318b819b7c90a0f7e9cc0b6413b4dbeb8e9859898e53d75ea882fcde99.json +1 -0
- xinference/web/ui/node_modules/.cache/babel-loader/db16a983bc08a05f0439cc61ca0840e49e1d8400eef678909f16c032a418a3d6.json +1 -0
- xinference/web/ui/node_modules/.cache/babel-loader/dc249829767b8abcbc3677e0b07b6d3ecbfdfe6d08cfe23a665eb33373a9aa9d.json +1 -0
- xinference/web/ui/node_modules/.cache/babel-loader/e242c583c2dbc2784f0fcf513523975f7d5df447e106c1c17e49e8578a6fc3ed.json +1 -0
- xinference/web/ui/node_modules/.cache/babel-loader/eac5f1296513e69e4b96f750ddccd4d0264e2bae4e4c449144e83274a48698d9.json +1 -0
- xinference/web/ui/node_modules/.cache/babel-loader/ed57202cb79649bb716400436590245547df241988fc7c8e1d85d132299542d2.json +1 -0
- xinference/web/ui/node_modules/.cache/babel-loader/f125bf72e773a14cdaebd0c343e80adb909d12e317ee5c00cd4a57442fbe2c62.json +1 -0
- xinference/web/ui/node_modules/.cache/babel-loader/f91af913d7f91c410719ab13136aaed3aaf0f8dda06652f25c42cb5231587398.json +1 -0
- xinference/web/ui/node_modules/.package-lock.json +67 -3
- xinference/web/ui/node_modules/@babel/runtime/package.json +592 -538
- xinference/web/ui/node_modules/html-parse-stringify/package.json +50 -0
- xinference/web/ui/node_modules/i18next/dist/esm/package.json +1 -0
- xinference/web/ui/node_modules/i18next/package.json +129 -0
- xinference/web/ui/node_modules/react-i18next/.eslintrc.json +74 -0
- xinference/web/ui/node_modules/react-i18next/dist/es/package.json +1 -0
- xinference/web/ui/node_modules/react-i18next/package.json +162 -0
- xinference/web/ui/node_modules/void-elements/package.json +34 -0
- xinference/web/ui/package-lock.json +69 -3
- xinference/web/ui/package.json +2 -0
- xinference/web/ui/src/locales/en.json +186 -0
- xinference/web/ui/src/locales/zh.json +186 -0
- {xinference-1.1.0.dist-info → xinference-1.2.0.dist-info}/METADATA +19 -11
- {xinference-1.1.0.dist-info → xinference-1.2.0.dist-info}/RECORD +178 -111
- xinference/thirdparty/cosyvoice/bin/__init__.py +0 -0
- xinference/thirdparty/cosyvoice/bin/export_trt.py +0 -8
- xinference/thirdparty/cosyvoice/flow/__init__.py +0 -0
- xinference/thirdparty/cosyvoice/hifigan/__init__.py +0 -0
- xinference/thirdparty/cosyvoice/llm/__init__.py +0 -0
- xinference/thirdparty/fish_speech/tools/__init__.py +0 -0
- xinference/thirdparty/fish_speech/tools/api.py +0 -943
- xinference/thirdparty/fish_speech/tools/msgpack_api.py +0 -95
- xinference/thirdparty/fish_speech/tools/webui.py +0 -548
- xinference/web/ui/build/static/css/main.5061c4c3.css +0 -2
- xinference/web/ui/build/static/css/main.5061c4c3.css.map +0 -1
- xinference/web/ui/build/static/js/main.4eb4ee80.js +0 -3
- xinference/web/ui/build/static/js/main.4eb4ee80.js.map +0 -1
- xinference/web/ui/node_modules/.cache/babel-loader/07ce9e632e6aff24d7aa3ad8e48224433bbfeb0d633fca723453f1fcae0c9f1c.json +0 -1
- xinference/web/ui/node_modules/.cache/babel-loader/1130403f9e46f5738a23b45ac59b57de8f360c908c713e2c0670c2cce9bd367a.json +0 -1
- xinference/web/ui/node_modules/.cache/babel-loader/131091b25d26b17cdca187d7542a21475c211138d900cf667682260e76ef9463.json +0 -1
- xinference/web/ui/node_modules/.cache/babel-loader/1f269fb2a368363c1cb2237825f1dba093b6bdd8c44cc05954fd19ec2c1fff03.json +0 -1
- xinference/web/ui/node_modules/.cache/babel-loader/331312668fa8bd3d7401818f4a25fa98135d7f61371cd6bfff78b18cf4fbdd92.json +0 -1
- xinference/web/ui/node_modules/.cache/babel-loader/40f17338fc75ae095de7d2b4d8eae0d5ca0193a7e2bcece4ee745b22a7a2f4b7.json +0 -1
- xinference/web/ui/node_modules/.cache/babel-loader/4de9a6942c5f1749d6cbfdd54279699975f16016b182848bc253886f52ec2ec3.json +0 -1
- xinference/web/ui/node_modules/.cache/babel-loader/822586ed1077201b64b954f12f25e3f9b45678c1acbabe53d8af3ca82ca71f33.json +0 -1
- xinference/web/ui/node_modules/.cache/babel-loader/8c5eeb02f772d02cbe8b89c05428d0dd41a97866f75f7dc1c2164a67f5a1cf98.json +0 -1
- xinference/web/ui/node_modules/.cache/babel-loader/8d33354bd2100c8602afc3341f131a88cc36aaeecd5a4b365ed038514708e350.json +0 -1
- xinference/web/ui/node_modules/.cache/babel-loader/9375a35b05d56989b2755bf72161fa707c92f28569d33765a75f91a568fda6e9.json +0 -1
- xinference/web/ui/node_modules/.cache/babel-loader/a158a9ffa0c9b169aee53dd4a0c44501a596755b4e4f6ede7746d65a72e2a71f.json +0 -1
- xinference/web/ui/node_modules/.cache/babel-loader/c7bf40bab396765f67d0fed627ed3665890608b2d0edaa3e8cb7cfc96310db45.json +0 -1
- xinference/web/ui/node_modules/.cache/babel-loader/d6c643278a0b28320e6f33a60f5fb64c053997cbdc39a60e53ccc574688ade9e.json +0 -1
- xinference/web/ui/node_modules/.cache/babel-loader/e42b72d4cc1ea412ebecbb8d040dc6c6bfee462c33903c2f1f3facb602ad742e.json +0 -1
- xinference/web/ui/node_modules/.cache/babel-loader/e64b7e8cedcf43d4c95deba60ec1341855c887705805bb62431693118b870c69.json +0 -1
- xinference/web/ui/node_modules/.cache/babel-loader/f5039ddbeb815c51491a1989532006b96fc3ae49c6c60e3c097f875b4ae915ae.json +0 -1
- xinference/web/ui/node_modules/.cache/babel-loader/f72f011744c4649fabddca6f7a9327861ac0a315a89b1a2e62a39774e7863845.json +0 -1
- xinference/web/ui/node_modules/.cache/babel-loader/feabb04b4aa507102da0a64398a40818e878fd1df9b75dda8461b3e1e7ff3f11.json +0 -1
- /xinference/web/ui/build/static/js/{main.4eb4ee80.js.LICENSE.txt → main.1eb206d1.js.LICENSE.txt} +0 -0
- {xinference-1.1.0.dist-info → xinference-1.2.0.dist-info}/LICENSE +0 -0
- {xinference-1.1.0.dist-info → xinference-1.2.0.dist-info}/WHEEL +0 -0
- {xinference-1.1.0.dist-info → xinference-1.2.0.dist-info}/entry_points.txt +0 -0
- {xinference-1.1.0.dist-info → xinference-1.2.0.dist-info}/top_level.txt +0 -0
--- /dev/null
+++ b/xinference/model/llm/vllm/xavier/executor.py
@@ -0,0 +1,132 @@
+# Copyright 2022-2025 XProbe Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING, List, Optional, Set, Tuple, Union
+
+import xoscar as xo
+from vllm.executor.gpu_executor import GPUExecutorAsync
+from vllm.model_executor.layers.sampler import SamplerOutput
+from vllm.sequence import ExecuteModelRequest, PoolerOutput
+from vllm.utils import is_pin_memory_available
+from vllm.worker.cache_engine import CacheEngine
+
+if TYPE_CHECKING:
+    from .scheduler import XavierScheduler
+
+
+class XavierExecutor(GPUExecutorAsync):
+    scheduler: Optional[List["XavierScheduler"]] = None
+
+    def _init_executor(self) -> None:
+        super()._init_executor()
+        self._transfer_ref = None
+        self._block_tracker_ref = None
+
+    async def init_transfer(self):
+        """
+        In vllm, the `cache_engine` is the entity that truly manages the KV cache tensors.
+        Retrieve the necessary transmission information from the `cache_engine`.
+        """
+        transfer_ref = await self._get_transfer_ref()
+        ref_cache_engine: CacheEngine = self.driver_worker.cache_engine[0]
+        buffer_dtype = ref_cache_engine.dtype
+        buffer_device = "cpu"
+        buffer_pin_memory = is_pin_memory_available()
+        num_attn_layers = ref_cache_engine.num_attention_layers
+        kv_cache_shape = ref_cache_engine.gpu_cache[0].shape
+        assert kv_cache_shape[0] == 2
+        buffer_num = 2
+        transfer_block_num = self.vllm_config.xavier_config.get("transfer_block_num")
+        buffer_shape = (
+            transfer_block_num,
+            num_attn_layers,
+            kv_cache_shape[0],
+            *kv_cache_shape[2:],
+        )
+        await transfer_ref.setup(
+            self.driver_worker.cache_engine,
+            self.scheduler,
+            num_buffer=buffer_num,
+            buffer_shape=buffer_shape,
+            buffer_dtype=buffer_dtype,
+            buffer_device=buffer_device,
+            pin_memory=buffer_pin_memory,
+        )
+
+    async def _get_block_tracker_ref(self):
+        from .block_tracker import VLLMBlockTracker
+
+        if self._block_tracker_ref is None:
+            block_tracker_address = self.vllm_config.xavier_config.get(
+                "block_tracker_address"
+            )
+            self._block_tracker_ref = await xo.actor_ref(
+                address=block_tracker_address, uid=VLLMBlockTracker.default_uid()
+            )
+        return self._block_tracker_ref
+
+    async def _get_transfer_ref(self):
+        from .transfer import TransferActor
+
+        if self._transfer_ref is None:
+            transfer_address = self.vllm_config.xavier_config.get("rank_address")
+            rank = self.vllm_config.xavier_config.get("rank")
+            self._transfer_ref = await xo.actor_ref(
+                address=transfer_address, uid=f"{TransferActor.default_uid()}-{rank}"
+            )
+        return self._transfer_ref
+
+    def get_rank_address(self) -> str:
+        return self.vllm_config.xavier_config.get("rank_address")
+
+    async def execute_model_async(
+        self,
+        execute_model_req: ExecuteModelRequest,
+    ) -> List[Union[SamplerOutput, PoolerOutput]]:
+        """
+        Collect information about the blocks involved in the execution before the vllm `ModelRunner` executes.
+        This information will be used by the tracker after execution to register the locally computed blocks.
+        """
+        virtual_engine = execute_model_req.virtual_engine
+        block_tracker_ref = await self._get_block_tracker_ref()
+        scheduler = self.scheduler[virtual_engine]  # type: ignore
+        rank_address = self.get_rank_address()
+        executed_blocks_details: Set[Tuple[int, int]] = set()
+        for meta in execute_model_req.seq_group_metadata_list:
+            block_tables = meta.block_tables
+            for seq_id, block_ids in block_tables.items():
+                for _id in block_ids:
+                    b = scheduler.block_manager.get_block_by_block_id(seq_id, _id)
+                    # The `executed` attribute is used to prevent duplicate registration of the block.
+                    executed = scheduler.block_manager.get_block_status_by_block_id(
+                        "executed", _id
+                    )
+                    detail = (b.content_hash, b.block_id)
+                    if (b.content_hash is not None) and (not executed):
+                        executed_blocks_details.add(detail)
+
+        res = await super().execute_model_async(execute_model_req)
+
+        """
+        Why not collect and register the information after execution?
+        Because after execution, the model's execution callback hook will release the block_id,
+        causing the block manager to lose access to the correct information.
+        """
+        await block_tracker_ref.register_blocks(
+            virtual_engine, list(executed_blocks_details), rank_address
+        )
+
+        for _, _id in executed_blocks_details:
+            scheduler.block_manager.set_block_status_by_block_id("executed", _id, True)
+
+        return res
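The executor above reads all of its coordination settings from `vllm_config.xavier_config`: `block_tracker_address`, `rank_address`, `rank`, and `transfer_block_num`. A minimal sketch of such a mapping is shown below; the addresses and block count are placeholder values chosen purely for illustration (the actual wiring lives elsewhere in the xavier package), not defaults shipped in this release.

# Illustrative only: the keys match what XavierExecutor reads above;
# the addresses and transfer_block_num are made-up placeholder values.
xavier_config = {
    "block_tracker_address": "127.0.0.1:31000",  # xoscar address of the VLLMBlockTracker actor
    "rank_address": "127.0.0.1:31001",           # xoscar address hosting this replica's TransferActor
    "rank": 0,                                   # replica rank, appended to the TransferActor uid
    "transfer_block_num": 512,                   # KV-cache blocks per staging buffer in init_transfer()
}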
--- /dev/null
+++ b/xinference/model/llm/vllm/xavier/scheduler.py
@@ -0,0 +1,422 @@
+# Copyright 2022-2025 XProbe Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import asyncio
+import logging
+import time
+from collections import deque
+from typing import Callable, Deque, Dict, List, Optional, Set, Tuple, no_type_check
+
+import xoscar as xo
+from vllm.config import CacheConfig, LoRAConfig, SchedulerConfig
+from vllm.core.block.interfaces import Block
+from vllm.core.interfaces import BlockSpaceManager
+from vllm.core.scheduler import Scheduler, SchedulerOutputs
+from vllm.sequence import (
+    SequenceData,
+    SequenceGroup,
+    SequenceGroupMetadata,
+    SequenceGroupMetadataDelta,
+    SequenceStage,
+    SequenceStatus,
+)
+
+from .block_manager import XavierBlockManager
+
+logger = logging.getLogger(__name__)
+
+
+class XavierScheduler(Scheduler):
+    @staticmethod
+    def _get_block_space_manager_class(version: str):
+        logger.debug("Init xavier block manager.")
+        return XavierBlockManager
+
+    def __init__(
+        self,
+        scheduler_config: SchedulerConfig,
+        cache_config: CacheConfig,
+        lora_config: Optional[LoRAConfig],
+        pipeline_parallel_size: int = 1,
+        output_proc_callback: Optional[Callable] = None,
+        xavier_config: Optional[Dict] = None,
+        virtual_engine: Optional[int] = 0,
+    ) -> None:
+        BlockSpaceManager.get_block_space_manager_class = (
+            self._get_block_space_manager_class
+        )
+        super().__init__(
+            scheduler_config,
+            cache_config,
+            lora_config,
+            pipeline_parallel_size,
+            output_proc_callback,
+        )
+        xavier_config["virtual_engine"] = virtual_engine  # type: ignore
+        self.block_manager.xavier_config = xavier_config
+        self._xavier_config = xavier_config
+        self._virtual_engine = virtual_engine
+        self._block_tracker_ref = None
+        self._transfer_ref = None
+        self._transferring: Deque[SequenceGroup] = deque()
+        self._transfer_status: Dict[SequenceGroup, Set[int]] = {}
+
+    async def _get_block_tracker_ref(self):
+        from .block_tracker import VLLMBlockTracker
+
+        if self._block_tracker_ref is None:
+            block_tracker_address = self._xavier_config.get("block_tracker_address")
+            self._block_tracker_ref = await xo.actor_ref(
+                address=block_tracker_address, uid=VLLMBlockTracker.default_uid()
+            )
+        return self._block_tracker_ref
+
+    async def _get_transfer_ref(self):
+        from .transfer import TransferActor
+
+        if self._transfer_ref is None:
+            transfer_address = self._xavier_config.get("rank_address")
+            rank = self._xavier_config.get("rank")
+            self._transfer_ref = await xo.actor_ref(
+                address=transfer_address, uid=f"{TransferActor.default_uid()}-{rank}"
+            )
+        return self._transfer_ref
+
+    async def _get_transfer_details(
+        self,
+        virtual_engine: int,
+        block_tables: Dict[int, List[int]],
+        seq_group: SequenceGroup,
+    ) -> Tuple[Set[int], Dict[str, Set[Tuple[int, int, int]]]]:
+        """
+        Retrieve information from other replicas to check if any blocks have already been computed,
+        for the purpose of data transfer.
+        """
+        details: Set[Tuple[int, int]] = set()
+        for seq in seq_group.get_seqs(status=SequenceStatus.RUNNING):
+            block_ids = block_tables[seq.seq_id]
+            for _id in block_ids:
+                block: Block = self.block_manager.get_block_by_block_id(seq.seq_id, _id)
+                detail = (block.content_hash, _id)
+                """
+                1. `block.content_hash is not None` means that the block has been filled with tokens.
+                Unless it is evicted from the cache, the computation result of this block is constant.
+                2. Check the `transferred` status of the block.
+                If it is `True`, it means the block has already been transferred locally
+                and does not need to be transferred again.
+                3. Check the `executed` status of the block.
+                If it is `True`, it means the block has already been computed locally
+                and does not need to be transferred.
+                """
+                if (
+                    (block.content_hash is not None)
+                    and (
+                        not self.block_manager.get_block_status_by_block_id(
+                            "transferred", block.block_id
+                        )
+                    )
+                    and (
+                        not self.block_manager.get_block_status_by_block_id(
+                            "executed", block.block_id
+                        )
+                    )
+                ):
+                    details.add(detail)
+        tracker_ref = await self._get_block_tracker_ref()
+        remote = await tracker_ref.query_blocks(virtual_engine, list(details))
+        # Not all queried blocks have corresponding results in other replicas.
+        # Therefore, it is necessary to record which local block data was actually transferred.
+        local: Set[int] = set()
+        for _, remote_details in remote.items():
+            for _, _, local_block_id in remote_details:
+                local.add(local_block_id)
+        if local:
+            logger.debug(
+                f"Data in local blocks: {local} will be transmitted from the remote."
+            )
+        return local, remote
+
+    async def _do_transfer_inner(
+        self, virtual_engine: int, remote: Dict[str, Set[Tuple[int, int, int]]]
+    ):
+        transfer_ref = await self._get_transfer_ref()
+        for addr, hash_and_block_id in remote.items():
+            src_to_dst: Dict[int, int] = {x[1]: x[2] for x in hash_and_block_id}
+            await transfer_ref.recv(virtual_engine, addr, src_to_dst)
+
+    async def _do_transfer(
+        self,
+        virtual_engine: int,
+        local: Set[int],
+        remote: Dict[str, Set[Tuple[int, int, int]]],
+        seq_group: SequenceGroup,
+        is_prefill: bool,
+    ):
+        await self._do_transfer_inner(virtual_engine, remote)
+        # After the transfer is completed, update the corresponding metadata.
+        self._transfer_status[seq_group] = local
+        for _id in local:
+            self.block_manager.set_block_status_by_block_id("transferred", _id, True)
+        # After the transfer, place the `seq_group` back into the appropriate queue to
+        # wait for the next scheduling execution.
+        if is_prefill:
+            self.waiting.appendleft(seq_group)
+        else:
+            self.running.appendleft(seq_group)
+        self._transferring.remove(seq_group)
+
+    @no_type_check
+    async def schedule(
+        self,
+    ) -> Tuple[List[SequenceGroupMetadata], SchedulerOutputs, bool]:
+        virtual_engine = self._virtual_engine
+
+        # Schedule sequence groups.
+        # This function call changes the internal states of the scheduler
+        # such as self.running, self.swapped, and self.waiting.
+        scheduler_start_time = time.perf_counter()
+
+        scheduler_outputs: SchedulerOutputs = self._schedule()
+        now = time.time()
+
+        if not self.cache_config.enable_prefix_caching:
+            common_computed_block_nums = []
+
+        allow_async_output_proc: bool = self.use_async_output_proc
+
+        """Xinference Change!!!
+        Additional data structures required by Xavier.
+        """
+        scheduled_seq_groups = []
+        has_transferring = False
+
+        # Create input data structures.
+        seq_group_metadata_list: List[SequenceGroupMetadata] = []
+        for i, scheduled_seq_group in enumerate(scheduler_outputs.scheduled_seq_groups):
+            seq_group = scheduled_seq_group.seq_group
+            token_chunk_size = scheduled_seq_group.token_chunk_size
+            seq_group.maybe_set_first_scheduled_time(now)
+
+            seq_group_metadata = self._seq_group_metadata_cache[
+                self.cache_id
+            ].get_object()
+            seq_group_metadata.seq_data.clear()
+            seq_group_metadata.block_tables.clear()
+
+            # seq_id -> SequenceData
+            seq_data: Dict[int, SequenceData] = {}
+            # seq_id -> physical block numbers
+            block_tables: Dict[int, List[int]] = {}
+
+            if seq_group.is_encoder_decoder():
+                # Encoder associated with SequenceGroup
+                encoder_seq = seq_group.get_encoder_seq()
+                assert encoder_seq is not None
+                encoder_seq_data = encoder_seq.data
+                # Block table for cross-attention
+                # Also managed at SequenceGroup level
+                cross_block_table = self.block_manager.get_cross_block_table(seq_group)
+            else:
+                encoder_seq_data = None
+                cross_block_table = None
+
+            for seq in seq_group.get_seqs(status=SequenceStatus.RUNNING):
+                seq_id = seq.seq_id
+                seq_data[seq_id] = seq.data
+                block_tables[seq_id] = self.block_manager.get_block_table(seq)
+                self.block_manager.access_all_blocks_in_seq(seq, now)
+
+            """Xinference Change!!!
+            After completing the scheduling, the blocks have been allocated.
+            Therefore, it is possible to check whether some blocks have already been computed on other replicas based on this information,
+            and subsequently initiate the transfer.
+            """
+            local, remote = await self._get_transfer_details(
+                virtual_engine, block_tables, seq_group
+            )
+            if remote:
+                running_seqs = seq_group.get_seqs(status=SequenceStatus.RUNNING)
+                # According to the internal code comments in vllm,
+                # whether `token_chunk_size` is 1 can indicate whether the `seq_group` is in the decode or prefill stage.
+                is_prefill = token_chunk_size != 1
+                for seq in running_seqs:
+                    seq.status = (
+                        SequenceStatus.WAITING if is_prefill else SequenceStatus.RUNNING
+                    )
+                    # Additional attribute `transferred` to mark that this `seq_group` involves a transfer process.
+                    # During the next scheduling, block allocation will no longer be required
+                    # since it has already been completed.
+                    seq.transferred = True
+                    seq.data._stage = (
+                        SequenceStage.PREFILL if is_prefill else SequenceStage.DECODE
+                    )
+                self._transfer_status[seq_group] = set()
+                # Use `create_task` to avoid blocking subsequent scheduling.
+                asyncio.create_task(
+                    self._do_transfer(
+                        virtual_engine, local, remote, seq_group, is_prefill
+                    )
+                )
+                # The `seq_group` that is currently being transferred enters a new queue.
+                self._transferring.append(seq_group)
+                has_transferring = True
+                continue
+            else:
+                scheduled_seq_groups.append(seq_group)
+
+            if self.cache_config.enable_prefix_caching:
+                common_computed_block_nums = (
+                    self.block_manager.get_common_computed_block_ids(
+                        seq_group.get_seqs(status=SequenceStatus.RUNNING)
+                    )
+                )
+                """Xinference Change!!!
+                This is very important and is the core of Xavier.
+                `computed_block_nums` is the key attribute that determines which blocks do not need to be computed,
+                as decided by the `model_runner`.
+                Therefore, after the transfer is completed, this attribute needs to be updated.
+                """
+                if seq_group in self._transfer_status:
+                    transferred_blocks = self._transfer_status[seq_group]
+                    if transferred_blocks:
+                        common_computed_block_nums.extend(transferred_blocks)
+                        common_computed_block_nums = list(
+                            sorted(common_computed_block_nums)
+                        )
+                        del self._transfer_status[seq_group]
+
+            do_sample = True
+            is_prompt = seq_group.is_prefill()
+            # We should send the metadata to workers when the first prefill
+            # is sent. Subsequent requests could be chunked prefill or decode.
+            is_first_prefill = False
+            if is_prompt:
+                seqs = seq_group.get_seqs()
+                # Prefill has only 1 sequence.
+                assert len(seqs) == 1
+                num_computed_tokens = seqs[0].data.get_num_computed_tokens()
+                is_first_prefill = num_computed_tokens == 0
+                # In the next iteration, all prompt tokens are not computed.
+                # It means the prefill is chunked, and we don't need sampling.
+                # NOTE: We use get_len instead of get_prompt_len because when
+                # a sequence is preempted, prefill includes previous generated
+                # output tokens.
+                if token_chunk_size + num_computed_tokens < seqs[0].data.get_len():
+                    do_sample = False
+
+            # It assumes the scheduled_seq_groups is ordered by
+            # prefill < decoding.
+            if is_first_prefill or not self.scheduler_config.send_delta_data:
+                seq_group_metadata = SequenceGroupMetadata(
+                    request_id=seq_group.request_id,
+                    is_prompt=is_prompt,
+                    seq_data=seq_data,
+                    sampling_params=seq_group.sampling_params,
+                    block_tables=block_tables,
+                    do_sample=do_sample,
+                    pooling_params=seq_group.pooling_params,
+                    token_chunk_size=token_chunk_size,
+                    lora_request=seq_group.lora_request,
+                    computed_block_nums=common_computed_block_nums,
+                    encoder_seq_data=encoder_seq_data,
+                    cross_block_table=cross_block_table,
+                    state=seq_group.state,
+                    token_type_ids=seq_group.token_type_ids,
+                    # `multi_modal_data` will only be present for the 1st comm
+                    # between engine and worker.
+                    # the subsequent comms can still use delta, but
+                    # `multi_modal_data` will be None.
+                    multi_modal_data=seq_group.multi_modal_data
+                    if scheduler_outputs.num_prefill_groups > 0
+                    else None,
+                    multi_modal_placeholders=seq_group.multi_modal_placeholders
+                    if scheduler_outputs.num_prefill_groups > 0
+                    else None,
+                    mm_processor_kwargs=seq_group.mm_processor_kwargs,
+                    prompt_adapter_request=seq_group.prompt_adapter_request,
+                )
+            else:
+                # When SPMD mode is enabled, we only send delta data except for
+                # the first request to reduce serialization cost.
+                seq_data_delta = {}
+                for id, data in seq_data.items():
+                    seq_data_delta[id] = data.get_delta_and_reset()
+                seq_group_metadata = SequenceGroupMetadataDelta(
+                    seq_data_delta,
+                    seq_group.request_id,
+                    block_tables,
+                    is_prompt,
+                    do_sample=do_sample,
+                    token_chunk_size=token_chunk_size,
+                    computed_block_nums=common_computed_block_nums,
+                )
+            seq_group_metadata_list.append(seq_group_metadata)
+
+            if allow_async_output_proc:
+                allow_async_output_proc = self._allow_async_output_proc(seq_group)
+
+        """Xinference Change!!!
+        If the `seq_group` in this scheduling triggers a transfer,
+        it needs to be removed from the running queue (as it is already in the transferring queue).
+        It should remain in the transferring queue until the transfer is complete,
+        and then it can be placed back into the appropriate queue for scheduling.
+        """
+        if has_transferring:
+            scheduler_outputs.scheduled_seq_groups = scheduled_seq_groups
+            for seq_group in self.running.copy():
+                if seq_group in self._transfer_status:
+                    self.running.remove(seq_group)
+
+        # Now that the batch has been created, we can assume all blocks in the
+        # batch will have been computed before the next scheduling invocation.
+        # This is because the engine assumes that a failure in model execution
+        # will crash the vLLM instance / will not retry.
+        for scheduled_seq_group in scheduler_outputs.scheduled_seq_groups:
+            self.block_manager.mark_blocks_as_computed(
+                scheduled_seq_group.seq_group, scheduled_seq_group.token_chunk_size
+            )
+
+        self._seq_group_metadata_cache[self.next_cache_id].reset()
+
+        scheduler_time = time.perf_counter() - scheduler_start_time
+        # Add this to scheduler time to all the sequences that are currently
+        # running. This will help estimate if the scheduler is a significant
+        # component in the e2e latency.
+        for seq_group in self.running:
+            if seq_group is not None and seq_group.metrics is not None:
+                if seq_group.metrics.scheduler_time is not None:
+                    seq_group.metrics.scheduler_time += scheduler_time
+                else:
+                    seq_group.metrics.scheduler_time = scheduler_time
+
+        # Move to next cache (if exists)
+        self.cache_id = self.next_cache_id
+
+        # Return results
+        return (seq_group_metadata_list, scheduler_outputs, allow_async_output_proc)
+
+    def has_unfinished_seqs(self) -> bool:
+        """
+        This interface is used to determine whether the scheduling process should stop,
+        so it needs to include information about the transferring queue.
+        """
+        res = super().has_unfinished_seqs()
+        return res or len(self._transferring) != 0
+
+    def get_num_unfinished_seq_groups(self) -> int:
+        """
+        When retrieving information from this interface,
+        the information from the transferring queue needs to be taken into account.
+        """
+        res = super().get_num_unfinished_seq_groups()
+        return res + len(self._transferring)
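The scheduler's queue bookkeeping can be read separately from the vLLM details: a sequence group that triggers a transfer leaves `running`, waits in `_transferring`, and is pushed back to the front of `waiting` or `running` when `_do_transfer` completes. The toy sketch below reproduces only that hand-off, with plain deques standing in for the real scheduler state; it is an illustration of the flow above, not code from the package.

from collections import deque

# Toy stand-ins for the scheduler queues used above (illustration only).
waiting: deque = deque()
running: deque = deque(["seq_group_A"])
transferring: deque = deque()

def start_transfer(group):
    # schedule(): a group that needs remote blocks leaves `running`
    # and parks in the transferring queue.
    running.remove(group)
    transferring.append(group)

def finish_transfer(group, is_prefill):
    # _do_transfer(): once the blocks arrive, the group is re-queued
    # at the head of the appropriate queue and leaves the transferring queue.
    (waiting if is_prefill else running).appendleft(group)
    transferring.remove(group)

start_transfer("seq_group_A")
finish_transfer("seq_group_A", is_prefill=True)
assert list(waiting) == ["seq_group_A"] and not transferring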
--- /dev/null
+++ b/xinference/model/llm/vllm/xavier/test/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2022-2025 XProbe Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
--- /dev/null
+++ b/xinference/model/llm/vllm/xavier/test/test_xavier.py
@@ -0,0 +1,122 @@
+# Copyright 2022-2025 XProbe Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import os
+import sys
+
+import pytest
+import xoscar as xo
+
+from ..block_tracker import VLLMBlockTracker
+
+
+class ExtendedBlockTracker(VLLMBlockTracker):
+    def get_hash_to_address_and_block_id(self):
+        return self._hash_to_address_and_block_id
+
+    def get_address_to_hash_and_block_id(self):
+        return self._address_to_hash_and_block_id
+
+
+@pytest.fixture
+async def actor_pool_context():
+    start_method = (
+        os.environ.get("POOL_START_METHOD", "forkserver")
+        if sys.platform != "win32"
+        else None
+    )
+    pool = await xo.create_actor_pool(
+        "127.0.0.1", n_process=2, subprocess_start_method=start_method
+    )
+    async with pool:
+        yield pool
+
+
+@pytest.mark.asyncio
+async def test_block_tracker(actor_pool_context):
+    actor_pool = actor_pool_context
+    addr = actor_pool.external_address
+    tracker_ref: xo.ActorRefType[ExtendedBlockTracker] = await xo.create_actor(
+        ExtendedBlockTracker,
+        address=addr,
+        uid=VLLMBlockTracker.default_uid(),
+    )
+
+    virtual_engine = 0
+    block_infos = [(123, 0), (456, 1), (789, 2)]
+
+    # register blocks
+    await tracker_ref.register_blocks(virtual_engine, block_infos, addr)
+
+    # query blocks
+    res = await tracker_ref.query_blocks(virtual_engine, [(123, 4), (789, 5)])
+    assert len(res) == 1
+    assert addr in res
+    assert len(res[addr]) == 2
+    assert {x[0] for x in res[addr]} == {123, 789}
+    assert {x[1] for x in res[addr]} == {0, 2}
+    assert {x[2] for x in res[addr]} == {4, 5}
+
+    # query with extra info
+    res = await tracker_ref.query_blocks(virtual_engine, [(123, 4), (789, 5), (110, 6)])
+    assert len(res) == 1
+    assert addr in res
+    assert len(res[addr]) == 2
+    assert {x[0] for x in res[addr]} == {123, 789}
+    assert {x[1] for x in res[addr]} == {0, 2}
+    assert {x[2] for x in res[addr]} == {4, 5}
+
+    # unregister block
+    await tracker_ref.unregister_block(virtual_engine, addr, 1)
+    res = await tracker_ref.query_blocks(virtual_engine, [(123, 4), (456, 7)])
+    assert len(res) == 1
+    assert addr in res
+    assert len(res[addr]) == 1
+    assert {x[0] for x in res[addr]} == {123}
+    assert {x[1] for x in res[addr]} == {
+        0,
+    }
+    assert {x[2] for x in res[addr]} == {
+        4,
+    }
+    # nothing happens
+    await tracker_ref.unregister_block(virtual_engine, addr, 3)
+    res = await tracker_ref.query_blocks(virtual_engine, [(123, 4), (456, 7)])
+    assert len(res) == 1
+    assert addr in res
+    assert len(res[addr]) == 1
+    assert {x[0] for x in res[addr]} == {123}
+    assert {x[1] for x in res[addr]} == {
+        0,
+    }
+    assert {x[2] for x in res[addr]} == {
+        4,
+    }
+    # query returns empty
+    res = await tracker_ref.query_blocks(virtual_engine, [(456, 8)])
+    assert res == {}
+
+    # check internal data
+    hash_to_address_and_block_id = await tracker_ref.get_hash_to_address_and_block_id()
+    assert virtual_engine in hash_to_address_and_block_id
+    assert hash_to_address_and_block_id[virtual_engine] == {
+        123: {
+            (addr, 0),
+        },
+        456: set(),
+        789: {(addr, 2)},
+    }
+
+    address_to_hash_and_block_id = await tracker_ref.get_address_to_hash_and_block_id()
+    assert virtual_engine in address_to_hash_and_block_id
+    assert address_to_hash_and_block_id[virtual_engine] == {addr: {(123, 0), (789, 2)}}
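Read together with the scheduler above, this test pins down the tracker protocol: `register_blocks` receives `(content_hash, block_id)` pairs plus the publishing replica's address, while `query_blocks` receives `(content_hash, local_block_id)` pairs and answers, per remote address, with `(content_hash, remote_block_id, local_block_id)` triples, which are exactly the triples `_do_transfer_inner` turns into its `src_to_dst` mapping. A compact restatement of those shapes, reusing the toy values from the test above (the address string is a placeholder):

# Shapes exercised by test_block_tracker (values copied from the assertions above).
registered = [(123, 0), (456, 1), (789, 2)]              # (content_hash, block_id) published by one replica
query = [(123, 4), (789, 5)]                             # (content_hash, local_block_id) asked by another replica
expected = {"replica_addr": {(123, 0, 4), (789, 2, 5)}}  # addr -> {(hash, remote_block_id, local_block_id)}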