xinference 1.1.1__py3-none-any.whl → 1.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of xinference might be problematic. Click here for more details.

Files changed (125) hide show
  1. xinference/_version.py +3 -3
  2. xinference/api/restful_api.py +49 -65
  3. xinference/core/model.py +77 -19
  4. xinference/core/supervisor.py +81 -10
  5. xinference/core/utils.py +2 -2
  6. xinference/core/worker.py +32 -0
  7. xinference/model/image/model_spec.json +18 -0
  8. xinference/model/image/model_spec_modelscope.json +20 -0
  9. xinference/model/llm/__init__.py +2 -0
  10. xinference/model/llm/llm_family.json +96 -0
  11. xinference/model/llm/llm_family_modelscope.json +99 -0
  12. xinference/model/llm/mlx/core.py +23 -73
  13. xinference/model/llm/transformers/cogagent.py +272 -0
  14. xinference/model/llm/transformers/core.py +1 -0
  15. xinference/model/llm/transformers/qwen2_vl.py +10 -1
  16. xinference/model/llm/utils.py +27 -3
  17. xinference/model/llm/vllm/core.py +37 -7
  18. xinference/model/llm/vllm/xavier/__init__.py +13 -0
  19. xinference/model/llm/vllm/xavier/allocator.py +74 -0
  20. xinference/model/llm/vllm/xavier/block.py +112 -0
  21. xinference/model/llm/vllm/xavier/block_manager.py +71 -0
  22. xinference/model/llm/vllm/xavier/block_tracker.py +116 -0
  23. xinference/model/llm/vllm/xavier/engine.py +247 -0
  24. xinference/model/llm/vllm/xavier/executor.py +132 -0
  25. xinference/model/llm/vllm/xavier/scheduler.py +422 -0
  26. xinference/model/llm/vllm/xavier/test/__init__.py +13 -0
  27. xinference/model/llm/vllm/xavier/test/test_xavier.py +122 -0
  28. xinference/model/llm/vllm/xavier/transfer.py +298 -0
  29. xinference/model/video/diffusers.py +14 -0
  30. xinference/model/video/model_spec.json +15 -0
  31. xinference/model/video/model_spec_modelscope.json +16 -0
  32. xinference/types.py +13 -0
  33. xinference/web/ui/build/asset-manifest.json +6 -6
  34. xinference/web/ui/build/index.html +1 -1
  35. xinference/web/ui/build/static/css/main.51a587ff.css +2 -0
  36. xinference/web/ui/build/static/css/main.51a587ff.css.map +1 -0
  37. xinference/web/ui/build/static/js/main.1eb206d1.js +3 -0
  38. xinference/web/ui/build/static/js/main.1eb206d1.js.map +1 -0
  39. xinference/web/ui/node_modules/.cache/babel-loader/03c4052f1b91f6ba0c5389bdcf49c43319b4076c08e4b8585dab312538ae290a.json +1 -0
  40. xinference/web/ui/node_modules/.cache/babel-loader/1786b83003b8e9605a0f5f855a185d4d16e38fc893dfb326a2a9cca206b4240a.json +1 -0
  41. xinference/web/ui/node_modules/.cache/babel-loader/17cbc181dd674b9150b80c73ed6a82656de0082d857f6e5f66d9716129ac0b38.json +1 -0
  42. xinference/web/ui/node_modules/.cache/babel-loader/185ceb8872d562e032b47e79df6a45670e06345b8ed70aad1a131e0476783c5c.json +1 -0
  43. xinference/web/ui/node_modules/.cache/babel-loader/2213d49de260e1f67c888081b18f120f5225462b829ae57c9e05a05cec83689d.json +1 -0
  44. xinference/web/ui/node_modules/.cache/babel-loader/26b8c9f34b0bed789b3a833767672e39302d1e0c09b4276f4d58d1df7b6bd93b.json +1 -0
  45. xinference/web/ui/node_modules/.cache/babel-loader/2b484da66c724d0d56a40849c109327408796a668b1381511b6e9e03baa48658.json +1 -0
  46. xinference/web/ui/node_modules/.cache/babel-loader/2cbbbce9b84df73330d4c42b82436ed881b3847628f2fbc346aa62e2859fd88c.json +1 -0
  47. xinference/web/ui/node_modules/.cache/babel-loader/2ec9b14431ed33ce6901bf9f27007be4e6e472709c99d6e22b50ce528e4b78ee.json +1 -0
  48. xinference/web/ui/node_modules/.cache/babel-loader/3b966db018f96be4a055d6ca205f0990d4d0b370e2980c17d8bca2c9a021819c.json +1 -0
  49. xinference/web/ui/node_modules/.cache/babel-loader/3eefb411b24c2b3ce053570ef50daccf154022f0e168be5ed0fec21394baf9f4.json +1 -0
  50. xinference/web/ui/node_modules/.cache/babel-loader/522b229e3cac219123f0d69673f5570e191c2d2a505dc65b312d336eae2279c0.json +1 -0
  51. xinference/web/ui/node_modules/.cache/babel-loader/52e45f17ba300580ea3fcc9f9228ccba194bb092b76f25e9255af311f8b05aab.json +1 -0
  52. xinference/web/ui/node_modules/.cache/babel-loader/5a0bc4631f936459afc1a3b1d3ec2420118b1f00e11f60ccac3e08088f3f27a8.json +1 -0
  53. xinference/web/ui/node_modules/.cache/babel-loader/611fa2c6c53b66039991d06dfb0473b5ab37fc63b4564e0f6e1718523768a045.json +1 -0
  54. xinference/web/ui/node_modules/.cache/babel-loader/6329bc76c406fe5eb305412383fbde5950f847bb5e43261f73f37622c365acb4.json +1 -0
  55. xinference/web/ui/node_modules/.cache/babel-loader/63c8e07687ea53a4f8a910ee5e42e0eb26cd1acbfbe820f3e3248a786ee51401.json +1 -0
  56. xinference/web/ui/node_modules/.cache/babel-loader/69b2d5001684174ec9da57e07914eed3eac4960018bceb6cbfa801d861301d7c.json +1 -0
  57. xinference/web/ui/node_modules/.cache/babel-loader/710c1acda69e561e30a933b98c6a56d50197868b15c21e2aad55ab6d46649eb6.json +1 -0
  58. xinference/web/ui/node_modules/.cache/babel-loader/720deca1fce5a1dc5056048fa8258fd138a82ea855f350b6613f104a73fb761f.json +1 -0
  59. xinference/web/ui/node_modules/.cache/babel-loader/76a23b92d26a499c57e61eea2b895fbc9771bd0849a72e66f8e633192017978b.json +1 -0
  60. xinference/web/ui/node_modules/.cache/babel-loader/858063f23b34dfe600254eb5afd85518b0002ec4b30b7386616c45600826e3b2.json +1 -0
  61. xinference/web/ui/node_modules/.cache/babel-loader/920b82c1c89124cf217109eeedbfcd3aae3b917be50c9dfb6bbb4ce26bdfd2e7.json +1 -0
  62. xinference/web/ui/node_modules/.cache/babel-loader/94d8b7aeb0076f2ce07db598cea0e87b13bc8d5614eb530b8d6e696c2daf6f88.json +1 -0
  63. xinference/web/ui/node_modules/.cache/babel-loader/9e917fe7022d01b2ccbe5cc0ce73d70bb72bee584ff293bad71bdff6695dee28.json +1 -0
  64. xinference/web/ui/node_modules/.cache/babel-loader/9f28fdb8399f1d0474f0aca86f1658dc94f5bf0c90f6146352de150692de8862.json +1 -0
  65. xinference/web/ui/node_modules/.cache/babel-loader/a0dfafa06b2bb7cba8cad41c482503f61944f759f4318139362602ef5cc47ccb.json +1 -0
  66. xinference/web/ui/node_modules/.cache/babel-loader/afb8084f539534cd594755ea2205ecd5bd1f62dddcfdf75a2eace59a28131278.json +1 -0
  67. xinference/web/ui/node_modules/.cache/babel-loader/b57b1438b77294c1f3f6cfce12ac487d8106c6f016975ba0aec94d98997e2e1e.json +1 -0
  68. xinference/web/ui/node_modules/.cache/babel-loader/b9917b0bf8e4d55ccbac1c334aa04d6ff3c5b6ed9e5d38b9ea2c687fa7d3f5a9.json +1 -0
  69. xinference/web/ui/node_modules/.cache/babel-loader/bbcc94b0149963d1d6f267ee1f4f03d3925b758392ce2f516c3fe8af0e0169fc.json +1 -0
  70. xinference/web/ui/node_modules/.cache/babel-loader/bdee44abeadc4abc17d41c52eb49c6e19a4b1a267b6e16876ce91bdeeebfc52d.json +1 -0
  71. xinference/web/ui/node_modules/.cache/babel-loader/beb112b70f4a56db95920a9e20efb6c97c37b68450716730217a9ee1a9ae92be.json +1 -0
  72. xinference/web/ui/node_modules/.cache/babel-loader/c88db97be0cdf440193b3995996e83510a04cb00048135485fc0e26d197e80b5.json +1 -0
  73. xinference/web/ui/node_modules/.cache/babel-loader/d49e5314d34310a62d01a03067ce1bec5da00abce84c5196aa9c6842fa79a430.json +1 -0
  74. xinference/web/ui/node_modules/.cache/babel-loader/d7664d18c4ddbad9c3a6a31b91f7c00fb0dde804608674a9860ee50f33e54708.json +1 -0
  75. xinference/web/ui/node_modules/.cache/babel-loader/d9072c318b819b7c90a0f7e9cc0b6413b4dbeb8e9859898e53d75ea882fcde99.json +1 -0
  76. xinference/web/ui/node_modules/.cache/babel-loader/db16a983bc08a05f0439cc61ca0840e49e1d8400eef678909f16c032a418a3d6.json +1 -0
  77. xinference/web/ui/node_modules/.cache/babel-loader/dc249829767b8abcbc3677e0b07b6d3ecbfdfe6d08cfe23a665eb33373a9aa9d.json +1 -0
  78. xinference/web/ui/node_modules/.cache/babel-loader/e242c583c2dbc2784f0fcf513523975f7d5df447e106c1c17e49e8578a6fc3ed.json +1 -0
  79. xinference/web/ui/node_modules/.cache/babel-loader/eac5f1296513e69e4b96f750ddccd4d0264e2bae4e4c449144e83274a48698d9.json +1 -0
  80. xinference/web/ui/node_modules/.cache/babel-loader/ed57202cb79649bb716400436590245547df241988fc7c8e1d85d132299542d2.json +1 -0
  81. xinference/web/ui/node_modules/.cache/babel-loader/f125bf72e773a14cdaebd0c343e80adb909d12e317ee5c00cd4a57442fbe2c62.json +1 -0
  82. xinference/web/ui/node_modules/.cache/babel-loader/f91af913d7f91c410719ab13136aaed3aaf0f8dda06652f25c42cb5231587398.json +1 -0
  83. xinference/web/ui/node_modules/.package-lock.json +67 -3
  84. xinference/web/ui/node_modules/@babel/runtime/package.json +592 -538
  85. xinference/web/ui/node_modules/html-parse-stringify/package.json +50 -0
  86. xinference/web/ui/node_modules/i18next/dist/esm/package.json +1 -0
  87. xinference/web/ui/node_modules/i18next/package.json +129 -0
  88. xinference/web/ui/node_modules/react-i18next/.eslintrc.json +74 -0
  89. xinference/web/ui/node_modules/react-i18next/dist/es/package.json +1 -0
  90. xinference/web/ui/node_modules/react-i18next/package.json +162 -0
  91. xinference/web/ui/node_modules/void-elements/package.json +34 -0
  92. xinference/web/ui/package-lock.json +69 -3
  93. xinference/web/ui/package.json +2 -0
  94. xinference/web/ui/src/locales/en.json +186 -0
  95. xinference/web/ui/src/locales/zh.json +186 -0
  96. {xinference-1.1.1.dist-info → xinference-1.2.0.dist-info}/METADATA +9 -6
  97. {xinference-1.1.1.dist-info → xinference-1.2.0.dist-info}/RECORD +102 -56
  98. xinference/web/ui/build/static/css/main.5061c4c3.css +0 -2
  99. xinference/web/ui/build/static/css/main.5061c4c3.css.map +0 -1
  100. xinference/web/ui/build/static/js/main.4eb4ee80.js +0 -3
  101. xinference/web/ui/build/static/js/main.4eb4ee80.js.map +0 -1
  102. xinference/web/ui/node_modules/.cache/babel-loader/07ce9e632e6aff24d7aa3ad8e48224433bbfeb0d633fca723453f1fcae0c9f1c.json +0 -1
  103. xinference/web/ui/node_modules/.cache/babel-loader/1130403f9e46f5738a23b45ac59b57de8f360c908c713e2c0670c2cce9bd367a.json +0 -1
  104. xinference/web/ui/node_modules/.cache/babel-loader/131091b25d26b17cdca187d7542a21475c211138d900cf667682260e76ef9463.json +0 -1
  105. xinference/web/ui/node_modules/.cache/babel-loader/1f269fb2a368363c1cb2237825f1dba093b6bdd8c44cc05954fd19ec2c1fff03.json +0 -1
  106. xinference/web/ui/node_modules/.cache/babel-loader/331312668fa8bd3d7401818f4a25fa98135d7f61371cd6bfff78b18cf4fbdd92.json +0 -1
  107. xinference/web/ui/node_modules/.cache/babel-loader/40f17338fc75ae095de7d2b4d8eae0d5ca0193a7e2bcece4ee745b22a7a2f4b7.json +0 -1
  108. xinference/web/ui/node_modules/.cache/babel-loader/4de9a6942c5f1749d6cbfdd54279699975f16016b182848bc253886f52ec2ec3.json +0 -1
  109. xinference/web/ui/node_modules/.cache/babel-loader/822586ed1077201b64b954f12f25e3f9b45678c1acbabe53d8af3ca82ca71f33.json +0 -1
  110. xinference/web/ui/node_modules/.cache/babel-loader/8c5eeb02f772d02cbe8b89c05428d0dd41a97866f75f7dc1c2164a67f5a1cf98.json +0 -1
  111. xinference/web/ui/node_modules/.cache/babel-loader/8d33354bd2100c8602afc3341f131a88cc36aaeecd5a4b365ed038514708e350.json +0 -1
  112. xinference/web/ui/node_modules/.cache/babel-loader/9375a35b05d56989b2755bf72161fa707c92f28569d33765a75f91a568fda6e9.json +0 -1
  113. xinference/web/ui/node_modules/.cache/babel-loader/a158a9ffa0c9b169aee53dd4a0c44501a596755b4e4f6ede7746d65a72e2a71f.json +0 -1
  114. xinference/web/ui/node_modules/.cache/babel-loader/c7bf40bab396765f67d0fed627ed3665890608b2d0edaa3e8cb7cfc96310db45.json +0 -1
  115. xinference/web/ui/node_modules/.cache/babel-loader/d6c643278a0b28320e6f33a60f5fb64c053997cbdc39a60e53ccc574688ade9e.json +0 -1
  116. xinference/web/ui/node_modules/.cache/babel-loader/e42b72d4cc1ea412ebecbb8d040dc6c6bfee462c33903c2f1f3facb602ad742e.json +0 -1
  117. xinference/web/ui/node_modules/.cache/babel-loader/e64b7e8cedcf43d4c95deba60ec1341855c887705805bb62431693118b870c69.json +0 -1
  118. xinference/web/ui/node_modules/.cache/babel-loader/f5039ddbeb815c51491a1989532006b96fc3ae49c6c60e3c097f875b4ae915ae.json +0 -1
  119. xinference/web/ui/node_modules/.cache/babel-loader/f72f011744c4649fabddca6f7a9327861ac0a315a89b1a2e62a39774e7863845.json +0 -1
  120. xinference/web/ui/node_modules/.cache/babel-loader/feabb04b4aa507102da0a64398a40818e878fd1df9b75dda8461b3e1e7ff3f11.json +0 -1
  121. /xinference/web/ui/build/static/js/{main.4eb4ee80.js.LICENSE.txt → main.1eb206d1.js.LICENSE.txt} +0 -0
  122. {xinference-1.1.1.dist-info → xinference-1.2.0.dist-info}/LICENSE +0 -0
  123. {xinference-1.1.1.dist-info → xinference-1.2.0.dist-info}/WHEEL +0 -0
  124. {xinference-1.1.1.dist-info → xinference-1.2.0.dist-info}/entry_points.txt +0 -0
  125. {xinference-1.1.1.dist-info → xinference-1.2.0.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,71 @@
1
+ # Copyright 2022-2025 XProbe Inc.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import logging
15
+ from typing import Any, Dict, Optional
16
+
17
+ from vllm.core.block.cpu_gpu_block_allocator import CpuGpuBlockAllocator
18
+ from vllm.core.block.interfaces import Block
19
+ from vllm.core.block_manager import SelfAttnBlockSpaceManager
20
+ from vllm.sequence import SequenceGroup, SequenceStatus
21
+ from vllm.utils import Device
22
+
23
+ from .allocator import XavierCpuGpuBlockAllocator
24
+
25
+ logger = logging.getLogger(__name__)
26
+
27
+
28
class XavierBlockManager(SelfAttnBlockSpaceManager):
    """vllm block-space manager extended with Xavier block tracking.

    Swaps in :class:`XavierCpuGpuBlockAllocator` (via a monkey patch on
    ``CpuGpuBlockAllocator.create``) and exposes per-block status helpers
    used by the Xavier scheduler and executor.
    """

    def __init__(self, *args, **kwargs):
        # Monkey patch: must be installed *before* super().__init__(), which
        # is where vllm calls CpuGpuBlockAllocator.create to build the
        # allocator.
        CpuGpuBlockAllocator.create = XavierCpuGpuBlockAllocator.create
        super().__init__(*args, **kwargs)
        self._xavier_config: Optional[Dict[str, Any]] = None
        logger.debug("Init xavier block manager done.")

    @property
    def xavier_config(self):
        return self._xavier_config

    @xavier_config.setter
    def xavier_config(self, value: Dict[str, Any]):
        self._xavier_config = value
        # Keep the allocator's view of the config in sync.
        self.block_allocator.xavier_config = value

    def get_block_by_block_id(self, seq_id: int, block_id: int) -> Optional[Block]:
        """Return the block with ``block_id`` from ``seq_id``'s block table.

        Returns ``None`` when no block with that id exists (previously this
        was an implicit fall-through; made explicit and reflected in the
        return annotation).
        """
        table = self.block_tables[seq_id]
        for b in table.blocks:
            if b.block_id == block_id:
                return b
        return None

    def get_block_status_by_block_id(self, status_name: str, block_id: int) -> bool:
        """Read a boolean tracker flag (e.g. ``"executed"``) of a GPU block."""
        tracker = self.block_allocator._allocators[Device.GPU]._block_tracker[block_id]
        return getattr(tracker, status_name)

    def set_block_status_by_block_id(
        self, status_name: str, block_id: int, status: bool
    ) -> None:
        """Set a boolean tracker flag of a GPU block; the flag must already exist."""
        tracker = self.block_allocator._allocators[Device.GPU]._block_tracker[block_id]
        assert getattr(tracker, status_name, None) is not None
        setattr(tracker, status_name, status)

    def allocate(self, seq_group: SequenceGroup) -> None:
        """
        If the `seq_group` has the `transferred` attribute,
        it indicates that the `seq_group` has gone through the transfer process,
        so the block allocation logic should not be executed again.
        """
        waiting_seqs = seq_group.get_seqs(status=SequenceStatus.WAITING)
        # Generator instead of a list: short-circuits and avoids a temporary.
        if all(getattr(s, "transferred", False) for s in waiting_seqs):
            return
        super().allocate(seq_group)
@@ -0,0 +1,116 @@
1
+ # Copyright 2022-2025 XProbe Inc.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import random
15
+ from typing import Dict, List, Optional, Set, Tuple
16
+
17
+ import xoscar as xo
18
+
19
+
20
class VLLMBlockTracker(xo.StatelessActor):
    """Global registry mapping computed KV-cache blocks to holders.

    Two mirrored indexes are kept per virtual engine:

    * ``hash -> {(address, block_id)}`` — answers "who holds this block?"
      (used by :meth:`query_blocks`).
    * ``address -> {(hash, block_id)}`` — enables cleanup when a replica
      releases a block (used by :meth:`unregister_block`).
    """

    @classmethod
    def default_uid(cls):
        # Plain string literal: nothing to interpolate (was an f-string).
        return "vllm-block-tracker-actor"

    def __init__(self):
        super().__init__()
        # virtual engine -> block hash -> {(address, block_id)}
        self._hash_to_address_and_block_id: Dict[
            int, Dict[int, Set[Tuple[str, int]]]
        ] = {}
        # virtual engine -> address -> {(block hash, block_id)}
        self._address_to_hash_and_block_id: Dict[
            int, Dict[str, Set[Tuple[int, int]]]
        ] = {}

    def register_blocks(
        self, virtual_engine: int, block_infos: List[Tuple[int, int]], address: str
    ):
        """Record that ``address`` now holds ``block_infos`` ((hash, block_id) pairs)."""
        # Update query meta
        hash_to_address_and_block_id = self._hash_to_address_and_block_id.setdefault(
            virtual_engine, {}
        )
        for hash_content, block_id in block_infos:
            hash_to_address_and_block_id.setdefault(hash_content, set()).add(
                (address, block_id)
            )

        # Update remove meta
        address_to_hash_and_block_id = self._address_to_hash_and_block_id.setdefault(
            virtual_engine, {}
        )
        address_to_hash_and_block_id.setdefault(address, set()).update(block_infos)

    def query_blocks(
        self, virtual_engine: int, hash_contents: List[Tuple[int, int]]
    ) -> Dict[str, Set[Tuple[int, int, int]]]:
        """For each requested (hash, local block id), pick one remote holder.

        Returns ``address -> {(hash, remote_block_id, local_block_id)}``;
        empty dict when the engine has no registrations yet.
        """
        if virtual_engine not in self._hash_to_address_and_block_id:
            return {}
        hash_to_address_and_block_id = self._hash_to_address_and_block_id[
            virtual_engine
        ]
        remote: Dict[str, Set[Tuple[int, int, int]]] = {}
        for hash_content, _id in hash_contents:
            holders = hash_to_address_and_block_id.get(hash_content)
            if holders:
                # TODO: Randomly select here, and try to distribute requests as evenly as possible.
                # There may be better methods in the future.
                address, block_id = random.choice(list(holders))
                remote.setdefault(address, set()).add((hash_content, block_id, _id))
        return remote

    def unregister_block(self, virtual_engine: int, address: str, block_id: int):
        """Forget one block previously registered by ``address``.

        Best-effort: silently returns when the engine or address is unknown.
        """
        if (virtual_engine not in self._address_to_hash_and_block_id) or (
            virtual_engine not in self._hash_to_address_and_block_id
        ):
            return

        # Update remove meta
        address_to_hash_and_block_id = self._address_to_hash_and_block_id[
            virtual_engine
        ]
        if address not in address_to_hash_and_block_id:
            return
        hash_and_block_id = address_to_hash_and_block_id[address]
        detail: Optional[Tuple[int, int]] = None
        # Iterate a copy: we mutate the set while searching it.
        for hash_content, _id in hash_and_block_id.copy():
            if _id == block_id:
                detail = (hash_content, block_id)
                hash_and_block_id.discard(detail)
                break

        # Update query meta
        if detail is not None:
            hash_to_address_and_block_id = self._hash_to_address_and_block_id[
                virtual_engine
            ]
            _hash = detail[0]
            if _hash in hash_to_address_and_block_id:
                hash_to_address_and_block_id[_hash].discard((address, detail[1]))
@@ -0,0 +1,247 @@
1
+ # Copyright 2022-2025 XProbe Inc.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import logging
15
+ from typing import Dict, List, Optional, Type, Union
16
+
17
+ from vllm import AsyncEngineArgs, EmbeddingRequestOutput, RequestOutput
18
+ from vllm.config import VllmConfig
19
+ from vllm.engine.async_llm_engine import AsyncLLMEngine, _AsyncLLMEngine
20
+ from vllm.engine.llm_engine import SchedulerOutputState
21
+ from vllm.engine.metrics_types import StatLoggerBase
22
+ from vllm.executor.executor_base import ExecutorBase
23
+ from vllm.sequence import ExecuteModelRequest
24
+ from vllm.usage.usage_lib import UsageContext
25
+
26
+ from .executor import XavierExecutor
27
+ from .scheduler import XavierScheduler
28
+
29
+ logger = logging.getLogger(__name__)
30
+
31
+
32
class XavierInternalEngine(_AsyncLLMEngine):
    """_AsyncLLMEngine variant that swaps in :class:`XavierScheduler`.

    After the parent constructor builds vllm's stock schedulers, they are
    replaced with one ``XavierScheduler`` per virtual engine (pipeline
    stage), and the components that captured the original scheduler list
    are re-pointed at the replacement.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # `xavier_config` is attached to vllm_config by XavierEngine.__init__.
        self._xavier_config = kwargs["vllm_config"].xavier_config
        self.scheduler = [
            XavierScheduler(
                self.scheduler_config,
                self.cache_config,
                self.lora_config,
                self.parallel_config.pipeline_parallel_size,
                self.async_callbacks[v_id]
                if self.model_config.use_async_output_proc
                else None,
                xavier_config=self._xavier_config,
                virtual_engine=v_id,
            )
            for v_id in range(self.parallel_config.pipeline_parallel_size)
        ]
        # Re-point collaborators that hold a reference to the scheduler list.
        self.output_processor.scheduler = self.scheduler
        self.model_executor.scheduler = self.scheduler

    async def step_async(
        self, virtual_engine: int
    ) -> List[Union[RequestOutput, EmbeddingRequestOutput]]:
        """Performs one decoding iteration and returns newly generated results.
        The workers are ran asynchronously if possible.

        This function performs one decoding iteration of the engine. It first
        schedules the sequences to be executed in the next iteration and the
        token blocks to be swapped in/out/copy. Then, it executes the model
        and updates the scheduler with the model outputs. Finally, it decodes
        the sequences and returns the newly generated results.

        NOTE(review): near-verbatim copy of vllm's
        ``_AsyncLLMEngine.step_async``; the only intended change is awaiting
        ``schedule()`` (see the inline "Xinference Change" note). Keep in
        sync with the pinned vllm version when upgrading.
        """
        # these are cached outputs from previous iterations. None if on first
        # iteration
        cached_outputs = self.cached_scheduler_outputs[virtual_engine]
        seq_group_metadata_list = cached_outputs.seq_group_metadata_list
        scheduler_outputs = cached_outputs.scheduler_outputs
        allow_async_output_proc = cached_outputs.allow_async_output_proc

        ctx = self.scheduler_contexts[virtual_engine]

        # Clear outputs for each new scheduler iteration
        ctx.request_outputs.clear()

        # skip the scheduler if there are any remaining steps in the seq groups.
        # This ensures that the scheduler is only called again when the current
        # batch has completed.
        if not self._has_remaining_steps(seq_group_metadata_list):
            # Schedule iteration
            """Xinference Change!!!
            Why copy the entire function code of vllm:
            The purpose here is to modify the way the `schedule` function is invoked to asynchronous calling.
            No other modifications were made elsewhere.
            """
            (
                seq_group_metadata_list,
                scheduler_outputs,
                allow_async_output_proc,
            ) = await self.scheduler[virtual_engine].schedule()

            ctx.seq_group_metadata_list = seq_group_metadata_list
            ctx.scheduler_outputs = scheduler_outputs

            # Maybe switch from async mode to sync mode
            if not allow_async_output_proc and len(ctx.output_queue) > 0:
                self._process_model_outputs(ctx=ctx)

            if (
                self.scheduler_config.is_multi_step
                and scheduler_outputs.num_lookahead_slots > 0
            ):
                # cache the scheduler outputs for the next iteration if we have
                # lookahead slots
                self._cache_scheduler_outputs_for_multi_step(
                    virtual_engine,
                    seq_group_metadata_list,
                    scheduler_outputs,
                    allow_async_output_proc,
                )

        assert seq_group_metadata_list is not None
        assert scheduler_outputs is not None

        if not scheduler_outputs.is_empty():
            finished_requests_ids = self.scheduler[
                virtual_engine
            ].get_and_reset_finished_requests_ids()

            # Check if we have a cached last_output from the previous iteration.
            # For supporting PP this is probably the best way to pass the
            # sampled_token_ids, as a separate broadcast over all the PP stages
            # will cause one virtual engine's microbatch to block the pipeline.
            last_sampled_token_ids = self._get_last_sampled_token_ids(virtual_engine)

            execute_model_req = ExecuteModelRequest(
                seq_group_metadata_list=seq_group_metadata_list,
                blocks_to_swap_in=scheduler_outputs.blocks_to_swap_in,
                blocks_to_swap_out=scheduler_outputs.blocks_to_swap_out,
                blocks_to_copy=scheduler_outputs.blocks_to_copy,
                virtual_engine=virtual_engine,
                num_lookahead_slots=scheduler_outputs.num_lookahead_slots,
                running_queue_size=scheduler_outputs.running_queue_size,
                finished_requests_ids=finished_requests_ids,
                # We use ExecuteModelRequest to pass the last sampled_token_ids
                # to each of the non-last PP stages for in-place prepare_input.
                last_sampled_token_ids=last_sampled_token_ids,
            )

            if allow_async_output_proc:
                execute_model_req.async_callback = self.async_callbacks[virtual_engine]

            # Execute the model.
            outputs = await self.model_executor.execute_model_async(execute_model_req)

            # we need to do this here so that last step's sampled_token_ids can
            # be passed to the next iteration for PP.
            if self.scheduler_config.is_multi_step:
                self._update_cached_scheduler_output(virtual_engine, outputs)
        else:
            # Nothing was scheduled; drain any pending async postprocessing.
            if len(ctx.output_queue) > 0:
                self._process_model_outputs(ctx=ctx)
            outputs = []

        # Finish the current step for all the sequence groups.
        if self.scheduler_config.is_multi_step:
            for seq_group in seq_group_metadata_list:
                seq_group.finish_step()

        if not self._has_remaining_steps(seq_group_metadata_list):
            # Clear the cache if we have finished all the steps
            if self.scheduler_config.is_multi_step:
                self.cached_scheduler_outputs[virtual_engine] = SchedulerOutputState()

            # is_first_step_output is True only when the num_steps of all
            # the sequences are 1. When the num_steps > 1,
            # multi_step_model_runner does the first-step output append.
            is_first_step_output: bool = (
                False
                if not seq_group_metadata_list
                else seq_group_metadata_list[0].state.num_steps == 1
            )

            ctx.append_output(
                outputs=outputs,
                seq_group_metadata_list=seq_group_metadata_list,
                scheduler_outputs=scheduler_outputs,
                is_async=allow_async_output_proc,
                is_last_step=True,
                is_first_step_output=is_first_step_output,
            )

            if outputs and allow_async_output_proc:
                assert (
                    len(outputs) == 1
                ), "Async postprocessor expects only a single output set"
                self._advance_to_next_step(
                    outputs[0],
                    seq_group_metadata_list,
                    scheduler_outputs.scheduled_seq_groups,
                )

            if not allow_async_output_proc:
                self._process_model_outputs(ctx=ctx)

                # Log stats.
                self.do_log_stats(scheduler_outputs, outputs)

                # Tracing
                self.do_tracing(scheduler_outputs)

        else:
            # Multi-step case
            return ctx.request_outputs

        if not self.has_unfinished_requests():
            # Drain async postprocessor (if exists)
            if len(ctx.output_queue) > 0:
                self._process_model_outputs(ctx=ctx)
            assert len(ctx.output_queue) == 0

        return ctx.request_outputs
214
+
215
+
216
class XavierEngine(AsyncLLMEngine):
    """AsyncLLMEngine variant that plugs in the Xavier engine and executor.

    ``from_engine_args`` stashes ``xavier_config`` on the class so that
    ``__init__`` — invoked indirectly by the parent factory — can copy it
    onto the ``vllm_config``, where the other Xavier components (scheduler,
    block manager, executor) expect to find it.
    """

    _engine_class: Type[_AsyncLLMEngine] = XavierInternalEngine
    # NOTE(review): class-level stash; assumes engines are created one at a
    # time through from_engine_args — concurrent creation would race here.
    _xavier_config: Optional[Dict] = None

    @classmethod
    def _get_executor_cls(cls, engine_config: VllmConfig) -> Type[ExecutorBase]:
        # Plain string literal: nothing to interpolate (was an f-string).
        logger.debug("Initializing Xavier executor.")
        return XavierExecutor

    @classmethod
    def from_engine_args(
        cls,
        engine_args: AsyncEngineArgs,
        engine_config: Optional[VllmConfig] = None,
        start_engine_loop: bool = True,
        usage_context: UsageContext = UsageContext.ENGINE_CONTEXT,
        stat_loggers: Optional[Dict[str, StatLoggerBase]] = None,
        xavier_config: Optional[Dict] = None,
    ) -> "AsyncLLMEngine":
        """Build the engine; extends the parent factory with ``xavier_config``."""
        cls._xavier_config = xavier_config
        return super().from_engine_args(
            engine_args, engine_config, start_engine_loop, usage_context, stat_loggers
        )

    def __init__(self, *args, **kwargs):
        # set xavier_config to `vllm_config`,
        # because it may be needed everywhere in the vllm internal components
        kwargs["vllm_config"].xavier_config = self._xavier_config
        super().__init__(*args, **kwargs)

    async def init_xavier(self):
        """Initialize the KV-cache transfer machinery on the executor.

        Must be awaited once after construction, before serving requests.
        """
        await self.engine.model_executor.init_transfer()
@@ -0,0 +1,132 @@
1
+ # Copyright 2022-2025 XProbe Inc.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING, List, Optional, Set, Tuple, Union
15
+
16
+ import xoscar as xo
17
+ from vllm.executor.gpu_executor import GPUExecutorAsync
18
+ from vllm.model_executor.layers.sampler import SamplerOutput
19
+ from vllm.sequence import ExecuteModelRequest, PoolerOutput
20
+ from vllm.utils import is_pin_memory_available
21
+ from vllm.worker.cache_engine import CacheEngine
22
+
23
+ if TYPE_CHECKING:
24
+ from .scheduler import XavierScheduler
25
+
26
+
27
class XavierExecutor(GPUExecutorAsync):
    """GPU executor that registers computed KV-cache blocks with the tracker."""

    # Assigned externally (XavierInternalEngine.__init__ sets
    # `model_executor.scheduler`): one scheduler per virtual engine.
    scheduler: Optional[List["XavierScheduler"]] = None

    def _init_executor(self) -> None:
        super()._init_executor()
        # Actor references are resolved lazily on first use.
        self._transfer_ref = None
        self._block_tracker_ref = None

    async def init_transfer(self):
        """
        In vllm, the `cache_engine` is the entity that truly manages the KV cache tensors.
        Retrieve the necessary transmission information from the `cache_engine`.
        """
        transfer_ref = await self._get_transfer_ref()
        ref_cache_engine: CacheEngine = self.driver_worker.cache_engine[0]
        buffer_dtype = ref_cache_engine.dtype
        # Staging buffers live on the CPU, pinned when the platform allows.
        buffer_device = "cpu"
        buffer_pin_memory = is_pin_memory_available()
        num_attn_layers = ref_cache_engine.num_attention_layers
        kv_cache_shape = ref_cache_engine.gpu_cache[0].shape
        # Leading dim of 2 = separate key and value planes.
        assert kv_cache_shape[0] == 2
        buffer_num = 2
        transfer_block_num = self.vllm_config.xavier_config.get("transfer_block_num")
        # Buffer layout: (blocks per transfer, layers, k/v, per-block dims).
        # NOTE(review): kv_cache_shape[1] (presumably the block count) is
        # deliberately dropped — confirm against the pinned vllm cache layout.
        buffer_shape = (
            transfer_block_num,
            num_attn_layers,
            kv_cache_shape[0],
            *kv_cache_shape[2:],
        )
        await transfer_ref.setup(
            self.driver_worker.cache_engine,
            self.scheduler,
            num_buffer=buffer_num,
            buffer_shape=buffer_shape,
            buffer_dtype=buffer_dtype,
            buffer_device=buffer_device,
            pin_memory=buffer_pin_memory,
        )

    async def _get_block_tracker_ref(self):
        """Lazily resolve (and cache) the global block-tracker actor ref."""
        from .block_tracker import VLLMBlockTracker

        if self._block_tracker_ref is None:
            block_tracker_address = self.vllm_config.xavier_config.get(
                "block_tracker_address"
            )
            self._block_tracker_ref = await xo.actor_ref(
                address=block_tracker_address, uid=VLLMBlockTracker.default_uid()
            )
        return self._block_tracker_ref

    async def _get_transfer_ref(self):
        """Lazily resolve (and cache) this rank's transfer actor ref."""
        from .transfer import TransferActor

        if self._transfer_ref is None:
            transfer_address = self.vllm_config.xavier_config.get("rank_address")
            rank = self.vllm_config.xavier_config.get("rank")
            self._transfer_ref = await xo.actor_ref(
                address=transfer_address, uid=f"{TransferActor.default_uid()}-{rank}"
            )
        return self._transfer_ref

    def get_rank_address(self) -> str:
        # Address under which this rank's blocks are registered with the tracker.
        return self.vllm_config.xavier_config.get("rank_address")

    async def execute_model_async(
        self,
        execute_model_req: ExecuteModelRequest,
    ) -> List[Union[SamplerOutput, PoolerOutput]]:
        """
        Collect information about the blocks involved in the execution before the vllm `ModelRunner` executes.
        This information will be used by the tracker after execution to register the locally computed blocks.
        """
        virtual_engine = execute_model_req.virtual_engine
        block_tracker_ref = await self._get_block_tracker_ref()
        scheduler = self.scheduler[virtual_engine]  # type: ignore
        rank_address = self.get_rank_address()
        # (content_hash, block_id) pairs to register after this step.
        executed_blocks_details: Set[Tuple[int, int]] = set()
        for meta in execute_model_req.seq_group_metadata_list:
            block_tables = meta.block_tables
            for seq_id, block_ids in block_tables.items():
                for _id in block_ids:
                    # NOTE(review): get_block_by_block_id can return None when
                    # the id is missing; b.content_hash would then raise —
                    # presumably scheduled ids are always present; confirm.
                    b = scheduler.block_manager.get_block_by_block_id(seq_id, _id)
                    # The `executed` attribute is used to prevent duplicate registration of the block.
                    executed = scheduler.block_manager.get_block_status_by_block_id(
                        "executed", _id
                    )
                    detail = (b.content_hash, b.block_id)
                    # Only full (content-hashed), not-yet-registered blocks qualify.
                    if (b.content_hash is not None) and (not executed):
                        executed_blocks_details.add(detail)

        res = await super().execute_model_async(execute_model_req)

        """
        Why not collect and register the information after execution?
        Because after execution, the model's execution callback hook will release the block_id,
        causing the block manager to lose access to the correct information.
        """
        await block_tracker_ref.register_blocks(
            virtual_engine, list(executed_blocks_details), rank_address
        )

        # Mark registered blocks so subsequent steps skip them.
        for _, _id in executed_blocks_details:
            scheduler.block_manager.set_block_status_by_block_id("executed", _id, True)

        return res