ipex-llm 2.2.0b20250121__py3-none-win_amd64.whl → 2.2.0b20250123__py3-none-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (50)
  1. ipex_llm/libs/bloom-api.dll +0 -0
  2. ipex_llm/libs/bloom.dll +0 -0
  3. ipex_llm/libs/gptneox-api.dll +0 -0
  4. ipex_llm/libs/gptneox.dll +0 -0
  5. ipex_llm/libs/libbloom_avx.dll +0 -0
  6. ipex_llm/libs/libbloom_vnni.dll +0 -0
  7. ipex_llm/libs/libgptneox_avx.dll +0 -0
  8. ipex_llm/libs/libgptneox_vnni.dll +0 -0
  9. ipex_llm/libs/libllama_avx.dll +0 -0
  10. ipex_llm/libs/libllama_vnni.dll +0 -0
  11. ipex_llm/libs/libstarcoder_avx.dll +0 -0
  12. ipex_llm/libs/libstarcoder_vnni.dll +0 -0
  13. ipex_llm/libs/llama-api.dll +0 -0
  14. ipex_llm/libs/llama.dll +0 -0
  15. ipex_llm/libs/main-bloom.exe +0 -0
  16. ipex_llm/libs/main-gptneox.exe +0 -0
  17. ipex_llm/libs/main-llama.exe +0 -0
  18. ipex_llm/libs/main-starcoder.exe +0 -0
  19. ipex_llm/libs/pipeline.dll +0 -0
  20. ipex_llm/libs/quantize-bloom.exe +0 -0
  21. ipex_llm/libs/quantize-bloom_vnni.exe +0 -0
  22. ipex_llm/libs/quantize-gptneox.exe +0 -0
  23. ipex_llm/libs/quantize-gptneox_vnni.exe +0 -0
  24. ipex_llm/libs/quantize-llama.exe +0 -0
  25. ipex_llm/libs/quantize-llama_vnni.exe +0 -0
  26. ipex_llm/libs/quantize-starcoder.exe +0 -0
  27. ipex_llm/libs/quantize-starcoder_vnni.exe +0 -0
  28. ipex_llm/libs/starcoder-api.dll +0 -0
  29. ipex_llm/libs/starcoder.dll +0 -0
  30. ipex_llm/transformers/convert.py +0 -1
  31. ipex_llm/transformers/low_bit_linear.py +1 -1
  32. ipex_llm/transformers/model.py +1 -3
  33. ipex_llm/transformers/npu_models/mp_models_base.py +3 -1
  34. ipex_llm/transformers/patches.py +0 -11
  35. ipex_llm/vllm/cpu/engine/__init__.py +2 -1
  36. ipex_llm/vllm/cpu/engine/engine.py +159 -75
  37. ipex_llm/vllm/cpu/entrypoints/api_server.py +787 -0
  38. ipex_llm/vllm/cpu/entrypoints/openai/api_server.py +680 -95
  39. ipex_llm/vllm/cpu/entrypoints/openai/cli_args.py +277 -0
  40. ipex_llm/vllm/cpu/ipex_llm_v1_wrapper.py +23 -0
  41. ipex_llm/vllm/cpu/ipex_llm_wrapper.py +24 -0
  42. ipex_llm/vllm/cpu/model_convert.py +126 -233
  43. {ipex_llm-2.2.0b20250121.dist-info → ipex_llm-2.2.0b20250123.dist-info}/METADATA +20 -20
  44. {ipex_llm-2.2.0b20250121.dist-info → ipex_llm-2.2.0b20250123.dist-info}/RECORD +50 -46
  45. {ipex_llm-2.2.0b20250121.data → ipex_llm-2.2.0b20250123.data}/scripts/ipex-llm-init.bat +0 -0
  46. {ipex_llm-2.2.0b20250121.data → ipex_llm-2.2.0b20250123.data}/scripts/llm-chat.ps1 +0 -0
  47. {ipex_llm-2.2.0b20250121.data → ipex_llm-2.2.0b20250123.data}/scripts/llm-cli.ps1 +0 -0
  48. {ipex_llm-2.2.0b20250121.dist-info → ipex_llm-2.2.0b20250123.dist-info}/WHEEL +0 -0
  49. {ipex_llm-2.2.0b20250121.dist-info → ipex_llm-2.2.0b20250123.dist-info}/entry_points.txt +0 -0
  50. {ipex_llm-2.2.0b20250121.dist-info → ipex_llm-2.2.0b20250123.dist-info}/top_level.txt +0 -0
ipex_llm/vllm/cpu/entrypoints/api_server.py
@@ -0,0 +1,787 @@
+ import asyncio
+ import atexit
+ import importlib
+ import inspect
+ import multiprocessing
+ import os
+ import re
+ import signal
+ import socket
+ import tempfile
+ import uuid
+ from argparse import Namespace
+ from contextlib import asynccontextmanager
+ from functools import partial
+ from http import HTTPStatus
+ from typing import AsyncIterator, Optional, Set, Tuple
+
+ import uvloop
+ from fastapi import APIRouter, FastAPI, Request
+ from fastapi.exceptions import RequestValidationError
+ from fastapi.middleware.cors import CORSMiddleware
+ from fastapi.responses import JSONResponse, Response, StreamingResponse
+ from starlette.datastructures import State
+ from starlette.routing import Mount
+ from typing_extensions import assert_never
+
+ import vllm.envs as envs
+ from vllm.config import ModelConfig
+ from vllm.engine.arg_utils import AsyncEngineArgs
+ # from vllm.engine.async_llm_engine import AsyncLLMEngine  # type: ignore
+ from ipex_llm.vllm.cpu.engine import IPEXLLMAsyncLLMEngine as AsyncLLMEngine
+ from vllm.engine.multiprocessing.client import MQLLMEngineClient
+ # from vllm.engine.multiprocessing.engine import run_mp_engine
+ from ipex_llm.vllm.cpu.engine import run_mp_engine
+ from vllm.engine.protocol import EngineClient
+ from vllm.entrypoints.chat_utils import load_chat_template
+ from vllm.entrypoints.launcher import serve_http
+ from vllm.entrypoints.logger import RequestLogger
+ from vllm.entrypoints.openai.cli_args import (make_arg_parser,
+                                               validate_parsed_serve_args)
+ from vllm.entrypoints.openai.serving_engine import OpenAIServing
+ # yapf conflicts with isort for this block
+ # yapf: disable
+ from vllm.entrypoints.openai.protocol import (ChatCompletionRequest,
+                                               ChatCompletionResponse,
+                                               CompletionRequest,
+                                               CompletionResponse,
+                                               DetokenizeRequest,
+                                               DetokenizeResponse,
+                                               EmbeddingRequest,
+                                               EmbeddingResponse,
+                                               EmbeddingResponseData,
+                                               ErrorResponse,
+                                               LoadLoraAdapterRequest,
+                                               PoolingRequest, PoolingResponse,
+                                               ScoreRequest, ScoreResponse,
+                                               TokenizeRequest,
+                                               TokenizeResponse,
+                                               UnloadLoraAdapterRequest)
+ # yapf: enable
+ from vllm.entrypoints.openai.serving_chat import OpenAIServingChat
+ from vllm.entrypoints.openai.serving_completion import OpenAIServingCompletion
+ from vllm.entrypoints.openai.serving_embedding import OpenAIServingEmbedding
+ # from vllm.entrypoints.openai.serving_engine import BaseModelPath, OpenAIServing
+ from vllm.entrypoints.openai.serving_models import (BaseModelPath,
+                                                     OpenAIServingModels)
+
+ from vllm.entrypoints.openai.serving_pooling import OpenAIServingPooling
+ from vllm.entrypoints.openai.serving_score import OpenAIServingScores
+ from vllm.entrypoints.openai.serving_tokenization import (
+     OpenAIServingTokenization)
+ from vllm.entrypoints.openai.tool_parsers import ToolParserManager
+ from vllm.entrypoints.utils import with_cancellation
+ from vllm.logger import init_logger
+ from vllm.usage.usage_lib import UsageContext
+ from vllm.utils import (FlexibleArgumentParser, get_open_zmq_ipc_path,
+                         is_valid_ipv6_address, set_ulimit)
+ from vllm.version import __version__ as VLLM_VERSION
+
+ TIMEOUT_KEEP_ALIVE = 5  # seconds
+
+ prometheus_multiproc_dir: tempfile.TemporaryDirectory
+
+ # Cannot use __name__ (https://github.com/vllm-project/vllm/pull/4765)
+ logger = init_logger('vllm.entrypoints.openai.api_server')
+
+ _running_tasks: Set[asyncio.Task] = set()
+
+
+ @asynccontextmanager
+ async def lifespan(app: FastAPI):
+     try:
+         if app.state.log_stats:
+             engine_client: EngineClient = app.state.engine_client
+
+             async def _force_log():
+                 while True:
+                     await asyncio.sleep(10.)
+                     await engine_client.do_log_stats()
+
+             task = asyncio.create_task(_force_log())
+             _running_tasks.add(task)
+             task.add_done_callback(_running_tasks.remove)
+         else:
+             task = None
+         try:
+             yield
+         finally:
+             if task is not None:
+                 task.cancel()
+     finally:
+         # Ensure app state including engine ref is gc'd
+         del app.state
+
+
+ @asynccontextmanager
+ async def build_async_engine_client(
+         args: Namespace) -> AsyncIterator[EngineClient]:
+
+     # Context manager to handle engine_client lifecycle
+     # Ensures everything is shutdown and cleaned up on error/exit
+     engine_args = AsyncEngineArgs.from_cli_args(args)
+
+     async with build_async_engine_client_from_engine_args(
+             engine_args, args.disable_frontend_multiprocessing, args.load_in_low_bit) as engine:
+         yield engine
+
+
+ @asynccontextmanager
+ async def build_async_engine_client_from_engine_args(
+     engine_args: AsyncEngineArgs,
+     disable_frontend_multiprocessing: bool = False,
+     load_in_low_bit: str = "sym_int4",
+ ) -> AsyncIterator[EngineClient]:
+     """
+     Create EngineClient, either:
+         - in-process using the AsyncLLMEngine Directly
+         - multiprocess using AsyncLLMEngine RPC
+
+     Returns the Client or None if the creation failed.
+     """
+
+     # Fall back
+     # TODO: fill out feature matrix.
+     if (MQLLMEngineClient.is_unsupported_config(engine_args)
+             or envs.VLLM_USE_V1 or disable_frontend_multiprocessing):
+         engine_config = engine_args.create_engine_config(
+             UsageContext.OPENAI_API_SERVER)
+         uses_ray = getattr(AsyncLLMEngine._get_executor_cls(engine_config),
+                            "uses_ray", False)
+
+         build_engine = partial(AsyncLLMEngine.from_engine_args,
+                                load_in_low_bit=load_in_low_bit,
+                                engine_args=engine_args,
+                                engine_config=engine_config,
+                                usage_context=UsageContext.OPENAI_API_SERVER)
+         if uses_ray:
+             # Must run in main thread with ray for its signal handlers to work
+             engine_client = build_engine()
+         else:
+             engine_client = await asyncio.get_running_loop().run_in_executor(
+                 None, build_engine)
+
+         yield engine_client
+         if hasattr(engine_client, "shutdown"):
+             engine_client.shutdown()
+         return
+
+     # Otherwise, use the multiprocessing AsyncLLMEngine.
+     else:
+         if "PROMETHEUS_MULTIPROC_DIR" not in os.environ:
+             # Make TemporaryDirectory for prometheus multiprocessing
+             # Note: global TemporaryDirectory will be automatically
+             #   cleaned up upon exit.
+             global prometheus_multiproc_dir
+             prometheus_multiproc_dir = tempfile.TemporaryDirectory()
+             os.environ[
+                 "PROMETHEUS_MULTIPROC_DIR"] = prometheus_multiproc_dir.name
+         else:
+             logger.warning(
+                 "Found PROMETHEUS_MULTIPROC_DIR was set by user. "
+                 "This directory must be wiped between vLLM runs or "
+                 "you will find inaccurate metrics. Unset the variable "
+                 "and vLLM will properly handle cleanup.")
+
+         # Select random path for IPC.
+         ipc_path = get_open_zmq_ipc_path()
+         logger.debug("Multiprocessing frontend to use %s for IPC Path.",
+                      ipc_path)
+
+         # Start RPCServer in separate process (holds the LLMEngine).
+         # the current process might have CUDA context,
+         # so we need to spawn a new process
+         context = multiprocessing.get_context("spawn")
+
+         # The Process can raise an exception during startup, which may
+         # not actually result in an exitcode being reported. As a result
+         # we use a shared variable to communicate the information.
+         engine_alive = multiprocessing.Value('b', True, lock=False)
+         engine_process = context.Process(target=run_mp_engine,
+                                          args=(engine_args,
+                                                UsageContext.OPENAI_API_SERVER,
+                                                ipc_path, load_in_low_bit, engine_alive))
+         engine_process.start()
+         engine_pid = engine_process.pid
+         assert engine_pid is not None, "Engine process failed to start."
+         logger.info("Started engine process with PID %d", engine_pid)
+
+         def _cleanup_ipc_path():
+             socket_path = ipc_path.replace("ipc://", "")
+             if os.path.exists(socket_path):
+                 os.remove(socket_path)
+
+         # Ensure we clean up the local IPC socket file on exit.
+         atexit.register(_cleanup_ipc_path)
+
+         # Build RPCClient, which conforms to EngineClient Protocol.
+         engine_config = engine_args.create_engine_config()
+         build_client = partial(MQLLMEngineClient, ipc_path, engine_config,
+                                engine_pid)
+         mq_engine_client = await asyncio.get_running_loop().run_in_executor(
+             None, build_client)
+         try:
+             while True:
+                 try:
+                     await mq_engine_client.setup()
+                     break
+                 except TimeoutError:
+                     if (not engine_process.is_alive()
+                             or not engine_alive.value):
+                         raise RuntimeError(
+                             "Engine process failed to start. See stack "
+                             "trace for the root cause.") from None
+
+             yield mq_engine_client  # type: ignore[misc]
+         finally:
+             # Ensure rpc server process was terminated
+             engine_process.terminate()
+
+             # Close all open connections to the backend
+             mq_engine_client.close()
+
+             # Wait for engine process to join
+             engine_process.join(4)
+             if engine_process.exitcode is None:
+                 # Kill if taking longer than 5 seconds to stop
+                 engine_process.kill()
+
+             # Lazy import for prometheus multiprocessing.
+             # We need to set PROMETHEUS_MULTIPROC_DIR environment variable
+             # before prometheus_client is imported.
+             # See https://prometheus.github.io/client_python/multiprocess/
+             from prometheus_client import multiprocess
+             multiprocess.mark_process_dead(engine_process.pid)
+
+
+ router = APIRouter()
+
+
+ def mount_metrics(app: FastAPI):
+     # Lazy import for prometheus multiprocessing.
+     # We need to set PROMETHEUS_MULTIPROC_DIR environment variable
+     # before prometheus_client is imported.
+     # See https://prometheus.github.io/client_python/multiprocess/
+     from prometheus_client import (CollectorRegistry, make_asgi_app,
+                                    multiprocess)
+
+     prometheus_multiproc_dir_path = os.getenv("PROMETHEUS_MULTIPROC_DIR", None)
+     if prometheus_multiproc_dir_path is not None:
+         logger.debug("vLLM to use %s as PROMETHEUS_MULTIPROC_DIR",
+                      prometheus_multiproc_dir_path)
+         registry = CollectorRegistry()
+         multiprocess.MultiProcessCollector(registry)
+
+         # Add prometheus asgi middleware to route /metrics requests
+         metrics_route = Mount("/metrics", make_asgi_app(registry=registry))
+     else:
+         # Add prometheus asgi middleware to route /metrics requests
+         metrics_route = Mount("/metrics", make_asgi_app())
+
+     # Workaround for 307 Redirect for /metrics
+     metrics_route.path_regex = re.compile("^/metrics(?P<path>.*)$")
+     app.routes.append(metrics_route)
+
+
+ def base(request: Request) -> OpenAIServing:
+     # Reuse the existing instance
+     return tokenization(request)
+
+
+ def chat(request: Request) -> Optional[OpenAIServingChat]:
+     return request.app.state.openai_serving_chat
+
+
+ def completion(request: Request) -> Optional[OpenAIServingCompletion]:
+     return request.app.state.openai_serving_completion
+
+
+ def pooling(request: Request) -> Optional[OpenAIServingPooling]:
+     return request.app.state.openai_serving_pooling
+
+
+ def embedding(request: Request) -> Optional[OpenAIServingEmbedding]:
+     return request.app.state.openai_serving_embedding
+
+
+ def score(request: Request) -> Optional[OpenAIServingScores]:
+     return request.app.state.openai_serving_scores
+
+
+ def tokenization(request: Request) -> OpenAIServingTokenization:
+     return request.app.state.openai_serving_tokenization
+
+
+ def engine_client(request: Request) -> EngineClient:
+     return request.app.state.engine_client
+
+
+ @router.get("/health")
+ async def health(raw_request: Request) -> Response:
+     """Health check."""
+     await engine_client(raw_request).check_health()
+     return Response(status_code=200)
+
+
+ @router.post("/tokenize")
+ @with_cancellation
+ async def tokenize(request: TokenizeRequest, raw_request: Request):
+     handler = tokenization(raw_request)
+
+     generator = await handler.create_tokenize(request, raw_request)
+     if isinstance(generator, ErrorResponse):
+         return JSONResponse(content=generator.model_dump(),
+                             status_code=generator.code)
+     elif isinstance(generator, TokenizeResponse):
+         return JSONResponse(content=generator.model_dump())
+
+     assert_never(generator)
+
+
+ @router.post("/detokenize")
+ @with_cancellation
+ async def detokenize(request: DetokenizeRequest, raw_request: Request):
+     handler = tokenization(raw_request)
+
+     generator = await handler.create_detokenize(request, raw_request)
+     if isinstance(generator, ErrorResponse):
+         return JSONResponse(content=generator.model_dump(),
+                             status_code=generator.code)
+     elif isinstance(generator, DetokenizeResponse):
+         return JSONResponse(content=generator.model_dump())
+
+     assert_never(generator)
+
+
+ @router.get("/v1/models")
+ async def show_available_models(raw_request: Request):
+     handler = base(raw_request)
+
+     models = await handler.show_available_models()
+     return JSONResponse(content=models.model_dump())
+
+
+ @router.get("/version")
+ async def show_version():
+     ver = {"version": VLLM_VERSION}
+     return JSONResponse(content=ver)
+
+
+ @router.post("/v1/chat/completions")
+ @with_cancellation
+ async def create_chat_completion(request: ChatCompletionRequest,
+                                  raw_request: Request):
+     handler = chat(raw_request)
+     if handler is None:
+         return base(raw_request).create_error_response(
+             message="The model does not support Chat Completions API")
+
+     generator = await handler.create_chat_completion(request, raw_request)
+
+     if isinstance(generator, ErrorResponse):
+         return JSONResponse(content=generator.model_dump(),
+                             status_code=generator.code)
+
+     elif isinstance(generator, ChatCompletionResponse):
+         return JSONResponse(content=generator.model_dump())
+
+     return StreamingResponse(content=generator, media_type="text/event-stream")
+
+
+ @router.post("/v1/completions")
+ @with_cancellation
+ async def create_completion(request: CompletionRequest, raw_request: Request):
+     handler = completion(raw_request)
+     if handler is None:
+         return base(raw_request).create_error_response(
+             message="The model does not support Completions API")
+
+     generator = await handler.create_completion(request, raw_request)
+     if isinstance(generator, ErrorResponse):
+         return JSONResponse(content=generator.model_dump(),
+                             status_code=generator.code)
+     elif isinstance(generator, CompletionResponse):
+         return JSONResponse(content=generator.model_dump())
+
+     return StreamingResponse(content=generator, media_type="text/event-stream")
+
+
+ @router.post("/v1/embeddings")
+ @with_cancellation
+ async def create_embedding(request: EmbeddingRequest, raw_request: Request):
+     handler = embedding(raw_request)
+     if handler is None:
+         fallback_handler = pooling(raw_request)
+         if fallback_handler is None:
+             return base(raw_request).create_error_response(
+                 message="The model does not support Embeddings API")
+
+         logger.warning(
+             "Embeddings API will become exclusive to embedding models "
+             "in a future release. To return the hidden states directly, "
+             "use the Pooling API (`/pooling`) instead.")
+
+         res = await fallback_handler.create_pooling(request, raw_request)
+         if isinstance(res, PoolingResponse):
+             generator = EmbeddingResponse(
+                 id=res.id,
+                 object=res.object,
+                 created=res.created,
+                 model=res.model,
+                 data=[
+                     EmbeddingResponseData(
+                         index=d.index,
+                         embedding=d.data,  # type: ignore
+                     ) for d in res.data
+                 ],
+                 usage=res.usage,
+             )
+         else:
+             generator = res
+     else:
+         generator = await handler.create_embedding(request, raw_request)
+
+     if isinstance(generator, ErrorResponse):
+         return JSONResponse(content=generator.model_dump(),
+                             status_code=generator.code)
+     elif isinstance(generator, EmbeddingResponse):
+         return JSONResponse(content=generator.model_dump())
+
+     assert_never(generator)
+
+
+ @router.post("/pooling")
+ @with_cancellation
+ async def create_pooling(request: PoolingRequest, raw_request: Request):
+     handler = pooling(raw_request)
+     if handler is None:
+         return base(raw_request).create_error_response(
+             message="The model does not support Pooling API")
+
+     generator = await handler.create_pooling(request, raw_request)
+     if isinstance(generator, ErrorResponse):
+         return JSONResponse(content=generator.model_dump(),
+                             status_code=generator.code)
+     elif isinstance(generator, PoolingResponse):
+         return JSONResponse(content=generator.model_dump())
+
+     assert_never(generator)
+
+
+ @router.post("/score")
+ @with_cancellation
+ async def create_score(request: ScoreRequest, raw_request: Request):
+     handler = score(raw_request)
+     if handler is None:
+         return base(raw_request).create_error_response(
+             message="The model does not support Score API")
+
+     generator = await handler.create_score(request, raw_request)
+     if isinstance(generator, ErrorResponse):
+         return JSONResponse(content=generator.model_dump(),
+                             status_code=generator.code)
+     elif isinstance(generator, ScoreResponse):
+         return JSONResponse(content=generator.model_dump())
+
+     assert_never(generator)
+
+
+ @router.post("/v1/score")
+ @with_cancellation
+ async def create_score_v1(request: ScoreRequest, raw_request: Request):
+     logger.warning(
+         "To indicate that Score API is not part of standard OpenAI API, we "
+         "have moved it to `/score`. Please update your client accordingly.")
+
+     return await create_score(request, raw_request)
+
+
+ if envs.VLLM_TORCH_PROFILER_DIR:
+     logger.warning(
+         "Torch Profiler is enabled in the API server. This should ONLY be "
+         "used for local development!")
+
+     @router.post("/start_profile")
+     async def start_profile(raw_request: Request):
+         logger.info("Starting profiler...")
+         await engine_client(raw_request).start_profile()
+         logger.info("Profiler started.")
+         return Response(status_code=200)
+
+     @router.post("/stop_profile")
+     async def stop_profile(raw_request: Request):
+         logger.info("Stopping profiler...")
+         await engine_client(raw_request).stop_profile()
+         logger.info("Profiler stopped.")
+         return Response(status_code=200)
+
+
+ if envs.VLLM_ALLOW_RUNTIME_LORA_UPDATING:
+     logger.warning(
+         "Lora dynamic loading & unloading is enabled in the API server. "
+         "This should ONLY be used for local development!")
+
+     @router.post("/v1/load_lora_adapter")
+     async def load_lora_adapter(request: LoadLoraAdapterRequest,
+                                 raw_request: Request):
+         for route in [chat, completion, embedding]:
+             handler = route(raw_request)
+             if handler is not None:
+                 response = await handler.load_lora_adapter(request)
+                 if isinstance(response, ErrorResponse):
+                     return JSONResponse(content=response.model_dump(),
+                                         status_code=response.code)
+
+         return Response(status_code=200, content=response)
+
+     @router.post("/v1/unload_lora_adapter")
+     async def unload_lora_adapter(request: UnloadLoraAdapterRequest,
+                                   raw_request: Request):
+         for route in [chat, completion, embedding]:
+             handler = route(raw_request)
+             if handler is not None:
+                 response = await handler.unload_lora_adapter(request)
+                 if isinstance(response, ErrorResponse):
+                     return JSONResponse(content=response.model_dump(),
+                                         status_code=response.code)
+
+         return Response(status_code=200, content=response)
+
+
+ def build_app(args: Namespace) -> FastAPI:
+     if args.disable_fastapi_docs:
+         app = FastAPI(openapi_url=None,
+                       docs_url=None,
+                       redoc_url=None,
+                       lifespan=lifespan)
+     else:
+         app = FastAPI(lifespan=lifespan)
+     app.include_router(router)
+     app.root_path = args.root_path
+
+     mount_metrics(app)
+
+     app.add_middleware(
+         CORSMiddleware,
+         allow_origins=args.allowed_origins,
+         allow_credentials=args.allow_credentials,
+         allow_methods=args.allowed_methods,
+         allow_headers=args.allowed_headers,
+     )
+
+     @app.exception_handler(RequestValidationError)
+     async def validation_exception_handler(_, exc):
+         err = ErrorResponse(message=str(exc),
+                             type="BadRequestError",
+                             code=HTTPStatus.BAD_REQUEST)
+         return JSONResponse(err.model_dump(),
+                             status_code=HTTPStatus.BAD_REQUEST)
+
+     if token := envs.VLLM_API_KEY or args.api_key:
+
+         @app.middleware("http")
+         async def authentication(request: Request, call_next):
+             if request.method == "OPTIONS":
+                 return await call_next(request)
+             url_path = request.url.path
+             if app.root_path and url_path.startswith(app.root_path):
+                 url_path = url_path[len(app.root_path):]
+             if not url_path.startswith("/v1"):
+                 return await call_next(request)
+             if request.headers.get("Authorization") != "Bearer " + token:
+                 return JSONResponse(content={"error": "Unauthorized"},
+                                     status_code=401)
+             return await call_next(request)
+
+     if args.enable_request_id_headers:
+         logger.warning(
+             "CAUTION: Enabling X-Request-Id headers in the API Server. "
+             "This can harm performance at high QPS.")
+
+         @app.middleware("http")
+         async def add_request_id(request: Request, call_next):
+             request_id = request.headers.get(
+                 "X-Request-Id") or uuid.uuid4().hex
+             response = await call_next(request)
+             response.headers["X-Request-Id"] = request_id
+             return response
+
+     for middleware in args.middleware:
+         module_path, object_name = middleware.rsplit(".", 1)
+         imported = getattr(importlib.import_module(module_path), object_name)
+         if inspect.isclass(imported):
+             app.add_middleware(imported)
+         elif inspect.iscoroutinefunction(imported):
+             app.middleware("http")(imported)
+         else:
+             raise ValueError(f"Invalid middleware {middleware}. "
+                              f"Must be a function or a class.")
+
+     return app
+
+
+ def init_app_state(
+     engine_client: EngineClient,
+     model_config: ModelConfig,
+     state: State,
+     args: Namespace,
+ ) -> None:
+     if args.served_model_name is not None:
+         served_model_names = args.served_model_name
+     else:
+         served_model_names = [args.model]
+
+     if args.disable_log_requests:
+         request_logger = None
+     else:
+         request_logger = RequestLogger(max_log_len=args.max_log_len)
+
+     base_model_paths = [
+         BaseModelPath(name=name, model_path=args.model)
+         for name in served_model_names
+     ]
+
+     state.engine_client = engine_client
+     state.log_stats = not args.disable_log_stats
+
+     resolved_chat_template = load_chat_template(args.chat_template)
+     logger.info("Using supplied chat template:\n%s", resolved_chat_template)
+
+     state.openai_serving_chat = OpenAIServingChat(
+         engine_client,
+         model_config,
+         base_model_paths,
+         args.response_role,
+         lora_modules=args.lora_modules,
+         prompt_adapters=args.prompt_adapters,
+         request_logger=request_logger,
+         chat_template=resolved_chat_template,
+         chat_template_content_format=args.chat_template_content_format,
+         return_tokens_as_token_ids=args.return_tokens_as_token_ids,
+         enable_auto_tools=args.enable_auto_tool_choice,
+         tool_parser=args.tool_call_parser,
+         enable_prompt_tokens_details=args.enable_prompt_tokens_details,
+     ) if model_config.runner_type == "generate" else None
+     state.openai_serving_completion = OpenAIServingCompletion(
+         engine_client,
+         model_config,
+         base_model_paths,
+         lora_modules=args.lora_modules,
+         prompt_adapters=args.prompt_adapters,
+         request_logger=request_logger,
+         return_tokens_as_token_ids=args.return_tokens_as_token_ids,
+     ) if model_config.runner_type == "generate" else None
+     state.openai_serving_pooling = OpenAIServingPooling(
+         engine_client,
+         model_config,
+         base_model_paths,
+         request_logger=request_logger,
+         chat_template=resolved_chat_template,
+         chat_template_content_format=args.chat_template_content_format,
+     ) if model_config.runner_type == "pooling" else None
+     state.openai_serving_embedding = OpenAIServingEmbedding(
+         engine_client,
+         model_config,
+         base_model_paths,
+         request_logger=request_logger,
+         chat_template=resolved_chat_template,
+         chat_template_content_format=args.chat_template_content_format,
+     ) if model_config.task == "embed" else None
+     state.openai_serving_scores = OpenAIServingScores(
+         engine_client,
+         model_config,
+         base_model_paths,
+         request_logger=request_logger
+     ) if model_config.task == "score" else None
+     state.openai_serving_tokenization = OpenAIServingTokenization(
+         engine_client,
+         model_config,
+         base_model_paths,
+         lora_modules=args.lora_modules,
+         request_logger=request_logger,
+         chat_template=resolved_chat_template,
+         chat_template_content_format=args.chat_template_content_format,
+     )
+
+
+ def create_server_socket(addr: Tuple[str, int]) -> socket.socket:
+     family = socket.AF_INET
+     if is_valid_ipv6_address(addr[0]):
+         family = socket.AF_INET6
+
+     sock = socket.socket(family=family, type=socket.SOCK_STREAM)
+     sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+     sock.bind(addr)
+
+     return sock
+
+
+ async def run_server(args, **uvicorn_kwargs) -> None:
+     logger.info("vLLM API server version %s", VLLM_VERSION)
+     logger.info("args: %s", args)
+
+     if args.tool_parser_plugin and len(args.tool_parser_plugin) > 3:
+         ToolParserManager.import_tool_parser(args.tool_parser_plugin)
+
+     valide_tool_parses = ToolParserManager.tool_parsers.keys()
+     if args.enable_auto_tool_choice \
+             and args.tool_call_parser not in valide_tool_parses:
+         raise KeyError(f"invalid tool call parser: {args.tool_call_parser} "
+                        f"(chose from {{ {','.join(valide_tool_parses)} }})")
+
+     # workaround to make sure that we bind the port before the engine is set up.
+     # This avoids race conditions with ray.
+     # see https://github.com/vllm-project/vllm/issues/8204
+     sock_addr = (args.host or "", args.port)
+     sock = create_server_socket(sock_addr)
+
+     # workaround to avoid footguns where uvicorn drops requests with too
+     # many concurrent requests active
+     set_ulimit()
+
+     def signal_handler(*_) -> None:
+         # Interrupt server on sigterm while initializing
+         raise KeyboardInterrupt("terminated")
+
+     signal.signal(signal.SIGTERM, signal_handler)
+
+     async with build_async_engine_client(args) as engine_client:
+         app = build_app(args)
+
+         model_config = await engine_client.get_model_config()
+         init_app_state(engine_client, model_config, app.state, args)
+
+         shutdown_task = await serve_http(
+             app,
+             host=args.host,
+             port=args.port,
+             log_level=args.uvicorn_log_level,
+             timeout_keep_alive=TIMEOUT_KEEP_ALIVE,
+             ssl_keyfile=args.ssl_keyfile,
+             ssl_certfile=args.ssl_certfile,
+             ssl_ca_certs=args.ssl_ca_certs,
+             ssl_cert_reqs=args.ssl_cert_reqs,
+             **uvicorn_kwargs,
+         )
+
+     # NB: Await server shutdown only after the backend context is exited
+     await shutdown_task
+
+     sock.close()
+
+
+ if __name__ == "__main__":
+     # NOTE(simon):
+     # This section should be in sync with vllm/scripts.py for CLI entrypoints.
+     parser = FlexibleArgumentParser(
+         description="vLLM OpenAI-Compatible RESTful API server.")
+     parser = make_arg_parser(parser)
+     parser.add_argument(
+         "--load-in-low-bit",
+         type=str,
+         default="sym_int4",
+         help="Low-bit quantization for IPEX-LLM models")
+     args = parser.parse_args()
+     validate_parsed_serve_args(args)
+
+     uvloop.run(run_server(args))
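
For reference, below is a minimal launch sketch that mirrors the __main__ block above. It is a sketch only: it assumes the new module is importable as ipex_llm.vllm.cpu.entrypoints.api_server (per the file list), and the model path and port are placeholders. The only IPEX-LLM-specific option is --load-in-low-bit, which the engine builders in this file forward to IPEXLLMAsyncLLMEngine / run_mp_engine.

import uvloop
from vllm.utils import FlexibleArgumentParser
from vllm.entrypoints.openai.cli_args import (make_arg_parser,
                                              validate_parsed_serve_args)
# Assumed module path for the new entrypoint; adjust if it lives elsewhere.
from ipex_llm.vllm.cpu.entrypoints.api_server import run_server

# Build the standard vLLM serve argument parser, plus the IPEX-LLM flag.
parser = make_arg_parser(FlexibleArgumentParser(description="IPEX-LLM vLLM server (sketch)"))
parser.add_argument("--load-in-low-bit", type=str, default="sym_int4")
args = parser.parse_args([
    "--model", "/path/to/model",        # placeholder local model path
    "--port", "8000",                   # placeholder port
    "--load-in-low-bit", "sym_int4",    # low-bit format consumed by the IPEX-LLM engine
])
validate_parsed_serve_args(args)
uvloop.run(run_server(args))            # binds the socket, builds the engine client, serves HTTP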