guidellm 0.4.0a21__py3-none-any.whl → 0.4.0a155__py3-none-any.whl

This diff compares the contents of two publicly released versions of this package as they appear in their public registry. It is provided for informational purposes only.

Note: this version of guidellm has been flagged as potentially problematic.

Files changed (116)
  1. guidellm/__init__.py +5 -2
  2. guidellm/__main__.py +451 -252
  3. guidellm/backends/__init__.py +33 -0
  4. guidellm/backends/backend.py +110 -0
  5. guidellm/backends/openai.py +355 -0
  6. guidellm/backends/response_handlers.py +455 -0
  7. guidellm/benchmark/__init__.py +53 -39
  8. guidellm/benchmark/benchmarker.py +148 -317
  9. guidellm/benchmark/entrypoints.py +466 -128
  10. guidellm/benchmark/output.py +517 -771
  11. guidellm/benchmark/profile.py +580 -280
  12. guidellm/benchmark/progress.py +568 -549
  13. guidellm/benchmark/scenarios/__init__.py +40 -0
  14. guidellm/benchmark/scenarios/chat.json +6 -0
  15. guidellm/benchmark/scenarios/rag.json +6 -0
  16. guidellm/benchmark/schemas.py +2085 -0
  17. guidellm/data/__init__.py +28 -4
  18. guidellm/data/collators.py +16 -0
  19. guidellm/data/deserializers/__init__.py +53 -0
  20. guidellm/data/deserializers/deserializer.py +109 -0
  21. guidellm/data/deserializers/file.py +222 -0
  22. guidellm/data/deserializers/huggingface.py +94 -0
  23. guidellm/data/deserializers/memory.py +192 -0
  24. guidellm/data/deserializers/synthetic.py +346 -0
  25. guidellm/data/loaders.py +145 -0
  26. guidellm/data/preprocessors/__init__.py +25 -0
  27. guidellm/data/preprocessors/formatters.py +412 -0
  28. guidellm/data/preprocessors/mappers.py +198 -0
  29. guidellm/data/preprocessors/preprocessor.py +29 -0
  30. guidellm/data/processor.py +30 -0
  31. guidellm/data/schemas.py +13 -0
  32. guidellm/data/utils/__init__.py +10 -0
  33. guidellm/data/utils/dataset.py +94 -0
  34. guidellm/data/utils/functions.py +18 -0
  35. guidellm/extras/__init__.py +4 -0
  36. guidellm/extras/audio.py +215 -0
  37. guidellm/extras/vision.py +242 -0
  38. guidellm/logger.py +2 -2
  39. guidellm/mock_server/__init__.py +8 -0
  40. guidellm/mock_server/config.py +84 -0
  41. guidellm/mock_server/handlers/__init__.py +17 -0
  42. guidellm/mock_server/handlers/chat_completions.py +280 -0
  43. guidellm/mock_server/handlers/completions.py +280 -0
  44. guidellm/mock_server/handlers/tokenizer.py +142 -0
  45. guidellm/mock_server/models.py +510 -0
  46. guidellm/mock_server/server.py +168 -0
  47. guidellm/mock_server/utils.py +302 -0
  48. guidellm/preprocess/dataset.py +23 -26
  49. guidellm/presentation/builder.py +2 -2
  50. guidellm/presentation/data_models.py +25 -21
  51. guidellm/presentation/injector.py +2 -3
  52. guidellm/scheduler/__init__.py +65 -26
  53. guidellm/scheduler/constraints.py +1035 -0
  54. guidellm/scheduler/environments.py +252 -0
  55. guidellm/scheduler/scheduler.py +140 -368
  56. guidellm/scheduler/schemas.py +272 -0
  57. guidellm/scheduler/strategies.py +519 -0
  58. guidellm/scheduler/worker.py +391 -420
  59. guidellm/scheduler/worker_group.py +707 -0
  60. guidellm/schemas/__init__.py +31 -0
  61. guidellm/schemas/info.py +159 -0
  62. guidellm/schemas/request.py +216 -0
  63. guidellm/schemas/response.py +119 -0
  64. guidellm/schemas/stats.py +228 -0
  65. guidellm/{config.py → settings.py} +32 -21
  66. guidellm/utils/__init__.py +95 -8
  67. guidellm/utils/auto_importer.py +98 -0
  68. guidellm/utils/cli.py +46 -2
  69. guidellm/utils/console.py +183 -0
  70. guidellm/utils/encoding.py +778 -0
  71. guidellm/utils/functions.py +134 -0
  72. guidellm/utils/hf_datasets.py +1 -2
  73. guidellm/utils/hf_transformers.py +4 -4
  74. guidellm/utils/imports.py +9 -0
  75. guidellm/utils/messaging.py +1118 -0
  76. guidellm/utils/mixins.py +115 -0
  77. guidellm/utils/pydantic_utils.py +411 -0
  78. guidellm/utils/random.py +3 -4
  79. guidellm/utils/registry.py +220 -0
  80. guidellm/utils/singleton.py +133 -0
  81. guidellm/{objects → utils}/statistics.py +341 -247
  82. guidellm/utils/synchronous.py +159 -0
  83. guidellm/utils/text.py +163 -50
  84. guidellm/utils/typing.py +41 -0
  85. guidellm/version.py +1 -1
  86. {guidellm-0.4.0a21.dist-info → guidellm-0.4.0a155.dist-info}/METADATA +33 -10
  87. guidellm-0.4.0a155.dist-info/RECORD +96 -0
  88. guidellm/backend/__init__.py +0 -23
  89. guidellm/backend/backend.py +0 -259
  90. guidellm/backend/openai.py +0 -705
  91. guidellm/backend/response.py +0 -136
  92. guidellm/benchmark/aggregator.py +0 -760
  93. guidellm/benchmark/benchmark.py +0 -837
  94. guidellm/benchmark/scenario.py +0 -104
  95. guidellm/data/prideandprejudice.txt.gz +0 -0
  96. guidellm/dataset/__init__.py +0 -22
  97. guidellm/dataset/creator.py +0 -213
  98. guidellm/dataset/entrypoints.py +0 -42
  99. guidellm/dataset/file.py +0 -92
  100. guidellm/dataset/hf_datasets.py +0 -62
  101. guidellm/dataset/in_memory.py +0 -132
  102. guidellm/dataset/synthetic.py +0 -287
  103. guidellm/objects/__init__.py +0 -18
  104. guidellm/objects/pydantic.py +0 -89
  105. guidellm/request/__init__.py +0 -18
  106. guidellm/request/loader.py +0 -284
  107. guidellm/request/request.py +0 -79
  108. guidellm/request/types.py +0 -10
  109. guidellm/scheduler/queues.py +0 -25
  110. guidellm/scheduler/result.py +0 -155
  111. guidellm/scheduler/strategy.py +0 -495
  112. guidellm-0.4.0a21.dist-info/RECORD +0 -62
  113. {guidellm-0.4.0a21.dist-info → guidellm-0.4.0a155.dist-info}/WHEEL +0 -0
  114. {guidellm-0.4.0a21.dist-info → guidellm-0.4.0a155.dist-info}/entry_points.txt +0 -0
  115. {guidellm-0.4.0a21.dist-info → guidellm-0.4.0a155.dist-info}/licenses/LICENSE +0 -0
  116. {guidellm-0.4.0a21.dist-info → guidellm-0.4.0a155.dist-info}/top_level.txt +0 -0
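The file list reflects a wholesale reorganization rather than in-place edits: guidellm/backend/ is replaced by guidellm/backends/, guidellm/config.py becomes guidellm/settings.py, and guidellm/objects/statistics.py moves to guidellm/utils/statistics.py. For tooling that must import across both versions, a minimal compatibility shim might look like the sketch below; the old import path is confirmed by the deleted code further down, while the new path is an assumption inferred from the config.py → settings.py rename, not something this diff itself confirms.

# Hedged sketch: resolve guidellm settings across 0.4.0a21 and 0.4.0a155.
# The fallback path is taken from the deleted module below; the first path
# assumes the `settings` object kept its name through the rename.
try:
    from guidellm.settings import settings  # assumed 0.4.0a155 layout
except ImportError:
    from guidellm.config import settings  # 0.4.0a21 layout

The single largest removal is the old OpenAI backend implementation, guidellm/backend/openai.py, shown in full below.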
guidellm/backend/openai.py (deleted)
@@ -1,705 +0,0 @@
- import base64
- import copy
- import json
- import time
- from collections.abc import AsyncGenerator
- from pathlib import Path
- from typing import Any, Literal, Optional, Union
-
- import httpx
- from loguru import logger
- from PIL import Image
-
- from guidellm.backend.backend import Backend
- from guidellm.backend.response import (
-     RequestArgs,
-     ResponseSummary,
-     StreamingTextResponse,
- )
- from guidellm.config import settings
-
- __all__ = [
-     "CHAT_COMPLETIONS",
-     "CHAT_COMPLETIONS_PATH",
-     "MODELS",
-     "TEXT_COMPLETIONS",
-     "TEXT_COMPLETIONS_PATH",
-     "OpenAIHTTPBackend",
- ]
-
-
- TEXT_COMPLETIONS_PATH = "/v1/completions"
- CHAT_COMPLETIONS_PATH = "/v1/chat/completions"
-
- EndpointType = Literal["chat_completions", "models", "text_completions"]
- CHAT_COMPLETIONS: EndpointType = "chat_completions"
- MODELS: EndpointType = "models"
- TEXT_COMPLETIONS: EndpointType = "text_completions"
-
-
- @Backend.register("openai_http")
- class OpenAIHTTPBackend(Backend):
-     """
-     An HTTP-based backend implementation for requests to an OpenAI-compatible server.
-     For example, a vLLM server instance or requests to OpenAI's API.
-
-     :param target: The target URL string for the OpenAI server. ex: http://0.0.0.0:8000
-     :param model: The model to use for all requests on the target server.
-         If none is provided, the first available model will be used.
-     :param api_key: The API key to use for requests to the OpenAI server.
-         If provided, adds an Authorization header with the value
-         "Authorization: Bearer {api_key}".
-         If not provided, no Authorization header is added.
-     :param organization: The organization to use for requests to the OpenAI server.
-         For example, if set to "org_123", adds an OpenAI-Organization header with the
-         value "OpenAI-Organization: org_123".
-         If not provided, no OpenAI-Organization header is added.
-     :param project: The project to use for requests to the OpenAI server.
-         For example, if set to "project_123", adds an OpenAI-Project header with the
-         value "OpenAI-Project: project_123".
-         If not provided, no OpenAI-Project header is added.
-     :param timeout: The timeout to use for requests to the OpenAI server.
-         If not provided, the default timeout provided from settings is used.
-     :param http2: If True, uses HTTP/2 for requests to the OpenAI server.
-         Defaults to True.
-     :param follow_redirects: If True, the HTTP client will follow redirect responses.
-         If not provided, the default value from settings is used.
-     :param max_output_tokens: The maximum number of tokens to request for completions.
-         If not provided, the default maximum tokens provided from settings is used.
-     :param extra_query: Query parameters to include in requests to the OpenAI server.
-         If "chat_completions", "models", or "text_completions" are included as keys,
-         the values of these keys will be used as the parameters for the respective
-         endpoint.
-         If not provided, no extra query parameters are added.
-     :param extra_body: Body parameters to include in requests to the OpenAI server.
-         If "chat_completions", "models", or "text_completions" are included as keys,
-         the values of these keys will be included in the body for the respective
-         endpoint.
-         If not provided, no extra body parameters are added.
-     :param remove_from_body: Parameters that should be removed from the body of each
-         request.
-         If not provided, no parameters are removed from the body.
-     """
-
-     def __init__(
-         self,
-         target: Optional[str] = None,
-         model: Optional[str] = None,
-         api_key: Optional[str] = None,
-         organization: Optional[str] = None,
-         project: Optional[str] = None,
-         timeout: Optional[float] = None,
-         http2: Optional[bool] = True,
-         follow_redirects: Optional[bool] = None,
-         max_output_tokens: Optional[int] = None,
-         extra_query: Optional[dict] = None,
-         extra_body: Optional[dict] = None,
-         remove_from_body: Optional[list[str]] = None,
-         headers: Optional[dict] = None,
-         verify: Optional[bool] = None,
-     ):
-         super().__init__(type_="openai_http")
-         self._target = target or settings.openai.base_url
-
-         if not self._target:
-             raise ValueError("Target URL must be provided for OpenAI HTTP backend.")
-
-         if self._target.endswith("/v1") or self._target.endswith("/v1/"):
-             # backwards compatibility, strip v1 off
-             self._target = self._target[:-3]
-
-         if self._target.endswith("/"):
-             self._target = self._target[:-1]
-
-         self._model = model
-
-         # Start with default headers based on other params
-         default_headers: dict[str, str] = {}
-         api_key = api_key or settings.openai.api_key
-         bearer_token = settings.openai.bearer_token
-         if api_key:
-             default_headers["Authorization"] = f"Bearer {api_key}"
-         elif bearer_token:
-             default_headers["Authorization"] = bearer_token
-
-         self.organization = organization or settings.openai.organization
-         if self.organization:
-             default_headers["OpenAI-Organization"] = self.organization
-
-         self.project = project or settings.openai.project
-         if self.project:
-             default_headers["OpenAI-Project"] = self.project
-
-         # User-provided headers from kwargs or settings override defaults
-         merged_headers = default_headers.copy()
-         merged_headers.update(settings.openai.headers or {})
-         if headers:
-             merged_headers.update(headers)
-
-         # Remove headers with None values for backward compatibility and convenience
-         self.headers = {k: v for k, v in merged_headers.items() if v is not None}
-
-         self.timeout = timeout if timeout is not None else settings.request_timeout
-         self.http2 = http2 if http2 is not None else settings.request_http2
-         self.follow_redirects = (
-             follow_redirects
-             if follow_redirects is not None
-             else settings.request_follow_redirects
-         )
-         self.verify = verify if verify is not None else settings.openai.verify
-         self.max_output_tokens = (
-             max_output_tokens
-             if max_output_tokens is not None
-             else settings.openai.max_output_tokens
-         )
-         self.extra_query = extra_query
-         self.extra_body = extra_body
-         self.remove_from_body = remove_from_body
-         self._async_client: Optional[httpx.AsyncClient] = None
-
-     @property
-     def target(self) -> str:
-         """
-         :return: The target URL string for the OpenAI server.
-         """
-         return self._target
-
-     @property
-     def model(self) -> Optional[str]:
-         """
-         :return: The model to use for all requests on the target server.
-             If validate hasn't been called yet and no model was passed in,
-             this will be None until validate is called to set the default.
-         """
-         return self._model
-
-     @property
-     def info(self) -> dict[str, Any]:
-         """
-         :return: The information about the backend.
-         """
-         return {
-             "max_output_tokens": self.max_output_tokens,
-             "timeout": self.timeout,
-             "http2": self.http2,
-             "follow_redirects": self.follow_redirects,
-             "headers": self.headers,
-             "text_completions_path": TEXT_COMPLETIONS_PATH,
-             "chat_completions_path": CHAT_COMPLETIONS_PATH,
-         }
-
-     async def reset(self) -> None:
-         """
-         Reset the connection object. This is useful for backends that
-         reuse connections or have state that needs to be cleared.
-         For this backend, it closes the async client if it exists.
-         """
-         if self._async_client is not None:
-             await self._async_client.aclose()
-
-     async def check_setup(self):
-         """
-         Check if the backend is set up correctly and can be used for requests.
-         Specifically, if a model is not provided, it grabs the first available model.
-         If no models are available, raises a ValueError.
-         If a model is provided and not available, raises a ValueError.
-
-         :raises ValueError: If no models are available or the provided model is not found.
-         """
-         models = await self.available_models()
-         if not models:
-             raise ValueError(f"No models available for target: {self.target}")
-
-         if not self.model:
-             self._model = models[0]
-         elif self.model not in models:
-             raise ValueError(
-                 f"Model {self.model} not found in available models: "
-                 f"{models} for target: {self.target}"
-             )
-
-     async def prepare_multiprocessing(self):
-         """
-         Prepare the backend for use in a multiprocessing environment.
-         Clears out the sync and async clients to ensure they are re-initialized
-         for each process.
-         """
-         if self._async_client is not None:
-             await self._async_client.aclose()
-             self._async_client = None
-
-     async def available_models(self) -> list[str]:
-         """
-         Get the available models for the target server using the OpenAI models endpoint:
-         /v1/models
-         """
-         target = f"{self.target}/v1/models"
-         headers = self._headers()
-         params = self._params(MODELS)
-         response = await self._get_async_client().get(
-             target, headers=headers, params=params
-         )
-         response.raise_for_status()
-
-         models = []
-
-         for item in response.json()["data"]:
-             models.append(item["id"])
-
-         return models
-
-     async def text_completions(  # type: ignore[override]
-         self,
-         prompt: Union[str, list[str]],
-         request_id: Optional[str] = None,
-         prompt_token_count: Optional[int] = None,
-         output_token_count: Optional[int] = None,
-         **kwargs,
-     ) -> AsyncGenerator[Union[StreamingTextResponse, ResponseSummary], None]:
-         """
-         Generate text completions for the given prompt using the OpenAI
-         completions endpoint: /v1/completions.
-
-         :param prompt: The prompt (or list of prompts) to generate a completion for.
-             If a list is supplied, these are concatenated and run through the model
-             for a single prompt.
-         :param request_id: The unique identifier for the request, if any.
-             Added to logging statements and the response for tracking purposes.
-         :param prompt_token_count: The number of tokens measured in the prompt, if any.
-             Returned in the response stats for later analysis, if applicable.
-         :param output_token_count: If supplied, the number of tokens to enforce
-             generation of for the output for this request.
-         :param kwargs: Additional keyword arguments to pass with the request.
-         :return: An async generator that yields a StreamingTextResponse for start,
-             a StreamingTextResponse for each received iteration,
-             and a ResponseSummary for the final response.
-         """
-         logger.debug("{} invocation with args: {}", self.__class__.__name__, locals())
-
-         if isinstance(prompt, list):
-             raise ValueError(
-                 "List prompts (batching) is currently not supported for "
-                 f"text_completions OpenAI pathways. Received: {prompt}"
-             )
-
-         headers = self._headers()
-         params = self._params(TEXT_COMPLETIONS)
-         payload = self._completions_payload(
-             endpoint_type=TEXT_COMPLETIONS,
-             orig_kwargs=kwargs,
-             max_output_tokens=output_token_count,
-             prompt=prompt,
-         )
-
-         try:
-             async for resp in self._iterative_completions_request(
-                 type_="text_completions",
-                 request_id=request_id,
-                 request_prompt_tokens=prompt_token_count,
-                 request_output_tokens=output_token_count,
-                 headers=headers,
-                 params=params,
-                 payload=payload,
-             ):
-                 yield resp
-         except Exception as ex:
-             logger.error(
-                 "{} request with headers: {} and params: {} and payload: {} failed: {}",
-                 self.__class__.__name__,
-                 headers,
-                 params,
-                 payload,
-                 ex,
-             )
-             raise ex
-
-     async def chat_completions(  # type: ignore[override]
-         self,
-         content: Union[
-             str,
-             list[Union[str, dict[str, Union[str, dict[str, str]]], Path, Image.Image]],
-             Any,
-         ],
-         request_id: Optional[str] = None,
-         prompt_token_count: Optional[int] = None,
-         output_token_count: Optional[int] = None,
-         raw_content: bool = False,
-         **kwargs,
-     ) -> AsyncGenerator[Union[StreamingTextResponse, ResponseSummary], None]:
-         """
-         Generate chat completions for the given content using the OpenAI
-         chat completions endpoint: /v1/chat/completions.
-
-         :param content: The content (or list of content) to generate a completion for.
-             This supports any combination of text, images, and audio (model dependent).
-             Supported text only request examples:
-             content="Sample prompt", content=["Sample prompt", "Second prompt"],
-             content=[{"type": "text", "text": "Sample prompt"}].
-             Supported text and image request examples:
-             content=["Describe the image", PIL.Image.open("image.jpg")],
-             content=["Describe the image", Path("image.jpg")],
-             content=["Describe the image", {"type": "image_url",
-             "image_url": {"url": f"data:image/jpeg;base64,{base64_image}"}}].
-             Supported text and audio request examples:
-             content=["Transcribe the audio", Path("audio.wav")],
-             content=["Transcribe the audio", {"type": "input_audio",
-             "input_audio": {"data": f"{base64_bytes}", "format": "wav"}}].
-             Additionally, if raw_content=True then the content is passed directly to the
-             backend without any processing.
-         :param request_id: The unique identifier for the request, if any.
-             Added to logging statements and the response for tracking purposes.
-         :param prompt_token_count: The number of tokens measured in the prompt, if any.
-             Returned in the response stats for later analysis, if applicable.
-         :param output_token_count: If supplied, the number of tokens to enforce
-             generation of for the output for this request.
-         :param kwargs: Additional keyword arguments to pass with the request.
-         :return: An async generator that yields a StreamingTextResponse for start,
-             a StreamingTextResponse for each received iteration,
-             and a ResponseSummary for the final response.
-         """
-         logger.debug("{} invocation with args: {}", self.__class__.__name__, locals())
-         headers = self._headers()
-         params = self._params(CHAT_COMPLETIONS)
-         messages = (
-             content if raw_content else self._create_chat_messages(content=content)
-         )
-         payload = self._completions_payload(
-             endpoint_type=CHAT_COMPLETIONS,
-             orig_kwargs=kwargs,
-             max_output_tokens=output_token_count,
-             messages=messages,
-         )
-
-         try:
-             async for resp in self._iterative_completions_request(
-                 type_="chat_completions",
-                 request_id=request_id,
-                 request_prompt_tokens=prompt_token_count,
-                 request_output_tokens=output_token_count,
-                 headers=headers,
-                 params=params,
-                 payload=payload,
-             ):
-                 yield resp
-         except Exception as ex:
-             logger.error(
-                 "{} request with headers: {} and params: {} and payload: {} failed: {}",
-                 self.__class__.__name__,
-                 headers,
-                 params,
-                 payload,
-                 ex,
-             )
-             raise ex
-
-     def _get_async_client(self) -> httpx.AsyncClient:
-         """
-         Get the async HTTP client for making requests.
-         If the client has not been created yet, it will create one.
-
-         :return: The async HTTP client.
-         """
-         if self._async_client is None or self._async_client.is_closed:
-             client = httpx.AsyncClient(
-                 http2=self.http2,
-                 timeout=self.timeout,
-                 follow_redirects=self.follow_redirects,
-                 verify=self.verify,
-             )
-             self._async_client = client
-         else:
-             client = self._async_client
-
-         return client
-
-     def _headers(self) -> dict[str, str]:
-         headers = {
-             "Content-Type": "application/json",
-         }
-         headers.update(self.headers)
-         return headers
-
-     def _params(self, endpoint_type: EndpointType) -> dict[str, str]:
-         if self.extra_query is None:
-             return {}
-
-         if (
-             CHAT_COMPLETIONS in self.extra_query
-             or MODELS in self.extra_query
-             or TEXT_COMPLETIONS in self.extra_query
-         ):
-             return self.extra_query.get(endpoint_type, {})
-
-         return self.extra_query
-
-     def _extra_body(self, endpoint_type: EndpointType) -> dict[str, Any]:
-         if self.extra_body is None:
-             return {}
-
-         if (
-             CHAT_COMPLETIONS in self.extra_body
-             or MODELS in self.extra_body
-             or TEXT_COMPLETIONS in self.extra_body
-         ):
-             return copy.deepcopy(self.extra_body.get(endpoint_type, {}))
-
-         return copy.deepcopy(self.extra_body)
-
-     def _completions_payload(
-         self,
-         endpoint_type: EndpointType,
-         orig_kwargs: Optional[dict],
-         max_output_tokens: Optional[int],
-         **kwargs,
-     ) -> dict:
-         payload = self._extra_body(endpoint_type)
-         payload.update(orig_kwargs or {})
-         payload.update(kwargs)
-         payload["model"] = self.model
-         payload["stream"] = True
-         payload["stream_options"] = {
-             "include_usage": True,
-         }
-
-         if max_output_tokens or self.max_output_tokens:
-             logger.debug(
-                 "{} adding payload args for setting output_token_count: {}",
-                 self.__class__.__name__,
-                 max_output_tokens or self.max_output_tokens,
-             )
-             payload["max_tokens"] = max_output_tokens or self.max_output_tokens
-             payload["max_completion_tokens"] = payload["max_tokens"]
-
-             if max_output_tokens:
-                 # only set stop and ignore_eos if max_output_tokens is set at the
-                 # request level; otherwise the instance value is just a cap to stay below
-                 payload["stop"] = None
-                 payload["ignore_eos"] = True
-
-         if self.remove_from_body:
-             for key in self.remove_from_body:
-                 payload.pop(key, None)
-
-         return payload
-
-     @staticmethod
-     def _create_chat_messages(
-         content: Union[
-             str,
-             list[Union[str, dict[str, Union[str, dict[str, str]]], Path, Image.Image]],
-             Any,
-         ],
-     ) -> list[dict]:
-         if isinstance(content, str):
-             return [
-                 {
-                     "role": "user",
-                     "content": content,
-                 }
-             ]
-
-         if isinstance(content, list):
-             resolved_content = []
-
-             for item in content:
-                 if isinstance(item, dict):
-                     resolved_content.append(item)
-                 elif isinstance(item, str):
-                     resolved_content.append({"type": "text", "text": item})
-                 elif isinstance(item, Image.Image) or (
-                     isinstance(item, Path) and item.suffix.lower() in [".jpg", ".jpeg"]
-                 ):
-                     image = item if isinstance(item, Image.Image) else Image.open(item)
-                     encoded = base64.b64encode(image.tobytes()).decode("utf-8")
-                     resolved_content.append(
-                         {
-                             "type": "image",
-                             "image": {
-                                 "url": f"data:image/jpeg;base64,{encoded}",
-                             },
-                         }
-                     )
-                 elif isinstance(item, Path) and item.suffix.lower() in [".wav"]:
-                     encoded = base64.b64encode(item.read_bytes()).decode("utf-8")
-                     resolved_content.append(
-                         {
-                             "type": "input_audio",
-                             "input_audio": {
-                                 "data": f"{encoded}",
-                                 "format": "wav",
-                             },
-                         }
-                     )
-                 else:
-                     raise ValueError(
-                         f"Unsupported content item type: {item} in list: {content}"
-                     )
-
-             return [
-                 {
-                     "role": "user",
-                     "content": resolved_content,
-                 }
-             ]
-
-         raise ValueError(f"Unsupported content type: {content}")
-
-     async def _iterative_completions_request(
-         self,
-         type_: Literal["text_completions", "chat_completions"],
-         request_id: Optional[str],
-         request_prompt_tokens: Optional[int],
-         request_output_tokens: Optional[int],
-         headers: dict[str, str],
-         params: dict[str, str],
-         payload: dict[str, Any],
-     ) -> AsyncGenerator[Union[StreamingTextResponse, ResponseSummary], None]:
-         if type_ == "text_completions":
-             target = f"{self.target}{TEXT_COMPLETIONS_PATH}"
-         elif type_ == "chat_completions":
-             target = f"{self.target}{CHAT_COMPLETIONS_PATH}"
-         else:
-             raise ValueError(f"Unsupported type: {type_}")
-
-         logger.info(
-             "{} making request: {} to target: {} using http2: {} following "
-             "redirects: {} for timeout: {} with headers: {} and params: {} "
-             "and payload: {}",
-             self.__class__.__name__,
-             request_id,
-             target,
-             self.http2,
-             self.follow_redirects,
-             self.timeout,
-             headers,
-             params,
-             payload,
-         )
-
-         response_value = ""
-         response_prompt_count: Optional[int] = None
-         response_output_count: Optional[int] = None
-         iter_count = 0
-         start_time = time.time()
-         iter_time = start_time
-         first_iter_time: Optional[float] = None
-         last_iter_time: Optional[float] = None
-
-         yield StreamingTextResponse(
-             type_="start",
-             value="",
-             start_time=start_time,
-             first_iter_time=None,
-             iter_count=iter_count,
-             delta="",
-             time=start_time,
-             request_id=request_id,
-         )
-
-         # reset start time after yielding start response to ensure accurate timing
-         start_time = time.time()
-
-         async with self._get_async_client().stream(
-             "POST", target, headers=headers, params=params, json=payload
-         ) as stream:
-             stream.raise_for_status()
-
-             async for line in stream.aiter_lines():
-                 iter_time = time.time()
-                 logger.debug(
-                     "{} request: {} received iter response line: {}",
-                     self.__class__.__name__,
-                     request_id,
-                     line,
-                 )
-
-                 if not line or not line.strip().startswith("data:"):
-                     continue
-
-                 if line.strip() == "data: [DONE]":
-                     break
-
-                 data = json.loads(line.strip()[len("data: ") :])
-                 if delta := self._extract_completions_delta_content(type_, data):
-                     if first_iter_time is None:
-                         first_iter_time = iter_time
-                     last_iter_time = iter_time
-
-                     iter_count += 1
-                     response_value += delta
-
-                     yield StreamingTextResponse(
-                         type_="iter",
-                         value=response_value,
-                         iter_count=iter_count,
-                         start_time=start_time,
-                         first_iter_time=first_iter_time,
-                         delta=delta,
-                         time=iter_time,
-                         request_id=request_id,
-                     )
-
-                 if usage := self._extract_completions_usage(data):
-                     response_prompt_count = usage["prompt"]
-                     response_output_count = usage["output"]
-
-         logger.info(
-             "{} request: {} with headers: {} and params: {} and payload: {} completed "
-             "with: {}",
-             self.__class__.__name__,
-             request_id,
-             headers,
-             params,
-             payload,
-             response_value,
-         )
-
-         yield ResponseSummary(
-             value=response_value,
-             request_args=RequestArgs(
-                 target=target,
-                 headers=headers,
-                 params=params,
-                 payload=payload,
-                 timeout=self.timeout,
-                 http2=self.http2,
-                 follow_redirects=self.follow_redirects,
-             ),
-             start_time=start_time,
-             end_time=iter_time,
-             first_iter_time=first_iter_time,
-             last_iter_time=last_iter_time,
-             iterations=iter_count,
-             request_prompt_tokens=request_prompt_tokens,
-             request_output_tokens=request_output_tokens,
-             response_prompt_tokens=response_prompt_count,
-             response_output_tokens=response_output_count,
-             request_id=request_id,
-         )
-
-     @staticmethod
-     def _extract_completions_delta_content(
-         type_: Literal["text_completions", "chat_completions"], data: dict
-     ) -> Optional[str]:
-         if "choices" not in data or not data["choices"]:
-             return None
-
-         if type_ == "text_completions":
-             return data["choices"][0]["text"]
-
-         if type_ == "chat_completions":
-             return data.get("choices", [{}])[0].get("delta", {}).get("content")
-
-         raise ValueError(f"Unsupported type: {type_}")
-
-     @staticmethod
-     def _extract_completions_usage(
-         data: dict,
-     ) -> Optional[dict[Literal["prompt", "output"], int]]:
-         if "usage" not in data or not data["usage"]:
-             return None
-
-         return {
-             "prompt": data["usage"]["prompt_tokens"],
-             "output": data["usage"]["completion_tokens"],
-         }
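
For reference, the streaming contract of the removed backend can be read off the code above: each completions call yields a "start" StreamingTextResponse, one "iter" response per received delta, and a final ResponseSummary carrying timing and token usage. A minimal usage sketch against the old 0.4.0a21 API follows; the target URL and model name are placeholders, and the replacement API under guidellm/backends/ is not shown by this hunk.

# Hedged sketch of the removed 0.4.0a21 OpenAIHTTPBackend, reconstructed
# solely from the deleted code above.
import asyncio

from guidellm.backend.openai import OpenAIHTTPBackend


async def main() -> None:
    backend = OpenAIHTTPBackend(target="http://0.0.0.0:8000", model="my-model")
    await backend.check_setup()  # resolves a default model if none was given

    async for response in backend.text_completions(
        prompt="Write a haiku about latency.",
        output_token_count=64,  # also sets ignore_eos/max_tokens in the payload
    ):
        # StreamingTextResponse carries a delta; the final ResponseSummary does not
        print(type(response).__name__, getattr(response, "delta", ""))

    await backend.reset()  # closes the cached httpx.AsyncClient


asyncio.run(main())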