guidellm 0.3.1__py3-none-any.whl → 0.6.0a5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (141)
  1. guidellm/__init__.py +5 -2
  2. guidellm/__main__.py +524 -255
  3. guidellm/backends/__init__.py +33 -0
  4. guidellm/backends/backend.py +109 -0
  5. guidellm/backends/openai.py +340 -0
  6. guidellm/backends/response_handlers.py +428 -0
  7. guidellm/benchmark/__init__.py +69 -39
  8. guidellm/benchmark/benchmarker.py +160 -316
  9. guidellm/benchmark/entrypoints.py +560 -127
  10. guidellm/benchmark/outputs/__init__.py +24 -0
  11. guidellm/benchmark/outputs/console.py +633 -0
  12. guidellm/benchmark/outputs/csv.py +721 -0
  13. guidellm/benchmark/outputs/html.py +473 -0
  14. guidellm/benchmark/outputs/output.py +169 -0
  15. guidellm/benchmark/outputs/serialized.py +69 -0
  16. guidellm/benchmark/profiles.py +718 -0
  17. guidellm/benchmark/progress.py +553 -556
  18. guidellm/benchmark/scenarios/__init__.py +40 -0
  19. guidellm/benchmark/scenarios/chat.json +6 -0
  20. guidellm/benchmark/scenarios/rag.json +6 -0
  21. guidellm/benchmark/schemas/__init__.py +66 -0
  22. guidellm/benchmark/schemas/base.py +402 -0
  23. guidellm/benchmark/schemas/generative/__init__.py +55 -0
  24. guidellm/benchmark/schemas/generative/accumulator.py +841 -0
  25. guidellm/benchmark/schemas/generative/benchmark.py +163 -0
  26. guidellm/benchmark/schemas/generative/entrypoints.py +381 -0
  27. guidellm/benchmark/schemas/generative/metrics.py +927 -0
  28. guidellm/benchmark/schemas/generative/report.py +158 -0
  29. guidellm/data/__init__.py +34 -4
  30. guidellm/data/builders.py +541 -0
  31. guidellm/data/collators.py +16 -0
  32. guidellm/data/config.py +120 -0
  33. guidellm/data/deserializers/__init__.py +49 -0
  34. guidellm/data/deserializers/deserializer.py +141 -0
  35. guidellm/data/deserializers/file.py +223 -0
  36. guidellm/data/deserializers/huggingface.py +94 -0
  37. guidellm/data/deserializers/memory.py +194 -0
  38. guidellm/data/deserializers/synthetic.py +246 -0
  39. guidellm/data/entrypoints.py +52 -0
  40. guidellm/data/loaders.py +190 -0
  41. guidellm/data/preprocessors/__init__.py +27 -0
  42. guidellm/data/preprocessors/formatters.py +410 -0
  43. guidellm/data/preprocessors/mappers.py +196 -0
  44. guidellm/data/preprocessors/preprocessor.py +30 -0
  45. guidellm/data/processor.py +29 -0
  46. guidellm/data/schemas.py +175 -0
  47. guidellm/data/utils/__init__.py +6 -0
  48. guidellm/data/utils/dataset.py +94 -0
  49. guidellm/extras/__init__.py +4 -0
  50. guidellm/extras/audio.py +220 -0
  51. guidellm/extras/vision.py +242 -0
  52. guidellm/logger.py +2 -2
  53. guidellm/mock_server/__init__.py +8 -0
  54. guidellm/mock_server/config.py +84 -0
  55. guidellm/mock_server/handlers/__init__.py +17 -0
  56. guidellm/mock_server/handlers/chat_completions.py +280 -0
  57. guidellm/mock_server/handlers/completions.py +280 -0
  58. guidellm/mock_server/handlers/tokenizer.py +142 -0
  59. guidellm/mock_server/models.py +510 -0
  60. guidellm/mock_server/server.py +238 -0
  61. guidellm/mock_server/utils.py +302 -0
  62. guidellm/scheduler/__init__.py +69 -26
  63. guidellm/scheduler/constraints/__init__.py +49 -0
  64. guidellm/scheduler/constraints/constraint.py +325 -0
  65. guidellm/scheduler/constraints/error.py +411 -0
  66. guidellm/scheduler/constraints/factory.py +182 -0
  67. guidellm/scheduler/constraints/request.py +312 -0
  68. guidellm/scheduler/constraints/saturation.py +722 -0
  69. guidellm/scheduler/environments.py +252 -0
  70. guidellm/scheduler/scheduler.py +137 -368
  71. guidellm/scheduler/schemas.py +358 -0
  72. guidellm/scheduler/strategies.py +617 -0
  73. guidellm/scheduler/worker.py +413 -419
  74. guidellm/scheduler/worker_group.py +712 -0
  75. guidellm/schemas/__init__.py +65 -0
  76. guidellm/schemas/base.py +417 -0
  77. guidellm/schemas/info.py +188 -0
  78. guidellm/schemas/request.py +235 -0
  79. guidellm/schemas/request_stats.py +349 -0
  80. guidellm/schemas/response.py +124 -0
  81. guidellm/schemas/statistics.py +1018 -0
  82. guidellm/{config.py → settings.py} +31 -24
  83. guidellm/utils/__init__.py +71 -8
  84. guidellm/utils/auto_importer.py +98 -0
  85. guidellm/utils/cli.py +132 -5
  86. guidellm/utils/console.py +566 -0
  87. guidellm/utils/encoding.py +778 -0
  88. guidellm/utils/functions.py +159 -0
  89. guidellm/utils/hf_datasets.py +1 -2
  90. guidellm/utils/hf_transformers.py +4 -4
  91. guidellm/utils/imports.py +9 -0
  92. guidellm/utils/messaging.py +1118 -0
  93. guidellm/utils/mixins.py +115 -0
  94. guidellm/utils/random.py +3 -4
  95. guidellm/utils/registry.py +220 -0
  96. guidellm/utils/singleton.py +133 -0
  97. guidellm/utils/synchronous.py +159 -0
  98. guidellm/utils/text.py +163 -50
  99. guidellm/utils/typing.py +41 -0
  100. guidellm/version.py +2 -2
  101. guidellm-0.6.0a5.dist-info/METADATA +364 -0
  102. guidellm-0.6.0a5.dist-info/RECORD +109 -0
  103. guidellm/backend/__init__.py +0 -23
  104. guidellm/backend/backend.py +0 -259
  105. guidellm/backend/openai.py +0 -708
  106. guidellm/backend/response.py +0 -136
  107. guidellm/benchmark/aggregator.py +0 -760
  108. guidellm/benchmark/benchmark.py +0 -837
  109. guidellm/benchmark/output.py +0 -997
  110. guidellm/benchmark/profile.py +0 -409
  111. guidellm/benchmark/scenario.py +0 -104
  112. guidellm/data/prideandprejudice.txt.gz +0 -0
  113. guidellm/dataset/__init__.py +0 -22
  114. guidellm/dataset/creator.py +0 -213
  115. guidellm/dataset/entrypoints.py +0 -42
  116. guidellm/dataset/file.py +0 -92
  117. guidellm/dataset/hf_datasets.py +0 -62
  118. guidellm/dataset/in_memory.py +0 -132
  119. guidellm/dataset/synthetic.py +0 -287
  120. guidellm/objects/__init__.py +0 -18
  121. guidellm/objects/pydantic.py +0 -89
  122. guidellm/objects/statistics.py +0 -953
  123. guidellm/preprocess/__init__.py +0 -3
  124. guidellm/preprocess/dataset.py +0 -374
  125. guidellm/presentation/__init__.py +0 -28
  126. guidellm/presentation/builder.py +0 -27
  127. guidellm/presentation/data_models.py +0 -232
  128. guidellm/presentation/injector.py +0 -66
  129. guidellm/request/__init__.py +0 -18
  130. guidellm/request/loader.py +0 -284
  131. guidellm/request/request.py +0 -79
  132. guidellm/request/types.py +0 -10
  133. guidellm/scheduler/queues.py +0 -25
  134. guidellm/scheduler/result.py +0 -155
  135. guidellm/scheduler/strategy.py +0 -495
  136. guidellm-0.3.1.dist-info/METADATA +0 -329
  137. guidellm-0.3.1.dist-info/RECORD +0 -62
  138. {guidellm-0.3.1.dist-info → guidellm-0.6.0a5.dist-info}/WHEEL +0 -0
  139. {guidellm-0.3.1.dist-info → guidellm-0.6.0a5.dist-info}/entry_points.txt +0 -0
  140. {guidellm-0.3.1.dist-info → guidellm-0.6.0a5.dist-info}/licenses/LICENSE +0 -0
  141. {guidellm-0.3.1.dist-info → guidellm-0.6.0a5.dist-info}/top_level.txt +0 -0
@@ -1,259 +0,0 @@
1
- from abc import ABC, abstractmethod
2
- from collections.abc import AsyncGenerator
3
- from pathlib import Path
4
- from typing import Any, Literal, Optional, Union
5
-
6
- from loguru import logger
7
- from PIL import Image
8
-
9
- from guidellm.backend.response import ResponseSummary, StreamingTextResponse
10
- from guidellm.config import settings
11
-
12
- __all__ = [
13
- "Backend",
14
- "BackendType",
15
- ]
16
-
17
-
18
- BackendType = Literal["openai_http"]
19
-
20
-
21
- class Backend(ABC):
22
- """
23
- Abstract base class for generative AI backends.
24
-
25
- This class provides a common interface for creating and interacting with different
26
- generative AI backends. Subclasses should implement the abstract methods to
27
- define specific backend behavior.
28
-
29
- :cvar _registry: A registration dictionary that maps BackendType to backend classes.
30
- :param type_: The type of the backend.
31
- """
32
-
33
- _registry: dict[BackendType, "type[Backend]"] = {}
34
-
35
- @classmethod
36
- def register(cls, backend_type: BackendType):
37
- """
38
- A decorator to register a backend class in the backend registry.
39
-
40
- :param backend_type: The type of backend to register.
41
- :type backend_type: BackendType
42
- :return: The decorated backend class.
43
- :rtype: Type[Backend]
44
- """
45
- if backend_type in cls._registry:
46
- raise ValueError(f"Backend type already registered: {backend_type}")
47
-
48
- if not issubclass(cls, Backend):
49
- raise TypeError("Only subclasses of Backend can be registered")
50
-
51
- def inner_wrapper(wrapped_class: type["Backend"]):
52
- cls._registry[backend_type] = wrapped_class
53
- logger.info("Registered backend type: {}", backend_type)
54
- return wrapped_class
55
-
56
- return inner_wrapper
57
-
58
- @classmethod
59
- def create(cls, type_: BackendType, **kwargs) -> "Backend":
60
- """
61
- Factory method to create a backend instance based on the backend type.
62
-
63
- :param type_: The type of backend to create.
64
- :type type_: BackendType
65
- :param kwargs: Additional arguments for backend initialization.
66
- :return: An instance of a subclass of Backend.
67
- :rtype: Backend
68
- :raises ValueError: If the backend type is not registered.
69
- """
70
-
71
- logger.info("Creating backend of type {}", type_)
72
-
73
- if type_ not in cls._registry:
74
- err = ValueError(f"Unsupported backend type: {type_}")
75
- logger.error("{}", err)
76
- raise err
77
-
78
- return Backend._registry[type_](**kwargs)
79
-
80
- def __init__(self, type_: BackendType):
81
- self._type = type_
82
-
83
- @property
84
- def type_(self) -> BackendType:
85
- """
86
- :return: The type of the backend.
87
- """
88
- return self._type
89
-
90
- @property
91
- @abstractmethod
92
- def target(self) -> str:
93
- """
94
- :return: The target location for the backend.
95
- """
96
- ...
97
-
98
- @property
99
- @abstractmethod
100
- def model(self) -> Optional[str]:
101
- """
102
- :return: The model used for the backend requests.
103
- """
104
- ...
105
-
106
- @property
107
- @abstractmethod
108
- def info(self) -> dict[str, Any]:
109
- """
110
- :return: The information about the backend.
111
- """
112
- ...
113
-
114
- @abstractmethod
115
- async def reset(self) -> None:
116
- """
117
- Reset the connection object. This is useful for backends that
118
- reuse connections or have state that needs to be cleared.
119
- """
120
- ...
121
-
122
- async def validate(self):
123
- """
124
- Handle final setup and validate the backend is ready for use.
125
- If not successful, raises the appropriate exception.
126
- """
127
- logger.info("{} validating backend {}", self.__class__.__name__, self.type_)
128
- await self.check_setup()
129
- models = await self.available_models()
130
- if not models:
131
- raise ValueError("No models available for the backend")
132
-
133
- # Use the preferred route defined in the global settings when performing the
134
- # validation request. This avoids calling an unavailable endpoint (ie
135
- # /v1/completions) when the deployment only supports the chat completions
136
- # endpoint.
137
- if settings.preferred_route == "chat_completions":
138
- async for _ in self.chat_completions( # type: ignore[attr-defined]
139
- content="Test connection", output_token_count=1
140
- ):
141
- pass
142
- else:
143
- async for _ in self.text_completions( # type: ignore[attr-defined]
144
- prompt="Test connection", output_token_count=1
145
- ):
146
- pass
147
-
148
- await self.reset()
149
-
150
- @abstractmethod
151
- async def check_setup(self):
152
- """
153
- Check the setup for the backend.
154
- If unsuccessful, raises the appropriate exception.
155
-
156
- :raises ValueError: If the setup check fails.
157
- """
158
- ...
159
-
160
- @abstractmethod
161
- async def prepare_multiprocessing(self):
162
- """
163
- Prepare the backend for use in a multiprocessing environment.
164
- This is useful for backends that have instance state that can not
165
- be shared across processes and should be cleared out and re-initialized
166
- for each new process.
167
- """
168
- ...
169
-
170
- @abstractmethod
171
- async def available_models(self) -> list[str]:
172
- """
173
- Get the list of available models for the backend.
174
-
175
- :return: The list of available models.
176
- :rtype: List[str]
177
- """
178
- ...
179
-
180
- @abstractmethod
181
- async def text_completions(
182
- self,
183
- prompt: Union[str, list[str]],
184
- request_id: Optional[str] = None,
185
- prompt_token_count: Optional[int] = None,
186
- output_token_count: Optional[int] = None,
187
- **kwargs,
188
- ) -> AsyncGenerator[Union[StreamingTextResponse, ResponseSummary], None]:
189
- """
190
- Generate text only completions for the given prompt.
191
- Does not support multiple modalities, complicated chat interfaces,
192
- or chat templates. Specifically, it requests with only the prompt.
193
-
194
- :param prompt: The prompt (or list of prompts) to generate a completion for.
195
- If a list is supplied, these are concatenated and run through the model
196
- for a single prompt.
197
- :param request_id: The unique identifier for the request, if any.
198
- Added to logging statements and the response for tracking purposes.
199
- :param prompt_token_count: The number of tokens measured in the prompt, if any.
200
- Returned in the response stats for later analysis, if applicable.
201
- :param output_token_count: If supplied, the number of tokens to enforce
202
- generation of for the output for this request.
203
- :param kwargs: Additional keyword arguments to pass with the request.
204
- :return: An async generator that yields a StreamingTextResponse for start,
205
- a StreamingTextResponse for each received iteration,
206
- and a ResponseSummary for the final response.
207
- """
208
- ...
209
-
210
- @abstractmethod
211
- async def chat_completions(
212
- self,
213
- content: Union[
214
- str,
215
- list[Union[str, dict[str, Union[str, dict[str, str]]], Path, Image.Image]],
216
- Any,
217
- ],
218
- request_id: Optional[str] = None,
219
- prompt_token_count: Optional[int] = None,
220
- output_token_count: Optional[int] = None,
221
- raw_content: bool = False,
222
- **kwargs,
223
- ) -> AsyncGenerator[Union[StreamingTextResponse, ResponseSummary], None]:
224
- """
225
- Generate chat completions for the given content.
226
- Supports multiple modalities, complicated chat interfaces, and chat templates.
227
- Specifically, it requests with the content, which can be any combination of
228
- text, images, and audio provided the target model supports it,
229
- and returns the output text. Additionally, any chat templates
230
- for the model are applied within the backend.
231
-
232
- :param content: The content (or list of content) to generate a completion for.
233
- This supports any combination of text, images, and audio (model dependent).
234
- Supported text only request examples:
235
- content="Sample prompt", content=["Sample prompt", "Second prompt"],
236
- content=[{"type": "text", "value": "Sample prompt"}.
237
- Supported text and image request examples:
238
- content=["Describe the image", PIL.Image.open("image.jpg")],
239
- content=["Describe the image", Path("image.jpg")],
240
- content=["Describe the image", {"type": "image_url",
241
- "image_url": {"url": f"data:image/jpeg;base64,{base64_image}"}].
242
- Supported text and audio request examples:
243
- content=["Transcribe the audio", Path("audio.wav")],
244
- content=["Transcribe the audio", {"type": "input_audio",
245
- "input_audio": {"data": f"{base64_bytes}", "format": "wav}].
246
- Additionally, if raw_content=True then the content is passed directly to the
247
- backend without any processing.
248
- :param request_id: The unique identifier for the request, if any.
249
- Added to logging statements and the response for tracking purposes.
250
- :param prompt_token_count: The number of tokens measured in the prompt, if any.
251
- Returned in the response stats for later analysis, if applicable.
252
- :param output_token_count: If supplied, the number of tokens to enforce
253
- generation of for the output for this request.
254
- :param kwargs: Additional keyword arguments to pass with the request.
255
- :return: An async generator that yields a StreamingTextResponse for start,
256
- a StreamingTextResponse for each received iteration,
257
- and a ResponseSummary for the final response.
258
- """
259
- ...