guidellm-0.1.0-py3-none-any.whl → guidellm-0.2.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- guidellm/__init__.py +38 -6
- guidellm/__main__.py +294 -0
- guidellm/backend/__init__.py +19 -6
- guidellm/backend/backend.py +238 -0
- guidellm/backend/openai.py +532 -122
- guidellm/backend/response.py +132 -0
- guidellm/benchmark/__init__.py +73 -0
- guidellm/benchmark/aggregator.py +760 -0
- guidellm/benchmark/benchmark.py +838 -0
- guidellm/benchmark/benchmarker.py +334 -0
- guidellm/benchmark/entrypoints.py +141 -0
- guidellm/benchmark/output.py +946 -0
- guidellm/benchmark/profile.py +409 -0
- guidellm/benchmark/progress.py +720 -0
- guidellm/config.py +34 -56
- guidellm/data/__init__.py +4 -0
- guidellm/data/prideandprejudice.txt.gz +0 -0
- guidellm/dataset/__init__.py +22 -0
- guidellm/dataset/creator.py +213 -0
- guidellm/dataset/entrypoints.py +42 -0
- guidellm/dataset/file.py +90 -0
- guidellm/dataset/hf_datasets.py +62 -0
- guidellm/dataset/in_memory.py +132 -0
- guidellm/dataset/synthetic.py +262 -0
- guidellm/objects/__init__.py +18 -0
- guidellm/objects/pydantic.py +60 -0
- guidellm/objects/statistics.py +947 -0
- guidellm/request/__init__.py +12 -10
- guidellm/request/loader.py +281 -0
- guidellm/request/request.py +79 -0
- guidellm/scheduler/__init__.py +51 -3
- guidellm/scheduler/result.py +137 -0
- guidellm/scheduler/scheduler.py +382 -0
- guidellm/scheduler/strategy.py +493 -0
- guidellm/scheduler/types.py +7 -0
- guidellm/scheduler/worker.py +511 -0
- guidellm/utils/__init__.py +16 -29
- guidellm/utils/colors.py +8 -0
- guidellm/utils/hf_transformers.py +35 -0
- guidellm/utils/random.py +43 -0
- guidellm/utils/text.py +118 -357
- {guidellm-0.1.0.dist-info → guidellm-0.2.0.dist-info}/METADATA +96 -79
- guidellm-0.2.0.dist-info/RECORD +48 -0
- {guidellm-0.1.0.dist-info → guidellm-0.2.0.dist-info}/WHEEL +1 -1
- guidellm-0.2.0.dist-info/entry_points.txt +2 -0
- guidellm/backend/base.py +0 -320
- guidellm/core/__init__.py +0 -24
- guidellm/core/distribution.py +0 -190
- guidellm/core/report.py +0 -321
- guidellm/core/request.py +0 -44
- guidellm/core/result.py +0 -545
- guidellm/core/serializable.py +0 -169
- guidellm/executor/__init__.py +0 -10
- guidellm/executor/base.py +0 -213
- guidellm/executor/profile_generator.py +0 -343
- guidellm/main.py +0 -336
- guidellm/request/base.py +0 -194
- guidellm/request/emulated.py +0 -391
- guidellm/request/file.py +0 -76
- guidellm/request/transformers.py +0 -100
- guidellm/scheduler/base.py +0 -374
- guidellm/scheduler/load_generator.py +0 -196
- guidellm/utils/injector.py +0 -70
- guidellm/utils/progress.py +0 -196
- guidellm/utils/transformers.py +0 -151
- guidellm-0.1.0.dist-info/RECORD +0 -35
- guidellm-0.1.0.dist-info/entry_points.txt +0 -3
- {guidellm-0.1.0.dist-info → guidellm-0.2.0.dist-info/licenses}/LICENSE +0 -0
- {guidellm-0.1.0.dist-info → guidellm-0.2.0.dist-info}/top_level.txt +0 -0
guidellm/utils/progress.py
DELETED
@@ -1,196 +0,0 @@
-from datetime import datetime
-from typing import List
-
-from loguru import logger
-from rich.console import Group
-from rich.live import Live
-from rich.panel import Panel
-from rich.progress import (
-    BarColumn,
-    Progress,
-    SpinnerColumn,
-    TaskID,
-    TaskProgressColumn,
-    TextColumn,
-    TimeElapsedColumn,
-    TimeRemainingColumn,
-)
-
-__all__ = ["BenchmarkReportProgress"]
-
-
-class BenchmarkReportProgress:
-    """
-    Manages the progress display for benchmarks and report generation using Rich.
-
-    This class provides a visual representation of the benchmarking process
-    and report generation using Rich's progress bars and panels.
-    """
-
-    def __init__(self):
-        """
-        Initialize the BenchmarkReportProgress with default settings.
-
-        This method sets up the progress displays for both individual benchmarks
-        and the overall report, as well as initializing internal task management
-        structures.
-        """
-        logger.info("Initializing BenchmarkReportProgress instance")
-
-        self.benchmarks_progress = Progress(
-            TextColumn("[{task.fields[start_time_str]}]"),
-            SpinnerColumn(),
-            TaskProgressColumn(),
-            TextColumn("{task.description}"),
-            TextColumn(" "),
-            TextColumn(
-                "[bold cyan]({task.fields[req_per_sec]} req/sec avg)[/bold cyan]"
-            ),
-        )
-        self.benchmarks_panel = Panel(
-            self.benchmarks_progress,
-            title="Benchmarks",
-            title_align="left",
-            expand=True,
-        )
-        self.report_progress = Progress(
-            SpinnerColumn(),
-            TextColumn("Generating report..."),
-            BarColumn(bar_width=None),
-            TextColumn(
-                "({task.fields[completed_benchmarks]}/{task.fields[total_benchmarks]})"
-            ),
-            TextColumn("["),
-            TimeElapsedColumn(),
-            TextColumn("<"),
-            TimeRemainingColumn(),
-            TextColumn("]"),
-        )
-        self.render_group = Group(self.benchmarks_panel, self.report_progress)
-        self.live = Live(self.render_group, redirect_stdout=True, redirect_stderr=True)
-
-        self.report_task: TaskID = None  # type: ignore # noqa: PGH003
-        self.benchmark_tasks: List[TaskID] = []
-        self.benchmark_tasks_started: List[bool] = []
-        self.benchmark_tasks_completed: List[bool] = []
-        self.benchmark_tasks_progress: List[float] = []
-
-    def start(self, task_descriptions: List[str]) -> None:
-        """
-        Starts the live progress display and initializes benchmark tasks.
-
-        :param task_descriptions: List of descriptions for each benchmark task.
-        :type task_descriptions: List[str]
-        """
-        logger.info(
-            "Starting BenchmarkReportProgress with task descriptions: {}",
-            task_descriptions,
-        )
-        self.live.start()
-
-        for task_description in task_descriptions:
-            logger.debug("Adding task with description: {}", task_description)
-            task_id = self.benchmarks_progress.add_task(
-                task_description,
-                start=False,
-                total=None,
-                start_time_str="--:--:--",
-                req_per_sec="#.##",
-            )
-            self.benchmark_tasks.append(task_id)
-            self.benchmark_tasks_started.append(False)
-            self.benchmark_tasks_completed.append(False)
-            self.benchmark_tasks_progress.append(0)
-
-        self.report_task = self.report_progress.add_task(
-            "",
-            total=len(self.benchmark_tasks) * 100,  # 100 points per report
-            completed_benchmarks=0,
-            total_benchmarks=len(task_descriptions),
-        )
-        logger.info("Initialized {} benchmark tasks", len(task_descriptions))
-
-    def update_benchmark(
-        self,
-        index: int,
-        description: str,
-        completed: bool,
-        completed_count: int,
-        completed_total: int,
-        start_time: float,
-        req_per_sec: float,
-    ) -> None:
-        """
-        Updates the progress of a specific benchmark task.
-
-        :param index: Index of the benchmark task to update.
-        :type index: int
-        :param description: Description of the current benchmark task.
-        :type description: str
-        :param completed: Flag indicating if the benchmark is completed.
-        :type completed: bool
-        :param completed_count: Number of completed operations for the task.
-        :type completed_count: int
-        :param completed_total: Total number of operations for the task.
-        :type completed_total: int
-        :param start_time: Start time of the benchmark in timestamp format.
-        :type start_time: float
-        :param req_per_sec: Average requests per second.
-        :type req_per_sec: float
-        :raises ValueError: If trying to update a completed benchmark.
-        """
-        if self.benchmark_tasks_completed[index]:
-            err = ValueError(f"Benchmark {index} already completed")
-            logger.error("Error updating benchmark: {}", err)
-            raise err
-
-        if not self.benchmark_tasks_started[index]:
-            self.benchmark_tasks_started[index] = True
-            self.benchmarks_progress.start_task(self.benchmark_tasks[index])
-            logger.info("Starting benchmark task at index {}", index)
-
-        if completed:
-            self.benchmark_tasks_completed[index] = True
-            self.benchmark_tasks_progress[index] = 100
-            self.benchmarks_progress.stop_task(self.benchmark_tasks[index])
-            logger.info("Completed benchmark task at index {}", index)
-
-        self.benchmark_tasks_progress[index] = completed_count / completed_total * 100
-        self.benchmarks_progress.update(
-            self.benchmark_tasks[index],
-            description=description,
-            total=completed_total,
-            completed=completed_count if not completed else completed_total,
-            req_per_sec=(f"{req_per_sec:.2f}" if req_per_sec else "#.##"),
-            start_time_str=datetime.fromtimestamp(start_time).strftime("%H:%M:%S")
-            if start_time
-            else "--:--:--",
-        )
-        logger.debug(
-            "Updated benchmark task at index {}: {}% complete",
-            index,
-            self.benchmark_tasks_progress[index],
-        )
-        self.report_progress.update(
-            self.report_task,
-            total=len(self.benchmark_tasks) * 100,
-            completed=sum(self.benchmark_tasks_progress),
-            completed_benchmarks=sum(self.benchmark_tasks_completed),
-            total_benchmarks=len(self.benchmark_tasks),
-        )
-
-    def finish(self) -> None:
-        """
-        Marks the overall report task as finished and stops the live display.
-        """
-        logger.info("Finishing BenchmarkReportProgress")
-        self.report_progress.update(
-            self.report_task,
-            total=len(self.benchmark_tasks) * 100,
-            completed=len(self.benchmark_tasks) * 100,
-            completed_benchmarks=len(self.benchmark_tasks),
-            total_benchmarks=len(self.benchmark_tasks),
-        )
-        self.report_progress.stop_task(self.report_task)
-        self.live.stop()
-        logger.info("BenchmarkReportProgress finished and live display stopped")
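For context on what this removal drops, here is a minimal, hypothetical driver loop for the BenchmarkReportProgress API shown above (0.1.0 only); the task descriptions, counts, and sleep are illustrative and not taken from the package:

    import time

    from guidellm.utils.progress import BenchmarkReportProgress  # 0.1.0 API, removed in 0.2.0

    progress = BenchmarkReportProgress()
    progress.start(["benchmark @ 1 req/s", "benchmark @ 5 req/s"])  # one row per benchmark

    for index in range(2):
        start_time = time.time()
        total = 10  # illustrative number of requests per benchmark
        for count in range(1, total + 1):
            time.sleep(0.1)  # stand-in for issuing a request and collecting its result
            progress.update_benchmark(
                index=index,
                description=f"benchmark {index}",
                completed=(count == total),  # marks the row done on the last update
                completed_count=count,
                completed_total=total,
                start_time=start_time,
                req_per_sec=count / max(time.time() - start_time, 1e-9),
            )

    progress.finish()

In 0.2.0 this role is taken over by the new guidellm/benchmark/progress.py module listed above.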
guidellm/utils/transformers.py
DELETED
@@ -1,151 +0,0 @@
-from pathlib import Path
-from typing import List, Optional, Union
-
-from datasets import (  # type: ignore # noqa: PGH003
-    Dataset,
-    DatasetDict,
-    IterableDataset,
-    IterableDatasetDict,
-    load_dataset,
-)
-from loguru import logger
-
-from guidellm.config import settings
-
-__all__ = [
-    "load_transformers_dataset",
-    "resolve_transformers_dataset",
-    "resolve_transformers_dataset_column",
-    "resolve_transformers_dataset_split",
-]
-
-
-def load_transformers_dataset(
-    dataset: Union[
-        str, Path, DatasetDict, Dataset, IterableDatasetDict, IterableDataset
-    ],
-    split: Optional[str] = None,
-    preferred_splits: Optional[List[str]] = settings.dataset.preferred_data_splits,
-    **kwargs,
-) -> Union[Dataset, IterableDataset]:
-    """
-    Load a dataset from a file or a script and resolve the preferred split.
-
-    :param dataset: the dataset file or script to load
-    :param split: the dataset split to use
-        (overrides preferred_splits, must be in dataset)
-    :param preferred_splits: the preferred dataset splits to use
-    :param kwargs: additional keyword arguments to pass to the dataset loader
-    :return: the loaded dataset
-    """
-    dataset = resolve_transformers_dataset(dataset, **kwargs)
-
-    return resolve_transformers_dataset_split(dataset, split, preferred_splits)
-
-
-def resolve_transformers_dataset(
-    dataset: Union[
-        str, Path, DatasetDict, Dataset, IterableDatasetDict, IterableDataset
-    ],
-    **kwargs,
-) -> Union[DatasetDict, Dataset, IterableDatasetDict, IterableDataset]:
-    """
-    Resolve the dataset from a file (csv, json, script) or a dataset name.
-
-    :param dataset: the dataset file or script to load
-    :param kwargs: additional keyword arguments to pass to the dataset loader
-    :return: the loaded dataset
-    """
-    if isinstance(
-        dataset, (DatasetDict, Dataset, IterableDatasetDict, IterableDataset)
-    ):
-        return dataset
-
-    if not isinstance(dataset, (str, Path)):
-        raise ValueError(f"Invalid dataset type: {type(dataset)}")
-
-    dataset = str(dataset)
-
-    if dataset.endswith((".csv", ".json")):
-        logger.debug("Loading dataset from local path: {}", dataset)
-        extension = dataset.split(".")[-1]
-
-        return load_dataset(extension, data_files=dataset, **kwargs)
-
-    if dataset.endswith(".py"):
-        logger.debug("Loading dataset from local script: {}", dataset)
-
-        return load_dataset(dataset, **kwargs)
-
-    logger.debug("Loading dataset: {}", dataset)
-
-    return load_dataset(dataset, **kwargs)
-
-
-def resolve_transformers_dataset_split(
-    dataset: Union[DatasetDict, Dataset, IterableDatasetDict, IterableDataset],
-    split: Optional[str] = None,
-    preferred_splits: Optional[List[str]] = settings.dataset.preferred_data_splits,
-) -> Union[Dataset, IterableDataset]:
-    """
-    Resolve the preferred split from a dataset dictionary.
-
-    :param dataset: the dataset to resolve the split from
-    :param split: the dataset split to use
-        (overrides preferred_splits, must be in dataset)
-    :param preferred_splits: the preferred dataset splits to use
-    :return: the resolved dataset split
-    """
-    if not isinstance(dataset, (DatasetDict, IterableDatasetDict)):
-        logger.debug("Dataset is not a dictionary, using default split")
-        return dataset
-
-    if split:
-        if split not in dataset:
-            raise ValueError(f"Split '{split}' not found in dataset")
-
-        return dataset[split]
-
-    if preferred_splits:
-        for spl in preferred_splits:
-            if spl not in dataset:
-                continue
-            return dataset[spl]
-
-    return list(dataset.values())[0]
-
-
-def resolve_transformers_dataset_column(
-    dataset: Union[Dataset, IterableDataset],
-    column: Optional[str] = None,
-    preferred_columns: Optional[List[str]] = settings.dataset.preferred_data_columns,
-) -> str:
-    """
-    Resolve the preferred column from a dataset.
-
-    :param dataset: the dataset to resolve the column from
-    :param column: the dataset column to use
-        (overrides preferred_columns, must be in dataset)
-    :param preferred_columns: the preferred dataset columns to use
-    :return: the resolved dataset column
-    """
-    column_names = dataset.column_names
-
-    if not column_names:
-        # grab from the first item
-        first_item = next(iter(dataset))
-        column_names = list(first_item.keys())
-
-    if column:
-        if column not in column_names:
-            raise ValueError(f"Column '{column}' not found in dataset")
-
-        return column
-
-    if preferred_columns:
-        for col in preferred_columns:
-            if col not in column_names:
-                continue
-            return col
-
-    return list(column_names)[0]
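Similarly, a short hypothetical sketch of how the removed 0.1.0 helpers above were used; the CSV path and the printed column are illustrative only:

    from guidellm.utils.transformers import (  # removed in 0.2.0
        load_transformers_dataset,
        resolve_transformers_dataset_column,
    )

    # For a .csv path this calls datasets.load_dataset("csv", data_files=...)
    # and then resolves the requested "train" split from the resulting DatasetDict.
    dataset = load_transformers_dataset("prompts.csv", split="train")

    # With no explicit column, falls back to the configured preferred columns,
    # or the first column of the dataset.
    column = resolve_transformers_dataset_column(dataset)

    for row in dataset:
        print(row[column])

The 0.2.0 layout replaces this module with the guidellm/dataset package and guidellm/utils/hf_transformers.py listed above.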
guidellm-0.1.0.dist-info/RECORD
DELETED
@@ -1,35 +0,0 @@
-guidellm/__init__.py,sha256=ZUgTGFF7KLx2ISgtNfLBBjUEPc6KbXYPMfRKPGs9ZJA,567
-guidellm/config.py,sha256=ilLTZU0WqVJGzk7ANABjrOm3bmK98l_SroFbTMYmSuA,6505
-guidellm/logger.py,sha256=O4sU2QKHn_swJIEmayiEt6nIXzGHGmXqZ_Mg8CdIE5Q,2609
-guidellm/main.py,sha256=NvsW8Dhtw-Surc9EZnW4pGYY9aEIv3ZgOSEq_eKrKCA,11028
-guidellm/backend/__init__.py,sha256=uJmCwrHzzoUE7XPNEB4RmSwdr00eTH1wdMxCxGiQtvU,241
-guidellm/backend/base.py,sha256=do9t7hOCQEyeCNOQeXmdwpUHMrFz8mAXtbvO6Ee2Lu0,10630
-guidellm/backend/openai.py,sha256=7tlp5tWw1KDvJhJZ8s9NK0JuuU__GJPMvupVMjnqjFo,5669
-guidellm/core/__init__.py,sha256=rxfr51aAgRJIHTXPZT9tf9ng1dXfDAC5pR8EcGssJfE,646
-guidellm/core/distribution.py,sha256=8S76zg00e4Ymg8bq36nfratHsMJNdqFtq_InsC1vs8I,6563
-guidellm/core/report.py,sha256=fv5F4NY-5jLUsD7-HZ4yVPB26ZpnIu0t-Kt4LePXKUI,10814
-guidellm/core/request.py,sha256=oIjLtBZUus0huEBsVbLGB19yQvza9ugZnwUDTNBjwcU,1376
-guidellm/core/result.py,sha256=VWu00HAPep3x3shl6DOKnmFTUK_36FQL2IdOeWqtkAk,16646
-guidellm/core/serializable.py,sha256=CdEOUsHPFQnxTl1Ole-23IVwW16L1xMEnqkUWcdFQ3s,5028
-guidellm/executor/__init__.py,sha256=6nZArM68BjcpP5HnaAgjFfMPRCCpevLn441MhRZ5SVw,244
-guidellm/executor/base.py,sha256=0YV4qWg9crvY6xLYUsRokFa21rWnrQmGHPAaO6Qz5b4,7444
-guidellm/executor/profile_generator.py,sha256=urKLg7aqNtaWMzpYpOAeaPgoWWThm3bXQXxCkJwjP9o,11432
-guidellm/request/__init__.py,sha256=AjHRmkL6N-M7Y1ft_GCAzwIzW9GwJEWXn1mourN2j90,401
-guidellm/request/base.py,sha256=kffEG8Y0OWImu4LcsckUuSiajTQUm9pyOF-CRyzUNhk,5607
-guidellm/request/emulated.py,sha256=B9D4mcwV2Bgs6IhouSWDPDS71QZBC9Z3Rb-1iA1AX-Y,13273
-guidellm/request/file.py,sha256=UsfSJvR8JeSJlhm3DzyDS4qLytVMdK6trhuV5384oS4,2488
-guidellm/request/transformers.py,sha256=ndNSjTXCQdZed5DoJ8bf7In69bGRid3Z9SdQbKaqi34,3292
-guidellm/scheduler/__init__.py,sha256=JaOLOg5fIsC3YLfKx8iBJfGQgeLGd77gRpbQXthXfp4,190
-guidellm/scheduler/base.py,sha256=FJ2IiOXE7ledcGSgun2m2pLXB_em6iqhqc_A6JHKOHI,12350
-guidellm/scheduler/load_generator.py,sha256=bkIBK2dCcEP1DK_cuOSGNheM-iNo-hiUrs--g07-eLU,7074
-guidellm/utils/__init__.py,sha256=SEtNDH83200nVwtN-F2KJiywWqFVkulQHD7wKIepa0E,921
-guidellm/utils/injector.py,sha256=KjuFhkfGSPr7NdXjt8v_9eh8GjElfffz9fWuGIfwIJY,2089
-guidellm/utils/progress.py,sha256=Bqi6zY7wa-J1n3N-p-rkuFW7w6gK8h98IfhEg-NSG40,7357
-guidellm/utils/text.py,sha256=vaplcd3fhdZDRbUaqEww0kEhG38WUAHKpDoN4k-mwUk,13241
-guidellm/utils/transformers.py,sha256=K2Jowre11CJBLX6l3fK-F5qkvZto1YkTSdFq7wqtdpg,4752
-guidellm-0.1.0.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
-guidellm-0.1.0.dist-info/METADATA,sha256=lDHMCgpkhb88ULXtHYplG87xGxPORwPW3HA2so-Thvs,28386
-guidellm-0.1.0.dist-info/WHEEL,sha256=UvcQYKBHoFqaQd6LKyqHw9fxEolWLQnlzP0h_LgJAfI,91
-guidellm-0.1.0.dist-info/entry_points.txt,sha256=IJyVp0dPC8QpPfCu8MPFbdDKdatFNpHO2CkCEv67_v4,120
-guidellm-0.1.0.dist-info/top_level.txt,sha256=EXRGjnvFtL6MeZTe0tnHRMYcEWUW3vEqoG2zO7vFOtk,9
-guidellm-0.1.0.dist-info/RECORD,,
{guidellm-0.1.0.dist-info → guidellm-0.2.0.dist-info/licenses}/LICENSE
File without changes
{guidellm-0.1.0.dist-info → guidellm-0.2.0.dist-info}/top_level.txt
File without changes