guidellm 0.1.0__py3-none-any.whl → 0.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


Files changed (69)
  1. guidellm/__init__.py +38 -6
  2. guidellm/__main__.py +294 -0
  3. guidellm/backend/__init__.py +19 -6
  4. guidellm/backend/backend.py +238 -0
  5. guidellm/backend/openai.py +532 -122
  6. guidellm/backend/response.py +132 -0
  7. guidellm/benchmark/__init__.py +73 -0
  8. guidellm/benchmark/aggregator.py +760 -0
  9. guidellm/benchmark/benchmark.py +838 -0
  10. guidellm/benchmark/benchmarker.py +334 -0
  11. guidellm/benchmark/entrypoints.py +141 -0
  12. guidellm/benchmark/output.py +946 -0
  13. guidellm/benchmark/profile.py +409 -0
  14. guidellm/benchmark/progress.py +720 -0
  15. guidellm/config.py +34 -56
  16. guidellm/data/__init__.py +4 -0
  17. guidellm/data/prideandprejudice.txt.gz +0 -0
  18. guidellm/dataset/__init__.py +22 -0
  19. guidellm/dataset/creator.py +213 -0
  20. guidellm/dataset/entrypoints.py +42 -0
  21. guidellm/dataset/file.py +90 -0
  22. guidellm/dataset/hf_datasets.py +62 -0
  23. guidellm/dataset/in_memory.py +132 -0
  24. guidellm/dataset/synthetic.py +262 -0
  25. guidellm/objects/__init__.py +18 -0
  26. guidellm/objects/pydantic.py +60 -0
  27. guidellm/objects/statistics.py +947 -0
  28. guidellm/request/__init__.py +12 -10
  29. guidellm/request/loader.py +281 -0
  30. guidellm/request/request.py +79 -0
  31. guidellm/scheduler/__init__.py +51 -3
  32. guidellm/scheduler/result.py +137 -0
  33. guidellm/scheduler/scheduler.py +382 -0
  34. guidellm/scheduler/strategy.py +493 -0
  35. guidellm/scheduler/types.py +7 -0
  36. guidellm/scheduler/worker.py +511 -0
  37. guidellm/utils/__init__.py +16 -29
  38. guidellm/utils/colors.py +8 -0
  39. guidellm/utils/hf_transformers.py +35 -0
  40. guidellm/utils/random.py +43 -0
  41. guidellm/utils/text.py +118 -357
  42. {guidellm-0.1.0.dist-info → guidellm-0.2.0.dist-info}/METADATA +96 -79
  43. guidellm-0.2.0.dist-info/RECORD +48 -0
  44. {guidellm-0.1.0.dist-info → guidellm-0.2.0.dist-info}/WHEEL +1 -1
  45. guidellm-0.2.0.dist-info/entry_points.txt +2 -0
  46. guidellm/backend/base.py +0 -320
  47. guidellm/core/__init__.py +0 -24
  48. guidellm/core/distribution.py +0 -190
  49. guidellm/core/report.py +0 -321
  50. guidellm/core/request.py +0 -44
  51. guidellm/core/result.py +0 -545
  52. guidellm/core/serializable.py +0 -169
  53. guidellm/executor/__init__.py +0 -10
  54. guidellm/executor/base.py +0 -213
  55. guidellm/executor/profile_generator.py +0 -343
  56. guidellm/main.py +0 -336
  57. guidellm/request/base.py +0 -194
  58. guidellm/request/emulated.py +0 -391
  59. guidellm/request/file.py +0 -76
  60. guidellm/request/transformers.py +0 -100
  61. guidellm/scheduler/base.py +0 -374
  62. guidellm/scheduler/load_generator.py +0 -196
  63. guidellm/utils/injector.py +0 -70
  64. guidellm/utils/progress.py +0 -196
  65. guidellm/utils/transformers.py +0 -151
  66. guidellm-0.1.0.dist-info/RECORD +0 -35
  67. guidellm-0.1.0.dist-info/entry_points.txt +0 -3
  68. {guidellm-0.1.0.dist-info → guidellm-0.2.0.dist-info/licenses}/LICENSE +0 -0
  69. {guidellm-0.1.0.dist-info → guidellm-0.2.0.dist-info}/top_level.txt +0 -0
guidellm/benchmark/output.py
@@ -0,0 +1,946 @@
+import csv
+import json
+import math
+from collections import OrderedDict
+from datetime import datetime
+from pathlib import Path
+from typing import Any, Literal, Optional, Union
+
+import yaml
+from pydantic import Field
+from rich.console import Console
+from rich.padding import Padding
+from rich.text import Text
+
+from guidellm.benchmark.benchmark import GenerativeBenchmark, GenerativeMetrics
+from guidellm.benchmark.profile import (
+    AsyncProfile,
+    ConcurrentProfile,
+    SweepProfile,
+    ThroughputProfile,
+)
+from guidellm.config import settings
+from guidellm.objects import (
+    DistributionSummary,
+    StandardBaseModel,
+    StatusDistributionSummary,
+)
+from guidellm.scheduler import strategy_display_str
+from guidellm.utils import Colors, split_text_list_by_length
+
+__all__ = [
+    "GenerativeBenchmarksReport",
+    "GenerativeBenchmarksConsole",
+]
+
+
+class GenerativeBenchmarksReport(StandardBaseModel):
+    """
+    A pydantic model representing a completed benchmark report.
+    Contains a list of benchmarks along with convenience methods for finalizing
+    and saving the report.
+    """
+
+    @staticmethod
+    def load_file(path: Union[str, Path]) -> "GenerativeBenchmarksReport":
+        """
+        Load a report from a file. The file type is determined by the file extension.
+        If the file is a directory, it expects a file named benchmarks.json under the
+        directory.
+
+        :param path: The path to load the report from.
+        :return: The loaded report.
+        """
+        path, type_ = GenerativeBenchmarksReport._file_setup(path)
+
+        if type_ == "json":
+            with path.open("r") as file:
+                model_dict = json.load(file)
+
+            return GenerativeBenchmarksReport.model_validate(model_dict)
+
+        if type_ == "yaml":
+            with path.open("r") as file:
+                model_dict = yaml.safe_load(file)
+
+            return GenerativeBenchmarksReport.model_validate(model_dict)
+
+        if type_ == "csv":
+            raise ValueError(f"CSV file type is not supported for loading: {path}.")
+
+        raise ValueError(f"Unsupported file type: {type_} for {path}.")
+
+    benchmarks: list[GenerativeBenchmark] = Field(
+        description="The list of completed benchmarks contained within the report.",
+        default_factory=list,
+    )
+
+    def set_sample_size(
+        self, sample_size: Optional[int]
+    ) -> "GenerativeBenchmarksReport":
+        """
+        Set the sample size for each benchmark in the report. In doing this, it will
+        reduce the contained requests of each benchmark to the sample size.
+        If sample size is None, it will return the report as is.
+
+        :param sample_size: The sample size to set for each benchmark.
+            If None, the report will be returned as is.
+        :return: The report with the sample size set for each benchmark.
+        """
+
+        if sample_size is not None:
+            for benchmark in self.benchmarks:
+                benchmark.set_sample_size(sample_size)
+
+        return self
+
+    def save_file(self, path: Union[str, Path]) -> Path:
+        """
+        Save the report to a file. The file type is determined by the file extension.
+        If the file is a directory, it will save the report to a file named
+        benchmarks.json under the directory.
+
+        :param path: The path to save the report to.
+        :return: The path to the saved report.
+        """
+        path, type_ = GenerativeBenchmarksReport._file_setup(path)
+
+        if type_ == "json":
+            return self.save_json(path)
+
+        if type_ == "yaml":
+            return self.save_yaml(path)
+
+        if type_ == "csv":
+            return self.save_csv(path)
+
+        raise ValueError(f"Unsupported file type: {type_} for {path}.")
+
+    def save_json(self, path: Union[str, Path]) -> Path:
+        """
+        Save the report to a JSON file containing all of the report data which is
+        reloadable using the pydantic model. If the file is a directory, it will save
+        the report to a file named benchmarks.json under the directory.
+
+        :param path: The path to save the report to.
+        :return: The path to the saved report.
+        """
+        path, type_ = GenerativeBenchmarksReport._file_setup(path, "json")
+
+        if type_ != "json":
+            raise ValueError(
+                f"Unsupported file type for saving a JSON: {type_} for {path}."
+            )
+
+        model_dict = self.model_dump()
+        model_json = json.dumps(model_dict)
+
+        with path.open("w") as file:
+            file.write(model_json)
+
+        return path
+
+    def save_yaml(self, path: Union[str, Path]) -> Path:
+        """
+        Save the report to a YAML file containing all of the report data which is
+        reloadable using the pydantic model. If the file is a directory, it will save
+        the report to a file named benchmarks.yaml under the directory.
+
+        :param path: The path to save the report to.
+        :return: The path to the saved report.
+        """
+
+        path, type_ = GenerativeBenchmarksReport._file_setup(path, "yaml")
+
+        if type_ != "yaml":
+            raise ValueError(
+                f"Unsupported file type for saving a YAML: {type_} for {path}."
+            )
+
+        model_dict = self.model_dump()
+        model_yaml = yaml.dump(model_dict)
+
+        with path.open("w") as file:
+            file.write(model_yaml)
+
+        return path
+
+    def save_csv(self, path: Union[str, Path]) -> Path:
+        """
+        Save the report to a CSV file containing the summarized statistics and values
+        for each report. Note, this data is not reloadable using the pydantic model.
+        If the file is a directory, it will save the report to a file named
+        benchmarks.csv under the directory.
+
+        :param path: The path to save the report to.
+        :return: The path to the saved report.
+        """
+        path, type_ = GenerativeBenchmarksReport._file_setup(path, "csv")
+
+        if type_ != "csv":
+            raise ValueError(
+                f"Unsupported file type for saving a CSV: {type_} for {path}."
+            )
+
+        with path.open("w", newline="") as file:
+            writer = csv.writer(file)
+            headers: list[str] = []
+            rows: list[list[Union[str, float, list[float]]]] = []
+
+            for benchmark in self.benchmarks:
+                benchmark_headers: list[str] = []
+                benchmark_values: list[Union[str, float, list[float]]] = []
+
+                desc_headers, desc_values = self._benchmark_desc_headers_and_values(
+                    benchmark
+                )
+                benchmark_headers += desc_headers
+                benchmark_values += desc_values
+
+                for status in StatusDistributionSummary.model_fields:
+                    status_headers, status_values = (
+                        self._benchmark_status_headers_and_values(benchmark, status)
+                    )
+                    benchmark_headers += status_headers
+                    benchmark_values += status_values
+
+                benchmark_extra_headers, benchmark_extra_values = (
+                    self._benchmark_extras_headers_and_values(benchmark)
+                )
+                benchmark_headers += benchmark_extra_headers
+                benchmark_values += benchmark_extra_values
+
+                if not headers:
+                    headers = benchmark_headers
+                rows.append(benchmark_values)
+
+            writer.writerow(headers)
+            for row in rows:
+                writer.writerow(row)
+
+        return path
+
+    @staticmethod
+    def _file_setup(
+        path: Union[str, Path],
+        default_file_type: Literal["json", "yaml", "csv"] = "json",
+    ) -> tuple[Path, Literal["json", "yaml", "csv"]]:
+        path = Path(path) if not isinstance(path, Path) else path
+
+        if path.is_dir():
+            path = path / f"benchmarks.{default_file_type}"
+
+        path.parent.mkdir(parents=True, exist_ok=True)
+        path_suffix = path.suffix.lower()
+
+        if path_suffix == ".json":
+            return path, "json"
+
+        if path_suffix in [".yaml", ".yml"]:
+            return path, "yaml"
+
+        if path_suffix in [".csv"]:
+            return path, "csv"
+
+        raise ValueError(f"Unsupported file extension: {path_suffix} for {path}.")
+
+    @staticmethod
+    def _benchmark_desc_headers_and_values(
+        benchmark: GenerativeBenchmark,
+    ) -> tuple[list[str], list[Union[str, float]]]:
+        headers = [
+            "Type",
+            "Run Id",
+            "Id",
+            "Name",
+            "Start Time",
+            "End Time",
+            "Duration",
+        ]
+        values: list[Union[str, float]] = [
+            benchmark.type_,
+            benchmark.run_id,
+            benchmark.id_,
+            strategy_display_str(benchmark.args.strategy),
+            datetime.fromtimestamp(benchmark.start_time).strftime("%Y-%m-%d %H:%M:%S"),
+            datetime.fromtimestamp(benchmark.end_time).strftime("%Y-%m-%d %H:%M:%S"),
+            benchmark.duration,
+        ]
+
+        if len(headers) != len(values):
+            raise ValueError("Headers and values length mismatch.")
+
+        return headers, values
+
+    @staticmethod
+    def _benchmark_extras_headers_and_values(
+        benchmark: GenerativeBenchmark,
+    ) -> tuple[list[str], list[str]]:
+        headers = ["Args", "Worker", "Request Loader", "Extras"]
+        values: list[str] = [
+            json.dumps(benchmark.args.model_dump()),
+            json.dumps(benchmark.worker.model_dump()),
+            json.dumps(benchmark.request_loader.model_dump()),
+            json.dumps(benchmark.extras),
+        ]
+
+        if len(headers) != len(values):
+            raise ValueError("Headers and values length mismatch.")
+
+        return headers, values
+
+    @staticmethod
+    def _benchmark_status_headers_and_values(
+        benchmark: GenerativeBenchmark, status: str
+    ) -> tuple[list[str], list[Union[float, list[float]]]]:
+        headers = [
+            f"{status.capitalize()} Requests",
+        ]
+        values = [
+            getattr(benchmark.request_totals, status),
+        ]
+
+        for metric in GenerativeMetrics.model_fields:
+            metric_headers, metric_values = (
+                GenerativeBenchmarksReport._benchmark_status_metrics_stats(
+                    benchmark, status, metric
+                )
+            )
+            headers += metric_headers
+            values += metric_values
+
+        if len(headers) != len(values):
+            raise ValueError("Headers and values length mismatch.")
+
+        return headers, values
+
+    @staticmethod
+    def _benchmark_status_metrics_stats(
+        benchmark: GenerativeBenchmark,
+        status: str,
+        metric: str,
+    ) -> tuple[list[str], list[Union[float, list[float]]]]:
+        status_display = status.capitalize()
+        metric_display = metric.replace("_", " ").capitalize()
+        status_dist_summary: StatusDistributionSummary = getattr(
+            benchmark.metrics, metric
+        )
+        dist_summary: DistributionSummary = getattr(status_dist_summary, status)
+        headers = [
+            f"{status_display} {metric_display} mean",
+            f"{status_display} {metric_display} median",
+            f"{status_display} {metric_display} std dev",
+            (
+                f"{status_display} {metric_display} "
+                "[min, 0.1, 1, 5, 10, 25, 75, 90, 95, 99, max]"
+            ),
+        ]
+        values: list[Union[float, list[float]]] = [
+            dist_summary.mean,
+            dist_summary.median,
+            dist_summary.std_dev,
+            [
+                dist_summary.min,
+                dist_summary.percentiles.p001,
+                dist_summary.percentiles.p01,
+                dist_summary.percentiles.p05,
+                dist_summary.percentiles.p10,
+                dist_summary.percentiles.p25,
+                dist_summary.percentiles.p75,
+                dist_summary.percentiles.p90,
+                dist_summary.percentiles.p95,
+                dist_summary.percentiles.p99,
+                dist_summary.max,
+            ],
+        ]
+
+        if len(headers) != len(values):
+            raise ValueError("Headers and values length mismatch.")
+
+        return headers, values
+
+
+class GenerativeBenchmarksConsole:
+    """
+    A class for outputting progress and benchmark results to the console.
+    Utilizes the rich library for formatting, enabling colored and styled output.
+    """
+
+    def __init__(self, enabled: bool = True):
+        """
+        :param enabled: Whether to enable console output. Defaults to True.
+            If False, all console output will be suppressed.
+        """
+        self.enabled = enabled
+        self.benchmarks: Optional[list[GenerativeBenchmark]] = None
+        self.console = Console()
+
+    @property
+    def benchmarks_profile_str(self) -> str:
+        """
+        :return: A string representation of the profile used for the benchmarks.
+        """
+        profile = self.benchmarks[0].args.profile if self.benchmarks else None
+
+        if profile is None:
+            return "None"
+
+        profile_args = OrderedDict(
+            {
+                "type": profile.type_,
+                "strategies": profile.strategy_types,
+            }
+        )
+
+        if isinstance(profile, ConcurrentProfile):
+            profile_args["streams"] = str(profile.streams)
+        elif isinstance(profile, ThroughputProfile):
+            profile_args["max_concurrency"] = str(profile.max_concurrency)
+        elif isinstance(profile, AsyncProfile):
+            profile_args["max_concurrency"] = str(profile.max_concurrency)
+            profile_args["rate"] = str(profile.rate)
+            profile_args["initial_burst"] = str(profile.initial_burst)
+        elif isinstance(profile, SweepProfile):
+            profile_args["sweep_size"] = str(profile.sweep_size)
+
+        return ", ".join(f"{key}={value}" for key, value in profile_args.items())
+
+    @property
+    def benchmarks_args_str(self) -> str:
+        """
+        :return: A string representation of the arguments used for the benchmarks.
+        """
+        args = self.benchmarks[0].args if self.benchmarks else None
+
+        if args is None:
+            return "None"
+
+        args_dict = OrderedDict(
+            {
+                "max_number": args.max_number,
+                "max_duration": args.max_duration,
+                "warmup_number": args.warmup_number,
+                "warmup_duration": args.warmup_duration,
+                "cooldown_number": args.cooldown_number,
+                "cooldown_duration": args.cooldown_duration,
+            }
+        )
+
+        return ", ".join(f"{key}={value}" for key, value in args_dict.items())
+
+    @property
+    def benchmarks_worker_desc_str(self) -> str:
+        """
+        :return: A string representation of the worker used for the benchmarks.
+        """
+        return str(self.benchmarks[0].worker) if self.benchmarks else "None"
+
+    @property
+    def benchmarks_request_loader_desc_str(self) -> str:
+        """
+        :return: A string representation of the request loader used for the benchmarks.
+        """
+        return str(self.benchmarks[0].request_loader) if self.benchmarks else "None"
+
+    @property
+    def benchmarks_extras_str(self) -> str:
+        """
+        :return: A string representation of the extras used for the benchmarks.
+        """
+        extras = self.benchmarks[0].extras if self.benchmarks else None
+
+        if not extras:
+            return "None"
+
+        return ", ".join(f"{key}={value}" for key, value in extras.items())
+
+    def print_section_header(self, title: str, indent: int = 0, new_lines: int = 2):
+        """
+        Print out a styled section header to the console.
+        The title is underlined, bolded, and colored with the INFO color.
+
+        :param title: The title of the section.
+        :param indent: The number of spaces to indent the title.
+            Defaults to 0.
+        :param new_lines: The number of new lines to print before the title.
+            Defaults to 2.
+        """
+        self.print_line(
+            value=f"{title}:",
+            style=f"bold underline {Colors.INFO}",
+            indent=indent,
+            new_lines=new_lines,
+        )
+
+    def print_labeled_line(
+        self, label: str, value: str, indent: int = 4, new_lines: int = 0
+    ):
+        """
+        Print out a styled, labeled line (label: value) to the console.
+        The label is bolded and colored with the INFO color,
+        and the value is italicized.
+
+        :param label: The label of the line.
+        :param value: The value of the line.
+        :param indent: The number of spaces to indent the line.
+            Defaults to 4.
+        :param new_lines: The number of new lines to print before the line.
+            Defaults to 0.
+        """
+        self.print_line(
+            value=[label + ":", value],
+            style=["bold " + Colors.INFO, "italic"],
+            new_lines=new_lines,
+            indent=indent,
+        )
+
+    def print_line(
+        self,
+        value: Union[str, list[str]],
+        style: Union[str, list[str]] = "",
+        indent: int = 0,
+        new_lines: int = 0,
+    ):
+        """
+        Print out a value to the console as a line with optional indentation.
+
+        :param value: The value to print.
+        :param style: The style to apply to the value.
+            Defaults to none.
+        :param indent: The number of spaces to indent the line.
+            Defaults to 0.
+        :param new_lines: The number of new lines to print before the value.
+            Defaults to 0.
+        """
+        if not self.enabled:
+            return
+
+        text = Text()
+
+        for _ in range(new_lines):
+            text.append("\n")
+
+        if not isinstance(value, list):
+            value = [value]
+
+        if not isinstance(style, list):
+            style = [style for _ in range(len(value))]
+
+        if len(value) != len(style):
+            raise ValueError(
+                f"Value and style length mismatch. Value length: {len(value)}, "
+                f"Style length: {len(style)}."
+            )
+
+        for val, sty in zip(value, style):
+            text.append(val, style=sty)
+
+        self.console.print(Padding.indent(text, indent))
+
+    def print_table(
+        self,
+        headers: list[str],
+        rows: list[list[Any]],
+        title: str,
+        sections: Optional[dict[str, tuple[int, int]]] = None,
+        max_char_per_col: int = 2**10,
+        indent: int = 0,
+        new_lines: int = 2,
+    ):
+        """
+        Print a table to the console with the given headers and rows.
+
+        :param headers: The headers of the table.
+        :param rows: The rows of the table.
+        :param title: The title of the table.
+        :param sections: The sections of the table grouping columns together.
+            This is a mapping of the section display name to a tuple of the start and
+            end column indices. If None, no sections are added (default).
+        :param max_char_per_col: The maximum number of characters per column.
+        :param indent: The number of spaces to indent the table.
+            Defaults to 0.
+        :param new_lines: The number of new lines to print before the table.
+            Defaults to 2.
+        """
+
+        if rows and any(len(row) != len(headers) for row in rows):
+            raise ValueError(
+                f"Headers and rows length mismatch. Headers length: {len(headers)}, "
+                f"Row length: {len(rows[0]) if rows else 'N/A'}."
+            )
+
+        max_characters_per_column = self.calculate_max_chars_per_column(
+            headers, rows, sections, max_char_per_col
+        )
+
+        self.print_section_header(title, indent=indent, new_lines=new_lines)
+        self.print_table_divider(
+            max_characters_per_column, include_separators=False, indent=indent
+        )
+        if sections:
+            self.print_table_sections(
+                sections, max_characters_per_column, indent=indent
+            )
+        self.print_table_row(
+            split_text_list_by_length(headers, max_characters_per_column),
+            style=f"bold {Colors.INFO}",
+            indent=indent,
+        )
+        self.print_table_divider(
+            max_characters_per_column, include_separators=True, indent=indent
+        )
+        for row in rows:
+            self.print_table_row(
+                split_text_list_by_length(row, max_characters_per_column),
+                style="italic",
+                indent=indent,
+            )
+        self.print_table_divider(
+            max_characters_per_column, include_separators=False, indent=indent
+        )
+
+    def calculate_max_chars_per_column(
+        self,
+        headers: list[str],
+        rows: list[list[Any]],
+        sections: Optional[dict[str, tuple[int, int]]],
+        max_char_per_col: int,
+    ) -> list[int]:
+        """
+        Calculate the maximum number of characters per column in the table.
+        This is done by checking the length of the headers, rows, and optional sections
+        to ensure all columns are accounted for and spaced correctly.
+
+        :param headers: The headers of the table.
+        :param rows: The rows of the table.
+        :param sections: The sections of the table grouping columns together.
+            This is a mapping of the section display name to a tuple of the start and
+            end column indices. If None, no sections are added (default).
+        :param max_char_per_col: The maximum number of characters per column.
+        :return: A list of the maximum number of characters per column.
+        """
+        max_characters_per_column = []
+        for ind in range(len(headers)):
+            max_characters_per_column.append(min(len(headers[ind]), max_char_per_col))
+
+            for row in rows:
+                max_characters_per_column[ind] = max(
+                    max_characters_per_column[ind], len(str(row[ind]))
+                )
+
+        if not sections:
+            return max_characters_per_column
+
+        for section in sections:
+            start_col, end_col = sections[section]
+            min_section_len = len(section) + (
+                end_col - start_col
+            )  # ensure we have enough space for separators
+            chars_in_columns = sum(
+                max_characters_per_column[start_col : end_col + 1]
+            ) + 2 * (end_col - start_col)
+            if min_section_len > chars_in_columns:
+                add_chars_per_col = math.ceil(
+                    (min_section_len - chars_in_columns) / (end_col - start_col + 1)
+                )
+                for col in range(start_col, end_col + 1):
+                    max_characters_per_column[col] += add_chars_per_col
+
+        return max_characters_per_column
+
+    def print_table_divider(
+        self, max_chars_per_column: list[int], include_separators: bool, indent: int = 0
+    ):
+        """
+        Print a divider line for the table (top and bottom of table with '=' characters)
+
+        :param max_chars_per_column: The maximum number of characters per column.
+        :param include_separators: Whether to include separators between columns.
+        :param indent: The number of spaces to indent the line.
+            Defaults to 0.
+        """
+        if include_separators:
+            columns = [
+                settings.table_headers_border_char * max_chars
+                + settings.table_column_separator_char
+                + settings.table_headers_border_char
+                for max_chars in max_chars_per_column
+            ]
+        else:
+            columns = [
+                settings.table_border_char * (max_chars + 2)
+                for max_chars in max_chars_per_column
+            ]
+
+        columns[-1] = columns[-1][:-2]
+        self.print_line(value=columns, style=Colors.INFO, indent=indent)
+
+    def print_table_sections(
+        self,
+        sections: dict[str, tuple[int, int]],
+        max_chars_per_column: list[int],
+        indent: int = 0,
+    ):
+        """
+        Print the sections of the table with corresponding separators to the columns
+        the sections are mapped to, to ensure it is compliant with a CSV format.
+        For example, a section named "Metadata" with columns 0-3 will print this:
+        Metadata ,,,,
+        Where the spaces plus the separators at the end will span the columns 0-3.
+        All columns must be accounted for in the sections.
+
+        :param sections: The sections of the table.
+        :param max_chars_per_column: The maximum number of characters per column.
+        :param indent: The number of spaces to indent the line.
+            Defaults to 0.
+        """
+        section_tuples = [(start, end, name) for name, (start, end) in sections.items()]
+        section_tuples.sort(key=lambda x: x[0])
+
+        if any(start > end for start, end, _ in section_tuples):
+            raise ValueError(f"Invalid section ranges: {section_tuples}")
+
+        if (
+            any(
+                section_tuples[ind][1] + 1 != section_tuples[ind + 1][0]
+                for ind in range(len(section_tuples) - 1)
+            )
+            or section_tuples[0][0] != 0
+            or section_tuples[-1][1] != len(max_chars_per_column) - 1
+        ):
+            raise ValueError(f"Invalid section ranges: {section_tuples}")
+
+        line_values = []
+        line_styles = []
+        for section, (start_col, end_col) in sections.items():
+            section_length = sum(max_chars_per_column[start_col : end_col + 1]) + 2 * (
+                end_col - start_col + 1
+            )
+            num_separators = end_col - start_col
+            line_values.append(section)
+            line_styles.append("bold " + Colors.INFO)
+            line_values.append(
+                " " * (section_length - len(section) - num_separators - 2)
+            )
+            line_styles.append("")
+            line_values.append(settings.table_column_separator_char * num_separators)
+            line_styles.append("")
+            line_values.append(settings.table_column_separator_char + " ")
+            line_styles.append(Colors.INFO)
+        line_values = line_values[:-1]
+        line_styles = line_styles[:-1]
+        self.print_line(value=line_values, style=line_styles, indent=indent)
+
+    def print_table_row(
+        self, column_lines: list[list[str]], style: str, indent: int = 0
+    ):
+        """
+        Print a single row of a table to the console.
+
+        :param column_lines: The lines of text to print for each column.
+        :param indent: The number of spaces to indent the line.
+            Defaults to 0.
+        """
+        for row in range(len(column_lines[0])):
+            print_line = []
+            print_styles = []
+            for column in range(len(column_lines)):
+                print_line.extend(
+                    [
+                        column_lines[column][row],
+                        settings.table_column_separator_char,
+                        " ",
+                    ]
+                )
+                print_styles.extend([style, Colors.INFO, ""])
+            print_line = print_line[:-2]
+            print_styles = print_styles[:-2]
+            self.print_line(value=print_line, style=print_styles, indent=indent)
+
+    def print_benchmarks_metadata(self):
+        """
+        Print out the metadata of the benchmarks to the console including the run id,
+        duration, profile, args, worker, request loader, and extras.
+        """
+
+        if not self.benchmarks:
+            raise ValueError(
+                "No benchmarks to print metadata for. Please set benchmarks first."
+            )
+
+        start_time = self.benchmarks[0].run_stats.start_time
+        end_time = self.benchmarks[-1].run_stats.end_time
+        duration = end_time - start_time
+
+        self.print_section_header(title="Benchmarks Metadata")
+        self.print_labeled_line(
+            label="Run id",
+            value=str(self.benchmarks[0].run_id),
+        )
+        self.print_labeled_line(
+            label="Duration",
+            value=f"{duration:.1f} seconds",
+        )
+        self.print_labeled_line(
+            label="Profile",
+            value=self.benchmarks_profile_str,
+        )
+        self.print_labeled_line(
+            label="Args",
+            value=self.benchmarks_args_str,
+        )
+        self.print_labeled_line(
+            label="Worker",
+            value=self.benchmarks_worker_desc_str,
+        )
+        self.print_labeled_line(
+            label="Request Loader",
+            value=self.benchmarks_request_loader_desc_str,
+        )
+        self.print_labeled_line(
+            label="Extras",
+            value=self.benchmarks_extras_str,
+        )
+
+    def print_benchmarks_info(self):
+        """
+        Print out the benchmark information to the console including the start time,
+        end time, duration, request totals, and token totals for each benchmark.
+        """
+        if not self.benchmarks:
+            raise ValueError(
+                "No benchmarks to print info for. Please set benchmarks first."
+            )
+
+        sections = {
+            "Metadata": (0, 3),
+            "Requests Made": (4, 6),
+            "Prompt Tok/Req": (7, 9),
+            "Output Tok/Req": (10, 12),
+            "Prompt Tok Total": (13, 15),
+            "Output Tok Total": (16, 18),
+        }
+        headers = [
+            "Benchmark",
+            "Start Time",
+            "End Time",
+            "Duration (s)",
+            "Comp",
+            "Inc",
+            "Err",
+            "Comp",
+            "Inc",
+            "Err",
+            "Comp",
+            "Inc",
+            "Err",
+            "Comp",
+            "Inc",
+            "Err",
+            "Comp",
+            "Inc",
+            "Err",
+        ]
+        rows = []
+
+        for benchmark in self.benchmarks:
+            rows.append(
+                [
+                    strategy_display_str(benchmark.args.strategy),
+                    f"{datetime.fromtimestamp(benchmark.start_time).strftime('%H:%M:%S')}",
+                    f"{datetime.fromtimestamp(benchmark.end_time).strftime('%H:%M:%S')}",
+                    f"{(benchmark.end_time - benchmark.start_time):.1f}",
+                    f"{benchmark.request_totals.successful:.0f}",
+                    f"{benchmark.request_totals.incomplete:.0f}",
+                    f"{benchmark.request_totals.errored:.0f}",
+                    f"{benchmark.metrics.prompt_token_count.successful.mean:.1f}",
+                    f"{benchmark.metrics.prompt_token_count.incomplete.mean:.1f}",
+                    f"{benchmark.metrics.prompt_token_count.errored.mean:.1f}",
+                    f"{benchmark.metrics.output_token_count.successful.mean:.1f}",
+                    f"{benchmark.metrics.output_token_count.incomplete.mean:.1f}",
+                    f"{benchmark.metrics.output_token_count.errored.mean:.1f}",
+                    f"{benchmark.metrics.prompt_token_count.successful.total_sum:.0f}",
+                    f"{benchmark.metrics.prompt_token_count.incomplete.total_sum:.0f}",
+                    f"{benchmark.metrics.prompt_token_count.errored.total_sum:.0f}",
+                    f"{benchmark.metrics.output_token_count.successful.total_sum:.0f}",
+                    f"{benchmark.metrics.output_token_count.incomplete.total_sum:.0f}",
+                    f"{benchmark.metrics.output_token_count.errored.total_sum:.0f}",
+                ]
+            )
+
+        self.print_table(
+            headers=headers, rows=rows, title="Benchmarks Info", sections=sections
+        )
+
+    def print_benchmarks_stats(self):
+        """
+        Print out the benchmark statistics to the console including the requests per
+        second, request concurrency, output tokens per second, total tokens per second,
+        request latency, time to first token, inter token latency, and time per output
+        token for each benchmark.
+        """
+        if not self.benchmarks:
+            raise ValueError(
+                "No benchmarks to print stats for. Please set benchmarks first."
+            )
+
+        sections = {
+            "Metadata": (0, 0),
+            "Request Stats": (1, 2),
+            "Out Tok/sec": (3, 3),
+            "Tot Tok/sec": (4, 4),
+            "Req Latency (ms)": (5, 7),
+            "TTFT (ms)": (8, 10),
+            "ITL (ms)": (11, 13),
+            "TPOT (ms)": (14, 16),
+        }
+        headers = [
+            "Benchmark",
+            "Per Second",
+            "Concurrency",
+            "mean",
+            "mean",
+            "mean",
+            "median",
+            "p99",
+            "mean",
+            "median",
+            "p99",
+            "mean",
+            "median",
+            "p99",
+            "mean",
+            "median",
+            "p99",
+        ]
+        rows = []
+
+        for benchmark in self.benchmarks:
+            rows.append(
+                [
+                    strategy_display_str(benchmark.args.strategy),
+                    f"{benchmark.metrics.requests_per_second.successful.mean:.2f}",
+                    f"{benchmark.metrics.request_concurrency.successful.mean:.2f}",
+                    f"{benchmark.metrics.output_tokens_per_second.successful.mean:.1f}",
+                    f"{benchmark.metrics.tokens_per_second.successful.mean:.1f}",
+                    f"{benchmark.metrics.request_latency.successful.mean:.2f}",
+                    f"{benchmark.metrics.request_latency.successful.median:.2f}",
+                    f"{benchmark.metrics.request_latency.successful.percentiles.p99:.2f}",
+                    f"{benchmark.metrics.time_to_first_token_ms.successful.mean:.1f}",
+                    f"{benchmark.metrics.time_to_first_token_ms.successful.median:.1f}",
+                    f"{benchmark.metrics.time_to_first_token_ms.successful.percentiles.p99:.1f}",
+                    f"{benchmark.metrics.inter_token_latency_ms.successful.mean:.1f}",
+                    f"{benchmark.metrics.inter_token_latency_ms.successful.median:.1f}",
+                    f"{benchmark.metrics.inter_token_latency_ms.successful.percentiles.p99:.1f}",
+                    f"{benchmark.metrics.time_per_output_token_ms.successful.mean:.1f}",
+                    f"{benchmark.metrics.time_per_output_token_ms.successful.median:.1f}",
+                    f"{benchmark.metrics.time_per_output_token_ms.successful.percentiles.p99:.1f}",
+                ]
+            )
+
+        self.print_table(
+            headers=headers,
+            rows=rows,
+            title="Benchmarks Stats",
+            sections=sections,
+        )
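
For reference, the two classes exported by this new module can be exercised as follows. This is a minimal sketch based only on the methods visible in the diff above; the results/ paths and the sample size of 100 are illustrative values, not defaults taken from guidellm.

from pathlib import Path

from guidellm.benchmark.output import (
    GenerativeBenchmarksConsole,
    GenerativeBenchmarksReport,
)

# Load a previously saved report; a directory resolves to <dir>/benchmarks.json.
report = GenerativeBenchmarksReport.load_file(Path("results"))

# Optionally trim the stored requests per benchmark (the value here is arbitrary).
report.set_sample_size(100)

# Re-save in the other supported formats; CSV is export-only and cannot be reloaded.
report.save_yaml(Path("results") / "benchmarks.yaml")
report.save_csv(Path("results") / "benchmarks.csv")

# Render the same benchmarks as rich console tables.
console = GenerativeBenchmarksConsole(enabled=True)
console.benchmarks = report.benchmarks
console.print_benchmarks_metadata()
console.print_benchmarks_info()
console.print_benchmarks_stats()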