guidellm 0.1.0__py3-none-any.whl → 0.2.0.dev0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of guidellm might be problematic.

Files changed (69)
  1. guidellm/__init__.py +38 -6
  2. guidellm/__main__.py +294 -0
  3. guidellm/backend/__init__.py +19 -6
  4. guidellm/backend/backend.py +238 -0
  5. guidellm/backend/openai.py +532 -122
  6. guidellm/backend/response.py +132 -0
  7. guidellm/benchmark/__init__.py +73 -0
  8. guidellm/benchmark/aggregator.py +760 -0
  9. guidellm/benchmark/benchmark.py +838 -0
  10. guidellm/benchmark/benchmarker.py +334 -0
  11. guidellm/benchmark/entrypoints.py +141 -0
  12. guidellm/benchmark/output.py +946 -0
  13. guidellm/benchmark/profile.py +409 -0
  14. guidellm/benchmark/progress.py +720 -0
  15. guidellm/config.py +34 -56
  16. guidellm/data/__init__.py +4 -0
  17. guidellm/data/prideandprejudice.txt.gz +0 -0
  18. guidellm/dataset/__init__.py +22 -0
  19. guidellm/dataset/creator.py +213 -0
  20. guidellm/dataset/entrypoints.py +42 -0
  21. guidellm/dataset/file.py +90 -0
  22. guidellm/dataset/hf_datasets.py +62 -0
  23. guidellm/dataset/in_memory.py +132 -0
  24. guidellm/dataset/synthetic.py +262 -0
  25. guidellm/objects/__init__.py +18 -0
  26. guidellm/objects/pydantic.py +60 -0
  27. guidellm/objects/statistics.py +947 -0
  28. guidellm/request/__init__.py +12 -10
  29. guidellm/request/loader.py +281 -0
  30. guidellm/request/request.py +79 -0
  31. guidellm/scheduler/__init__.py +51 -3
  32. guidellm/scheduler/result.py +137 -0
  33. guidellm/scheduler/scheduler.py +382 -0
  34. guidellm/scheduler/strategy.py +493 -0
  35. guidellm/scheduler/types.py +7 -0
  36. guidellm/scheduler/worker.py +511 -0
  37. guidellm/utils/__init__.py +16 -29
  38. guidellm/utils/colors.py +8 -0
  39. guidellm/utils/hf_transformers.py +35 -0
  40. guidellm/utils/random.py +43 -0
  41. guidellm/utils/text.py +118 -357
  42. {guidellm-0.1.0.dist-info → guidellm-0.2.0.dev0.dist-info}/METADATA +96 -79
  43. guidellm-0.2.0.dev0.dist-info/RECORD +48 -0
  44. {guidellm-0.1.0.dist-info → guidellm-0.2.0.dev0.dist-info}/WHEEL +1 -1
  45. guidellm-0.2.0.dev0.dist-info/entry_points.txt +2 -0
  46. guidellm/backend/base.py +0 -320
  47. guidellm/core/__init__.py +0 -24
  48. guidellm/core/distribution.py +0 -190
  49. guidellm/core/report.py +0 -321
  50. guidellm/core/request.py +0 -44
  51. guidellm/core/result.py +0 -545
  52. guidellm/core/serializable.py +0 -169
  53. guidellm/executor/__init__.py +0 -10
  54. guidellm/executor/base.py +0 -213
  55. guidellm/executor/profile_generator.py +0 -343
  56. guidellm/main.py +0 -336
  57. guidellm/request/base.py +0 -194
  58. guidellm/request/emulated.py +0 -391
  59. guidellm/request/file.py +0 -76
  60. guidellm/request/transformers.py +0 -100
  61. guidellm/scheduler/base.py +0 -374
  62. guidellm/scheduler/load_generator.py +0 -196
  63. guidellm/utils/injector.py +0 -70
  64. guidellm/utils/progress.py +0 -196
  65. guidellm/utils/transformers.py +0 -151
  66. guidellm-0.1.0.dist-info/RECORD +0 -35
  67. guidellm-0.1.0.dist-info/entry_points.txt +0 -3
  68. {guidellm-0.1.0.dist-info → guidellm-0.2.0.dev0.dist-info/licenses}/LICENSE +0 -0
  69. {guidellm-0.1.0.dist-info → guidellm-0.2.0.dev0.dist-info}/top_level.txt +0 -0
guidellm/objects/statistics.py
@@ -0,0 +1,947 @@
+ import math
+ import time as timer
+ from collections import defaultdict
+ from typing import Any, Literal, Optional
+
+ import numpy as np
+ from pydantic import Field, computed_field
+
+ from guidellm.objects.pydantic import StandardBaseModel, StatusBreakdown
+
+ __all__ = [
+     "Percentiles",
+     "DistributionSummary",
+     "StatusDistributionSummary",
+     "RunningStats",
+     "TimeRunningStats",
+ ]
+
+
+ class Percentiles(StandardBaseModel):
+     """
+     A pydantic model representing the standard percentiles of a distribution.
+     """
+
+     p001: float = Field(
+         description="The 0.1th percentile of the distribution.",
+     )
+     p01: float = Field(
+         description="The 1st percentile of the distribution.",
+     )
+     p05: float = Field(
+         description="The 5th percentile of the distribution.",
+     )
+     p10: float = Field(
+         description="The 10th percentile of the distribution.",
+     )
+     p25: float = Field(
+         description="The 25th percentile of the distribution.",
+     )
+     p75: float = Field(
+         description="The 75th percentile of the distribution.",
+     )
+     p90: float = Field(
+         description="The 90th percentile of the distribution.",
+     )
+     p95: float = Field(
+         description="The 95th percentile of the distribution.",
+     )
+     p99: float = Field(
+         description="The 99th percentile of the distribution.",
+     )
+     p999: float = Field(
+         description="The 99.9th percentile of the distribution.",
+     )
+
+
+ class DistributionSummary(StandardBaseModel):
+     """
+     A pydantic model representing a statistical summary for a given
+     distribution of numerical values.
+     """
+
+     mean: float = Field(
+         description="The mean/average of the distribution.",
+     )
+     median: float = Field(
+         description="The median of the distribution.",
+     )
+     mode: float = Field(
+         description="The mode of the distribution.",
+     )
+     variance: float = Field(
+         description="The variance of the distribution.",
+     )
+     std_dev: float = Field(
+         description="The standard deviation of the distribution.",
+     )
+     min: float = Field(
+         description="The minimum value of the distribution.",
+     )
+     max: float = Field(
+         description="The maximum value of the distribution.",
+     )
+     count: int = Field(
+         description="The number of values in the distribution.",
+     )
+     total_sum: float = Field(
+         description="The total sum of the values in the distribution.",
+     )
+     percentiles: Percentiles = Field(
+         description="The percentiles of the distribution.",
+     )
+     cumulative_distribution_function: Optional[list[tuple[float, float]]] = Field(
+         description="The cumulative distribution function (CDF) of the distribution.",
+         default=None,
+     )
+
+     @staticmethod
+     def from_distribution_function(
+         distribution: list[tuple[float, float]],
+         include_cdf: bool = False,
+     ) -> "DistributionSummary":
+         """
+         Create a statistical summary for a given distribution of weighted numerical
+         values or a probability distribution function (PDF).
+         1. If the distribution is a PDF, it is expected to be a list of tuples
+             where each tuple contains (value, probability). The sum of the
+             probabilities should be 1. If it is not, it will be normalized.
+         2. If the distribution is a values distribution function, it is expected
+             to be a list of tuples where each tuple contains (value, weight).
+             The weights are normalized to a probability distribution function.
+
+         :param distribution: A list of tuples representing the distribution.
+             Each tuple contains (value, weight) or (value, probability).
+         :param include_cdf: Whether to include the calculated cumulative distribution
+             function (CDF) in the output DistributionSummary.
+         :return: An instance of DistributionSummary with calculated values.
+         """
+         values, weights = zip(*distribution) if distribution else ([], [])
+         values = np.array(values)  # type: ignore[assignment]
+         weights = np.array(weights)  # type: ignore[assignment]
+
+         # create the PDF
+         probabilities = weights / np.sum(weights)  # type: ignore[operator]
+         pdf = np.column_stack((values, probabilities))
+         pdf = pdf[np.argsort(pdf[:, 0])]
+         values = pdf[:, 0]  # type: ignore[assignment]
+         probabilities = pdf[:, 1]
+
+         # calculate the CDF
+         cumulative_probabilities = np.cumsum(probabilities)
+         cdf = np.column_stack((values, cumulative_probabilities))
+
+         # calculate statistics
+         mean = np.sum(values * probabilities).item()  # type: ignore[attr-defined]
+         median = cdf[np.argmax(cdf[:, 1] >= 0.5), 0].item() if len(cdf) > 0 else 0  # noqa: PLR2004
+         mode = values[np.argmax(probabilities)].item() if len(values) > 0 else 0  # type: ignore[call-overload]
+         variance = np.sum((values - mean) ** 2 * probabilities).item()  # type: ignore[attr-defined]
+         std_dev = math.sqrt(variance)
+         minimum = values[0].item() if len(values) > 0 else 0
+         maximum = values[-1].item() if len(values) > 0 else 0
+         count = len(values)
+         total_sum = np.sum(values).item()  # type: ignore[attr-defined]
+
+         return DistributionSummary(
+             mean=mean,
+             median=median,
+             mode=mode,
+             variance=variance,
+             std_dev=std_dev,
+             min=minimum,
+             max=maximum,
+             count=count,
+             total_sum=total_sum,
+             percentiles=(
+                 Percentiles(
+                     p001=cdf[np.argmax(cdf[:, 1] >= 0.001), 0].item(),  # noqa: PLR2004
+                     p01=cdf[np.argmax(cdf[:, 1] >= 0.01), 0].item(),  # noqa: PLR2004
+                     p05=cdf[np.argmax(cdf[:, 1] >= 0.05), 0].item(),  # noqa: PLR2004
+                     p10=cdf[np.argmax(cdf[:, 1] >= 0.1), 0].item(),  # noqa: PLR2004
+                     p25=cdf[np.argmax(cdf[:, 1] >= 0.25), 0].item(),  # noqa: PLR2004
+                     p75=cdf[np.argmax(cdf[:, 1] >= 0.75), 0].item(),  # noqa: PLR2004
+                     p90=cdf[np.argmax(cdf[:, 1] >= 0.9), 0].item(),  # noqa: PLR2004
+                     p95=cdf[np.argmax(cdf[:, 1] >= 0.95), 0].item(),  # noqa: PLR2004
+                     p99=cdf[np.argmax(cdf[:, 1] >= 0.99), 0].item(),  # noqa: PLR2004
+                     p999=cdf[np.argmax(cdf[:, 1] >= 0.999), 0].item(),  # noqa: PLR2004
+                 )
+                 if len(cdf) > 0
+                 else Percentiles(
+                     p001=0,
+                     p01=0,
+                     p05=0,
+                     p10=0,
+                     p25=0,
+                     p75=0,
+                     p90=0,
+                     p95=0,
+                     p99=0,
+                     p999=0,
+                 )
+             ),
+             cumulative_distribution_function=cdf.tolist() if include_cdf else None,
+         )
+
+     @staticmethod
+     def from_values(
+         values: list[float],
+         weights: Optional[list[float]] = None,
+         include_cdf: bool = False,
+     ) -> "DistributionSummary":
+         """
+         Create a statistical summary for a given distribution of numerical values.
+         This is a wrapper around from_distribution_function to handle the optional case
+         of including weights for the values. If weights are not provided, they are
+         automatically set to 1.0 for each value, so each value is equally weighted.
+
+         :param values: A list of numerical values representing the distribution.
+         :param weights: A list of weights for each value in the distribution.
+             If not provided, all values are equally weighted.
+         :param include_cdf: Whether to include the calculated cumulative distribution
+             function (CDF) in the output DistributionSummary.
+         """
+         if weights is None:
+             weights = [1.0] * len(values)
+
+         if len(values) != len(weights):
+             raise ValueError(
+                 "The length of values and weights must be the same.",
+             )
+
+         return DistributionSummary.from_distribution_function(
+             distribution=list(zip(values, weights)),
+             include_cdf=include_cdf,
+         )
+
+     @staticmethod
+     def from_request_times(
+         requests: list[tuple[float, float]],
+         distribution_type: Literal["concurrency", "rate"],
+         include_cdf: bool = False,
+         epsilon: float = 1e-6,
+     ) -> "DistributionSummary":
+         """
+         Create a statistical summary for a given distribution of request times.
+         Specifically, this is used to measure concurrency or rate of requests
+         given an input list containing the start and end time of each request.
+         This will first convert the request times into a distribution function
+         and then calculate the statistics with from_distribution_function.
+
+         :param requests: A list of tuples representing the start and end times of
+             each request. Example: [(start_1, end_1), (start_2, end_2), ...]
+         :param distribution_type: The type of distribution to calculate.
+             Either "concurrency" or "rate".
+         :param include_cdf: Whether to include the calculated cumulative distribution
+             function (CDF) in the output DistributionSummary.
+         :param epsilon: The epsilon value for merging close events.
+         :return: An instance of DistributionSummary with calculated values.
+         """
+         if distribution_type == "concurrency":
+             # convert to delta changes based on when requests were running
+             time_deltas: dict[float, int] = defaultdict(int)
+             for start, end in requests:
+                 time_deltas[start] += 1
+                 time_deltas[end] -= 1
+
+             # convert to the events over time measuring concurrency changes
+             events = []
+             active = 0
+
+             for time, delta in sorted(time_deltas.items()):
+                 active += delta
+                 events.append((time, active))
+         elif distribution_type == "rate":
+             # convert to events for when requests finished
+             global_start = min(start for start, _ in requests) if requests else 0
+             events = [(global_start, 1)] + [(end, 1) for _, end in requests]
+         else:
+             raise ValueError(
+                 f"Invalid distribution_type '{distribution_type}'. "
+                 "Must be 'concurrency' or 'rate'."
+             )
+
+         # combine any events that are very close together
+         flattened_events: list[tuple[float, float]] = []
+         for time, val in sorted(events):
+             last_time, last_val = (
+                 flattened_events[-1] if flattened_events else (None, None)
+             )
+
+             if (
+                 last_time is not None
+                 and last_val is not None
+                 and abs(last_time - time) <= epsilon
+             ):
+                 flattened_events[-1] = (last_time, last_val + val)
+             else:
+                 flattened_events.append((time, val))
+
+         # convert to value distribution function
+         distribution: dict[float, float] = defaultdict(float)
+
+         for ind in range(len(flattened_events) - 1):
+             start_time, value = flattened_events[ind]
+             end_time, _ = flattened_events[ind + 1]
+             duration = end_time - start_time
+
+             if distribution_type == "concurrency":
+                 # weight the concurrency value by the duration
+                 distribution[value] += duration
+             elif distribution_type == "rate":
+                 # weight the rate value by the duration
+                 rate = value / duration
+                 distribution[rate] += duration
+
+         distribution_list: list[tuple[float, float]] = sorted(distribution.items())
+
+         return DistributionSummary.from_distribution_function(
+             distribution=distribution_list,
+             include_cdf=include_cdf,
+         )
+
+     @staticmethod
+     def from_iterable_request_times(
+         requests: list[tuple[float, float]],
+         first_iter_times: list[float],
+         iter_counts: list[int],
+         first_iter_counts: Optional[list[int]] = None,
+         include_cdf: bool = False,
+         epsilon: float = 1e-6,
+     ) -> "DistributionSummary":
+         """
+         Create a statistical summary for a given distribution of request times
+         for a request with iterable responses between the start and end.
+         For example, this is used to measure autoregressive requests where
+         a request is started and at some later point, iterative responses are
+         received. This will convert the request times and iterable values into
+         a distribution function and then calculate the statistics with
+         from_distribution_function.
+
+         :param requests: A list of tuples representing the start and end times of
+             each request. Example: [(start_1, end_1), (start_2, end_2), ...]
+         :param first_iter_times: A list of times when the first iteration of
+             each request was received. Must be the same length as requests.
+         :param iter_counts: A list of the total number of iterations for each
+             request that occurred starting at the first iteration and ending
+             at the request end time. Must be the same length as requests.
+         :param first_iter_counts: A list of the number of iterations to log
+             for the first iteration of each request. For example, when calculating
+             total number of tokens processed, this is set to the prompt tokens number.
+             If not provided, defaults to 1 for each request.
+         :param include_cdf: Whether to include the calculated cumulative distribution
+             function (CDF) in the output DistributionSummary.
+         :param epsilon: The epsilon value for merging close events.
+         :return: An instance of DistributionSummary with calculated values.
+         """
+
+         if first_iter_counts is None:
+             first_iter_counts = [1] * len(requests)
+
+         if (
+             len(requests) != len(first_iter_times)
+             or len(requests) != len(iter_counts)
+             or len(requests) != len(first_iter_counts)
+         ):
+             raise ValueError(
+                 "requests, first_iter_times, iter_counts, and first_iter_counts must "
+                 "be the same length. "
+                 f"Given {len(requests)}, {len(first_iter_times)}, {len(iter_counts)}, "
+                 f"{len(first_iter_counts)}",
+             )
+
+         # first break up the requests into individual iterable events
+         events = defaultdict(int)
+         global_start = min(start for start, _ in requests) if requests else 0
+         global_end = max(end for _, end in requests) if requests else 0
+         events[global_start] = 0
+         events[global_end] = 0
+
+         for (_, end), first_iter, first_iter_count, total_count in zip(
+             requests, first_iter_times, first_iter_counts, iter_counts
+         ):
+             events[first_iter] += first_iter_count
+
+             if total_count > 1:
+                 iter_latency = (end - first_iter) / (total_count - 1)
+                 for ind in range(1, total_count):
+                     events[first_iter + ind * iter_latency] += 1
+
+         # combine any events that are very close together
+         flattened_events: list[tuple[float, int]] = []
+
+         for time, count in sorted(events.items()):
+             last_time, last_count = (
+                 flattened_events[-1] if flattened_events else (None, None)
+             )
+
+             if (
+                 last_time is not None
+                 and last_count is not None
+                 and abs(last_time - time) <= epsilon
+             ):
+                 flattened_events[-1] = (last_time, last_count + count)
+             else:
+                 flattened_events.append((time, count))
+
+         # convert to value distribution function
+         distribution: dict[float, float] = defaultdict(float)
+
+         for ind in range(len(flattened_events) - 1):
+             start_time, count = flattened_events[ind]
+             end_time, _ = flattened_events[ind + 1]
+             duration = end_time - start_time
+             rate = count / duration
+             distribution[rate] += duration
+
+         distribution_list = sorted(distribution.items())
+
+         return DistributionSummary.from_distribution_function(
+             distribution=distribution_list,
+             include_cdf=include_cdf,
+         )
+
+
+ class StatusDistributionSummary(
+     StatusBreakdown[
+         DistributionSummary,
+         DistributionSummary,
+         DistributionSummary,
+         DistributionSummary,
+     ]
+ ):
+     """
+     A pydantic model representing a statistical summary for a given
+     distribution of numerical values grouped by status.
+     Specifically used to represent the total, successful, incomplete,
+     and errored values for a benchmark or other statistical summary.
+     """
+
+     @staticmethod
+     def from_values(
+         value_types: list[Literal["successful", "incomplete", "error"]],
+         values: list[float],
+         weights: Optional[list[float]] = None,
+         include_cdf: bool = False,
+     ) -> "StatusDistributionSummary":
+         """
+         Create a statistical summary by status for a given distribution of numerical
+         values. This is used to measure the distribution of values for different
+         statuses (e.g., successful, incomplete, error) and calculate the statistics
+         for each status. Optional weights scale each value's contribution to the
+         probability distribution; if not provided, all values are equally weighted.
+
+         :param value_types: A list of status types for each value in the distribution.
+             Must be one of 'successful', 'incomplete', or 'error'.
+         :param values: A list of numerical values representing the distribution.
+             Must be the same length as value_types.
+         :param weights: A list of weights for each value in the distribution.
+             If not provided, all values are equally weighted (set to 1).
+             Must be the same length as value_types.
+         :param include_cdf: Whether to include the calculated cumulative distribution
+             function (CDF) in the output StatusDistributionSummary.
+         :return: An instance of StatusDistributionSummary with calculated values.
+         """
+         if any(
+             type_ not in {"successful", "incomplete", "error"} for type_ in value_types
+         ):
+             raise ValueError(
+                 "value_types must be one of 'successful', 'incomplete', or 'error'. "
+                 f"Got {value_types} instead.",
+             )
+
+         if weights is None:
+             weights = [1.0] * len(values)
+
+         if len(value_types) != len(values) or len(value_types) != len(weights):
+             raise ValueError(
+                 "The length of value_types, values, and weights must be the same.",
+             )
+
+         _, successful_values, successful_weights = (
+             zip(*successful)
+             if (
+                 successful := list(
+                     filter(
+                         lambda val: val[0] == "successful",
+                         zip(value_types, values, weights),
+                     )
+                 )
+             )
+             else ([], [], [])
+         )
+         _, incomplete_values, incomplete_weights = (
+             zip(*incomplete)
+             if (
+                 incomplete := list(
+                     filter(
+                         lambda val: val[0] == "incomplete",
+                         zip(value_types, values, weights),
+                     )
+                 )
+             )
+             else ([], [], [])
+         )
+         _, errored_values, errored_weights = (
+             zip(*errored)
+             if (
+                 errored := list(
+                     filter(
+                         lambda val: val[0] == "error",
+                         zip(value_types, values, weights),
+                     )
+                 )
+             )
+             else ([], [], [])
+         )
+
+         return StatusDistributionSummary(
+             total=DistributionSummary.from_values(
+                 values,
+                 weights,
+                 include_cdf=include_cdf,
+             ),
+             successful=DistributionSummary.from_values(
+                 successful_values,  # type: ignore[arg-type]
+                 successful_weights,  # type: ignore[arg-type]
+                 include_cdf=include_cdf,
+             ),
+             incomplete=DistributionSummary.from_values(
+                 incomplete_values,  # type: ignore[arg-type]
+                 incomplete_weights,  # type: ignore[arg-type]
+                 include_cdf=include_cdf,
+             ),
+             errored=DistributionSummary.from_values(
+                 errored_values,  # type: ignore[arg-type]
+                 errored_weights,  # type: ignore[arg-type]
+                 include_cdf=include_cdf,
+             ),
+         )
+
+     @staticmethod
+     def from_request_times(
+         request_types: list[Literal["successful", "incomplete", "error"]],
+         requests: list[tuple[float, float]],
+         distribution_type: Literal["concurrency", "rate"],
+         include_cdf: bool = False,
+         epsilon: float = 1e-6,
+     ) -> "StatusDistributionSummary":
+         """
+         Create a statistical summary by status for a given distribution of request times.
+         This is used to measure the distribution of request times for different statuses
+         (e.g., successful, incomplete, error) for concurrency and rates.
+         This will call into DistributionSummary.from_request_times to calculate
+         the statistics for each status.
+
+         :param request_types: List of status types for each request in the distribution.
+             Must be one of 'successful', 'incomplete', or 'error'.
+         :param requests: A list of tuples representing the start and end times of
+             each request. Example: [(start_1, end_1), (start_2, end_2), ...].
+             Must be the same length as request_types.
+         :param distribution_type: The type of distribution to calculate.
+             Either "concurrency" or "rate".
+         :param include_cdf: Whether to include the calculated cumulative distribution
+             function (CDF) in the output StatusDistributionSummary.
+         :param epsilon: The epsilon value for merging close events.
+         :return: An instance of StatusDistributionSummary with calculated values.
+         """
+         if distribution_type not in {"concurrency", "rate"}:
+             raise ValueError(
+                 f"Invalid distribution_type '{distribution_type}'. "
+                 "Must be 'concurrency' or 'rate'."
+             )
+
+         if any(
+             type_ not in {"successful", "incomplete", "error"}
+             for type_ in request_types
+         ):
+             raise ValueError(
+                 "request_types must be one of 'successful', 'incomplete', or 'error'. "
+                 f"Got {request_types} instead.",
+             )
+
+         if len(request_types) != len(requests):
+             raise ValueError(
+                 "The length of request_types and requests must be the same. "
+                 f"Got {len(request_types)} and {len(requests)} instead.",
+             )
+
+         _, successful_requests = (
+             zip(*successful)
+             if (
+                 successful := list(
+                     filter(
+                         lambda val: val[0] == "successful",
+                         zip(request_types, requests),
+                     )
+                 )
+             )
+             else ([], [])
+         )
+         _, incomplete_requests = (
+             zip(*incomplete)
+             if (
+                 incomplete := list(
+                     filter(
+                         lambda val: val[0] == "incomplete",
+                         zip(request_types, requests),
+                     )
+                 )
+             )
+             else ([], [])
+         )
+         _, errored_requests = (
+             zip(*errored)
+             if (
+                 errored := list(
+                     filter(
+                         lambda val: val[0] == "error",
+                         zip(request_types, requests),
+                     )
+                 )
+             )
+             else ([], [])
+         )
+
+         return StatusDistributionSummary(
+             total=DistributionSummary.from_request_times(
+                 requests,
+                 distribution_type=distribution_type,
+                 include_cdf=include_cdf,
+                 epsilon=epsilon,
+             ),
+             successful=DistributionSummary.from_request_times(
+                 successful_requests,  # type: ignore[arg-type]
+                 distribution_type=distribution_type,
+                 include_cdf=include_cdf,
+                 epsilon=epsilon,
+             ),
+             incomplete=DistributionSummary.from_request_times(
+                 incomplete_requests,  # type: ignore[arg-type]
+                 distribution_type=distribution_type,
+                 include_cdf=include_cdf,
+                 epsilon=epsilon,
+             ),
+             errored=DistributionSummary.from_request_times(
+                 errored_requests,  # type: ignore[arg-type]
+                 distribution_type=distribution_type,
+                 include_cdf=include_cdf,
+                 epsilon=epsilon,
+             ),
+         )
+
+     @staticmethod
+     def from_iterable_request_times(
+         request_types: list[Literal["successful", "incomplete", "error"]],
+         requests: list[tuple[float, float]],
+         first_iter_times: list[float],
+         iter_counts: Optional[list[int]] = None,
+         first_iter_counts: Optional[list[int]] = None,
+         include_cdf: bool = False,
+         epsilon: float = 1e-6,
+     ) -> "StatusDistributionSummary":
+         """
+         Create a statistical summary by status for a given distribution of request times
+         for a request with iterable responses between the start and end.
+         For example, this is used to measure autoregressive requests where
+         a request is started and at some later point, iterative responses are
+         received. This will call into DistributionSummary.from_iterable_request_times
+         to calculate the statistics for each status.
+
+         :param request_types: List of status types for each request in the distribution.
+             Must be one of 'successful', 'incomplete', or 'error'.
+         :param requests: A list of tuples representing the start and end times of
+             each request. Example: [(start_1, end_1), (start_2, end_2), ...].
+             Must be the same length as request_types.
+         :param first_iter_times: A list of times when the first iteration of
+             each request was received. Must be the same length as requests.
+         :param iter_counts: A list of the total number of iterations for each
+             request that occurred starting at the first iteration and ending
+             at the request end time. Must be the same length as requests.
+             If not provided, defaults to 1 for each request.
+         :param first_iter_counts: A list of the number of iterations to log
+             for the first iteration of each request. For example, when calculating
+             total number of tokens processed, this is set to the prompt tokens number.
+             If not provided, defaults to 1 for each request.
+         :param include_cdf: Whether to include the calculated cumulative distribution
+             function (CDF) in the output StatusDistributionSummary.
+         :param epsilon: The epsilon value for merging close events.
+         :return: An instance of StatusDistributionSummary with calculated values.
+         """
+         if any(
+             type_ not in {"successful", "incomplete", "error"}
+             for type_ in request_types
+         ):
+             raise ValueError(
+                 "request_types must be one of 'successful', 'incomplete', or 'error'. "
+                 f"Got {request_types} instead.",
+             )
+
+         if iter_counts is None:
+             iter_counts = [1] * len(requests)
+
+         if first_iter_counts is None:
+             first_iter_counts = [1] * len(requests)
+
+         if (
+             len(request_types) != len(requests)
+             or len(requests) != len(first_iter_times)
+             or len(requests) != len(iter_counts)
+             or len(requests) != len(first_iter_counts)
+         ):
+             raise ValueError(
+                 "request_types, requests, first_iter_times, iter_counts, and "
+                 "first_iter_counts must be the same length. "
+                 f"Given {len(request_types)}, {len(requests)}, "
+                 f"{len(first_iter_times)}, {len(iter_counts)}, "
+                 f"{len(first_iter_counts)}",
+             )
+
+         (
+             _,
+             successful_requests,
+             successful_first_iter_times,
+             successful_iter_counts,
+             successful_first_iter_counts,
+         ) = (
+             zip(*successful)
+             if (
+                 successful := list(
+                     filter(
+                         lambda val: val[0] == "successful",
+                         zip(
+                             request_types,
+                             requests,
+                             first_iter_times,
+                             iter_counts,
+                             first_iter_counts,
+                         ),
+                     )
+                 )
+             )
+             else ([], [], [], [], [])
+         )
+         (
+             _,
+             incomplete_requests,
+             incomplete_first_iter_times,
+             incomplete_iter_counts,
+             incomplete_first_iter_counts,
+         ) = (
+             zip(*incomplete)
+             if (
+                 incomplete := list(
+                     filter(
+                         lambda val: val[0] == "incomplete",
+                         zip(
+                             request_types,
+                             requests,
+                             first_iter_times,
+                             iter_counts,
+                             first_iter_counts,
+                         ),
+                     )
+                 )
+             )
+             else ([], [], [], [], [])
+         )
+         (
+             _,
+             errored_requests,
+             errored_first_iter_times,
+             errored_iter_counts,
+             errored_first_iter_counts,
+         ) = (
+             zip(*errored)
+             if (
+                 errored := list(
+                     filter(
+                         lambda val: val[0] == "error",
+                         zip(
+                             request_types,
+                             requests,
+                             first_iter_times,
+                             iter_counts,
+                             first_iter_counts,
+                         ),
+                     )
+                 )
+             )
+             else ([], [], [], [], [])
+         )
+
+         return StatusDistributionSummary(
+             total=DistributionSummary.from_iterable_request_times(
+                 requests,
+                 first_iter_times,
+                 iter_counts,
+                 first_iter_counts,
+                 include_cdf=include_cdf,
+                 epsilon=epsilon,
+             ),
+             successful=DistributionSummary.from_iterable_request_times(
+                 successful_requests,  # type: ignore[arg-type]
+                 successful_first_iter_times,  # type: ignore[arg-type]
+                 successful_iter_counts,  # type: ignore[arg-type]
+                 successful_first_iter_counts,  # type: ignore[arg-type]
+                 include_cdf=include_cdf,
+                 epsilon=epsilon,
+             ),
+             incomplete=DistributionSummary.from_iterable_request_times(
+                 incomplete_requests,  # type: ignore[arg-type]
+                 incomplete_first_iter_times,  # type: ignore[arg-type]
+                 incomplete_iter_counts,  # type: ignore[arg-type]
+                 incomplete_first_iter_counts,  # type: ignore[arg-type]
+                 include_cdf=include_cdf,
+                 epsilon=epsilon,
+             ),
+             errored=DistributionSummary.from_iterable_request_times(
+                 errored_requests,  # type: ignore[arg-type]
+                 errored_first_iter_times,  # type: ignore[arg-type]
+                 errored_iter_counts,  # type: ignore[arg-type]
+                 errored_first_iter_counts,  # type: ignore[arg-type]
+                 include_cdf=include_cdf,
+                 epsilon=epsilon,
+             ),
+         )
+
+
+ class RunningStats(StandardBaseModel):
+     """
+     Create a running statistics object to track the mean, rate, and other
+     statistics of a stream of values.
+     1. The start time is set to the time the object is created.
+     2. The count is set to 0.
+     3. The total is set to 0.
+     4. The last value is set to 0.
+     5. The mean is calculated as the total / count.
+     """
+
+     start_time: float = Field(
+         default_factory=timer.time,
+         description=(
+             "The time the running statistics object was created. "
+             "This is used to calculate the rate of the statistics."
+         ),
+     )
+     count: int = Field(
+         default=0,
+         description="The number of values added to the running statistics.",
+     )
+     total: float = Field(
+         default=0.0,
+         description="The total sum of the values added to the running statistics.",
+     )
+     last: float = Field(
+         default=0.0,
+         description="The last value added to the running statistics.",
+     )
+
+     @computed_field  # type: ignore[misc]
+     @property
+     def mean(self) -> float:
+         """
+         :return: The mean of the running statistics (total / count).
+             If count is 0, return 0.0.
+         """
+         if self.count == 0:
+             return 0.0
+         return self.total / self.count
+
+     @computed_field  # type: ignore[misc]
+     @property
+     def rate(self) -> float:
+         """
+         :return: The rate of the running statistics
+             (total / (time.time() - start_time)).
+             If count is 0, return 0.0.
+         """
+         if self.count == 0:
+             return 0.0
+         return self.total / (timer.time() - self.start_time)
+
+     def __add__(self, value: Any) -> float:
+         """
+         Enable the use of the + operator to add a value to the running statistics.
+
+         :param value: The value to add to the running statistics.
+         :return: The mean of the running statistics.
+         """
+         if not isinstance(value, (int, float)):
+             raise ValueError(
+                 f"Value must be an int or float, got {type(value)} instead.",
+             )
+
+         self.update(value)
+
+         return self.mean
+
+     def __iadd__(self, value: Any) -> "RunningStats":
+         """
+         Enable the use of the += operator to add a value to the running statistics.
+
+         :param value: The value to add to the running statistics.
+         :return: The running statistics object.
+         """
+         if not isinstance(value, (int, float)):
+             raise ValueError(
+                 f"Value must be an int or float, got {type(value)} instead.",
+             )
+
+         self.update(value)
+
+         return self
+
+     def update(self, value: float, count: int = 1) -> None:
+         """
+         Update the running statistics with a new value.
+
+         :param value: The new value to add to the running statistics.
+         :param count: The number of times to 'count' for the value.
+             If not provided, defaults to 1.
+         """
+         self.count += count
+         self.total += value
+         self.last = value
+
+
+ class TimeRunningStats(RunningStats):
+     """
+     Create a running statistics object to track the mean, rate, and other
+     statistics of a stream of time values. This is used to track time values
+     in milliseconds and seconds.
+
+     Adds time-specific computed fields, such as measurements in milliseconds and seconds.
+     """
+
+     @computed_field  # type: ignore[misc]
+     @property
+     def total_ms(self) -> float:
+         """
+         :return: The total time multiplied by 1000.0 to convert to milliseconds.
+         """
+         return self.total * 1000.0
+
+     @computed_field  # type: ignore[misc]
+     @property
+     def last_ms(self) -> float:
+         """
+         :return: The last time multiplied by 1000.0 to convert to milliseconds.
+         """
+         return self.last * 1000.0
+
+     @computed_field  # type: ignore[misc]
+     @property
+     def mean_ms(self) -> float:
+         """
+         :return: The mean time multiplied by 1000.0 to convert to milliseconds.
+         """
+         return self.mean * 1000.0
+
+     @computed_field  # type: ignore[misc]
+     @property
+     def rate_ms(self) -> float:
+         """
+         :return: The rate of the running statistics multiplied by 1000.0
+             to convert to milliseconds.
+         """
+         return self.rate * 1000.0
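
For orientation, below is a minimal usage sketch of the statistics API added in this file. It only uses classes and signatures visible in the diff above; the import path mirrors the new guidellm/objects/statistics.py module, and all input numbers are made-up illustration data, not output from a real guidellm run.

# Hedged usage sketch for the new statistics module (assumed import path).
from guidellm.objects.statistics import (
    DistributionSummary,
    RunningStats,
    StatusDistributionSummary,
)

# Summarize a plain list of latency samples (seconds); weights default to 1.0 each.
latencies = [0.8, 1.1, 0.9, 1.4, 2.0]  # hypothetical measurements
summary = DistributionSummary.from_values(latencies)
print(summary.mean, summary.std_dev, summary.percentiles.p95)

# Derive a duration-weighted concurrency distribution from (start, end) pairs.
requests = [(0.0, 1.0), (0.2, 1.5), (0.4, 2.0)]  # hypothetical timestamps
concurrency = DistributionSummary.from_request_times(
    requests, distribution_type="concurrency"
)
print(concurrency.max)  # peak number of overlapping requests (3 here)

# Break the same kind of summary down by request status.
status_summary = StatusDistributionSummary.from_values(
    value_types=["successful", "successful", "error"],
    values=[0.8, 1.1, 3.2],
)
print(status_summary.successful.median, status_summary.errored.count)

# Track a live mean and rate as values stream in.
running = RunningStats()
running += 5.0        # __iadd__ records the value via update()
running.update(7.0)   # equivalent explicit call
print(running.mean, running.rate)

Note the design choice visible in from_request_times: each observed concurrency or rate value is weighted by how long it persisted, so the resulting percentiles reflect time spent at each level rather than raw event counts.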