guidellm 0.3.1__py3-none-any.whl → 0.6.0a5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (141)
  1. guidellm/__init__.py +5 -2
  2. guidellm/__main__.py +524 -255
  3. guidellm/backends/__init__.py +33 -0
  4. guidellm/backends/backend.py +109 -0
  5. guidellm/backends/openai.py +340 -0
  6. guidellm/backends/response_handlers.py +428 -0
  7. guidellm/benchmark/__init__.py +69 -39
  8. guidellm/benchmark/benchmarker.py +160 -316
  9. guidellm/benchmark/entrypoints.py +560 -127
  10. guidellm/benchmark/outputs/__init__.py +24 -0
  11. guidellm/benchmark/outputs/console.py +633 -0
  12. guidellm/benchmark/outputs/csv.py +721 -0
  13. guidellm/benchmark/outputs/html.py +473 -0
  14. guidellm/benchmark/outputs/output.py +169 -0
  15. guidellm/benchmark/outputs/serialized.py +69 -0
  16. guidellm/benchmark/profiles.py +718 -0
  17. guidellm/benchmark/progress.py +553 -556
  18. guidellm/benchmark/scenarios/__init__.py +40 -0
  19. guidellm/benchmark/scenarios/chat.json +6 -0
  20. guidellm/benchmark/scenarios/rag.json +6 -0
  21. guidellm/benchmark/schemas/__init__.py +66 -0
  22. guidellm/benchmark/schemas/base.py +402 -0
  23. guidellm/benchmark/schemas/generative/__init__.py +55 -0
  24. guidellm/benchmark/schemas/generative/accumulator.py +841 -0
  25. guidellm/benchmark/schemas/generative/benchmark.py +163 -0
  26. guidellm/benchmark/schemas/generative/entrypoints.py +381 -0
  27. guidellm/benchmark/schemas/generative/metrics.py +927 -0
  28. guidellm/benchmark/schemas/generative/report.py +158 -0
  29. guidellm/data/__init__.py +34 -4
  30. guidellm/data/builders.py +541 -0
  31. guidellm/data/collators.py +16 -0
  32. guidellm/data/config.py +120 -0
  33. guidellm/data/deserializers/__init__.py +49 -0
  34. guidellm/data/deserializers/deserializer.py +141 -0
  35. guidellm/data/deserializers/file.py +223 -0
  36. guidellm/data/deserializers/huggingface.py +94 -0
  37. guidellm/data/deserializers/memory.py +194 -0
  38. guidellm/data/deserializers/synthetic.py +246 -0
  39. guidellm/data/entrypoints.py +52 -0
  40. guidellm/data/loaders.py +190 -0
  41. guidellm/data/preprocessors/__init__.py +27 -0
  42. guidellm/data/preprocessors/formatters.py +410 -0
  43. guidellm/data/preprocessors/mappers.py +196 -0
  44. guidellm/data/preprocessors/preprocessor.py +30 -0
  45. guidellm/data/processor.py +29 -0
  46. guidellm/data/schemas.py +175 -0
  47. guidellm/data/utils/__init__.py +6 -0
  48. guidellm/data/utils/dataset.py +94 -0
  49. guidellm/extras/__init__.py +4 -0
  50. guidellm/extras/audio.py +220 -0
  51. guidellm/extras/vision.py +242 -0
  52. guidellm/logger.py +2 -2
  53. guidellm/mock_server/__init__.py +8 -0
  54. guidellm/mock_server/config.py +84 -0
  55. guidellm/mock_server/handlers/__init__.py +17 -0
  56. guidellm/mock_server/handlers/chat_completions.py +280 -0
  57. guidellm/mock_server/handlers/completions.py +280 -0
  58. guidellm/mock_server/handlers/tokenizer.py +142 -0
  59. guidellm/mock_server/models.py +510 -0
  60. guidellm/mock_server/server.py +238 -0
  61. guidellm/mock_server/utils.py +302 -0
  62. guidellm/scheduler/__init__.py +69 -26
  63. guidellm/scheduler/constraints/__init__.py +49 -0
  64. guidellm/scheduler/constraints/constraint.py +325 -0
  65. guidellm/scheduler/constraints/error.py +411 -0
  66. guidellm/scheduler/constraints/factory.py +182 -0
  67. guidellm/scheduler/constraints/request.py +312 -0
  68. guidellm/scheduler/constraints/saturation.py +722 -0
  69. guidellm/scheduler/environments.py +252 -0
  70. guidellm/scheduler/scheduler.py +137 -368
  71. guidellm/scheduler/schemas.py +358 -0
  72. guidellm/scheduler/strategies.py +617 -0
  73. guidellm/scheduler/worker.py +413 -419
  74. guidellm/scheduler/worker_group.py +712 -0
  75. guidellm/schemas/__init__.py +65 -0
  76. guidellm/schemas/base.py +417 -0
  77. guidellm/schemas/info.py +188 -0
  78. guidellm/schemas/request.py +235 -0
  79. guidellm/schemas/request_stats.py +349 -0
  80. guidellm/schemas/response.py +124 -0
  81. guidellm/schemas/statistics.py +1018 -0
  82. guidellm/{config.py → settings.py} +31 -24
  83. guidellm/utils/__init__.py +71 -8
  84. guidellm/utils/auto_importer.py +98 -0
  85. guidellm/utils/cli.py +132 -5
  86. guidellm/utils/console.py +566 -0
  87. guidellm/utils/encoding.py +778 -0
  88. guidellm/utils/functions.py +159 -0
  89. guidellm/utils/hf_datasets.py +1 -2
  90. guidellm/utils/hf_transformers.py +4 -4
  91. guidellm/utils/imports.py +9 -0
  92. guidellm/utils/messaging.py +1118 -0
  93. guidellm/utils/mixins.py +115 -0
  94. guidellm/utils/random.py +3 -4
  95. guidellm/utils/registry.py +220 -0
  96. guidellm/utils/singleton.py +133 -0
  97. guidellm/utils/synchronous.py +159 -0
  98. guidellm/utils/text.py +163 -50
  99. guidellm/utils/typing.py +41 -0
  100. guidellm/version.py +2 -2
  101. guidellm-0.6.0a5.dist-info/METADATA +364 -0
  102. guidellm-0.6.0a5.dist-info/RECORD +109 -0
  103. guidellm/backend/__init__.py +0 -23
  104. guidellm/backend/backend.py +0 -259
  105. guidellm/backend/openai.py +0 -708
  106. guidellm/backend/response.py +0 -136
  107. guidellm/benchmark/aggregator.py +0 -760
  108. guidellm/benchmark/benchmark.py +0 -837
  109. guidellm/benchmark/output.py +0 -997
  110. guidellm/benchmark/profile.py +0 -409
  111. guidellm/benchmark/scenario.py +0 -104
  112. guidellm/data/prideandprejudice.txt.gz +0 -0
  113. guidellm/dataset/__init__.py +0 -22
  114. guidellm/dataset/creator.py +0 -213
  115. guidellm/dataset/entrypoints.py +0 -42
  116. guidellm/dataset/file.py +0 -92
  117. guidellm/dataset/hf_datasets.py +0 -62
  118. guidellm/dataset/in_memory.py +0 -132
  119. guidellm/dataset/synthetic.py +0 -287
  120. guidellm/objects/__init__.py +0 -18
  121. guidellm/objects/pydantic.py +0 -89
  122. guidellm/objects/statistics.py +0 -953
  123. guidellm/preprocess/__init__.py +0 -3
  124. guidellm/preprocess/dataset.py +0 -374
  125. guidellm/presentation/__init__.py +0 -28
  126. guidellm/presentation/builder.py +0 -27
  127. guidellm/presentation/data_models.py +0 -232
  128. guidellm/presentation/injector.py +0 -66
  129. guidellm/request/__init__.py +0 -18
  130. guidellm/request/loader.py +0 -284
  131. guidellm/request/request.py +0 -79
  132. guidellm/request/types.py +0 -10
  133. guidellm/scheduler/queues.py +0 -25
  134. guidellm/scheduler/result.py +0 -155
  135. guidellm/scheduler/strategy.py +0 -495
  136. guidellm-0.3.1.dist-info/METADATA +0 -329
  137. guidellm-0.3.1.dist-info/RECORD +0 -62
  138. {guidellm-0.3.1.dist-info → guidellm-0.6.0a5.dist-info}/WHEEL +0 -0
  139. {guidellm-0.3.1.dist-info → guidellm-0.6.0a5.dist-info}/entry_points.txt +0 -0
  140. {guidellm-0.3.1.dist-info → guidellm-0.6.0a5.dist-info}/licenses/LICENSE +0 -0
  141. {guidellm-0.3.1.dist-info → guidellm-0.6.0a5.dist-info}/top_level.txt +0 -0
guidellm/scheduler/strategy.py (deleted)
@@ -1,495 +0,0 @@
- import math
- import random
- import time
- from collections.abc import Generator
- from typing import (
-     Literal,
-     Optional,
-     Union,
- )
-
- from pydantic import Field
-
- from guidellm.config import settings
- from guidellm.objects import StandardBaseModel
-
- __all__ = [
-     "AsyncConstantStrategy",
-     "AsyncPoissonStrategy",
-     "ConcurrentStrategy",
-     "SchedulingStrategy",
-     "StrategyType",
-     "SynchronousStrategy",
-     "ThroughputStrategy",
-     "strategy_display_str",
- ]
-
-
- StrategyType = Literal["synchronous", "concurrent", "throughput", "constant", "poisson"]
-
-
- class SchedulingStrategy(StandardBaseModel):
-     """
-     An abstract base class for scheduling strategies.
-     This class defines the interface for scheduling requests and provides
-     a common structure for all scheduling strategies.
-     Subclasses should implement the `request_times` method to provide
-     specific scheduling behavior.
-
-     :param type_: The type of scheduling strategy to use.
-         This should be one of the predefined strategy types.
-     """
-
-     type_: Literal["strategy"] = Field(
-         description="The type of scheduling strategy schedule requests with.",
-     )
-     start_time: float = Field(
-         default_factory=time.time,
-         description="The start time for the scheduling strategy.",
-     )
-
-     @property
-     def processing_mode(self) -> Literal["sync", "async"]:
-         """
-         The processing mode for the scheduling strategy, either 'sync' or 'async'.
-         This property determines how the worker processes are setup:
-         either to run synchronously with one request at a time or asynchronously.
-         This property should be implemented by subclasses to return
-         the appropriate processing mode.
-
-         :return: The processing mode for the scheduling strategy,
-             either 'sync' or 'async'.
-         """
-         return "async"
-
-     @property
-     def processes_limit(self) -> int:
-         """
-         The limit on the number of worker processes for the scheduling strategy.
-         It determines how many worker processes are created
-         for the scheduling strategy and must be implemented by subclasses.
-
-         :return: The number of processes for the scheduling strategy.
-         """
-         return settings.max_worker_processes
-
-     @property
-     def queued_requests_limit(self) -> Optional[int]:
-         """
-         The maximum number of queued requests for the scheduling strategy.
-         It determines how many requests can be queued at one time
-         for the scheduling strategy and must be implemented by subclasses.
-
-         :return: The maximum number of queued requests for the scheduling strategy.
-         """
-         return settings.max_concurrency
-
-     @property
-     def processing_requests_limit(self) -> int:
-         """
-         The maximum number of processing requests for the scheduling strategy.
-         It determines how many requests can be processed at one time
-         for the scheduling strategy and must be implemented by subclasses.
-
-         :return: The maximum number of processing requests for the scheduling strategy.
-         """
-         return settings.max_concurrency
-
-     def request_times(self) -> Generator[float, None, None]:
-         """
-         A generator that yields timestamps for when requests should be sent.
-         This method should be implemented by subclasses to provide specific
-         scheduling behavior.
-
-         :return: A generator that yields timestamps for request scheduling
-             or -1 for requests that should be sent immediately.
-         """
-         raise NotImplementedError("Subclasses must implement request_times() method.")
-
-
- class SynchronousStrategy(SchedulingStrategy):
-     """
-     A class representing a synchronous scheduling strategy.
-     This strategy schedules requests synchronously, one at a time,
-     with the maximum rate possible.
-     It inherits from the `SchedulingStrategy` base class and
-     implements the `request_times` method to provide the specific
-     behavior for synchronous scheduling.
-
-     :param type_: The synchronous StrategyType to schedule requests synchronously.
-     """
-
-     type_: Literal["synchronous"] = "synchronous" # type: ignore[assignment]
-
-     @property
-     def processing_mode(self) -> Literal["sync"]:
-         """
-         The processing mode for the scheduling strategy, either 'sync' or 'async'.
-         This property determines how the worker processes are setup:
-         either to run synchronously with one request at a time or asynchronously.
-
-         :return: 'sync' for synchronous scheduling strategy
-             for the single worker process.
-         """
-         return "sync"
-
-     @property
-     def processes_limit(self) -> int:
-         """
-         The limit on the number of worker processes for the scheduling strategy.
-         It determines how many worker processes are created
-         for the scheduling strategy and must be implemented by subclasses.
-
-         :return: 1 for the synchronous scheduling strategy to limit
-             the worker processes to one.
-         """
-         return 1
-
-     @property
-     def queued_requests_limit(self) -> int:
-         """
-         The maximum number of queued requests for the scheduling strategy.
-         It determines how many requests can be queued at one time
-         for the scheduling strategy and must be implemented by subclasses.
-
-         :return: 1 for the synchronous scheduling strategy to limit
-             the queued requests to one that is ready to be processed.
-         """
-         return 1
-
-     @property
-     def processing_requests_limit(self) -> int:
-         """
-         The maximum number of processing requests for the scheduling strategy.
-         It determines how many requests can be processed at one time
-         for the scheduling strategy and must be implemented by subclasses.
-
-         :return: 1 for the synchronous scheduling strategy to limit
-             the processing requests to one that is ready to be processed.
-         """
-         return 1
-
-     def request_times(self) -> Generator[float, None, None]:
-         """
-         A generator that yields time.time() so requests are sent immediately,
-         while scheduling them synchronously.
-
-         :return: A generator that yields time.time() for immediate request scheduling.
-         """
-         init_time = self.start_time
-         while True:
-             yield max(init_time, time.time())
-
-
- class ConcurrentStrategy(SchedulingStrategy):
-     """
-     A class representing a concurrent scheduling strategy.
-     This strategy schedules requests concurrently with the specified
-     number of streams.
-     It inherits from the `SchedulingStrategy` base class and
-     implements the `request_times` method to provide the specific
-     behavior for concurrent scheduling.
-
-     :param type_: The concurrent StrategyType to schedule requests concurrently.
-     :param streams: The number of concurrent streams to use for scheduling requests.
-         Each stream runs synchronously with the maximum rate possible.
-         This must be a positive integer.
-     """
-
-     type_: Literal["concurrent"] = "concurrent" # type: ignore[assignment]
-     streams: int = Field(
-         description=(
-             "The number of concurrent streams to use for scheduling requests. "
-             "Each stream runs sychronously with the maximum rate possible. "
-             "This must be a positive integer."
-         ),
-         gt=0,
-     )
-
-     @property
-     def processing_mode(self) -> Literal["sync"]:
-         """
-         The processing mode for the scheduling strategy, either 'sync' or 'async'.
-         This property determines how the worker processes are setup:
-         either to run synchronously with one request at a time or asynchronously.
-
-         :return: 'sync' for synchronous scheduling strategy
-             for the multiple worker processes equal to streams.
-         """
-         return "sync"
-
-     @property
-     def processes_limit(self) -> int:
-         """
-         The limit on the number of worker processes for the scheduling strategy.
-         It determines how many worker processes are created
-         for the scheduling strategy and must be implemented by subclasses.
-
-         :return: {self.streams} for the concurrent scheduling strategy to limit
-             the worker processes to the number of streams.
-         """
-
-         return min(self.streams, settings.max_worker_processes)
-
-     @property
-     def queued_requests_limit(self) -> int:
-         """
-         The maximum number of queued requests for the scheduling strategy.
-         It determines how many requests can be queued at one time
-         for the scheduling strategy and must be implemented by subclasses.
-
-         :return: {self.streams} for the concurrent scheduling strategy to limit
-             the queued requests to the number of streams that are ready to be processed.
-         """
-         return self.streams
-
-     @property
-     def processing_requests_limit(self) -> int:
-         """
-         The maximum number of processing requests for the scheduling strategy.
-         It determines how many requests can be processed at one time
-         for the scheduling strategy and must be implemented by subclasses.
-
-         :return: {self.streams} for the concurrent scheduling strategy to limit
-             the processing requests to the number of streams that ready to be processed.
-         """
-         return self.streams
-
-     def request_times(self) -> Generator[float, None, None]:
-         """
-         A generator that yields time.time() so requests are sent
-         immediately, while scheduling them concurrently with the specified
-         number of streams.
-
-         :return: A generator that yields time.time() for immediate request scheduling.
-         """
-         init_time = self.start_time
-         while True:
-             yield max(init_time, time.time())
-
-
- class ThroughputStrategy(SchedulingStrategy):
-     """
-     A class representing a throughput scheduling strategy.
-     This strategy schedules as many requests asynchronously as possible,
-     with the maximum rate possible.
-     It inherits from the `SchedulingStrategy` base class and
-     implements the `request_times` method to provide the specific
-     behavior for throughput scheduling.
-
-     :param type_: The throughput StrategyType to schedule requests asynchronously.
-     """
-
-     type_: Literal["throughput"] = "throughput" # type: ignore[assignment]
-     max_concurrency: Optional[int] = Field(
-         default=None,
-         description=(
-             "The maximum number of concurrent requests to schedule. "
-             "If set to None, the concurrency value from settings will be used. "
-             "This must be a positive integer greater than 0."
-         ),
-         gt=0,
-     )
-
-     @property
-     def processing_mode(self) -> Literal["async"]:
-         """
-         The processing mode for the scheduling strategy, either 'sync' or 'async'.
-         This property determines how the worker processes are setup:
-         either to run synchronously with one request at a time or asynchronously.
-
-         :return: 'async' for asynchronous scheduling strategy
-             for the multiple worker processes handling requests.
-         """
-         return "async"
-
-     @property
-     def queued_requests_limit(self) -> int:
-         """
-         The maximum number of queued requests for the scheduling strategy.
-         It determines how many requests can be queued at one time
-         for the scheduling strategy and must be implemented by subclasses.
-
-         :return: The processing requests limit to ensure that there are enough
-             requests even for the worst case scenario where the max concurrent
-             requests are pulled at once for processing.
-         """
-         return self.processing_requests_limit
-
-     @property
-     def processing_requests_limit(self) -> int:
-         """
-         The maximum number of processing requests for the scheduling strategy.
-         It determines how many requests can be processed at one time
-         for the scheduling strategy and must be implemented by subclasses.
-
-         :return: {self.max_concurrency} for the throughput scheduling strategy to limit
-             the processing requests to the maximum concurrency.
-             If max_concurrency is None, then the default processing requests limit
-             will be used.
-         """
-         return self.max_concurrency or super().processing_requests_limit
-
-     def request_times(self) -> Generator[float, None, None]:
-         """
-         A generator that yields the start time.time() so requests are sent
-         immediately, while scheduling as many asynchronously as possible.
-
-         :return: A generator that yields the start time.time()
-             for immediate request scheduling.
-         """
-         init_time = self.start_time
-         while True:
-             yield init_time
-
-
- class AsyncConstantStrategy(ThroughputStrategy):
-     """
-     A class representing an asynchronous constant scheduling strategy.
-     This strategy schedules requests asynchronously at a constant request rate
-     in requests per second.
-     If initial_burst is set, it will send an initial burst of math.floor(rate)
-     requests to reach the target rate.
-     This is useful to ensure that the target rate is reached quickly
-     and then maintained.
-     It inherits from the `SchedulingStrategy` base class and
-     implements the `request_times` method to provide the specific
-     behavior for asynchronous constant scheduling.
-
-     :param type_: The constant StrategyType to schedule requests asynchronously.
-     :param rate: The rate at which to schedule requests asynchronously in
-         requests per second. This must be a positive float.
-     :param initial_burst: True to send an initial burst of requests
-         (math.floor(self.rate)) to reach target rate.
-         False to not send an initial burst.
-     """
-
-     type_: Literal["constant"] = "constant" # type: ignore[assignment]
-     rate: float = Field(
-         description=(
-             "The rate at which to schedule requests asynchronously in "
-             "requests per second. This must be a positive float."
-         ),
-         gt=0,
-     )
-     initial_burst: bool = Field(
-         default=True,
-         description=(
-             "True to send an initial burst of requests (math.floor(self.rate)) "
-             "to reach target rate. False to not send an initial burst."
-         ),
-     )
-
-     def request_times(self) -> Generator[float, None, None]:
-         """
-         A generator that yields timestamps for when requests should be sent.
-         This method schedules requests asynchronously at a constant rate
-         in requests per second.
-         If burst_time is set, it will send an initial burst of requests
-         to reach the target rate.
-         This is useful to ensure that the target rate is reached quickly
-         and then maintained.
-
-         :return: A generator that yields timestamps for request scheduling.
-         """
-         constant_increment = 1.0 / self.rate
-
-         init_time = self.start_time
-         # handle bursts first to get to the desired rate
-         if self.initial_burst is not None:
-             # send an initial burst equal to the rate
-             # to reach the target rate
-             burst_count = math.floor(self.rate)
-             for _ in range(burst_count):
-                 yield init_time
-
-             init_time += constant_increment
-
-         counter = 0
-
-         # continue with constant rate after bursting
-         while True:
-             yield init_time + constant_increment * counter
-             counter += 1
-
-
- class AsyncPoissonStrategy(ThroughputStrategy):
-     """
-     A class representing an asynchronous Poisson scheduling strategy.
-     This strategy schedules requests asynchronously at a Poisson request rate
-     in requests per second.
-     If initial_burst is set, it will send an initial burst of math.floor(rate)
-     requests to reach the target rate.
-     It inherits from the `SchedulingStrategy` base class and
-     implements the `request_times` method to provide the specific
-     behavior for asynchronous Poisson scheduling.
-
-     :param type_: The Poisson StrategyType to schedule requests asynchronously.
-     :param rate: The rate at which to schedule requests asynchronously in
-         requests per second. This must be a positive float.
-     :param initial_burst: True to send an initial burst of requests
-         (math.floor(self.rate)) to reach target rate.
-         False to not send an initial burst.
-     """
-
-     type_: Literal["poisson"] = "poisson" # type: ignore[assignment]
-     rate: float = Field(
-         description=(
-             "The rate at which to schedule requests asynchronously in "
-             "requests per second. This must be a positive float."
-         ),
-         gt=0,
-     )
-     initial_burst: bool = Field(
-         default=True,
-         description=(
-             "True to send an initial burst of requests (math.floor(self.rate)) "
-             "to reach target rate. False to not send an initial burst."
-         ),
-     )
-     random_seed: int = Field(
-         default=42,
-         description=("The random seed to use for the Poisson distribution. "),
-     )
-
-     def request_times(self) -> Generator[float, None, None]:
-         """
-         A generator that yields timestamps for when requests should be sent.
-         This method schedules requests asynchronously at a Poisson rate
-         in requests per second.
-         The inter arrival time between requests is exponentially distributed
-         based on the rate.
-
-         :return: A generator that yields timestamps for request scheduling.
-         """
-         init_time = self.start_time
-         if self.initial_burst is not None:
-             # send an initial burst equal to the rate
-             # to reach the target rate
-             burst_count = math.floor(self.rate)
-             for _ in range(burst_count):
-                 yield init_time
-         else:
-             yield init_time
-
-         # set the random seed for reproducibility
-         rand = random.Random(self.random_seed) # noqa: S311
-
-         while True:
-             inter_arrival_time = rand.expovariate(self.rate)
-             init_time += inter_arrival_time
-             yield init_time
-
-
- def strategy_display_str(strategy: Union[StrategyType, SchedulingStrategy]) -> str:
-     strategy_type = strategy if isinstance(strategy, str) else strategy.type_
-     strategy_instance = strategy if isinstance(strategy, SchedulingStrategy) else None
-
-     if strategy_type == "concurrent":
-         rate = f"@{strategy_instance.streams}" if strategy_instance else "@##" # type: ignore[attr-defined]
-     elif strategy_type in ("constant", "poisson"):
-         rate = f"@{strategy_instance.rate:.2f}" if strategy_instance else "@#.##" # type: ignore[attr-defined]
-     else:
-         rate = ""
-
-     return f"{strategy_type}{rate}"
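For readers skimming this hunk, the removed module's surface is small: each strategy is a pydantic model whose request_times() generator yields absolute timestamps at which the scheduler should dispatch requests. The following is a minimal sketch of driving it, valid only against the 0.3.1 module deleted here (the 0.6.0a5 replacement appears to be guidellm/scheduler/strategies.py in the file list above and is not shown); the rate value and printed offsets are illustrative, not taken from this diff.

    import itertools

    from guidellm.scheduler.strategy import AsyncPoissonStrategy, strategy_display_str

    # rate=2.0 is an arbitrary example value; type_, start_time, initial_burst,
    # and random_seed all fall back to the defaults defined in the hunk above.
    strategy = AsyncPoissonStrategy(rate=2.0)
    print(strategy_display_str(strategy))  # -> "poisson@2.00"

    # The first math.floor(rate) timestamps form the initial burst at start_time;
    # after that, each gap is drawn from an exponential distribution with mean 1/rate.
    for timestamp in itertools.islice(strategy.request_times(), 6):
        print(f"{timestamp - strategy.start_time:.3f}s after start")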