guidellm 0.4.0a21__py3-none-any.whl → 0.4.0a169__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of guidellm might be problematic.

Files changed (115)
  1. guidellm/__init__.py +5 -2
  2. guidellm/__main__.py +452 -252
  3. guidellm/backends/__init__.py +33 -0
  4. guidellm/backends/backend.py +110 -0
  5. guidellm/backends/openai.py +355 -0
  6. guidellm/backends/response_handlers.py +455 -0
  7. guidellm/benchmark/__init__.py +53 -39
  8. guidellm/benchmark/benchmarker.py +150 -317
  9. guidellm/benchmark/entrypoints.py +467 -128
  10. guidellm/benchmark/output.py +519 -771
  11. guidellm/benchmark/profile.py +580 -280
  12. guidellm/benchmark/progress.py +568 -549
  13. guidellm/benchmark/scenarios/__init__.py +40 -0
  14. guidellm/benchmark/scenarios/chat.json +6 -0
  15. guidellm/benchmark/scenarios/rag.json +6 -0
  16. guidellm/benchmark/schemas.py +2086 -0
  17. guidellm/data/__init__.py +28 -4
  18. guidellm/data/collators.py +16 -0
  19. guidellm/data/deserializers/__init__.py +53 -0
  20. guidellm/data/deserializers/deserializer.py +144 -0
  21. guidellm/data/deserializers/file.py +222 -0
  22. guidellm/data/deserializers/huggingface.py +94 -0
  23. guidellm/data/deserializers/memory.py +194 -0
  24. guidellm/data/deserializers/synthetic.py +348 -0
  25. guidellm/data/loaders.py +149 -0
  26. guidellm/data/preprocessors/__init__.py +25 -0
  27. guidellm/data/preprocessors/formatters.py +404 -0
  28. guidellm/data/preprocessors/mappers.py +198 -0
  29. guidellm/data/preprocessors/preprocessor.py +31 -0
  30. guidellm/data/processor.py +31 -0
  31. guidellm/data/schemas.py +13 -0
  32. guidellm/data/utils/__init__.py +6 -0
  33. guidellm/data/utils/dataset.py +94 -0
  34. guidellm/extras/__init__.py +4 -0
  35. guidellm/extras/audio.py +215 -0
  36. guidellm/extras/vision.py +242 -0
  37. guidellm/logger.py +2 -2
  38. guidellm/mock_server/__init__.py +8 -0
  39. guidellm/mock_server/config.py +84 -0
  40. guidellm/mock_server/handlers/__init__.py +17 -0
  41. guidellm/mock_server/handlers/chat_completions.py +280 -0
  42. guidellm/mock_server/handlers/completions.py +280 -0
  43. guidellm/mock_server/handlers/tokenizer.py +142 -0
  44. guidellm/mock_server/models.py +510 -0
  45. guidellm/mock_server/server.py +168 -0
  46. guidellm/mock_server/utils.py +302 -0
  47. guidellm/preprocess/dataset.py +23 -26
  48. guidellm/presentation/builder.py +2 -2
  49. guidellm/presentation/data_models.py +25 -21
  50. guidellm/presentation/injector.py +2 -3
  51. guidellm/scheduler/__init__.py +65 -26
  52. guidellm/scheduler/constraints.py +1035 -0
  53. guidellm/scheduler/environments.py +252 -0
  54. guidellm/scheduler/scheduler.py +140 -368
  55. guidellm/scheduler/schemas.py +272 -0
  56. guidellm/scheduler/strategies.py +519 -0
  57. guidellm/scheduler/worker.py +391 -420
  58. guidellm/scheduler/worker_group.py +707 -0
  59. guidellm/schemas/__init__.py +31 -0
  60. guidellm/schemas/info.py +159 -0
  61. guidellm/schemas/request.py +226 -0
  62. guidellm/schemas/response.py +119 -0
  63. guidellm/schemas/stats.py +228 -0
  64. guidellm/{config.py → settings.py} +32 -21
  65. guidellm/utils/__init__.py +95 -8
  66. guidellm/utils/auto_importer.py +98 -0
  67. guidellm/utils/cli.py +71 -2
  68. guidellm/utils/console.py +183 -0
  69. guidellm/utils/encoding.py +778 -0
  70. guidellm/utils/functions.py +134 -0
  71. guidellm/utils/hf_datasets.py +1 -2
  72. guidellm/utils/hf_transformers.py +4 -4
  73. guidellm/utils/imports.py +9 -0
  74. guidellm/utils/messaging.py +1118 -0
  75. guidellm/utils/mixins.py +115 -0
  76. guidellm/utils/pydantic_utils.py +411 -0
  77. guidellm/utils/random.py +3 -4
  78. guidellm/utils/registry.py +220 -0
  79. guidellm/utils/singleton.py +133 -0
  80. guidellm/{objects → utils}/statistics.py +341 -247
  81. guidellm/utils/synchronous.py +159 -0
  82. guidellm/utils/text.py +163 -50
  83. guidellm/utils/typing.py +41 -0
  84. guidellm/version.py +1 -1
  85. {guidellm-0.4.0a21.dist-info → guidellm-0.4.0a169.dist-info}/METADATA +33 -10
  86. guidellm-0.4.0a169.dist-info/RECORD +95 -0
  87. guidellm/backend/__init__.py +0 -23
  88. guidellm/backend/backend.py +0 -259
  89. guidellm/backend/openai.py +0 -705
  90. guidellm/backend/response.py +0 -136
  91. guidellm/benchmark/aggregator.py +0 -760
  92. guidellm/benchmark/benchmark.py +0 -837
  93. guidellm/benchmark/scenario.py +0 -104
  94. guidellm/data/prideandprejudice.txt.gz +0 -0
  95. guidellm/dataset/__init__.py +0 -22
  96. guidellm/dataset/creator.py +0 -213
  97. guidellm/dataset/entrypoints.py +0 -42
  98. guidellm/dataset/file.py +0 -92
  99. guidellm/dataset/hf_datasets.py +0 -62
  100. guidellm/dataset/in_memory.py +0 -132
  101. guidellm/dataset/synthetic.py +0 -287
  102. guidellm/objects/__init__.py +0 -18
  103. guidellm/objects/pydantic.py +0 -89
  104. guidellm/request/__init__.py +0 -18
  105. guidellm/request/loader.py +0 -284
  106. guidellm/request/request.py +0 -79
  107. guidellm/request/types.py +0 -10
  108. guidellm/scheduler/queues.py +0 -25
  109. guidellm/scheduler/result.py +0 -155
  110. guidellm/scheduler/strategy.py +0 -495
  111. guidellm-0.4.0a21.dist-info/RECORD +0 -62
  112. {guidellm-0.4.0a21.dist-info → guidellm-0.4.0a169.dist-info}/WHEEL +0 -0
  113. {guidellm-0.4.0a21.dist-info → guidellm-0.4.0a169.dist-info}/entry_points.txt +0 -0
  114. {guidellm-0.4.0a21.dist-info → guidellm-0.4.0a169.dist-info}/licenses/LICENSE +0 -0
  115. {guidellm-0.4.0a21.dist-info → guidellm-0.4.0a169.dist-info}/top_level.txt +0 -0
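Several modules were moved or renamed in this range: guidellm/config.py became guidellm/settings.py, guidellm/scheduler/strategy.py became guidellm/scheduler/strategies.py, guidellm/objects/statistics.py moved to guidellm/utils/statistics.py, and the guidellm/backend package was replaced by guidellm/backends. A rough migration sketch for downstream imports follows; the strategies rename is confirmed by the worker.py diff below, while the backends right-hand side is an assumption based only on the file list:

    # Before (0.4.0a21)
    from guidellm.scheduler.strategy import SchedulingStrategy  # module removed
    from guidellm.backend import Backend                        # package removed

    # After (0.4.0a169)
    from guidellm.scheduler.strategies import SchedulingStrategy  # confirmed below
    from guidellm.backends import Backend  # assumed: backends/ re-exports Backend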
guidellm/scheduler/worker.py
@@ -1,472 +1,443 @@
-import asyncio
-import math
-import time
-from abc import ABC, abstractmethod
-from collections.abc import AsyncGenerator
-from dataclasses import dataclass
-from itertools import islice
-from threading import Event
-from typing import (
-    Any,
-    Generic,
-    Literal,
-    Optional,
-    Union,
-)
+"""
+Worker process implementation for distributed request execution and coordination.

-from loguru import logger
-from pydantic import Field
+Manages individual worker processes within the scheduler system, handling request
+lifecycle from queue consumption through backend processing and status publication.
+Workers coordinate with other processes through barriers and events, apply timing
+strategies for request scheduling, maintain concurrency limits, and publish real-time
+status updates throughout request processing.
+"""

-from guidellm.backend import (
-    Backend,
-    BackendType,
-    RequestArgs,
-    ResponseSummary,
-    StreamingTextResponse,
-)
-from guidellm.objects import StandardBaseModel
-from guidellm.request import GenerationRequest
-from guidellm.request.types import RequestT, ResponseT
-from guidellm.scheduler.queues import MPQueues, Queue, QueueEmpty
-from guidellm.scheduler.result import (
-    SchedulerRequestInfo,
-    WorkerProcessRequest,
-    WorkerProcessResult,
-)
-from guidellm.scheduler.strategy import SchedulingStrategy
+from __future__ import annotations

-__all__ = [
-    "GenerativeRequestsWorker",
-    "GenerativeRequestsWorkerDescription",
-    "RequestsWorker",
-    "ResolveStatus",
-    "WorkerDescription",
-]
+import asyncio
+import time
+from multiprocessing.synchronize import Barrier as ProcessingBarrier
+from multiprocessing.synchronize import Event as ProcessingEvent
+from typing import Annotated, Generic, Literal

+try:
+    import uvloop

-@dataclass
-class ResolveStatus:
-    requested: bool
-    completed: bool
-    errored: bool
-    canceled: bool
+    HAS_UVLOOP: Annotated[
+        bool, "Flag indicating uvloop availability for event loop optimization"
+    ] = True
+except ImportError:
+    uvloop = None

-    request_start: float
-    request_end: float
+    HAS_UVLOOP: Annotated[
+        bool, "Flag indicating uvloop availability for event loop optimization"
+    ] = False


-class WorkerDescription(StandardBaseModel):
-    type_: Literal["worker"] = "worker"
+from guidellm.scheduler.schemas import (
+    BackendInterface,
+    MultiTurnRequestT,
+    RequestT,
+    ResponseT,
+)
+from guidellm.scheduler.strategies import SchedulingStrategy
+from guidellm.schemas import RequestInfo
+from guidellm.utils import (
+    InterProcessMessaging,
+    wait_for_sync_barrier,
+    wait_for_sync_event,
+    wait_for_sync_objects,
+)

+__all__ = ["WorkerProcess"]

-class RequestsWorker(ABC, Generic[RequestT, ResponseT]):
+
+class WorkerProcess(Generic[RequestT, ResponseT]):
     """
-    An abstract base class for a worker that processes requests.
-    This class defines the interface for a worker that can resolve requests
-    asynchronously or synchronously within the Scheduler class.
-    Subclasses must implement the `resolve` method,
-    which takes a request directly given from the load generator,
-    along with the desired start_time for the request and a timeout_time.
-    The `resolve` method should return the response from the backend.
+    Worker process for distributed request execution in the scheduler system.
+
+    Manages complete request lifecycle including queue consumption, backend processing,
+    timing strategy application, and status publication. Coordinates with other workers
+    through synchronization primitives while maintaining concurrency limits and handling
+    graceful shutdown scenarios including errors and cancellations.
+
+    Example:
+    ::
+        worker = WorkerProcess(
+            worker_index=0,
+            messaging=messaging_interface,
+            backend=backend_instance,
+            strategy=timing_strategy,
+            async_limit=10,
+            fut_scheduling_time_limit=5.0,
+            startup_barrier=barrier,
+            requests_generated_event=generated_event,
+            constraint_reached_event=constraint_event,
+            shutdown_event=shutdown,
+            error_event=error,
+        )
+        worker.run()
     """

-    @property
-    @abstractmethod
-    def description(self) -> WorkerDescription:
+    def __init__(
+        self,
+        worker_index: int,
+        messaging: InterProcessMessaging[
+            tuple[
+                ResponseT | None,
+                RequestT | MultiTurnRequestT[RequestT],
+                RequestInfo,
+            ],
+        ],
+        backend: BackendInterface[RequestT, ResponseT],
+        strategy: SchedulingStrategy,
+        async_limit: int,
+        fut_scheduling_time_limit: float,
+        startup_barrier: ProcessingBarrier,
+        requests_generated_event: ProcessingEvent,
+        constraint_reached_event: ProcessingEvent,
+        shutdown_event: ProcessingEvent,
+        error_event: ProcessingEvent,
+    ):
         """
-        An abstract property that must be implemented by subclasses.
-        This property should return a Serializable class representing the information
-        about the worker instance.
+        Initialize worker process instance.
+
+        :param worker_index: Unique identifier for this worker within the process group
+        :param messaging: Inter-process messaging interface for request coordination
+        :param backend: Backend interface for processing requests
+        :param strategy: Scheduling strategy for determining request timing
+        :param async_limit: Maximum concurrent requests this worker can process
+        :param fut_scheduling_time_limit: Maximum time in seconds to schedule requests
+            into the future
+        :param startup_barrier: Synchronization barrier for coordinated startup
+        :param requests_generated_event: Event signaling request generation completion
+        :param constraint_reached_event: Event signaling processing constraint reached
+        :param shutdown_event: Event signaling graceful shutdown request
+        :param error_event: Event signaling error conditions across processes
+        """
+        self.worker_index = worker_index
+        self.messaging = messaging
+        self.backend = backend
+        self.strategy = strategy
+        self.async_limit = async_limit
+        self.fut_scheduling_time_limit = fut_scheduling_time_limit
+        self.startup_barrier = startup_barrier
+        self.requests_generated_event = requests_generated_event
+        self.constraint_reached_event = constraint_reached_event
+        self.shutdown_event = shutdown_event
+        self.error_event = error_event
+
+        # Internal states
+        self.startup_completed = False
+        self.backend_started = False
+        self.messaging_started = False
+
+    def run(self):
         """
-        ...
+        Main entry point for worker process execution.

-    @abstractmethod
-    async def prepare_multiprocessing(self):
+        Initializes asyncio event loop with optional uvloop optimization and executes
+        worker async operations. Handles event loop cleanup and error propagation.
+
+        :raises RuntimeError: If worker encounters unrecoverable error during execution
         """
-        An abstract method that must be implemented by subclasses.
-        This is useful for workers that have instance state that can not
-        be shared across processes and should be cleared out and re-initialized
-        for each new process.
+        try:
+            if HAS_UVLOOP:
+                asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
+            asyncio.run(self.run_async())
+        except Exception as err:
+            self.error_event.set()
+            raise RuntimeError(
+                f"Worker process {self.messaging.worker_index} encountered an "
+                f"error: {err}"
+            ) from err
+
+    async def run_async(self):
         """
-        ...
+        Execute main asynchronous worker process logic.

-    @abstractmethod
-    async def resolve(
-        self,
-        request: RequestT,
-        timeout_time: float,
-    ) -> tuple[ResolveStatus, ResponseT]:
-        """
-        An abstract method that must be implemented by subclasses.
-        This method should handle the resolution of a request through asyncio,
-        including any necessary backend processing and response handling.
-
-        :param request: The request to be resolved generated by the load generator.
-        :param timeout_time: The timeout time for the request, if there is no timeout
-            given, then this will be math.inf.
-        :return: The response from the worker.
+        Orchestrates concurrent execution of request processing and shutdown monitoring.
+        Handles task cleanup, error propagation, and cancellation coordination when any
+        task completes or encounters an error.
+
+        :raises RuntimeError: If worker tasks encounter unrecoverable errors
+        :raises asyncio.CancelledError: If worker process was cancelled
         """
-        ...
+        stop_task = asyncio.create_task(self._stop_monitor())
+        request_proc_task = asyncio.create_task(self._process_requests())
+        caller_cancelled = False

-    async def send_result(
-        self,
-        results_queue: Queue[WorkerProcessResult[RequestT, ResponseT]],
-        result: WorkerProcessResult[RequestT, ResponseT],
-    ):
-        await asyncio.to_thread(results_queue.put, result)  # type: ignore[attr-defined]
+        try:
+            await asyncio.wait(
+                [stop_task, request_proc_task],
+                return_when=asyncio.FIRST_COMPLETED,
+            )
+        except asyncio.CancelledError:
+            caller_cancelled = True

-    async def resolve_scheduler_request(
-        self,
-        process_request: WorkerProcessRequest[RequestT, ResponseT],
-        dequeued_time: float,
-        start_time: float,
-        results_queue: Queue[WorkerProcessResult[RequestT, ResponseT]],
-        process_id: int,
-    ):
-        request = process_request.request
-        timeout_time = process_request.timeout_time
-        queued_time = process_request.queued_time
-
-        info = SchedulerRequestInfo(
-            targeted_start_time=start_time,
-            queued_time=queued_time,
-            dequeued_time=dequeued_time,
-            scheduled_time=time.time(),
-            process_id=process_id,
-        )
-        result: WorkerProcessResult[RequestT, ResponseT] = WorkerProcessResult(
-            type_="request_scheduled",
-            request=request,
-            response=None,
-            info=info,
-        )
-        asyncio.create_task(self.send_result(results_queue, result))
+        stop_task.cancel()
+        request_proc_task.cancel()
+
+        try:
+            # Ensure all child tasks cancel correctly
+            await asyncio.wait(
+                [stop_task, request_proc_task], return_when=asyncio.ALL_COMPLETED
+            )
+        except asyncio.CancelledError:
+            caller_cancelled = True
+
+        if (
+            task_err := (
+                request_proc_task.exception()
+                if not request_proc_task.cancelled()
+                else stop_task.exception()
+                if not stop_task.cancelled()
+                else None
+            )
+        ) is not None:
+            raise RuntimeError(
+                f"Worker process {self.messaging.worker_index} encountered an "
+                f"error: {task_err}"
+            ) from task_err

-        if (wait_time := start_time - time.time()) > 0:
-            await asyncio.sleep(wait_time)
+        if caller_cancelled:
+            raise asyncio.CancelledError("Worker process was cancelled")

-        info.worker_start = time.time()
-        result = WorkerProcessResult(
-            type_="request_start",
-            request=request,
-            response=None,
-            info=info,
-        )
-        asyncio.create_task(self.send_result(results_queue, result))
-
-        status, response = await self.resolve(request, timeout_time)
-        info.worker_end = time.time()
-        info.requested = status.requested
-        info.completed = status.completed
-        info.errored = status.errored
-        info.canceled = status.canceled
-        info.request_start = status.request_start
-        info.request_end = status.request_end
-        result = WorkerProcessResult(
-            type_="request_complete",
-            request=request,
-            response=response,
-            info=info,
+    async def _stop_monitor(
+        self,
+    ) -> Literal["error_event", "shutdown_event"]:
+        """Monitor shutdown and error events for worker termination."""
+        exit_key = await wait_for_sync_objects(
+            {
+                "error_event": self.error_event,
+                "shutdown_event": self.shutdown_event,
+            },
+            poll_interval=self.messaging.poll_interval,
         )
-        asyncio.create_task(self.send_result(results_queue, result))

-    def process_loop_asynchronous(
-        self,
-        queues: MPQueues[RequestT, ResponseT],
-        strategy: SchedulingStrategy,
-        stop_event: Event,
-        max_concurrency: int,
-        process_id: int,
-        num_processes: int,
-    ):
-        async def _process_runner():
-            lock = asyncio.Semaphore(max_concurrency)
-            times_iter = islice(
-                strategy.request_times(),
-                process_id,
-                None,
-                num_processes,
+        if exit_key == "error_event":
+            raise RuntimeError(
+                f"Worker process {self.messaging.worker_index} received error signal."
             )

-            start_time = None
-            while not stop_event.is_set():
-                if start_time is None:
-                    start_time = next(times_iter)
-
-                # Yield control to the event loop. Sleep if we are way ahead
-                await asyncio.sleep(start_time - time.time() - 1)
-                await lock.acquire()
-
-                try:
-                    process_request = queues.requests.get_nowait()
-                    dequeued_time = time.time()
-                except QueueEmpty:
-                    lock.release()
-                    continue
-
-                def _request_callback(
-                    _: asyncio.Future[WorkerProcessRequest[RequestT, ResponseT]],
-                ):
-                    nonlocal lock
-                    lock.release()
-
-                task = asyncio.create_task(
-                    self.resolve_scheduler_request(
-                        process_request=process_request,
-                        dequeued_time=dequeued_time,
-                        start_time=start_time,
-                        results_queue=queues.responses,
-                        process_id=process_id,
-                    )
-                )
-                task.add_done_callback(_request_callback)
-                start_time = None
+    async def _process_requests(self):
+        """
+        Manage request processing lifecycle from startup to shutdown.

+        Coordinates startup synchronization, processes requests until constraints are
+        reached, then cancels pending requests until shutdown or error occurs.
+        """
         try:
-            asyncio.run(_process_runner())
-        except Exception as exc:  # noqa: BLE001
-            logger.error(
-                f"Error in worker process {process_id}: {exc}",
-                exc_info=True,
-                stack_info=True,
+            # 1. Start up synchronization (backend, messaging, and other processes)
+            # 2. Messaging startup, receive requests until requests_generated event
+            await self._processing_startup()
+
+            # 3. Run process requests loop until constraint_reached event
+            processing_task = asyncio.create_task(self._process_requests_loop())
+            await wait_for_sync_event(
+                self.constraint_reached_event,
+                poll_interval=self.messaging.poll_interval,
             )
+            processing_task.cancel()
+
+            # 4. Cancel pending requests until proc canceled (manual, shutdown, error)
+            await self._cancel_requests_loop()
+        finally:
+            # 5. On cancel, shut down event, error event, or internal error:
+            #    attempt to shut down this worker cleanly (stop backend and messaging)
+            await self._processing_shutdown()
+
+    async def _processing_startup(self):
+        """Initialize backend, messaging, and synchronize with other workers."""
+        # Get backend ready
+        await self.backend.process_startup()
+        self.backend_started = True
+        await self.backend.validate()
+
+        # Get messaging system ready
+        await self.messaging.start(
+            receive_stop_criteria=[self.requests_generated_event]
+        )
+        self.messaging_started = True

+        # Wait for all processes to be ready
+        await wait_for_sync_barrier(
+            self.startup_barrier,
+            poll_interval=self.messaging.poll_interval,
+        )

-class GenerativeRequestsWorkerDescription(WorkerDescription):
-    type_: Literal["generative_requests_worker"] = "generative_requests_worker"  # type: ignore[assignment]
-    backend_type: BackendType
-    backend_target: str
-    backend_model: str
-    backend_info: dict[str, Any] = Field(
-        default_factory=dict,
-    )
+        self.startup_completed = True

+    async def _processing_shutdown(self):
+        if self.backend_started:
+            await self.backend.process_shutdown()
+            self.backend_started = False

-class GenerativeRequestsWorker(RequestsWorker[GenerationRequest, ResponseSummary]):
-    """
-    A class that handles the execution of requests using a backend.
-    This class is responsible for sending requests to the backend,
-    handling responses, and managing errors.
+        if self.messaging_started:
+            await self.messaging.stop()
+            self.messaging_started = False

-    :param backend: The backend to use for handling requests.
-        This should be an instance of Backend such as an OpenAIHTTPBackend.
-    """
+        self.startup_completed = False

-    def __init__(self, backend: Backend):
-        self.backend = backend
-
-    @property
-    def description(self) -> GenerativeRequestsWorkerDescription:
-        """
-        Get the description of the worker.
-        :return: The description of the worker.
+    async def _process_requests_loop(self):
         """
-        return GenerativeRequestsWorkerDescription(
-            backend_type=self.backend.type_,
-            backend_target=self.backend.target,
-            backend_model=self.backend.model or "None",
-            backend_info=self.backend.info,
-        )
+        Process requests continuously until cancelled with concurrency limits.

-    async def prepare_multiprocessing(self):
+        Schedules and processes requests according to the timing strategy while
+        maintaining the configured concurrency limit through semaphore coordination.
         """
-        Prepare the worker for multiprocessing.
-        This is useful for workers that have instance state that can not
-        be shared across processes and should be cleared out and re-initialized
-        for each new process.
-        """
-        await self.backend.prepare_multiprocessing()
+        try:
+            # Run request processing
+            async_semaphore = asyncio.Semaphore(self.async_limit)
+            pending_tasks: set[asyncio.Task] = set()
+
+            def _task_done(task):
+                pending_tasks.discard(task)
+                async_semaphore.release()
+
+                if not task.cancelled() and (exception := task.exception()):
+                    raise exception
+
+            # Main loop; loop until canceled
+            while True:
+                await async_semaphore.acquire()
+                request_time = await self.strategy.next_request_time(
+                    offset=self.worker_index
+                )

-    def process_loop_asynchronous(
-        self,
-        queues: MPQueues[GenerationRequest, ResponseSummary],
-        strategy: SchedulingStrategy,
-        stop_event: Event,
-        max_concurrency: int,
-        process_id: int,
-        num_processes: int,
-    ):
-        asyncio.run(self.backend.validate())
-        super().process_loop_asynchronous(
-            queues=queues,
-            strategy=strategy,
-            stop_event=stop_event,
-            max_concurrency=max_concurrency,
-            process_id=process_id,
-            num_processes=num_processes,
-        )
+                if (
+                    time_until := request_time - time.time()
+                ) >= self.fut_scheduling_time_limit:
+                    await asyncio.sleep(time_until - self.fut_scheduling_time_limit)

-    async def resolve(
-        self,
-        request: GenerationRequest,
-        timeout_time: float,
-    ) -> tuple[ResolveStatus, ResponseSummary]:
-        """
-        Resolve a request by sending it to the backend and handling the response.
-        This method sends the request to the backend, waits for a response,
-        and handles any errors that may occur during the process.
-
-        :param request: The request to resolve.
-        :param timeout_time: The time to wait for a response before timing out.
-            If timeout_time is math.inf, the request will not timeout.
-        :return: A ResponseSummary object containing the response from the backend.
-            If an error occurs, the ResponseSummary will contain the error message.
+                request_task = asyncio.create_task(
+                    self._process_next_request(target_start=request_time)
+                )
+                pending_tasks.add(request_task)
+                request_task.add_done_callback(_task_done)
+        except asyncio.CancelledError as err:
+            for task in pending_tasks:
+                task.cancel()
+            await asyncio.gather(*pending_tasks, return_exceptions=True)
+
+            raise err
+
+    async def _cancel_requests_loop(self):
+        """Cancel all remaining queued requests until worker process terminates."""
+        while True:
+            try:
+                request: RequestT
+                request_info: RequestInfo
+                request, request_info = await self.messaging.get(
+                    timeout=self.messaging.poll_interval
+                )
+            except asyncio.TimeoutError:
+                continue
+
+            request_info.scheduler_node_id = self.messaging.worker_index or -1
+            request_info.error = "Request was cancelled"
+            request_info.timings.resolve_end = time.time()
+            self._send_update("cancelled", None, request, request_info)
+
+    async def _process_next_request(self, target_start: float):
         """
-        resolve_start_time = time.time()
-        response = None
-        error: Optional[str] = None
-        status = ResolveStatus(
-            requested=False,
-            completed=False,
-            errored=False,
-            canceled=False,
-            request_start=-1,
-            request_end=-1,
-        )
+        Process a single request from queue to completion.

-        try:
-            if timeout_time < time.time():
-                raise asyncio.TimeoutError(
-                    "The timeout time has already passed."
-                )  # exit early
-
-            status.requested = True
-            request_func, request_kwargs = self._create_request_func_kwargs(request)
-
-            async def _runner():
-                # wrap function so we can enforce timeout and
-                # still return the latest state from the backend
-                async for resp in request_func(**request_kwargs):  # type: ignore[operator]
-                    nonlocal response
-                    response = resp
-
-            await asyncio.wait_for(
-                _runner(),
-                timeout=timeout_time - time.time() if timeout_time < math.inf else None,
-            )
+        Retrieves request from messaging queue, applies timing strategy, processes
+        through backend, and publishes status updates throughout the lifecycle.

-            if not response:
-                raise ValueError(
-                    f"No response received for request: {request} "
-                    f"and backend: {self.backend}"
-                )
-            if not isinstance(response, ResponseSummary):
-                raise ValueError(
-                    f"Received no ResponseSummary for request: {request} "
-                    f"and backend: {self.backend}, received: {response}"
-                )
+        :param target_start: Unix timestamp when request should begin processing
+        """
+        request: RequestT | MultiTurnRequestT[RequestT] | None = None
+        request_info: RequestInfo | None = None
+        response: ResponseT | None = None

-            status.completed = True
-        except asyncio.TimeoutError:
-            error = "TimeoutError: The request timed out before completing."
-            status.errored = True
-            status.canceled = True
+        try:
+            # Pull request from the queue, update state, and send "pending" update
+            request, request_info = await self.messaging.get()
+            request_info.timings.dequeued = time.time()
+            request_info.scheduler_node_id = self.messaging.worker_index or -1
+            request_info.timings.targeted_start = target_start
+            self._send_update("pending", response, request, request_info)
+
+            if request is None or request_info is None:
+                raise RuntimeError("Received invalid request or request info")
+            if isinstance(request, list | tuple):
+                raise NotImplementedError("Multi-turn requests are not yet supported")
+
+            # Schedule the request
+            current_time = time.time()
+            request_info.timings.scheduled_at = current_time
+            if target_start > current_time:
+                await asyncio.sleep(target_start - current_time)
+                # Adapt delay so that scheduled at reflects the sleep time
+                request_info.timings.scheduled_at = target_start
+
+            # Process the request with the backend
+            request_info.timings.resolve_start = time.time()
+            self._send_update("in_progress", response, request, request_info)
+            async for resp, info in self.backend.resolve(request, request_info, None):
+                response = resp
+                request_info = info
+
+            # Complete the request
+            request_info.timings.resolve_end = time.time()
+            self._send_update("completed", response, request, request_info)
+
+            response = request = request_info = None
+        except asyncio.CancelledError:
+            # Handle cancellation
+            if request is not None and request_info is not None:
+                request_info.error = "Request was cancelled"
+                request_info.timings.resolve_end = time.time()
+                self._send_update("cancelled", response, request, request_info)
+            raise
         except Exception as exc:  # noqa: BLE001
-            error = str(exc)
-            status.errored = True
-
-        return self._handle_response(
-            status=status,
-            request=request,
-            response=response,
-            error=error,
-            resolve_start_time=resolve_start_time,
-        )
-
-    def _create_request_func_kwargs(
+            if request is not None and request_info is not None:
+                request_info.error = str(exc)
+                request_info.timings.resolve_end = time.time()
+                self._send_update("errored", response, request, request_info)
+        finally:
+            if request_info is not None:
+                self.strategy.request_completed(request_info)
+
+    def _send_update(
         self,
-        request: GenerationRequest,
-    ) -> tuple[
-        AsyncGenerator[Union[StreamingTextResponse, ResponseSummary], None],
-        dict[str, Any],
-    ]:
-        request_func: AsyncGenerator[
-            Union[StreamingTextResponse, ResponseSummary], None
-        ]
-        request_kwargs: dict[str, Any]
-
-        if request.request_type == "text_completions":
-            request_func = self.backend.text_completions  # type: ignore[assignment]
-            request_kwargs = {
-                "prompt": request.content,
-                "request_id": request.request_id,
-                "prompt_token_count": request.stats.get("prompt_tokens", None),
-                "output_token_count": request.constraints.get("output_tokens", None),
-                **request.params,
-            }
-        elif request.request_type == "chat_completions":
-            request_func = self.backend.chat_completions  # type: ignore[assignment]
-            request_kwargs = {
-                "content": request.content,
-                "request_id": request.request_id,
-                "prompt_token_count": request.stats.get("prompt_tokens", None),
-                "output_token_count": request.constraints.get("output_tokens", None),
-                **request.params,
-            }
-        else:
-            raise ValueError(
-                f"Invalid request type: {request.request_type} for {request}"
-            )
+        new_status: Literal[
+            "pending", "in_progress", "completed", "errored", "cancelled"
+        ],
+        response: ResponseT | None,
+        request: RequestT | MultiTurnRequestT[RequestT],
+        request_info: RequestInfo,
+    ):
+        """
+        Publish request status update through messaging system.

-        return request_func, request_kwargs
+        Updates request status and publishes to messaging queue for coordinator
+        consumption. Prevents duplicate status updates for the same state.

-    def _handle_response(
-        self,
-        status: ResolveStatus,
-        request: GenerationRequest,
-        response: Any,
-        error: Optional[str],
-        resolve_start_time: float,
-    ) -> tuple[ResolveStatus, ResponseSummary]:
-        if response is None or not isinstance(
-            response, (ResponseSummary, StreamingTextResponse)
-        ):
-            # nothing received or invalid response, fill in defaults for error
-            if response:
-                error = str(
-                    ValueError(
-                        f"Invalid response: {type(response)} for request: {request}; "
-                    )
-                ) + (error or "")
-
-            response = ResponseSummary(
-                value="",
-                request_args=RequestArgs(
-                    target=self.backend.target,
-                    headers={},
-                    params={},
-                    payload={},
-                ),
-                start_time=resolve_start_time,
-                end_time=status.request_end,
-                first_iter_time=None,
-                last_iter_time=None,
-                request_id=request.request_id,
-                error=error or "Unknown error",
-            )
-        elif isinstance(response, StreamingTextResponse):
-            response = ResponseSummary(
-                value=response.value,
-                request_args=RequestArgs(
-                    target=self.backend.target,
-                    headers={},
-                    params={},
-                    payload={},
-                ),
-                start_time=response.start_time,
-                end_time=time.time(),
-                first_iter_time=response.first_iter_time,
-                last_iter_time=response.time if response.iter_count > 0 else None,
-                request_prompt_tokens=request.stats.get("prompt_tokens", None),
-                request_output_tokens=request.constraints.get("output_tokens", None),
-                response_prompt_tokens=None,
-                response_output_tokens=response.iter_count,
-                request_id=request.request_id,
-                error=error or "Unknown error",
-            )
+        :param new_status: New status for the request
+        :param response: Response object if available, None otherwise
+        :param request: Request object being processed
+        :param request_info: Request metadata and timing information
+        :raises Exception: If messaging system fails to publish the update
+        """
+        prev_status = request_info.status

-        response.error = error
-        status.request_start = response.start_time
-        status.request_end = response.end_time
+        if new_status == prev_status:
+            # already sent this update, don't send again
+            return

-        return status, response
+        try:
+            request_info.status = new_status
+            request_info = (
+                request_info.model_copy()
+                if new_status not in {"completed", "errored", "cancelled"}
+                else request_info  # last update, don't need to copy
+            )
+            self.messaging.put_sync(
+                (response, request, request_info),
+                timeout=-1,
+            )
+            prev_status = new_status
+        except Exception as exc:
+            # Reset status to last one that succeeded or started function with
+            # Calling logic can retry after handling error, if possible
+            request_info.status = prev_status
+            raise exc
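The new _process_requests_loop replaces the old queue-polling loop with a semaphore-gated pattern: acquire a concurrency slot, ask the strategy for the next target start time, sleep only while that target lies beyond the scheduling horizon (fut_scheduling_time_limit), then spawn a task whose completion callback releases the slot. A standalone sketch of that pattern follows; next_request_time and handle_request are hypothetical stand-ins for the strategy and backend calls in the diff, not guidellm APIs:

    import asyncio
    import itertools
    import time


    async def schedule_loop(next_request_time, handle_request, async_limit, horizon=5.0):
        # Semaphore caps in-flight requests, mirroring async_limit in the diff.
        semaphore = asyncio.Semaphore(async_limit)
        pending: set[asyncio.Task] = set()

        def _done(task: asyncio.Task):
            pending.discard(task)
            semaphore.release()

        try:
            while True:
                await semaphore.acquire()
                target = next_request_time()  # strategy.next_request_time(...) in the diff
                # Sleep only while the target start is beyond the scheduling horizon
                # (fut_scheduling_time_limit), so near-term requests are created
                # immediately and do their final wait inside their own task.
                if (delta := target - time.time()) >= horizon:
                    await asyncio.sleep(delta - horizon)
                task = asyncio.create_task(handle_request(target))
                pending.add(task)
                task.add_done_callback(_done)
        except asyncio.CancelledError:
            # Mirror the diff's cleanup: cancel in-flight work before re-raising.
            for task in pending:
                task.cancel()
            await asyncio.gather(*pending, return_exceptions=True)
            raise


    async def main():
        start = time.time()
        times = (start + 0.1 * i for i in itertools.count())  # constant-rate schedule

        async def handle(target: float):
            await asyncio.sleep(max(0.0, target - time.time()))
            print(f"request fired at offset {time.time() - start:.2f}s")

        loop_task = asyncio.create_task(
            schedule_loop(lambda: next(times), handle, async_limit=4)
        )
        await asyncio.sleep(1.5)
        loop_task.cancel()
        try:
            await loop_task
        except asyncio.CancelledError:
            pass


    asyncio.run(main())

The horizon keeps the loop from materializing arbitrarily many far-future tasks while still letting the semaphore, not the sleep, bound concurrency for near-term work.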