guidellm 0.4.0a18__py3-none-any.whl → 0.4.0a155__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Note: this version of guidellm has been flagged as a potentially problematic release.
- guidellm/__init__.py +5 -2
- guidellm/__main__.py +451 -252
- guidellm/backends/__init__.py +33 -0
- guidellm/backends/backend.py +110 -0
- guidellm/backends/openai.py +355 -0
- guidellm/backends/response_handlers.py +455 -0
- guidellm/benchmark/__init__.py +53 -39
- guidellm/benchmark/benchmarker.py +148 -317
- guidellm/benchmark/entrypoints.py +466 -128
- guidellm/benchmark/output.py +517 -771
- guidellm/benchmark/profile.py +580 -280
- guidellm/benchmark/progress.py +568 -549
- guidellm/benchmark/scenarios/__init__.py +40 -0
- guidellm/benchmark/scenarios/chat.json +6 -0
- guidellm/benchmark/scenarios/rag.json +6 -0
- guidellm/benchmark/schemas.py +2085 -0
- guidellm/data/__init__.py +28 -4
- guidellm/data/collators.py +16 -0
- guidellm/data/deserializers/__init__.py +53 -0
- guidellm/data/deserializers/deserializer.py +109 -0
- guidellm/data/deserializers/file.py +222 -0
- guidellm/data/deserializers/huggingface.py +94 -0
- guidellm/data/deserializers/memory.py +192 -0
- guidellm/data/deserializers/synthetic.py +346 -0
- guidellm/data/loaders.py +145 -0
- guidellm/data/preprocessors/__init__.py +25 -0
- guidellm/data/preprocessors/formatters.py +412 -0
- guidellm/data/preprocessors/mappers.py +198 -0
- guidellm/data/preprocessors/preprocessor.py +29 -0
- guidellm/data/processor.py +30 -0
- guidellm/data/schemas.py +13 -0
- guidellm/data/utils/__init__.py +10 -0
- guidellm/data/utils/dataset.py +94 -0
- guidellm/data/utils/functions.py +18 -0
- guidellm/extras/__init__.py +4 -0
- guidellm/extras/audio.py +215 -0
- guidellm/extras/vision.py +242 -0
- guidellm/logger.py +2 -2
- guidellm/mock_server/__init__.py +8 -0
- guidellm/mock_server/config.py +84 -0
- guidellm/mock_server/handlers/__init__.py +17 -0
- guidellm/mock_server/handlers/chat_completions.py +280 -0
- guidellm/mock_server/handlers/completions.py +280 -0
- guidellm/mock_server/handlers/tokenizer.py +142 -0
- guidellm/mock_server/models.py +510 -0
- guidellm/mock_server/server.py +168 -0
- guidellm/mock_server/utils.py +302 -0
- guidellm/preprocess/dataset.py +23 -26
- guidellm/presentation/builder.py +2 -2
- guidellm/presentation/data_models.py +25 -21
- guidellm/presentation/injector.py +2 -3
- guidellm/scheduler/__init__.py +65 -26
- guidellm/scheduler/constraints.py +1035 -0
- guidellm/scheduler/environments.py +252 -0
- guidellm/scheduler/scheduler.py +140 -368
- guidellm/scheduler/schemas.py +272 -0
- guidellm/scheduler/strategies.py +519 -0
- guidellm/scheduler/worker.py +391 -420
- guidellm/scheduler/worker_group.py +707 -0
- guidellm/schemas/__init__.py +31 -0
- guidellm/schemas/info.py +159 -0
- guidellm/schemas/request.py +216 -0
- guidellm/schemas/response.py +119 -0
- guidellm/schemas/stats.py +228 -0
- guidellm/{config.py → settings.py} +32 -21
- guidellm/utils/__init__.py +95 -8
- guidellm/utils/auto_importer.py +98 -0
- guidellm/utils/cli.py +46 -2
- guidellm/utils/console.py +183 -0
- guidellm/utils/encoding.py +778 -0
- guidellm/utils/functions.py +134 -0
- guidellm/utils/hf_datasets.py +1 -2
- guidellm/utils/hf_transformers.py +4 -4
- guidellm/utils/imports.py +9 -0
- guidellm/utils/messaging.py +1118 -0
- guidellm/utils/mixins.py +115 -0
- guidellm/utils/pydantic_utils.py +411 -0
- guidellm/utils/random.py +3 -4
- guidellm/utils/registry.py +220 -0
- guidellm/utils/singleton.py +133 -0
- guidellm/{objects → utils}/statistics.py +341 -247
- guidellm/utils/synchronous.py +159 -0
- guidellm/utils/text.py +163 -50
- guidellm/utils/typing.py +41 -0
- guidellm/version.py +1 -1
- {guidellm-0.4.0a18.dist-info → guidellm-0.4.0a155.dist-info}/METADATA +33 -10
- guidellm-0.4.0a155.dist-info/RECORD +96 -0
- guidellm/backend/__init__.py +0 -23
- guidellm/backend/backend.py +0 -259
- guidellm/backend/openai.py +0 -705
- guidellm/backend/response.py +0 -136
- guidellm/benchmark/aggregator.py +0 -760
- guidellm/benchmark/benchmark.py +0 -837
- guidellm/benchmark/scenario.py +0 -104
- guidellm/data/prideandprejudice.txt.gz +0 -0
- guidellm/dataset/__init__.py +0 -22
- guidellm/dataset/creator.py +0 -213
- guidellm/dataset/entrypoints.py +0 -42
- guidellm/dataset/file.py +0 -92
- guidellm/dataset/hf_datasets.py +0 -62
- guidellm/dataset/in_memory.py +0 -132
- guidellm/dataset/synthetic.py +0 -287
- guidellm/objects/__init__.py +0 -18
- guidellm/objects/pydantic.py +0 -89
- guidellm/request/__init__.py +0 -18
- guidellm/request/loader.py +0 -284
- guidellm/request/request.py +0 -79
- guidellm/request/types.py +0 -10
- guidellm/scheduler/queues.py +0 -25
- guidellm/scheduler/result.py +0 -155
- guidellm/scheduler/strategy.py +0 -495
- guidellm-0.4.0a18.dist-info/RECORD +0 -62
- {guidellm-0.4.0a18.dist-info → guidellm-0.4.0a155.dist-info}/WHEEL +0 -0
- {guidellm-0.4.0a18.dist-info → guidellm-0.4.0a155.dist-info}/entry_points.txt +0 -0
- {guidellm-0.4.0a18.dist-info → guidellm-0.4.0a155.dist-info}/licenses/LICENSE +0 -0
- {guidellm-0.4.0a18.dist-info → guidellm-0.4.0a155.dist-info}/top_level.txt +0 -0
guidellm/scheduler/worker.py
CHANGED
```diff
@@ -1,472 +1,443 @@
-
-
-import time
-from abc import ABC, abstractmethod
-from collections.abc import AsyncGenerator
-from dataclasses import dataclass
-from itertools import islice
-from threading import Event
-from typing import (
-    Any,
-    Generic,
-    Literal,
-    Optional,
-    Union,
-)
+"""
+Worker process implementation for distributed request execution and coordination.

-
-from
+Manages individual worker processes within the scheduler system, handling request
+lifecycle from queue consumption through backend processing and status publication.
+Workers coordinate with other processes through barriers and events, apply timing
+strategies for request scheduling, maintain concurrency limits, and publish real-time
+status updates throughout request processing.
+"""

-from
-    Backend,
-    BackendType,
-    RequestArgs,
-    ResponseSummary,
-    StreamingTextResponse,
-)
-from guidellm.objects import StandardBaseModel
-from guidellm.request import GenerationRequest
-from guidellm.request.types import RequestT, ResponseT
-from guidellm.scheduler.queues import MPQueues, Queue, QueueEmpty
-from guidellm.scheduler.result import (
-    SchedulerRequestInfo,
-    WorkerProcessRequest,
-    WorkerProcessResult,
-)
-from guidellm.scheduler.strategy import SchedulingStrategy
+from __future__ import annotations

-
-
-
-
-
-    "WorkerDescription",
-]
+import asyncio
+import time
+from multiprocessing.synchronize import Barrier as ProcessingBarrier
+from multiprocessing.synchronize import Event as ProcessingEvent
+from typing import Annotated, Generic, Literal

+try:
+    import uvloop

-
-
-
-
-
-    canceled: bool
+    HAS_UVLOOP: Annotated[
+        bool, "Flag indicating uvloop availability for event loop optimization"
+    ] = True
+except ImportError:
+    uvloop = None

-
-
+    HAS_UVLOOP: Annotated[
+        bool, "Flag indicating uvloop availability for event loop optimization"
+    ] = False


-
-
+from guidellm.scheduler.schemas import (
+    BackendInterface,
+    MultiTurnRequestT,
+    RequestT,
+    ResponseT,
+)
+from guidellm.scheduler.strategies import SchedulingStrategy
+from guidellm.schemas import RequestInfo
+from guidellm.utils import (
+    InterProcessMessaging,
+    wait_for_sync_barrier,
+    wait_for_sync_event,
+    wait_for_sync_objects,
+)

+__all__ = ["WorkerProcess"]

-
+
+class WorkerProcess(Generic[RequestT, ResponseT]):
     """
-
-
-
-
-
-
-
+    Worker process for distributed request execution in the scheduler system.
+
+    Manages complete request lifecycle including queue consumption, backend processing,
+    timing strategy application, and status publication. Coordinates with other workers
+    through synchronization primitives while maintaining concurrency limits and handling
+    graceful shutdown scenarios including errors and cancellations.
+
+    Example:
+    ::
+        worker = WorkerProcess(
+            worker_index=0,
+            messaging=messaging_interface,
+            backend=backend_instance,
+            strategy=timing_strategy,
+            async_limit=10,
+            fut_scheduling_time_limit=5.0,
+            startup_barrier=barrier,
+            requests_generated_event=generated_event,
+            constraint_reached_event=constraint_event,
+            shutdown_event=shutdown,
+            error_event=error,
+        )
+        worker.run()
     """

-
-
-
+    def __init__(
+        self,
+        worker_index: int,
+        messaging: InterProcessMessaging[
+            tuple[
+                ResponseT | None,
+                RequestT | MultiTurnRequestT[RequestT],
+                RequestInfo,
+            ],
+        ],
+        backend: BackendInterface[RequestT, ResponseT],
+        strategy: SchedulingStrategy,
+        async_limit: int,
+        fut_scheduling_time_limit: float,
+        startup_barrier: ProcessingBarrier,
+        requests_generated_event: ProcessingEvent,
+        constraint_reached_event: ProcessingEvent,
+        shutdown_event: ProcessingEvent,
+        error_event: ProcessingEvent,
+    ):
         """
-
-
-
+        Initialize worker process instance.
+
+        :param worker_index: Unique identifier for this worker within the process group
+        :param messaging: Inter-process messaging interface for request coordination
+        :param backend: Backend interface for processing requests
+        :param strategy: Scheduling strategy for determining request timing
+        :param async_limit: Maximum concurrent requests this worker can process
+        :param fut_scheduling_time_limit: Maximum time in seconds to schedule requests
+            into the future
+        :param startup_barrier: Synchronization barrier for coordinated startup
+        :param requests_generated_event: Event signaling request generation completion
+        :param constraint_reached_event: Event signaling processing constraint reached
+        :param shutdown_event: Event signaling graceful shutdown request
+        :param error_event: Event signaling error conditions across processes
+        """
+        self.worker_index = worker_index
+        self.messaging = messaging
+        self.backend = backend
+        self.strategy = strategy
+        self.async_limit = async_limit
+        self.fut_scheduling_time_limit = fut_scheduling_time_limit
+        self.startup_barrier = startup_barrier
+        self.requests_generated_event = requests_generated_event
+        self.constraint_reached_event = constraint_reached_event
+        self.shutdown_event = shutdown_event
+        self.error_event = error_event
+
+        # Internal states
+        self.startup_completed = False
+        self.backend_started = False
+        self.messaging_started = False
+
+    def run(self):
         """
-
+        Main entry point for worker process execution.

-
-
+        Initializes asyncio event loop with optional uvloop optimization and executes
+        worker async operations. Handles event loop cleanup and error propagation.
+
+        :raises RuntimeError: If worker encounters unrecoverable error during execution
         """
-
-
-
-
+        try:
+            if HAS_UVLOOP:
+                asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
+            asyncio.run(self.run_async())
+        except Exception as err:
+            self.error_event.set()
+            raise RuntimeError(
+                f"Worker process {self.messaging.worker_index} encountered an "
+                f"error: {err}"
+            ) from err
+
+    async def run_async(self):
         """
-
+        Execute main asynchronous worker process logic.

-
-
-
-
-
-
-        """
-        An abstract method that must be implemented by subclasses.
-        This method should handle the resolution of a request through asyncio,
-        including any necessary backend processing and response handling.
-
-        :param request: The request to be resolved generated by the load generator.
-        :param timeout_time: The timeout time for the request, if there is no timeout
-            given, then this will be math.inf.
-        :return: The response from the worker.
+        Orchestrates concurrent execution of request processing and shutdown monitoring.
+        Handles task cleanup, error propagation, and cancellation coordination when any
+        task completes or encounters an error.
+
+        :raises RuntimeError: If worker tasks encounter unrecoverable errors
+        :raises asyncio.CancelledError: If worker process was cancelled
         """
-
+        stop_task = asyncio.create_task(self._stop_monitor())
+        request_proc_task = asyncio.create_task(self._process_requests())
+        caller_cancelled = False

-
-
-
-
-
-
+        try:
+            await asyncio.wait(
+                [stop_task, request_proc_task],
+                return_when=asyncio.FIRST_COMPLETED,
+            )
+        except asyncio.CancelledError:
+            caller_cancelled = True

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-        )
-        asyncio.create_task(self.send_result(results_queue, result))
+        stop_task.cancel()
+        request_proc_task.cancel()
+
+        try:
+            # Ensure all child tasks cancel correctly
+            await asyncio.wait(
+                [stop_task, request_proc_task], return_when=asyncio.ALL_COMPLETED
+            )
+        except asyncio.CancelledError:
+            caller_cancelled = True
+
+        if (
+            task_err := (
+                request_proc_task.exception()
+                if not request_proc_task.cancelled()
+                else stop_task.exception()
+                if not stop_task.cancelled()
+                else None
+            )
+        ) is not None:
+            raise RuntimeError(
+                f"Worker process {self.messaging.worker_index} encountered an "
+                f"error: {task_err}"
+            ) from task_err

-        if
-
+        if caller_cancelled:
+            raise asyncio.CancelledError("Worker process was cancelled")

-
-
-
-
-
-
-
-
-
-
-        info.worker_end = time.time()
-        info.requested = status.requested
-        info.completed = status.completed
-        info.errored = status.errored
-        info.canceled = status.canceled
-        info.request_start = status.request_start
-        info.request_end = status.request_end
-        result = WorkerProcessResult(
-            type_="request_complete",
-            request=request,
-            response=response,
-            info=info,
+    async def _stop_monitor(
+        self,
+    ) -> Literal["error_event", "shutdown_event"]:
+        """Monitor shutdown and error events for worker termination."""
+        exit_key = await wait_for_sync_objects(
+            {
+                "error_event": self.error_event,
+                "shutdown_event": self.shutdown_event,
+            },
+            poll_interval=self.messaging.poll_interval,
         )
-        asyncio.create_task(self.send_result(results_queue, result))

-
-
-
-        strategy: SchedulingStrategy,
-        stop_event: Event,
-        max_concurrency: int,
-        process_id: int,
-        num_processes: int,
-    ):
-        async def _process_runner():
-            lock = asyncio.Semaphore(max_concurrency)
-            times_iter = islice(
-                strategy.request_times(),
-                process_id,
-                None,
-                num_processes,
+        if exit_key == "error_event":
+            raise RuntimeError(
+                f"Worker process {self.messaging.worker_index} received error signal."
             )

-
-
-
-            start_time = next(times_iter)
-
-            # Yield control to the event loop. Sleep if we are way ahead
-            await asyncio.sleep(start_time - time.time() - 1)
-            await lock.acquire()
-
-            try:
-                process_request = queues.requests.get_nowait()
-                dequeued_time = time.time()
-            except QueueEmpty:
-                lock.release()
-                continue
-
-            def _request_callback(
-                _: asyncio.Future[WorkerProcessRequest[RequestT, ResponseT]],
-            ):
-                nonlocal lock
-                lock.release()
-
-            task = asyncio.create_task(
-                self.resolve_scheduler_request(
-                    process_request=process_request,
-                    dequeued_time=dequeued_time,
-                    start_time=start_time,
-                    results_queue=queues.responses,
-                    process_id=process_id,
-                )
-            )
-            task.add_done_callback(_request_callback)
-            start_time = None
+    async def _process_requests(self):
+        """
+        Manage request processing lifecycle from startup to shutdown.

+        Coordinates startup synchronization, processes requests until constraints are
+        reached, then cancels pending requests until shutdown or error occurs.
+        """
         try:
-
-
-
-
-
-
+            # 1. Start up synchronization (backend, messaging, and other processes)
+            # 2. Messaging startup, receive requests until requests_generated event
+            await self._processing_startup()
+
+            # 3. Run process requests loop until constraint_reached event
+            processing_task = asyncio.create_task(self._process_requests_loop())
+            await wait_for_sync_event(
+                self.constraint_reached_event,
+                poll_interval=self.messaging.poll_interval,
             )
+            processing_task.cancel()
+
+            # 4. Cancel pending requests until proc canceled (manual, shutdown, error)
+            await self._cancel_requests_loop()
+        finally:
+            # 5. On cancel, shut down event, error event, or internal error:
+            # attempt to shut down this worker cleanly (stop backend and messaging)
+            await self._processing_shutdown()
+
+    async def _processing_startup(self):
+        """Initialize backend, messaging, and synchronize with other workers."""
+        # Get backend ready
+        await self.backend.process_startup()
+        self.backend_started = True
+        await self.backend.validate()
+
+        # Get messaging system ready
+        await self.messaging.start(
+            receive_stop_criteria=[self.requests_generated_event]
+        )
+        self.messaging_started = True

+        # Wait for all processes to be ready
+        await wait_for_sync_barrier(
+            self.startup_barrier,
+            poll_interval=self.messaging.poll_interval,
+        )

-
-    type_: Literal["generative_requests_worker"] = "generative_requests_worker"  # type: ignore[assignment]
-    backend_type: BackendType
-    backend_target: str
-    backend_model: str
-    backend_info: dict[str, Any] = Field(
-        default_factory=dict,
-    )
+        self.startup_completed = True

+    async def _processing_shutdown(self):
+        if self.backend_started:
+            await self.backend.process_shutdown()
+            self.backend_started = False

-
-
-
-    This class is responsible for sending requests to the backend,
-    handling responses, and managing errors.
+        if self.messaging_started:
+            await self.messaging.stop()
+            self.messaging_started = False

-
-        This should be an instance of Backend such as an OpenAIHTTPBackend.
-        """
+        self.startup_completed = False

-    def
-        self.backend = backend
-
-    @property
-    def description(self) -> GenerativeRequestsWorkerDescription:
-        """
-        Get the description of the worker.
-        :return: The description of the worker.
+    async def _process_requests_loop(self):
         """
-
-            backend_type=self.backend.type_,
-            backend_target=self.backend.target,
-            backend_model=self.backend.model or "None",
-            backend_info=self.backend.info,
-        )
+        Process requests continuously until cancelled with concurrency limits.

-
+        Schedules and processes requests according to the timing strategy while
+        maintaining the configured concurrency limit through semaphore coordination.
         """
-
-
-
-
-
-
+        try:
+            # Run request processing
+            async_semaphore = asyncio.Semaphore(self.async_limit)
+            pending_tasks: set[asyncio.Task] = set()
+
+            def _task_done(task):
+                pending_tasks.discard(task)
+                async_semaphore.release()
+
+                if not task.cancelled() and (exception := task.exception()):
+                    raise exception
+
+            # Main loop; loop until canceled
+            while True:
+                await async_semaphore.acquire()
+                request_time = await self.strategy.next_request_time(
+                    offset=self.worker_index
+                )

-
-
-
-
-        stop_event: Event,
-        max_concurrency: int,
-        process_id: int,
-        num_processes: int,
-    ):
-        asyncio.run(self.backend.validate())
-        super().process_loop_asynchronous(
-            queues=queues,
-            strategy=strategy,
-            stop_event=stop_event,
-            max_concurrency=max_concurrency,
-            process_id=process_id,
-            num_processes=num_processes,
-        )
+                if (
+                    time_until := request_time - time.time()
+                ) >= self.fut_scheduling_time_limit:
+                    await asyncio.sleep(time_until - self.fut_scheduling_time_limit)

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+                request_task = asyncio.create_task(
+                    self._process_next_request(target_start=request_time)
+                )
+                pending_tasks.add(request_task)
+                request_task.add_done_callback(_task_done)
+        except asyncio.CancelledError as err:
+            for task in pending_tasks:
+                task.cancel()
+            await asyncio.gather(*pending_tasks, return_exceptions=True)
+
+            raise err
+
+    async def _cancel_requests_loop(self):
+        """Cancel all remaining queued requests until worker process terminates."""
+        while True:
+            try:
+                request: RequestT
+                request_info: RequestInfo
+                request, request_info = await self.messaging.get(
+                    timeout=self.messaging.poll_interval
+                )
+            except asyncio.TimeoutError:
+                continue
+
+            request_info.scheduler_node_id = self.messaging.worker_index or -1
+            request_info.error = "Request was cancelled"
+            request_info.timings.resolve_end = time.time()
+            self._send_update("cancelled", None, request, request_info)
+
+    async def _process_next_request(self, target_start: float):
         """
-
-        response = None
-        error: Optional[str] = None
-        status = ResolveStatus(
-            requested=False,
-            completed=False,
-            errored=False,
-            canceled=False,
-            request_start=-1,
-            request_end=-1,
-        )
+        Process a single request from queue to completion.

-
-
-            raise asyncio.TimeoutError(
-                "The timeout time has already passed."
-            )  # exit early
-
-        status.requested = True
-        request_func, request_kwargs = self._create_request_func_kwargs(request)
-
-        async def _runner():
-            # wrap function so we can enforce timeout and
-            # still return the latest state from the backend
-            async for resp in request_func(**request_kwargs):  # type: ignore[operator]
-                nonlocal response
-                response = resp
-
-        await asyncio.wait_for(
-            _runner(),
-            timeout=timeout_time - time.time() if timeout_time < math.inf else None,
-        )
+        Retrieves request from messaging queue, applies timing strategy, processes
+        through backend, and publishes status updates throughout the lifecycle.

-
-
-
-
-
-        if not isinstance(response, ResponseSummary):
-            raise ValueError(
-                f"Received no ResponseSummary for request: {request} "
-                f"and backend: {self.backend}, received: {response}"
-            )
+        :param target_start: Unix timestamp when request should begin processing
+        """
+        request: RequestT | MultiTurnRequestT[RequestT] | None = None
+        request_info: RequestInfo | None = None
+        response: ResponseT | None = None

-
-
-
-
-
+        try:
+            # Pull request from the queue, update state, and send "pending" update
+            request, request_info = await self.messaging.get()
+            request_info.timings.dequeued = time.time()
+            request_info.scheduler_node_id = self.messaging.worker_index or -1
+            request_info.timings.targeted_start = target_start
+            self._send_update("pending", response, request, request_info)
+
+            if request is None or request_info is None:
+                raise RuntimeError("Received invalid request or request info")
+            if isinstance(request, list | tuple):
+                raise NotImplementedError("Multi-turn requests are not yet supported")
+
+            # Schedule the request
+            current_time = time.time()
+            request_info.timings.scheduled_at = current_time
+            if target_start > current_time:
+                await asyncio.sleep(target_start - current_time)
+                # Adapt delay so that scheduled at reflects the sleep time
+                request_info.timings.scheduled_at = target_start
+
+            # Process the request with the backend
+            request_info.timings.resolve_start = time.time()
+            self._send_update("in_progress", response, request, request_info)
+            async for resp, info in self.backend.resolve(request, request_info, None):
+                response = resp
+                request_info = info
+
+            # Complete the request
+            request_info.timings.resolve_end = time.time()
+            self._send_update("completed", response, request, request_info)
+
+            response = request = request_info = None
+        except asyncio.CancelledError:
+            # Handle cancellation
+            if request is not None and request_info is not None:
+                request_info.error = "Request was cancelled"
+                request_info.timings.resolve_end = time.time()
+                self._send_update("cancelled", response, request, request_info)
+            raise
         except Exception as exc:  # noqa: BLE001
-
-
-
-
-
-
-
-
-
-        )
-
-    def _create_request_func_kwargs(
+            if request is not None and request_info is not None:
+                request_info.error = str(exc)
+                request_info.timings.resolve_end = time.time()
+                self._send_update("errored", response, request, request_info)
+        finally:
+            if request_info is not None:
+                self.strategy.request_completed(request_info)
+
+    def _send_update(
         self,
-
-
-
-
-
-
-
-
-
-
-        if request.request_type == "text_completions":
-            request_func = self.backend.text_completions  # type: ignore[assignment]
-            request_kwargs = {
-                "prompt": request.content,
-                "request_id": request.request_id,
-                "prompt_token_count": request.stats.get("prompt_tokens", None),
-                "output_token_count": request.constraints.get("output_tokens", None),
-                **request.params,
-            }
-        elif request.request_type == "chat_completions":
-            request_func = self.backend.chat_completions  # type: ignore[assignment]
-            request_kwargs = {
-                "content": request.content,
-                "request_id": request.request_id,
-                "prompt_token_count": request.stats.get("prompt_tokens", None),
-                "output_token_count": request.constraints.get("output_tokens", None),
-                **request.params,
-            }
-        else:
-            raise ValueError(
-                f"Invalid request type: {request.request_type} for {request}"
-            )
+        new_status: Literal[
+            "pending", "in_progress", "completed", "errored", "cancelled"
+        ],
+        response: ResponseT | None,
+        request: RequestT | MultiTurnRequestT[RequestT],
+        request_info: RequestInfo,
+    ):
+        """
+        Publish request status update through messaging system.

-
+        Updates request status and publishes to messaging queue for coordinator
+        consumption. Prevents duplicate status updates for the same state.

-
-
-
-
-
-
-
-    ) -> tuple[ResolveStatus, ResponseSummary]:
-        if response is None or not isinstance(
-            response, (ResponseSummary, StreamingTextResponse)
-        ):
-            # nothing received or invalid response, fill in defaults for error
-            if response:
-                error = str(
-                    ValueError(
-                        f"Invalid response: {type(response)} for request: {request}; "
-                    )
-                ) + (error or "")
-
-            response = ResponseSummary(
-                value="",
-                request_args=RequestArgs(
-                    target=self.backend.target,
-                    headers={},
-                    params={},
-                    payload={},
-                ),
-                start_time=resolve_start_time,
-                end_time=status.request_end,
-                first_iter_time=None,
-                last_iter_time=None,
-                request_id=request.request_id,
-                error=error or "Unknown error",
-            )
-        elif isinstance(response, StreamingTextResponse):
-            response = ResponseSummary(
-                value=response.value,
-                request_args=RequestArgs(
-                    target=self.backend.target,
-                    headers={},
-                    params={},
-                    payload={},
-                ),
-                start_time=response.start_time,
-                end_time=time.time(),
-                first_iter_time=response.first_iter_time,
-                last_iter_time=response.time if response.iter_count > 0 else None,
-                request_prompt_tokens=request.stats.get("prompt_tokens", None),
-                request_output_tokens=request.constraints.get("output_tokens", None),
-                response_prompt_tokens=None,
-                response_output_tokens=response.iter_count,
-                request_id=request.request_id,
-                error=error or "Unknown error",
-            )
+        :param new_status: New status for the request
+        :param response: Response object if available, None otherwise
+        :param request: Request object being processed
+        :param request_info: Request metadata and timing information
+        :raises Exception: If messaging system fails to publish the update
+        """
+        prev_status = request_info.status

-
-
-
+        if new_status == prev_status:
+            # already sent this update, don't send again
+            return

-
+        try:
+            request_info.status = new_status
+            request_info = (
+                request_info.model_copy()
+                if new_status not in {"completed", "errored", "cancelled"}
+                else request_info  # last update, don't need to copy
+            )
+            self.messaging.put_sync(
+                (response, request, request_info),
+                timeout=-1,
+            )
+            prev_status = new_status
+        except Exception as exc:
+            # Reset status to last one that succeeded or started function with
+            # Calling logic can retry after handling error, if possible
+            request_info.status = prev_status
+            raise exc
```