agentscope-runtime 0.1.5b2__py3-none-any.whl → 0.1.6__py3-none-any.whl
This diff covers publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
- agentscope_runtime/engine/agents/agentscope_agent.py +447 -0
- agentscope_runtime/engine/agents/agno_agent.py +19 -18
- agentscope_runtime/engine/agents/autogen_agent.py +13 -8
- agentscope_runtime/engine/agents/utils.py +53 -0
- agentscope_runtime/engine/deployers/__init__.py +0 -13
- agentscope_runtime/engine/deployers/local_deployer.py +501 -356
- agentscope_runtime/engine/helpers/helper.py +60 -41
- agentscope_runtime/engine/runner.py +11 -36
- agentscope_runtime/engine/schemas/agent_schemas.py +2 -70
- agentscope_runtime/engine/services/sandbox_service.py +62 -70
- agentscope_runtime/engine/services/tablestore_memory_service.py +304 -0
- agentscope_runtime/engine/services/tablestore_rag_service.py +143 -0
- agentscope_runtime/engine/services/tablestore_session_history_service.py +293 -0
- agentscope_runtime/engine/services/utils/tablestore_service_utils.py +352 -0
- agentscope_runtime/sandbox/__init__.py +2 -0
- agentscope_runtime/sandbox/box/base/__init__.py +4 -0
- agentscope_runtime/sandbox/box/base/base_sandbox.py +4 -3
- agentscope_runtime/sandbox/box/browser/__init__.py +4 -0
- agentscope_runtime/sandbox/box/browser/browser_sandbox.py +8 -13
- agentscope_runtime/sandbox/box/dummy/__init__.py +4 -0
- agentscope_runtime/sandbox/box/filesystem/__init__.py +4 -0
- agentscope_runtime/sandbox/box/filesystem/filesystem_sandbox.py +8 -6
- agentscope_runtime/sandbox/box/gui/__init__.py +4 -0
- agentscope_runtime/sandbox/box/gui/gui_sandbox.py +80 -0
- agentscope_runtime/sandbox/box/sandbox.py +5 -2
- agentscope_runtime/sandbox/box/shared/routers/generic.py +20 -1
- agentscope_runtime/sandbox/box/training_box/__init__.py +4 -0
- agentscope_runtime/sandbox/box/training_box/training_box.py +10 -15
- agentscope_runtime/sandbox/build.py +143 -58
- agentscope_runtime/sandbox/client/http_client.py +43 -49
- agentscope_runtime/sandbox/client/training_client.py +0 -1
- agentscope_runtime/sandbox/constant.py +24 -1
- agentscope_runtime/sandbox/custom/custom_sandbox.py +5 -5
- agentscope_runtime/sandbox/custom/example.py +2 -2
- agentscope_runtime/sandbox/enums.py +1 -0
- agentscope_runtime/sandbox/manager/collections/in_memory_mapping.py +11 -6
- agentscope_runtime/sandbox/manager/collections/redis_mapping.py +25 -9
- agentscope_runtime/sandbox/manager/container_clients/__init__.py +0 -10
- agentscope_runtime/sandbox/manager/container_clients/agentrun_client.py +1098 -0
- agentscope_runtime/sandbox/manager/container_clients/docker_client.py +33 -205
- agentscope_runtime/sandbox/manager/container_clients/kubernetes_client.py +8 -555
- agentscope_runtime/sandbox/manager/sandbox_manager.py +187 -88
- agentscope_runtime/sandbox/manager/server/app.py +82 -14
- agentscope_runtime/sandbox/manager/server/config.py +50 -3
- agentscope_runtime/sandbox/model/container.py +6 -23
- agentscope_runtime/sandbox/model/manager_config.py +93 -5
- agentscope_runtime/sandbox/tools/gui/__init__.py +7 -0
- agentscope_runtime/sandbox/tools/gui/tool.py +77 -0
- agentscope_runtime/sandbox/tools/mcp_tool.py +6 -2
- agentscope_runtime/sandbox/utils.py +124 -0
- agentscope_runtime/version.py +1 -1
- {agentscope_runtime-0.1.5b2.dist-info → agentscope_runtime-0.1.6.dist-info}/METADATA +168 -77
- {agentscope_runtime-0.1.5b2.dist-info → agentscope_runtime-0.1.6.dist-info}/RECORD +59 -78
- {agentscope_runtime-0.1.5b2.dist-info → agentscope_runtime-0.1.6.dist-info}/entry_points.txt +0 -1
- agentscope_runtime/engine/agents/agentscope_agent/__init__.py +0 -6
- agentscope_runtime/engine/agents/agentscope_agent/agent.py +0 -401
- agentscope_runtime/engine/agents/agentscope_agent/hooks.py +0 -169
- agentscope_runtime/engine/agents/llm_agent.py +0 -51
- agentscope_runtime/engine/deployers/adapter/responses/response_api_adapter_utils.py +0 -2886
- agentscope_runtime/engine/deployers/adapter/responses/response_api_agent_adapter.py +0 -51
- agentscope_runtime/engine/deployers/adapter/responses/response_api_protocol_adapter.py +0 -314
- agentscope_runtime/engine/deployers/cli_fc_deploy.py +0 -184
- agentscope_runtime/engine/deployers/kubernetes_deployer.py +0 -265
- agentscope_runtime/engine/deployers/modelstudio_deployer.py +0 -677
- agentscope_runtime/engine/deployers/utils/deployment_modes.py +0 -14
- agentscope_runtime/engine/deployers/utils/docker_image_utils/__init__.py +0 -8
- agentscope_runtime/engine/deployers/utils/docker_image_utils/docker_image_builder.py +0 -429
- agentscope_runtime/engine/deployers/utils/docker_image_utils/dockerfile_generator.py +0 -240
- agentscope_runtime/engine/deployers/utils/docker_image_utils/runner_image_factory.py +0 -297
- agentscope_runtime/engine/deployers/utils/package_project_utils.py +0 -932
- agentscope_runtime/engine/deployers/utils/service_utils/__init__.py +0 -9
- agentscope_runtime/engine/deployers/utils/service_utils/fastapi_factory.py +0 -504
- agentscope_runtime/engine/deployers/utils/service_utils/fastapi_templates.py +0 -157
- agentscope_runtime/engine/deployers/utils/service_utils/process_manager.py +0 -268
- agentscope_runtime/engine/deployers/utils/service_utils/service_config.py +0 -75
- agentscope_runtime/engine/deployers/utils/service_utils/service_factory.py +0 -220
- agentscope_runtime/engine/deployers/utils/wheel_packager.py +0 -389
- agentscope_runtime/engine/helpers/agent_api_builder.py +0 -651
- agentscope_runtime/engine/llms/__init__.py +0 -3
- agentscope_runtime/engine/llms/base_llm.py +0 -60
- agentscope_runtime/engine/llms/qwen_llm.py +0 -47
- agentscope_runtime/engine/schemas/embedding.py +0 -37
- agentscope_runtime/engine/schemas/modelstudio_llm.py +0 -310
- agentscope_runtime/engine/schemas/oai_llm.py +0 -538
- agentscope_runtime/engine/schemas/realtime.py +0 -254
- /agentscope_runtime/engine/{deployers/adapter/responses → services/utils}/__init__.py +0 -0
- /agentscope_runtime/{engine/deployers/utils → sandbox/box/gui/box}/__init__.py +0 -0
- {agentscope_runtime-0.1.5b2.dist-info → agentscope_runtime-0.1.6.dist-info}/WHEEL +0 -0
- {agentscope_runtime-0.1.5b2.dist-info → agentscope_runtime-0.1.6.dist-info}/licenses/LICENSE +0 -0
- {agentscope_runtime-0.1.5b2.dist-info → agentscope_runtime-0.1.6.dist-info}/top_level.txt +0 -0
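
The largest single change in this release is the rewrite of `engine/deployers/local_deployer.py`, reproduced in full below: the `DeploymentMode`/`ProcessManager`/`ServicesConfig` machinery and the detached-process path are removed, and `LocalDeployManager` now serves a plain callable through a single FastAPI app running on a background uvicorn thread. As a rough orientation before reading the diff, here is a minimal sketch of the new surface inferred only from the signatures visible in it; `echo_handler` is a made-up placeholder, the shape of `AgentRequest` is not shown in this diff, and nothing here should be read as official usage documentation.

```python
# Minimal sketch of the 0.1.6 LocalDeployManager surface, inferred from the
# local_deployer.py diff below. `echo_handler` is a hypothetical handler, not
# part of the package.
import asyncio

from agentscope_runtime.engine.deployers.local_deployer import LocalDeployManager


async def echo_handler(user_id: str, request, request_id: str):
    """Toy async generator; each yielded item becomes one SSE `data:` event."""
    yield {"request_id": request_id, "user_id": user_id, "status": "ok"}


async def main() -> None:
    manager = LocalDeployManager(host="localhost", port=8090)
    # deploy() mounts the handler at POST /process; with the default
    # response_type="sse" the yielded items are streamed as Server-Sent Events.
    info = await manager.deploy(func=echo_handler, endpoint_path="/process")
    print(info["deploy_id"], info["url"])  # e.g. http://localhost:8090
    print(manager.is_running)              # True; /health, /readiness, /liveness also exposed
    await manager.stop()                   # signal uvicorn to exit and join the server thread


if __name__ == "__main__":
    asyncio.run(main())
```

A blocking `deploy_sync()` wrapper with the same arguments is also added for callers that are not already inside an event loop.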
agentscope_runtime/engine/deployers/local_deployer.py (several removed lines are truncated in the diff view and are kept here as shown):

@@ -1,385 +1,525 @@
 # -*- coding: utf-8 -*-
-# pylint:disable=protected-access
-
 import asyncio
+import json
 import logging
-import os
 import socket
 import threading
-
+import time
+import uuid
+from contextlib import asynccontextmanager
+from typing import Optional, Dict, Any, Callable, Type, Tuple, Union
 
 import uvicorn
+from fastapi import FastAPI, HTTPException, Request, Response
+from fastapi.middleware.cors import CORSMiddleware
+from fastapi.responses import StreamingResponse
+from pydantic import BaseModel
 
-from .adapter.protocol_adapter import ProtocolAdapter
 from .base import DeployManager
-from .
-from .
-from .utils.service_utils import (
-    FastAPIAppFactory,
-    FastAPITemplateManager,
-    ProcessManager,
-    ServicesConfig,
-)
+from .adapter.protocol_adapter import ProtocolAdapter
+from ..schemas.agent_schemas import AgentRequest, AgentResponse, Error
 
 
 class LocalDeployManager(DeployManager):
-
-
-    def __init__(
-        self,
-        host: str = "127.0.0.1",
-        port: int = 8000,
-        shutdown_timeout: int = 120,
-        logger: Optional[logging.Logger] = None,
-    ):
-        """Initialize LocalDeployManager.
-
-        Args:
-            host: Host to bind to
-            port: Port to bind to
-            shutdown_timeout: Timeout for graceful shutdown
-            logger: Logger instance
-        """
+    def __init__(self, host: str = "localhost", port: int = 8090):
         super().__init__()
         self.host = host
         self.port = port
-        self.
-        self.
-
-
-        self.
-
-
-        self.
-        self.
-
-
-
-
-        self._detached_pid_file: Optional[str] = None
-        self.process_manager = ProcessManager(
-            shutdown_timeout=shutdown_timeout,
+        self._server = None
+        self._server_task = None
+        self._server_thread = None  # Add thread for server
+        self._is_running = False
+        self._logger = logging.getLogger(__name__)
+        self._app = None
+        self._startup_timeout = 30  # seconds
+        self._shutdown_timeout = 10  # seconds
+        self._setup_logging()
+
+    def _setup_logging(self):
+        formatter = logging.Formatter(
+            "%(asctime)s - %(name)s - %(levelname)s - %(message)s",
         )
 
-
-
+        app_logger = logging.getLogger("app")
+        app_logger.setLevel(logging.INFO)
 
-
+        file_handler = logging.handlers.RotatingFileHandler(
+            "app.log",
+            maxBytes=10 * 1024 * 1024,  # 10MB
+            backupCount=5,
+        )
+        file_handler.setFormatter(formatter)
+        app_logger.addHandler(file_handler)
+        console_handler = logging.StreamHandler()
+        console_handler.setFormatter(formatter)
+        app_logger.addHandler(console_handler)
+
+        access_logger = logging.getLogger("access")
+        access_logger.setLevel(logging.INFO)
+        access_file_handler = logging.handlers.RotatingFileHandler(
+            "access.log",
+            maxBytes=10 * 1024 * 1024,
+            backupCount=5,
+        )
+        access_file_handler.setFormatter(
+            logging.Formatter("%(asctime)s - %(message)s"),
+        )
+        access_logger.addHandler(access_file_handler)
+
+        self.app_logger = app_logger
+        self.access_logger = access_logger
+
+    def _create_fastapi_app(self) -> FastAPI:
+        """Create and configure FastAPI application with lifespan
+        management."""
+
+        @asynccontextmanager
+        async def lifespan(app: FastAPI) -> Any:
+            """Manage the application lifespan."""
+            if hasattr(self, "before_start") and self.before_start:
+                if asyncio.iscoroutinefunction(self.before_start):
+                    await self.before_start(app, **getattr(self, "kwargs", {}))
+                else:
+                    self.before_start(app, **getattr(self, "kwargs", {}))
+            yield
+            if hasattr(self, "after_finish") and self.after_finish:
+                if asyncio.iscoroutinefunction(self.after_finish):
+                    await self.after_finish(app, **getattr(self, "kwargs", {}))
+                else:
+                    self.after_finish(app, **getattr(self, "kwargs", {}))
+
+        app = FastAPI(
+            title="Agent Service",
+            version="1.0.0",
+            description="Production-ready Agent Service API",
+            lifespan=lifespan,
+        )
+
+        self._add_middleware(app)
+        self._add_health_endpoints(app)
+
+        if hasattr(self, "func") and self.func:
+            self._add_main_endpoint(app)
+
+        return app
+
+    def _add_middleware(self, app: FastAPI) -> None:
+        """Add middleware to the FastAPI application."""
+
+        @app.middleware("http")
+        async def log_requests(request: Request, call_next):
+            start_time = time.time()
+
+            self.app_logger.info(f"Request: {request.method} {request.url}")
+            response = await call_next(
+                request,
+            )
+            process_time = time.time() - start_time
+            self.access_logger.info(
+                f'{request.client.host} - "{request.method} {request.url}" '
+                f"{response.status_code} - {process_time:.3f}s",
+            )
+
+            return response
+
+        @app.middleware("http")
+        async def custom_middleware(
+            request: Request,
+            call_next: Callable,
+        ) -> Response:
+            """Custom middleware for request processing."""
+            response: Response = await call_next(request)
+            return response
+
+        app.add_middleware(
+            CORSMiddleware,
+            allow_origins=["*"],
+            allow_credentials=True,
+            allow_methods=["*"],
+            allow_headers=["*"],
+        )
+
+    def _add_health_endpoints(self, app: FastAPI) -> None:
+        """Add health check endpoints to the FastAPI application."""
+
+        @app.get("/health")
+        async def health_check():
+            return {
+                "status": "healthy",
+                "timestamp": time.time(),
+                "service": "agent-service",
+            }
+
+        @app.get("/readiness")
+        async def readiness() -> str:
+            """Check if the application is ready to serve requests."""
+            if getattr(app.state, "is_ready", True):
+                return "success"
+            raise HTTPException(
+                status_code=500,
+                detail="Application is not ready",
+            )
+
+        @app.get("/liveness")
+        async def liveness() -> str:
+            """Check if the application is alive and healthy."""
+            if getattr(app.state, "is_healthy", True):
+                return "success"
+            raise HTTPException(
+                status_code=500,
+                detail="Application is not healthy",
+            )
+
+        @app.get("/")
+        async def root():
+            return {"message": "Agent Service is running"}
+
+    def _add_main_endpoint(self, app: FastAPI) -> None:
+        """Add the main processing endpoint to the FastAPI application."""
+
+        async def _get_request_info(request: Request) -> Tuple[Dict, Any, str]:
+            """Extract request information from the HTTP request."""
+            body = await request.body()
+            request_body = json.loads(body.decode("utf-8")) if body else {}
+
+            user_id = request_body.get("user_id", "")
+
+            if hasattr(self, "request_model") and self.request_model:
+                try:
+                    request_body_obj = self.request_model.model_validate(
+                        request_body,
+                    )
+                except Exception as e:
+                    raise HTTPException(
+                        status_code=400,
+                        detail=f"Invalid request format: {e}",
+                    ) from e
+            else:
+                request_body_obj = request_body
+
+            query_params = dict(request.query_params)
+            return query_params, request_body_obj, user_id
+
+        def _get_request_id(request_body_obj: Any) -> str:
+            """Extract or generate a request ID from the request body."""
+            if hasattr(request_body_obj, "header") and hasattr(
+                request_body_obj.header,
+                "request_id",
+            ):
+                request_id = request_body_obj.header.request_id
+            elif (
+                isinstance(
+                    request_body_obj,
+                    dict,
+                )
+                and "request_id" in request_body_obj
+            ):
+                request_id = request_body_obj["request_id"]
+            else:
+                request_id = str(uuid.uuid4())
+            return request_id
+
+        @app.post(self.endpoint_path)
+        async def main_endpoint(request: Request):
+            """Main endpoint handler for processing requests."""
+            try:
+                (
+                    _,  # query_params
+                    request_body_obj,
+                    user_id,
+                ) = await _get_request_info(
+                    request=request,
+                )
+                request_id = _get_request_id(request_body_obj)
+                if (
+                    hasattr(
+                        self,
+                        "response_type",
+                    )
+                    and self.response_type == "sse"
+                ):
+                    return self._handle_sse_response(
+                        user_id=user_id,
+                        request_body_obj=request_body_obj,
+                        request_id=request_id,
+                    )
+                else:
+                    return await self._handle_standard_response(
+                        user_id=user_id,
+                        request_body_obj=request_body_obj,
+                        request_id=request_id,
+                    )
+
+            except Exception as e:
+                self._logger.error(f"Request processing failed: {e}")
+                raise HTTPException(status_code=500, detail=str(e)) from e
+
+    def _handle_sse_response(
+        self,
+        user_id: str,
+        request_body_obj: Any,
+        request_id: str,
+    ) -> StreamingResponse:
+        """Handle Server-Sent Events response."""
+
+        async def stream_generator():
+            """Generate streaming response data."""
+            try:
+                if asyncio.iscoroutinefunction(self.func):
+                    async for output in self.func(
+                        user_id=user_id,
+                        request=request_body_obj,
+                        request_id=request_id,
+                    ):
+                        _data = self._create_success_result(
+                            output=output,
+                        )
+                        yield f"data: {_data}\n\n"
+                else:
+                    # For sync functions, we need to handle differently
+                    result = self.func(
+                        user_id=user_id,
+                        request=request_body_obj,
+                        request_id=request_id,
+                    )
+                    if hasattr(result, "__aiter__"):
+                        async for output in result:
+                            _data = self._create_success_result(
+                                output=output,
+                            )
+                            yield f"data: {_data}\n\n"
+                    else:
+                        _data = self._create_success_result(
+                            output=result,
+                        )
+                        yield f"data: {_data}\n\n"
+            except Exception as e:
+                _data = self._create_error_response(
+                    request_id=request_id,
+                    error=e,
+                )
+                yield f"data: {_data}\n\n"
+
+        return StreamingResponse(
+            stream_generator(),
+            media_type="text/event-stream",
+            headers={
+                "Cache-Control": "no-cache",
+                "Connection": "keep-alive",
+            },
+        )
+
+    async def _handle_standard_response(
         self,
-
+        user_id: str,
+        request_body_obj: Any,
+        request_id: str,
+    ):
+        """Handle standard JSON response."""
+        try:
+            if asyncio.iscoroutinefunction(self.func):
+                result = await self.func(
+                    user_id=user_id,
+                    request=request_body_obj,
+                    request_id=request_id,
+                )
+            else:
+                result = self.func(
+                    user_id=user_id,
+                    request=request_body_obj,
+                    request_id=request_id,
+                )
+
+            return self._create_success_result(
+                output=result,
+            )
+        except Exception as e:
+            return self._create_error_response(request_id=request_id, error=e)
+
+    def _create_success_result(
+        self,
+        output: Union[BaseModel, Dict, str],
+    ) -> str:
+        """Create a success response."""
+        if isinstance(output, BaseModel):
+            return output.model_dump_json()
+        elif isinstance(output, dict):
+            return json.dumps(output)
+        else:
+            return output
+
+    def _create_error_response(
+        self,
+        request_id: str,
+        error: Exception,
+    ) -> str:
+        """Create an error response."""
+        response = AgentResponse(id=request_id)
+        response.failed(Error(code=str(error), message=str(error)))
+        return response.model_dump_json()
+
+    def deploy_sync(
+        self,
+        func: Callable,
         endpoint_path: str = "/process",
-        request_model: Optional[Type] =
+        request_model: Optional[Type] = AgentRequest,
         response_type: str = "sse",
-        stream: bool = True,
         before_start: Optional[Callable] = None,
         after_finish: Optional[Callable] = None,
-        mode: DeploymentMode = DeploymentMode.DAEMON_THREAD,
-        services_config: Optional[ServicesConfig] = None,
-        protocol_adapters: Optional[list[ProtocolAdapter]] = None,
         **kwargs: Any,
     ) -> Dict[str, str]:
-        """
+        """
+        Deploy the agent as a FastAPI service (synchronous version).
 
         Args:
-
-            endpoint_path: API endpoint path
+            func: Custom processing function
+            endpoint_path: API endpoint path for the processing function
             request_model: Pydantic model for request validation
             response_type: Response type - "json", "sse", or "text"
-            stream: Enable streaming responses
            before_start: Callback function called before server starts
            after_finish: Callback function called after server finishes
-
-            services_config: Services configuration
-            protocol_adapters: Protocol adapters
-            **kwargs: Additional keyword arguments
+            **kwargs: Additional keyword arguments passed to callbacks
 
         Returns:
-            Dict containing deploy_id and url
+            Dict[str, str]: Dictionary containing deploy_id and url of the
+            deployed service
 
         Raises:
             RuntimeError: If deployment fails
         """
-
-
-
-
-
-
-
-
-
-
-
-                before_start=before_start,
-                after_finish=after_finish,
-                services_config=services_config,
-                protocol_adapters=protocol_adapters,
-                **kwargs,
-            )
-        elif mode == DeploymentMode.DETACHED_PROCESS:
-            return await self._deploy_detached_process(
-                runner=runner,
-                endpoint_path=endpoint_path,
-                request_model=request_model,
-                response_type=response_type,
-                stream=stream,
-                before_start=before_start,
-                after_finish=after_finish,
-                services_config=services_config,
-                protocol_adapters=protocol_adapters,
-                **kwargs,
-            )
-        else:
-            raise ValueError(
-                f"Unsupported deployment mode for LocalDeployManager: "
-                f"{mode}",
-            )
-
-        except Exception as e:
-            self._logger.error(f"Deployment failed: {e}")
-            raise RuntimeError(f"Failed to deploy service: {e}") from e
+        return asyncio.run(
+            self._deploy_async(
+                func=func,
+                endpoint_path=endpoint_path,
+                request_model=request_model,
+                response_type=response_type,
+                before_start=before_start,
+                after_finish=after_finish,
+                **kwargs,
+            ),
+        )
 
-    async def
+    async def deploy(
         self,
-
+        func: Callable,
+        endpoint_path: str = "/process",
+        request_model: Optional[Type] = AgentRequest,
+        response_type: str = "sse",
+        before_start: Optional[Callable] = None,
+        after_finish: Optional[Callable] = None,
         protocol_adapters: Optional[list[ProtocolAdapter]] = None,
-        **kwargs,
+        **kwargs: Any,
     ) -> Dict[str, str]:
-        """
-
-
-        # Create FastAPI app using factory
-        app = FastAPIAppFactory.create_app(
-            runner=runner,
-            mode=DeploymentMode.DAEMON_THREAD,
-            protocol_adapters=protocol_adapters,
-            **kwargs,
-        )
-
-        # Create uvicorn server
-        config = uvicorn.Config(
-            app=app,
-            host=self.host,
-            port=self.port,
-            loop="asyncio",
-            log_level="info",
-        )
-        self._server = uvicorn.Server(config)
-
-        # Start server in daemon thread
-        def run_server():
-            asyncio.run(self._server.serve())
-
-        self._server_thread = threading.Thread(target=run_server, daemon=True)
-        self._server_thread.start()
+        """
+        Deploy the agent as a FastAPI service (asynchronous version).
 
-
-
+        Args:
+            func: Custom processing function
+            endpoint_path: API endpoint path for the processing function
+            request_model: Pydantic model for request validation
+            response_type: Response type - "json", "sse", or "text"
+            before_start: Callback function called before server starts
+            after_finish: Callback function called after server finishes
+            **kwargs: Additional keyword arguments passed to callbacks
 
-
-
+        Returns:
+            Dict[str, str]: Dictionary containing deploy_id and url of the
+            deployed service
 
-
-
+        Raises:
+            RuntimeError: If deployment fails
+        """
+        return await self._deploy_async(
+            func=func,
+            endpoint_path=endpoint_path,
+            request_model=request_model,
+            response_type=response_type,
+            before_start=before_start,
+            after_finish=after_finish,
+            protocol_adapters=protocol_adapters,
+            **kwargs,
        )
 
-
-            "deploy_id": self.deploy_id,
-            "url": f"http://{self.host}:{self.port}",
-        }
-
-    async def _deploy_detached_process(
+    async def _deploy_async(
        self,
-
-
+        func: Callable,
+        endpoint_path: str = "/process",
+        request_model: Optional[Type] = None,
+        response_type: str = "sse",
+        before_start: Optional[Callable] = None,
+        after_finish: Optional[Callable] = None,
         protocol_adapters: Optional[list[ProtocolAdapter]] = None,
-        **kwargs,
+        **kwargs: Any,
     ) -> Dict[str, str]:
-
-
-            "Deploying FastAPI service in detached process mode...",
-        )
-
-        # Extract agent from runner
-        if not runner or not runner._agent:
-            raise ValueError(
-                "Detached process mode requires a runner with an agent",
-            )
-
-        agent = runner._agent
-
-        # Create package project for detached deployment
-        project_dir = await self.create_detached_project(
-            agent=agent,
-            services_config=services_config,
-            protocol_adapters=protocol_adapters,
-            **kwargs,
-        )
+        if self._is_running:
+            raise RuntimeError("Service is already running")
 
         try:
-
-
-
-
+            self._logger.info("Starting FastAPI service deployment...")
+
+            # Store callable configuration
+            self.func = func
+            self.endpoint_path = endpoint_path
+            self.request_model = request_model
+            self.response_type = response_type
+            self.before_start = before_start
+            self.after_finish = after_finish
+            self.kwargs = kwargs
+
+            # Create FastAPI app
+            self._app = self._create_fastapi_app()
+
+            # Support extension protocol
+            if protocol_adapters:
+                for protocol_adapter in protocol_adapters:
+                    protocol_adapter.add_endpoint(app=self._app, func=func)
+
+            # Configure uvicorn server
+            config = uvicorn.Config(
+                self._app,
                 host=self.host,
                 port=self.port,
+                log_level="info",
+                access_log=False,
+                timeout_keep_alive=30,
             )
 
-            self.
-
-
-
-
-
-            # Wait for service to become available
-            service_ready = await self.process_manager.wait_for_port(
-                self.host,
-                self.port,
-                timeout=30,
+            self._server = uvicorn.Server(config)
+            # Run the server in a separate thread
+            self._server_thread = threading.Thread(target=self._server.run)
+            self._server_thread.daemon = (
+                True  # Ensure thread doesn't block exit
             )
-
-
-
-
-            self.
-
-
+            self._server_thread.start()
+
+            # Wait for server to start with timeout
+            start_time = time.time()
+            while not self._is_server_ready():
+                if time.time() - start_time > self._startup_timeout:
+                    # Clean up the thread if server fails to start
+                    if self._server:
+                        self._server.should_exit = True
+                    self._server_thread.join(timeout=self._shutdown_timeout)
+                    raise RuntimeError(
+                        f"Server startup timeout after "
+                        f"{self._startup_timeout} seconds",
+                    )
+                await asyncio.sleep(0.1)
+
+            self._is_running = True
+            url = f"http://{self.host}:{self.port}"
             self._logger.info(
-                f"FastAPI service
+                f"FastAPI service deployed successfully at {url}",
             )
-
             return {
                 "deploy_id": self.deploy_id,
-                "url":
+                "url": url,
             }
 
         except Exception as e:
-
-
-
-            import shutil
-
-                shutil.rmtree(project_dir)
-            except OSError:
-                pass
-            raise e
-
-    @staticmethod
-    async def create_detached_project(
-        agent: Any,
-        endpoint_path: str = "/process",
-        requirements: Optional[Union[str, List[str]]] = None,
-        extra_packages: Optional[List[str]] = None,
-        services_config: Optional[ServicesConfig] = None,
-        protocol_adapters: Optional[list[ProtocolAdapter]] = None,
-        **kwargs,  # pylint: disable=unused-argument
-    ) -> str:
-        """Create detached project using package_project method."""
-        if requirements is None:
-            requirements = []
-
-        if isinstance(requirements, str):
-            requirements = [requirements]
-
-        # Create package configuration for detached deployment
-        package_config = PackageConfig(
-            endpoint_path=endpoint_path,
-            deployment_mode="detached_process",
-            extra_packages=extra_packages,
-            protocol_adapters=protocol_adapters,
-            services_config=services_config,
-            requirements=requirements
-            + (
-                ["redis"]
-                if services_config
-                and any(
-                    getattr(config, "provider", None) == "redis"
-                    for config in [
-                        services_config.memory,
-                        services_config.session_history,
-                    ]
-                    if config
-                )
-                else []
-            ),
-        )
-
-        # Use package_project to create the detached project
-        project_dir, _ = package_project(
-            agent=agent,
-            config=package_config,
-        )
-
-        return project_dir
-
-    async def stop(self) -> None:
-        """Stop the FastAPI service (unified method for all modes)."""
-        if not self.is_running:
-            self._logger.warning("Service is not running")
-            return
-
-        try:
-            if self._detached_process_pid:
-                # Detached process mode
-                await self._stop_detached_process()
-            else:
-                # Daemon thread mode
-                await self._stop_daemon_thread()
-
-        except Exception as e:
-            self._logger.error(f"Failed to stop service: {e}")
-            raise RuntimeError(f"Failed to stop FastAPI service: {e}") from e
-
-    async def _stop_daemon_thread(self):
-        """Stop daemon thread mode service."""
-        self._logger.info("Stopping FastAPI daemon thread service...")
-
-        # Stop the server gracefully
-        if self._server:
-            self._server.should_exit = True
-
-        # Wait for the server thread to finish
-        if self._server_thread and self._server_thread.is_alive():
-            self._server_thread.join(timeout=self._shutdown_timeout)
-            if self._server_thread.is_alive():
-                self._logger.warning(
-                    "Server thread did not terminate, potential resource leak",
-                )
-
-        await self._cleanup_daemon_thread()
-        self.is_running = False
-        self._logger.info("FastAPI daemon thread service stopped successfully")
-
-    async def _stop_detached_process(self):
-        """Stop detached process mode service."""
-        self._logger.info("Stopping FastAPI detached process service...")
-
-        if self._detached_process_pid:
-            await self.process_manager.stop_process_gracefully(
-                self._detached_process_pid,
-            )
-
-        await self._cleanup_detached_process()
-        self.is_running = False
-        self._logger.info(
-            "FastAPI detached process service stopped successfully",
-        )
-
-    async def _cleanup_daemon_thread(self):
-        """Clean up daemon thread resources."""
-        self._server = None
-        self._server_task = None
-        self._server_thread = None
-
-    async def _cleanup_detached_process(self):
-        """Clean up detached process resources."""
-        # Cleanup PID file
-        if self._detached_pid_file:
-            self.process_manager.cleanup_pid_file(self._detached_pid_file)
-
-        # Reset state
-        self._detached_process_pid = None
-        self._detached_pid_file = None
+            self._logger.error(f"Deployment failed: {e}")
+            await self._cleanup_server()
+            raise RuntimeError(f"Failed to deploy FastAPI service: {e}") from e
 
     def _is_server_ready(self) -> bool:
         """Check if the server is ready to accept connections."""
@@ -391,51 +531,56 @@ class LocalDeployManager(DeployManager):
         except Exception:
             return False
 
-    async def
-        """
-
+    async def stop(self) -> None:
+        """
+        Stop the FastAPI service.
 
-
-
-
+        Raises:
+            RuntimeError: If stopping fails
+        """
+        if not self._is_running:
+            self._logger.warning("Service is not running")
+            return
 
-
+        try:
+            self._logger.info("Stopping FastAPI service...")
 
-
+            # Stop the server gracefully
+            if self._server:
+                self._server.should_exit = True
 
-
-
-
-
+            # Wait for the server thread to finish
+            if self._server_thread and self._server_thread.is_alive():
+                self._server_thread.join(timeout=self._shutdown_timeout)
+                if self._server_thread.is_alive():
+                    self._logger.warning(
+                        "Server thread did not terminate, "
+                        "potential resource leak",
+                    )
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-            "pid": self._detached_process_pid,
-            "url": f"http://{self.host}:{self.port}"
-            if self.is_running
-            else None,
-        }
+            await self._cleanup_server()
+            self._is_running = False
+            self._logger.info("FastAPI service stopped successfully")
+
+        except Exception as e:
+            self._logger.error(f"Failed to stop service: {e}")
+            raise RuntimeError(f"Failed to stop FastAPI service: {e}") from e
+
+    async def _cleanup_server(self):
+        """Clean up server resources."""
+        self._server = None
+        self._server_task = None
+        self._server_thread = None
+        self._app = None
+
+    @property
+    def is_running(self) -> bool:
+        """Check if the service is currently running."""
+        return self._is_running
 
     @property
     def service_url(self) -> Optional[str]:
         """Get the current service URL if running."""
-        if self.
+        if self._is_running and self.port:
             return f"http://{self.host}:{self.port}"
         return None