agentscope-runtime 0.1.3__py3-none-any.whl → 0.1.5b1__py3-none-any.whl

This diff shows the contents of publicly released package versions as they appear in their respective public registries; it is provided for informational purposes only.
Files changed (44)
  1. agentscope_runtime/engine/agents/agentscope_agent/agent.py +3 -0
  2. agentscope_runtime/engine/deployers/__init__.py +13 -0
  3. agentscope_runtime/engine/deployers/adapter/responses/__init__.py +0 -0
  4. agentscope_runtime/engine/deployers/adapter/responses/response_api_adapter_utils.py +2886 -0
  5. agentscope_runtime/engine/deployers/adapter/responses/response_api_agent_adapter.py +51 -0
  6. agentscope_runtime/engine/deployers/adapter/responses/response_api_protocol_adapter.py +314 -0
  7. agentscope_runtime/engine/deployers/cli_fc_deploy.py +143 -0
  8. agentscope_runtime/engine/deployers/kubernetes_deployer.py +265 -0
  9. agentscope_runtime/engine/deployers/local_deployer.py +356 -501
  10. agentscope_runtime/engine/deployers/modelstudio_deployer.py +626 -0
  11. agentscope_runtime/engine/deployers/utils/__init__.py +0 -0
  12. agentscope_runtime/engine/deployers/utils/deployment_modes.py +14 -0
  13. agentscope_runtime/engine/deployers/utils/docker_image_utils/__init__.py +8 -0
  14. agentscope_runtime/engine/deployers/utils/docker_image_utils/docker_image_builder.py +429 -0
  15. agentscope_runtime/engine/deployers/utils/docker_image_utils/dockerfile_generator.py +240 -0
  16. agentscope_runtime/engine/deployers/utils/docker_image_utils/runner_image_factory.py +297 -0
  17. agentscope_runtime/engine/deployers/utils/package_project_utils.py +932 -0
  18. agentscope_runtime/engine/deployers/utils/service_utils/__init__.py +9 -0
  19. agentscope_runtime/engine/deployers/utils/service_utils/fastapi_factory.py +504 -0
  20. agentscope_runtime/engine/deployers/utils/service_utils/fastapi_templates.py +157 -0
  21. agentscope_runtime/engine/deployers/utils/service_utils/process_manager.py +268 -0
  22. agentscope_runtime/engine/deployers/utils/service_utils/service_config.py +75 -0
  23. agentscope_runtime/engine/deployers/utils/service_utils/service_factory.py +220 -0
  24. agentscope_runtime/engine/deployers/utils/wheel_packager.py +389 -0
  25. agentscope_runtime/engine/helpers/agent_api_builder.py +651 -0
  26. agentscope_runtime/engine/runner.py +36 -10
  27. agentscope_runtime/engine/schemas/agent_schemas.py +70 -2
  28. agentscope_runtime/engine/schemas/embedding.py +37 -0
  29. agentscope_runtime/engine/schemas/modelstudio_llm.py +310 -0
  30. agentscope_runtime/engine/schemas/oai_llm.py +538 -0
  31. agentscope_runtime/engine/schemas/realtime.py +254 -0
  32. agentscope_runtime/engine/services/context_manager.py +2 -0
  33. agentscope_runtime/engine/services/mem0_memory_service.py +124 -0
  34. agentscope_runtime/engine/services/memory_service.py +2 -1
  35. agentscope_runtime/engine/services/redis_session_history_service.py +4 -3
  36. agentscope_runtime/engine/services/session_history_service.py +4 -3
  37. agentscope_runtime/sandbox/manager/container_clients/kubernetes_client.py +555 -10
  38. agentscope_runtime/version.py +1 -1
  39. {agentscope_runtime-0.1.3.dist-info → agentscope_runtime-0.1.5b1.dist-info}/METADATA +25 -5
  40. {agentscope_runtime-0.1.3.dist-info → agentscope_runtime-0.1.5b1.dist-info}/RECORD +44 -17
  41. {agentscope_runtime-0.1.3.dist-info → agentscope_runtime-0.1.5b1.dist-info}/entry_points.txt +1 -0
  42. {agentscope_runtime-0.1.3.dist-info → agentscope_runtime-0.1.5b1.dist-info}/WHEEL +0 -0
  43. {agentscope_runtime-0.1.3.dist-info → agentscope_runtime-0.1.5b1.dist-info}/licenses/LICENSE +0 -0
  44. {agentscope_runtime-0.1.3.dist-info → agentscope_runtime-0.1.5b1.dist-info}/top_level.txt +0 -0
@@ -1,586 +1,441 @@
 # -*- coding: utf-8 -*-
+# pylint:disable=protected-access
+
 import asyncio
-import json
 import logging
+import os
 import socket
 import threading
-import time
-import uuid
-from contextlib import asynccontextmanager
-from typing import Optional, Dict, Any, Callable, Type, Tuple, Union
+from typing import Callable, Optional, Type, Any, Dict, Union, List

 import uvicorn
-from fastapi import FastAPI, HTTPException, Request, Response
-from fastapi.middleware.cors import CORSMiddleware
-from fastapi.responses import StreamingResponse
-from pydantic import BaseModel

-from .base import DeployManager
 from .adapter.protocol_adapter import ProtocolAdapter
-from ..schemas.agent_schemas import AgentRequest, AgentResponse, Error
+from .base import DeployManager
+from .utils.deployment_modes import DeploymentMode
+from .utils.package_project_utils import package_project, PackageConfig
+from .utils.service_utils import (
+    FastAPIAppFactory,
+    FastAPITemplateManager,
+    ProcessManager,
+    ServicesConfig,
+)


 class LocalDeployManager(DeployManager):
-    def __init__(self, host: str = "localhost", port: int = 8090):
-        super().__init__()
-        self.host = host
-        self.port = port
-        self._server = None
-        self._server_task = None
-        self._server_thread = None  # Add thread for server
-        self._is_running = False
-        self._logger = logging.getLogger(__name__)
-        self._app = None
-        self._startup_timeout = 30  # seconds
-        self._shutdown_timeout = 10  # seconds
-        self._setup_logging()
-
-    def _setup_logging(self):
-        formatter = logging.Formatter(
-            "%(asctime)s - %(name)s - %(levelname)s - %(message)s",
-        )
-
-        app_logger = logging.getLogger("app")
-        app_logger.setLevel(logging.INFO)
-
-        file_handler = logging.handlers.RotatingFileHandler(
-            "app.log",
-            maxBytes=10 * 1024 * 1024,  # 10MB
-            backupCount=5,
-        )
-        file_handler.setFormatter(formatter)
-        app_logger.addHandler(file_handler)
-        console_handler = logging.StreamHandler()
-        console_handler.setFormatter(formatter)
-        app_logger.addHandler(console_handler)
-
-        access_logger = logging.getLogger("access")
-        access_logger.setLevel(logging.INFO)
-        access_file_handler = logging.handlers.RotatingFileHandler(
-            "access.log",
-            maxBytes=10 * 1024 * 1024,
-            backupCount=5,
-        )
-        access_file_handler.setFormatter(
-            logging.Formatter("%(asctime)s - %(message)s"),
-        )
-        access_logger.addHandler(access_file_handler)
-
-        self.app_logger = app_logger
-        self.access_logger = access_logger
-
-    def _create_fastapi_app(self) -> FastAPI:
-        """Create and configure FastAPI application with lifespan
-        management."""
-
-        @asynccontextmanager
-        async def lifespan(app: FastAPI) -> Any:
-            """Manage the application lifespan."""
-            if hasattr(self, "before_start") and self.before_start:
-                if asyncio.iscoroutinefunction(self.before_start):
-                    await self.before_start(app, **getattr(self, "kwargs", {}))
-                else:
-                    self.before_start(app, **getattr(self, "kwargs", {}))
-            yield
-            if hasattr(self, "after_finish") and self.after_finish:
-                if asyncio.iscoroutinefunction(self.after_finish):
-                    await self.after_finish(app, **getattr(self, "kwargs", {}))
-                else:
-                    self.after_finish(app, **getattr(self, "kwargs", {}))
-
-        app = FastAPI(
-            title="Agent Service",
-            version="1.0.0",
-            description="Production-ready Agent Service API",
-            lifespan=lifespan,
-        )
-
-        self._add_middleware(app)
-        self._add_health_endpoints(app)
-
-        if hasattr(self, "func") and self.func:
-            self._add_main_endpoint(app)
-
-        return app
-
-    def _add_middleware(self, app: FastAPI) -> None:
-        """Add middleware to the FastAPI application."""
-
-        @app.middleware("http")
-        async def log_requests(request: Request, call_next):
-            start_time = time.time()
-
-            self.app_logger.info(f"Request: {request.method} {request.url}")
-            response = await call_next(
-                request,
-            )
-            process_time = time.time() - start_time
-            self.access_logger.info(
-                f'{request.client.host} - "{request.method} {request.url}" '
-                f"{response.status_code} - {process_time:.3f}s",
-            )
-
-            return response
-
-        @app.middleware("http")
-        async def custom_middleware(
-            request: Request,
-            call_next: Callable,
-        ) -> Response:
-            """Custom middleware for request processing."""
-            response: Response = await call_next(request)
-            return response
-
-        app.add_middleware(
-            CORSMiddleware,
-            allow_origins=["*"],
-            allow_credentials=True,
-            allow_methods=["*"],
-            allow_headers=["*"],
-        )
+    """Unified LocalDeployManager supporting multiple deployment modes."""

-    def _add_health_endpoints(self, app: FastAPI) -> None:
-        """Add health check endpoints to the FastAPI application."""
-
-        @app.get("/health")
-        async def health_check():
-            return {
-                "status": "healthy",
-                "timestamp": time.time(),
-                "service": "agent-service",
-            }
-
-        @app.get("/readiness")
-        async def readiness() -> str:
-            """Check if the application is ready to serve requests."""
-            if getattr(app.state, "is_ready", True):
-                return "success"
-            raise HTTPException(
-                status_code=500,
-                detail="Application is not ready",
-            )
-
-        @app.get("/liveness")
-        async def liveness() -> str:
-            """Check if the application is alive and healthy."""
-            if getattr(app.state, "is_healthy", True):
-                return "success"
-            raise HTTPException(
-                status_code=500,
-                detail="Application is not healthy",
-            )
-
-        @app.get("/")
-        async def root():
-            return {"message": "Agent Service is running"}
-
-    def _add_main_endpoint(self, app: FastAPI) -> None:
-        """Add the main processing endpoint to the FastAPI application."""
-
-        async def _get_request_info(request: Request) -> Tuple[Dict, Any, str]:
-            """Extract request information from the HTTP request."""
-            body = await request.body()
-            request_body = json.loads(body.decode("utf-8")) if body else {}
-
-            user_id = request_body.get("user_id", "")
-
-            if hasattr(self, "request_model") and self.request_model:
-                try:
-                    request_body_obj = self.request_model.model_validate(
-                        request_body,
-                    )
-                except Exception as e:
-                    raise HTTPException(
-                        status_code=400,
-                        detail=f"Invalid request format: {e}",
-                    ) from e
-            else:
-                request_body_obj = request_body
-
-            query_params = dict(request.query_params)
-            return query_params, request_body_obj, user_id
-
-        def _get_request_id(request_body_obj: Any) -> str:
-            """Extract or generate a request ID from the request body."""
-            if hasattr(request_body_obj, "header") and hasattr(
-                request_body_obj.header,
-                "request_id",
-            ):
-                request_id = request_body_obj.header.request_id
-            elif (
-                isinstance(
-                    request_body_obj,
-                    dict,
-                )
-                and "request_id" in request_body_obj
-            ):
-                request_id = request_body_obj["request_id"]
-            else:
-                request_id = str(uuid.uuid4())
-            return request_id
-
-        @app.post(self.endpoint_path)
-        async def main_endpoint(request: Request):
-            """Main endpoint handler for processing requests."""
-            try:
-                (
-                    _,  # query_params
-                    request_body_obj,
-                    user_id,
-                ) = await _get_request_info(
-                    request=request,
-                )
-                request_id = _get_request_id(request_body_obj)
-                if (
-                    hasattr(
-                        self,
-                        "response_type",
-                    )
-                    and self.response_type == "sse"
-                ):
-                    return self._handle_sse_response(
-                        user_id=user_id,
-                        request_body_obj=request_body_obj,
-                        request_id=request_id,
-                    )
-                else:
-                    return await self._handle_standard_response(
-                        user_id=user_id,
-                        request_body_obj=request_body_obj,
-                        request_id=request_id,
-                    )
-
-            except Exception as e:
-                self._logger.error(f"Request processing failed: {e}")
-                raise HTTPException(status_code=500, detail=str(e)) from e
-
-    def _handle_sse_response(
-        self,
-        user_id: str,
-        request_body_obj: Any,
-        request_id: str,
-    ) -> StreamingResponse:
-        """Handle Server-Sent Events response."""
-
-        async def stream_generator():
-            """Generate streaming response data."""
-            try:
-                if asyncio.iscoroutinefunction(self.func):
-                    async for output in self.func(
-                        user_id=user_id,
-                        request=request_body_obj,
-                        request_id=request_id,
-                    ):
-                        _data = self._create_success_result(
-                            output=output,
-                        )
-                        yield f"data: {_data}\n\n"
-                else:
-                    # For sync functions, we need to handle differently
-                    result = self.func(
-                        user_id=user_id,
-                        request=request_body_obj,
-                        request_id=request_id,
-                    )
-                    if hasattr(result, "__aiter__"):
-                        async for output in result:
-                            _data = self._create_success_result(
-                                output=output,
-                            )
-                            yield f"data: {_data}\n\n"
-                    else:
-                        _data = self._create_success_result(
-                            output=result,
-                        )
-                        yield f"data: {_data}\n\n"
-            except Exception as e:
-                _data = self._create_error_response(
-                    request_id=request_id,
-                    error=e,
-                )
-                yield f"data: {_data}\n\n"
-
-        return StreamingResponse(
-            stream_generator(),
-            media_type="text/event-stream",
-            headers={
-                "Cache-Control": "no-cache",
-                "Connection": "keep-alive",
-            },
-        )
-
-    async def _handle_standard_response(
+    def __init__(
         self,
-        user_id: str,
-        request_body_obj: Any,
-        request_id: str,
+        host: str = "127.0.0.1",
+        port: int = 8000,
+        shutdown_timeout: int = 120,
+        logger: Optional[logging.Logger] = None,
     ):
-        """Handle standard JSON response."""
-        try:
-            if asyncio.iscoroutinefunction(self.func):
-                result = await self.func(
-                    user_id=user_id,
-                    request=request_body_obj,
-                    request_id=request_id,
-                )
-            else:
-                result = self.func(
-                    user_id=user_id,
-                    request=request_body_obj,
-                    request_id=request_id,
-                )
-
-            return self._create_success_result(
-                output=result,
-            )
-        except Exception as e:
-            return self._create_error_response(request_id=request_id, error=e)
+        """Initialize LocalDeployManager.

-    def _create_success_result(
-        self,
-        output: Union[BaseModel, Dict, str],
-    ) -> str:
-        """Create a success response."""
-        if isinstance(output, BaseModel):
-            return output.model_dump_json()
-        elif isinstance(output, dict):
-            return json.dumps(output)
-        else:
-            return output
+        Args:
+            host: Host to bind to
+            port: Port to bind to
+            shutdown_timeout: Timeout for graceful shutdown
+            logger: Logger instance
+        """
+        super().__init__()
+        self.host = host
+        self.port = port
+        self._shutdown_timeout = shutdown_timeout
+        self._logger = logger or logging.getLogger(__name__)
+
+        # State management
+        self.is_running = False
+
+        # Daemon thread mode attributes
+        self._server: Optional[uvicorn.Server] = None
+        self._server_thread: Optional[threading.Thread] = None
+        self._server_task: Optional[asyncio.Task] = None
+
+        # Detached process mode attributes
+        self._detached_process_pid: Optional[int] = None
+        self._detached_pid_file: Optional[str] = None
+        self.process_manager = ProcessManager(
+            shutdown_timeout=shutdown_timeout,
+        )

-    def _create_error_response(
-        self,
-        request_id: str,
-        error: Exception,
-    ) -> str:
-        """Create an error response."""
-        response = AgentResponse(id=request_id)
-        response.failed(Error(code=str(error), message=str(error)))
-        return response.model_dump_json()
+        # Template manager
+        self.template_manager = FastAPITemplateManager()

-    def deploy_sync(
+    async def deploy(
         self,
-        func: Callable,
+        runner: Optional[Any] = None,
         endpoint_path: str = "/process",
-        request_model: Optional[Type] = AgentRequest,
+        request_model: Optional[Type] = None,
         response_type: str = "sse",
+        stream: bool = True,
         before_start: Optional[Callable] = None,
         after_finish: Optional[Callable] = None,
+        mode: DeploymentMode = DeploymentMode.DAEMON_THREAD,
+        services_config: Optional[ServicesConfig] = None,
+        protocol_adapters: Optional[list[ProtocolAdapter]] = None,
         **kwargs: Any,
     ) -> Dict[str, str]:
-        """
-        Deploy the agent as a FastAPI service (synchronous version).
+        """Deploy using unified FastAPI architecture.

         Args:
-            func: Custom processing function
-            endpoint_path: API endpoint path for the processing function
+            runner: Runner instance (for DAEMON_THREAD mode)
+            endpoint_path: API endpoint path
             request_model: Pydantic model for request validation
             response_type: Response type - "json", "sse", or "text"
+            stream: Enable streaming responses
             before_start: Callback function called before server starts
             after_finish: Callback function called after server finishes
-            **kwargs: Additional keyword arguments passed to callbacks
+            mode: Deployment mode
+            services_config: Services configuration
+            protocol_adapters: Protocol adapters
+            **kwargs: Additional keyword arguments

         Returns:
-            Dict[str, str]: Dictionary containing deploy_id and url of the
-                deployed service
+            Dict containing deploy_id and url

         Raises:
             RuntimeError: If deployment fails
         """
-        return asyncio.run(
-            self._deploy_async(
-                func=func,
-                endpoint_path=endpoint_path,
-                request_model=request_model,
-                response_type=response_type,
-                before_start=before_start,
-                after_finish=after_finish,
-                **kwargs,
-            ),
-        )
+        if self.is_running:
+            raise RuntimeError("Service is already running")

-    async def deploy(
+        try:
+            if mode == DeploymentMode.DAEMON_THREAD:
+                return await self._deploy_daemon_thread(
+                    runner=runner,
+                    endpoint_path=endpoint_path,
+                    request_model=request_model,
+                    response_type=response_type,
+                    stream=stream,
+                    before_start=before_start,
+                    after_finish=after_finish,
+                    services_config=services_config,
+                    protocol_adapters=protocol_adapters,
+                    **kwargs,
+                )
+            elif mode == DeploymentMode.DETACHED_PROCESS:
+                return await self._deploy_detached_process(
+                    runner=runner,
+                    endpoint_path=endpoint_path,
+                    request_model=request_model,
+                    response_type=response_type,
+                    stream=stream,
+                    before_start=before_start,
+                    after_finish=after_finish,
+                    services_config=services_config,
+                    protocol_adapters=protocol_adapters,
+                    **kwargs,
+                )
+            else:
+                raise ValueError(
+                    f"Unsupported deployment mode for LocalDeployManager: "
+                    f"{mode}",
+                )
+
+        except Exception as e:
+            self._logger.error(f"Deployment failed: {e}")
+            raise RuntimeError(f"Failed to deploy service: {e}") from e
+
+    async def _deploy_daemon_thread(
         self,
-        func: Callable,
-        endpoint_path: str = "/process",
-        request_model: Optional[Type] = AgentRequest,
-        response_type: str = "sse",
-        before_start: Optional[Callable] = None,
-        after_finish: Optional[Callable] = None,
+        runner: Optional[Any] = None,
         protocol_adapters: Optional[list[ProtocolAdapter]] = None,
-        **kwargs: Any,
+        **kwargs,
     ) -> Dict[str, str]:
-        """
-        Deploy the agent as a FastAPI service (asynchronous version).
-
-        Args:
-            func: Custom processing function
-            endpoint_path: API endpoint path for the processing function
-            request_model: Pydantic model for request validation
-            response_type: Response type - "json", "sse", or "text"
-            before_start: Callback function called before server starts
-            after_finish: Callback function called after server finishes
-            **kwargs: Additional keyword arguments passed to callbacks
-
-        Returns:
-            Dict[str, str]: Dictionary containing deploy_id and url of the
-                deployed service
+        """Deploy in daemon thread mode."""
+        self._logger.info("Deploying FastAPI service in daemon thread mode...")

-        Raises:
-            RuntimeError: If deployment fails
-        """
-        return await self._deploy_async(
-            func=func,
-            endpoint_path=endpoint_path,
-            request_model=request_model,
-            response_type=response_type,
-            before_start=before_start,
-            after_finish=after_finish,
+        # Create FastAPI app using factory
+        app = FastAPIAppFactory.create_app(
+            runner=runner,
+            mode=DeploymentMode.DAEMON_THREAD,
             protocol_adapters=protocol_adapters,
             **kwargs,
         )

-    async def _deploy_async(
+        # Create uvicorn server
+        config = uvicorn.Config(
+            app=app,
+            host=self.host,
+            port=self.port,
+            loop="asyncio",
+            log_level="info",
+        )
+        self._server = uvicorn.Server(config)
+
+        # Start server in daemon thread
+        def run_server():
+            asyncio.run(self._server.serve())
+
+        self._server_thread = threading.Thread(target=run_server, daemon=True)
+        self._server_thread.start()
+
+        # Wait for server to start
+        await self._wait_for_server_ready()
+
+        self.is_running = True
+        self.deploy_id = f"daemon_{self.host}_{self.port}"
+
+        self._logger.info(
+            f"FastAPI service started at http://{self.host}:{self.port}",
+        )
+
+        return {
+            "deploy_id": self.deploy_id,
+            "url": f"http://{self.host}:{self.port}",
+        }
+
+    async def _deploy_detached_process(
         self,
-        func: Callable,
-        endpoint_path: str = "/process",
-        request_model: Optional[Type] = None,
-        response_type: str = "sse",
-        before_start: Optional[Callable] = None,
-        after_finish: Optional[Callable] = None,
+        runner: Optional[Any] = None,
+        services_config: Optional[ServicesConfig] = None,
         protocol_adapters: Optional[list[ProtocolAdapter]] = None,
-        **kwargs: Any,
+        **kwargs,
     ) -> Dict[str, str]:
-        if self._is_running:
-            raise RuntimeError("Service is already running")
+        """Deploy in detached process mode."""
+        self._logger.info(
+            "Deploying FastAPI service in detached process mode...",
+        )
+
+        # Extract agent from runner
+        if not runner or not runner._agent:
+            raise ValueError(
+                "Detached process mode requires a runner with an agent",
+            )
+
+        agent = runner._agent
+
+        # Create package project for detached deployment
+        project_dir = await self.create_detached_project(
+            agent=agent,
+            services_config=services_config,
+            protocol_adapters=protocol_adapters,
+            **kwargs,
+        )

         try:
-            self._logger.info("Starting FastAPI service deployment...")
-
-            # Store callable configuration
-            self.func = func
-            self.endpoint_path = endpoint_path
-            self.request_model = request_model
-            self.response_type = response_type
-            self.before_start = before_start
-            self.after_finish = after_finish
-            self.kwargs = kwargs
-
-            # Create FastAPI app
-            self._app = self._create_fastapi_app()
-
-            # Support extension protocol
-            if protocol_adapters:
-                for protocol_adapter in protocol_adapters:
-                    protocol_adapter.add_endpoint(app=self._app, func=func)
-
-            # Configure uvicorn server
-            config = uvicorn.Config(
-                self._app,
+            # Start detached process using the packaged project
+            script_path = os.path.join(project_dir, "main.py")
+            pid = await self.process_manager.start_detached_process(
+                script_path=script_path,
                 host=self.host,
                 port=self.port,
-                log_level="info",
-                access_log=False,
-                timeout_keep_alive=30,
             )

-            self._server = uvicorn.Server(config)
-            # Run the server in a separate thread
-            self._server_thread = threading.Thread(target=self._server.run)
-            self._server_thread.daemon = (
-                True  # Ensure thread doesn't block exit
+            self._detached_process_pid = pid
+            self._detached_pid_file = f"/tmp/agentscope_runtime_{pid}.pid"
+
+            # Create PID file
+            self.process_manager.create_pid_file(pid, self._detached_pid_file)
+
+            # Wait for service to become available
+            service_ready = await self.process_manager.wait_for_port(
+                self.host,
+                self.port,
+                timeout=30,
             )
-            self._server_thread.start()
-
-            # Wait for server to start with timeout
-            start_time = time.time()
-            while not self._is_server_ready():
-                if time.time() - start_time > self._startup_timeout:
-                    # Clean up the thread if server fails to start
-                    if self._server:
-                        self._server.should_exit = True
-                        self._server_thread.join(timeout=self._shutdown_timeout)
-                    raise RuntimeError(
-                        f"Server startup timeout after "
-                        f"{self._startup_timeout} seconds",
-                    )
-                await asyncio.sleep(0.1)
-
-            self._is_running = True
-            url = f"http://{self.host}:{self.port}"
+
+            if not service_ready:
+                raise RuntimeError("Service did not start within timeout")
+
+            self.is_running = True
+            self.deploy_id = f"detached_{pid}"
+
             self._logger.info(
-                f"FastAPI service deployed successfully at {url}",
+                f"FastAPI service started in detached process (PID: {pid})",
             )
+
             return {
                 "deploy_id": self.deploy_id,
-                "url": url,
+                "url": f"http://{self.host}:{self.port}",
             }

         except Exception as e:
-            self._logger.error(f"Deployment failed: {e}")
-            await self._cleanup_server()
-            raise RuntimeError(f"Failed to deploy FastAPI service: {e}") from e
+            # Cleanup on failure
+            if os.path.exists(project_dir):
+                try:
+                    import shutil

-    def _is_server_ready(self) -> bool:
-        """Check if the server is ready to accept connections."""
-        try:
-            with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
-                s.settimeout(0.1)
-                result = s.connect_ex((self.host, self.port))
-                return result == 0
-        except Exception:
-            return False
+                    shutil.rmtree(project_dir)
+                except OSError:
+                    pass
+            raise e

-    async def stop(self) -> None:
-        """
-        Stop the FastAPI service.
+    @staticmethod
+    async def create_detached_project(
+        agent: Any,
+        endpoint_path: str = "/process",
+        requirements: Optional[Union[str, List[str]]] = None,
+        extra_packages: Optional[List[str]] = None,
+        services_config: Optional[ServicesConfig] = None,
+        protocol_adapters: Optional[list[ProtocolAdapter]] = None,
+        **kwargs,  # pylint: disable=unused-argument
+    ) -> str:
+        """Create detached project using package_project method."""
+        if requirements is None:
+            requirements = []

-        Raises:
-            RuntimeError: If stopping fails
-        """
-        if not self._is_running:
-            self._logger.warning("Service is not running")
-            return
+        if isinstance(requirements, str):
+            requirements = [requirements]

-        try:
-            self._logger.info("Stopping FastAPI service...")
+        # Create package configuration for detached deployment
+        package_config = PackageConfig(
+            endpoint_path=endpoint_path,
+            deployment_mode="detached_process",
+            extra_packages=extra_packages,
+            protocol_adapters=protocol_adapters,
+            services_config=services_config,
+            requirements=requirements
+            + (
+                ["redis"]
+                if services_config
+                and any(
+                    getattr(config, "provider", None) == "redis"
+                    for config in [
+                        services_config.memory,
+                        services_config.session_history,
+                    ]
+                    if config
+                )
+                else []
+            ),
+        )
+
+        # Use package_project to create the detached project
+        project_dir, _ = package_project(
+            agent=agent,
+            config=package_config,
+        )

-            # Stop the server gracefully
-            if self._server:
-                self._server.should_exit = True
+        return project_dir

-            # Wait for the server thread to finish
-            if self._server_thread and self._server_thread.is_alive():
-                self._server_thread.join(timeout=self._shutdown_timeout)
-                if self._server_thread.is_alive():
-                    self._logger.warning(
-                        "Server thread did not terminate, "
-                        "potential resource leak",
-                    )
+    async def stop(self) -> None:
+        """Stop the FastAPI service (unified method for all modes)."""
+        if not self.is_running:
+            self._logger.warning("Service is not running")
+            return

-            await self._cleanup_server()
-            self._is_running = False
-            self._logger.info("FastAPI service stopped successfully")
+        try:
+            if self._detached_process_pid:
+                # Detached process mode
+                await self._stop_detached_process()
+            else:
+                # Daemon thread mode
+                await self._stop_daemon_thread()

         except Exception as e:
             self._logger.error(f"Failed to stop service: {e}")
             raise RuntimeError(f"Failed to stop FastAPI service: {e}") from e

-    async def _cleanup_server(self):
-        """Clean up server resources."""
+    async def _stop_daemon_thread(self):
+        """Stop daemon thread mode service."""
+        self._logger.info("Stopping FastAPI daemon thread service...")
+
+        # Stop the server gracefully
+        if self._server:
+            self._server.should_exit = True
+
+        # Wait for the server thread to finish
+        if self._server_thread and self._server_thread.is_alive():
+            self._server_thread.join(timeout=self._shutdown_timeout)
+            if self._server_thread.is_alive():
+                self._logger.warning(
+                    "Server thread did not terminate, potential resource leak",
+                )
+
+        await self._cleanup_daemon_thread()
+        self.is_running = False
+        self._logger.info("FastAPI daemon thread service stopped successfully")
+
+    async def _stop_detached_process(self):
+        """Stop detached process mode service."""
+        self._logger.info("Stopping FastAPI detached process service...")
+
+        if self._detached_process_pid:
+            await self.process_manager.stop_process_gracefully(
+                self._detached_process_pid,
+            )
+
+        await self._cleanup_detached_process()
+        self.is_running = False
+        self._logger.info(
+            "FastAPI detached process service stopped successfully",
+        )
+
+    async def _cleanup_daemon_thread(self):
+        """Clean up daemon thread resources."""
         self._server = None
         self._server_task = None
         self._server_thread = None
-        self._app = None

-    @property
-    def is_running(self) -> bool:
-        """Check if the service is currently running."""
-        return self._is_running
+    async def _cleanup_detached_process(self):
+        """Clean up detached process resources."""
+        # Cleanup PID file
+        if self._detached_pid_file:
+            self.process_manager.cleanup_pid_file(self._detached_pid_file)
+
+        # Reset state
+        self._detached_process_pid = None
+        self._detached_pid_file = None
+
+    def _is_server_ready(self) -> bool:
+        """Check if the server is ready to accept connections."""
+        try:
+            with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
+                s.settimeout(0.1)
+                result = s.connect_ex((self.host, self.port))
+                return result == 0
+        except Exception:
+            return False
+
+    async def _wait_for_server_ready(self, timeout: int = 30):
+        """Wait for server to become ready."""
+        end_time = asyncio.get_event_loop().time() + timeout
+
+        while asyncio.get_event_loop().time() < end_time:
+            if self._is_server_ready():
+                return
+
+            await asyncio.sleep(0.1)
+
+        raise RuntimeError("Server did not become ready within timeout")
+
+    def is_service_running(self) -> bool:
+        """Check if service is running."""
+        if not self.is_running:
+            return False
+
+        if self._detached_process_pid:
+            # Check detached process
+            return self.process_manager.is_process_running(
+                self._detached_process_pid,
+            )
+        else:
+            # Check daemon thread
+            return self._server is not None and self._is_server_ready()
+
+    def get_deployment_info(self) -> Dict[str, Any]:
+        """Get deployment information."""
+        return {
+            "deploy_id": self.deploy_id,
+            "host": self.host,
+            "port": self.port,
+            "is_running": self.is_service_running(),
+            "mode": "detached_process"
+            if self._detached_process_pid
+            else "daemon_thread",
+            "pid": self._detached_process_pid,
+            "url": f"http://{self.host}:{self.port}"
+            if self.is_running
+            else None,
+        }

     @property
     def service_url(self) -> Optional[str]:
         """Get the current service URL if running."""
-        if self._is_running and self.port:
+        if self.is_running and self.port:
             return f"http://{self.host}:{self.port}"
         return None
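
For orientation, the sketch below shows how the reworked LocalDeployManager might be driven in DAEMON_THREAD mode. It is a hypothetical example based solely on the deploy() signature, DeploymentMode values, and module paths visible in the diff and file list above; the runner object and its construction are assumptions, not confirmed API.

# Hypothetical usage sketch (not confirmed API) for the new deploy() signature.
import asyncio

from agentscope_runtime.engine.deployers.local_deployer import LocalDeployManager
from agentscope_runtime.engine.deployers.utils.deployment_modes import DeploymentMode


async def main(runner) -> None:
    # `runner` is assumed to be an already configured Runner with an agent attached.
    manager = LocalDeployManager(host="127.0.0.1", port=8000)

    # DAEMON_THREAD starts uvicorn in a background thread of this process;
    # DETACHED_PROCESS instead packages the agent and launches it as its own process
    # (and, per the diff, requires a runner with an agent).
    info = await manager.deploy(
        runner=runner,
        endpoint_path="/process",
        response_type="sse",
        stream=True,
        mode=DeploymentMode.DAEMON_THREAD,
    )
    print(info["deploy_id"], info["url"])
    print(manager.get_deployment_info())

    await manager.stop()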