agentscope-runtime 0.1.4__py3-none-any.whl → 0.1.5b1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43) hide show
  1. agentscope_runtime/engine/agents/agentscope_agent/agent.py +3 -0
  2. agentscope_runtime/engine/deployers/__init__.py +13 -0
  3. agentscope_runtime/engine/deployers/adapter/responses/__init__.py +0 -0
  4. agentscope_runtime/engine/deployers/adapter/responses/response_api_adapter_utils.py +2886 -0
  5. agentscope_runtime/engine/deployers/adapter/responses/response_api_agent_adapter.py +51 -0
  6. agentscope_runtime/engine/deployers/adapter/responses/response_api_protocol_adapter.py +314 -0
  7. agentscope_runtime/engine/deployers/cli_fc_deploy.py +143 -0
  8. agentscope_runtime/engine/deployers/kubernetes_deployer.py +265 -0
  9. agentscope_runtime/engine/deployers/local_deployer.py +356 -501
  10. agentscope_runtime/engine/deployers/modelstudio_deployer.py +626 -0
  11. agentscope_runtime/engine/deployers/utils/__init__.py +0 -0
  12. agentscope_runtime/engine/deployers/utils/deployment_modes.py +14 -0
  13. agentscope_runtime/engine/deployers/utils/docker_image_utils/__init__.py +8 -0
  14. agentscope_runtime/engine/deployers/utils/docker_image_utils/docker_image_builder.py +429 -0
  15. agentscope_runtime/engine/deployers/utils/docker_image_utils/dockerfile_generator.py +240 -0
  16. agentscope_runtime/engine/deployers/utils/docker_image_utils/runner_image_factory.py +297 -0
  17. agentscope_runtime/engine/deployers/utils/package_project_utils.py +932 -0
  18. agentscope_runtime/engine/deployers/utils/service_utils/__init__.py +9 -0
  19. agentscope_runtime/engine/deployers/utils/service_utils/fastapi_factory.py +504 -0
  20. agentscope_runtime/engine/deployers/utils/service_utils/fastapi_templates.py +157 -0
  21. agentscope_runtime/engine/deployers/utils/service_utils/process_manager.py +268 -0
  22. agentscope_runtime/engine/deployers/utils/service_utils/service_config.py +75 -0
  23. agentscope_runtime/engine/deployers/utils/service_utils/service_factory.py +220 -0
  24. agentscope_runtime/engine/deployers/utils/wheel_packager.py +389 -0
  25. agentscope_runtime/engine/helpers/agent_api_builder.py +651 -0
  26. agentscope_runtime/engine/runner.py +36 -10
  27. agentscope_runtime/engine/schemas/agent_schemas.py +70 -2
  28. agentscope_runtime/engine/schemas/embedding.py +37 -0
  29. agentscope_runtime/engine/schemas/modelstudio_llm.py +310 -0
  30. agentscope_runtime/engine/schemas/oai_llm.py +538 -0
  31. agentscope_runtime/engine/schemas/realtime.py +254 -0
  32. agentscope_runtime/engine/services/mem0_memory_service.py +124 -0
  33. agentscope_runtime/engine/services/memory_service.py +2 -1
  34. agentscope_runtime/engine/services/redis_session_history_service.py +4 -3
  35. agentscope_runtime/engine/services/session_history_service.py +4 -3
  36. agentscope_runtime/sandbox/manager/container_clients/kubernetes_client.py +555 -10
  37. agentscope_runtime/version.py +1 -1
  38. {agentscope_runtime-0.1.4.dist-info → agentscope_runtime-0.1.5b1.dist-info}/METADATA +21 -4
  39. {agentscope_runtime-0.1.4.dist-info → agentscope_runtime-0.1.5b1.dist-info}/RECORD +43 -16
  40. {agentscope_runtime-0.1.4.dist-info → agentscope_runtime-0.1.5b1.dist-info}/entry_points.txt +1 -0
  41. {agentscope_runtime-0.1.4.dist-info → agentscope_runtime-0.1.5b1.dist-info}/WHEEL +0 -0
  42. {agentscope_runtime-0.1.4.dist-info → agentscope_runtime-0.1.5b1.dist-info}/licenses/LICENSE +0 -0
  43. {agentscope_runtime-0.1.4.dist-info → agentscope_runtime-0.1.5b1.dist-info}/top_level.txt +0 -0
@@ -1,23 +1,26 @@
1
1
  # -*- coding: utf-8 -*-
2
2
  import uuid
3
- from typing import Optional, List, AsyncGenerator, Any
4
3
  from contextlib import AsyncExitStack
4
+ from typing import Optional, List, AsyncGenerator, Any, Union, Dict
5
5
 
6
6
  from openai.types.chat import ChatCompletion
7
7
 
8
- from .deployers.adapter.protocol_adapter import ProtocolAdapter
8
+ from agentscope_runtime.engine.deployers.utils.service_utils import (
9
+ ServicesConfig,
10
+ )
9
11
  from .agents import Agent
10
- from .schemas.context import Context
11
12
  from .deployers import (
12
13
  DeployManager,
13
14
  LocalDeployManager,
14
15
  )
16
+ from .deployers.adapter.protocol_adapter import ProtocolAdapter
15
17
  from .schemas.agent_schemas import (
16
18
  Event,
17
19
  AgentRequest,
18
20
  RunStatus,
19
21
  AgentResponse,
20
22
  )
23
+ from .schemas.context import Context
21
24
  from .services.context_manager import ContextManager
22
25
  from .services.environment_manager import EnvironmentManager
23
26
  from .tracing import TraceType
@@ -77,37 +80,57 @@ class Runner:
77
80
  endpoint_path: str = "/process",
78
81
  stream: bool = True,
79
82
  protocol_adapters: Optional[list[ProtocolAdapter]] = None,
83
+ requirements: Optional[Union[str, List[str]]] = None,
84
+ extra_packages: Optional[List[str]] = None,
85
+ base_image: str = "python:3.9-slim",
86
+ environment: Optional[Dict[str, str]] = None,
87
+ runtime_config: Optional[Dict] = None,
88
+ services_config: Optional[Union[ServicesConfig, dict]] = None,
89
+ **kwargs,
80
90
  ):
81
91
  """
82
92
  Deploys the agent as a service.
83
93
 
84
94
  Args:
85
- protocol_adapters: protocol adapters
86
95
  deploy_manager: Deployment manager to handle service deployment
87
96
  endpoint_path: API endpoint path for the processing function
88
97
  stream: If start a streaming service
98
+ protocol_adapters: protocol adapters
99
+ requirements: PyPI dependencies
100
+ extra_packages: User code directory/file path
101
+ base_image: Docker base image (for containerized deployment)
102
+ environment: Environment variables dict
103
+ runtime_config: Runtime configuration dict
104
+ services_config: Services configuration dict
105
+ **kwargs: Additional arguments passed to deployment manager
89
106
  Returns:
90
107
  URL of the deployed service
91
108
 
92
109
  Raises:
93
110
  RuntimeError: If deployment fails
94
111
  """
95
- if stream:
96
- deploy_func = self.stream_query
97
- else:
98
- deploy_func = self.query
99
112
  deploy_result = await deploy_manager.deploy(
100
- deploy_func,
113
+ runner=self,
101
114
  endpoint_path=endpoint_path,
115
+ stream=stream,
102
116
  protocol_adapters=protocol_adapters,
117
+ requirements=requirements,
118
+ extra_packages=extra_packages,
119
+ base_image=base_image,
120
+ environment=environment,
121
+ runtime_config=runtime_config,
122
+ services_config=services_config,
123
+ **kwargs,
103
124
  )
125
+
126
+ # TODO: add redis or other persistent method
104
127
  self._deploy_managers[deploy_manager.deploy_id] = deploy_result
105
128
  return deploy_result
106
129
 
107
130
  @trace(TraceType.AGENT_STEP)
108
131
  async def stream_query( # pylint:disable=unused-argument
109
132
  self,
110
- request: AgentRequest,
133
+ request: Union[AgentRequest, dict],
111
134
  user_id: Optional[str] = None,
112
135
  tools: Optional[List] = None,
113
136
  **kwargs: Any,
@@ -115,6 +138,9 @@ class Runner:
115
138
  """
116
139
  Streams the agent.
117
140
  """
141
+ if isinstance(request, dict):
142
+ request = AgentRequest(**request)
143
+
118
144
  response = AgentResponse()
119
145
  yield response
120
146
 
@@ -27,6 +27,7 @@ class MessageType:
27
27
  MCP_APPROVAL_REQUEST = "mcp_approval_request"
28
28
  MCP_TOOL_CALL = "mcp_call"
29
29
  MCP_APPROVAL_RESPONSE = "mcp_approval_response"
30
+ REASONING = "reasoning"
30
31
  HEARTBEAT = "heartbeat"
31
32
  ERROR = "error"
32
33
 
@@ -45,6 +46,8 @@ class ContentType:
45
46
  DATA = "data"
46
47
  IMAGE = "image"
47
48
  AUDIO = "audio"
49
+ FILE = "file"
50
+ REFUSAL = "refusal"
48
51
 
49
52
 
50
53
  class Role:
@@ -66,6 +69,8 @@ class RunStatus:
66
69
  Failed = "failed"
67
70
  Rejected = "rejected"
68
71
  Unknown = "unknown"
72
+ Queued = "queued"
73
+ Incomplete = "incomplete"
69
74
 
70
75
 
71
76
  class FunctionParameters(BaseModel):
@@ -282,6 +287,63 @@ class DataContent(Content):
282
287
  """The data content."""
283
288
 
284
289
 
290
+ class AudioContent(Content):
291
+ type: Literal[ContentType.AUDIO] = ContentType.AUDIO
292
+ """The type of the content part."""
293
+
294
+ data: Optional[str] = None
295
+ """The audio data details."""
296
+
297
+ format: Optional[str] = None
298
+ """
299
+ The format of the audio data.
300
+ """
301
+
302
+
303
+ class FileContent(Content):
304
+ type: Literal[ContentType.FILE] = ContentType.FILE
305
+ """The type of the content part."""
306
+
307
+ file_url: Optional[str] = None
308
+ """The file URL details."""
309
+
310
+ file_id: Optional[str] = None
311
+ """The file ID details."""
312
+
313
+ filename: Optional[str] = None
314
+ """The file name details."""
315
+
316
+ file_data: Optional[str] = None
317
+ """The file data details."""
318
+
319
+
320
+ class RefusalContent(Content):
321
+ type: Literal[ContentType.REFUSAL] = ContentType.REFUSAL
322
+ """The type of the content part."""
323
+
324
+ refusal: Optional[str] = None
325
+ """The refusal content."""
326
+
327
+
328
+ class ToolCall(BaseModel):
329
+ arguments: str
330
+ """A JSON string of the arguments to pass to the function."""
331
+
332
+ call_id: str
333
+ """The unique ID of the function tool call generated by the model."""
334
+
335
+ name: str
336
+ """The name of the function to run."""
337
+
338
+
339
+ class ToolCallOutput(BaseModel):
340
+ call_id: str
341
+ """The unique ID of the function tool call generated by the model."""
342
+
343
+ output: str
344
+ """A JSON string of the output of the function tool call."""
345
+
346
+
285
347
  AgentRole: TypeAlias = Literal[
286
348
  Role.ASSISTANT,
287
349
  Role.SYSTEM,
@@ -289,9 +351,15 @@ AgentRole: TypeAlias = Literal[
289
351
  Role.TOOL,
290
352
  ]
291
353
 
292
-
293
354
  AgentContent = Annotated[
294
- Union[TextContent, ImageContent, DataContent],
355
+ Union[
356
+ TextContent,
357
+ ImageContent,
358
+ DataContent,
359
+ AudioContent,
360
+ FileContent,
361
+ RefusalContent,
362
+ ],
295
363
  Field(discriminator="type"),
296
364
  ]
297
365
 
@@ -0,0 +1,37 @@
1
+ # -*- coding: utf-8 -*-
2
+ from typing import List, Optional, Literal
3
+
4
+ from openai.types import Embedding
5
+ from pydantic import BaseModel
6
+
7
+
8
+ class Usage(BaseModel):
9
+ prompt_tokens: Optional[int] = None
10
+ """The number of tokens used by the prompt."""
11
+
12
+ total_tokens: Optional[int] = None
13
+ """The total number of tokens used by the request."""
14
+
15
+ input_tokens: Optional[int] = None
16
+
17
+ text_count: Optional[int] = None
18
+
19
+ image_count: Optional[int] = None
20
+
21
+ video_count: Optional[int] = None
22
+
23
+ duration: Optional[float] = None
24
+
25
+
26
+ class EmbeddingResponse(BaseModel):
27
+ data: List[Embedding]
28
+ """The list of embeddings generated by the model."""
29
+
30
+ model: str
31
+ """The name of the model used to generate the embedding."""
32
+
33
+ object: Literal["list"]
34
+ """The object type, which is always "list"."""
35
+
36
+ usage: Usage
37
+ """The usage information for the request."""
@@ -0,0 +1,310 @@
1
+ # -*- coding: utf-8 -*-
2
+ import os
3
+ from typing import List, Literal, Optional, Union
4
+
5
+ from openai.types.chat import ChatCompletion, ChatCompletionChunk
6
+ from pydantic import (
7
+ BaseModel,
8
+ StrictInt,
9
+ field_validator,
10
+ Field,
11
+ )
12
+
13
+ from .oai_llm import (
14
+ Parameters,
15
+ OpenAIMessage,
16
+ )
17
+
18
+
19
+ class KnowledgeHolder(BaseModel):
20
+ source: str
21
+ """The source identifier or URL where the knowledge was retrieved from."""
22
+
23
+ content: str
24
+ """The actual content or knowledge text retrieved from the source."""
25
+
26
+
27
+ class IntentionOptions(BaseModel):
28
+ white_list: List[str] = Field(default_factory=list)
29
+ """A list of allowed intentions that can be processed."""
30
+
31
+ black_list: List[str] = Field(default_factory=list)
32
+ """A list of blocked intentions that should not be processed."""
33
+
34
+ search_model: str = "search_v6"
35
+ """The search model version to use for intention recognition."""
36
+
37
+ intensity: Optional[int] = None
38
+ """The intensity level for intentions matching and processing."""
39
+
40
+ scene_id: Optional[str] = None
41
+ """The scene identifier for context-aware intention processing."""
42
+
43
+
44
+ class SearchOptions(BaseModel):
45
+ """
46
+ Search Options on Modelstudio platform for knowledge retrieval and web
47
+ search.
48
+ """
49
+
50
+ enable_source: bool = False
51
+ """Whether to include source information in search results."""
52
+
53
+ enable_citation: bool = False
54
+ """Whether to include citation information for retrieved content."""
55
+
56
+ enable_readpage: bool = False
57
+ """Whether to enable full page reading for web content."""
58
+
59
+ enable_online_read: bool = False
60
+ """Whether to enable online reading capabilities for real-time content."""
61
+
62
+ citation_format: str = "[<number>]"
63
+ """The format string for citations in the response."""
64
+
65
+ search_strategy: Literal[
66
+ "standard",
67
+ "pro_ultra",
68
+ "pro",
69
+ "lite",
70
+ "pro_max",
71
+ "image",
72
+ "turbo",
73
+ "max",
74
+ ] = "turbo"
75
+ """The search strategy to use ('standard', 'pro_ultra',
76
+ 'pro', 'lite','pro_max', 'image','turbo','max'). """
77
+
78
+ forced_search: bool = False
79
+ """Whether to force search even when cached results are available."""
80
+
81
+ prepend_search_result: bool = False
82
+ """Whether to prepend search results to the response."""
83
+
84
+ enable_search_extension: bool = False
85
+ """Whether to enable extended search capabilities."""
86
+
87
+ item_cnt: int = 20000
88
+ """The maximum number of items to retrieve in search results."""
89
+
90
+ top_n: int = 0
91
+ """The number of top results to return (0 means return all)."""
92
+
93
+ intention_options: Union[IntentionOptions, None] = IntentionOptions()
94
+ """Options for intention recognition and processing during search."""
95
+
96
+
97
+ # maximum chunk size from knowledge base [1, 20]
98
+ PARAM_MAXIMUM_ALLOWED_CHUNK_NUM_MIN = int(
99
+ os.getenv(
100
+ "PARAM_MAXIMUM_ALLOWED_CHUNK_NUM_MIN",
101
+ "1",
102
+ ),
103
+ )
104
+ PARAM_MAXIMUM_ALLOWED_CHUNK_NUM_MAX = int(
105
+ os.getenv(
106
+ "PARAM_MAXIMUM_ALLOWED_CHUNK_NUM_MAX",
107
+ "20",
108
+ ),
109
+ )
110
+
111
+
112
+ class RagOptions(BaseModel):
113
+ model_config = {"populate_by_name": True}
114
+
115
+ class FallbackOptions(BaseModel):
116
+ default_response_type: Optional[str] = "llm"
117
+ """The type of default response when RAG fails ('llm', 'template',
118
+ 'none'). """
119
+
120
+ default_response: Optional[str] = ""
121
+ """The default response text to use when RAG fails."""
122
+
123
+ class RewriteOptions(BaseModel):
124
+ model_name: Optional[str] = None
125
+ """The model name to use for rewriting."""
126
+
127
+ class_name: Optional[str] = None
128
+ """The class name to use for rewriting."""
129
+
130
+ class RerankOptions(BaseModel):
131
+ model_name: Optional[str] = None
132
+ """The model name to use for reranking."""
133
+
134
+ workspace_id: Optional[str] = ""
135
+ """The modelstudio workspace id"""
136
+
137
+ replaced_word: str = "${documents}"
138
+ """The placeholder word in prompts that will be replaced with retrieved
139
+ documents. """
140
+
141
+ index_names: Optional[List[str]] = Field(default_factory=list)
142
+ """List of index names to use for document processing and retrieval."""
143
+
144
+ pipeline_ids: Optional[List[str]] = Field(default_factory=list)
145
+ """List of pipeline IDs to use for document processing and retrieval."""
146
+
147
+ file_ids: Optional[List[str]] = Field(
148
+ default_factory=list,
149
+ alias="file_id_list",
150
+ )
151
+ """List of specific file IDs to search within."""
152
+
153
+ prompt_strategy: Optional[str] = Field(
154
+ default="topK",
155
+ alias="prompt_strategy_name",
156
+ )
157
+ """The strategy for selecting and organizing retrieved content in
158
+ prompts. """
159
+
160
+ maximum_allowed_chunk_num: Optional[int] = 5
161
+ """The maximum number of document chunks to include in the context."""
162
+
163
+ maximum_allowed_length: Optional[int] = 2000
164
+ """The maximum total length of retrieved content in characters."""
165
+
166
+ enable_citation: bool = Field(
167
+ default=False,
168
+ alias="prompt_enable_citation",
169
+ )
170
+ """Whether to include citation information for retrieved documents."""
171
+
172
+ fallback_options: Optional[FallbackOptions] = None
173
+ """Options for handling cases when RAG retrieval fails."""
174
+
175
+ enable_web_search: bool = False
176
+ """Whether to enable web search as part of the RAG pipeline."""
177
+
178
+ session_file_ids: Optional[List[str]] = Field(default_factory=list)
179
+ """List of file IDs that are specific to the current session."""
180
+
181
+ dense_similarity_top_k: Optional[int] = 100
182
+ """The number of most similar dense vectors to retrieve."""
183
+
184
+ sparse_similarity_top_k: Optional[int] = 100
185
+ """The number of most similar sparse vectors to retrieve."""
186
+
187
+ enable_rewrite: Optional[bool] = None
188
+ """Whether to enable content rewrite during RAG."""
189
+
190
+ rewrite: Optional[List[RewriteOptions]] = None
191
+ """Options for content rewrite."""
192
+
193
+ enable_reranking: Optional[bool] = None
194
+ """Whether to enable content reranking."""
195
+
196
+ rerank_min_score: Optional[float] = None
197
+ """The minimum score threshold for content reranking."""
198
+
199
+ rerank_top_n: Optional[int] = None
200
+ """The number of top results to return for content reranking."""
201
+
202
+ rerank: Optional[List[RerankOptions]] = None
203
+
204
+ enable_reject_filter: Optional[bool] = None
205
+ """Whether to enable content rejection filtering."""
206
+
207
+ reject_filter_type: Optional[str] = None
208
+ """The type of content rejection filter to use."""
209
+
210
+ reject_filter_model_name: Optional[str] = None
211
+ """The name of the model to use for content rejection filtering."""
212
+
213
+ reject_filter_prompt: Optional[str] = None
214
+ """The prompt to use for content rejection filtering."""
215
+
216
+ enable_agg_search: Optional[bool] = None
217
+ """Whether to enable aggregation search."""
218
+
219
+ enable_hybrid_gen: Optional[bool] = None
220
+ """Whether to enable hybrid generations."""
221
+
222
+ @field_validator("prompt_strategy")
223
+ def prompt_strategy_check(self, value: str) -> str:
224
+ if value:
225
+ value = value.lower()
226
+ if value in ["topk", "top_k"]:
227
+ return "topK"
228
+ return value
229
+
230
+ @field_validator("maximum_allowed_chunk_num")
231
+ def maximum_allowed_chunk_num_check(self, value: int) -> int:
232
+ if value < int(PARAM_MAXIMUM_ALLOWED_CHUNK_NUM_MIN) or value > int(
233
+ PARAM_MAXIMUM_ALLOWED_CHUNK_NUM_MAX,
234
+ ):
235
+ raise KeyError(
236
+ f"Range of maximum_allowed_chunk_num should be "
237
+ f"[{PARAM_MAXIMUM_ALLOWED_CHUNK_NUM_MIN}, "
238
+ f"{PARAM_MAXIMUM_ALLOWED_CHUNK_NUM_MAX}]",
239
+ )
240
+ return value
241
+
242
+
243
+ class ModelstudioParameters(Parameters):
244
+ """
245
+ Parameters for Modelstudio platform, extending the base Parameters with
246
+ Modelstudio-specific options.
247
+ """
248
+
249
+ repetition_penalty: Union[float, None] = None
250
+ """Penalty for repeating tokens. Higher values reduce repetition."""
251
+
252
+ length_penalty: Union[float, None] = None
253
+ """Penalty applied to longer sequences. Affects the length of generated
254
+ text. """
255
+
256
+ top_k: Union[StrictInt, None] = None
257
+ """The number of highest probability vocabulary tokens to keep for top-k
258
+ filtering."""
259
+
260
+ min_tokens: Optional[int] = None
261
+ """The minimum number of tokens to generate before stopping."""
262
+
263
+ result_format: Literal["text", "message"] = "message"
264
+ """The format of the response ('text' for plain text, 'message' for
265
+ structured message) """
266
+
267
+ incremental_output: bool = False
268
+ """Whether to return incremental output during generation."""
269
+
270
+ # Search
271
+ enable_search: bool = False
272
+ """Whether to enable search capabilities for knowledge retrieval."""
273
+
274
+ search_options: Optional[SearchOptions] = SearchOptions()
275
+ """Configuration options for search functionality."""
276
+
277
+ # RAG
278
+ enable_rag: bool = False # RAG of modelstudio assistant service
279
+ """Whether to enable Retrieval-Augmented Generation (RAG) for the
280
+ Modelstudio assistant service. """
281
+
282
+ rag_options: Union[RagOptions, None] = None
283
+ """Configuration options for RAG functionality."""
284
+
285
+ selected_model: Optional[str] = "qwen-max"
286
+ """The selected model name to use for generation."""
287
+
288
+ # Intention
289
+ intention_options: Optional[IntentionOptions] = None
290
+ """Options for intentions recognition and processing."""
291
+
292
+ # MCP Servers
293
+ mcp_config_file: Optional[str] = None
294
+ """Path to the MCP (Model Context Protocol) configuration file."""
295
+
296
+
297
+ class ModelstudioChatRequest(ModelstudioParameters):
298
+ messages: List[OpenAIMessage]
299
+ """A list of messages comprising the conversation so far."""
300
+
301
+ model: str
302
+ """ID of the model to use for the chat completion."""
303
+
304
+
305
+ class ModelstudioChatResponse(ChatCompletion):
306
+ pass
307
+
308
+
309
+ class ModelstudioChatCompletionChunk(ChatCompletionChunk):
310
+ pass