agentscope-runtime 1.0.2__py3-none-any.whl → 1.0.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (25)
  1. agentscope_runtime/cli/commands/deploy.py +12 -0
  2. agentscope_runtime/common/collections/redis_mapping.py +4 -1
  3. agentscope_runtime/engine/app/agent_app.py +48 -5
  4. agentscope_runtime/engine/deployers/adapter/a2a/__init__.py +56 -1
  5. agentscope_runtime/engine/deployers/adapter/a2a/a2a_protocol_adapter.py +449 -41
  6. agentscope_runtime/engine/deployers/adapter/a2a/a2a_registry.py +273 -0
  7. agentscope_runtime/engine/deployers/adapter/a2a/nacos_a2a_registry.py +640 -0
  8. agentscope_runtime/engine/deployers/kubernetes_deployer.py +3 -0
  9. agentscope_runtime/engine/deployers/utils/docker_image_utils/dockerfile_generator.py +8 -2
  10. agentscope_runtime/engine/deployers/utils/docker_image_utils/image_factory.py +5 -0
  11. agentscope_runtime/engine/deployers/utils/net_utils.py +65 -0
  12. agentscope_runtime/engine/runner.py +5 -3
  13. agentscope_runtime/engine/schemas/exception.py +24 -0
  14. agentscope_runtime/engine/services/agent_state/redis_state_service.py +61 -8
  15. agentscope_runtime/engine/services/agent_state/state_service_factory.py +2 -5
  16. agentscope_runtime/engine/services/memory/redis_memory_service.py +129 -25
  17. agentscope_runtime/engine/services/session_history/redis_session_history_service.py +160 -34
  18. agentscope_runtime/sandbox/build.py +50 -57
  19. agentscope_runtime/version.py +1 -1
  20. {agentscope_runtime-1.0.2.dist-info → agentscope_runtime-1.0.3.dist-info}/METADATA +9 -3
  21. {agentscope_runtime-1.0.2.dist-info → agentscope_runtime-1.0.3.dist-info}/RECORD +25 -22
  22. {agentscope_runtime-1.0.2.dist-info → agentscope_runtime-1.0.3.dist-info}/WHEEL +0 -0
  23. {agentscope_runtime-1.0.2.dist-info → agentscope_runtime-1.0.3.dist-info}/entry_points.txt +0 -0
  24. {agentscope_runtime-1.0.2.dist-info → agentscope_runtime-1.0.3.dist-info}/licenses/LICENSE +0 -0
  25. {agentscope_runtime-1.0.2.dist-info → agentscope_runtime-1.0.3.dist-info}/top_level.txt +0 -0
agentscope_runtime/engine/deployers/utils/docker_image_utils/image_factory.py
@@ -40,6 +40,7 @@ class ImageConfig(BaseModel):
     port: int = 8000
     env_vars: Dict[str, str] = Field(default_factory=lambda: {})
     startup_command: Optional[str] = None
+    pypi_mirror: Optional[str] = None
 
     # Runtime configuration
     host: str = "0.0.0.0"  # Container-friendly default
@@ -218,6 +219,7 @@ class ImageFactory:
             env_vars=config.env_vars,
             startup_command=startup_command,
             platform=config.platform,
+            pypi_mirror=config.pypi_mirror,
         )
 
         dockerfile_path = self.dockerfile_generator.create_dockerfile(
@@ -314,6 +316,7 @@ class ImageFactory:
         embed_task_processor: bool = True,
         extra_startup_args: Optional[Dict[str, Union[str, int, bool]]] = None,
         use_cache: bool = True,
+        pypi_mirror: Optional[str] = None,
         **kwargs,
     ) -> str:
         """
@@ -339,6 +342,7 @@ class ImageFactory:
             embed_task_processor: Whether to embed task processor
             extra_startup_args: Additional startup arguments
             use_cache: Enable build cache (default: True)
+            pypi_mirror: PyPI mirror URL for pip package installation
             **kwargs: Additional configuration options
 
         Returns:
@@ -373,6 +377,7 @@ class ImageFactory:
             host=host,
             embed_task_processor=embed_task_processor,
             extra_startup_args=extra_startup_args or {},
+            pypi_mirror=pypi_mirror,
             **kwargs,
         )
 
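Taken together, the image_factory.py hunks thread one new option, pypi_mirror, from the ImageConfig model through ImageFactory into Dockerfile generation, so pip installs during the image build can be routed through a mirror. A minimal usage sketch; the build-method name and mirror URL are assumptions for illustration, since neither appears in this diff:

    factory = ImageFactory()
    # Hypothetical entry point; only the parameter names are from the diff above
    image_tag = factory.build(
        use_cache=True,
        pypi_mirror="https://mirrors.example.com/pypi/simple",  # assumed URL
    )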
agentscope_runtime/engine/deployers/utils/net_utils.py (new file)
@@ -0,0 +1,65 @@
+# -*- coding: utf-8 -*-
+import ipaddress
+import os
+import socket
+from typing import Optional
+
+import psutil
+
+
+def get_first_non_loopback_ip() -> Optional[str]:
+    """Get the first non-loopback IP address from network interfaces.
+
+    - Selects the interface with the lowest index
+    - Only considers interfaces that are up
+    - Supports IPv4/IPv6 based on environment variable
+    - Falls back to socket.gethostbyname() if no address found
+
+    Returns:
+        str | None: The first non-loopback IP address, or None if not found
+    """
+    result = None
+    lowest_index = float("inf")
+
+    use_ipv6 = os.environ.get("USE_IPV6", "false").lower() == "true"
+    target_family = socket.AF_INET6 if use_ipv6 else socket.AF_INET
+
+    net_if_stats = psutil.net_if_stats()
+
+    for index, (interface, addrs) in enumerate(
+        psutil.net_if_addrs().items(),
+    ):
+        stats = net_if_stats.get(interface)
+        if stats is None or not stats.isup:
+            continue
+
+        if index < lowest_index or result is None:
+            lowest_index = index
+        else:
+            continue
+
+        for addr in addrs:
+            if addr.family != target_family:
+                continue
+
+            try:
+                ip_obj = ipaddress.ip_address(
+                    addr.address.split("%")[0],
+                )
+                if ip_obj.is_loopback:
+                    continue
+                result = addr.address
+            except ValueError:
+                continue
+
+    if result is not None:
+        return result
+
+    try:
+        hostname = socket.gethostname()
+        fallback_ip = socket.gethostbyname(hostname)
+        return fallback_ip
+    except socket.error:
+        pass
+
+    return None
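The new helper scans interfaces in enumeration order, skips interfaces that are down, honors a USE_IPV6 environment toggle for address family, and falls back to resolving the hostname when no candidate is found. A short sketch of calling it, using the module path from the file list above:

    import os

    from agentscope_runtime.engine.deployers.utils.net_utils import (
        get_first_non_loopback_ip,
    )

    # Default: first non-loopback IPv4 address on an interface that is up
    print(get_first_non_loopback_ip())

    # The helper reads this toggle itself, so IPv6 is opt-in per process
    os.environ["USE_IPV6"] = "true"
    print(get_first_non_loopback_ip())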
agentscope_runtime/engine/runner.py
@@ -282,6 +282,7 @@ class Runner:
 
         stream_adapter = identity_stream_adapter
 
+        error = None
         try:
             async for event in stream_adapter(
                 source_stream=self._call_handler_streaming(
@@ -301,8 +302,6 @@ class Runner:
                 e = UnknownAgentException(original_exception=e)
             error = Error(code=e.code, message=e.message)
             logger.error(f"{error.model_dump()}: {traceback.format_exc()}")
-            yield seq_gen.yield_with_sequence(response.failed(error))
-            return
 
         # Obtain token usage
         try:
@@ -312,4 +311,7 @@ class Runner:
             # Avoid empty message
             pass
 
-        yield seq_gen.yield_with_sequence(response.completed())
+        if error:
+            yield seq_gen.yield_with_sequence(response.failed(error))
+        else:
+            yield seq_gen.yield_with_sequence(response.completed())
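These three runner.py hunks change error handling in the streaming path: instead of yielding response.failed(...) and returning from inside the except block, the runner records the error, lets the token-usage bookkeeping still run, and emits exactly one terminal event at the end. A standalone sketch of the pattern with illustrative names; only the control flow mirrors the diff:

    async def stream_with_single_terminal_event(source):
        # Capture the failure instead of terminating the stream early
        error = None
        try:
            async for event in source:
                yield event
        except Exception as exc:  # illustrative; the runner wraps exceptions
            error = exc

        # Post-stream bookkeeping (e.g. token usage) still runs here.

        # Exactly one terminal event, chosen by whether a failure occurred
        if error:
            yield ("failed", str(error))
        else:
            yield ("completed", None)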
agentscope_runtime/engine/schemas/exception.py
@@ -578,3 +578,27 @@ class UnknownAgentException(AgentRuntimeErrorException):
             message,
             details,
         )
+
+
+class ModelQuotaExceededException(AgentRuntimeErrorException):
+    """Model quota exceeded"""
+
+    def __init__(
+        self,
+        model_name: str,
+        details: Optional[Dict[str, Any]] = None,
+    ):
+        message = f"Model quota exceeded: {model_name}"
+        super().__init__("MODEL_QUOTA_EXCEEDED", message, details)
+
+
+class ModelContextLengthExceededException(AgentRuntimeErrorException):
+    """Model context length exceeded"""
+
+    def __init__(
+        self,
+        model_name: str,
+        details: Optional[Dict[str, Any]] = None,
+    ):
+        message = f"Model context length exceeded: {model_name}"
+        super().__init__("MODEL_CONTEXT_LENGTH_EXCEEDED", message, details)
agentscope_runtime/engine/services/agent_state/redis_state_service.py
@@ -21,29 +21,68 @@ class RedisStateService(StateService):
         self,
         redis_url: str = "redis://localhost:6379/0",
         redis_client: Optional[aioredis.Redis] = None,
+        socket_timeout: Optional[float] = 5.0,
+        socket_connect_timeout: Optional[float] = 5.0,
+        max_connections: Optional[int] = 50,
+        retry_on_timeout: bool = True,
+        ttl_seconds: Optional[int] = 3600,  # 1 hour in seconds
+        health_check_interval: Optional[float] = 30.0,
+        socket_keepalive: bool = True,
     ):
+        """
+        Initialize RedisStateService.
+
+        Args:
+            redis_url: Redis connection URL
+            redis_client: Optional pre-configured Redis client
+            socket_timeout: Socket timeout in seconds (default: 5.0)
+            socket_connect_timeout: Socket connect timeout in seconds
+                (default: 5.0)
+            max_connections: Maximum number of connections in the pool
+                (default: 50)
+            retry_on_timeout: Whether to retry on timeout (default: True)
+            ttl_seconds: Time-to-live in seconds for state data. If None,
+                data never expires (default: 3600, i.e., 1 hour)
+            health_check_interval: Interval in seconds for health checks on
+                idle connections (default: 30.0).
+                Connections idle longer than this will be checked before reuse.
+                Set to 0 to disable.
+            socket_keepalive: Enable TCP keepalive to prevent
+                silent disconnections (default: True)
+        """
         self._redis_url = redis_url
         self._redis = redis_client
-        self._health = False
+        self._socket_timeout = socket_timeout
+        self._socket_connect_timeout = socket_connect_timeout
+        self._max_connections = max_connections
+        self._retry_on_timeout = retry_on_timeout
+        self._ttl_seconds = ttl_seconds
+        self._health_check_interval = health_check_interval
+        self._socket_keepalive = socket_keepalive
 
     async def start(self) -> None:
-        """Initialize the Redis connection."""
+        """Starts the Redis connection with proper timeout and connection
+        pool settings."""
         if self._redis is None:
             self._redis = aioredis.from_url(
                 self._redis_url,
                 decode_responses=True,
+                socket_timeout=self._socket_timeout,
+                socket_connect_timeout=self._socket_connect_timeout,
+                max_connections=self._max_connections,
+                retry_on_timeout=self._retry_on_timeout,
+                health_check_interval=self._health_check_interval,
+                socket_keepalive=self._socket_keepalive,
             )
-        self._health = True
 
     async def stop(self) -> None:
-        """Close the Redis connection."""
+        """Closes the Redis connection."""
         if self._redis:
-            await self._redis.close()
+            await self._redis.aclose()
             self._redis = None
-        self._health = False
 
     async def health(self) -> bool:
-        """Service health check."""
+        """Checks the health of the service."""
         if not self._redis:
             return False
         try:
@@ -81,6 +120,11 @@ class RedisStateService(StateService):
             round_id = 1
 
         await self._redis.hset(key, round_id, json.dumps(state))
+
+        # Set TTL for the state key if configured
+        if self._ttl_seconds is not None:
+            await self._redis.expire(key, self._ttl_seconds)
+
         return round_id
 
     async def export_state(
@@ -110,4 +154,13 @@ class RedisStateService(StateService):
 
         if state_json is None:
             return None
-        return json.loads(state_json)
+
+        # Refresh TTL when accessing the state
+        if self._ttl_seconds is not None:
+            await self._redis.expire(key, self._ttl_seconds)
+
+        try:
+            return json.loads(state_json)
+        except json.JSONDecodeError:
+            # Return None for corrupted state data instead of raising
+            return None
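Two behavioral notes fall out of these hunks: the default ttl_seconds=3600 means state that previously lived forever now expires after an hour of inactivity (reads refresh the TTL), and close() is replaced by aclose(), the name recent redis-py versions prefer for the async client. A construction sketch with illustrative values; the parameter names are from the diff:

    from agentscope_runtime.engine.services.agent_state.redis_state_service import (
        RedisStateService,
    )

    service = RedisStateService(
        redis_url="redis://localhost:6379/0",
        ttl_seconds=None,         # opt out of the new one-hour expiry
        max_connections=20,       # size the pool for the workload
        health_check_interval=0,  # disable idle-connection checks
    )
    # await service.start() before use; await service.stop() to release the pool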
agentscope_runtime/engine/services/agent_state/state_service_factory.py
@@ -43,13 +43,10 @@ class StateServiceFactory(ServiceFactory[StateService]):
 
 StateServiceFactory.register_backend(
     "in_memory",
-    lambda **kwargs: InMemoryStateService(),
+    InMemoryStateService,
 )
 
 StateServiceFactory.register_backend(
     "redis",
-    lambda **kwargs: RedisStateService(
-        redis_url=kwargs.get("redis_url", "redis://localhost:6379/0"),
-        redis_client=kwargs.get("redis_client"),
-    ),
+    RedisStateService,
 )
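Registering the classes themselves, rather than lambdas that forwarded only redis_url and redis_client, lets every constructor argument added in this release flow through the factory unchanged. A sketch of the effect; the factory's creation method name is an assumption, as it does not appear in this diff:

    # Hypothetical call; only the backend name and kwargs are from the diff
    state_service = StateServiceFactory.create(
        "redis",
        redis_url="redis://cache.internal:6379/0",
        ttl_seconds=7200,  # now reaches RedisStateService.__init__ directly
    )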
agentscope_runtime/engine/services/memory/redis_memory_service.py
@@ -17,23 +17,70 @@ class RedisMemoryService(MemoryService):
         self,
         redis_url: str = "redis://localhost:6379/0",
         redis_client: Optional[aioredis.Redis] = None,
+        socket_timeout: Optional[float] = 5.0,
+        socket_connect_timeout: Optional[float] = 5.0,
+        max_connections: Optional[int] = 50,
+        retry_on_timeout: bool = True,
+        ttl_seconds: Optional[int] = 3600,  # 1 hour in seconds
+        max_messages_per_session: Optional[int] = None,
+        health_check_interval: Optional[float] = 30.0,
+        socket_keepalive: bool = True,
     ):
+        """
+        Initialize RedisMemoryService.
+
+        Args:
+            redis_url: Redis connection URL
+            redis_client: Optional pre-configured Redis client
+            socket_timeout: Socket timeout in seconds (default: 5.0)
+            socket_connect_timeout: Socket connect timeout in seconds
+                (default: 5.0)
+            max_connections: Maximum number of connections in the pool
+                (default: 50)
+            retry_on_timeout: Whether to retry on timeout (default: True)
+            ttl_seconds: Time-to-live in seconds for memory data.
+                If None, data never expires (default: 3600, i.e., 1 hour)
+            max_messages_per_session: Maximum number of messages stored per
+                session_id field within a user's Redis memory hash.
+                If None, no limit (default: None)
+            health_check_interval: Interval in seconds for health checks
+                on idle connections (default: 30.0).
+                Connections idle longer than this will be checked before reuse.
+                Set to 0 to disable.
+            socket_keepalive: Enable TCP keepalive to prevent
+                silent disconnections (default: True)
+        """
         self._redis_url = redis_url
         self._redis = redis_client
         self._DEFAULT_SESSION_ID = "default"
+        self._socket_timeout = socket_timeout
+        self._socket_connect_timeout = socket_connect_timeout
+        self._max_connections = max_connections
+        self._retry_on_timeout = retry_on_timeout
+        self._ttl_seconds = ttl_seconds
+        self._max_messages_per_session = max_messages_per_session
+        self._health_check_interval = health_check_interval
+        self._socket_keepalive = socket_keepalive
 
     async def start(self) -> None:
-        """Starts the Redis connection."""
+        """Starts the Redis connection with proper timeout
+        and connection pool settings."""
         if self._redis is None:
             self._redis = aioredis.from_url(
                 self._redis_url,
                 decode_responses=True,
+                socket_timeout=self._socket_timeout,
+                socket_connect_timeout=self._socket_connect_timeout,
+                max_connections=self._max_connections,
+                retry_on_timeout=self._retry_on_timeout,
+                health_check_interval=self._health_check_interval,
+                socket_keepalive=self._socket_keepalive,
             )
 
     async def stop(self) -> None:
         """Closes the Redis connection."""
         if self._redis:
-            await self._redis.close()
+            await self._redis.aclose()
             self._redis = None
 
     async def health(self) -> bool:
@@ -73,14 +120,27 @@ class RedisMemoryService(MemoryService):
         existing_json = await self._redis.hget(key, field)
         existing_msgs = self._deserialize(existing_json)
         all_msgs = existing_msgs + messages
+
+        # Limit the number of messages per session to prevent memory issues
+        if self._max_messages_per_session is not None:
+            if len(all_msgs) > self._max_messages_per_session:
+                # Keep only the most recent messages
+                all_msgs = all_msgs[-self._max_messages_per_session :]
+
         await self._redis.hset(key, field, self._serialize(all_msgs))
 
-    async def search_memory(
+        # Set TTL for the key if configured
+        if self._ttl_seconds is not None:
+            await self._redis.expire(key, self._ttl_seconds)
+
+    async def search_memory(  # pylint: disable=too-many-branches
         self,
         user_id: str,
         messages: list,
         filters: Optional[Dict[str, Any]] = None,
     ) -> list:
+        if not self._redis:
+            raise RuntimeError("Redis connection is not available")
         key = self._user_key(user_id)
         if (
             not messages
@@ -96,29 +156,52 @@ class RedisMemoryService(MemoryService):
 
         keywords = set(query.lower().split())
 
-        all_msgs = []
-        hash_keys = await self._redis.hkeys(key)
-        for session_id in hash_keys:
-            msgs_json = await self._redis.hget(key, session_id)
-            msgs = self._deserialize(msgs_json)
-            all_msgs.extend(msgs)
-
+        # Process messages in batches to avoid loading all into memory at once
         matched_messages = []
-        for msg in all_msgs:
-            candidate_content = await self.get_query_text(msg)
-            if candidate_content:
-                msg_content_lower = candidate_content.lower()
-                if any(keyword in msg_content_lower for keyword in keywords):
-                    matched_messages.append(msg)
+        hash_keys = await self._redis.hkeys(key)
 
+        # Get top_k limit early to optimize memory usage
+        top_k = None
         if (
             filters
             and "top_k" in filters
             and isinstance(filters["top_k"], int)
         ):
-            return matched_messages[-filters["top_k"] :]
+            top_k = filters["top_k"]
 
-        return matched_messages
+        # Process each session separately to reduce memory footprint
+        for session_id in hash_keys:
+            msgs_json = await self._redis.hget(key, session_id)
+            if not msgs_json:
+                continue
+            try:
+                msgs = self._deserialize(msgs_json)
+            except Exception:
+                # Skip corrupted message data
+                continue
+
+            # Match messages in this session
+            for msg in msgs:
+                candidate_content = await self.get_query_text(msg)
+                if candidate_content:
+                    msg_content_lower = candidate_content.lower()
+                    if any(
+                        keyword in msg_content_lower for keyword in keywords
+                    ):
+                        matched_messages.append(msg)
+
+        # Apply top_k filter if specified
+        if top_k is not None:
+            result = matched_messages[-top_k:]
+        else:
+            result = matched_messages
+
+        # Refresh TTL on read to extend lifetime of actively used data,
+        # if a TTL is configured and there is existing data for this key.
+        if self._ttl_seconds is not None and hash_keys:
+            await self._redis.expire(key, self._ttl_seconds)
+
+        return result
 
     async def get_query_text(self, message: Message) -> str:
         if message:
@@ -133,20 +216,39 @@ class RedisMemoryService(MemoryService):
         user_id: str,
         filters: Optional[Dict[str, Any]] = None,
     ) -> list:
+        if not self._redis:
+            raise RuntimeError("Redis connection is not available")
         key = self._user_key(user_id)
-        all_msgs = []
-        hash_keys = await self._redis.hkeys(key)
-        for session_id in sorted(hash_keys):
-            msgs_json = await self._redis.hget(key, session_id)
-            msgs = self._deserialize(msgs_json)
-            all_msgs.extend(msgs)
-
         page_num = filters.get("page_num", 1) if filters else 1
         page_size = filters.get("page_size", 10) if filters else 10
 
         start_index = (page_num - 1) * page_size
         end_index = start_index + page_size
 
+        # Optimize: calculate which sessions we need to load.
+        # For simplicity, we still load all, but this could be optimized
+        # to only load sessions that contain the requested page range.
+        all_msgs = []
+        hash_keys = await self._redis.hkeys(key)
+        for session_id in sorted(hash_keys):
+            msgs_json = await self._redis.hget(key, session_id)
+            if msgs_json:
+                try:
+                    msgs = self._deserialize(msgs_json)
+                    all_msgs.extend(msgs)
+                except json.JSONDecodeError:
+                    # Skip corrupted message data
+                    continue
+
+        # Early exit optimization: we could stop once enough messages
+        # are loaded to cover the requested page, but all earlier
+        # messages are needed for proper ordering, so we keep loading
+        # everything for correctness.
+
+        # Refresh TTL on active use to keep memory alive,
+        # mirroring get_session behavior
+        if self._ttl_seconds is not None and hash_keys:
+            await self._redis.expire(key, self._ttl_seconds)
         return all_msgs[start_index:end_index]
 
     async def delete_memory(
@@ -154,6 +256,8 @@ class RedisMemoryService(MemoryService):
         user_id: str,
         session_id: Optional[str] = None,
     ) -> None:
+        if not self._redis:
+            raise RuntimeError("Redis connection is not available")
         key = self._user_key(user_id)
         if session_id:
             await self._redis.hdel(key, session_id)
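The memory service gains the same connection, TTL, and guard-clause treatment as the state service, plus a per-session message cap that trims to the most recent entries on write. A construction sketch with illustrative values; the parameter names and the search_memory/delete_memory signatures are from the hunks above:

    from agentscope_runtime.engine.services.memory.redis_memory_service import (
        RedisMemoryService,
    )

    memory = RedisMemoryService(
        redis_url="redis://localhost:6379/0",
        ttl_seconds=3600,              # user hash expires after an hour idle
        max_messages_per_session=500,  # keep only the newest 500 per session
    )
    # await memory.start()
    # hits = await memory.search_memory(user_id, messages, filters={"top_k": 5})
    # await memory.delete_memory(user_id, session_id="default")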