agentscope-runtime 0.1.0__py3-none-any.whl → 0.1.1__py3-none-any.whl

This diff reflects the changes between the publicly released 0.1.0 and 0.1.1 package versions as they appear in their public registry, and is provided for informational purposes only.
Files changed (27)
  1. agentscope_runtime/engine/agents/agentscope_agent/agent.py +1 -0
  2. agentscope_runtime/engine/agents/agno_agent.py +1 -0
  3. agentscope_runtime/engine/agents/autogen_agent.py +245 -0
  4. agentscope_runtime/engine/schemas/agent_schemas.py +1 -1
  5. agentscope_runtime/engine/services/memory_service.py +2 -2
  6. agentscope_runtime/engine/services/redis_memory_service.py +187 -0
  7. agentscope_runtime/engine/services/redis_session_history_service.py +155 -0
  8. agentscope_runtime/sandbox/build.py +1 -1
  9. agentscope_runtime/sandbox/custom/custom_sandbox.py +0 -1
  10. agentscope_runtime/sandbox/custom/example.py +0 -1
  11. agentscope_runtime/sandbox/manager/container_clients/__init__.py +2 -0
  12. agentscope_runtime/sandbox/manager/container_clients/docker_client.py +246 -4
  13. agentscope_runtime/sandbox/manager/container_clients/kubernetes_client.py +550 -0
  14. agentscope_runtime/sandbox/manager/sandbox_manager.py +21 -82
  15. agentscope_runtime/sandbox/manager/server/app.py +55 -24
  16. agentscope_runtime/sandbox/manager/server/config.py +28 -16
  17. agentscope_runtime/sandbox/model/container.py +3 -1
  18. agentscope_runtime/sandbox/model/manager_config.py +19 -2
  19. agentscope_runtime/sandbox/tools/tool.py +111 -0
  20. agentscope_runtime/version.py +1 -1
  21. {agentscope_runtime-0.1.0.dist-info → agentscope_runtime-0.1.1.dist-info}/METADATA +74 -13
  22. {agentscope_runtime-0.1.0.dist-info → agentscope_runtime-0.1.1.dist-info}/RECORD +26 -23
  23. agentscope_runtime/sandbox/manager/utils.py +0 -78
  24. {agentscope_runtime-0.1.0.dist-info → agentscope_runtime-0.1.1.dist-info}/WHEEL +0 -0
  25. {agentscope_runtime-0.1.0.dist-info → agentscope_runtime-0.1.1.dist-info}/entry_points.txt +0 -0
  26. {agentscope_runtime-0.1.0.dist-info → agentscope_runtime-0.1.1.dist-info}/licenses/LICENSE +0 -0
  27. {agentscope_runtime-0.1.0.dist-info → agentscope_runtime-0.1.1.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,550 @@
+ # -*- coding: utf-8 -*-
+ # pylint: disable=too-many-branches
+ import time
+ import hashlib
+ import traceback
+ import logging
+
+ from kubernetes import client
+ from kubernetes import config as k8s_config
+ from kubernetes.client.rest import ApiException
+
+ from .base_client import BaseClient
+
+ logger = logging.getLogger(__name__)
+
+
+ class KubernetesClient(BaseClient):
+     def __init__(self, config=None):
+         self.config = config
+         namespace = self.config.k8s_namespace
+         kubeconfig = self.config.kubeconfig_path
+         try:
+             if kubeconfig:
+                 k8s_config.load_kube_config(config_file=kubeconfig)
+             else:
+                 # Try to load in-cluster config first, then fall back to
+                 # kubeconfig
+                 try:
+                     k8s_config.load_incluster_config()
+                 except k8s_config.ConfigException:
+                     k8s_config.load_kube_config()
+             self.v1 = client.CoreV1Api()
+             self.namespace = namespace
+             # Test connection
+             self.v1.list_namespace()
+             logger.debug("Kubernetes client initialized successfully")
+         except Exception as e:
+             raise RuntimeError(
+                 f"Kubernetes client initialization failed: {str(e)}\n"
+                 "Solutions:\n"
+                 "• Ensure kubectl is configured\n"
+                 "• Check kubeconfig file permissions\n"
+                 "• Verify cluster connectivity\n"
+                 "• For in-cluster: ensure proper RBAC permissions",
+             ) from e
+
+     def _parse_port_spec(self, port_spec):
+         """
+         Parse port specification.
+         - "80/tcp" -> {"port": 80, "protocol": "TCP"}
+         - "80" -> {"port": 80, "protocol": "TCP"}
+         - 80 -> {"port": 80, "protocol": "TCP"}
+         """
+         try:
+             if isinstance(port_spec, int):
+                 return {"port": port_spec, "protocol": "TCP"}
+
+             if isinstance(port_spec, str):
+                 if "/" in port_spec:
+                     port_str, protocol = port_spec.split("/", 1)
+                 else:
+                     port_str = port_spec
+                     protocol = "TCP"
+
+                 port = int(port_str)
+                 protocol = protocol.upper()
+
+                 return {"port": port, "protocol": protocol}
+
+             # Log a warning if the port_spec is neither int nor str
+             logger.warning(f"Unsupported port specification: {port_spec}")
+             return None
+
+         except ValueError as e:
+             logger.error(f"Failed to parse port spec '{port_spec}': {e}")
+             return None
+
+     def _create_pod_spec(
+         self,
+         image,
+         name,
+         ports=None,
+         volumes=None,
+         environment=None,
+         runtime_config=None,
+     ):
+         """Create a Kubernetes Pod specification."""
+         if runtime_config is None:
+             runtime_config = {}
+
+         container_name = name or "main-container"
+         # Container specification
+         container = client.V1Container(
+             name=container_name,
+             image=f"agentscope-registry.ap-southeast-1.cr.aliyuncs.com"
+             f"/{image}",
+             image_pull_policy=runtime_config.get(
+                 "image_pull_policy",
+                 "IfNotPresent",
+             ),
+         )
+
+         # Configure ports
+         if ports:
+             container_ports = []
+             for port_spec in ports:
+                 port_info = self._parse_port_spec(port_spec)
+                 if port_info:
+                     container_ports.append(
+                         client.V1ContainerPort(
+                             container_port=port_info["port"],
+                             protocol=port_info["protocol"],
+                         ),
+                     )
+             if container_ports:
+                 container.ports = container_ports
+
+         # Configure environment variables
+         if environment:
+             env_vars = []
+             for key, value in environment.items():
+                 env_vars.append(client.V1EnvVar(name=key, value=str(value)))
+             container.env = env_vars
+
+         # Configure volume mounts and volumes
+         volume_mounts = []
+         pod_volumes = []
+         if volumes:
+             for volume_idx, (host_path, mount_info) in enumerate(
+                 volumes.items(),
+             ):
+                 if isinstance(mount_info, dict):
+                     container_path = mount_info["bind"]
+                     mode = mount_info.get("mode", "rw")
+                 else:
+                     container_path = mount_info
+                     mode = "rw"
+                 volume_name = f"vol-{volume_idx}"
+
+                 # Create volume mount
+                 volume_mounts.append(
+                     client.V1VolumeMount(
+                         name=volume_name,
+                         mount_path=container_path,
+                         read_only=(mode == "ro"),
+                     ),
+                 )
+                 # Create host path volume
+                 pod_volumes.append(
+                     client.V1Volume(
+                         name=volume_name,
+                         host_path=client.V1HostPathVolumeSource(
+                             path=host_path,
+                         ),
+                     ),
+                 )
+
+         if volume_mounts:
+             container.volume_mounts = volume_mounts
+
+         # Apply runtime config to container
+         if "resources" in runtime_config:
+             container.resources = client.V1ResourceRequirements(
+                 **runtime_config["resources"],
+             )
+
+         if "security_context" in runtime_config:
+             container.security_context = client.V1SecurityContext(
+                 **runtime_config["security_context"],
+             )
+
+         # Pod specification
+         pod_spec = client.V1PodSpec(
+             containers=[container],
+             restart_policy=runtime_config.get("restart_policy", "Never"),
+         )
+
+         if pod_volumes:
+             pod_spec.volumes = pod_volumes
+
+         if "node_selector" in runtime_config:
+             pod_spec.node_selector = runtime_config["node_selector"]
+
+         if "tolerations" in runtime_config:
+             pod_spec.tolerations = runtime_config["tolerations"]
+
+         # Handle image pull secrets (for ACR or other private registries)
+         image_pull_secrets = runtime_config.get("image_pull_secrets", [])
+         if image_pull_secrets:
+             secrets = []
+             for secret_name in image_pull_secrets:
+                 secrets.append(client.V1LocalObjectReference(name=secret_name))
+             pod_spec.image_pull_secrets = secrets
+
+         return pod_spec
+
+     def create(
+         self,
+         image,
+         name=None,
+         ports=None,
+         volumes=None,
+         environment=None,
+         runtime_config=None,
+     ):
+         """Create a new Kubernetes Pod."""
+         if not name:
+             name = f"pod-{hashlib.md5(image.encode()).hexdigest()[:8]}"
+         try:
+             # Create pod specification
+             pod_spec = self._create_pod_spec(
+                 image,
+                 name,
+                 ports,
+                 volumes,
+                 environment,
+                 runtime_config,
+             )
+             # Create pod metadata
+             metadata = client.V1ObjectMeta(
+                 name=name,
+                 namespace=self.namespace,
+                 labels={
+                     "created-by": "kubernetes-client",
+                     "app": name,
+                 },
+             )
+
+             # Create pod object
+             pod = client.V1Pod(
+                 api_version="v1",
+                 kind="Pod",
+                 metadata=metadata,
+                 spec=pod_spec,
+             )
+             # Create the pod
+             self.v1.create_namespaced_pod(
+                 namespace=self.namespace,
+                 body=pod,
+             )
+             logger.debug(
+                 f"Pod '{name}' created successfully in namespace "
+                 f"'{self.namespace}'",
+             )
+
+             exposed_ports = []
+             # Auto-create services for exposed ports (like Docker's port
+             # mapping)
+             if ports:
+                 parsed_ports = []
+                 for port_spec in ports:
+                     port_info = self._parse_port_spec(port_spec)
+                     if port_info:
+                         parsed_ports.append(port_info)
+
+                 if parsed_ports:
+                     service_created = self._create_multi_port_service(
+                         name,
+                         parsed_ports,
+                     )
+                     if service_created:
+                         exposed_ports = self._get_service_node_ports(name)
+                     logger.debug(
+                         f"Pod '{name}' created with exposed ports: {exposed_ports}",
+                     )
+
+             if not self.wait_for_pod_ready(name, timeout=60):
+                 logger.error(f"Pod '{name}' failed to become ready")
+                 return None, None
+
+             return name, exposed_ports
+         except Exception as e:
+             logger.error(f"An error occurred: {e}, {traceback.format_exc()}")
+             return None, None
+
+     def start(self, container_id):
+         """
+         Start a Kubernetes Pod.
+         Note: Pods start automatically upon creation in Kubernetes.
+         This method verifies the pod is running or can be started.
+         """
+         try:
+             pod = self.v1.read_namespaced_pod(
+                 name=container_id,
+                 namespace=self.namespace,
+             )
+
+             current_phase = pod.status.phase
+             logger.debug(
+                 f"Pod '{container_id}' current phase: {current_phase}",
+             )
+
+             if current_phase in ["Running", "Pending"]:
+                 return True
+             elif current_phase in ["Failed", "Succeeded"]:
+                 logger.warning(
+                     f"Pod '{container_id}' is in '{current_phase}' state and "
+                     f"cannot be restarted. Consider recreating it.",
+                 )
+                 return False
+             else:
+                 logger.debug(f"Pod '{container_id}' status: {current_phase}")
+                 return True
+         except ApiException as e:
+             if e.status == 404:
+                 logger.error(f"Pod '{container_id}' not found")
+             else:
+                 logger.error(f"Failed to check pod status: {e.reason}")
+             return False
+         except Exception as e:
+             logger.error(f"An error occurred: {e}, {traceback.format_exc()}")
+             return False
+
+     def stop(self, container_id, timeout=None):
+         """Stop a Kubernetes Pod by deleting it gracefully."""
+         try:
+             grace_period = timeout if timeout else 30
+             delete_options = client.V1DeleteOptions(
+                 grace_period_seconds=grace_period,
+             )
+             self.v1.delete_namespaced_pod(
+                 name=container_id,
+                 namespace=self.namespace,
+                 body=delete_options,
+             )
+             logger.debug(
+                 f"Pod '{container_id}' deletion initiated with"
+                 f" {grace_period}s grace period",
+             )
+             return True
+         except ApiException as e:
+             if e.status == 404:
+                 logger.warning(f"Pod '{container_id}' not found")
+                 return True
+             logger.error(f"Failed to delete pod: {e.reason}")
+             return False
+         except Exception as e:
+             logger.error(f"An error occurred: {e}, {traceback.format_exc()}")
+             return False
+
+     def remove(self, container_id, force=False):
+         """Remove a Kubernetes Pod."""
+         try:
+             # Remove all associated services first
+             self._remove_pod_services(container_id)
+
+             delete_options = client.V1DeleteOptions()
+
+             if force:
+                 delete_options.grace_period_seconds = 0
+                 delete_options.propagation_policy = "Background"
+             self.v1.delete_namespaced_pod(
+                 name=container_id,
+                 namespace=self.namespace,
+                 body=delete_options,
+             )
+             logger.debug(
+                 f"Pod '{container_id}' removed"
+                 f" {'forcefully' if force else 'gracefully'}",
+             )
+             return True
+         except ApiException as e:
+             if e.status == 404:
+                 logger.warning(f"Pod '{container_id}' not found")
+                 return True
+             logger.error(f"Failed to remove pod: {e.reason}")
+             return False
+         except Exception as e:
+             logger.error(f"An error occurred: {e}, {traceback.format_exc()}")
+             return False
+
+     def _remove_pod_services(self, pod_name):
+         """Remove the service associated with a pod"""
+         service_name = f"{pod_name}-service"
+         try:
+             self.v1.delete_namespaced_service(
+                 name=service_name,
+                 namespace=self.namespace,
+             )
+             logger.debug(f"Removed service {service_name}")
+         except client.ApiException as e:
+             if e.status == 404:
+                 logger.debug(
+                     f"Service {service_name} not found (already removed)",
+                 )
+             else:
+                 logger.warning(f"Failed to remove service {service_name}: {e}")
+         except Exception as e:
+             logger.error(f"Failed to remove service for pod {pod_name}: {e}")
+
+     def inspect(self, container_id):
+         """Inspect a Kubernetes Pod."""
+         try:
+             pod = self.v1.read_namespaced_pod(
+                 name=container_id,
+                 namespace=self.namespace,
+             )
+             return pod.to_dict()
+         except ApiException as e:
+             if e.status == 404:
+                 logger.warning(f"Pod '{container_id}' not found")
+             else:
+                 logger.error(f"Failed to inspect pod: {e.reason}")
+             return None
+         except Exception as e:
+             logger.error(f"An error occurred: {e}, {traceback.format_exc()}")
+             return None
+
+     def get_status(self, container_id):
+         """Get the current status of the specified pod."""
+         pod_info = self.inspect(container_id)
+         if pod_info and "status" in pod_info:
+             return pod_info["status"]["phase"].lower()
+         return None
+
+     def get_logs(
+         self,
+         container_id,
+         container_name=None,
+         tail_lines=None,
+         follow=False,
+     ):
+         """Get logs from a pod."""
+         try:
+             logs = self.v1.read_namespaced_pod_log(
+                 name=container_id,
+                 namespace=self.namespace,
+                 container=container_name,
+                 tail_lines=tail_lines,
+                 follow=follow,
+             )
+             return logs
+         except ApiException as e:
+             logger.error(
+                 f"Failed to get logs from pod '{container_id}': {e.reason}",
+             )
+             return None
+         except Exception as e:
+             logger.error(f"An error occurred: {e}, {traceback.format_exc()}")
+             return None
+
+     def list_pods(self, label_selector=None):
+         """List pods in the namespace."""
+         try:
+             pods = self.v1.list_namespaced_pod(
+                 namespace=self.namespace,
+                 label_selector=label_selector,
+             )
+             return [pod.metadata.name for pod in pods.items]
+         except ApiException as e:
+             logger.error(f"Failed to list pods: {e.reason}")
+             return []
+         except Exception as e:
+             logger.error(f"An error occurred: {e}, {traceback.format_exc()}")
+             return []
+
+     def wait_for_pod_ready(self, container_id, timeout=300):
+         """Wait for a pod to be ready."""
+         start_time = time.time()
+         while time.time() - start_time < timeout:
+             try:
+                 pod = self.v1.read_namespaced_pod(
+                     name=container_id,
+                     namespace=self.namespace,
+                 )
+                 if pod.status.phase == "Running":
+                     # Check if all containers are ready
+                     if pod.status.container_statuses:
+                         all_ready = all(
+                             container.ready
+                             for container in pod.status.container_statuses
+                         )
+                         if all_ready:
+                             return True
+                 elif pod.status.phase in ["Failed", "Succeeded"]:
+                     return False
+                 time.sleep(2)
+             except ApiException as e:
+                 if e.status == 404:
+                     return False
+                 time.sleep(2)
+         return False
+
+     def _create_multi_port_service(self, pod_name, port_list):
+         """Create a single service with multiple ports for the pod."""
+         try:
+             service_name = f"{pod_name}-service"
+             selector = {"app": pod_name}
+
+             # Construct multi-port configuration
+             service_ports = []
+             for port_info in port_list:
+                 port = port_info["port"]
+                 protocol = port_info["protocol"]
+                 service_ports.append(
+                     client.V1ServicePort(
+                         name=f"port-{port}",  # Each port needs a unique name
+                         port=port,
+                         target_port=port,
+                         protocol=protocol,
+                     ),
+                 )
+
+             service_spec = client.V1ServiceSpec(
+                 selector=selector,
+                 ports=service_ports,
+                 type="NodePort",
+             )
+
+             service = client.V1Service(
+                 api_version="v1",
+                 kind="Service",
+                 metadata=client.V1ObjectMeta(name=service_name),
+                 spec=service_spec,
+             )
+
+             # Create the service in the specified namespace
+             self.v1.create_namespaced_service(
+                 namespace=self.namespace,
+                 body=service,
+             )
+
+             # Wait for service to be ready
+             time.sleep(1)
+             return True
+         except Exception as e:
+             logger.error(
+                 f"Failed to create multi-port service for pod {pod_name}: "
+                 f"{e}, {traceback.format_exc()}",
+             )
+             return False
+
+     def _get_service_node_ports(self, pod_name):
+         """Get the NodePort for a service"""
+         try:
+             service_name = f"{pod_name}-service"
+             service_info = self.v1.read_namespaced_service(
+                 name=service_name,
+                 namespace=self.namespace,
+             )
+
+             node_ports = []
+             for port in service_info.spec.ports:
+                 if port.node_port:
+                     node_ports.append(port.node_port)
+
+             return node_ports
+         except Exception as e:
+             logger.error(f"Failed to get node port: {e}")
+             return None
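
For orientation, below is a minimal, hypothetical sketch of how the new KubernetesClient added in 0.1.1 might be exercised. The DemoConfig class, image name, pod name, and environment variable are placeholders introduced here for illustration; they are not part of the package, and the real runtime passes its own manager config object, of which only the two attributes read in __init__ (k8s_namespace and kubeconfig_path) are stubbed.

# -*- coding: utf-8 -*-
"""Hypothetical usage sketch for KubernetesClient (not shipped with the package)."""
from dataclasses import dataclass
from typing import Optional

from agentscope_runtime.sandbox.manager.container_clients.kubernetes_client import (
    KubernetesClient,
)


@dataclass
class DemoConfig:
    # Stand-in config: only the attributes KubernetesClient.__init__ reads.
    k8s_namespace: str = "default"
    kubeconfig_path: Optional[str] = None  # None -> in-cluster or default kubeconfig


k8s = KubernetesClient(config=DemoConfig())

# Create a pod from an image (the ACR registry prefix is prepended inside
# _create_pod_spec); "my-sandbox-image:latest" is a placeholder image name.
pod_name, node_ports = k8s.create(
    image="my-sandbox-image:latest",
    name="sandbox-demo",
    ports=["80/tcp"],
    environment={"DEMO": "1"},
)

if pod_name:
    # A NodePort service is created automatically for the parsed ports.
    print(f"pod={pod_name}, NodePorts={node_ports}")
    print("status:", k8s.get_status(pod_name))
    print(k8s.get_logs(pod_name, tail_lines=20))
    k8s.stop(pod_name)
    k8s.remove(pod_name, force=True)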