agentscope-runtime 0.1.0__py3-none-any.whl → 0.1.2__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.
Files changed (36)
  1. agentscope_runtime/engine/agents/agentscope_agent/agent.py +1 -0
  2. agentscope_runtime/engine/agents/agno_agent.py +1 -0
  3. agentscope_runtime/engine/agents/autogen_agent.py +245 -0
  4. agentscope_runtime/engine/schemas/agent_schemas.py +1 -1
  5. agentscope_runtime/engine/services/context_manager.py +28 -1
  6. agentscope_runtime/engine/services/memory_service.py +2 -2
  7. agentscope_runtime/engine/services/rag_service.py +101 -0
  8. agentscope_runtime/engine/services/redis_memory_service.py +187 -0
  9. agentscope_runtime/engine/services/redis_session_history_service.py +155 -0
  10. agentscope_runtime/sandbox/box/training_box/env_service.py +1 -1
  11. agentscope_runtime/sandbox/box/training_box/environments/bfcl/bfcl_dataprocess.py +216 -0
  12. agentscope_runtime/sandbox/box/training_box/environments/bfcl/bfcl_env.py +380 -0
  13. agentscope_runtime/sandbox/box/training_box/environments/bfcl/env_handler.py +934 -0
  14. agentscope_runtime/sandbox/box/training_box/training_box.py +139 -9
  15. agentscope_runtime/sandbox/build.py +1 -1
  16. agentscope_runtime/sandbox/custom/custom_sandbox.py +0 -1
  17. agentscope_runtime/sandbox/custom/example.py +0 -1
  18. agentscope_runtime/sandbox/enums.py +2 -0
  19. agentscope_runtime/sandbox/manager/container_clients/__init__.py +2 -0
  20. agentscope_runtime/sandbox/manager/container_clients/docker_client.py +263 -11
  21. agentscope_runtime/sandbox/manager/container_clients/kubernetes_client.py +605 -0
  22. agentscope_runtime/sandbox/manager/sandbox_manager.py +112 -113
  23. agentscope_runtime/sandbox/manager/server/app.py +96 -28
  24. agentscope_runtime/sandbox/manager/server/config.py +28 -16
  25. agentscope_runtime/sandbox/model/__init__.py +1 -5
  26. agentscope_runtime/sandbox/model/container.py +3 -1
  27. agentscope_runtime/sandbox/model/manager_config.py +21 -15
  28. agentscope_runtime/sandbox/tools/tool.py +111 -0
  29. agentscope_runtime/version.py +1 -1
  30. {agentscope_runtime-0.1.0.dist-info → agentscope_runtime-0.1.2.dist-info}/METADATA +79 -13
  31. {agentscope_runtime-0.1.0.dist-info → agentscope_runtime-0.1.2.dist-info}/RECORD +35 -28
  32. agentscope_runtime/sandbox/manager/utils.py +0 -78
  33. {agentscope_runtime-0.1.0.dist-info → agentscope_runtime-0.1.2.dist-info}/WHEEL +0 -0
  34. {agentscope_runtime-0.1.0.dist-info → agentscope_runtime-0.1.2.dist-info}/entry_points.txt +0 -0
  35. {agentscope_runtime-0.1.0.dist-info → agentscope_runtime-0.1.2.dist-info}/licenses/LICENSE +0 -0
  36. {agentscope_runtime-0.1.0.dist-info → agentscope_runtime-0.1.2.dist-info}/top_level.txt +0 -0
agentscope_runtime/sandbox/manager/container_clients/kubernetes_client.py (new file)
@@ -0,0 +1,605 @@
+ # -*- coding: utf-8 -*-
+ # pylint: disable=too-many-branches
+ import os
+ import time
+ import hashlib
+ import traceback
+ import logging
+
+ from kubernetes import client
+ from kubernetes import config as k8s_config
+ from kubernetes.client.rest import ApiException
+
+ from .base_client import BaseClient
+
+ logger = logging.getLogger(__name__)
+
+
+ class KubernetesClient(BaseClient):
+     def __init__(self, config=None):
+         self.config = config
+         namespace = self.config.k8s_namespace
+         kubeconfig = self.config.kubeconfig_path
+         try:
+             if kubeconfig:
+                 k8s_config.load_kube_config(config_file=kubeconfig)
+             else:
+                 # Try to load in-cluster config first, then fall back to
+                 # kubeconfig
+                 try:
+                     k8s_config.load_incluster_config()
+                 except k8s_config.ConfigException:
+                     k8s_config.load_kube_config()
+             self.v1 = client.CoreV1Api()
+             self.namespace = namespace
+             # Test connection
+             self.v1.list_namespace()
+             logger.debug("Kubernetes client initialized successfully")
+         except Exception as e:
+             raise RuntimeError(
+                 f"Kubernetes client initialization failed: {str(e)}\n"
+                 "Solutions:\n"
+                 "• Ensure kubectl is configured\n"
+                 "• Check kubeconfig file permissions\n"
+                 "• Verify cluster connectivity\n"
+                 "• For in-cluster: ensure proper RBAC permissions",
+             ) from e
+
+     def _parse_port_spec(self, port_spec):
+         """
+         Parse port specification.
+         - "80/tcp" -> {"port": 80, "protocol": "TCP"}
+         - "80" -> {"port": 80, "protocol": "TCP"}
+         - 80 -> {"port": 80, "protocol": "TCP"}
+         """
+         try:
+             if isinstance(port_spec, int):
+                 return {"port": port_spec, "protocol": "TCP"}
+
+             if isinstance(port_spec, str):
+                 if "/" in port_spec:
+                     port_str, protocol = port_spec.split("/", 1)
+                 else:
+                     port_str = port_spec
+                     protocol = "TCP"
+
+                 port = int(port_str)
+                 protocol = protocol.upper()
+
+                 return {"port": port, "protocol": protocol}
+
+             # Log a warning if the port_spec is neither int nor str
+             logger.warning(f"Unsupported port specification: {port_spec}")
+             return None
+
+         except ValueError as e:
+             logger.error(f"Failed to parse port spec '{port_spec}': {e}")
+             return None
+
+     def _create_pod_spec(
+         self,
+         image,
+         name,
+         ports=None,
+         volumes=None,
+         environment=None,
+         runtime_config=None,
+     ):
+         """Create a Kubernetes Pod specification."""
+         if runtime_config is None:
+             runtime_config = {}
+
+         container_name = name or "main-container"
+
+         # Container specification
+         # TODO: use image from docker registry first
+         container = client.V1Container(
+             name=container_name,
+             image=f"agentscope-registry.ap-southeast-1.cr.aliyuncs.com"
+             f"/{image}",
+             image_pull_policy=runtime_config.get(
+                 "image_pull_policy",
+                 "IfNotPresent",
+             ),
+         )
+
+         # Configure ports
+         if ports:
+             container_ports = []
+             for port_spec in ports:
+                 port_info = self._parse_port_spec(port_spec)
+                 if port_info:
+                     container_ports.append(
+                         client.V1ContainerPort(
+                             container_port=port_info["port"],
+                             protocol=port_info["protocol"],
+                         ),
+                     )
+             if container_ports:
+                 container.ports = container_ports
+
+         # Configure environment variables
+         if environment:
+             env_vars = []
+             for key, value in environment.items():
+                 env_vars.append(client.V1EnvVar(name=key, value=str(value)))
+             container.env = env_vars
+
+         # Configure volume mounts and volumes
+         volume_mounts = []
+         pod_volumes = []
+         if volumes:
+             for volume_idx, (host_path, mount_info) in enumerate(
+                 volumes.items(),
+             ):
+                 if isinstance(mount_info, dict):
+                     container_path = mount_info["bind"]
+                     mode = mount_info.get("mode", "rw")
+                 else:
+                     container_path = mount_info
+                     mode = "rw"
+                 volume_name = f"vol-{volume_idx}"
+
+                 # Create volume mount
+                 volume_mounts.append(
+                     client.V1VolumeMount(
+                         name=volume_name,
+                         mount_path=container_path,
+                         read_only=(mode == "ro"),
+                     ),
+                 )
+                 # Create host path volume
+                 pod_volumes.append(
+                     client.V1Volume(
+                         name=volume_name,
+                         host_path=client.V1HostPathVolumeSource(
+                             path=host_path,
+                         ),
+                     ),
+                 )
+
+         if volume_mounts:
+             container.volume_mounts = volume_mounts
+
+         # Apply runtime config to container
+         if "resources" in runtime_config:
+             container.resources = client.V1ResourceRequirements(
+                 **runtime_config["resources"],
+             )
+
+         if "security_context" in runtime_config:
+             container.security_context = client.V1SecurityContext(
+                 **runtime_config["security_context"],
+             )
+
+         # Pod specification
+         pod_spec = client.V1PodSpec(
+             containers=[container],
+             restart_policy=runtime_config.get("restart_policy", "Never"),
+         )
+
+         if pod_volumes:
+             pod_spec.volumes = pod_volumes
+
+         if "node_selector" in runtime_config:
+             pod_spec.node_selector = runtime_config["node_selector"]
+
+         if "tolerations" in runtime_config:
+             pod_spec.tolerations = runtime_config["tolerations"]
+
+         # Handle image pull secrets (for ACR or other private registries)
+         image_pull_secrets = runtime_config.get("image_pull_secrets", [])
+         if image_pull_secrets:
+             secrets = []
+             for secret_name in image_pull_secrets:
+                 secrets.append(client.V1LocalObjectReference(name=secret_name))
+             pod_spec.image_pull_secrets = secrets
+
+         return pod_spec
+
+     def create(
+         self,
+         image,
+         name=None,
+         ports=None,
+         volumes=None,
+         environment=None,
+         runtime_config=None,
+     ):
+         """Create a new Kubernetes Pod."""
+         if not name:
+             name = f"pod-{hashlib.md5(image.encode()).hexdigest()[:8]}"
+         try:
+             # Create pod specification
+             pod_spec = self._create_pod_spec(
+                 image,
+                 name,
+                 ports,
+                 volumes,
+                 environment,
+                 runtime_config,
+             )
+             # Create pod metadata
+             metadata = client.V1ObjectMeta(
+                 name=name,
+                 namespace=self.namespace,
+                 labels={
+                     "created-by": "kubernetes-client",
+                     "app": name,
+                 },
+             )
+
+             # Create pod object
+             pod = client.V1Pod(
+                 api_version="v1",
+                 kind="Pod",
+                 metadata=metadata,
+                 spec=pod_spec,
+             )
+             # Create the pod
+             self.v1.create_namespaced_pod(
+                 namespace=self.namespace,
+                 body=pod,
+             )
+             logger.debug(
+                 f"Pod '{name}' created successfully in namespace "
+                 f"'{self.namespace}'",
+             )
+
+             exposed_ports = []
+             pod_node_ip = "localhost"
+             # Auto-create services for exposed ports (like Docker's port
+             # mapping)
+             if ports:
+                 parsed_ports = []
+                 for port_spec in ports:
+                     port_info = self._parse_port_spec(port_spec)
+                     if port_info:
+                         parsed_ports.append(port_info)
+
+                 if parsed_ports:
+                     service_created = self._create_multi_port_service(
+                         name,
+                         parsed_ports,
+                     )
+                     if service_created:
+                         (
+                             exposed_ports,
+                             pod_node_ip,
+                         ) = self._get_service_node_ports(name)
+                 logger.debug(
+                     f"Pod '{name}' created with exposed ports: {exposed_ports}",
+                 )
+
+             if not self.wait_for_pod_ready(name, timeout=60):
+                 logger.error(f"Pod '{name}' failed to become ready")
+                 return None, None, None
+
+             return name, exposed_ports, pod_node_ip
+         except Exception as e:
+             logger.error(f"An error occurred: {e}, {traceback.format_exc()}")
+             return None, None, None
+
+     def start(self, container_id):
+         """
+         Start a Kubernetes Pod.
+         Note: Pods start automatically upon creation in Kubernetes.
+         This method verifies the pod is running or can be started.
+         """
+         try:
+             pod = self.v1.read_namespaced_pod(
+                 name=container_id,
+                 namespace=self.namespace,
+             )
+
+             current_phase = pod.status.phase
+             logger.debug(
+                 f"Pod '{container_id}' current phase: {current_phase}",
+             )
+
+             if current_phase in ["Running", "Pending"]:
+                 return True
+             elif current_phase in ["Failed", "Succeeded"]:
+                 logger.warning(
+                     f"Pod '{container_id}' is in '{current_phase}' state and "
+                     f"cannot be restarted. Consider recreating it.",
+                 )
+                 return False
+             else:
+                 logger.debug(f"Pod '{container_id}' status: {current_phase}")
+                 return True
+         except ApiException as e:
+             if e.status == 404:
+                 logger.error(f"Pod '{container_id}' not found")
+             else:
+                 logger.error(f"Failed to check pod status: {e.reason}")
+             return False
+         except Exception as e:
+             logger.error(f"An error occurred: {e}, {traceback.format_exc()}")
+             return False
+
+     def stop(self, container_id, timeout=None):
+         """Stop a Kubernetes Pod by deleting it gracefully."""
+         try:
+             grace_period = timeout if timeout else 30
+             delete_options = client.V1DeleteOptions(
+                 grace_period_seconds=grace_period,
+             )
+             self.v1.delete_namespaced_pod(
+                 name=container_id,
+                 namespace=self.namespace,
+                 body=delete_options,
+             )
+             logger.debug(
+                 f"Pod '{container_id}' deletion initiated with"
+                 f" {grace_period}s grace period",
+             )
+             return True
+         except ApiException as e:
+             if e.status == 404:
+                 logger.warning(f"Pod '{container_id}' not found")
+                 return True
+             logger.error(f"Failed to delete pod: {e.reason}")
+             return False
+         except Exception as e:
+             logger.error(f"An error occurred: {e}, {traceback.format_exc()}")
+             return False
+
+     def remove(self, container_id, force=False):
+         """Remove a Kubernetes Pod."""
+         try:
+             # Remove all associated services first
+             self._remove_pod_services(container_id)
+
+             delete_options = client.V1DeleteOptions()
+
+             if force:
+                 delete_options.grace_period_seconds = 0
+                 delete_options.propagation_policy = "Background"
+             self.v1.delete_namespaced_pod(
+                 name=container_id,
+                 namespace=self.namespace,
+                 body=delete_options,
+             )
+             logger.debug(
+                 f"Pod '{container_id}' removed"
+                 f" {'forcefully' if force else 'gracefully'}",
+             )
+             return True
+         except ApiException as e:
+             if e.status == 404:
+                 logger.warning(f"Pod '{container_id}' not found")
+                 return True
+             logger.error(f"Failed to remove pod: {e.reason}")
+             return False
+         except Exception as e:
+             logger.error(f"An error occurred: {e}, {traceback.format_exc()}")
+             return False
+
+     def _remove_pod_services(self, pod_name):
+         """Remove the service associated with a pod"""
+         service_name = f"{pod_name}-service"
+         try:
+             self.v1.delete_namespaced_service(
+                 name=service_name,
+                 namespace=self.namespace,
+             )
+             logger.debug(f"Removed service {service_name}")
+         except client.ApiException as e:
+             if e.status == 404:
+                 logger.debug(
+                     f"Service {service_name} not found (already removed)",
+                 )
+             else:
+                 logger.warning(f"Failed to remove service {service_name}: {e}")
+         except Exception as e:
+             logger.error(f"Failed to remove service for pod {pod_name}: {e}")
+
+     def inspect(self, container_id):
+         """Inspect a Kubernetes Pod."""
+         try:
+             pod = self.v1.read_namespaced_pod(
+                 name=container_id,
+                 namespace=self.namespace,
+             )
+             return pod.to_dict()
+         except ApiException as e:
+             if e.status == 404:
+                 logger.warning(f"Pod '{container_id}' not found")
+             else:
+                 logger.error(f"Failed to inspect pod: {e.reason}")
+             return None
+         except Exception as e:
+             logger.error(f"An error occurred: {e}, {traceback.format_exc()}")
+             return None
+
+     def get_status(self, container_id):
+         """Get the current status of the specified pod."""
+         pod_info = self.inspect(container_id)
+         if pod_info and "status" in pod_info:
+             return pod_info["status"]["phase"].lower()
+         return None
+
+     def get_logs(
+         self,
+         container_id,
+         container_name=None,
+         tail_lines=None,
+         follow=False,
+     ):
+         """Get logs from a pod."""
+         try:
+             logs = self.v1.read_namespaced_pod_log(
+                 name=container_id,
+                 namespace=self.namespace,
+                 container=container_name,
+                 tail_lines=tail_lines,
+                 follow=follow,
+             )
+             return logs
+         except ApiException as e:
+             logger.error(
+                 f"Failed to get logs from pod '{container_id}': {e.reason}",
+             )
+             return None
+         except Exception as e:
+             logger.error(f"An error occurred: {e}, {traceback.format_exc()}")
+             return None
+
+     def list_pods(self, label_selector=None):
+         """List pods in the namespace."""
+         try:
+             pods = self.v1.list_namespaced_pod(
+                 namespace=self.namespace,
+                 label_selector=label_selector,
+             )
+             return [pod.metadata.name for pod in pods.items]
+         except ApiException as e:
+             logger.error(f"Failed to list pods: {e.reason}")
+             return []
+         except Exception as e:
+             logger.error(f"An error occurred: {e}, {traceback.format_exc()}")
+             return []
+
+     def wait_for_pod_ready(self, container_id, timeout=300):
+         """Wait for a pod to be ready."""
+         start_time = time.time()
+         while time.time() - start_time < timeout:
+             try:
+                 pod = self.v1.read_namespaced_pod(
+                     name=container_id,
+                     namespace=self.namespace,
+                 )
+                 if pod.status.phase == "Running":
+                     # Check if all containers are ready
+                     if pod.status.container_statuses:
+                         all_ready = all(
+                             container.ready
+                             for container in pod.status.container_statuses
+                         )
+                         if all_ready:
+                             return True
+                 elif pod.status.phase in ["Failed", "Succeeded"]:
+                     return False
+                 time.sleep(2)
+             except ApiException as e:
+                 if e.status == 404:
+                     return False
+                 time.sleep(2)
+         return False
+
+     def _create_multi_port_service(self, pod_name, port_list):
+         """Create a single service with multiple ports for the pod."""
+         try:
+             service_name = f"{pod_name}-service"
+             selector = {"app": pod_name}
+
+             # Construct multi-port configuration
+             service_ports = []
+             for port_info in port_list:
+                 port = port_info["port"]
+                 protocol = port_info["protocol"]
+                 service_ports.append(
+                     client.V1ServicePort(
+                         name=f"port-{port}",  # Each port needs a unique name
+                         port=port,
+                         target_port=port,
+                         protocol=protocol,
+                     ),
+                 )
+
+             service_spec = client.V1ServiceSpec(
+                 selector=selector,
+                 ports=service_ports,
+                 type="NodePort",
+             )
+
+             service = client.V1Service(
+                 api_version="v1",
+                 kind="Service",
+                 metadata=client.V1ObjectMeta(
+                     name=service_name,
+                     namespace=self.namespace,
+                 ),
+                 spec=service_spec,
+             )
+
+             # Create the service in the specified namespace
+             self.v1.create_namespaced_service(
+                 namespace=self.namespace,
+                 body=service,
+             )
+
+             # Wait for service to be ready
+             time.sleep(1)
+             return True
+         except Exception as e:
+             logger.error(
+                 f"Failed to create multi-port service for pod {pod_name}: "
+                 f"{e}, {traceback.format_exc()}",
+             )
+             return False
+
+     def _get_service_node_ports(self, pod_name):
+         """Get the NodePort for a service"""
+         try:
+             service_name = f"{pod_name}-service"
+             service_info = self.v1.read_namespaced_service(
+                 name=service_name,
+                 namespace=self.namespace,
+             )
+
+             node_ports = []
+             pod_node_ip = self._get_pod_node_ip(pod_name)
+
+             for port in service_info.spec.ports:
+                 if port.node_port:
+                     node_ports.append(port.node_port)
+
+             return node_ports, pod_node_ip
+         except Exception as e:
+             logger.error(f"Failed to get node port: {e}")
+             return None
+
+     def _get_pod_node_ip(self, pod_name):
+         """Get the IP of the node where the pod is running"""
+
+         # Check if we are running in Colima, where pod runs in VM
+         docker_host = os.getenv("DOCKER_HOST", "")
+         if "colima" in docker_host.lower():
+             return "localhost"
+
+         try:
+             pod = self.v1.read_namespaced_pod(
+                 name=pod_name,
+                 namespace=self.namespace,
+             )
+
+             node_name = pod.spec.node_name
+             if not node_name:
+                 logger.warning(
+                     f"Pod {pod_name} is not scheduled to any node yet",
+                 )
+                 return None
+
+             node = self.v1.read_node(name=node_name)
+
+             external_ip = None
+             internal_ip = None
+
+             for address in node.status.addresses:
+                 if address.type == "ExternalIP":
+                     external_ip = address.address
+                 elif address.type == "InternalIP":
+                     internal_ip = address.address
+
+             result_ip = external_ip or internal_ip
+             logger.debug(
+                 f"Using IP: {result_ip} (external: {external_ip}, internal:"
+                 f" {internal_ip})",
+             )
+             return result_ip
+
+         except Exception as e:
+             logger.error(f"Failed to get pod node IP: {e}")
+             return None
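
For orientation, the sketch below shows how the new KubernetesClient might be exercised on its own. It is a minimal, hypothetical example: only the module path, class name, and method signatures come from the diff above; the SimpleNamespace config, image name, and port values are illustrative stand-ins for whatever the sandbox manager actually passes in.

# Hypothetical usage sketch of the KubernetesClient added in 0.1.2.
# The constructor only reads config.k8s_namespace and config.kubeconfig_path,
# so a SimpleNamespace stands in for the real manager config here.
from types import SimpleNamespace

from agentscope_runtime.sandbox.manager.container_clients.kubernetes_client import (
    KubernetesClient,
)

cfg = SimpleNamespace(k8s_namespace="default", kubeconfig_path=None)
k8s = KubernetesClient(config=cfg)

# create() returns (pod_name, node_ports, node_ip); the client prefixes the
# image with its ACR registry host and exposes ports through a NodePort
# Service named "<pod_name>-service". The image tag below is illustrative.
pod_name, node_ports, node_ip = k8s.create(
    image="some-sandbox-image:latest",
    ports=["80/tcp"],
    environment={"EXAMPLE_VAR": "1"},
    runtime_config={"restart_policy": "Never"},
)

print(k8s.get_status(pod_name))            # pod phase, lower-cased, e.g. "running"
print(k8s.get_logs(pod_name, tail_lines=20))
k8s.remove(pod_name, force=True)           # also deletes "<pod_name>-service"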