agentscope-runtime 0.1.4__py3-none-any.whl → 0.1.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (48)
  1. agentscope_runtime/engine/agents/agentscope_agent/agent.py +56 -12
  2. agentscope_runtime/engine/agents/agentscope_agent/hooks.py +2 -1
  3. agentscope_runtime/engine/agents/agno_agent.py +11 -5
  4. agentscope_runtime/engine/agents/autogen_agent.py +10 -4
  5. agentscope_runtime/engine/agents/utils.py +53 -0
  6. agentscope_runtime/engine/services/mem0_memory_service.py +124 -0
  7. agentscope_runtime/engine/services/memory_service.py +2 -1
  8. agentscope_runtime/engine/services/redis_session_history_service.py +4 -3
  9. agentscope_runtime/engine/services/sandbox_service.py +6 -16
  10. agentscope_runtime/engine/services/session_history_service.py +4 -3
  11. agentscope_runtime/engine/services/tablestore_memory_service.py +304 -0
  12. agentscope_runtime/engine/services/tablestore_rag_service.py +143 -0
  13. agentscope_runtime/engine/services/tablestore_session_history_service.py +293 -0
  14. agentscope_runtime/engine/services/utils/__init__.py +0 -0
  15. agentscope_runtime/engine/services/utils/tablestore_service_utils.py +352 -0
  16. agentscope_runtime/sandbox/box/base/base_sandbox.py +2 -2
  17. agentscope_runtime/sandbox/box/browser/browser_sandbox.py +2 -2
  18. agentscope_runtime/sandbox/box/filesystem/filesystem_sandbox.py +2 -2
  19. agentscope_runtime/sandbox/box/training_box/training_box.py +4 -12
  20. agentscope_runtime/sandbox/build.py +37 -17
  21. agentscope_runtime/sandbox/client/http_client.py +42 -10
  22. agentscope_runtime/sandbox/client/training_client.py +0 -1
  23. agentscope_runtime/sandbox/constant.py +26 -0
  24. agentscope_runtime/sandbox/custom/custom_sandbox.py +5 -5
  25. agentscope_runtime/sandbox/custom/example.py +2 -2
  26. agentscope_runtime/sandbox/manager/collections/in_memory_mapping.py +4 -2
  27. agentscope_runtime/sandbox/manager/collections/redis_mapping.py +25 -9
  28. agentscope_runtime/sandbox/manager/container_clients/__init__.py +0 -10
  29. agentscope_runtime/sandbox/manager/container_clients/agentrun_client.py +1096 -0
  30. agentscope_runtime/sandbox/manager/container_clients/docker_client.py +25 -201
  31. agentscope_runtime/sandbox/manager/container_clients/kubernetes_client.py +1 -3
  32. agentscope_runtime/sandbox/manager/sandbox_manager.py +40 -13
  33. agentscope_runtime/sandbox/manager/server/app.py +27 -0
  34. agentscope_runtime/sandbox/manager/server/config.py +30 -2
  35. agentscope_runtime/sandbox/model/container.py +1 -1
  36. agentscope_runtime/sandbox/model/manager_config.py +93 -5
  37. agentscope_runtime/sandbox/utils.py +97 -0
  38. agentscope_runtime/version.py +1 -1
  39. {agentscope_runtime-0.1.4.dist-info → agentscope_runtime-0.1.5.dist-info}/METADATA +52 -56
  40. {agentscope_runtime-0.1.4.dist-info → agentscope_runtime-0.1.5.dist-info}/RECORD +44 -39
  41. agentscope_runtime/engine/agents/llm_agent.py +0 -51
  42. agentscope_runtime/engine/llms/__init__.py +0 -3
  43. agentscope_runtime/engine/llms/base_llm.py +0 -60
  44. agentscope_runtime/engine/llms/qwen_llm.py +0 -47
  45. {agentscope_runtime-0.1.4.dist-info → agentscope_runtime-0.1.5.dist-info}/WHEEL +0 -0
  46. {agentscope_runtime-0.1.4.dist-info → agentscope_runtime-0.1.5.dist-info}/entry_points.txt +0 -0
  47. {agentscope_runtime-0.1.4.dist-info → agentscope_runtime-0.1.5.dist-info}/licenses/LICENSE +0 -0
  48. {agentscope_runtime-0.1.4.dist-info → agentscope_runtime-0.1.5.dist-info}/top_level.txt +0 -0
@@ -1,51 +0,0 @@
1
- # -*- coding: utf-8 -*-
2
-
3
- from .base_agent import Agent
4
- from ..llms import BaseLLM
5
- from ..schemas.agent_schemas import (
6
- Message,
7
- TextContent,
8
- convert_to_openai_messages,
9
- MessageType,
10
- convert_to_openai_tools,
11
- )
12
-
13
-
14
- class LLMAgent(Agent):
15
- def __init__(
16
- self,
17
- model: BaseLLM,
18
- **kwargs,
19
- ):
20
- super().__init__(
21
- **kwargs,
22
- )
23
- self.model = model
24
-
25
- async def run_async(
26
- self,
27
- context,
28
- **kwargs,
29
- ):
30
- # agent request --> model request
31
- openai_messages = convert_to_openai_messages(context.session.messages)
32
- tools = convert_to_openai_tools(context.request.tools)
33
-
34
- # Step 3: Create initial Message
35
- message = Message(type=MessageType.MESSAGE, role="assistant")
36
- yield message.in_progress()
37
-
38
- # Step 4: LLM Content delta
39
- text_delta_content = TextContent(delta=True)
40
- async for chunk in self.model.chat_stream(openai_messages, tools):
41
- delta = chunk.choices[0].delta
42
-
43
- if delta.content:
44
- text_delta_content.text = delta.content
45
- text_delta_content = message.add_delta_content(
46
- new_content=text_delta_content,
47
- )
48
- yield text_delta_content
49
-
50
- message.completed()
51
- yield message
@@ -1,3 +0,0 @@
1
- # -*- coding: utf-8 -*-
2
- from .base_llm import BaseLLM
3
- from .qwen_llm import QwenLLM
@@ -1,60 +0,0 @@
1
- # -*- coding: utf-8 -*-
2
- from typing import AsyncGenerator
3
-
4
-
5
- class BaseLLM:
6
- base_url = None
7
-
8
- def __init__(self, model_name: str, **kwargs):
9
- self.client = None
10
- self.async_client = None
11
- self.model_name = model_name
12
- self.kwargs = kwargs
13
-
14
- def generate(self, prompt: str, **kwargs) -> str:
15
- """
16
- Generate a response from the Qwen LLM model.
17
-
18
- Args:
19
- prompt (str): The prompt to generate a response for.
20
- **kwargs: Additional keyword arguments to pass to the model.
21
-
22
- Returns:
23
- str: The generated response.
24
- """
25
- response = self.client.chat.completions.create(
26
- model=self.model_name,
27
- messages=[
28
- {"role": "system", "content": "You are a helpful assistant."},
29
- {"role": "user", "content": prompt},
30
- ],
31
- **kwargs,
32
- )
33
- return response.choices[0].message.content
34
-
35
- def chat(self, messages, **kwargs) -> str:
36
- response = self.client.chat.completions.create(
37
- model=self.model_name,
38
- messages=messages,
39
- **kwargs,
40
- )
41
- return response.choices[0].message.content
42
-
43
- async def chat_stream(
44
- self,
45
- messages,
46
- tools=None,
47
- **kwargs,
48
- ) -> AsyncGenerator[str, None]:
49
- # call model
50
- # TODO: use async client
51
- generator = self.client.chat.completions.create(
52
- model=self.model_name,
53
- messages=messages,
54
- tools=tools,
55
- stream=True,
56
- **kwargs,
57
- )
58
-
59
- for chunk in generator:
60
- yield chunk
@@ -1,47 +0,0 @@
1
- # -*- coding: utf-8 -*-
2
- import os
3
-
4
- from openai import Client, AsyncClient
5
-
6
- from .base_llm import BaseLLM
7
-
8
-
9
- class QwenLLM(BaseLLM):
10
- """
11
- QwenLLM is a class that provides a wrapper around the Qwen LLM model.
12
- """
13
-
14
- base_url = None
15
-
16
- def __init__(
17
- self,
18
- model_name: str = "qwen-turbo",
19
- api_key: str = None,
20
- **kwargs,
21
- ):
22
- """
23
- Initialize the QwenLLM class.
24
-
25
- Args:
26
- model_name (str): The name of the Qwen LLM model to use.
27
- Defaults to "qwen-turbo".
28
- api_key (str): The API key for Qwen service.
29
- If None, will read from DASHSCOPE_API_KEY environment variable.
30
- """
31
- super().__init__(model_name, **kwargs)
32
-
33
- if api_key is None:
34
- api_key = os.getenv("DASHSCOPE_API_KEY")
35
- if self.base_url is None:
36
- default_base_url = (
37
- "https://dashscope.aliyuncs.com/compatible-mode/v1"
38
- )
39
- self.base_url = os.getenv("DASHSCOPE_BASE_URL", default_base_url)
40
- self.client = Client(
41
- api_key=api_key,
42
- base_url=self.base_url,
43
- )
44
- self.async_client = AsyncClient(
45
- api_key=api_key,
46
- base_url=self.base_url,
47
- )