dais-sdk 0.6.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,94 @@
1
+ from dataclasses import replace
2
+ from mcp.types import TextContent, ImageContent, AudioContent, ResourceLink, EmbeddedResource, TextResourceContents, BlobResourceContents
3
+ from .toolset import Toolset
4
+ from ...mcp_client.base_mcp_client import McpClient, Tool, ToolResult
5
+ from ...mcp_client.local_mcp_client import LocalMcpClient, LocalServerParams
6
+ from ...mcp_client.remote_mcp_client import RemoteMcpClient, RemoteServerParams, OAuthParams
7
+ from ...types.tool import ToolDef
8
+ from ...logger import logger
9
+
10
+ class McpToolset(Toolset):
11
+ def __init__(self, client: McpClient):
12
+ self._client = client
13
+ self._tools_cache: list[ToolDef] | None = None
14
+
15
+ def _mcp_tool_to_tool_def(self, mcp_tool: Tool) -> ToolDef:
16
+ async def wrapper(**kwargs) -> str:
17
+ result = await self._client.call_tool(mcp_tool.name, kwargs)
18
+ return self._format_tool_result(result)
19
+
20
+ tool_def = ToolDef(
21
+ name=mcp_tool.name,
22
+ description=mcp_tool.description or f"MCP tool: {mcp_tool.name}",
23
+ parameters=mcp_tool.inputSchema,
24
+ execute=wrapper
25
+ )
26
+ return tool_def
27
+
28
+ def _format_tool_result(self, result: ToolResult) -> str:
29
+ is_error, content = result.is_error, result.content
30
+ content_parts = []
31
+
32
+ if is_error:
33
+ content_parts.append("Error executing tool:")
34
+
35
+ for block in content:
36
+ match block:
37
+ case TextContent():
38
+ content_parts.append(block.text)
39
+ case ImageContent():
40
+ content_parts.append(f"[Generated Image: {block.mimeType}]")
41
+ case AudioContent():
42
+ content_parts.append(f"[Generated Audio: {block.mimeType}]")
43
+ case ResourceLink():
44
+ details = [f"Resource Reference: {block.uri}"]
45
+ if block.mimeType: details.append(f"Type: {block.mimeType}")
46
+ if block.size: details.append(f"Size: {block.size} bytes")
47
+ if block.description: details.append(f"Description: {block.description}")
48
+ content_parts.append("\n".join(details))
49
+ case EmbeddedResource():
50
+ resource = block.resource
51
+ header = f"Resource ({resource.uri}):"
52
+ if isinstance(resource, TextResourceContents):
53
+ content_parts.append(f"{header}\n{resource.text}")
54
+ elif isinstance(resource, BlobResourceContents):
55
+ content_parts.append(f"{header}\n[Binary data: {resource.mimeType}]")
56
+ case _:
57
+ logger.warning(f"Unknown tool result block type: {type(block)}")
58
+
59
+ return "\n\n".join(content_parts)
60
+
61
+ async def connect(self) -> None:
62
+ await self._client.connect()
63
+ await self.refresh_tools()
64
+
65
+ async def disconnect(self) -> None:
66
+ await self._client.disconnect()
67
+ self._tools_cache = None
68
+
69
+ async def refresh_tools(self) -> None:
70
+ mcp_tools = await self._client.list_tools()
71
+ self._tools_cache = [self._mcp_tool_to_tool_def(tool)
72
+ for tool in mcp_tools]
73
+
74
+ @property
75
+ def name(self) -> str:
76
+ return self._client.name
77
+
78
+ def get_tools(self, namespaced_tool_name: bool = True) -> list[ToolDef]:
79
+ if self._tools_cache is None:
80
+ raise RuntimeError(f"Not connected to MCP server. Call await {self.__class__.__name__}(...).connect() first")
81
+ if not namespaced_tool_name:
82
+ return list(self._tools_cache)
83
+ return [replace(tool, name=self.format_tool_name(tool.name)) for tool in self._tools_cache]
84
+
85
+
86
class LocalMcpToolset(McpToolset):
    """McpToolset backed by a locally spawned MCP server process."""

    def __init__(self, name: str, params: LocalServerParams):
        super().__init__(LocalMcpClient(name, params))
90
+
91
class RemoteMcpToolset(McpToolset):
    """McpToolset backed by a remote MCP server reached over the network."""

    def __init__(self, name: str, params: RemoteServerParams):
        super().__init__(RemoteMcpClient(name, params))
@@ -0,0 +1,31 @@
1
+ import inspect
2
+ from typing import Any, Callable, TypeVar
3
+ from .toolset import Toolset
4
+ from ...types.tool import ToolDef
5
+
6
F = TypeVar("F", bound=Callable[..., Any])
# Attribute set on functions that have been marked as tools.
TOOL_FLAG = "__is_tool__"

def python_tool(func: F) -> F:
    """Decorator that flags *func* as a tool for PythonToolset discovery."""
    setattr(func, TOOL_FLAG, True)
    return func
12
+
13
class PythonToolset(Toolset):
    @property
    def name(self) -> str:
        """
        Since the usage of PythonToolset is to inherit and define methods as tools,
        the name of the toolset is the name of the subclass.
        """
        return type(self).__name__

    def get_tools(self, namespaced_tool_name: bool = True) -> list[ToolDef]:
        """Collect every bound method decorated with @python_tool as a ToolDef."""
        tools: list[ToolDef] = []
        for _, method in inspect.getmembers(self, predicate=inspect.ismethod):
            if not getattr(method, TOOL_FLAG, False):
                continue
            tool_def = ToolDef.from_tool_fn(method)
            if namespaced_tool_name:
                tool_def.name = self.format_tool_name(tool_def.name)
            tools.append(tool_def)
        return tools
@@ -0,0 +1,13 @@
1
+ from abc import ABC, abstractmethod
2
+ from ...types.tool import ToolDef
3
+
4
class Toolset(ABC):
    """Abstract collection of tools exposed under a common namespace."""

    def format_tool_name(self, tool_name: str) -> str:
        """Prefix *tool_name* with this toolset's name, e.g. "MyToolset__tool"."""
        return f"{self.name}__{tool_name}"

    @property
    @abstractmethod
    def name(self) -> str:
        """Namespace used when formatting tool names."""
        ...

    @abstractmethod
    def get_tools(self, namespaced_tool_name: bool = True) -> list[ToolDef]:
        """Return this set's tools; namespaced names by default.

        Signature aligned with the concrete implementations, which all accept
        `namespaced_tool_name` — the abstract form previously omitted it.
        """
        ...
dais_sdk/tool/utils.py ADDED
@@ -0,0 +1,11 @@
1
+ from ..types.tool import ToolFn, ToolDef, ToolLike
2
+
3
def find_tool_by_name(tools: list[ToolLike], name: str) -> ToolLike | None:
    """Return the first entry of *tools* whose name equals *name*, else None.

    A tool may be a plain callable (matched by __name__), a ToolDef
    (matched by .name), or a raw dict (matched by its "name" key).
    """
    for candidate in tools:
        if callable(candidate) and candidate.__name__ == name:
            return candidate
        if isinstance(candidate, ToolDef) and candidate.name == name:
            return candidate
        if isinstance(candidate, dict) and candidate.get("name") == name:
            return candidate
    return None
@@ -0,0 +1,20 @@
1
+ import asyncio
2
+ import queue
3
+ from collections.abc import AsyncGenerator, Generator
4
+ from .message import AssistantMessage, ToolMessage, MessageChunk
5
+
6
# --- --- --- --- --- ---

# Aggregate result of a text-generation call: assistant turns plus tool results.
GenerateTextResponse = list[AssistantMessage | ToolMessage]
# Queues delivering completed messages; None appears to act as the
# end-of-stream sentinel — confirm against the producer side.
FullMessageQueueSync = queue.Queue[AssistantMessage | ToolMessage | None]
FullMessageQueueAsync = asyncio.Queue[AssistantMessage | ToolMessage | None]
# (chunk stream, full-message queue) pairs returned by the streaming APIs.
StreamTextResponseSync = tuple[Generator[MessageChunk], FullMessageQueueSync]
StreamTextResponseAsync = tuple[AsyncGenerator[MessageChunk], FullMessageQueueAsync]

__all__ = [
    "GenerateTextResponse",
    "StreamTextResponseSync",
    "StreamTextResponseAsync",
    "FullMessageQueueSync",
    "FullMessageQueueAsync",
]
@@ -0,0 +1,27 @@
1
+ from litellm.exceptions import (
2
+ AuthenticationError,
3
+ PermissionDeniedError,
4
+ RateLimitError,
5
+ ContextWindowExceededError,
6
+ BadRequestError,
7
+ InvalidRequestError,
8
+ InternalServerError,
9
+ ServiceUnavailableError,
10
+ ContentPolicyViolationError,
11
+ APIError,
12
+ Timeout,
13
+ )
14
+
15
# Public re-exports: the LiteLLM exception types surfaced by this SDK.
__all__ = [
    "AuthenticationError",
    "PermissionDeniedError",
    "RateLimitError",
    "ContextWindowExceededError",
    "BadRequestError",
    "InvalidRequestError",
    "InternalServerError",
    "ServiceUnavailableError",
    "ContentPolicyViolationError",
    "APIError",
    "Timeout",
]
@@ -0,0 +1,211 @@
1
+ import json
2
+ import dataclasses
3
+ import uuid
4
+ from abc import ABC, abstractmethod
5
+ from typing import Any, Literal, NamedTuple, cast
6
+ from pydantic import BaseModel, ConfigDict, Field, field_validator
7
+ from litellm.types.utils import (
8
+ Message as LiteLlmMessage,
9
+ ModelResponse as LiteLlmModelResponse,
10
+ ModelResponseStream as LiteLlmModelResponseStream,
11
+ Choices as LiteLlmModelResponseChoices,
12
+ ChatCompletionAudioResponse,
13
+ ChatCompletionMessageToolCall,
14
+ ChatCompletionDeltaToolCall,
15
+ Usage as LiteLlmUsage
16
+ )
17
+ from litellm.types.llms.openai import (
18
+ AllMessageValues,
19
+ OpenAIMessageContent,
20
+ ChatCompletionAssistantToolCall,
21
+ ImageURLListItem as ChatCompletionImageURL,
22
+
23
+ ChatCompletionUserMessage,
24
+ ChatCompletionAssistantMessage,
25
+ ChatCompletionToolMessage,
26
+ ChatCompletionSystemMessage,
27
+ )
28
+ from ..logger import logger
29
+
30
class ChatMessage(BaseModel, ABC):
    """Base class for chat messages; each knows its LiteLLM wire form."""
    model_config = ConfigDict(
        arbitrary_types_allowed=True,
        validate_assignment=True,
    )
    # Unique identifier assigned at construction time.
    id: str = Field(default_factory=lambda: str(uuid.uuid4()))

    @abstractmethod
    def to_litellm_message(self) -> AllMessageValues:
        """Convert this message into a LiteLLM-compatible message dict."""
        ...
39
+
40
class UserMessage(ChatMessage):
    """A message authored by the end user."""
    content: OpenAIMessageContent
    role: Literal["user"] = "user"

    def to_litellm_message(self) -> ChatCompletionUserMessage:
        """Return the LiteLLM user-message dict."""
        return ChatCompletionUserMessage(role=self.role, content=self.content)
46
+
47
class ToolMessage(ChatMessage):
    """Outcome (result or error) of one tool call requested by the assistant."""
    tool_call_id: str
    name: str
    arguments: str  # call arguments as received from the model
    result: str | None = None
    error: str | None = None
    role: Literal["tool"] = "tool"
    metadata: dict[str, Any] = Field(default_factory=dict)

    @field_validator("result", mode="before")
    @classmethod  # pydantic v2 validators should be explicit classmethods
    def validate_result(cls, v: Any) -> Any:
        """Coerce non-string results into a JSON string."""
        if v is None: return v
        if isinstance(v, str): return v
        return json.dumps(v, ensure_ascii=False)

    def to_litellm_message(self) -> ChatCompletionToolMessage:
        """Serialize to the LiteLLM tool-message format.

        Raises:
            ValueError: if neither result nor error has been filled in yet.
        """
        if self.result is None and self.error is None:
            raise ValueError(f"ToolMessage({self.id}, {self.name}) is incomplete, "
                             "result and error cannot be both None")

        if self.error is not None:
            content = json.dumps({"error": self.error}, ensure_ascii=False)
        else:
            assert self.result is not None
            content = self.result

        return ChatCompletionToolMessage(
            role=self.role,
            content=content,
            tool_call_id=self.tool_call_id)
77
+
78
class AssistantMessage(ChatMessage):
    """A model response: text, optional reasoning, tool calls and media."""
    content: str | None = None
    reasoning_content: str | None = None
    tool_calls: list[ChatCompletionAssistantToolCall] | None = None
    audio: ChatCompletionAudioResponse | None = None
    images: list[ChatCompletionImageURL] | None = None
    usage: LiteLlmUsage | None = None
    role: Literal["assistant"] = "assistant"

    @classmethod
    def from_litellm_message(cls, response: LiteLlmModelResponse) -> "AssistantMessage":
        """Build an AssistantMessage from a non-streaming LiteLLM response.

        Only the first choice of the response is used.
        """
        choices = cast(list[LiteLlmModelResponseChoices], response.choices)
        message = choices[0].message

        tool_calls: list[ChatCompletionAssistantToolCall] | None = None
        if (message_tool_calls := message.get("tool_calls")) is not None:
            tool_calls = [ChatCompletionAssistantToolCall(
                id=tool_call.id,
                function={
                    "name": tool_call.function.name,
                    "arguments": tool_call.function.arguments,
                },
                type="function",
            ) for tool_call in cast(list[ChatCompletionMessageToolCall], message_tool_calls)]

        return cls.model_construct(
            content=message.get("content"),
            reasoning_content=message.get("reasoning_content"),
            tool_calls=tool_calls,
            audio=message.get("audio"),
            images=message.get("images"),
            usage=response.get("usage"),
        )

    def to_litellm_message(self) -> ChatCompletionAssistantMessage:
        return ChatCompletionAssistantMessage(role=self.role,
                                              content=self.content or "",
                                              reasoning_content=self.reasoning_content,
                                              tool_calls=self.tool_calls)

    def get_incomplete_tool_messages(self) -> list[ToolMessage] | None:
        """
        Get incomplete tool messages from the assistant message.
        The returned tool messages are incomplete,
        which means they only contain the tool call id, name and arguments.
        Returns None if there is no tool call in the assistant message.
        """
        if self.tool_calls is None: return None
        results: list[ToolMessage] = []
        for tool_call in self.tool_calls:
            id = tool_call.get("id")
            function = tool_call.get("function")
            # Guard BEFORE dereferencing `function`: the original read
            # function.get("name") first, which would raise AttributeError on
            # a broken tool call instead of logging and skipping it.
            if id is None or function is None:
                logger.warning(f"Broken tool call: {tool_call}")
                continue  # broken tool call
            function_name = function.get("name")
            if function_name is None:
                logger.warning(f"Broken tool call: {tool_call}")
                continue  # broken tool call
            results.append(ToolMessage(
                tool_call_id=id,
                name=function_name,
                arguments=function.get("arguments", ""),
                result=None,
                error=None))
        return results
144
+
145
class SystemMessage(ChatMessage):
    """A system-prompt message."""
    content: str
    role: Literal["system"] = "system"

    def to_litellm_message(self) -> ChatCompletionSystemMessage:
        """Return the LiteLLM system-message dict."""
        return ChatCompletionSystemMessage(role=self.role, content=self.content)
151
+
152
@dataclasses.dataclass
class TextChunk:
    """A piece of streamed assistant text."""
    content: str

@dataclasses.dataclass
class UsageChunk:
    """Token accounting attached to a stream."""
    input_tokens: int
    output_tokens: int
    total_tokens: int

@dataclasses.dataclass
class ReasoningChunk:
    """A piece of streamed reasoning text."""
    content: str
165
+
166
@dataclasses.dataclass
class AudioChunk:
    """Streamed audio payload."""
    data: ChatCompletionAudioResponse

@dataclasses.dataclass
class ImageChunk:
    """Streamed image payload(s)."""
    data: list[ChatCompletionImageURL]
173
+
174
+ @dataclasses.dataclass
175
+ class ToolCallChunk:
176
+ id: str | None
177
+ name: str | None
178
+ arguments: str
179
+ index: int
180
+
181
# Union of every streaming chunk variant a normalizer may yield.
MessageChunk = (TextChunk | UsageChunk | ReasoningChunk
                | AudioChunk | ImageChunk | ToolCallChunk)
182
+
183
def openai_chunk_normalizer(
    chunk: LiteLlmModelResponseStream
) -> list[MessageChunk]:
    """Translate one OpenAI-style stream chunk into normalized MessageChunks.

    Only the first choice's delta is inspected; an empty chunk yields [].
    """
    if not chunk.choices:
        return []

    chunks: list[MessageChunk] = []
    delta = chunk.choices[0].delta
    if delta.get("content"):
        chunks.append(TextChunk(cast(str, delta.content)))
    if delta.get("reasoning_content"):
        chunks.append(ReasoningChunk(cast(str, delta.reasoning_content)))
    if delta.get("audio"):
        chunks.append(AudioChunk(cast(ChatCompletionAudioResponse, delta.audio)))
    if delta.get("images"):
        chunks.append(ImageChunk(cast(list[ChatCompletionImageURL], delta.images)))
    if delta.get("tool_calls"):
        chunks.extend(ToolCallChunk(
            tc.id,
            tc.function.name,
            tc.function.arguments,
            tc.index,
        ) for tc in cast(list[ChatCompletionDeltaToolCall], delta.tool_calls))
    if (usage := getattr(chunk, "usage", None)) is not None:
        usage = cast(LiteLlmUsage, usage)
        chunks.append(UsageChunk(
            input_tokens=usage.prompt_tokens,
            output_tokens=usage.completion_tokens,
            total_tokens=usage.total_tokens))
    return chunks
@@ -0,0 +1,55 @@
1
+ from dataclasses import dataclass, field
2
+ from typing import Any, Literal
3
+ from .tool import ToolLike
4
+ from .message import ChatMessage
5
+ from ..tool.toolset import Toolset
6
+ from ..tool.utils import find_tool_by_name
7
+
8
@dataclass
class LlmRequestParams:
    """All parameters for a single LLM request."""
    model: str
    messages: list[ChatMessage]
    tools: list[ToolLike] | None = None
    toolsets: list[Toolset] | None = None
    tool_choice: Literal["auto", "required", "none"] = "auto"
    execute_tools: bool = False

    timeout_sec: float | None = None
    temperature: float | None = None
    max_tokens: int | None = None
    headers: dict[str, str] | None = None

    extra_args: dict[str, Any] | None = None

    # Memoized result of extract_tools(); never set by callers.
    _extract_tools_cache: list[ToolLike] | None = field(default=None, init=False, repr=False)

    def extract_tools(self) -> list[ToolLike] | None:
        """Merge `tools` and every toolset's tools into one list.

        Returns None when neither tools nor toolsets are provided.
        The merged list is cached after the first call.
        """
        if self._extract_tools_cache is not None:
            return self._extract_tools_cache

        if self.tools is None and self.toolsets is None:
            return None
        # Copy self.tools instead of aliasing it: the original code extended
        # the caller's list in place when toolsets were also present, mutating
        # self.tools (and the caller's own list) as a side effect.
        tools: list[ToolLike] = list(self.tools) if self.tools else []
        if self.toolsets:
            for toolset in self.toolsets:
                tools.extend(toolset.get_tools())

        self._extract_tools_cache = tools
        return tools

    def find_tool(self, tool_name: str) -> ToolLike | None:
        """Look up a tool by its (possibly namespaced) name, or None."""
        # extract_tools() already returns None / an empty list when no tools
        # are configured, so no separate pre-check is needed.
        if (tools := self.extract_tools()) is None:
            return None
        return find_tool_by_name(tools, tool_name)
50
+
51
class ToolDoesNotExistError(Exception):
    """Raised when a requested tool cannot be found in the request params."""

    def __init__(self, tool_name: str):
        self.tool_name = tool_name
        self.message = f"Tool \"{tool_name}\" does not exist in the request params."
        super().__init__(self.message)
dais_sdk/types/tool.py ADDED
@@ -0,0 +1,47 @@
1
+ import dataclasses
2
+ from collections.abc import Callable
3
+ from typing import Any, Awaitable
4
+ from ..logger import logger
5
+
6
+ ToolFn = Callable[..., Any] | Callable[..., Awaitable[Any]]
7
+
8
+ """
9
+ RawToolDef example:
10
+ {
11
+ "name": "get_current_weather",
12
+ "description": "Get the current weather in a given location",
13
+ "parameters": {
14
+ "type": "object",
15
+ "properties": {
16
+ "location": {
17
+ "type": "string",
18
+ "description": "The city and state, e.g. San Francisco, CA",
19
+ },
20
+ "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
21
+ },
22
+ "required": ["location"],
23
+ }
24
+ }
25
+ """
26
+ RawToolDef = dict[str, Any]
27
+
28
@dataclasses.dataclass
class ToolDef:
    """Declarative description of a callable tool."""
    name: str
    description: str
    execute: ToolFn
    parameters: dict[str, Any] | None = None
    metadata: dict[str, Any] = dataclasses.field(default_factory=dict)

    @staticmethod
    def from_tool_fn(tool_fn: ToolFn) -> "ToolDef":
        """Build a ToolDef from a plain function, using its docstring as description."""
        description = tool_fn.__doc__
        if description is None:
            logger.warning(f"Tool function {tool_fn.__name__} has no docstring, "
                           "which is recommended to be used as the tool description")
        return ToolDef(name=tool_fn.__name__,
                       description=description or "",
                       execute=tool_fn)

# Anything accepted as a tool: a ToolDef, a raw dict definition, or a function.
ToolLike = ToolDef | RawToolDef | ToolFn
@@ -0,0 +1,100 @@
1
+ Metadata-Version: 2.4
2
+ Name: dais-sdk
3
+ Version: 0.6.0
4
+ Summary: A wrapper of LiteLLM
5
+ Author-email: BHznJNs <bhznjns@outlook.com>
6
+ Requires-Python: >=3.10
7
+ Description-Content-Type: text/markdown
8
+ Classifier: Development Status :: 3 - Alpha
9
+ Classifier: Intended Audience :: Developers
10
+ Classifier: License :: OSI Approved :: MIT License
11
+ Classifier: Programming Language :: Python :: 3 :: Only
12
+ Classifier: Programming Language :: Python :: 3.10
13
+ Classifier: Programming Language :: Python :: 3.11
14
+ Classifier: Programming Language :: Python :: 3.12
15
+ License-File: LICENSE
16
+ Requires-Dist: litellm>=1.80.0
17
+ Requires-Dist: pydantic>=2.0.0
18
+ Requires-Dist: httpx==0.28.1
19
+ Requires-Dist: mcp==1.25.0
20
+ Requires-Dist: starlette==0.50.0
21
+ Requires-Dist: uvicorn==0.40.0
22
+ Requires-Dist: python-dotenv>=1.2.1 ; extra == "dev"
23
+ Requires-Dist: pytest-cov ; extra == "test"
24
+ Requires-Dist: pytest-mock ; extra == "test"
25
+ Requires-Dist: pytest-runner ; extra == "test"
26
+ Requires-Dist: pytest ; extra == "test"
27
+ Requires-Dist: pytest-github-actions-annotate-failures ; extra == "test"
28
+ Project-URL: Source, https://github.com/Dais-Project/Dais-SDK
29
+ Project-URL: Tracker, https://github.com/Dais-Project/Dais-SDK/issues
30
+ Provides-Extra: dev
31
+ Provides-Extra: test
32
+
33
+ # Dais-SDK
34
+
35
+ Dais-SDK is a wrapper of LiteLLM that provides a more intuitive API and an [AI SDK](https://github.com/vercel/ai)-like DX.
36
+
37
+ ## Installation
38
+
39
+ ```
40
+ pip install dais_sdk
41
+ ```
42
+
43
+ ## Examples
44
+
45
+ Below is a simple example of just an API call:
46
+
47
+ ```python
48
+ import os
49
+ from dotenv import load_dotenv
50
+ from dais_sdk import LLM, LlmProviders, LlmRequestParams, UserMessage
51
+
52
+ load_dotenv()
53
+
54
+ llm = LLM(provider=LlmProviders.OPENAI,
55
+ api_key=os.getenv("API_KEY", ""),
56
+ base_url=os.getenv("BASE_URL", ""))
57
+
58
+ response = llm.generate_text_sync( # sync API of generate_text
59
+ LlmRequestParams(
60
+ model="deepseek-v3.1",
61
+ messages=[UserMessage(content="Hello.")]))
62
+ print(response)
63
+ ```
64
+
65
+ Below is an example that shows automatic tool calling:
66
+
67
+ ```python
68
+ import os
69
+ from dotenv import load_dotenv
70
+ from dais_sdk import LLM, LlmProviders, LlmRequestParams, UserMessage
71
+
72
+ load_dotenv()
73
+
74
+ def example_tool():
75
+ """
76
+ This is a test tool that is used to test the tool calling functionality.
77
+ """
78
+ print("The example tool is called.")
79
+ return "Hello World"
80
+
81
+ llm = LLM(provider=LlmProviders.OPENAI,
82
+ api_key=os.getenv("API_KEY", ""),
83
+ base_url=os.getenv("BASE_URL", ""))
84
+
85
+ params = LlmRequestParams(
86
+ model="deepseek-v3.1",
87
+ tools=[example_tool],
88
+ execute_tools=True,
89
+ messages=[UserMessage(content="Please call the tool example_tool.")])
90
+
91
+ print("User: ", "Please call the tool example_tool.")
92
+ messages = llm.generate_text_sync(params)
93
+ for message in messages:
94
+ match message.role:
95
+ case "assistant":
96
+ print("Assistant: ", message.content)
97
+ case "tool":
98
+ print("Tool: ", message.result)
99
+ ```
100
+
@@ -0,0 +1,27 @@
1
+ dais_sdk/__init__.py,sha256=84ALTOJxrC_SPKPrB5d1eUhBWBoOzRD3hn-sPuk3MUc,11245
2
+ dais_sdk/debug.py,sha256=T7qIy1BeeUGlF40l9JCMMVn8pvvMJAEQeG4adQbOydA,69
3
+ dais_sdk/logger.py,sha256=99vJAQRKcu4CuHgZYAJ2zDQtGea6Bn3vJJrS-mtza7c,677
4
+ dais_sdk/param_parser.py,sha256=gXRFoCi74ZA9xdisqMPgQmWR2i6aTlPEeot78y2vyhM,1909
5
+ dais_sdk/stream.py,sha256=yu9Zvr3CUrPD9sGsjqwNXy_233Tw25Hd72kwjrraMAM,3170
6
+ dais_sdk/mcp_client/__init__.py,sha256=B86aC4nnGzwfjk7H0CZ38YlMDiEx3EIDEAgJKUnwqIU,405
7
+ dais_sdk/mcp_client/base_mcp_client.py,sha256=jWAfinzY00aL-qdNgyzYXKM-LhPHkmdqL24Uw439v-0,1055
8
+ dais_sdk/mcp_client/local_mcp_client.py,sha256=unuS-cp4zi0A2x2EYnDFzSpJUzOgVQbnEK0mLBFudy8,1871
9
+ dais_sdk/mcp_client/oauth_server.py,sha256=pELKmAjE1QoNpy3_6BPNoORwWYu0j2BYOnnVfMd0iOA,3361
10
+ dais_sdk/mcp_client/remote_mcp_client.py,sha256=JAy5zszMQeHdxsgK2seQ6kaN40kxhvnFTKSLT9uWDXU,5952
11
+ dais_sdk/tool/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
12
+ dais_sdk/tool/execute.py,sha256=Yj4GHty0aHCsB38SI8WJ5ZA_FmqaKRRUqhfsQJR9l04,2507
13
+ dais_sdk/tool/prepare.py,sha256=5UZiQc64Ao30Gh3aHqeJGeyUq7ud9A--GUU5QxYPC0M,11572
14
+ dais_sdk/tool/utils.py,sha256=A_4Jx1BacRX1KmK3t_9rDXrmSXj6v4fzNtqLsN12S0I,420
15
+ dais_sdk/tool/toolset/__init__.py,sha256=uh8hGSl1KSn0JI45fCPJnnk31hflOI0mYxC8cdbH-OQ,309
16
+ dais_sdk/tool/toolset/mcp_toolset.py,sha256=-sivM6EUiC3V3gcISnh4u5dosf-lnwVjd7YM3L0U3Ik,4056
17
+ dais_sdk/tool/toolset/python_toolset.py,sha256=JlYw49LH9xDL6tk_82EogqxW2U71hhsygamrb-lNvcE,1071
18
+ dais_sdk/tool/toolset/toolset.py,sha256=X1xqWiWov4fboWQowB_YgJ_Tc-fIDmxbP8GreTj_7ME,322
19
+ dais_sdk/types/__init__.py,sha256=-i1MYWIlUfjQIX0xZJta6phQNL44vXPSIx1eGyIYZXc,710
20
+ dais_sdk/types/exceptions.py,sha256=hIGu06htOJxfEBAHx7KTvLQr0Y8GYnBLFJFlr_IGpDs,602
21
+ dais_sdk/types/message.py,sha256=M5ZOkpF3QDtHsAduDFqO_-8NLv5z9PJvEBvnaXUz4us,7503
22
+ dais_sdk/types/request_params.py,sha256=fWo6gF_DvaThvUEIGUkJ3O7BpoJXF5Oe9WYD0Ky9iws,1895
23
+ dais_sdk/types/tool.py,sha256=s0sPwXPl-BeijWgRxgXkXguz_quzmP92sVS2aT7n_nA,1362
24
+ dais_sdk-0.6.0.dist-info/licenses/LICENSE,sha256=cTeVgQVJJcRdm1boa2P1FBnOeXfA_egV6s4PouyrCxg,1064
25
+ dais_sdk-0.6.0.dist-info/WHEEL,sha256=G2gURzTEtmeR8nrdXUJfNiB3VYVxigPQ-bEQujpNiNs,82
26
+ dais_sdk-0.6.0.dist-info/METADATA,sha256=FeP-GU4pFHaGpHedW95e-VN9FrF5SoqksH9MdaSciFo,2910
27
+ dais_sdk-0.6.0.dist-info/RECORD,,
@@ -0,0 +1,4 @@
1
+ Wheel-Version: 1.0
2
+ Generator: flit 3.12.0
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-any
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2025 BHznJNs
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.