vercel-ai-sdk 0.0.1.dev1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,128 @@
1
+ Metadata-Version: 2.3
2
+ Name: vercel-ai-sdk
3
+ Version: 0.0.1.dev1
4
+ Summary: The AI Toolkit for Python
5
+ Author: Andrey Buzin
6
+ Author-email: Andrey Buzin <andrey.buzin@vercel.com>
7
+ Requires-Dist: anthropic>=0.40.0
8
+ Requires-Dist: httpx>=0.28.1
9
+ Requires-Dist: mcp>=1.18.0
10
+ Requires-Dist: openai>=2.14.0
11
+ Requires-Dist: pydantic>=2.12.5
12
+ Requires-Python: >=3.12
13
+ Description-Content-Type: text/markdown
14
+
15
+ # ai-sdk
16
+
17
+ A Python version of the [AI SDK](https://ai-sdk.dev/).
18
+
19
+ ## Quick Start
20
+
21
+ ```bash
22
+ uv add vercel-ai-sdk
23
+ ```
24
+
25
+ ```python
26
+ import ai_sdk as ai
27
+
28
+ llm = ai.openai.OpenAIModel(model="gpt-4", api_key="...")
29
+
30
+ async for msg in ai.execute(my_agent, llm, "Hello"):
31
+ print(msg.text)
32
+ ```
33
+
34
+ ## API Reference
35
+
36
+ ### `@ai.tool`
37
+
38
+ Decorator that turns an async function into a tool. Parameters are auto-extracted from type hints and docstrings become the tool description.
39
+
40
+ ```python
41
+ @ai.tool
42
+ async def get_weather(city: str, units: str = "celsius") -> str:
43
+ """Get current weather for a city."""
44
+     return f"22°C in {city}"
45
+ ```
46
+
47
+ ### `ai.stream_text(llm, messages, label=None)`
48
+
49
+ Streams text from the LLM without tool support. Returns a `Stream` that can be awaited or async-iterated.
50
+
51
+ ```python
52
+ result = await ai.stream_text(llm, messages)
53
+ # or iterate for real-time updates
54
+ async for msg in ai.stream_text(llm, messages):
55
+ print(msg.text_delta, end="")
56
+ ```
57
+
58
+ ### `ai.stream_loop(llm, messages, tools, label=None)`
59
+
60
+ Streams LLM responses and automatically executes tool calls in a loop until complete. This is the main function for agentic workflows.
61
+
62
+ ```python
63
+ result = await ai.stream_loop(llm, messages, tools=[get_weather])
64
+ ```
65
+
66
+ ### `ai.execute(root_fn, *args)`
67
+
68
+ Runs an agent function and yields all messages from nested streams. Use this as the top-level entry point for any agent workflow.
69
+
70
+ ```python
71
+ async def my_agent(llm, query):
72
+ return await ai.stream_loop(llm, messages, tools)
73
+
74
+ async for msg in ai.execute(my_agent, llm, "What's the weather?"):
75
+ print(msg)
76
+ ```
77
+
78
+ ### `ai.Message`
79
+
80
+ Universal message type with `role` ("user", "assistant", "system") and `parts`. Access text via `msg.text`. The `label` field tags messages for multi-agent routing.
81
+
82
+ ### `ai.TextPart`, `ai.ToolPart`, `ai.ReasoningPart`
83
+
84
+ Message parts. `TextPart` holds text content. `ToolPart` contains tool invocation details and results. `ReasoningPart` holds model reasoning/thinking output.
85
+
86
+ ## MCP Integration
87
+
88
+ ### `ai.mcp.get_http_tools(url, headers={}, tool_prefix="")`
89
+
90
+ Connects to an MCP server over HTTP and returns tools. Optional `tool_prefix` namespaces tool names.
91
+
92
+ ```python
93
+ tools = await ai.mcp.get_http_tools(
94
+ "https://mcp.example.com/mcp",
95
+ headers={"API_KEY": "..."},
96
+ tool_prefix="docs"
97
+ )
98
+ ```
99
+
100
+ ### `ai.mcp.get_stdio_tools(cmd, *args, tool_prefix="")`
101
+
102
+ Spawns an MCP server process via stdio. Useful for local MCP servers like npx packages.
103
+
104
+ ```python
105
+ tools = await ai.mcp.get_stdio_tools(
106
+ "npx", "-y", "@upstash/context7-mcp",
107
+ "--api-key", os.environ["CONTEXT7_API_KEY"],
108
+ tool_prefix="context7"
109
+ )
110
+ ```
111
+
112
+ ## Multi-Agent Example
113
+
114
+ ```python
115
+ async def multiagent(llm, query):
116
+ # Run two agents in parallel
117
+ stream1, stream2 = await asyncio.gather(
118
+ ai.stream_loop(llm, msgs1, tools=[add_one], label="agent1"),
119
+ ai.stream_loop(llm, msgs2, tools=[multiply], label="agent2"),
120
+ )
121
+
122
+ # Combine results and summarize
123
+ combined = stream1[-1].text + stream2[-1].text
124
+ return await ai.stream_text(llm, make_messages(combined), label="summarizer")
125
+
126
+ async for msg in ai.execute(multiagent, llm, "10"):
127
+ print(f"[{msg.label}] {msg.text_delta}", end="")
128
+ ```
@@ -0,0 +1,114 @@
1
+ # ai-sdk
2
+
3
+ A Python version of the [AI SDK](https://ai-sdk.dev/).
4
+
5
+ ## Quick Start
6
+
7
+ ```bash
8
+ uv add vercel-ai-sdk
9
+ ```
10
+
11
+ ```python
12
+ import ai_sdk as ai
13
+
14
+ llm = ai.openai.OpenAIModel(model="gpt-4", api_key="...")
15
+
16
+ async for msg in ai.execute(my_agent, llm, "Hello"):
17
+ print(msg.text)
18
+ ```
19
+
20
+ ## API Reference
21
+
22
+ ### `@ai.tool`
23
+
24
+ Decorator that turns an async function into a tool. Parameters are auto-extracted from type hints and docstrings become the tool description.
25
+
26
+ ```python
27
+ @ai.tool
28
+ async def get_weather(city: str, units: str = "celsius") -> str:
29
+ """Get current weather for a city."""
30
+     return f"22°C in {city}"
31
+ ```
32
+
33
+ ### `ai.stream_text(llm, messages, label=None)`
34
+
35
+ Streams text from the LLM without tool support. Returns a `Stream` that can be awaited or async-iterated.
36
+
37
+ ```python
38
+ result = await ai.stream_text(llm, messages)
39
+ # or iterate for real-time updates
40
+ async for msg in ai.stream_text(llm, messages):
41
+ print(msg.text_delta, end="")
42
+ ```
43
+
44
+ ### `ai.stream_loop(llm, messages, tools, label=None)`
45
+
46
+ Streams LLM responses and automatically executes tool calls in a loop until complete. This is the main function for agentic workflows.
47
+
48
+ ```python
49
+ result = await ai.stream_loop(llm, messages, tools=[get_weather])
50
+ ```
51
+
52
+ ### `ai.execute(root_fn, *args)`
53
+
54
+ Runs an agent function and yields all messages from nested streams. Use this as the top-level entry point for any agent workflow.
55
+
56
+ ```python
57
+ async def my_agent(llm, query):
58
+ return await ai.stream_loop(llm, messages, tools)
59
+
60
+ async for msg in ai.execute(my_agent, llm, "What's the weather?"):
61
+ print(msg)
62
+ ```
63
+
64
+ ### `ai.Message`
65
+
66
+ Universal message type with `role` ("user", "assistant", "system") and `parts`. Access text via `msg.text`. The `label` field tags messages for multi-agent routing.
67
+
68
+ ### `ai.TextPart`, `ai.ToolPart`, `ai.ReasoningPart`
69
+
70
+ Message parts. `TextPart` holds text content. `ToolPart` contains tool invocation details and results. `ReasoningPart` holds model reasoning/thinking output.
71
+
72
+ ## MCP Integration
73
+
74
+ ### `ai.mcp.get_http_tools(url, headers={}, tool_prefix="")`
75
+
76
+ Connects to an MCP server over HTTP and returns tools. Optional `tool_prefix` namespaces tool names.
77
+
78
+ ```python
79
+ tools = await ai.mcp.get_http_tools(
80
+ "https://mcp.example.com/mcp",
81
+ headers={"API_KEY": "..."},
82
+ tool_prefix="docs"
83
+ )
84
+ ```
85
+
86
+ ### `ai.mcp.get_stdio_tools(cmd, *args, tool_prefix="")`
87
+
88
+ Spawns an MCP server process via stdio. Useful for local MCP servers like npx packages.
89
+
90
+ ```python
91
+ tools = await ai.mcp.get_stdio_tools(
92
+ "npx", "-y", "@upstash/context7-mcp",
93
+ "--api-key", os.environ["CONTEXT7_API_KEY"],
94
+ tool_prefix="context7"
95
+ )
96
+ ```
97
+
98
+ ## Multi-Agent Example
99
+
100
+ ```python
101
+ async def multiagent(llm, query):
102
+ # Run two agents in parallel
103
+ stream1, stream2 = await asyncio.gather(
104
+ ai.stream_loop(llm, msgs1, tools=[add_one], label="agent1"),
105
+ ai.stream_loop(llm, msgs2, tools=[multiply], label="agent2"),
106
+ )
107
+
108
+ # Combine results and summarize
109
+ combined = stream1[-1].text + stream2[-1].text
110
+ return await ai.stream_text(llm, make_messages(combined), label="summarizer")
111
+
112
+ async for msg in ai.execute(multiagent, llm, "10"):
113
+ print(f"[{msg.label}] {msg.text_delta}", end="")
114
+ ```
@@ -0,0 +1,26 @@
1
+ [project]
2
+ name = "vercel-ai-sdk"
3
+ version = "0.0.1.dev1"
4
+ description = "The AI Toolkit for Python"
5
+ readme = "README.md"
6
+ authors = [
7
+ { name = "Andrey Buzin", email = "andrey.buzin@vercel.com" }
8
+ ]
9
+ requires-python = ">=3.12"
10
+ dependencies = [
11
+ "anthropic>=0.40.0",
12
+ "httpx>=0.28.1",
13
+ "mcp>=1.18.0",
14
+ "openai>=2.14.0",
15
+ "pydantic>=2.12.5",
16
+ ]
17
+
18
+ [build-system]
19
+ requires = ["uv_build>=0.9.21,<0.10.0"]
20
+ build-backend = "uv_build"
21
+
22
+ [dependency-groups]
23
+ dev = [
24
+ "python-dotenv>=1.2.1",
25
+ "rich>=14.2.0",
26
+ ]
@@ -0,0 +1,42 @@
1
+ from . import anthropic, mcp, openai, ai_sdk_ui
2
+
3
+ # Re-export core types for convenient access
4
+ from .core.messages import (
5
+ Message,
6
+ Part,
7
+ TextPart,
8
+ ToolPart,
9
+ ToolDelta,
10
+ ReasoningPart,
11
+ )
12
+ from .core.tools import Tool, tool
13
+ from .core.runtime import (
14
+ LanguageModel,
15
+ Stream,
16
+ execute,
17
+ stream_loop,
18
+ stream_text,
19
+ )
20
+
21
+ __all__ = [
22
+ # Types
23
+ "Message",
24
+ "Part",
25
+ "TextPart",
26
+ "ToolPart",
27
+ "ToolDelta",
28
+ "ReasoningPart",
29
+ "Tool",
30
+ "Stream",
31
+ "LanguageModel",
32
+ # Functions
33
+ "tool",
34
+ "execute",
35
+ "stream_loop",
36
+ "stream_text",
37
+ # Submodules
38
+ "anthropic",
39
+ "mcp",
40
+ "openai",
41
+ "ai_sdk_ui",
42
+ ]
@@ -0,0 +1,26 @@
1
+ from .adapter import (
2
+ # Internal → UI stream conversion
3
+ to_ui_message_stream,
4
+ to_sse_stream,
5
+ # UI → Internal message conversion
6
+ to_messages,
7
+ UIMessage,
8
+ UIMessagePart,
9
+ UITextPart,
10
+ UIReasoningPart,
11
+ UIToolInvocationPart,
12
+ # Headers for streaming responses
13
+ UI_MESSAGE_STREAM_HEADERS,
14
+ )
15
+
16
+ __all__ = [
17
+ "to_ui_message_stream",
18
+ "to_sse_stream",
19
+ "to_messages",
20
+ "UIMessage",
21
+ "UIMessagePart",
22
+ "UITextPart",
23
+ "UIReasoningPart",
24
+ "UIToolInvocationPart",
25
+ "UI_MESSAGE_STREAM_HEADERS",
26
+ ]