ai-query 1.2.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (85) hide show
  1. ai_query-1.2.0/.github/workflows/ci.yml +28 -0
  2. ai_query-1.2.0/.github/workflows/release.yml +101 -0
  3. ai_query-1.2.0/.gitignore +10 -0
  4. ai_query-1.2.0/.python-version +1 -0
  5. ai_query-1.2.0/.semversioner/0.0.1.json +10 -0
  6. ai_query-1.2.0/.semversioner/0.0.2.json +10 -0
  7. ai_query-1.2.0/.semversioner/0.0.3.json +10 -0
  8. ai_query-1.2.0/.semversioner/1.0.0.json +10 -0
  9. ai_query-1.2.0/.semversioner/1.1.0.json +10 -0
  10. ai_query-1.2.0/.semversioner/1.2.0.json +10 -0
  11. ai_query-1.2.0/CHANGELOG.md +26 -0
  12. ai_query-1.2.0/LICENSE +21 -0
  13. ai_query-1.2.0/PKG-INFO +347 -0
  14. ai_query-1.2.0/README.md +312 -0
  15. ai_query-1.2.0/ai_query/__init__.py +632 -0
  16. ai_query-1.2.0/ai_query/mcp/__init__.py +69 -0
  17. ai_query-1.2.0/ai_query/mcp/client.py +202 -0
  18. ai_query-1.2.0/ai_query/mcp/transports.py +201 -0
  19. ai_query-1.2.0/ai_query/mcp/types.py +55 -0
  20. ai_query-1.2.0/ai_query/mcp/utils.py +38 -0
  21. ai_query-1.2.0/ai_query/model.py +23 -0
  22. ai_query-1.2.0/ai_query/providers/__init__.py +19 -0
  23. ai_query-1.2.0/ai_query/providers/anthropic.py +478 -0
  24. ai_query-1.2.0/ai_query/providers/base.py +160 -0
  25. ai_query-1.2.0/ai_query/providers/google.py +500 -0
  26. ai_query-1.2.0/ai_query/providers/openai.py +470 -0
  27. ai_query-1.2.0/ai_query/types.py +662 -0
  28. ai_query-1.2.0/docs/LICENSE +21 -0
  29. ai_query-1.2.0/docs/README.md +43 -0
  30. ai_query-1.2.0/docs/api-reference/generate-text.mdx +160 -0
  31. ai_query-1.2.0/docs/api-reference/mcp.mdx +191 -0
  32. ai_query-1.2.0/docs/api-reference/providers/anthropic.mdx +69 -0
  33. ai_query-1.2.0/docs/api-reference/providers/google.mdx +72 -0
  34. ai_query-1.2.0/docs/api-reference/providers/openai.mdx +71 -0
  35. ai_query-1.2.0/docs/api-reference/stream-text.mdx +170 -0
  36. ai_query-1.2.0/docs/api-reference/types/message.mdx +107 -0
  37. ai_query-1.2.0/docs/api-reference/types/results.mdx +292 -0
  38. ai_query-1.2.0/docs/api-reference/types/stop-conditions.mdx +181 -0
  39. ai_query-1.2.0/docs/api-reference/types/tool.mdx +151 -0
  40. ai_query-1.2.0/docs/api-reference/types/usage.mdx +103 -0
  41. ai_query-1.2.0/docs/cookbook/chatbot.mdx +226 -0
  42. ai_query-1.2.0/docs/cookbook/code-executor.mdx +202 -0
  43. ai_query-1.2.0/docs/cookbook/index.mdx +31 -0
  44. ai_query-1.2.0/docs/cookbook/multimodal-chatbot.mdx +257 -0
  45. ai_query-1.2.0/docs/cookbook/task-planner.mdx +185 -0
  46. ai_query-1.2.0/docs/cookbook/wikipedia-agent.mdx +149 -0
  47. ai_query-1.2.0/docs/core/agents.mdx +221 -0
  48. ai_query-1.2.0/docs/core/conversations.mdx +172 -0
  49. ai_query-1.2.0/docs/core/generate-text.mdx +134 -0
  50. ai_query-1.2.0/docs/core/mcp.mdx +294 -0
  51. ai_query-1.2.0/docs/core/streaming.mdx +160 -0
  52. ai_query-1.2.0/docs/core/tools.mdx +180 -0
  53. ai_query-1.2.0/docs/docs.json +144 -0
  54. ai_query-1.2.0/docs/favicon.svg +1 -0
  55. ai_query-1.2.0/docs/images/checks-passed.png +0 -0
  56. ai_query-1.2.0/docs/images/hero-dark.png +0 -0
  57. ai_query-1.2.0/docs/images/hero-light.png +0 -0
  58. ai_query-1.2.0/docs/index.mdx +108 -0
  59. ai_query-1.2.0/docs/logo/dark.svg +1 -0
  60. ai_query-1.2.0/docs/logo/light.svg +1 -0
  61. ai_query-1.2.0/docs/package-lock.json +6 -0
  62. ai_query-1.2.0/docs/providers/anthropic.mdx +113 -0
  63. ai_query-1.2.0/docs/providers/google.mdx +121 -0
  64. ai_query-1.2.0/docs/providers/openai.mdx +99 -0
  65. ai_query-1.2.0/docs/providers.mdx +145 -0
  66. ai_query-1.2.0/docs/quickstart.mdx +147 -0
  67. ai_query-1.2.0/examples/README.md +94 -0
  68. ai_query-1.2.0/examples/code_executor.py +149 -0
  69. ai_query-1.2.0/examples/country_explorer.py +162 -0
  70. ai_query-1.2.0/examples/hackernews_agent.py +167 -0
  71. ai_query-1.2.0/examples/multi_provider.py +119 -0
  72. ai_query-1.2.0/examples/task_planner.py +143 -0
  73. ai_query-1.2.0/examples/unit_converter.py +176 -0
  74. ai_query-1.2.0/examples/wikipedia_agent.py +126 -0
  75. ai_query-1.2.0/main.py +72 -0
  76. ai_query-1.2.0/pyproject.toml +29 -0
  77. ai_query-1.2.0/release_notes.md +1 -0
  78. ai_query-1.2.0/tests/conftest.py +473 -0
  79. ai_query-1.2.0/tests/test_generate_text.py +619 -0
  80. ai_query-1.2.0/tests/test_mcp_integration.py +91 -0
  81. ai_query-1.2.0/tests/test_mcp_server.py +199 -0
  82. ai_query-1.2.0/tests/test_providers.py +428 -0
  83. ai_query-1.2.0/tests/test_stream_text.py +555 -0
  84. ai_query-1.2.0/tests/test_types.py +595 -0
  85. ai_query-1.2.0/uv.lock +1113 -0
@@ -0,0 +1,28 @@
1
+ name: CI
2
+
3
+ on:
4
+ pull_request:
5
+ branches: [ main ]
6
+
7
+
8
+ jobs:
9
+ build:
10
+ environment: test
11
+ runs-on: ubuntu-latest
12
+ steps:
13
+ - uses: actions/checkout@v4
14
+
15
+ - name: Install uv
16
+ uses: astral-sh/setup-uv@v5
17
+
18
+ - name: Set up Python
19
+ uses: actions/setup-python@v5
20
+ with:
21
+ python-version-file: ".python-version"
22
+
23
+ - name: Install dependencies
24
+ run: uv sync --all-extras --dev
25
+ - name: Run tests
26
+ run: uv run pytest
27
+ - name: Build package
28
+ run: uv build
@@ -0,0 +1,101 @@
1
+ name: Release
2
+
3
+ on:
4
+ push:
5
+ branches:
6
+ - main
7
+
8
+ permissions:
9
+ contents: write
10
+ id-token: write
11
+
12
+ jobs:
13
+ release:
14
+ environment: release
15
+ runs-on: ubuntu-latest
16
+ concurrency: release
17
+
18
+ steps:
19
+ - uses: actions/checkout@v4
20
+ with:
21
+ fetch-depth: 0
22
+ token: ${{ secrets.GITHUB_TOKEN }}
23
+
24
+ - name: Install uv
25
+ uses: astral-sh/setup-uv@v5
26
+
27
+ - name: Set up Python
28
+ uses: actions/setup-python@v5
29
+ with:
30
+ python-version-file: ".python-version"
31
+
32
+ - name: Configure Git
33
+ run: |
34
+ git config --global user.name "github-actions[bot]"
35
+ git config --global user.email "github-actions[bot]@users.noreply.github.com"
36
+
37
+ - name: Install dependencies
38
+ run: uv sync --all-extras --dev
39
+
40
+ - name: Check for changes
41
+ id: check_changes
42
+ run: |
43
+ if [ -z "$(ls -A .semversioner/next-release/*.json 2>/dev/null)" ]; then
44
+ echo "No changesets found, skipping release."
45
+ echo "has_changes=false" >> $GITHUB_OUTPUT
46
+ else
47
+ echo "has_changes=true" >> $GITHUB_OUTPUT
48
+ fi
49
+
50
+ - name: Run Semversioner Release
51
+ if: steps.check_changes.outputs.has_changes == 'true'
52
+ id: semversioner
53
+ run: |
54
+ # Run release to update changelog and remove changesets
55
+ uv run semversioner release
56
+
57
+ # Generate full changelog
58
+ uv run semversioner changelog > CHANGELOG.md
59
+
60
+ # Get the new version
61
+ NEW_VERSION=$(uv run semversioner current-version)
62
+ echo "New version: $NEW_VERSION"
63
+ echo "version=$NEW_VERSION" >> $GITHUB_OUTPUT
64
+
65
+ # Update pyproject.toml
66
+ # Assuming version is in the [project] section as version = "x.y.z"
67
+ sed -i "s/^version = \".*\"/version = \"$NEW_VERSION\"/" pyproject.toml
68
+
69
+ - name: Commit and Push
70
+ if: steps.check_changes.outputs.has_changes == 'true'
71
+ run: |
72
+ git add .
73
+ git commit -m "chore: release ${{ steps.semversioner.outputs.version }}"
74
+ git tag -a "v${{ steps.semversioner.outputs.version }}" -m "Release ${{ steps.semversioner.outputs.version }}"
75
+ git push origin main --follow-tags
76
+
77
+ - name: Create GitHub Release
78
+ if: steps.check_changes.outputs.has_changes == 'true'
79
+ env:
80
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
81
+ run: |
82
+ VERSION="${{ steps.semversioner.outputs.version }}"
83
+
84
+ # Extract release notes for the current version
85
+ # Using python to parse the markdown is more robust than sed
86
+ python -c "import sys, re; content=open('CHANGELOG.md').read(); match = re.search(r'## ' + re.escape('$VERSION') + r'\n(.*?)(?=\n## |\Z)', content, re.DOTALL); print(match.group(1).strip() if match else 'See CHANGELOG.md for details')" > release_notes.md
87
+
88
+ gh release create "v$VERSION" \
89
+ --title "ai-query@$VERSION" \
90
+ --notes-file release_notes.md \
91
+ --generate-notes
92
+
93
+ - name: Build
94
+ if: steps.check_changes.outputs.has_changes == 'true'
95
+ run: uv build
96
+
97
+ - name: Publish to PyPI
98
+ if: steps.check_changes.outputs.has_changes == 'true'
99
+ uses: pypa/gh-action-pypi-publish@release/v1
100
+ with:
101
+ password: ${{ secrets.PYPI_API_TOKEN }}
@@ -0,0 +1,10 @@
1
+ # Python-generated files
2
+ __pycache__/
3
+ *.py[oc]
4
+ build/
5
+ dist/
6
+ wheels/
7
+ *.egg-info
8
+
9
+ # Virtual environments
10
+ .venv
@@ -0,0 +1 @@
1
+ 3.13
@@ -0,0 +1,10 @@
1
+ {
2
+ "changes": [
3
+ {
4
+ "description": "Refactor providers to use base class fetching and fix OpenAI image fetching",
5
+ "type": "patch"
6
+ }
7
+ ],
8
+ "created_at": "2026-01-06T10:30:46+00:00",
9
+ "version": "0.0.1"
10
+ }
@@ -0,0 +1,10 @@
1
+ {
2
+ "changes": [
3
+ {
4
+ "description": "docs: add comprehensive usage guide and provider configuration details",
5
+ "type": "patch"
6
+ }
7
+ ],
8
+ "created_at": "2026-01-06T11:03:08+00:00",
9
+ "version": "0.0.2"
10
+ }
@@ -0,0 +1,10 @@
1
+ {
2
+ "changes": [
3
+ {
4
+ "description": "docs: added project urls",
5
+ "type": "patch"
6
+ }
7
+ ],
8
+ "created_at": "2026-01-06T11:21:20+00:00",
9
+ "version": "0.0.3"
10
+ }
@@ -0,0 +1,10 @@
1
+ {
2
+ "changes": [
3
+ {
4
+ "description": "Added tool calling support",
5
+ "type": "major"
6
+ }
7
+ ],
8
+ "created_at": "2026-01-06T17:04:07+00:00",
9
+ "version": "1.0.0"
10
+ }
@@ -0,0 +1,10 @@
1
+ {
2
+ "changes": [
3
+ {
4
+ "description": "Refactor tool definition to use `@tool` decorator and `Field` for parameters, updating examples and adding new tests and agent",
5
+ "type": "minor"
6
+ }
7
+ ],
8
+ "created_at": "2026-01-06T17:43:42+00:00",
9
+ "version": "1.1.0"
10
+ }
@@ -0,0 +1,10 @@
1
+ {
2
+ "changes": [
3
+ {
4
+ "description": "Experimental MCP support, with stdio, sse and streamable http transport",
5
+ "type": "minor"
6
+ }
7
+ ],
8
+ "created_at": "2026-01-08T12:40:17+00:00",
9
+ "version": "1.2.0"
10
+ }
@@ -0,0 +1,26 @@
1
+ # Changelog
2
+ Note: version releases in the 0.x.y range may introduce breaking changes.
3
+
4
+ ## 1.2.0
5
+
6
+ - minor: Experimental MCP support, with stdio, sse and streamable http transport
7
+
8
+ ## 1.1.0
9
+
10
+ - minor: Refactor tool definition to use `@tool` decorator and `Field` for parameters, updating examples and adding new tests and agent
11
+
12
+ ## 1.0.0
13
+
14
+ - major: Added tool calling support
15
+
16
+ ## 0.0.3
17
+
18
+ - patch: docs: added project urls
19
+
20
+ ## 0.0.2
21
+
22
+ - patch: docs: add comprehensive usage guide and provider configuration details
23
+
24
+ ## 0.0.1
25
+
26
+ - patch: Refactor providers to use base class fetching and fix OpenAI image fetching
ai_query-1.2.0/LICENSE ADDED
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2026 Abdulmumin Yaqeen
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
@@ -0,0 +1,347 @@
1
+ Metadata-Version: 2.4
2
+ Name: ai-query
3
+ Version: 1.2.0
4
+ Summary: A unified Python SDK for querying AI models from multiple providers
5
+ Project-URL: Homepage, https://github.com/Abdulmumin1/ai-query
6
+ Project-URL: Repository, https://github.com/Abdulmumin1/ai-query
7
+ License: MIT License
8
+
9
+ Copyright (c) 2026 Abdulmumin Yaqeen
10
+
11
+ Permission is hereby granted, free of charge, to any person obtaining a copy
12
+ of this software and associated documentation files (the "Software"), to deal
13
+ in the Software without restriction, including without limitation the rights
14
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
15
+ copies of the Software, and to permit persons to whom the Software is
16
+ furnished to do so, subject to the following conditions:
17
+
18
+ The above copyright notice and this permission notice shall be included in all
19
+ copies or substantial portions of the Software.
20
+
21
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
22
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
23
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
24
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
25
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
26
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
27
+ SOFTWARE.
28
+ License-File: LICENSE
29
+ Requires-Python: >=3.13
30
+ Requires-Dist: aiohttp>=3.9.0
31
+ Requires-Dist: mcp>=1.25.0
32
+ Provides-Extra: mcp
33
+ Requires-Dist: mcp>=1.0.0; extra == 'mcp'
34
+ Description-Content-Type: text/markdown
35
+
36
+ # ai-query
37
+
38
+ A unified Python SDK for querying AI models from multiple providers with a consistent interface.
39
+
40
+ ## Installation
41
+
42
+ ```bash
43
+ uv add ai-query
44
+ # or
45
+ pip install ai-query
46
+ ```
47
+
48
+ For MCP (Model Context Protocol) support:
49
+
50
+ ```bash
51
+ uv add "ai-query[mcp]"
52
+ # or
53
+ pip install "ai-query[mcp]"
54
+ ```
55
+
56
+ ## Quick Start
57
+
58
+ ```python
59
+ import asyncio
60
+ from ai_query import generate_text, openai
61
+
62
+ async def main():
63
+ result = await generate_text(
64
+ model=openai("gpt-4o"),
65
+ prompt="What is the capital of France?"
66
+ )
67
+ print(result.text)
68
+
69
+ asyncio.run(main())
70
+ ```
71
+
72
+ ## Streaming
73
+
74
+ ```python
75
+ from ai_query import stream_text, google
76
+
77
+ async def main():
78
+ result = stream_text(
79
+ model=google("gemini-2.0-flash"),
80
+ prompt="Write a short story."
81
+ )
82
+
83
+ async for chunk in result.text_stream:
84
+ print(chunk, end="", flush=True)
85
+
86
+ usage = await result.usage
87
+ print(f"\nTokens: {usage.total_tokens}")
88
+ ```
89
+
90
+ ## Tool Calling
91
+
92
+ Define tools and let the model use them automatically. The library handles the execution loop. Tools can be defined using the `@tool` decorator with type hints and the `Field` class for descriptions.
93
+
94
+ ```python
95
+ from ai_query import generate_text, google, tool, Field, step_count_is
96
+
97
+ # Define tools using decorators
98
+ @tool(description="Get the current weather for a location")
99
+ async def get_weather(
100
+ location: str = Field(description="City name")
101
+ ) -> str:
102
+ # Function implementation
103
+ return f"Weather in {location}: 72°F, Sunny"
104
+
105
+ @tool(description="Perform math calculations")
106
+ def calculate(
107
+ expression: str = Field(description="Math expression")
108
+ ) -> str:
109
+ return str(eval(expression))
110
+
111
+ async def main():
112
+ result = await generate_text(
113
+ model=google("gemini-2.0-flash"),
114
+ prompt="What's the weather in Paris? Also, what is 25 * 4?",
115
+ tools={
116
+ "weather": get_weather,
117
+ "calculator": calculate
118
+ },
119
+ stop_when=step_count_is(5), # Max 5 model calls
120
+ )
121
+ print(result.text)
122
+ print(f"Steps: {len(result.response['steps'])}")
123
+ ```
124
+
125
+ ### Stop Conditions
126
+
127
+ Control when the tool execution loop stops:
128
+
129
+ ```python
130
+ from ai_query import step_count_is, has_tool_call
131
+
132
+ # Stop after N model calls
133
+ stop_when=step_count_is(5)
134
+
135
+ # Stop when a specific tool is called
136
+ stop_when=has_tool_call("final_answer")
137
+
138
+ # Multiple conditions (stops when any is true)
139
+ stop_when=[step_count_is(10), has_tool_call("done")]
140
+ ```
141
+
142
+ ## MCP (Model Context Protocol) Support
143
+
144
+ ai-query supports [MCP](https://modelcontextprotocol.io/) - a standard for AI tool integration. Connect to any MCP server and use its tools seamlessly.
145
+
146
+ ### Transports
147
+
148
+ | Transport | Function | Use Case |
149
+ |-----------|----------|----------|
150
+ | **stdio** | `mcp()` | Local servers (python, node, npx) |
151
+ | **SSE** | `mcp_sse()` | Remote servers (legacy) |
152
+ | **Streamable HTTP** | `mcp_http()` | Remote servers (recommended) |
153
+
154
+ ### Local MCP Server (stdio)
155
+
156
+ ```python
157
+ from ai_query import generate_text, google, mcp
158
+
159
+ async def main():
160
+ # Connect to a local Python MCP server
161
+ async with mcp("python", "my_server.py") as server:
162
+ print(f"Available tools: {list(server.tools.keys())}")
163
+
164
+ result = await generate_text(
165
+ model=google("gemini-2.0-flash"),
166
+ prompt="Calculate 25 * 4",
167
+ tools=server.tools,
168
+ )
169
+ print(result.text)
170
+ ```
171
+
172
+ ### Using npx for npm MCP packages
173
+
174
+ ```python
175
+ from ai_query import generate_text, openai, mcp
176
+
177
+ async with mcp("npx", "-y", "@modelcontextprotocol/server-fetch") as server:
178
+ result = await generate_text(
179
+ model=openai("gpt-4o"),
180
+ prompt="Fetch and summarize https://example.com",
181
+ tools=server.tools,
182
+ )
183
+ ```
184
+
185
+ ### Remote MCP Server (SSE)
186
+
187
+ ```python
188
+ from ai_query import generate_text, openai, mcp_sse
189
+
190
+ async with mcp_sse("http://localhost:8000/sse") as server:
191
+ result = await generate_text(
192
+ model=openai("gpt-4o"),
193
+ prompt="Hello!",
194
+ tools=server.tools,
195
+ )
196
+
197
+ # With authentication
198
+ async with mcp_sse(
199
+ "https://api.example.com/mcp/sse",
200
+ headers={"Authorization": "Bearer token123"}
201
+ ) as server:
202
+ ...
203
+ ```
204
+
205
+ ### Remote MCP Server (Streamable HTTP)
206
+
207
+ ```python
208
+ from ai_query import generate_text, openai, mcp_http
209
+
210
+ async with mcp_http("http://localhost:8000/mcp") as server:
211
+ result = await generate_text(
212
+ model=openai("gpt-4o"),
213
+ prompt="Hello!",
214
+ tools=server.tools,
215
+ )
216
+ ```
217
+
218
+ ### Combining Multiple Tool Sources
219
+
220
+ Use `merge_tools` to combine tools from multiple MCP servers or mix with local tools:
221
+
222
+ ```python
223
+ from ai_query import generate_text, openai, mcp, merge_tools, tool, Field
224
+
225
+ @tool(description="Calculate math expressions")
226
+ def calculate(expr: str = Field(description="Expression")) -> str:
227
+ return str(eval(expr))
228
+
229
+ async with mcp("python", "weather_server.py") as weather:
230
+ async with mcp("python", "search_server.py") as search:
231
+ all_tools = merge_tools(
232
+ {"calculator": calculate}, # Local tool
233
+ weather, # MCP server
234
+ search, # Another MCP server
235
+ )
236
+
237
+ result = await generate_text(
238
+ model=openai("gpt-4o"),
239
+ prompt="What's the weather in Tokyo, search for news, and calculate 100/4",
240
+ tools=all_tools,
241
+ )
242
+ ```
243
+
244
+ ### Manual Connection Management
245
+
246
+ For long-lived connections, use `connect_mcp`, `connect_mcp_sse`, or `connect_mcp_http`:
247
+
248
+ ```python
249
+ from ai_query import connect_mcp
250
+
251
+ server = await connect_mcp("python", "server.py")
252
+ try:
253
+ # Use server.tools for multiple requests...
254
+ result = await generate_text(...)
255
+ finally:
256
+ await server.close()
257
+ ```
258
+
259
+ ## Step Callbacks
260
+
261
+ Monitor and react to each step in the execution loop with `on_step_start` and `on_step_finish`.
262
+
263
+ ```python
264
+ from ai_query import generate_text, google, StepStartEvent, StepFinishEvent
265
+
266
+ def on_start(event: StepStartEvent):
267
+ print(f"Step {event.step_number} starting...")
268
+ print(f" Messages: {len(event.messages)}")
269
+ # event.messages can be modified before the model call
270
+
271
+ def on_finish(event: StepFinishEvent):
272
+ print(f"Step {event.step_number} finished")
273
+
274
+ # Current step details
275
+ if event.step.tool_calls:
276
+ for tc in event.step.tool_calls:
277
+ print(f" Called: {tc.name}({tc.arguments})")
278
+ if event.step.tool_results:
279
+ for tr in event.step.tool_results:
280
+ print(f" Result: {tr.result}")
281
+
282
+ # Accumulated state
283
+ print(f" Total tokens: {event.usage.total_tokens}")
284
+ print(f" Text so far: {event.text[:50]}...")
285
+
286
+ result = await generate_text(
287
+ model=google("gemini-2.0-flash"),
288
+ prompt="What's the weather in Tokyo?",
289
+ tools={"weather": get_weather},
290
+ on_step_start=on_start,
291
+ on_step_finish=on_finish,
292
+ )
293
+ ```
294
+
295
+ ### StepStartEvent
296
+
297
+ | Field | Type | Description |
298
+ |-------|------|-------------|
299
+ | `step_number` | `int` | 1-indexed step number |
300
+ | `messages` | `list[Message]` | Conversation history (modifiable) |
301
+ | `tools` | `ToolSet \| None` | Available tools |
302
+
303
+ ### StepFinishEvent
304
+
305
+ | Field | Type | Description |
306
+ |-------|------|-------------|
307
+ | `step_number` | `int` | 1-indexed step number |
308
+ | `step` | `StepResult` | Current step (text, tool_calls, tool_results) |
309
+ | `text` | `str` | Accumulated text from all steps |
310
+ | `usage` | `Usage` | Accumulated token usage |
311
+ | `steps` | `list[StepResult]` | All completed steps |
312
+
313
+ Both callbacks support sync and async functions.
314
+
315
+ ## Providers
316
+
317
+ Built-in support for:
318
+
319
+ - **OpenAI**: `openai("gpt-4o")` - uses `OPENAI_API_KEY`
320
+ - **Anthropic**: `anthropic("claude-sonnet-4-20250514")` - uses `ANTHROPIC_API_KEY`
321
+ - **Google**: `google("gemini-2.0-flash")` - uses `GOOGLE_API_KEY`
322
+
323
+ Pass API keys directly if needed:
324
+
325
+ ```python
326
+ model = google("gemini-2.0-flash", api_key="your_key")
327
+ ```
328
+
329
+ ## Provider Options
330
+
331
+ Pass provider-specific parameters:
332
+
333
+ ```python
334
+ result = await generate_text(
335
+ model=google("gemini-2.0-flash"),
336
+ prompt="Tell me a story",
337
+ provider_options={
338
+ "google": {
339
+ "safety_settings": {"HARM_CATEGORY_VIOLENCE": "BLOCK_NONE"}
340
+ }
341
+ }
342
+ )
343
+ ```
344
+
345
+ ## Examples
346
+
347
+ See the [examples/](examples/) folder for agent implementations.