stirrup-0.1.3.tar.gz → stirrup-0.1.5.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (40)
  1. {stirrup-0.1.3 → stirrup-0.1.5}/PKG-INFO +18 -15
  2. {stirrup-0.1.3 → stirrup-0.1.5}/README.md +13 -12
  3. {stirrup-0.1.3 → stirrup-0.1.5}/pyproject.toml +4 -2
  4. {stirrup-0.1.3 → stirrup-0.1.5}/src/stirrup/clients/__init__.py +5 -0
  5. stirrup-0.1.5/src/stirrup/clients/open_responses_client.py +434 -0
  6. {stirrup-0.1.3 → stirrup-0.1.5}/src/stirrup/core/agent.py +18 -2
  7. {stirrup-0.1.3 → stirrup-0.1.5}/src/stirrup/core/models.py +4 -2
  8. {stirrup-0.1.3 → stirrup-0.1.5}/src/stirrup/tools/__init__.py +1 -0
  9. stirrup-0.1.5/src/stirrup/tools/browser_use.py +591 -0
  10. {stirrup-0.1.3 → stirrup-0.1.5}/src/stirrup/tools/code_backends/base.py +88 -12
  11. {stirrup-0.1.3 → stirrup-0.1.5}/src/stirrup/tools/code_backends/docker.py +66 -0
  12. {stirrup-0.1.3 → stirrup-0.1.5}/src/stirrup/tools/code_backends/e2b.py +80 -0
  13. {stirrup-0.1.3 → stirrup-0.1.5}/src/stirrup/tools/code_backends/local.py +60 -0
  14. stirrup-0.1.5/src/stirrup/tools/finish.py +49 -0
  15. {stirrup-0.1.3 → stirrup-0.1.5}/src/stirrup/utils/logging.py +8 -7
  16. stirrup-0.1.3/src/stirrup/tools/finish.py +0 -23
  17. {stirrup-0.1.3 → stirrup-0.1.5}/LICENSE +0 -0
  18. {stirrup-0.1.3 → stirrup-0.1.5}/src/stirrup/__init__.py +0 -0
  19. {stirrup-0.1.3 → stirrup-0.1.5}/src/stirrup/clients/chat_completions_client.py +0 -0
  20. {stirrup-0.1.3 → stirrup-0.1.5}/src/stirrup/clients/litellm_client.py +0 -0
  21. {stirrup-0.1.3 → stirrup-0.1.5}/src/stirrup/clients/utils.py +0 -0
  22. {stirrup-0.1.3 → stirrup-0.1.5}/src/stirrup/constants.py +0 -0
  23. {stirrup-0.1.3 → stirrup-0.1.5}/src/stirrup/core/__init__.py +0 -0
  24. {stirrup-0.1.3 → stirrup-0.1.5}/src/stirrup/core/cache.py +0 -0
  25. {stirrup-0.1.3 → stirrup-0.1.5}/src/stirrup/core/exceptions.py +0 -0
  26. {stirrup-0.1.3 → stirrup-0.1.5}/src/stirrup/prompts/__init__.py +0 -0
  27. {stirrup-0.1.3 → stirrup-0.1.5}/src/stirrup/prompts/base_system_prompt.txt +0 -0
  28. {stirrup-0.1.3 → stirrup-0.1.5}/src/stirrup/prompts/message_summarizer.txt +0 -0
  29. {stirrup-0.1.3 → stirrup-0.1.5}/src/stirrup/prompts/message_summarizer_bridge.txt +0 -0
  30. {stirrup-0.1.3 → stirrup-0.1.5}/src/stirrup/py.typed +0 -0
  31. {stirrup-0.1.3 → stirrup-0.1.5}/src/stirrup/skills/__init__.py +0 -0
  32. {stirrup-0.1.3 → stirrup-0.1.5}/src/stirrup/skills/skills.py +0 -0
  33. {stirrup-0.1.3 → stirrup-0.1.5}/src/stirrup/tools/calculator.py +0 -0
  34. {stirrup-0.1.3 → stirrup-0.1.5}/src/stirrup/tools/code_backends/__init__.py +0 -0
  35. {stirrup-0.1.3 → stirrup-0.1.5}/src/stirrup/tools/mcp.py +0 -0
  36. {stirrup-0.1.3 → stirrup-0.1.5}/src/stirrup/tools/user_input.py +0 -0
  37. {stirrup-0.1.3 → stirrup-0.1.5}/src/stirrup/tools/view_image.py +0 -0
  38. {stirrup-0.1.3 → stirrup-0.1.5}/src/stirrup/tools/web.py +0 -0
  39. {stirrup-0.1.3 → stirrup-0.1.5}/src/stirrup/utils/__init__.py +0 -0
  40. {stirrup-0.1.3 → stirrup-0.1.5}/src/stirrup/utils/text.py +0 -0
--- stirrup-0.1.3/PKG-INFO
+++ stirrup-0.1.5/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: stirrup
-Version: 0.1.3
+Version: 0.1.5
 Summary: The lightweight foundation for building agents
 Keywords: ai,agent,llm,openai,anthropic,tools,framework
 Author: Artificial Analysis, Inc.
@@ -47,17 +47,19 @@ Requires-Dist: pydantic>=2.0.0
 Requires-Dist: rich>=13.0.0
 Requires-Dist: tenacity>=5.0.0
 Requires-Dist: trafilatura>=1.9.0
-Requires-Dist: stirrup[litellm,e2b,docker,mcp] ; extra == 'all'
+Requires-Dist: stirrup[litellm,e2b,docker,mcp,browser] ; extra == 'all'
+Requires-Dist: browser-use>=0.11.3 ; extra == 'browser'
 Requires-Dist: docker>=7.0.0 ; extra == 'docker'
 Requires-Dist: python-dotenv>=1.0.0 ; extra == 'docker'
 Requires-Dist: e2b-code-interpreter>=2.3.0 ; extra == 'e2b'
 Requires-Dist: litellm>=1.79.3 ; extra == 'litellm'
 Requires-Dist: mcp>=1.9.0 ; extra == 'mcp'
 Requires-Python: >=3.12
-Project-URL: Documentation, https://stirrup.artificialanalysis.ai
 Project-URL: Homepage, https://github.com/ArtificialAnalysis/Stirrup
+Project-URL: Documentation, https://stirrup.artificialanalysis.ai
 Project-URL: Repository, https://github.com/ArtificialAnalysis/Stirrup
 Provides-Extra: all
+Provides-Extra: browser
 Provides-Extra: docker
 Provides-Extra: e2b
 Provides-Extra: litellm
@@ -75,32 +77,32 @@ Description-Content-Type: text/markdown
 <br>
 </div>
 
-
 <p align="center">
 <a href="https://pypi.python.org/pypi/stirrup"><img src="https://img.shields.io/pypi/v/stirrup" alt="PyPI version" /></a>&nbsp;<!--
 --><a href="https://github.com/ArtificialAnalysis/Stirrup/blob/main/LICENSE"><img src="https://img.shields.io/github/license/ArtificialAnalysis/Stirrup" alt="License" /></a>&nbsp;<!--
 --><a href="https://stirrup.artificialanalysis.ai"><img src="https://img.shields.io/badge/MkDocs-4F46E5?logo=materialformkdocs&logoColor=fff" alt="MkDocs" /></a>
 </p>
 
-
 Stirrup is a lightweight framework, or starting point template, for building agents. It differs from other agent frameworks by:
 
 - **Working with the model, not against it:** Stirrup gets out of the way and lets the model choose its own approach to completing tasks (similar to Claude Code). Many frameworks impose rigid workflows that can degrade results.
 - **Best practices and tools built-in:** We analyzed the leading agents (Claude Code, Codex, and others) to understand and incorporate best practices relating to topics like context management and foundational tools (e.g., code execution).
 - **Fully customizable:** Use Stirrup as a package or as a starting template to build your own fully customized agents.
 
+> **Note:** This is the Python implementation; [StirrupJS](https://github.com/ArtificialAnalysis/StirrupJS) is the TypeScript implementation.
+
 ## Features
 
-- **Essential tools built-in:**
-  - Online search / web browsing
-  - Code execution (local, Docker container, E2B sandbox)
-  - MCP client
-  - Document input and output
-- **Skills system:** Extend agent capabilities with modular, domain-specific instruction packages
-- **Flexible tool execution:** A generic `Tool` class allows easy tool definition and extension
-- **Context management:** Automatically summarizes conversation history when approaching context limits
-- **Flexible provider support:** Pre-built support for OpenAI-compatible APIs and LiteLLM, or bring your own client
-- **Multimodal support:** Process images, video, and audio with automatic format conversion
+- 🧪 **Code execution:** Run code locally, in Docker, or in an E2B sandbox
+- 🔎 **Online search / web browsing:** Search and fetch web pages
+- 🔌 **MCP client support:** Connect to MCP servers and use their tools/resources
+- 📄 **Document input and output:** Import files into context and produce file outputs
+- 🧩 **Skills system:** Extend agents with modular, domain-specific instruction packages
+- 🛠️ **Flexible tool execution:** A generic `Tool` interface allows easy tool definition
+- 👤 **Human-in-the-loop:** Includes a built-in user input tool that enables human feedback or clarification during agent execution
+- 🧠 **Context management:** Automatically summarizes conversation history when approaching context limits
+- 🔁 **Flexible provider support:** Pre-built support for OpenAI-compatible APIs, LiteLLM, or bring your own client
+- 🖼️ **Multimodal support:** Process images, video, and audio with automatic format conversion
 
 ## Installation
 
@@ -116,6 +118,7 @@ pip install 'stirrup[litellm]' # or: uv add 'stirrup[litellm]'
 pip install 'stirrup[docker]' # or: uv add 'stirrup[docker]'
 pip install 'stirrup[e2b]' # or: uv add 'stirrup[e2b]'
 pip install 'stirrup[mcp]' # or: uv add 'stirrup[mcp]'
+pip install 'stirrup[browser]' # or: uv add 'stirrup[browser]'
 ```
 
 ## Quick Start
--- stirrup-0.1.3/README.md
+++ stirrup-0.1.5/README.md
@@ -9,32 +9,32 @@
 <br>
 </div>
 
-
 <p align="center">
 <a href="https://pypi.python.org/pypi/stirrup"><img src="https://img.shields.io/pypi/v/stirrup" alt="PyPI version" /></a>&nbsp;<!--
 --><a href="https://github.com/ArtificialAnalysis/Stirrup/blob/main/LICENSE"><img src="https://img.shields.io/github/license/ArtificialAnalysis/Stirrup" alt="License" /></a>&nbsp;<!--
 --><a href="https://stirrup.artificialanalysis.ai"><img src="https://img.shields.io/badge/MkDocs-4F46E5?logo=materialformkdocs&logoColor=fff" alt="MkDocs" /></a>
 </p>
 
-
 Stirrup is a lightweight framework, or starting point template, for building agents. It differs from other agent frameworks by:
 
 - **Working with the model, not against it:** Stirrup gets out of the way and lets the model choose its own approach to completing tasks (similar to Claude Code). Many frameworks impose rigid workflows that can degrade results.
 - **Best practices and tools built-in:** We analyzed the leading agents (Claude Code, Codex, and others) to understand and incorporate best practices relating to topics like context management and foundational tools (e.g., code execution).
 - **Fully customizable:** Use Stirrup as a package or as a starting template to build your own fully customized agents.
 
+> **Note:** This is the Python implementation; [StirrupJS](https://github.com/ArtificialAnalysis/StirrupJS) is the TypeScript implementation.
+
 ## Features
 
-- **Essential tools built-in:**
-  - Online search / web browsing
-  - Code execution (local, Docker container, E2B sandbox)
-  - MCP client
-  - Document input and output
-- **Skills system:** Extend agent capabilities with modular, domain-specific instruction packages
-- **Flexible tool execution:** A generic `Tool` class allows easy tool definition and extension
-- **Context management:** Automatically summarizes conversation history when approaching context limits
-- **Flexible provider support:** Pre-built support for OpenAI-compatible APIs and LiteLLM, or bring your own client
-- **Multimodal support:** Process images, video, and audio with automatic format conversion
+- 🧪 **Code execution:** Run code locally, in Docker, or in an E2B sandbox
+- 🔎 **Online search / web browsing:** Search and fetch web pages
+- 🔌 **MCP client support:** Connect to MCP servers and use their tools/resources
+- 📄 **Document input and output:** Import files into context and produce file outputs
+- 🧩 **Skills system:** Extend agents with modular, domain-specific instruction packages
+- 🛠️ **Flexible tool execution:** A generic `Tool` interface allows easy tool definition
+- 👤 **Human-in-the-loop:** Includes a built-in user input tool that enables human feedback or clarification during agent execution
+- 🧠 **Context management:** Automatically summarizes conversation history when approaching context limits
+- 🔁 **Flexible provider support:** Pre-built support for OpenAI-compatible APIs, LiteLLM, or bring your own client
+- 🖼️ **Multimodal support:** Process images, video, and audio with automatic format conversion
 
 ## Installation
 
@@ -50,6 +50,7 @@ pip install 'stirrup[litellm]' # or: uv add 'stirrup[litellm]'
 pip install 'stirrup[docker]' # or: uv add 'stirrup[docker]'
 pip install 'stirrup[e2b]' # or: uv add 'stirrup[e2b]'
 pip install 'stirrup[mcp]' # or: uv add 'stirrup[mcp]'
+pip install 'stirrup[browser]' # or: uv add 'stirrup[browser]'
 ```
 
 ## Quick Start
--- stirrup-0.1.3/pyproject.toml
+++ stirrup-0.1.5/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "stirrup"
-version = "0.1.3"
+version = "0.1.5"
 description = "The lightweight foundation for building agents"
 readme = "README.md"
 license = { file = "LICENSE" }
@@ -41,7 +41,8 @@ litellm = ["litellm>=1.79.3"]
 e2b = ["e2b-code-interpreter>=2.3.0"]
 docker = ["docker>=7.0.0", "python-dotenv>=1.0.0"]
 mcp = ["mcp>=1.9.0"]
-all = ["stirrup[litellm,e2b,docker,mcp]"]
+browser = ["browser-use>=0.11.3"]
+all = ["stirrup[litellm,e2b,docker,mcp,browser]"]
 
 [project.urls]
 Homepage = "https://github.com/ArtificialAnalysis/Stirrup"
@@ -70,6 +71,7 @@ anyio_mode = "auto"
 markers = [
     "docker: marks tests as requiring the docker package (deselect with '-m \"not docker\"')",
     "e2b: marks tests as requiring the e2b package (deselect with '-m \"not e2b\"')",
+    "browser: marks tests as requiring headless Chrome (deselect with '-m \"not browser\"')",
 ]
 
 [tool.ruff]
--- stirrup-0.1.3/src/stirrup/clients/__init__.py
+++ stirrup-0.1.5/src/stirrup/clients/__init__.py
@@ -3,12 +3,17 @@
 The default client is ChatCompletionsClient, which uses the OpenAI SDK directly
 and supports any OpenAI-compatible API via the `base_url` parameter.
 
+OpenResponsesClient uses the OpenAI Responses API (responses.create) for providers
+that support this newer API format.
+
 For multi-provider support via LiteLLM, install the litellm extra:
     pip install stirrup[litellm]
 """
 
 from stirrup.clients.chat_completions_client import ChatCompletionsClient
+from stirrup.clients.open_responses_client import OpenResponsesClient
 
 __all__ = [
     "ChatCompletionsClient",
+    "OpenResponsesClient",
 ]
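
For orientation, a minimal usage sketch of the newly exported client follows. This is not part of the package diff: it assumes OPENAI_API_KEY is set, the model name is illustrative, and the SystemMessage/UserMessage constructors are inferred from how open_responses_client.py (below) reads their content fields.

```python
# Hedged sketch: a minimal OpenResponsesClient call with no tools.
import asyncio

from stirrup.clients import OpenResponsesClient
from stirrup.core.models import SystemMessage, UserMessage


async def main() -> None:
    client = OpenResponsesClient(model="gpt-4o", max_tokens=32_000)
    reply = await client.generate(
        messages=[
            SystemMessage(content="You are a terse assistant."),
            UserMessage(content="Summarize what a stirrup does."),
        ],
        tools={},  # no tool definitions for this minimal call
    )
    print(reply.content)


asyncio.run(main())
```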
--- /dev/null
+++ stirrup-0.1.5/src/stirrup/clients/open_responses_client.py
@@ -0,0 +1,434 @@
+"""OpenAI SDK-based LLM client for the Responses API.
+
+This client uses the official OpenAI Python SDK's responses.create() method,
+supporting both OpenAI's API and any OpenAI-compatible endpoint that implements
+the Responses API via the `base_url` parameter.
+"""
+
+import logging
+import os
+from typing import Any
+
+from openai import (
+    APIConnectionError,
+    APITimeoutError,
+    AsyncOpenAI,
+    InternalServerError,
+    RateLimitError,
+)
+from tenacity import retry, retry_if_exception_type, stop_after_attempt, wait_exponential
+
+from stirrup.core.exceptions import ContextOverflowError
+from stirrup.core.models import (
+    AssistantMessage,
+    AudioContentBlock,
+    ChatMessage,
+    Content,
+    EmptyParams,
+    ImageContentBlock,
+    LLMClient,
+    Reasoning,
+    SystemMessage,
+    TokenUsage,
+    Tool,
+    ToolCall,
+    ToolMessage,
+    UserMessage,
+    VideoContentBlock,
+)
+
+__all__ = [
+    "OpenResponsesClient",
+]
+
+LOGGER = logging.getLogger(__name__)
+
+
+def _content_to_open_responses_input(content: Content) -> list[dict[str, Any]]:
+    """Convert Content blocks to OpenResponses input content format.
+
+    Uses input_text for text content (vs output_text for responses).
+    """
+    if isinstance(content, str):
+        return [{"type": "input_text", "text": content}]
+
+    out: list[dict[str, Any]] = []
+    for block in content:
+        if isinstance(block, str):
+            out.append({"type": "input_text", "text": block})
+        elif isinstance(block, ImageContentBlock):
+            out.append({"type": "input_image", "image_url": block.to_base64_url()})
+        elif isinstance(block, AudioContentBlock):
+            out.append(
+                {
+                    "type": "input_audio",
+                    "input_audio": {
+                        "data": block.to_base64_url().split(",")[1],
+                        "format": block.extension,
+                    },
+                }
+            )
+        elif isinstance(block, VideoContentBlock):
+            out.append({"type": "input_file", "file_data": block.to_base64_url()})
+        else:
+            raise NotImplementedError(f"Unsupported content block: {type(block)}")
+    return out
+
+
+def _content_to_open_responses_output(content: Content) -> list[dict[str, Any]]:
+    """Convert Content blocks to OpenResponses output content format.
+
+    Uses output_text for assistant message content.
+    """
+    if isinstance(content, str):
+        return [{"type": "output_text", "text": content}]
+
+    out: list[dict[str, Any]] = []
+    for block in content:
+        if isinstance(block, str):
+            out.append({"type": "output_text", "text": block})
+        else:
+            raise NotImplementedError(f"Unsupported output content block: {type(block)}")
+    return out
+
+
+def _to_open_responses_tools(tools: dict[str, Tool]) -> list[dict[str, Any]]:
+    """Convert Tool objects to OpenResponses function format.
+
+    OpenResponses API expects tools with name/description/parameters at top level,
+    not nested under a 'function' key like Chat Completions API.
+
+    Args:
+        tools: Dictionary mapping tool names to Tool objects.
+
+    Returns:
+        List of tool definitions in OpenResponses format.
+    """
+    out: list[dict[str, Any]] = []
+    for t in tools.values():
+        tool_def: dict[str, Any] = {
+            "type": "function",
+            "name": t.name,
+            "description": t.description,
+        }
+        if t.parameters is not EmptyParams:
+            tool_def["parameters"] = t.parameters.model_json_schema()
+        out.append(tool_def)
+    return out
+
+
+def _to_open_responses_input(
+    msgs: list[ChatMessage],
+) -> tuple[str | None, list[dict[str, Any]]]:
+    """Convert ChatMessage list to OpenResponses (instructions, input) tuple.
+
+    SystemMessage content is extracted as the instructions parameter.
+    Other messages are converted to input items.
+
+    Returns:
+        Tuple of (instructions, input_items) where instructions is the system
+        message content (or None) and input_items is the list of input items.
+    """
+    instructions: str | None = None
+    input_items: list[dict[str, Any]] = []
+
+    for m in msgs:
+        if isinstance(m, SystemMessage):
+            # Extract system message as instructions
+            if isinstance(m.content, str):
+                instructions = m.content
+            else:
+                # Join text content blocks for instructions
+                instructions = "\n".join(block if isinstance(block, str) else "" for block in m.content)
+        elif isinstance(m, UserMessage):
+            input_items.append(
+                {
+                    "role": "user",
+                    "content": _content_to_open_responses_input(m.content),
+                }
+            )
+        elif isinstance(m, AssistantMessage):
+            # For assistant messages, we need to add them as response output items
+            # First add any text content as a message item
+            content_str = (
+                m.content
+                if isinstance(m.content, str)
+                else "\n".join(block if isinstance(block, str) else "" for block in m.content)
+            )
+            if content_str:
+                input_items.append(
+                    {
+                        "type": "message",
+                        "role": "assistant",
+                        "content": [{"type": "output_text", "text": content_str}],
+                    }
+                )
+
+            # Add tool calls as separate function_call items
+            input_items.extend(
+                {
+                    "type": "function_call",
+                    "call_id": tc.tool_call_id,
+                    "name": tc.name,
+                    "arguments": tc.arguments,
+                }
+                for tc in m.tool_calls
+            )
+        elif isinstance(m, ToolMessage):
+            # Tool results are function_call_output items
+            content_str = m.content if isinstance(m.content, str) else str(m.content)
+            input_items.append(
+                {
+                    "type": "function_call_output",
+                    "call_id": m.tool_call_id,
+                    "output": content_str,
+                }
+            )
+        else:
+            raise NotImplementedError(f"Unsupported message type: {type(m)}")
+
+    return instructions, input_items
+
+
+def _get_attr(obj: Any, name: str, default: Any = None) -> Any:  # noqa: ANN401
+    """Get attribute from object or dict, with fallback default."""
+    if isinstance(obj, dict):
+        return obj.get(name, default)
+    return getattr(obj, name, default)
+
+
+def _parse_response_output(
+    output: list[Any],
+) -> tuple[str, list[ToolCall], Reasoning | None]:
+    """Parse response output items into content, tool_calls, and reasoning.
+
+    Args:
+        output: List of output items from the response.
+
+    Returns:
+        Tuple of (content_text, tool_calls, reasoning).
+    """
+    content_parts: list[str] = []
+    tool_calls: list[ToolCall] = []
+    reasoning: Reasoning | None = None
+
+    for item in output:
+        item_type = _get_attr(item, "type")
+
+        if item_type == "message":
+            # Extract text content from message
+            msg_content = _get_attr(item, "content", [])
+            for content_item in msg_content:
+                content_type = _get_attr(content_item, "type")
+                if content_type == "output_text":
+                    text = _get_attr(content_item, "text", "")
+                    content_parts.append(text)
+
+        elif item_type == "function_call":
+            call_id = _get_attr(item, "call_id")
+            name = _get_attr(item, "name")
+            arguments = _get_attr(item, "arguments", "")
+            tool_calls.append(
+                ToolCall(
+                    tool_call_id=call_id,
+                    name=name,
+                    arguments=arguments,
+                )
+            )
+
+        elif item_type == "reasoning":
+            # Extract reasoning/thinking content - try multiple possible attribute names
+            # summary can be a list of Summary objects with .text attribute
+            summary = _get_attr(item, "summary")
+            if summary:
+                if isinstance(summary, list):
+                    # Extract text from Summary objects
+                    thinking = "\n".join(_get_attr(s, "text", "") for s in summary if _get_attr(s, "text"))
+                else:
+                    thinking = str(summary)
+            else:
+                thinking = _get_attr(item, "thinking") or ""
+
+            if thinking:
+                reasoning = Reasoning(content=thinking)
+
+    return "\n".join(content_parts), tool_calls, reasoning
+
+
+class OpenResponsesClient(LLMClient):
+    """OpenAI SDK-based client using the Responses API.
+
+    Uses the official OpenAI Python SDK's responses.create() method.
+    Supports custom base_url for OpenAI-compatible providers that implement
+    the Responses API.
+
+    Includes automatic retries for transient failures and token usage tracking.
+
+    Example:
+        >>> # Standard OpenAI usage
+        >>> client = OpenResponsesClient(model="gpt-4o", max_tokens=128_000)
+        >>>
+        >>> # Custom OpenAI-compatible endpoint
+        >>> client = OpenResponsesClient(
+        ...     model="gpt-4o",
+        ...     base_url="http://localhost:8000/v1",
+        ...     api_key="your-api-key",
+        ... )
+    """
+
+    def __init__(
+        self,
+        model: str,
+        max_tokens: int = 64_000,
+        *,
+        base_url: str | None = None,
+        api_key: str | None = None,
+        reasoning_effort: str | None = None,
+        timeout: float | None = None,
+        max_retries: int = 2,
+        instructions: str | None = None,
+        kwargs: dict[str, Any] | None = None,
+    ) -> None:
+        """Initialize OpenAI SDK client with model configuration for Responses API.
+
+        Args:
+            model: Model identifier (e.g., 'gpt-4o', 'o1-preview').
+            max_tokens: Maximum output tokens. Defaults to 64,000.
+            base_url: API base URL. If None, uses OpenAI's standard URL.
+                Use for OpenAI-compatible providers.
+            api_key: API key for authentication. If None, reads from the OPENAI_API_KEY
+                environment variable.
+            reasoning_effort: Reasoning effort level for extended thinking models
+                (e.g., 'low', 'medium', 'high'). Only used with o1/o3 style models.
+            timeout: Request timeout in seconds. If None, uses OpenAI SDK default.
+            max_retries: Number of retries for transient errors. Defaults to 2.
+            instructions: Default system-level instructions. Can be overridden by
+                SystemMessage in the messages list.
+            kwargs: Additional arguments passed to responses.create().
+        """
+        self._model = model
+        self._max_tokens = max_tokens
+        self._reasoning_effort = reasoning_effort
+        self._default_instructions = instructions
+        self._kwargs = kwargs or {}
+
+        # Initialize AsyncOpenAI client
+        resolved_api_key = api_key or os.environ.get("OPENAI_API_KEY")
+
+        # Strip /responses suffix if present - SDK appends it automatically
+        resolved_base_url = base_url
+        if resolved_base_url and resolved_base_url.rstrip("/").endswith("/responses"):
+            resolved_base_url = resolved_base_url.rstrip("/").removesuffix("/responses")
+
+        self._client = AsyncOpenAI(
+            api_key=resolved_api_key,
+            base_url=resolved_base_url,
+            timeout=timeout,
+            max_retries=max_retries,
+        )
+
+    @property
+    def max_tokens(self) -> int:
+        """Maximum output tokens."""
+        return self._max_tokens
+
+    @property
+    def model_slug(self) -> str:
+        """Model identifier."""
+        return self._model
+
+    @retry(
+        retry=retry_if_exception_type(
+            (
+                APIConnectionError,
+                APITimeoutError,
+                RateLimitError,
+                InternalServerError,
+            )
+        ),
+        stop=stop_after_attempt(3),
+        wait=wait_exponential(multiplier=1, min=1, max=10),
+    )
+    async def generate(
+        self,
+        messages: list[ChatMessage],
+        tools: dict[str, Tool],
+    ) -> AssistantMessage:
+        """Generate assistant response with optional tool calls using Responses API.
+
+        Retries up to 3 times on transient errors (connection, timeout, rate limit,
+        internal server errors) with exponential backoff.
+
+        Args:
+            messages: List of conversation messages.
+            tools: Dictionary mapping tool names to Tool objects.
+
+        Returns:
+            AssistantMessage containing the model's response, any tool calls,
+            and token usage statistics.
+
+        Raises:
+            ContextOverflowError: If the response is incomplete due to token limits.
+        """
+        # Convert messages to OpenResponses format
+        instructions, input_items = _to_open_responses_input(messages)
+
+        # Use provided instructions or fall back to default
+        final_instructions = instructions or self._default_instructions
+
+        # Build request kwargs
+        request_kwargs: dict[str, Any] = {
+            "model": self._model,
+            "input": input_items,
+            "max_output_tokens": self._max_tokens,
+            **self._kwargs,
+        }
+
+        # Add instructions if present
+        if final_instructions:
+            request_kwargs["instructions"] = final_instructions
+
+        # Add tools if provided
+        if tools:
+            request_kwargs["tools"] = _to_open_responses_tools(tools)
+            request_kwargs["tool_choice"] = "auto"
+
+        # Add reasoning effort if configured (for o1/o3 models)
+        if self._reasoning_effort:
+            request_kwargs["reasoning"] = {"effort": self._reasoning_effort}
+
+        # Make API call
+        response = await self._client.responses.create(**request_kwargs)
+
+        # Check for incomplete response (context overflow)
+        if response.status == "incomplete":
+            stop_reason = getattr(response, "incomplete_details", None)
+            raise ContextOverflowError(
+                f"Response incomplete for model {self.model_slug}: {stop_reason}. "
+                "Reduce max_tokens or message length and try again."
+            )
+
+        # Parse response output
+        content, tool_calls, reasoning = _parse_response_output(response.output)
+
+        # Parse token usage
+        usage = response.usage
+        input_tokens = usage.input_tokens if usage else 0
+        output_tokens = usage.output_tokens if usage else 0
+
+        # Handle reasoning tokens if available
+        reasoning_tokens = 0
+        if usage and hasattr(usage, "output_tokens_details") and usage.output_tokens_details:
+            reasoning_tokens = getattr(usage.output_tokens_details, "reasoning_tokens", 0) or 0
+            output_tokens = output_tokens - reasoning_tokens
+
+        return AssistantMessage(
+            reasoning=reasoning,
+            content=content,
+            tool_calls=tool_calls,
+            token_usage=TokenUsage(
+                input=input_tokens,
+                output=output_tokens,
+                reasoning=reasoning_tokens,
+            ),
+        )