stirrup-0.1.2.tar.gz → stirrup-0.1.4.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {stirrup-0.1.2 → stirrup-0.1.4}/PKG-INFO +16 -13
- {stirrup-0.1.2 → stirrup-0.1.4}/README.md +11 -10
- {stirrup-0.1.2 → stirrup-0.1.4}/pyproject.toml +4 -2
- {stirrup-0.1.2 → stirrup-0.1.4}/src/stirrup/__init__.py +2 -0
- {stirrup-0.1.2 → stirrup-0.1.4}/src/stirrup/clients/__init__.py +5 -0
- {stirrup-0.1.2 → stirrup-0.1.4}/src/stirrup/clients/chat_completions_client.py +0 -3
- {stirrup-0.1.2 → stirrup-0.1.4}/src/stirrup/clients/litellm_client.py +20 -11
- stirrup-0.1.4/src/stirrup/clients/open_responses_client.py +434 -0
- {stirrup-0.1.2 → stirrup-0.1.4}/src/stirrup/clients/utils.py +6 -1
- {stirrup-0.1.2 → stirrup-0.1.4}/src/stirrup/constants.py +6 -2
- {stirrup-0.1.2 → stirrup-0.1.4}/src/stirrup/core/agent.py +196 -57
- stirrup-0.1.4/src/stirrup/core/cache.py +479 -0
- {stirrup-0.1.2 → stirrup-0.1.4}/src/stirrup/core/models.py +53 -9
- {stirrup-0.1.2 → stirrup-0.1.4}/src/stirrup/prompts/base_system_prompt.txt +1 -1
- {stirrup-0.1.2 → stirrup-0.1.4}/src/stirrup/tools/__init__.py +3 -0
- stirrup-0.1.4/src/stirrup/tools/browser_use.py +591 -0
- {stirrup-0.1.2 → stirrup-0.1.4}/src/stirrup/tools/calculator.py +1 -1
- {stirrup-0.1.2 → stirrup-0.1.4}/src/stirrup/tools/code_backends/base.py +24 -0
- {stirrup-0.1.2 → stirrup-0.1.4}/src/stirrup/tools/code_backends/docker.py +19 -0
- {stirrup-0.1.2 → stirrup-0.1.4}/src/stirrup/tools/code_backends/e2b.py +43 -11
- {stirrup-0.1.2 → stirrup-0.1.4}/src/stirrup/tools/code_backends/local.py +19 -2
- stirrup-0.1.4/src/stirrup/tools/finish.py +49 -0
- stirrup-0.1.4/src/stirrup/tools/user_input.py +130 -0
- {stirrup-0.1.2 → stirrup-0.1.4}/src/stirrup/tools/web.py +1 -0
- {stirrup-0.1.2 → stirrup-0.1.4}/src/stirrup/utils/logging.py +32 -7
- stirrup-0.1.2/src/stirrup/tools/finish.py +0 -23
- {stirrup-0.1.2 → stirrup-0.1.4}/LICENSE +0 -0
- {stirrup-0.1.2 → stirrup-0.1.4}/src/stirrup/core/__init__.py +0 -0
- {stirrup-0.1.2 → stirrup-0.1.4}/src/stirrup/core/exceptions.py +0 -0
- {stirrup-0.1.2 → stirrup-0.1.4}/src/stirrup/prompts/__init__.py +0 -0
- {stirrup-0.1.2 → stirrup-0.1.4}/src/stirrup/prompts/message_summarizer.txt +0 -0
- {stirrup-0.1.2 → stirrup-0.1.4}/src/stirrup/prompts/message_summarizer_bridge.txt +0 -0
- {stirrup-0.1.2 → stirrup-0.1.4}/src/stirrup/py.typed +0 -0
- {stirrup-0.1.2 → stirrup-0.1.4}/src/stirrup/skills/__init__.py +0 -0
- {stirrup-0.1.2 → stirrup-0.1.4}/src/stirrup/skills/skills.py +0 -0
- {stirrup-0.1.2 → stirrup-0.1.4}/src/stirrup/tools/code_backends/__init__.py +0 -0
- {stirrup-0.1.2 → stirrup-0.1.4}/src/stirrup/tools/mcp.py +0 -0
- {stirrup-0.1.2 → stirrup-0.1.4}/src/stirrup/tools/view_image.py +0 -0
- {stirrup-0.1.2 → stirrup-0.1.4}/src/stirrup/utils/__init__.py +0 -0
- {stirrup-0.1.2 → stirrup-0.1.4}/src/stirrup/utils/text.py +0 -0

{stirrup-0.1.2 → stirrup-0.1.4}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: stirrup
-Version: 0.1.2
+Version: 0.1.4
 Summary: The lightweight foundation for building agents
 Keywords: ai,agent,llm,openai,anthropic,tools,framework
 Author: Artificial Analysis, Inc.

@@ -47,17 +47,19 @@ Requires-Dist: pydantic>=2.0.0
 Requires-Dist: rich>=13.0.0
 Requires-Dist: tenacity>=5.0.0
 Requires-Dist: trafilatura>=1.9.0
-Requires-Dist: stirrup[litellm,e2b,docker,mcp] ; extra == 'all'
+Requires-Dist: stirrup[litellm,e2b,docker,mcp,browser] ; extra == 'all'
+Requires-Dist: browser-use>=0.11.3 ; extra == 'browser'
 Requires-Dist: docker>=7.0.0 ; extra == 'docker'
 Requires-Dist: python-dotenv>=1.0.0 ; extra == 'docker'
 Requires-Dist: e2b-code-interpreter>=2.3.0 ; extra == 'e2b'
 Requires-Dist: litellm>=1.79.3 ; extra == 'litellm'
 Requires-Dist: mcp>=1.9.0 ; extra == 'mcp'
 Requires-Python: >=3.12
-Project-URL: Documentation, https://stirrup.artificialanalysis.ai
 Project-URL: Homepage, https://github.com/ArtificialAnalysis/Stirrup
+Project-URL: Documentation, https://stirrup.artificialanalysis.ai
 Project-URL: Repository, https://github.com/ArtificialAnalysis/Stirrup
 Provides-Extra: all
+Provides-Extra: browser
 Provides-Extra: docker
 Provides-Extra: e2b
 Provides-Extra: litellm

@@ -91,16 +93,16 @@ Stirrup is a lightweight framework, or starting point template, for building age
 
 ## Features
 
-- **…
-…
-…
-…
-…
-- **…
-- **…
-- **Context management:** Automatically summarizes conversation history when approaching context limits
-- **Flexible provider support:** Pre-built support for OpenAI-compatible APIs
-- **Multimodal support:** Process images, video, and audio with automatic format conversion
+- 🧪 **Code execution:** Run code locally, in Docker, or in an E2B sandbox
+- 🔎 **Online search / web browsing:** Search and fetch web pages
+- 🔌 **MCP client support:** Connect to MCP servers and use their tools/resources
+- 📄 **Document input and output:** Import files into context and produce file outputs
+- 🧩 **Skills system:** Extend agents with modular, domain-specific instruction packages
+- 🛠️ **Flexible tool execution:** A generic `Tool` interface allows easy tool definition
+- 👤 **Human-in-the-loop:** Includes a built-in user input tool that enables human feedback or clarification during agent execution
+- 🧠 **Context management:** Automatically summarizes conversation history when approaching context limits
+- 🔁 **Flexible provider support:** Pre-built support for OpenAI-compatible APIs, LiteLLM, or bring your own client
+- 🖼️ **Multimodal support:** Process images, video, and audio with automatic format conversion
 
 ## Installation
 

@@ -116,6 +118,7 @@ pip install 'stirrup[litellm]' # or: uv add 'stirrup[litellm]'
 pip install 'stirrup[docker]' # or: uv add 'stirrup[docker]'
 pip install 'stirrup[e2b]' # or: uv add 'stirrup[e2b]'
 pip install 'stirrup[mcp]' # or: uv add 'stirrup[mcp]'
+pip install 'stirrup[browser]' # or: uv add 'stirrup[browser]'
 ```
 
 ## Quick Start

{stirrup-0.1.2 → stirrup-0.1.4}/README.md
@@ -25,16 +25,16 @@ Stirrup is a lightweight framework, or starting point template, for building age
 
 ## Features
 
-- **…
-…
-…
-…
-…
-- **…
-- **…
-- **Context management:** Automatically summarizes conversation history when approaching context limits
-- **Flexible provider support:** Pre-built support for OpenAI-compatible APIs
-- **Multimodal support:** Process images, video, and audio with automatic format conversion
+- 🧪 **Code execution:** Run code locally, in Docker, or in an E2B sandbox
+- 🔎 **Online search / web browsing:** Search and fetch web pages
+- 🔌 **MCP client support:** Connect to MCP servers and use their tools/resources
+- 📄 **Document input and output:** Import files into context and produce file outputs
+- 🧩 **Skills system:** Extend agents with modular, domain-specific instruction packages
+- 🛠️ **Flexible tool execution:** A generic `Tool` interface allows easy tool definition
+- 👤 **Human-in-the-loop:** Includes a built-in user input tool that enables human feedback or clarification during agent execution
+- 🧠 **Context management:** Automatically summarizes conversation history when approaching context limits
+- 🔁 **Flexible provider support:** Pre-built support for OpenAI-compatible APIs, LiteLLM, or bring your own client
+- 🖼️ **Multimodal support:** Process images, video, and audio with automatic format conversion
 
 ## Installation
 

@@ -50,6 +50,7 @@ pip install 'stirrup[litellm]' # or: uv add 'stirrup[litellm]'
 pip install 'stirrup[docker]' # or: uv add 'stirrup[docker]'
 pip install 'stirrup[e2b]' # or: uv add 'stirrup[e2b]'
 pip install 'stirrup[mcp]' # or: uv add 'stirrup[mcp]'
+pip install 'stirrup[browser]' # or: uv add 'stirrup[browser]'
 ```
 
 ## Quick Start

{stirrup-0.1.2 → stirrup-0.1.4}/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "stirrup"
-version = "0.1.2"
+version = "0.1.4"
 description = "The lightweight foundation for building agents"
 readme = "README.md"
 license = { file = "LICENSE" }

@@ -41,7 +41,8 @@ litellm = ["litellm>=1.79.3"]
 e2b = ["e2b-code-interpreter>=2.3.0"]
 docker = ["docker>=7.0.0", "python-dotenv>=1.0.0"]
 mcp = ["mcp>=1.9.0"]
-all = ["stirrup[litellm,e2b,docker,mcp]"]
+browser = ["browser-use>=0.11.3"]
+all = ["stirrup[litellm,e2b,docker,mcp,browser]"]
 
 [project.urls]
 Homepage = "https://github.com/ArtificialAnalysis/Stirrup"
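
Because `all` now includes `browser`, installing `stirrup[all]` also pulls in `browser-use>=0.11.3`; the extra can equally be installed on its own with `pip install 'stirrup[browser]'`, mirroring the new install line in the README hunks above.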

@@ -70,6 +71,7 @@ anyio_mode = "auto"
 markers = [
     "docker: marks tests as requiring the docker package (deselect with '-m \"not docker\"')",
     "e2b: marks tests as requiring the e2b package (deselect with '-m \"not e2b\"')",
+    "browser: marks tests as requiring headless Chrome (deselect with '-m \"not browser\"')",
 ]
 
 [tool.ruff]
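
The new marker follows the existing `docker`/`e2b` convention, so tests needing headless Chrome can be deselected exactly as the marker text suggests: `pytest -m "not browser"`.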

{stirrup-0.1.2 → stirrup-0.1.4}/src/stirrup/__init__.py
@@ -35,6 +35,7 @@ from stirrup.core.models import (
     AssistantMessage,
     AudioContentBlock,
     ChatMessage,
+    EmptyParams,
     ImageContentBlock,
     LLMClient,
     SubAgentMetadata,

@@ -58,6 +59,7 @@ __all__ = [
     "AudioContentBlock",
     "ChatMessage",
     "ContextOverflowError",
+    "EmptyParams",
     "ImageContentBlock",
     "LLMClient",
     "SubAgentMetadata",
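
With the matching `__all__` entry, `EmptyParams` is importable from the package root; a one-line sketch (its role as a parameter model for argument-less tools is inferred from the name, not confirmed by this diff):

```python
# New in 0.1.4: resolves without reaching into stirrup.core.models.
from stirrup import EmptyParams
```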

{stirrup-0.1.2 → stirrup-0.1.4}/src/stirrup/clients/__init__.py
@@ -3,12 +3,17 @@
 The default client is ChatCompletionsClient, which uses the OpenAI SDK directly
 and supports any OpenAI-compatible API via the `base_url` parameter.
 
+OpenResponsesClient uses the OpenAI Responses API (responses.create) for providers
+that support this newer API format.
+
 For multi-provider support via LiteLLM, install the litellm extra:
     pip install stirrup[litellm]
 """
 
 from stirrup.clients.chat_completions_client import ChatCompletionsClient
+from stirrup.clients.open_responses_client import OpenResponsesClient
 
 __all__ = [
     "ChatCompletionsClient",
+    "OpenResponsesClient",
 ]
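
Both clients are now importable from `stirrup.clients`. A minimal selection sketch; the `model` and `max_tokens` keyword names for `ChatCompletionsClient` are inferred from the `self._model = model` / `self._max_tokens = max_tokens` assignments visible below, and the endpoint and key values are placeholders:

```python
from stirrup.clients import ChatCompletionsClient, OpenResponsesClient

# Chat Completions against any OpenAI-compatible endpoint, per the module
# docstring; keyword names inferred from the __init__ hunks below.
client = ChatCompletionsClient(
    model="gpt-4o",                       # placeholder model id
    max_tokens=16_384,                    # placeholder context budget
    base_url="http://localhost:8000/v1",  # any OpenAI-compatible server
    api_key="sk-...",                     # falls back to OPENROUTER_API_KEY if None
)

# OpenResponsesClient targets the OpenAI Responses API (responses.create);
# its constructor is not shown in this diff, so it is left unconstructed here.
```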

{stirrup-0.1.2 → stirrup-0.1.4}/src/stirrup/clients/chat_completions_client.py
@@ -67,7 +67,6 @@ class ChatCompletionsClient(LLMClient):
         *,
         base_url: str | None = None,
         api_key: str | None = None,
-        supports_audio_input: bool = False,
         reasoning_effort: str | None = None,
         timeout: float | None = None,
         max_retries: int = 2,
|
@@ -82,7 +81,6 @@ class ChatCompletionsClient(LLMClient):
|
|
|
82
81
|
Use for OpenAI-compatible providers (e.g., 'http://localhost:8000/v1').
|
|
83
82
|
api_key: API key for authentication. If None, reads from OPENROUTER_API_KEY
|
|
84
83
|
environment variable.
|
|
85
|
-
supports_audio_input: Whether the model supports audio inputs. Defaults to False.
|
|
86
84
|
reasoning_effort: Reasoning effort level for extended thinking models
|
|
87
85
|
(e.g., 'low', 'medium', 'high'). Only used with o1/o3 style models.
|
|
88
86
|
timeout: Request timeout in seconds. If None, uses OpenAI SDK default.
|
|

@@ -92,7 +90,6 @@ class ChatCompletionsClient(LLMClient):
         """
         self._model = model
         self._max_tokens = max_tokens
-        self._supports_audio_input = supports_audio_input
         self._reasoning_effort = reasoning_effort
         self._kwargs = kwargs or {}
 
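
Taken together, these three hunks remove `supports_audio_input` from the public constructor entirely, so 0.1.2-era call sites must drop the keyword. A hedged before/after sketch (model name and token count are placeholders):

```python
from stirrup.clients import ChatCompletionsClient

# 0.1.2 (keyword no longer accepted as of 0.1.4):
# client = ChatCompletionsClient(model="gpt-4o", max_tokens=16_384,
#                                supports_audio_input=True)

# 0.1.4: construct without the flag; how audio capability is signalled
# now is not visible in this diff.
client = ChatCompletionsClient(model="gpt-4o", max_tokens=16_384)
```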

{stirrup-0.1.2 → stirrup-0.1.4}/src/stirrup/clients/litellm_client.py
@@ -7,7 +7,7 @@ Requires the litellm extra: `pip install stirrup[litellm]`
 """
 
 import logging
-from typing import Any
+from typing import Any, Literal
 
 try:
     from litellm import acompletion

@@ -38,6 +38,8 @@ __all__ = [
 
 LOGGER = logging.getLogger(__name__)
 
+type ReasoningEffort = Literal["none", "minimal", "low", "medium", "high", "xhigh", "default"]
+
 
 class LiteLLMClient(LLMClient):
     """LiteLLM-based client supporting multiple LLM providers with unified interface.
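
The alias uses the PEP 695 `type` statement, which needs Python 3.12+ and is consistent with the package's `Requires-Python: >=3.12`. A standalone sketch of how the alias constrains values for a type checker:

```python
from typing import Literal

# Mirrors the alias added above (PEP 695 syntax, Python 3.12+).
type ReasoningEffort = Literal["none", "minimal", "low", "medium", "high", "xhigh", "default"]

effort: ReasoningEffort = "high"  # accepted
# effort = "maximum"              # flagged by a type checker, not at runtime
```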

@@ -49,8 +51,8 @@
         self,
         model_slug: str,
         max_tokens: int,
-        …
-        reasoning_effort: …
+        api_key: str | None = None,
+        reasoning_effort: ReasoningEffort | None = None,
         kwargs: dict[str, Any] | None = None,
     ) -> None:
         """Initialize LiteLLM client with model configuration and capabilities.

@@ -58,15 +60,13 @@
         Args:
             model_slug: Model identifier for LiteLLM (e.g., 'anthropic/claude-3-5-sonnet-20241022')
             max_tokens: Maximum context window size in tokens
-            supports_audio_input: Whether the model supports audio inputs
             reasoning_effort: Reasoning effort level for extended thinking models (e.g., 'medium', 'high')
             kwargs: Additional arguments to pass to LiteLLM completion calls
         """
         self._model_slug = model_slug
-        self._supports_video_input = False
-        self._supports_audio_input = supports_audio_input
         self._max_tokens = max_tokens
-        self._reasoning_effort = reasoning_effort
+        self._reasoning_effort: ReasoningEffort | None = reasoning_effort
+        self._api_key = api_key
         self._kwargs = kwargs or {}
 
     @property
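
Putting the new signature together, a construction sketch: the model slug comes from the docstring's own example, while the token count and environment variable are illustrative assumptions:

```python
import os

from stirrup.clients.litellm_client import LiteLLMClient

client = LiteLLMClient(
    model_slug="anthropic/claude-3-5-sonnet-20241022",  # docstring's example slug
    max_tokens=200_000,                                 # illustrative context size
    api_key=os.environ.get("ANTHROPIC_API_KEY"),        # new in 0.1.4, optional
    reasoning_effort="medium",                          # now typed as ReasoningEffort
)
```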

@@ -92,6 +92,8 @@ class LiteLLMClient(LLMClient):
             tools=to_openai_tools(tools) if tools else None,
             tool_choice="auto" if tools else None,
             max_tokens=self._max_tokens,
+            reasoning_effort=self._reasoning_effort,
+            api_key=self._api_key,
             **self._kwargs,
         )
 

@@ -103,14 +105,20 @@
         )
 
         msg = choice["message"]
-
         reasoning: Reasoning | None = None
         if getattr(msg, "reasoning_content", None) is not None:
             reasoning = Reasoning(content=msg.reasoning_content)
         if getattr(msg, "thinking_blocks", None) is not None and len(msg.thinking_blocks) > 0:
-            …
-            …
-            …
+            if len(msg.thinking_blocks) > 1:
+                raise ValueError("Found multiple thinking blocks in the response")
+
+            signature = msg.thinking_blocks[0].get("thinking_signature", None)
+            content = msg.thinking_blocks[0].get("thinking", None)
+
+            if signature is None and content is None:
+                raise ValueError("Signature and content not found in the thinking block response")
+
+            reasoning = Reasoning(signature=signature, content=content)
 
         usage = r["usage"]
 
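
The rewritten branch insists on exactly one thinking block and on at least one of the two fields being present. An illustrative payload shape, using only the keys the new code reads; real LiteLLM provider payloads may carry more:

```python
# Illustrative single thinking block; only the two keys read above are shown.
thinking_blocks = [
    {
        "thinking": "Working through the request step by step...",
        "thinking_signature": "sig_0123abcd",  # provider-issued signature
    }
]
```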

@@ -119,6 +127,7 @@
                 tool_call_id=tc.get("id"),
                 name=tc["function"]["name"],
                 arguments=tc["function"].get("arguments", "") or "",
+                signature=tc.get("provider_specific_fields", {}).get("thought_signature", None),
             )
             for tc in (msg.get("tool_calls") or [])
         ]