stirrup 0.1.3-py3-none-any.whl → 0.1.4-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
stirrup/clients/__init__.py CHANGED
@@ -3,12 +3,17 @@
  The default client is ChatCompletionsClient, which uses the OpenAI SDK directly
  and supports any OpenAI-compatible API via the `base_url` parameter.

+ OpenResponsesClient uses the OpenAI Responses API (responses.create) for providers
+ that support this newer API format.
+
  For multi-provider support via LiteLLM, install the litellm extra:
      pip install stirrup[litellm]
  """

  from stirrup.clients.chat_completions_client import ChatCompletionsClient
+ from stirrup.clients.open_responses_client import OpenResponsesClient

  __all__ = [
      "ChatCompletionsClient",
+     "OpenResponsesClient",
  ]
stirrup/clients/open_responses_client.py ADDED
@@ -0,0 +1,434 @@
+ """OpenAI SDK-based LLM client for the Responses API.
+
+ This client uses the official OpenAI Python SDK's responses.create() method,
+ supporting both OpenAI's API and any OpenAI-compatible endpoint that implements
+ the Responses API via the `base_url` parameter.
+ """
+
+ import logging
+ import os
+ from typing import Any
+
+ from openai import (
+     APIConnectionError,
+     APITimeoutError,
+     AsyncOpenAI,
+     InternalServerError,
+     RateLimitError,
+ )
+ from tenacity import retry, retry_if_exception_type, stop_after_attempt, wait_exponential
+
+ from stirrup.core.exceptions import ContextOverflowError
+ from stirrup.core.models import (
+     AssistantMessage,
+     AudioContentBlock,
+     ChatMessage,
+     Content,
+     EmptyParams,
+     ImageContentBlock,
+     LLMClient,
+     Reasoning,
+     SystemMessage,
+     TokenUsage,
+     Tool,
+     ToolCall,
+     ToolMessage,
+     UserMessage,
+     VideoContentBlock,
+ )
+
+ __all__ = [
+     "OpenResponsesClient",
+ ]
+
+ LOGGER = logging.getLogger(__name__)
+
+
+ def _content_to_open_responses_input(content: Content) -> list[dict[str, Any]]:
+     """Convert Content blocks to OpenResponses input content format.
+
+     Uses input_text for text content (vs output_text for responses).
+     """
+     if isinstance(content, str):
+         return [{"type": "input_text", "text": content}]
+
+     out: list[dict[str, Any]] = []
+     for block in content:
+         if isinstance(block, str):
+             out.append({"type": "input_text", "text": block})
+         elif isinstance(block, ImageContentBlock):
+             out.append({"type": "input_image", "image_url": block.to_base64_url()})
+         elif isinstance(block, AudioContentBlock):
+             out.append(
+                 {
+                     "type": "input_audio",
+                     "input_audio": {
+                         "data": block.to_base64_url().split(",")[1],
+                         "format": block.extension,
+                     },
+                 }
+             )
+         elif isinstance(block, VideoContentBlock):
+             out.append({"type": "input_file", "file_data": block.to_base64_url()})
+         else:
+             raise NotImplementedError(f"Unsupported content block: {type(block)}")
+     return out
+
+
+ def _content_to_open_responses_output(content: Content) -> list[dict[str, Any]]:
+     """Convert Content blocks to OpenResponses output content format.
+
+     Uses output_text for assistant message content.
+     """
+     if isinstance(content, str):
+         return [{"type": "output_text", "text": content}]
+
+     out: list[dict[str, Any]] = []
+     for block in content:
+         if isinstance(block, str):
+             out.append({"type": "output_text", "text": block})
+         else:
+             raise NotImplementedError(f"Unsupported output content block: {type(block)}")
+     return out
+
+
+ def _to_open_responses_tools(tools: dict[str, Tool]) -> list[dict[str, Any]]:
+     """Convert Tool objects to OpenResponses function format.
+
+     OpenResponses API expects tools with name/description/parameters at top level,
+     not nested under a 'function' key like Chat Completions API.
+
+     Args:
+         tools: Dictionary mapping tool names to Tool objects.
+
+     Returns:
+         List of tool definitions in OpenResponses format.
+     """
+     out: list[dict[str, Any]] = []
+     for t in tools.values():
+         tool_def: dict[str, Any] = {
+             "type": "function",
+             "name": t.name,
+             "description": t.description,
+         }
+         if t.parameters is not EmptyParams:
+             tool_def["parameters"] = t.parameters.model_json_schema()
+         out.append(tool_def)
+     return out
+
+
+ def _to_open_responses_input(
+     msgs: list[ChatMessage],
+ ) -> tuple[str | None, list[dict[str, Any]]]:
+     """Convert ChatMessage list to OpenResponses (instructions, input) tuple.
+
+     SystemMessage content is extracted as the instructions parameter.
+     Other messages are converted to input items.
+
+     Returns:
+         Tuple of (instructions, input_items) where instructions is the system
+         message content (or None) and input_items is the list of input items.
+     """
+     instructions: str | None = None
+     input_items: list[dict[str, Any]] = []
+
+     for m in msgs:
+         if isinstance(m, SystemMessage):
+             # Extract system message as instructions
+             if isinstance(m.content, str):
+                 instructions = m.content
+             else:
+                 # Join text content blocks for instructions
+                 instructions = "\n".join(block if isinstance(block, str) else "" for block in m.content)
+         elif isinstance(m, UserMessage):
+             input_items.append(
+                 {
+                     "role": "user",
+                     "content": _content_to_open_responses_input(m.content),
+                 }
+             )
+         elif isinstance(m, AssistantMessage):
+             # For assistant messages, we need to add them as response output items
+             # First add any text content as a message item
+             content_str = (
+                 m.content
+                 if isinstance(m.content, str)
+                 else "\n".join(block if isinstance(block, str) else "" for block in m.content)
+             )
+             if content_str:
+                 input_items.append(
+                     {
+                         "type": "message",
+                         "role": "assistant",
+                         "content": [{"type": "output_text", "text": content_str}],
+                     }
+                 )
+
+             # Add tool calls as separate function_call items
+             input_items.extend(
+                 {
+                     "type": "function_call",
+                     "call_id": tc.tool_call_id,
+                     "name": tc.name,
+                     "arguments": tc.arguments,
+                 }
+                 for tc in m.tool_calls
+             )
+         elif isinstance(m, ToolMessage):
+             # Tool results are function_call_output items
+             content_str = m.content if isinstance(m.content, str) else str(m.content)
+             input_items.append(
+                 {
+                     "type": "function_call_output",
+                     "call_id": m.tool_call_id,
+                     "output": content_str,
+                 }
+             )
+         else:
+             raise NotImplementedError(f"Unsupported message type: {type(m)}")
+
+     return instructions, input_items
+
+
+ def _get_attr(obj: Any, name: str, default: Any = None) -> Any:  # noqa: ANN401
+     """Get attribute from object or dict, with fallback default."""
+     if isinstance(obj, dict):
+         return obj.get(name, default)
+     return getattr(obj, name, default)
+
+
+ def _parse_response_output(
+     output: list[Any],
+ ) -> tuple[str, list[ToolCall], Reasoning | None]:
+     """Parse response output items into content, tool_calls, and reasoning.
+
+     Args:
+         output: List of output items from the response.
+
+     Returns:
+         Tuple of (content_text, tool_calls, reasoning).
+     """
+     content_parts: list[str] = []
+     tool_calls: list[ToolCall] = []
+     reasoning: Reasoning | None = None
+
+     for item in output:
+         item_type = _get_attr(item, "type")
+
+         if item_type == "message":
+             # Extract text content from message
+             msg_content = _get_attr(item, "content", [])
+             for content_item in msg_content:
+                 content_type = _get_attr(content_item, "type")
+                 if content_type == "output_text":
+                     text = _get_attr(content_item, "text", "")
+                     content_parts.append(text)
+
+         elif item_type == "function_call":
+             call_id = _get_attr(item, "call_id")
+             name = _get_attr(item, "name")
+             arguments = _get_attr(item, "arguments", "")
+             tool_calls.append(
+                 ToolCall(
+                     tool_call_id=call_id,
+                     name=name,
+                     arguments=arguments,
+                 )
+             )
+
+         elif item_type == "reasoning":
+             # Extract reasoning/thinking content - try multiple possible attribute names
+             # summary can be a list of Summary objects with .text attribute
+             summary = _get_attr(item, "summary")
+             if summary:
+                 if isinstance(summary, list):
+                     # Extract text from Summary objects
+                     thinking = "\n".join(_get_attr(s, "text", "") for s in summary if _get_attr(s, "text"))
+                 else:
+                     thinking = str(summary)
+             else:
+                 thinking = _get_attr(item, "thinking") or ""
+
+             if thinking:
+                 reasoning = Reasoning(content=thinking)
+
+     return "\n".join(content_parts), tool_calls, reasoning
+
+
+ class OpenResponsesClient(LLMClient):
+     """OpenAI SDK-based client using the Responses API.
+
+     Uses the official OpenAI Python SDK's responses.create() method.
+     Supports custom base_url for OpenAI-compatible providers that implement
+     the Responses API.
+
+     Includes automatic retries for transient failures and token usage tracking.
+
+     Example:
+         >>> # Standard OpenAI usage
+         >>> client = OpenResponsesClient(model="gpt-4o", max_tokens=128_000)
+         >>>
+         >>> # Custom OpenAI-compatible endpoint
+         >>> client = OpenResponsesClient(
+         ...     model="gpt-4o",
+         ...     base_url="http://localhost:8000/v1",
+         ...     api_key="your-api-key",
+         ... )
+     """
+
+     def __init__(
+         self,
+         model: str,
+         max_tokens: int = 64_000,
+         *,
+         base_url: str | None = None,
+         api_key: str | None = None,
+         reasoning_effort: str | None = None,
+         timeout: float | None = None,
+         max_retries: int = 2,
+         instructions: str | None = None,
+         kwargs: dict[str, Any] | None = None,
+     ) -> None:
+         """Initialize OpenAI SDK client with model configuration for Responses API.
+
+         Args:
+             model: Model identifier (e.g., 'gpt-4o', 'o1-preview').
+             max_tokens: Maximum output tokens. Defaults to 64,000.
+             base_url: API base URL. If None, uses OpenAI's standard URL.
+                 Use for OpenAI-compatible providers.
+             api_key: API key for authentication. If None, reads from the OPENAI_API_KEY
+                 environment variable.
+             reasoning_effort: Reasoning effort level for extended thinking models
+                 (e.g., 'low', 'medium', 'high'). Only used with o1/o3 style models.
+             timeout: Request timeout in seconds. If None, uses OpenAI SDK default.
+             max_retries: Number of retries for transient errors. Defaults to 2.
+             instructions: Default system-level instructions. Can be overridden by
+                 SystemMessage in the messages list.
+             kwargs: Additional arguments passed to responses.create().
+         """
+         self._model = model
+         self._max_tokens = max_tokens
+         self._reasoning_effort = reasoning_effort
+         self._default_instructions = instructions
+         self._kwargs = kwargs or {}
+
+         # Initialize AsyncOpenAI client
+         resolved_api_key = api_key or os.environ.get("OPENAI_API_KEY")
+
+         # Strip /responses suffix if present - SDK appends it automatically
+         resolved_base_url = base_url
+         if resolved_base_url and resolved_base_url.rstrip("/").endswith("/responses"):
+             resolved_base_url = resolved_base_url.rstrip("/").removesuffix("/responses")
+
+         self._client = AsyncOpenAI(
+             api_key=resolved_api_key,
+             base_url=resolved_base_url,
+             timeout=timeout,
+             max_retries=max_retries,
+         )
+
+     @property
+     def max_tokens(self) -> int:
+         """Maximum output tokens."""
+         return self._max_tokens
+
+     @property
+     def model_slug(self) -> str:
+         """Model identifier."""
+         return self._model
+
+     @retry(
+         retry=retry_if_exception_type(
+             (
+                 APIConnectionError,
+                 APITimeoutError,
+                 RateLimitError,
+                 InternalServerError,
+             )
+         ),
+         stop=stop_after_attempt(3),
+         wait=wait_exponential(multiplier=1, min=1, max=10),
+     )
+     async def generate(
+         self,
+         messages: list[ChatMessage],
+         tools: dict[str, Tool],
+     ) -> AssistantMessage:
+         """Generate assistant response with optional tool calls using Responses API.
+
+         Retries up to 3 times on transient errors (connection, timeout, rate limit,
+         internal server errors) with exponential backoff.
+
+         Args:
+             messages: List of conversation messages.
+             tools: Dictionary mapping tool names to Tool objects.
+
+         Returns:
+             AssistantMessage containing the model's response, any tool calls,
+             and token usage statistics.
+
+         Raises:
+             ContextOverflowError: If the response is incomplete due to token limits.
+         """
+         # Convert messages to OpenResponses format
+         instructions, input_items = _to_open_responses_input(messages)
+
+         # Use provided instructions or fall back to default
+         final_instructions = instructions or self._default_instructions
+
+         # Build request kwargs
+         request_kwargs: dict[str, Any] = {
+             "model": self._model,
+             "input": input_items,
+             "max_output_tokens": self._max_tokens,
+             **self._kwargs,
+         }
+
+         # Add instructions if present
+         if final_instructions:
+             request_kwargs["instructions"] = final_instructions
+
+         # Add tools if provided
+         if tools:
+             request_kwargs["tools"] = _to_open_responses_tools(tools)
+             request_kwargs["tool_choice"] = "auto"
+
+         # Add reasoning effort if configured (for o1/o3 models)
+         if self._reasoning_effort:
+             request_kwargs["reasoning"] = {"effort": self._reasoning_effort}
+
+         # Make API call
+         response = await self._client.responses.create(**request_kwargs)
+
+         # Check for incomplete response (context overflow)
+         if response.status == "incomplete":
+             stop_reason = getattr(response, "incomplete_details", None)
+             raise ContextOverflowError(
+                 f"Response incomplete for model {self.model_slug}: {stop_reason}. "
+                 "Reduce max_tokens or message length and try again."
+             )
+
+         # Parse response output
+         content, tool_calls, reasoning = _parse_response_output(response.output)
+
+         # Parse token usage
+         usage = response.usage
+         input_tokens = usage.input_tokens if usage else 0
+         output_tokens = usage.output_tokens if usage else 0
+
+         # Handle reasoning tokens if available
+         reasoning_tokens = 0
+         if usage and hasattr(usage, "output_tokens_details") and usage.output_tokens_details:
+             reasoning_tokens = getattr(usage.output_tokens_details, "reasoning_tokens", 0) or 0
+             output_tokens = output_tokens - reasoning_tokens
+
+         return AssistantMessage(
+             reasoning=reasoning,
+             content=content,
+             tool_calls=tool_calls,
+             token_usage=TokenUsage(
+                 input=input_tokens,
+                 output=output_tokens,
+                 reasoning=reasoning_tokens,
+             ),
+         )
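With the module in place, stirrup ships a second first-party client. A minimal sketch of driving it directly follows; the UserMessage construction and the empty tool map are assumptions based on the models imported above, not code from this release:

import asyncio

from stirrup.clients import OpenResponsesClient
from stirrup.core.models import UserMessage


async def main() -> None:
    # Any endpoint that implements the Responses API works; a trailing /responses in base_url is stripped.
    client = OpenResponsesClient(
        model="gpt-4o",
        base_url="http://localhost:8000/v1",
        api_key="your-api-key",
    )
    reply = await client.generate([UserMessage(content="Say hello")], tools={})
    print(reply.content, reply.token_usage)


asyncio.run(main())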
stirrup/core/agent.py CHANGED
@@ -234,6 +234,7 @@ class Agent[FinishParams: BaseModel, FinishMeta]:
          self._pending_skills_dir: Path | None = None
          self._resume: bool = False
          self._clear_cache_on_success: bool = True
+         self._cache_on_interrupt: bool = True

          # Instance-scoped state (populated during __aenter__, isolated per agent instance)
          self._active_tools: dict[str, Tool] = {}
@@ -277,6 +278,7 @@ class Agent[FinishParams: BaseModel, FinishMeta]:
          skills_dir: Path | str | None = None,
          resume: bool = False,
          clear_cache_on_success: bool = True,
+         cache_on_interrupt: bool = True,
      ) -> Self:
          """Configure a session and return self for use as async context manager.

@@ -299,6 +301,10 @@ class Agent[FinishParams: BaseModel, FinishMeta]:
              clear_cache_on_success: If True (default), automatically clear the cache
                  when the agent completes successfully. Set to False
                  to preserve caches for inspection or debugging.
+             cache_on_interrupt: If True (default), set up a SIGINT handler to cache
+                 state on Ctrl+C. Set to False when running agents in
+                 threads or subprocesses where signal handlers cannot
+                 be registered from non-main threads.

          Returns:
              Self, for use with `async with agent.session(...) as session:`
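The new flag works around a standard-library constraint rather than anything stirrup-specific: CPython only lets signal handlers be installed from the main thread, so a root agent session started from a worker thread should pass cache_on_interrupt=False. A quick illustration of the underlying behavior:

import signal
import threading


def worker() -> None:
    try:
        # The same kind of call the root agent makes when installing its SIGINT caching handler
        signal.signal(signal.SIGINT, signal.default_int_handler)
    except ValueError as exc:
        # Raised off the main thread: "signal only works in main thread of the main interpreter"
        print(f"cannot install handler here: {exc}")


threading.Thread(target=worker).start()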
@@ -317,6 +323,7 @@ class Agent[FinishParams: BaseModel, FinishMeta]:
          self._pending_skills_dir = Path(skills_dir) if skills_dir else None
          self._resume = resume
          self._clear_cache_on_success = clear_cache_on_success
+         self._cache_on_interrupt = cache_on_interrupt
          return self

      def _handle_interrupt(self, _signum: int, _frame: object) -> None:
@@ -655,6 +662,13 @@ class Agent[FinishParams: BaseModel, FinishMeta]:
              state.skills_metadata = load_skills_metadata(skills_path)
              logger.debug("[%s __aenter__] Loaded %d skills", self._name, len(state.skills_metadata))
              self._pending_skills_dir = None  # Clear pending state
+         elif parent_state and parent_state.skills_metadata:
+             # Sub-agent: inherit skills from parent
+             state.skills_metadata = parent_state.skills_metadata
+             logger.debug("[%s __aenter__] Inherited %d skills from parent", self._name, len(state.skills_metadata))
+             # Transfer skills directory from parent's exec_env to sub-agent's exec_env
+             if state.exec_env and parent_state.exec_env:
+                 await state.exec_env.upload_files("skills", source_env=parent_state.exec_env)

          # Configure and enter logger context
          self._logger.name = self._name
@@ -664,7 +678,7 @@ class Agent[FinishParams: BaseModel, FinishMeta]:
          self._logger.__enter__()

          # Set up signal handler for graceful caching on interrupt (root agent only)
-         if current_depth == 0:
+         if current_depth == 0 and self._cache_on_interrupt:
              self._original_sigint = signal.getsignal(signal.SIGINT)
              signal.signal(signal.SIGINT, self._handle_interrupt)

@@ -814,7 +828,9 @@ class Agent[FinishParams: BaseModel, FinishMeta]:

          if tool:
              try:
-                 params = tool.parameters.model_validate_json(tool_call.arguments)
+                 # Normalize empty arguments to valid empty JSON object
+                 args = tool_call.arguments if tool_call.arguments and tool_call.arguments.strip() else "{}"
+                 params = tool.parameters.model_validate_json(args)

                  # Set parent depth for sub-agent tools to read
                  prev_depth = _PARENT_DEPTH.set(self._logger.depth)
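The normalization covers tool calls whose arguments arrive as an empty or whitespace-only string: pydantic rejects those as invalid JSON, while "{}" validates cleanly for a tool with no parameters. A small stand-alone illustration, where NoArgs is a stand-in for a tool's parameter model such as EmptyParams:

from pydantic import BaseModel, ValidationError


class NoArgs(BaseModel):
    pass


NoArgs.model_validate_json("{}")  # parses fine for a no-argument tool call
try:
    NoArgs.model_validate_json("")  # empty arguments, which the new code maps to "{}"
except ValidationError as exc:
    print(exc)  # invalid JSON, the failure the normalization above avoids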
stirrup/core/models.py CHANGED
@@ -421,12 +421,14 @@ class ToolUseCountMetadata(BaseModel):

      Implements Addable protocol for aggregation. Use this for tools that only need
      to track how many times they were called.
+
+     Subclasses can override __add__ with their own type thanks to Self typing.
      """

      num_uses: int = 1

-     def __add__(self, other: "ToolUseCountMetadata") -> "ToolUseCountMetadata":
-         return ToolUseCountMetadata(num_uses=self.num_uses + other.num_uses)
+     def __add__(self, other: Self) -> Self:
+         return self.__class__(num_uses=self.num_uses + other.num_uses)


  class ToolResult[M](BaseModel):
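A short sketch of what the Self-typed __add__ buys; SearchToolMetadata is a hypothetical subclass, not part of the package:

from stirrup.core.models import ToolUseCountMetadata


class SearchToolMetadata(ToolUseCountMetadata):
    """Hypothetical per-tool counter; adds no fields of its own."""


total = SearchToolMetadata() + SearchToolMetadata(num_uses=3)
assert isinstance(total, SearchToolMetadata)  # the old __add__ collapsed this to ToolUseCountMetadata
assert total.num_uses == 4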
stirrup/tools/__init__.py CHANGED
@@ -47,6 +47,7 @@ Optional tool providers require explicit imports from their submodules:
  - DockerCodeExecToolProvider: `from stirrup.tools.code_backends.docker import DockerCodeExecToolProvider`
  - E2BCodeExecToolProvider: `from stirrup.tools.code_backends.e2b import E2BCodeExecToolProvider`
  - MCPToolProvider: `from stirrup.tools.mcp import MCPToolProvider`
+ - BrowserUseToolProvider: `from stirrup.tools.browser_use import BrowserUseToolProvider`
  """

  from typing import Any