aloop-0.1.0-py3-none-any.whl

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects changes between package versions as they appear in their public registries.

Potentially problematic release: this version of aloop might be problematic.

Files changed (62)
  1. agent/__init__.py +0 -0
  2. agent/agent.py +182 -0
  3. agent/base.py +406 -0
  4. agent/context.py +126 -0
  5. agent/todo.py +149 -0
  6. agent/tool_executor.py +54 -0
  7. agent/verification.py +135 -0
  8. aloop-0.1.0.dist-info/METADATA +246 -0
  9. aloop-0.1.0.dist-info/RECORD +62 -0
  10. aloop-0.1.0.dist-info/WHEEL +5 -0
  11. aloop-0.1.0.dist-info/entry_points.txt +2 -0
  12. aloop-0.1.0.dist-info/licenses/LICENSE +21 -0
  13. aloop-0.1.0.dist-info/top_level.txt +9 -0
  14. cli.py +19 -0
  15. config.py +146 -0
  16. interactive.py +865 -0
  17. llm/__init__.py +51 -0
  18. llm/base.py +26 -0
  19. llm/compat.py +226 -0
  20. llm/content_utils.py +309 -0
  21. llm/litellm_adapter.py +450 -0
  22. llm/message_types.py +245 -0
  23. llm/model_manager.py +265 -0
  24. llm/retry.py +95 -0
  25. main.py +246 -0
  26. memory/__init__.py +20 -0
  27. memory/compressor.py +554 -0
  28. memory/manager.py +538 -0
  29. memory/serialization.py +82 -0
  30. memory/short_term.py +88 -0
  31. memory/token_tracker.py +203 -0
  32. memory/types.py +51 -0
  33. tools/__init__.py +6 -0
  34. tools/advanced_file_ops.py +557 -0
  35. tools/base.py +51 -0
  36. tools/calculator.py +50 -0
  37. tools/code_navigator.py +975 -0
  38. tools/explore.py +254 -0
  39. tools/file_ops.py +150 -0
  40. tools/git_tools.py +791 -0
  41. tools/notify.py +69 -0
  42. tools/parallel_execute.py +420 -0
  43. tools/session_manager.py +205 -0
  44. tools/shell.py +147 -0
  45. tools/shell_background.py +470 -0
  46. tools/smart_edit.py +491 -0
  47. tools/todo.py +130 -0
  48. tools/web_fetch.py +673 -0
  49. tools/web_search.py +61 -0
  50. utils/__init__.py +15 -0
  51. utils/logger.py +105 -0
  52. utils/model_pricing.py +49 -0
  53. utils/runtime.py +75 -0
  54. utils/terminal_ui.py +422 -0
  55. utils/tui/__init__.py +39 -0
  56. utils/tui/command_registry.py +49 -0
  57. utils/tui/components.py +306 -0
  58. utils/tui/input_handler.py +393 -0
  59. utils/tui/model_ui.py +204 -0
  60. utils/tui/progress.py +292 -0
  61. utils/tui/status_bar.py +178 -0
  62. utils/tui/theme.py +165 -0
llm/litellm_adapter.py ADDED
@@ -0,0 +1,450 @@
+"""LiteLLM adapter for unified LLM access across 100+ providers."""
+
+import json
+import logging
+from typing import Any, Dict, List, Optional, Tuple, Union
+
+import litellm
+
+from utils import get_logger
+
+from .content_utils import extract_text, extract_tool_calls_from_content
+from .message_types import (
+    LLMMessage,
+    LLMResponse,
+    StopReason,
+    ToolCall,
+    ToolCallBlock,
+    ToolResult,
+)
+from .retry import with_retry
+
+logger = get_logger(__name__)
+
+# Suppress LiteLLM's verbose logging to console
+# LiteLLM uses its own logger that prints to console by default
+litellm_logger = logging.getLogger("LiteLLM")
+litellm_logger.setLevel(logging.WARNING)  # Only show warnings and errors
+litellm_logger.propagate = False  # Don't propagate to root logger
+
+
+class LiteLLMAdapter:
+    """LiteLLM adapter supporting 100+ LLM providers."""
+
+    def __init__(self, model: str, **kwargs):
+        """Initialize LiteLLM adapter.
+
+        Args:
+            model: LiteLLM model identifier (e.g., "anthropic/claude-3-5-sonnet-20241022")
+            **kwargs: Additional configuration:
+                - api_key: API key (optional, uses env vars by default)
+                - api_base: Custom base URL
+                - drop_params: Drop unsupported params (default: True)
+                - timeout: Request timeout in seconds
+        """
+        # Extract model and provider
+        self.model = model
+        self.provider = model.split("/")[0] if "/" in model else "unknown"
+
+        # Extract configuration from kwargs
+        self.api_key = kwargs.pop("api_key", None)
+        self.api_base = kwargs.pop("api_base", None)
+        self.drop_params = kwargs.pop("drop_params", True)
+        self.timeout = kwargs.pop("timeout", 600)
+
+        # Configure LiteLLM global settings
+        litellm.drop_params = self.drop_params
+        litellm.set_verbose = False  # Disable verbose output
+        litellm.suppress_debug_info = True  # Suppress debug info
+
+        # Also suppress httpx and openai loggers that LiteLLM uses
+        logging.getLogger("httpx").setLevel(logging.WARNING)
+        logging.getLogger("openai").setLevel(logging.WARNING)
+        logging.getLogger("anthropic").setLevel(logging.WARNING)
+
+        logger.info(f"Initialized LiteLLM adapter for provider: {self.provider}, model: {model}")
+
+    @with_retry()
+    async def _make_api_call_async(self, **call_params):
+        """Internal async API call with retry logic."""
+        acompletion = getattr(litellm, "acompletion", None)
+        if acompletion is None:
+            raise RuntimeError("LiteLLM async completion is unavailable.")
+        return await acompletion(**call_params)
+
+    def _build_call_params(
+        self,
+        messages: List[LLMMessage],
+        tools: Optional[List[Dict[str, Any]]],
+        max_tokens: int,
+        **kwargs,
+    ) -> Tuple[List[Dict[str, Any]], Dict[str, Any]]:
+        """Prepare LiteLLM call parameters and converted messages."""
+        litellm_messages = self._convert_messages(messages)
+
+        call_params: Dict[str, Any] = {
+            "model": self.model,
+            "messages": litellm_messages,
+            "max_tokens": max_tokens,
+            "timeout": self.timeout,
+        }
+
+        # Add API key if provided
+        if self.api_key:
+            call_params["api_key"] = self.api_key
+
+        # Add custom base URL if provided
+        if self.api_base:
+            call_params["api_base"] = self.api_base
+
+        # Convert tools to OpenAI format if provided
+        if tools:
+            call_params["tools"] = self._convert_tools(tools)
+
+        # Add any additional parameters
+        call_params.update(kwargs)
+
+        return litellm_messages, call_params
+
+    async def call_async(
+        self,
+        messages: List[LLMMessage],
+        tools: Optional[List[Dict[str, Any]]] = None,
+        max_tokens: int = 4096,
+        **kwargs,
+    ) -> LLMResponse:
+        """Async LLM call via LiteLLM with automatic retry."""
+        litellm_messages, call_params = self._build_call_params(
+            messages=messages,
+            tools=tools,
+            max_tokens=max_tokens,
+            **kwargs,
+        )
+
+        logger.debug(
+            f"Calling LiteLLM async with model: {self.model}, messages: {len(litellm_messages)}, tools: {len(tools) if tools else 0}"
+        )
+        response = await self._make_api_call_async(**call_params)
+
+        if hasattr(response, "usage") and response.usage:
+            usage = response.usage
+            logger.debug(
+                f"Token Usage: Input={usage.get('prompt_tokens', 0)}, "
+                f"Output={usage.get('completion_tokens', 0)}, "
+                f"Total={usage.get('total_tokens', 0)}"
+            )
+
+        return self._convert_response(response)
+
+    def _convert_messages(self, messages: List[LLMMessage]) -> List[Dict]:
+        """Convert LLMMessage to LiteLLM format (OpenAI-compatible).
+
+        Handles both the new format (tool_calls field, tool role) and the
+        legacy format (tool_result blocks in user content).
+        """
+        litellm_messages = []
+
+        for msg in messages:
+            # Handle system messages
+            if msg.role == "system":
+                content = msg.content if isinstance(msg.content, str) else extract_text(msg.content)
+                litellm_messages.append({"role": "system", "content": content})
+
+            # Handle tool messages (new OpenAI format)
+            elif msg.role == "tool":
+                litellm_messages.append(
+                    {
+                        "role": "tool",
+                        "content": msg.content or "",
+                        "tool_call_id": msg.tool_call_id or "",
+                    }
+                )
+
+            # Handle user messages
+            elif msg.role == "user":
+                if isinstance(msg.content, str):
+                    litellm_messages.append({"role": "user", "content": msg.content})
+                elif isinstance(msg.content, list):
+                    # Legacy: handle tool results (Anthropic format) by
+                    # converting them to tool messages for OpenAI compatibility
+                    tool_messages = self._convert_anthropic_tool_results(msg.content)
+                    if tool_messages:
+                        litellm_messages.extend(tool_messages)
+                    else:
+                        # Not tool results, extract text
+                        content = extract_text(msg.content)
+                        litellm_messages.append({"role": "user", "content": content})
+                else:
+                    content = extract_text(msg.content)
+                    litellm_messages.append({"role": "user", "content": content})
+
+            # Handle assistant messages
+            elif msg.role == "assistant":
+                assistant_msg: Dict[str, Any] = {"role": "assistant"}
+
+                # New format: tool_calls field
+                if hasattr(msg, "tool_calls") and msg.tool_calls:
+                    assistant_msg["tool_calls"] = msg.tool_calls
+                    # Content can be None or text
+                    if msg.content:
+                        assistant_msg["content"] = msg.content
+                    else:
+                        assistant_msg["content"] = None
+                # Simple string content
+                elif isinstance(msg.content, str):
+                    assistant_msg["content"] = msg.content
+                # Legacy: complex content (may contain tool calls)
+                else:
+                    # Extract tool calls from legacy format
+                    tool_calls = extract_tool_calls_from_content(msg.content)
+                    if tool_calls:
+                        assistant_msg["tool_calls"] = tool_calls
+                        # Also extract any text content
+                        text = extract_text(msg.content)
+                        assistant_msg["content"] = text if text else None
+                    else:
+                        content = extract_text(msg.content)
+                        assistant_msg["content"] = content if content else ""
+
+                litellm_messages.append(assistant_msg)
+
+        return litellm_messages
+
+    def _convert_anthropic_tool_results(self, content: List) -> List[Dict]:
+        """Convert Anthropic tool_result format to OpenAI tool messages.
+
+        Args:
+            content: List of content blocks potentially containing tool_result
+
+        Returns:
+            List of tool messages in OpenAI format, or an empty list if there
+            are no tool results
+        """
+        return [
+            {
+                "role": "tool",
+                "content": block.get("content", ""),
+                "tool_call_id": block.get("tool_use_id", ""),
+            }
+            for block in content
+            if isinstance(block, dict) and block.get("type") == "tool_result"
+        ]
+
+    def _clean_message(self, message) -> None:
+        """Clean up unnecessary fields from a message to reduce memory usage.
+
+        Removes:
+        - provider_specific_fields (contains thought_signature)
+        - the __thought__ suffix from tool call IDs
+
+        These fields are added by Anthropic's extended thinking feature and
+        can be very large (2-3KB each), serving no purpose for agent operation.
+        """
+        if hasattr(message, "tool_calls") and message.tool_calls:
+            for tc in message.tool_calls:
+                # Remove provider_specific_fields if present
+                if hasattr(tc, "provider_specific_fields"):
+                    tc.provider_specific_fields = None
+
+                # Clean the __thought__ suffix from the tool call ID,
+                # e.g. "call_abc123__thought__xxx..." -> "call_abc123"
+                if hasattr(tc, "id") and tc.id and "__thought__" in tc.id:
+                    tc.id = tc.id.split("__thought__")[0]
+
+    def _convert_tools(self, tools: List[Dict[str, Any]]) -> List[Dict]:
+        """Convert Anthropic tool format to OpenAI format."""
+        return [
+            {
+                "type": "function",
+                "function": {
+                    "name": tool["name"],
+                    "description": tool["description"],
+                    "parameters": tool["input_schema"],
+                },
+            }
+            for tool in tools
+        ]
+
+    def _convert_response(self, response) -> LLMResponse:
+        """Convert a LiteLLM response to LLMResponse with normalized content.
+
+        Key change: instead of storing the raw message object, we extract
+        and normalize all content to ensure JSON serializability.
+        """
+        # Extract message from response
+        message = response.choices[0].message
+
+        # Clean up provider_specific_fields (removes thought_signature, etc.)
+        self._clean_message(message)
+
+        # Determine stop reason (normalize to OpenAI format)
+        finish_reason = response.choices[0].finish_reason
+        stop_reason = StopReason.normalize(finish_reason or "stop")
+
+        # Extract text content
+        content = None
+        if hasattr(message, "content") and message.content:
+            content = (
+                message.content
+                if isinstance(message.content, str)
+                else extract_text(message.content)
+            )
+
+        # Extract and normalize tool calls
+        tool_calls = None
+        if hasattr(message, "tool_calls") and message.tool_calls:
+            tool_calls = self._normalize_tool_calls(message.tool_calls)
+
+        # Extract token usage
+        usage_dict = None
+        if hasattr(response, "usage") and response.usage:
+            usage_dict = {
+                "input_tokens": response.usage.get("prompt_tokens", 0),
+                "output_tokens": response.usage.get("completion_tokens", 0),
+            }
+
+        # Extract thinking content
+        thinking = self._extract_thinking_from_message(message)
+
+        return LLMResponse(
+            content=content,
+            tool_calls=tool_calls,
+            stop_reason=stop_reason,
+            usage=usage_dict,
+            thinking=thinking,
+        )
+
+    def _normalize_tool_calls(self, tool_calls: List) -> List[ToolCallBlock]:
+        """Normalize tool calls to OpenAI format.
+
+        Args:
+            tool_calls: List of tool calls from a LiteLLM response
+
+        Returns:
+            List of ToolCallBlock in standard format
+        """
+        normalized: List[ToolCallBlock] = []
+        for tc in tool_calls:
+            # Get arguments as a string
+            arguments = tc.function.arguments
+            if not isinstance(arguments, str):
+                arguments = json.dumps(arguments)
+
+            tool_call: ToolCallBlock = {
+                "id": tc.id,
+                "type": "function",
+                "function": {
+                    "name": tc.function.name,
+                    "arguments": arguments,
+                },
+            }
+            normalized.append(tool_call)
+        return normalized
+
+    def _extract_thinking_from_message(self, message) -> Optional[str]:
+        """Extract thinking/reasoning content from a message.
+
+        Args:
+            message: Message object from a LiteLLM response
+
+        Returns:
+            Thinking content string, or None
+        """
+        thinking_parts = []
+
+        # Check for thinking_blocks (Anthropic extended thinking via LiteLLM)
+        if hasattr(message, "thinking_blocks") and message.thinking_blocks:
+            for block in message.thinking_blocks:
+                if hasattr(block, "thinking"):
+                    thinking_parts.append(block.thinking)
+                elif isinstance(block, dict) and "thinking" in block:
+                    thinking_parts.append(block["thinking"])
+                elif isinstance(block, str):
+                    thinking_parts.append(block)
+
+        # Check for reasoning_content (OpenAI o1 style)
+        if hasattr(message, "reasoning_content") and message.reasoning_content:
+            thinking_parts.append(message.reasoning_content)
+
+        # Check content blocks for the thinking type
+        if hasattr(message, "content") and isinstance(message.content, list):
+            for block in message.content:
+                if isinstance(block, dict) and block.get("type") == "thinking":
+                    thinking_parts.append(block.get("thinking", ""))
+                elif hasattr(block, "type") and block.type == "thinking":
+                    thinking_parts.append(getattr(block, "thinking", ""))
+
+        return "\n\n".join(thinking_parts) if thinking_parts else None
+
+    def extract_text(self, response: LLMResponse) -> str:
+        """Extract text from an LLMResponse.
+
+        With the new format, content is already extracted and normalized.
+        """
+        return response.content or ""
+
+    def extract_tool_calls(self, response: LLMResponse) -> List[ToolCall]:
+        """Extract tool calls from an LLMResponse.
+
+        With the new format, tool_calls are already normalized to OpenAI format.
+        This method parses the JSON arguments into dicts.
+        """
+        if not response.tool_calls:
+            return []
+
+        tool_calls = []
+        for tc in response.tool_calls:
+            try:
+                arguments = json.loads(tc["function"]["arguments"])
+            except (json.JSONDecodeError, KeyError):
+                arguments = {}
+
+            tool_calls.append(
+                ToolCall(
+                    id=tc["id"],
+                    name=tc["function"]["name"],
+                    arguments=arguments,
+                )
+            )
+
+        return tool_calls
+
+    def extract_thinking(self, response: LLMResponse) -> Optional[str]:
+        """Extract thinking/reasoning content from an LLMResponse.
+
+        With the new format, thinking is already extracted during response conversion.
+        """
+        return response.thinking
+
+    def format_tool_results(self, results: List[ToolResult]) -> Union[LLMMessage, List[LLMMessage]]:
+        """Format tool results for LiteLLM in OpenAI format.
+
+        Returns a list of tool messages, one per result. This is the standard
+        OpenAI format that LiteLLM expects for tool responses.
+
+        Args:
+            results: List of ToolResult objects
+
+        Returns:
+            List of LLMMessages with role="tool"
+        """
+        return [
+            LLMMessage(
+                role="tool",
+                content=result.content,
+                tool_call_id=result.tool_call_id,
+                name=result.name if hasattr(result, "name") else None,
+            )
+            for result in results
+        ]
+
+    @property
+    def supports_tools(self) -> bool:
+        """Most LiteLLM providers support tool calling."""
+        # Most providers support tools, so return True by default;
+        # LiteLLM handles unsupported cases gracefully.
+        return True
+
+    @property
+    def provider_name(self) -> str:
+        """Name of the LLM provider."""
+        return self.provider.upper()
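
The adapter's public surface is small: construct it with a LiteLLM model string, pass LLMMessage objects plus Anthropic-style tool schemas to call_async, and read back a normalized LLMResponse. Below is a minimal usage sketch, not code from the package: it assumes the wheel's top-level llm package is importable and that provider credentials are set in the environment, and the get_weather tool and prompt are invented for illustration.

    import asyncio

    from llm.litellm_adapter import LiteLLMAdapter
    from llm.message_types import LLMMessage, StopReason

    # Tool schema in Anthropic format (name/description/input_schema);
    # _convert_tools rewrites it into OpenAI function-calling format.
    weather_tool = {
        "name": "get_weather",  # hypothetical tool, for illustration only
        "description": "Look up the current weather for a city.",
        "input_schema": {
            "type": "object",
            "properties": {"city": {"type": "string"}},
            "required": ["city"],
        },
    }

    async def main() -> None:
        # Model identifier as in the __init__ docstring; the API key is read
        # from environment variables unless api_key= is passed explicitly.
        adapter = LiteLLMAdapter("anthropic/claude-3-5-sonnet-20241022")

        messages = [
            LLMMessage(role="system", content="You are a terse assistant."),
            LLMMessage(role="user", content="What is the weather in Oslo?"),
        ]
        response = await adapter.call_async(messages, tools=[weather_tool])

        if response.stop_reason == StopReason.TOOL_CALLS:
            # Arguments arrive as JSON strings; extract_tool_calls parses
            # them into dicts before handing them to the tool executor.
            for call in adapter.extract_tool_calls(response):
                print(call.name, call.arguments)
        else:
            print(adapter.extract_text(response))

    asyncio.run(main())

Tool outputs would then flow back through format_tool_results, which returns role="tool" messages keyed by tool_call_id, using the message types defined in the next file.
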
llm/message_types.py ADDED
@@ -0,0 +1,245 @@
+"""Unified message types for LLM interface using LiteLLM/OpenAI standard format.
+
+This module defines the standard message formats used throughout the codebase.
+All types follow the OpenAI/LiteLLM format for consistency and serialization.
+"""
+
+from dataclasses import dataclass
+from typing import Any, Dict, List, Literal, Optional
+
+from typing_extensions import TypedDict
+
+# =============================================================================
+# Tool Call Types (OpenAI Standard)
+# =============================================================================
+
+
+class FunctionCall(TypedDict):
+    """Function call details within a tool call."""
+
+    name: str
+    arguments: str  # JSON string
+
+
+class ToolCallBlock(TypedDict):
+    """A single tool call in OpenAI format."""
+
+    id: str
+    type: Literal["function"]
+    function: FunctionCall
+
+
+# =============================================================================
+# Stop Reason Constants
+# =============================================================================
+
+
+class StopReason:
+    """Standard stop reason constants (OpenAI format)."""
+
+    STOP = "stop"  # Normal completion
+    TOOL_CALLS = "tool_calls"  # Model wants to call tools
+    LENGTH = "length"  # Max tokens reached
+
+    # Aliases for backward compatibility (Anthropic format)
+    _ALIASES = {
+        "end_turn": "stop",
+        "tool_use": "tool_calls",
+        "max_tokens": "length",
+    }
+
+    @classmethod
+    def normalize(cls, reason: str) -> str:
+        """Normalize a stop reason to the standard format.
+
+        Args:
+            reason: Stop reason (may be in Anthropic or OpenAI format)
+
+        Returns:
+            Normalized stop reason in OpenAI format
+        """
+        return cls._ALIASES.get(reason, reason)
+
+
+# =============================================================================
+# LLM Message (OpenAI Standard)
+# =============================================================================
+
+
+@dataclass
+class LLMMessage:
+    """Unified message format across all LLM providers.
+
+    Follows the OpenAI/LiteLLM format:
+    - role: "system", "user", "assistant", or "tool"
+    - content: Text content (str, or None for assistant with tool_calls)
+    - tool_calls: For the assistant role, list of tool calls
+    - tool_call_id: For the tool role, ID of the tool call this responds to
+    - name: For the tool role, name of the tool
+
+    This class is fully JSON-serializable via to_dict()/from_dict().
+    """
+
+    role: Literal["system", "user", "assistant", "tool"]
+    content: Optional[str] = None
+    tool_calls: Optional[List[ToolCallBlock]] = None
+    tool_call_id: Optional[str] = None  # For tool role
+    name: Optional[str] = None  # Tool name (for tool role)
+
+    def to_dict(self) -> Dict[str, Any]:
+        """Convert to a dictionary for serialization and API calls.
+
+        Returns:
+            Dict representation in OpenAI format
+        """
+        result: Dict[str, Any] = {"role": self.role}
+
+        if self.content is not None:
+            result["content"] = self.content
+
+        if self.tool_calls:
+            result["tool_calls"] = self.tool_calls
+
+        if self.tool_call_id:
+            result["tool_call_id"] = self.tool_call_id
+
+        if self.name:
+            result["name"] = self.name
+
+        return result
+
+    @classmethod
+    def from_dict(cls, data: Dict[str, Any]) -> "LLMMessage":
+        """Create an LLMMessage from a dictionary.
+
+        Args:
+            data: Dictionary with message data
+
+        Returns:
+            LLMMessage instance
+        """
+        return cls(
+            role=data["role"],
+            content=data.get("content"),
+            tool_calls=data.get("tool_calls"),
+            tool_call_id=data.get("tool_call_id"),
+            name=data.get("name"),
+        )
+
+    def has_tool_calls(self) -> bool:
+        """Check if the message contains tool calls."""
+        return bool(self.tool_calls)
+
+    def is_tool_response(self) -> bool:
+        """Check if this is a tool response message."""
+        return self.role == "tool" and self.tool_call_id is not None
+
+
+# =============================================================================
+# LLM Response (Normalized, No Raw Objects)
+# =============================================================================
+
+
+@dataclass
+class LLMResponse:
+    """Unified response format across all LLM providers.
+
+    This class stores normalized data, NOT raw provider objects.
+    All fields are JSON-serializable.
+
+    Attributes:
+        content: Text content from the response (None if only tool calls)
+        tool_calls: List of tool calls in OpenAI format
+        stop_reason: Normalized stop reason (StopReason constants)
+        usage: Token usage dict {"input_tokens": int, "output_tokens": int}
+        thinking: Thinking/reasoning content (for models that support it)
+    """
+
+    content: Optional[str] = None
+    tool_calls: Optional[List[ToolCallBlock]] = None
+    stop_reason: str = StopReason.STOP
+    usage: Optional[Dict[str, int]] = None
+    thinking: Optional[str] = None
+
+    def to_message(self) -> LLMMessage:
+        """Convert the response to an LLMMessage for conversation history.
+
+        Returns:
+            LLMMessage with role="assistant"
+        """
+        return LLMMessage(
+            role="assistant",
+            content=self.content,
+            tool_calls=self.tool_calls,
+        )
+
+    def has_tool_calls(self) -> bool:
+        """Check if the response contains tool calls."""
+        return bool(self.tool_calls)
+
+    def to_dict(self) -> Dict[str, Any]:
+        """Convert to a dictionary for serialization.
+
+        Returns:
+            Dict representation
+        """
+        result: Dict[str, Any] = {
+            "stop_reason": self.stop_reason,
+        }
+
+        if self.content is not None:
+            result["content"] = self.content
+
+        if self.tool_calls:
+            result["tool_calls"] = self.tool_calls
+
+        if self.usage:
+            result["usage"] = self.usage
+
+        if self.thinking:
+            result["thinking"] = self.thinking
+
+        return result
+
+
+# =============================================================================
+# Tool Call and Result Types
+# =============================================================================
+
+
+@dataclass
+class ToolCall:
+    """Parsed tool call, ready for execution.
+
+    Used after extracting tool calls from a response, with arguments
+    already parsed from a JSON string into a dict.
+    """
+
+    id: str
+    name: str
+    arguments: Dict[str, Any]
+
+
+@dataclass
+class ToolResult:
+    """Result from a tool execution.
+
+    Used to format tool results back to the LLM.
+    """
+
+    tool_call_id: str
+    content: str
+    name: Optional[str] = None  # Tool name (optional but recommended)
+
+    def to_message(self) -> LLMMessage:
+        """Convert to an LLMMessage for conversation history.
+
+        Returns:
+            LLMMessage with role="tool"
+        """
+        return LLMMessage(
+            role="tool",
+            content=self.content,
+            tool_call_id=self.tool_call_id,
+            name=self.name,
+        )
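
Because every type above is a plain dataclass or TypedDict, a full conversation turn survives a JSON round trip, which is the stated point of normalizing away raw provider objects. Here is a minimal sketch under the same assumptions as the previous example; the call ID, tool name, and weather reading are invented values:

    import json

    from llm.message_types import LLMMessage, LLMResponse, StopReason, ToolResult

    # An assistant turn that requested a tool, shaped as _convert_response
    # would build it; "call_abc123" and "get_weather" are illustrative.
    response = LLMResponse(
        tool_calls=[
            {
                "id": "call_abc123",
                "type": "function",
                "function": {"name": "get_weather", "arguments": '{"city": "Oslo"}'},
            }
        ],
        stop_reason=StopReason.normalize("tool_use"),  # Anthropic alias -> "tool_calls"
    )
    assert response.has_tool_calls()

    # Assemble the next slice of conversation history from normalized types.
    history = [
        response.to_message(),  # role="assistant", carries tool_calls
        ToolResult(
            tool_call_id="call_abc123",
            content="4 degrees C, light rain",
            name="get_weather",
        ).to_message(),  # role="tool", echoes the tool_call_id
    ]

    # Every message survives serialization to JSON and back.
    serialized = json.dumps([m.to_dict() for m in history])
    restored = [LLMMessage.from_dict(d) for d in json.loads(serialized)]
    assert restored[1].is_tool_response()

This round-trip property is what lets the memory/ modules persist and compress conversation history without holding provider-specific objects.
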