aloop 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of aloop might be problematic; consult the registry's advisory page for more details.

Files changed (62) hide show
  1. agent/__init__.py +0 -0
  2. agent/agent.py +182 -0
  3. agent/base.py +406 -0
  4. agent/context.py +126 -0
  5. agent/todo.py +149 -0
  6. agent/tool_executor.py +54 -0
  7. agent/verification.py +135 -0
  8. aloop-0.1.0.dist-info/METADATA +246 -0
  9. aloop-0.1.0.dist-info/RECORD +62 -0
  10. aloop-0.1.0.dist-info/WHEEL +5 -0
  11. aloop-0.1.0.dist-info/entry_points.txt +2 -0
  12. aloop-0.1.0.dist-info/licenses/LICENSE +21 -0
  13. aloop-0.1.0.dist-info/top_level.txt +9 -0
  14. cli.py +19 -0
  15. config.py +146 -0
  16. interactive.py +865 -0
  17. llm/__init__.py +51 -0
  18. llm/base.py +26 -0
  19. llm/compat.py +226 -0
  20. llm/content_utils.py +309 -0
  21. llm/litellm_adapter.py +450 -0
  22. llm/message_types.py +245 -0
  23. llm/model_manager.py +265 -0
  24. llm/retry.py +95 -0
  25. main.py +246 -0
  26. memory/__init__.py +20 -0
  27. memory/compressor.py +554 -0
  28. memory/manager.py +538 -0
  29. memory/serialization.py +82 -0
  30. memory/short_term.py +88 -0
  31. memory/token_tracker.py +203 -0
  32. memory/types.py +51 -0
  33. tools/__init__.py +6 -0
  34. tools/advanced_file_ops.py +557 -0
  35. tools/base.py +51 -0
  36. tools/calculator.py +50 -0
  37. tools/code_navigator.py +975 -0
  38. tools/explore.py +254 -0
  39. tools/file_ops.py +150 -0
  40. tools/git_tools.py +791 -0
  41. tools/notify.py +69 -0
  42. tools/parallel_execute.py +420 -0
  43. tools/session_manager.py +205 -0
  44. tools/shell.py +147 -0
  45. tools/shell_background.py +470 -0
  46. tools/smart_edit.py +491 -0
  47. tools/todo.py +130 -0
  48. tools/web_fetch.py +673 -0
  49. tools/web_search.py +61 -0
  50. utils/__init__.py +15 -0
  51. utils/logger.py +105 -0
  52. utils/model_pricing.py +49 -0
  53. utils/runtime.py +75 -0
  54. utils/terminal_ui.py +422 -0
  55. utils/tui/__init__.py +39 -0
  56. utils/tui/command_registry.py +49 -0
  57. utils/tui/components.py +306 -0
  58. utils/tui/input_handler.py +393 -0
  59. utils/tui/model_ui.py +204 -0
  60. utils/tui/progress.py +292 -0
  61. utils/tui/status_bar.py +178 -0
  62. utils/tui/theme.py +165 -0
llm/__init__.py ADDED
@@ -0,0 +1,51 @@
1
+ """LLM module - LiteLLM adapter for unified access to 100+ providers."""
2
+
3
+ # Import new types from message_types (primary source)
4
+ # Import compatibility utilities
5
+ from .compat import ensure_new_format, migrate_messages, normalize_stop_reason
6
+
7
+ # Import utilities
8
+ from .content_utils import (
9
+ extract_text,
10
+ extract_text_from_message,
11
+ extract_tool_calls_from_content,
12
+ message_to_dict,
13
+ )
14
+
15
+ # Import adapter
16
+ from .litellm_adapter import LiteLLMAdapter
17
+ from .message_types import (
18
+ FunctionCall,
19
+ LLMMessage,
20
+ LLMResponse,
21
+ StopReason,
22
+ ToolCall,
23
+ ToolCallBlock,
24
+ ToolResult,
25
+ )
26
+ from .model_manager import ModelManager, ModelProfile
27
+
28
+ __all__ = [
29
+ # Core types
30
+ "LLMMessage",
31
+ "LLMResponse",
32
+ "ToolCall",
33
+ "ToolResult",
34
+ "ToolCallBlock",
35
+ "FunctionCall",
36
+ "StopReason",
37
+ # Adapter
38
+ "LiteLLMAdapter",
39
+ # Model Manager
40
+ "ModelManager",
41
+ "ModelProfile",
42
+ # Utilities
43
+ "extract_text",
44
+ "extract_text_from_message",
45
+ "extract_tool_calls_from_content",
46
+ "message_to_dict",
47
+ # Compatibility
48
+ "ensure_new_format",
49
+ "migrate_messages",
50
+ "normalize_stop_reason",
51
+ ]
llm/base.py ADDED
@@ -0,0 +1,26 @@
1
+ """Base data structures for LLM interface.
2
+
3
+ This module re-exports types from message_types.py for backward compatibility.
4
+ New code should import from llm.message_types or llm directly.
5
+ """
6
+
7
+ # Re-export all types from message_types for backward compatibility
8
+ from .message_types import (
9
+ FunctionCall,
10
+ LLMMessage,
11
+ LLMResponse,
12
+ StopReason,
13
+ ToolCall,
14
+ ToolCallBlock,
15
+ ToolResult,
16
+ )
17
+
18
+ __all__ = [
19
+ "LLMMessage",
20
+ "LLMResponse",
21
+ "ToolCall",
22
+ "ToolResult",
23
+ "ToolCallBlock",
24
+ "FunctionCall",
25
+ "StopReason",
26
+ ]
llm/compat.py ADDED
@@ -0,0 +1,226 @@
1
+ """Backward compatibility layer for message format migration.
2
+
3
+ This module provides utilities for converting between old and new message formats,
4
+ allowing gradual migration while maintaining backward compatibility.
5
+ """
6
+
7
+ from typing import Any, Dict, List, Union
8
+
9
+ from .content_utils import extract_text, extract_tool_calls_from_content
10
+ from .message_types import LLMMessage, StopReason
11
+
12
+
13
def ensure_new_format(message: Any) -> LLMMessage:
    """Normalize a message of any supported shape into a new-format LLMMessage.

    Accepted shapes:
    - New-format LLMMessage instances (returned unchanged)
    - Old-format LLMMessage instances with complex content (converted)
    - Plain dict representations (parsed via ``LLMMessage.from_dict``)
    - Raw message objects coming back from a provider

    Args:
        message: Message in any supported format.

    Returns:
        LLMMessage in the new format.
    """
    if isinstance(message, LLMMessage):
        # New-format instances carry a tool_calls attribute; anything else
        # is an old-format message that still needs conversion.
        if hasattr(message, "tool_calls"):
            return message
        return _convert_old_llm_message(message)

    if isinstance(message, dict):
        return LLMMessage.from_dict(message)

    # Fall back to treating it as a raw provider message object.
    return _convert_raw_message(message)
42
+
43
+
44
def _convert_old_llm_message(message: LLMMessage) -> LLMMessage:
    """Convert an old-format LLMMessage to the new format.

    Old format:
        - role: str
        - content: Any (str, list of content blocks, or Message object)

    New format:
        - role: Literal[...]
        - content: Optional[str]
        - tool_calls: Optional[List[ToolCallBlock]]
        - tool_call_id: Optional[str]
        - name: Optional[str]

    Args:
        message: Old-format LLMMessage.

    Returns:
        New-format LLMMessage.
    """
    role = message.role
    content = message.content

    # Flatten whatever the old content was into plain text; the new format
    # only carries Optional[str]. An empty string collapses to None.
    text_content = extract_text(content) if content else None
    if text_content == "":
        text_content = None

    # Tool calls may be embedded as tool_use blocks in old content.
    tool_calls = extract_tool_calls_from_content(content) if content else None

    tool_call_id = None
    name = None

    # Old Anthropic-style tool results arrive as blocks inside a user
    # message; remap the first one onto an OpenAI-style "tool" message.
    if isinstance(content, list):
        for block in content:
            if isinstance(block, dict) and block.get("type") == "tool_result":
                tool_call_id = block.get("tool_use_id")
                # The block's own content may itself be a list of blocks
                # rather than a string, so flatten it to text instead of
                # passing it through raw (new format requires Optional[str]).
                text_content = extract_text(block.get("content", ""))
                # Old-format tool results carry no tool name.
                role = "tool"  # Convert to OpenAI tool role
                break

    return LLMMessage(
        role=role,  # type: ignore
        content=text_content,
        tool_calls=tool_calls if tool_calls else None,
        tool_call_id=tool_call_id,
        name=name,
    )
98
+
99
+
100
def _convert_raw_message(message: Any) -> LLMMessage:
    """Build an LLMMessage from a raw provider message object.

    Args:
        message: Raw message object (e.g. from a LiteLLM response).

    Returns:
        LLMMessage in the new format.
    """
    raw_content = getattr(message, "content", None)

    # Flatten to plain text; an empty string collapses to None.
    text = extract_text(raw_content) if raw_content else None
    if not text:
        text = None

    # Only extract tool calls when the provider actually attached some.
    calls = None
    if getattr(message, "tool_calls", None):
        calls = extract_tool_calls_from_content(message)

    return LLMMessage(
        role=getattr(message, "role", "assistant"),  # type: ignore
        content=text,
        tool_calls=calls,
    )
127
+
128
+
129
def normalize_stop_reason(reason: str) -> str:
    """Map a provider-specific stop reason onto the OpenAI vocabulary.

    Thin wrapper around ``StopReason.normalize``, kept so callers can keep
    importing the helper from this module.

    Args:
        reason: Stop reason in any format (Anthropic or OpenAI).

    Returns:
        Normalized stop reason.
    """
    return StopReason.normalize(reason)
139
+
140
+
141
def convert_tool_results_to_messages(
    results: List[Dict[str, Any]],
) -> List[LLMMessage]:
    """Convert tool results from the old Anthropic format to tool messages.

    Old format (one message holding a list of tool_result blocks):
        LLMMessage(role="user", content=[
            {"type": "tool_result", "tool_use_id": "...", "content": "..."},
            ...
        ])

    New format (one "tool" message per result):
        [
        LLMMessage(role="tool", content="...", tool_call_id="...", name="..."),
        ...
        ]

    Args:
        results: Tool result dicts in Anthropic format.

    Returns:
        LLMMessages in OpenAI tool format; entries that are not
        tool_result blocks are skipped.
    """
    messages: List[LLMMessage] = []
    for item in results:
        if item.get("type") != "tool_result":
            continue
        messages.append(
            LLMMessage(
                role="tool",
                content=item.get("content", ""),
                tool_call_id=item.get("tool_use_id", ""),
                name=item.get("name"),
            )
        )
    return messages
174
+
175
+
176
def format_tool_results_for_api(
    results: List[Dict[str, Any]], use_openai_format: bool = True
) -> Union[LLMMessage, List[LLMMessage]]:
    """Shape tool results for the next API call.

    Args:
        results: Tool result dicts.
        use_openai_format: True -> list of "tool" messages (OpenAI format);
            False -> a single user message wrapping the raw result blocks
            (legacy Anthropic format).

    Returns:
        The formatted message or list of messages.
    """
    if not use_openai_format:
        # Legacy Anthropic shape: one user message carrying the raw blocks.
        return LLMMessage(role="user", content=results)  # type: ignore
    return convert_tool_results_to_messages(results)
194
+
195
+
196
def is_new_format_message(message: LLMMessage) -> bool:
    """Tell whether a message already uses the new format.

    New-format messages keep ``content`` as a plain string (or None) and
    expose a ``tool_calls`` attribute.

    Args:
        message: Message to inspect.

    Returns:
        True if the message is in the new format.
    """
    content_is_simple = message.content is None or isinstance(message.content, str)
    return content_is_simple and hasattr(message, "tool_calls")
215
+
216
+
217
def migrate_messages(messages: List[LLMMessage]) -> List[LLMMessage]:
    """Migrate every message in a list to the new format.

    Args:
        messages: Messages in any supported format.

    Returns:
        A new list where each message has been run through
        ``ensure_new_format``.
    """
    migrated: List[LLMMessage] = []
    for msg in messages:
        migrated.append(ensure_new_format(msg))
    return migrated
llm/content_utils.py ADDED
@@ -0,0 +1,309 @@
1
+ """Centralized content extraction utilities for LLM messages.
2
+
3
+ This module consolidates all content extraction logic that was previously
4
+ scattered across litellm_adapter.py, compressor.py, and token_tracker.py.
5
+ """
6
+
7
+ from typing import Any, List, Optional
8
+
9
+ from .message_types import LLMMessage, ToolCallBlock
10
+
11
+
12
def extract_text(content: Any) -> str:
    """Extract plain text from message content of any supported shape.

    Supported inputs: None, plain strings, objects exposing a ``.content``
    attribute (unwrapped recursively), lists of content blocks (Anthropic
    format), and single dict blocks. Anything else is stringified.

    Args:
        content: Content in any supported format.

    Returns:
        Extracted text as a string ("" when there is none).
    """
    if content is None:
        return ""
    if isinstance(content, str):
        return content

    # Unwrap Message-like objects (e.g. previous LLM responses) recursively.
    if hasattr(content, "content"):
        return extract_text(content.content)

    if isinstance(content, list):
        # Join the non-empty text of every block with newlines.
        pieces = [_extract_text_from_block(item) for item in content]
        return "\n".join(piece for piece in pieces if piece)

    if isinstance(content, dict):
        return _extract_text_from_block(content)

    # Last resort: plain string conversion.
    return str(content)
52
+
53
+
54
+ def _extract_text_from_block(block: Any) -> str:
55
+ """Extract text from a single content block.
56
+
57
+ Args:
58
+ block: Content block (dict or object)
59
+
60
+ Returns:
61
+ Text content or empty string
62
+ """
63
+ # Handle dict format
64
+ if isinstance(block, dict):
65
+ if block.get("type") == "text":
66
+ return block.get("text", "")
67
+ if "text" in block:
68
+ return block["text"]
69
+ # For tool_use/tool_result, don't include in text extraction
70
+ if block.get("type") in ("tool_use", "tool_result"):
71
+ return ""
72
+ return ""
73
+
74
+ # Handle object format (ContentBlock from Anthropic SDK)
75
+ if hasattr(block, "text"):
76
+ return block.text
77
+
78
+ if hasattr(block, "type") and block.type == "text":
79
+ return getattr(block, "text", "")
80
+
81
+ return ""
82
+
83
+
84
def extract_text_from_message(message: LLMMessage) -> str:
    """Return the text content of an LLMMessage as a plain string.

    New-format messages return their string content directly; legacy
    messages with complex content are flattened through ``extract_text``.

    Args:
        message: LLMMessage instance.

    Returns:
        Text content ("" when the message has none).
    """
    content = message.content
    if content is None:
        return ""
    if isinstance(content, str):
        return content
    # Legacy messages may still carry complex (list/object) content.
    return extract_text(content)
104
+
105
+
106
def extract_tool_calls_from_content(content: Any) -> List[ToolCallBlock]:
    """Collect tool calls from message content, normalized to OpenAI format.

    Handles OpenAI-style message objects carrying a ``tool_calls`` field as
    well as Anthropic-style ``tool_use`` blocks embedded in list content.

    Args:
        content: Message content in any supported format.

    Returns:
        List of tool calls in OpenAI/LiteLLM format (possibly empty).
    """
    calls: List[ToolCallBlock] = []

    # OpenAI-style: a message object with a non-empty tool_calls field.
    raw_calls = getattr(content, "tool_calls", None)
    if raw_calls:
        for raw in raw_calls:
            normalized = _normalize_tool_call(raw)
            if normalized:
                calls.append(normalized)
        return calls

    # Anthropic-style: tool_use blocks inside a list of content blocks.
    if isinstance(content, list):
        for block in content:
            # Explicit grouping: a dict block is matched on its "type" key,
            # an SDK object on its .type attribute.
            is_tool_use = (
                isinstance(block, dict) and block.get("type") == "tool_use"
            ) or getattr(block, "type", None) == "tool_use"
            if is_tool_use:
                converted = _anthropic_to_openai_tool_call(block)
                if converted:
                    calls.append(converted)

    return calls
142
+
143
+
144
+ def _normalize_tool_call(tc: Any) -> Optional[ToolCallBlock]:
145
+ """Normalize a tool call to OpenAI format.
146
+
147
+ Args:
148
+ tc: Tool call in any format
149
+
150
+ Returns:
151
+ ToolCallBlock in OpenAI format or None
152
+ """
153
+ import json
154
+
155
+ # Already in OpenAI dict format
156
+ if isinstance(tc, dict):
157
+ if "function" in tc:
158
+ return tc # type: ignore
159
+ # Anthropic format in dict
160
+ if tc.get("type") == "tool_use":
161
+ return _anthropic_to_openai_tool_call(tc)
162
+
163
+ # OpenAI object format (from LiteLLM)
164
+ if hasattr(tc, "function") and hasattr(tc, "id"):
165
+ arguments = tc.function.arguments
166
+ if not isinstance(arguments, str):
167
+ arguments = json.dumps(arguments)
168
+
169
+ return {
170
+ "id": tc.id,
171
+ "type": "function",
172
+ "function": {
173
+ "name": tc.function.name,
174
+ "arguments": arguments,
175
+ },
176
+ }
177
+
178
+ # Anthropic object format
179
+ if hasattr(tc, "type") and tc.type == "tool_use":
180
+ return _anthropic_to_openai_tool_call(tc)
181
+
182
+ return None
183
+
184
+
185
+ def _anthropic_to_openai_tool_call(block: Any) -> Optional[ToolCallBlock]:
186
+ """Convert Anthropic tool_use block to OpenAI format.
187
+
188
+ Args:
189
+ block: Anthropic tool_use block (dict or object)
190
+
191
+ Returns:
192
+ ToolCallBlock in OpenAI format or None
193
+ """
194
+ import json
195
+
196
+ if isinstance(block, dict):
197
+ tool_id = block.get("id", "")
198
+ name = block.get("name", "")
199
+ input_data = block.get("input", {})
200
+ else:
201
+ tool_id = getattr(block, "id", "")
202
+ name = getattr(block, "name", "")
203
+ input_data = getattr(block, "input", {})
204
+
205
+ if not tool_id or not name:
206
+ return None
207
+
208
+ # Convert input to JSON string
209
+ arguments = input_data if isinstance(input_data, str) else json.dumps(input_data)
210
+
211
+ return {
212
+ "id": tool_id,
213
+ "type": "function",
214
+ "function": {
215
+ "name": name,
216
+ "arguments": arguments,
217
+ },
218
+ }
219
+
220
+
221
def message_to_dict(message: LLMMessage) -> dict:
    """Serialize an LLMMessage to an OpenAI-format dict for API calls.

    Prefers the message's own ``to_dict``; for legacy messages without it,
    builds a minimal role/content dict, flattening complex content to text.

    Args:
        message: LLMMessage instance.

    Returns:
        Dictionary in OpenAI format.
    """
    # New-format messages serialize themselves.
    if hasattr(message, "to_dict"):
        return message.to_dict()

    # Legacy fallback.
    payload: dict = {"role": message.role}
    content = message.content
    if content is not None:
        payload["content"] = (
            content if isinstance(content, str) else extract_text(content)
        )
    return payload
248
+
249
+
250
def content_has_tool_calls(content: Any) -> bool:
    """Report whether message content carries any tool calls.

    Checks both the OpenAI-style ``tool_calls`` field and Anthropic-style
    ``tool_use`` blocks embedded in list content.

    Args:
        content: Message content in any format.

    Returns:
        True if the content contains tool calls.
    """
    # OpenAI-style: non-empty tool_calls field on the object.
    if getattr(content, "tool_calls", None):
        return True

    # Anthropic-style: marker blocks inside list content.
    if isinstance(content, list):
        markers = ("tool_use", "tool_calls")
        for block in content:
            block_type = (
                block.get("type")
                if isinstance(block, dict)
                else getattr(block, "type", None)
            )
            if block_type in markers:
                return True

    return False
272
+
273
+
274
def content_has_tool_results(content: Any) -> bool:
    """Report whether message content contains any tool_result blocks.

    Args:
        content: Message content in any format.

    Returns:
        True if the content (a list) holds at least one tool_result block.
    """
    if not isinstance(content, list):
        return False

    for block in content:
        kind = (
            block.get("type")
            if isinstance(block, dict)
            else getattr(block, "type", None)
        )
        if kind == "tool_result":
            return True

    return False
292
+
293
+
294
def estimate_tokens(content: Any) -> int:
    """Roughly estimate the token count of content.

    Heuristic: about 3.5 characters per token of extracted text; any
    non-empty text counts as at least one token.

    Args:
        content: Content to estimate.

    Returns:
        Estimated token count (0 for empty content).
    """
    text = extract_text(content)
    return max(1, int(len(text) / 3.5)) if text else 0