aloop-0.1.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (66)
  1. agent/__init__.py +0 -0
  2. agent/agent.py +182 -0
  3. agent/base.py +406 -0
  4. agent/context.py +126 -0
  5. agent/prompts/__init__.py +1 -0
  6. agent/todo.py +149 -0
  7. agent/tool_executor.py +54 -0
  8. agent/verification.py +135 -0
  9. aloop-0.1.1.dist-info/METADATA +252 -0
  10. aloop-0.1.1.dist-info/RECORD +66 -0
  11. aloop-0.1.1.dist-info/WHEEL +5 -0
  12. aloop-0.1.1.dist-info/entry_points.txt +2 -0
  13. aloop-0.1.1.dist-info/licenses/LICENSE +21 -0
  14. aloop-0.1.1.dist-info/top_level.txt +9 -0
  15. cli.py +19 -0
  16. config.py +146 -0
  17. interactive.py +865 -0
  18. llm/__init__.py +51 -0
  19. llm/base.py +26 -0
  20. llm/compat.py +226 -0
  21. llm/content_utils.py +309 -0
  22. llm/litellm_adapter.py +450 -0
  23. llm/message_types.py +245 -0
  24. llm/model_manager.py +265 -0
  25. llm/retry.py +95 -0
  26. main.py +246 -0
  27. memory/__init__.py +20 -0
  28. memory/compressor.py +554 -0
  29. memory/manager.py +538 -0
  30. memory/serialization.py +82 -0
  31. memory/short_term.py +88 -0
  32. memory/store/__init__.py +6 -0
  33. memory/store/memory_store.py +100 -0
  34. memory/store/yaml_file_memory_store.py +414 -0
  35. memory/token_tracker.py +203 -0
  36. memory/types.py +51 -0
  37. tools/__init__.py +6 -0
  38. tools/advanced_file_ops.py +557 -0
  39. tools/base.py +51 -0
  40. tools/calculator.py +50 -0
  41. tools/code_navigator.py +975 -0
  42. tools/explore.py +254 -0
  43. tools/file_ops.py +150 -0
  44. tools/git_tools.py +791 -0
  45. tools/notify.py +69 -0
  46. tools/parallel_execute.py +420 -0
  47. tools/session_manager.py +205 -0
  48. tools/shell.py +147 -0
  49. tools/shell_background.py +470 -0
  50. tools/smart_edit.py +491 -0
  51. tools/todo.py +130 -0
  52. tools/web_fetch.py +673 -0
  53. tools/web_search.py +61 -0
  54. utils/__init__.py +15 -0
  55. utils/logger.py +105 -0
  56. utils/model_pricing.py +49 -0
  57. utils/runtime.py +75 -0
  58. utils/terminal_ui.py +422 -0
  59. utils/tui/__init__.py +39 -0
  60. utils/tui/command_registry.py +49 -0
  61. utils/tui/components.py +306 -0
  62. utils/tui/input_handler.py +393 -0
  63. utils/tui/model_ui.py +204 -0
  64. utils/tui/progress.py +292 -0
  65. utils/tui/status_bar.py +178 -0
  66. utils/tui/theme.py +165 -0
agent/__init__.py ADDED
File without changes
agent/agent.py ADDED
@@ -0,0 +1,182 @@
+"""Loop agent implementation."""
+
+from config import Config
+from llm import LLMMessage
+from utils import terminal_ui
+
+from .base import BaseAgent
+from .context import format_context_prompt
+
+
+class LoopAgent(BaseAgent):
+    """Primary agent implementation — one unified loop for all tasks."""
+
+    SYSTEM_PROMPT = """<role>
+You are a helpful AI assistant that uses tools to accomplish tasks efficiently and reliably.
+</role>
+
+<critical_rules>
+IMPORTANT: Always think before acting
+IMPORTANT: Use the most efficient tool for each operation
+IMPORTANT: Manage todo lists for complex multi-step tasks
+IMPORTANT: Mark tasks completed IMMEDIATELY after finishing them
+</critical_rules>
+
+<task_management>
+Use the manage_todo_list tool for complex tasks to prevent forgetting steps.
+
+WHEN TO USE TODO LISTS:
+- Tasks with 3+ distinct steps
+- Multi-file operations
+- Complex workflows requiring planning
+- Any task where tracking progress helps
+
+TODO LIST RULES:
+- Create todos BEFORE starting complex work
+- Exactly ONE task must be in_progress at any time
+- Mark tasks completed IMMEDIATELY after finishing
+- Update status as you work through the list
+
+<good_example>
+User: Create a data pipeline that reads CSV, processes it, and generates report
+Assistant: I'll use the todo list to track this multi-step task.
+[Calls manage_todo_list with operation="add" for each step]
+[Marks first task as in_progress before starting]
+[Uses read_file tool]
+[Marks as completed, moves to next task]
+</good_example>
+
+<bad_example>
+User: Create a data pipeline that reads CSV, processes it, and generates report
+Assistant: [Immediately starts without planning, forgets steps halfway through]
+</bad_example>
+</task_management>
+
+<tool_usage_guidelines>
+For file operations:
+- Use glob_files to find files by pattern (fast, efficient)
+- Use code_navigator to find function/class definitions (10x faster than grep, AST-based)
+- Use grep_content for text search only (not for finding code structure)
+- Use read_file only when you need full contents (avoid reading multiple large files at once)
+- Use smart_edit for code edits (fuzzy match, auto backup, diff preview)
+- Use edit_file for simple append/insert operations only
+- Use write_file only for creating new files or complete rewrites
+
+CRITICAL: Never read multiple large files in a single iteration - this causes context overflow!
+Instead: Use code_navigator or grep_content to find specific information, then read only what you need.
+
+For complex tasks:
+- Use manage_todo_list to track progress
+- Break into smaller, manageable steps
+- Mark tasks completed as you go
+- Keep exactly ONE task in_progress at a time
+
+<good_example>
+Task: Find all Python files that import 'requests'
+Approach:
+1. Use glob_files with pattern "**/*.py" to find Python files
+2. Use grep_content with pattern "^import requests|^from requests" to search
+Result: Efficient, minimal tokens used
+</good_example>
+
+<bad_example>
+Task: Find all Python files that import 'requests'
+Approach:
+1. Use read_file on every Python file one by one
+2. Manually search through content
+Result: Wasteful, uses 100x more tokens
+</bad_example>
+</tool_usage_guidelines>
+
+<workflow>
+For each user request, follow this ReAct pattern:
+1. THINK: Analyze what's needed, choose best tools
+2. ACT: Execute with appropriate tools
+3. OBSERVE: Check results and learn from them
+4. REPEAT or COMPLETE: Continue the loop or provide final answer
+
+When you have enough information, provide your final answer directly without using more tools.
+</workflow>
+
+<complex_task_strategy>
+For complex tasks, combine tools to achieve an explore-plan-execute workflow:
+
+1. **EXPLORE**: Gather context before acting
+   - Use explore_context for parallel information gathering (code structure, web research)
+
+2. **PLAN**: Structure your approach
+   - Use manage_todo_list to break down the task into trackable steps
+   - Identify dependencies between steps
+
+3. **EXECUTE**: Carry out the plan
+   - Use parallel_execute for multiple independent/semi-dependent tasks
+   - Use regular tools for simple sequential operations
+
+When to use each approach:
+- Simple task (1-2 steps) → Use tools directly
+- Medium task (3-5 steps) → Use todo list + sequential execution
+- Complex task (needs research) → Explore → Plan → Execute
+- Parallel workload → parallel_execute
+</complex_task_strategy>"""
+
+    async def run(self, task: str, verify: bool = True) -> str:
+        """Execute ReAct loop until task is complete.
+
+        Args:
+            task: The task to complete
+            verify: If True, use ralph loop (outer verification). If False, use
+                plain react loop (suitable for interactive multi-turn sessions).
+
+        Returns:
+            Final answer as a string
+        """
+        # Build system message with context (only if not already in memory)
+        # This allows multi-turn conversations to reuse the same system message
+        if not self.memory.system_messages:
+            system_content = self.SYSTEM_PROMPT
+            try:
+                context = await format_context_prompt()
+                system_content = context + "\n" + system_content
+            except Exception:
+                # If context gathering fails, continue without it
+                pass
+
+            # Add system message only on first turn
+            await self.memory.add_message(LLMMessage(role="system", content=system_content))
+
+        # Add user task/message
+        await self.memory.add_message(LLMMessage(role="user", content=task))
+
+        tools = self.tool_executor.get_tool_schemas()
+
+        if verify:
+            # Use ralph loop (outer verification wrapping the inner ReAct loop)
+            result = await self._ralph_loop(
+                messages=[],  # Not used when use_memory=True
+                tools=tools,
+                use_memory=True,
+                save_to_memory=True,
+                task=task,
+                max_iterations=Config.RALPH_LOOP_MAX_ITERATIONS,
+            )
+        else:
+            # Plain react loop without verification
+            result = await self._react_loop(
+                messages=[],
+                tools=tools,
+                use_memory=True,
+                save_to_memory=True,
+                task=task,
+            )
+
+        self._print_memory_stats()
+
+        # Save memory state to database after task completion
+        await self.memory.save_memory()
+
+        return result
+
+    def _print_memory_stats(self):
+        """Print memory usage statistics."""
+        stats = self.memory.get_stats()
+        terminal_ui.print_memory_stats(stats)
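`run` is the agent's single entry point for a task: the `verify` flag selects either the outer verification ("ralph") loop or the plain ReAct loop. A minimal usage sketch, assuming the `BaseAgent` constructor shown in `agent/base.py` below and a hypothetical adapter configuration (the model id and reliance on adapter defaults are assumptions, not documented API):

# Sketch only, not part of the package: the model id and adapter defaults
# are assumptions; only the LoopAgent/BaseAgent signatures above are real.
import asyncio

from agent.agent import LoopAgent
from llm import LiteLLMAdapter


async def main() -> None:
    llm = LiteLLMAdapter(model="openai/gpt-4o")  # hypothetical model id
    agent = LoopAgent(llm=llm, tools=[])  # BaseAgent appends a TodoTool itself
    # verify=True wraps the inner ReAct loop in the outer verification loop
    answer = await agent.run("Summarize the repository layout", verify=True)
    print(answer)


asyncio.run(main())

Note that `tools=[]` still yields a working todo workflow, because `BaseAgent.__init__` (below) copies the list and appends a `TodoTool` to it.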
agent/base.py ADDED
@@ -0,0 +1,406 @@
+"""Base agent class for all agent types."""
+
+from abc import ABC, abstractmethod
+from typing import TYPE_CHECKING, List, Optional
+
+from llm import LLMMessage, LLMResponse, StopReason, ToolResult
+from memory import MemoryManager
+from tools.base import BaseTool
+from tools.todo import TodoTool
+from utils import get_logger, terminal_ui
+from utils.tui.progress import AsyncSpinner
+
+from .todo import TodoList
+from .tool_executor import ToolExecutor
+from .verification import LLMVerifier, VerificationResult, Verifier
+
+if TYPE_CHECKING:
+    from llm import LiteLLMAdapter, ModelManager
+
+logger = get_logger(__name__)
+
+
+class BaseAgent(ABC):
+    """Abstract base class for all agent types."""
+
+    def __init__(
+        self,
+        llm: "LiteLLMAdapter",
+        tools: List[BaseTool],
+        max_iterations: int = 10,
+        model_manager: Optional["ModelManager"] = None,
+    ):
+        """Initialize the agent.
+
+        Args:
+            llm: LLM instance to use
+            tools: List of tools available to the agent
+            max_iterations: Maximum number of agent loop iterations
+            model_manager: Optional model manager for switching models
+        """
+        self.llm = llm
+        self.max_iterations = max_iterations
+        self.model_manager = model_manager
+
+        # Initialize todo list system
+        self.todo_list = TodoList()
+
+        # Add todo tool to the tools list if enabled
+        tools = [] if tools is None else list(tools)  # Make a copy to avoid modifying original
+
+        todo_tool = TodoTool(self.todo_list)
+        tools.append(todo_tool)
+
+        self.tool_executor = ToolExecutor(tools)
+
+        # Memory manager is fully owned by the agent
+        self.memory = MemoryManager(llm)
+        self.memory.set_todo_context_provider(self._get_todo_context)
+
+    async def load_session(self, session_id: str) -> None:
+        """Load a saved session into the agent's memory.
+
+        Args:
+            session_id: Session ID to load
+        """
+        self.memory = await MemoryManager.from_session(session_id, self.llm)
+        self.memory.set_todo_context_provider(self._get_todo_context)
+
+    def _set_llm_adapter(self, llm: "LiteLLMAdapter") -> None:
+        self.llm = llm
+
+        # Keep memory/compressor in sync with the active LLM.
+        # Otherwise stats/compression might continue using the previous model.
+        if hasattr(self, "memory") and self.memory:
+            self.memory.llm = llm
+            if hasattr(self.memory, "compressor") and self.memory.compressor:
+                self.memory.compressor.llm = llm
+
+    @abstractmethod
+    def run(self, task: str) -> str:
+        """Execute the agent on a task and return final answer."""
+        pass
+
+    async def _call_llm(
+        self,
+        messages: List[LLMMessage],
+        tools: Optional[List] = None,
+        spinner_message: str = "Thinking...",
+        **kwargs,
+    ) -> LLMResponse:
+        """Helper to call LLM with consistent parameters.
+
+        Args:
+            messages: List of conversation messages
+            tools: Optional list of tool schemas
+            spinner_message: Message to display with spinner
+            **kwargs: Additional LLM-specific parameters
+
+        Returns:
+            LLMResponse object
+        """
+        async with AsyncSpinner(terminal_ui.console, spinner_message):
+            return await self.llm.call_async(
+                messages=messages, tools=tools, max_tokens=4096, **kwargs
+            )
+
+    def _extract_text(self, response: LLMResponse) -> str:
+        """Extract text from LLM response.
+
+        Args:
+            response: LLMResponse object
+
+        Returns:
+            Extracted text
+        """
+        return self.llm.extract_text(response)
+
+    def _get_todo_context(self) -> Optional[str]:
+        """Get current todo list state for memory compression.
+
+        Returns formatted todo list if items exist, None otherwise.
+        This is used by MemoryManager to inject todo state into summaries.
+        """
+        items = self.todo_list.get_current()
+        if not items:
+            return None
+        return self.todo_list.format_list()
+
+    async def _react_loop(
+        self,
+        messages: List[LLMMessage],
+        tools: List,
+        use_memory: bool = True,
+        save_to_memory: bool = True,
+        task: str = "",
+    ) -> str:
+        """Execute a ReAct (Reasoning + Acting) loop.
+
+        This is a generic ReAct loop implementation that can be used by different agent types.
+        It supports both global memory-based context (for main agent loop) and local message
+        lists (for mini-loops within plan execution).
+
+        Args:
+            messages: Initial message list (ignored if use_memory=True)
+            tools: List of available tool schemas
+            use_memory: If True, use self.memory for context; if False, use local messages list
+            save_to_memory: If True, save messages to self.memory (only when use_memory=True)
+            task: Optional task description for context in tool result processing
+
+        Returns:
+            Final answer as a string
+        """
+        while True:
+            # Get context (either from memory or local messages)
+            context = self.memory.get_context_for_llm() if use_memory else messages
+
+            # Call LLM with tools
+            response = await self._call_llm(
+                messages=context,
+                tools=tools,
+                spinner_message="Analyzing request...",
+            )
+
+            # Save assistant response using response.to_message() for proper format
+            assistant_msg = response.to_message()
+            if use_memory:
+                if save_to_memory:
+                    # Extract actual token usage from response
+                    actual_tokens = None
+                    if response.usage:
+                        actual_tokens = {
+                            "input": response.usage.get("input_tokens", 0),
+                            "output": response.usage.get("output_tokens", 0),
+                        }
+                    await self.memory.add_message(assistant_msg, actual_tokens=actual_tokens)
+
+                    # Log compression info if it happened
+                    if self.memory.was_compressed_last_iteration:
+                        logger.debug(
+                            f"Memory compressed: saved {self.memory.last_compression_savings} tokens"
+                        )
+            else:
+                # For local messages (mini-loop), still track token usage
+                if response.usage:
+                    self.memory.token_tracker.add_input_tokens(
+                        response.usage.get("input_tokens", 0)
+                    )
+                    self.memory.token_tracker.add_output_tokens(
+                        response.usage.get("output_tokens", 0)
+                    )
+                messages.append(assistant_msg)
+
+            # Print thinking/reasoning if available (for all responses)
+            if hasattr(self.llm, "extract_thinking"):
+                thinking = self.llm.extract_thinking(response)
+                if thinking:
+                    terminal_ui.print_thinking(thinking)
+
+            # Check if we're done (no tool calls)
+            if response.stop_reason == StopReason.STOP:
+                final_answer = self._extract_text(response)
+                terminal_ui.console.print("\n[bold green]✓ Final answer received[/bold green]")
+                return final_answer
+
+            # Execute tool calls
+            if response.stop_reason == StopReason.TOOL_CALLS:
+                # Print assistant text content alongside tool calls
+                if response.content:
+                    terminal_ui.print_assistant_message(response.content)
+
+                tool_calls = self.llm.extract_tool_calls(response)
+
+                if not tool_calls:
+                    # No tool calls found, return response
+                    final_answer = self._extract_text(response)
+                    return final_answer if final_answer else "No response generated."
+
+                # Execute each tool call
+                tool_results = []
+                for tc in tool_calls:
+                    terminal_ui.print_tool_call(tc.name, tc.arguments)
+
+                    # Execute tool with spinner
+                    async with AsyncSpinner(terminal_ui.console, f"Executing {tc.name}..."):
+                        result = await self.tool_executor.execute_tool_call(tc.name, tc.arguments)
+                        # Tool already handles size limits, no additional processing needed
+
+                    terminal_ui.print_tool_result(result)
+
+                    # Log result (truncated)
+                    logger.debug(f"Tool result: {result[:200]}{'...' if len(result) > 200 else ''}")
+
+                    tool_results.append(
+                        ToolResult(tool_call_id=tc.id, content=result, name=tc.name)
+                    )
+
+                # Format tool results and add to context
+                # format_tool_results now returns a list of tool messages (OpenAI format)
+                result_messages = self.llm.format_tool_results(tool_results)
+                if isinstance(result_messages, list):
+                    for msg in result_messages:
+                        if use_memory and save_to_memory:
+                            await self.memory.add_message(msg)
+                        else:
+                            messages.append(msg)
+                else:
+                    # Backward compatibility: single message
+                    if use_memory and save_to_memory:
+                        await self.memory.add_message(result_messages)
+                    else:
+                        messages.append(result_messages)
+
+    def switch_model(self, model_id: str) -> bool:
+        """Switch to a different model.
+
+        Args:
+            model_id: LiteLLM model ID to switch to
+
+        Returns:
+            True if switch was successful, False otherwise
+        """
+        if not self.model_manager:
+            logger.warning("No model manager available for switching models")
+            return False
+
+        profile = self.model_manager.get_model(model_id)
+        if not profile:
+            logger.error(f"Model '{model_id}' not found")
+            return False
+
+        # Validate the model
+        is_valid, error_msg = self.model_manager.validate_model(profile)
+        if not is_valid:
+            logger.error(f"Invalid model: {error_msg}")
+            return False
+
+        # Switch the model
+        new_profile = self.model_manager.switch_model(model_id)
+        if not new_profile:
+            logger.error(f"Failed to switch to model '{model_id}'")
+            return False
+
+        # Reinitialize LLM adapter with new model
+        from llm import LiteLLMAdapter
+
+        new_llm = LiteLLMAdapter(
+            model=new_profile.model_id,
+            api_key=new_profile.api_key,
+            api_base=new_profile.api_base,
+            timeout=new_profile.timeout,
+            drop_params=new_profile.drop_params,
+        )
+        self._set_llm_adapter(new_llm)
+
+        logger.info(f"Switched to model: {new_profile.model_id}")
+        return True
+
+    def get_current_model_info(self) -> Optional[dict]:
+        """Get information about the current model.
+
+        Returns:
+            Dictionary with model info or None if not available
+        """
+        if self.model_manager:
+            profile = self.model_manager.get_current_model()
+            if not profile:
+                return None
+            return {
+                "name": profile.model_id,
+                "model_id": profile.model_id,
+                "provider": profile.provider,
+            }
+        return None
+
+    async def _ralph_loop(
+        self,
+        messages: List[LLMMessage],
+        tools: List,
+        use_memory: bool = True,
+        save_to_memory: bool = True,
+        task: str = "",
+        max_iterations: int = 3,
+        verifier: Optional[Verifier] = None,
+    ) -> str:
+        """Outer verification loop that wraps _react_loop.
+
+        After _react_loop returns a final answer, a verifier judges whether the
+        original task is satisfied. If not, feedback is injected and the inner
+        loop re-enters.
+
+        Args:
+            messages: Initial message list (passed through to _react_loop).
+            tools: List of available tool schemas.
+            use_memory: If True, use self.memory for context.
+            save_to_memory: If True, save messages to self.memory.
+            task: The original task description.
+            max_iterations: Maximum number of outer verification iterations.
+            verifier: Optional custom Verifier instance. Defaults to LLMVerifier.
+
+        Returns:
+            Final answer as a string.
+        """
+        if verifier is None:
+            verifier = LLMVerifier(self.llm, terminal_ui)
+
+        previous_results: List[VerificationResult] = []
+
+        for iteration in range(1, max_iterations + 1):
+            logger.debug(f"Ralph loop iteration {iteration}/{max_iterations}")
+
+            result = await self._react_loop(
+                messages=messages,
+                tools=tools,
+                use_memory=use_memory,
+                save_to_memory=save_to_memory,
+                task=task,
+            )
+
+            # Skip verification on last iteration — just return whatever we got
+            if iteration == max_iterations:
+                logger.debug("Ralph loop: max iterations reached, returning result")
+                terminal_ui.console.print(
+                    f"\n[bold dark_orange]⚠ Verification skipped "
+                    f"(max iterations {max_iterations} reached), returning last result[/bold dark_orange]"
+                )
+                return result
+
+            verification = await verifier.verify(
+                task=task,
+                result=result,
+                iteration=iteration,
+                previous_results=previous_results,
+            )
+            previous_results.append(verification)
+
+            if verification.complete:
+                logger.debug(f"Ralph loop: verified complete — {verification.reason}")
+                terminal_ui.console.print(
+                    f"\n[bold green]✓ Verification passed "
+                    f"(attempt {iteration}/{max_iterations}): {verification.reason}[/bold green]"
+                )
+                return result
+
+            # Inject feedback as a user message so the next _react_loop iteration
+            # picks it up from memory.
+            feedback = (
+                f"Your previous answer was reviewed and found incomplete. "
+                f"Feedback: {verification.reason}\n\n"
+                f"Please address the feedback and provide a complete answer."
+            )
+            # Print the incomplete result so the user can see what the agent produced
+            terminal_ui.print_unfinished_answer(result)
+
+            logger.debug(f"Ralph loop: injecting feedback — {verification.reason}")
+            terminal_ui.console.print(
+                f"\n[bold yellow]⟳ Verification feedback (attempt {iteration}/{max_iterations}): "
+                f"{verification.reason}[/bold yellow]"
+            )
+
+            if use_memory and save_to_memory:
+                await self.memory.add_message(LLMMessage(role="user", content=feedback))
+            else:
+                messages.append(LLMMessage(role="user", content=feedback))
+
+        # Should not reach here, but return last result as safety fallback
+        return result  # type: ignore[possibly-undefined]
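The `verifier` parameter of `_ralph_loop` allows swapping in a custom verification strategy in place of the default `LLMVerifier`. A hedged sketch of what such a verifier might look like, inferring the `verify` signature and the `VerificationResult` fields (`complete`, `reason`) from the call sites above; the real interface lives in `agent/verification.py`, whose contents are not shown in this diff:

# Sketch only: Verifier's abstract method and VerificationResult's
# constructor are inferred from _ralph_loop, not from agent/verification.py.
from typing import List

from agent.verification import VerificationResult, Verifier


class KeywordVerifier(Verifier):
    """Toy verifier: passes only if the answer mentions a required keyword."""

    def __init__(self, required: str):
        self.required = required

    async def verify(
        self,
        task: str,
        result: str,
        iteration: int,
        previous_results: List[VerificationResult],
    ) -> VerificationResult:
        ok = self.required.lower() in result.lower()
        reason = (
            "required keyword present"
            if ok
            else f"answer does not mention '{self.required}'"
        )
        return VerificationResult(complete=ok, reason=reason)

A subclass could then pass `verifier=KeywordVerifier("pipeline")` when invoking `_ralph_loop`, keeping the feedback-injection mechanics above unchanged.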