patchllm-0.2.2-py3-none-any.whl → patchllm-1.0.0-py3-none-any.whl

This diff compares publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between versions as they appear in their respective public registries.
Files changed (54)
  1. patchllm/__main__.py +0 -0
  2. patchllm/agent/__init__.py +0 -0
  3. patchllm/agent/actions.py +73 -0
  4. patchllm/agent/executor.py +57 -0
  5. patchllm/agent/planner.py +76 -0
  6. patchllm/agent/session.py +425 -0
  7. patchllm/cli/__init__.py +0 -0
  8. patchllm/cli/entrypoint.py +120 -0
  9. patchllm/cli/handlers.py +192 -0
  10. patchllm/cli/helpers.py +72 -0
  11. patchllm/interactive/__init__.py +0 -0
  12. patchllm/interactive/selector.py +100 -0
  13. patchllm/llm.py +39 -0
  14. patchllm/main.py +1 -323
  15. patchllm/parser.py +120 -64
  16. patchllm/patcher.py +118 -0
  17. patchllm/scopes/__init__.py +0 -0
  18. patchllm/scopes/builder.py +55 -0
  19. patchllm/scopes/constants.py +70 -0
  20. patchllm/scopes/helpers.py +147 -0
  21. patchllm/scopes/resolvers.py +82 -0
  22. patchllm/scopes/structure.py +64 -0
  23. patchllm/tui/__init__.py +0 -0
  24. patchllm/tui/completer.py +153 -0
  25. patchllm/tui/interface.py +703 -0
  26. patchllm/utils.py +19 -1
  27. patchllm/voice/__init__.py +0 -0
  28. patchllm/{listener.py → voice/listener.py} +8 -1
  29. patchllm-1.0.0.dist-info/METADATA +153 -0
  30. patchllm-1.0.0.dist-info/RECORD +51 -0
  31. patchllm-1.0.0.dist-info/entry_points.txt +2 -0
  32. {patchllm-0.2.2.dist-info → patchllm-1.0.0.dist-info}/top_level.txt +1 -0
  33. tests/__init__.py +0 -0
  34. tests/conftest.py +112 -0
  35. tests/test_actions.py +62 -0
  36. tests/test_agent.py +383 -0
  37. tests/test_completer.py +121 -0
  38. tests/test_context.py +140 -0
  39. tests/test_executor.py +60 -0
  40. tests/test_interactive.py +64 -0
  41. tests/test_parser.py +70 -0
  42. tests/test_patcher.py +71 -0
  43. tests/test_planner.py +53 -0
  44. tests/test_recipes.py +111 -0
  45. tests/test_scopes.py +47 -0
  46. tests/test_structure.py +48 -0
  47. tests/test_tui.py +397 -0
  48. tests/test_utils.py +31 -0
  49. patchllm/context.py +0 -238
  50. patchllm-0.2.2.dist-info/METADATA +0 -129
  51. patchllm-0.2.2.dist-info/RECORD +0 -12
  52. patchllm-0.2.2.dist-info/entry_points.txt +0 -2
  53. {patchllm-0.2.2.dist-info → patchllm-1.0.0.dist-info}/WHEEL +0 -0
  54. {patchllm-0.2.2.dist-info → patchllm-1.0.0.dist-info}/licenses/LICENSE +0 -0
patchllm/__main__.py ADDED
File without changes
patchllm/agent/__init__.py ADDED
File without changes
patchllm/agent/actions.py ADDED
@@ -0,0 +1,73 @@
+ import subprocess
+ from rich.console import Console
+ from rich.panel import Panel
+
+ console = Console()
+
+ def run_tests():
+     """
+     Runs tests using pytest and displays the output.
+     """
+     console.print("\n--- Running Tests ---", style="bold yellow")
+     try:
+         process = subprocess.run(
+             ["pytest"],
+             capture_output=True,
+             text=True,
+             check=False  # We don't want to crash if tests fail
+         )
+
+         output = process.stdout + process.stderr
+
+         if process.returncode == 0:
+             title = "[bold green]✅ Tests Passed[/bold green]"
+             border_style = "green"
+         else:
+             title = "[bold red]❌ Tests Failed[/bold red]"
+             border_style = "red"
+
+         console.print(Panel(output, title=title, border_style=border_style, expand=True))
+
+     except FileNotFoundError:
+         console.print("❌ 'pytest' command not found. Is it installed and in your PATH?", style="red")
+     except Exception as e:
+         console.print(f"❌ An unexpected error occurred while running tests: {e}", style="red")
+
+
+ def stage_files(files_to_stage: list[str] | None = None):
+     """
+     Stages files using git. If no files are specified, stages all changes.
+
+     Args:
+         files_to_stage (list[str], optional): A list of specific files to stage. Defaults to None.
+     """
+     command = ["git", "add"]
+     action_desc = "all changes"
+     if files_to_stage:
+         command.extend(files_to_stage)
+         action_desc = f"{len(files_to_stage)} file(s)"
+     else:
+         command.append(".")
+
+     console.print(f"\n--- Staging {action_desc} ---", style="bold yellow")
+     try:
+         process = subprocess.run(
+             command,
+             capture_output=True,
+             text=True,
+             check=True
+         )
+
+         output = process.stdout + process.stderr
+         if output:
+             console.print(output, style="dim")
+
+         console.print("✅ Files staged successfully.", style="green")
+
+     except FileNotFoundError:
+         console.print("❌ 'git' command not found. Is it installed and in your PATH?", style="red")
+     except subprocess.CalledProcessError as e:
+         console.print("❌ Failed to stage files.", style="red")
+         console.print(e.stderr)
+     except Exception as e:
+         console.print(f"❌ An unexpected error occurred while staging files: {e}", style="red")
patchllm/agent/executor.py ADDED
@@ -0,0 +1,57 @@
+ from ..llm import run_llm_query
+ from ..parser import summarize_changes, get_diff_for_file, parse_change_summary
+
+ def execute_step(step_instruction: str, history: list[dict], context: str | None, context_images: list | None, model_name: str) -> dict | None:
+     """
+     Executes a single step of the plan by calling the LLM.
+
+     Args:
+         step_instruction (str): The instruction for the current step.
+         history (list[dict]): The full conversation history.
+         context (str | None): The file context for the LLM.
+         context_images (list | None): A list of image data dictionaries for multimodal context.
+         model_name (str): The name of the LLM to use.
+
+     Returns:
+         A dictionary containing the instruction, response, and diffs, or None if it fails.
+     """
+
+     prompt_text = f"## Current Task:\n{step_instruction}"
+     if context:
+         prompt_text = f"## Context:\n{context}\n\n---\n\n{prompt_text}"
+
+     user_content = [{"type": "text", "text": prompt_text}]
+
+     if context_images:
+         for image_info in context_images:
+             user_content.append({
+                 "type": "image_url",
+                 "image_url": {
+                     "url": f"data:{image_info['mime_type']};base64,{image_info['content_base64']}"
+                 }
+             })
+
+     # Create a temporary message history for this specific call
+     messages = history + [{"role": "user", "content": user_content}]
+
+     llm_response = run_llm_query(messages, model_name)
+
+     if not llm_response:
+         return None
+
+     change_summary = parse_change_summary(llm_response)
+     summary = summarize_changes(llm_response)
+     all_files = summary.get("modified", []) + summary.get("created", [])
+
+     diffs = []
+     for file_path in all_files:
+         diff_text = get_diff_for_file(file_path, llm_response)
+         diffs.append({"file_path": file_path, "diff_text": diff_text})
+
+     return {
+         "instruction": step_instruction,
+         "llm_response": llm_response,
+         "summary": summary,
+         "diffs": diffs,
+         "change_summary": change_summary,
+     }
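A sketch of driving one step through `execute_step`; the model identifier is an assumption, since the actual backend sits behind `run_llm_query` in `patchllm/llm.py`:

```python
# Hypothetical call; the model name and feedback strings are placeholders.
from patchllm.agent.executor import execute_step

history = [{"role": "system", "content": "You are a careful coding assistant."}]
result = execute_step(
    step_instruction="Add a --version flag to the CLI",
    history=history,
    context=None,          # optionally a string of file contents
    context_images=None,   # optionally [{"mime_type": ..., "content_base64": ...}, ...]
    model_name="gpt-4o",   # assumed identifier; pass whatever your configured backend accepts
)
if result:
    print(result["summary"])                          # {"modified": [...], "created": [...]}
    print([d["file_path"] for d in result["diffs"]])  # files the LLM proposes to touch
```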
patchllm/agent/planner.py ADDED
@@ -0,0 +1,76 @@
+ import re
+ from ..llm import run_llm_query
+
+ def _get_planning_prompt(goal: str, context_tree: str) -> list[dict]:
+     """Constructs the initial prompt for the planning phase."""
+
+     system_prompt = (
+         "You are an expert software architect. Your task is to create a high-level, milestone-focused plan to "
+         "accomplish a user's goal. Break down the goal into logical, sequential steps that represent significant "
+         "pieces of functionality or architectural changes."
+         "\n\n"
+         "IMPORTANT RULES:\n"
+         "- DO NOT list individual file modifications. Instead, group related changes into a single milestone.\n"
+         "- For example, instead of a plan like '1. Add route to api.py, 2. Create logic in services.py', a better, "
+         "milestone-focused step would be '1. Implement the user authentication endpoint, including routes and server actions'.\n"
+         "- Do not write any code or implementation details in the plan.\n"
+         "- Each step should be a clear, actionable instruction for a developer.\n"
+         "- The final plan must be a numbered list."
+     )
+
+     user_prompt = (
+         "Based on my goal and the project structure below, create your plan.\n\n"
+         f"## Project Structure:\n```\n{context_tree}\n```\n\n"
+         f"## Goal:\n{goal}"
+     )
+
+     return [
+         {"role": "system", "content": system_prompt},
+         {"role": "user", "content": user_prompt}
+     ]
+
+ def _get_refine_prompt(history: list[dict], feedback: str) -> list[dict]:
+     """Constructs the prompt for refining an existing plan."""
+     refine_instruction = (
+         "The user has provided feedback or a new idea on the plan you created. "
+         "Carefully review the entire conversation and their latest feedback. "
+         "Your task is to generate a new, complete, and improved step-by-step plan that incorporates their feedback. "
+         "The new plan should be a single, cohesive, numbered list. Do not just add to the old plan; create a new one from scratch."
+         f"\n\n## User Feedback:\n{feedback}"
+     )
+
+     return history + [{"role": "user", "content": refine_instruction}]
+
+ def parse_plan_from_response(response_text: str) -> list[str] | None:
+     """Finds all lines that start with a number and a period (e.g., "1.", "2.")."""
+     if not response_text:
+         return None
+     # This is more robust than splitting by newline.
+     plan = re.findall(r"^\s*\d+\.\s+(.*)", response_text, re.MULTILINE)
+     return plan if plan else None
+
+ def generate_plan_and_history(goal: str, context_tree: str, model_name: str) -> tuple[list[dict], str | None]:
+     """
+     Calls the LLM to generate an initial plan and returns the history and response.
+
+     Returns:
+         A tuple containing the initial planning history (list of messages) and the LLM's raw response text.
+     """
+     messages = _get_planning_prompt(goal, context_tree)
+     response_text = run_llm_query(messages, model_name)
+
+     if response_text:
+         messages.append({"role": "assistant", "content": response_text})
+
+     return messages, response_text
+
+ def generate_refined_plan(history: list[dict], feedback: str, model_name: str) -> str | None:
+     """
+     Calls the LLM to refine a plan based on conversation history and new feedback.
+
+     Returns:
+         The LLM's raw response text containing the new plan.
+     """
+     messages = _get_refine_prompt(history, feedback)
+     response_text = run_llm_query(messages, model_name)
+     return response_text
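The plan parser is a plain regex over numbered lines, so its behaviour is easy to check in isolation; a small self-contained example against the `^\s*\d+\.\s+(.*)` pattern above:

```python
from patchllm.agent.planner import parse_plan_from_response

response = (
    "Here is the plan:\n"
    "1. Implement the user authentication endpoint.\n"
    "2. Add integration tests for the new endpoint.\n"
)
# Non-numbered lines are ignored; numbered lines are captured without their prefix.
assert parse_plan_from_response(response) == [
    "Implement the user authentication endpoint.",
    "Add integration tests for the new endpoint.",
]
```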
patchllm/agent/session.py ADDED
@@ -0,0 +1,425 @@
+ from pathlib import Path
+ import json
+ import os
+
+ from ..cli.helpers import get_system_prompt
+
+ CONFIG_FILE_PATH = Path(".patchllm_config.json")
+
+ class AgentSession:
+     """
+     Manages the state for a continuous, interactive agent session.
+     """
+     def __init__(self, args, scopes: dict, recipes: dict):
+         self.args = args
+         self.goal: str | None = None
+         self.plan: list[str] = []
+         self.current_step: int = 0
+         self.context: str | None = None
+         self.context_files: list[Path] = []
+         self.context_images: list = []
+         self.history: list[dict] = [{"role": "system", "content": get_system_prompt()}]
+         self.planning_history: list[dict] = []
+         self.scopes = scopes
+         self.recipes = recipes
+         self.last_execution_result: dict | None = None
+         self.action_history: list[str] = []
+         self.last_revert_state: list[dict] = []
+         self.api_keys: dict = {}
+         self.load_settings()
+
+     def load_settings(self):
+         """Loads settings from the config file and applies them."""
+         if CONFIG_FILE_PATH.exists():
+             try:
+                 with open(CONFIG_FILE_PATH, 'r') as f:
+                     settings = json.load(f)
+
+                 if 'model' in settings:
+                     self.args.model = settings['model']
+
+                 self.api_keys = settings.get('api_keys', {})
+                 for key, value in self.api_keys.items():
+                     if key not in os.environ:
+                         os.environ[key] = value
+
+             except (json.JSONDecodeError, IOError):
+                 pass
+
+     def save_settings(self):
+         """Saves current settings to the config file."""
+         settings_to_save = {
+             'model': self.args.model,
+             'api_keys': self.api_keys
+         }
+         with open(CONFIG_FILE_PATH, 'w') as f:
+             json.dump(settings_to_save, f, indent=2)
+
+     def set_api_key(self, key_name: str, key_value: str):
+         """Sets an API key, applies it to the environment, and saves it."""
+         self.api_keys[key_name] = key_value
+         os.environ[key_name] = key_value
+         self.save_settings()
+
+     def remove_api_key(self, key_name: str):
+         """Removes an API key and saves the settings."""
+         if key_name in self.api_keys:
+             del self.api_keys[key_name]
+             self.save_settings()
+
+     def to_dict(self) -> dict:
+         """Serializes the session's state to a dictionary."""
+         return {
+             "goal": self.goal,
+             "plan": self.plan,
+             "current_step": self.current_step,
+             "context_files": [p.as_posix() for p in self.context_files],
+             "action_history": self.action_history,
+             "last_revert_state": self.last_revert_state,
+         }
+
+     def from_dict(self, data: dict):
+         """Restores the session's state from a dictionary."""
+         self.goal = data.get("goal")
+         self.plan = data.get("plan", [])
+         self.current_step = data.get("current_step", 0)
+         self.action_history = data.get("action_history", [])
+         self.last_revert_state = data.get("last_revert_state", [])
+
+         context_file_paths = data.get("context_files", [])
+         if context_file_paths:
+             self.add_files_and_rebuild_context([Path(p) for p in context_file_paths])
+
+     def set_goal(self, goal: str):
+         self.goal = goal
+         self.plan = []
+         self.current_step = 0
+         self.planning_history = []
+         self.action_history.append(f"Goal set: {goal}")
+
+     def edit_plan_step(self, step_number: int, new_instruction: str) -> bool:
+         """Edits an instruction in the current plan."""
+         if 1 <= step_number <= len(self.plan):
+             self.plan[step_number - 1] = new_instruction
+             return True
+         return False
+
+     def remove_plan_step(self, step_number: int) -> bool:
+         """Removes a step from the current plan."""
+         if 1 <= step_number <= len(self.plan):
+             del self.plan[step_number - 1]
+             if step_number - 1 < self.current_step:
+                 self.current_step -= 1
+             return True
+         return False
+
+     def add_plan_step(self, instruction: str):
+         """Adds a new instruction to the end of the plan."""
+         self.plan.append(instruction)
+
+     def skip_step(self) -> bool:
+         """Skips the current step and moves to the next one."""
+         if self.current_step < len(self.plan):
+             self.current_step += 1
+             self.last_execution_result = None
+             return True
+         return False
+
+     def create_plan(self) -> bool:
+         from ..scopes.builder import helpers
+         from . import planner
+
+         if not self.goal: return False
+         context_tree = helpers.generate_source_tree(Path(".").resolve(), self.context_files)
+
+         self.planning_history, plan_response = planner.generate_plan_and_history(self.goal, context_tree, self.args.model)
+
+         if plan_response:
+             parsed_plan = planner.parse_plan_from_response(plan_response)
+             if parsed_plan:
+                 self.plan = parsed_plan
+                 self.action_history.append("Plan generated.")
+                 return True
+         return False
+
+     def ask_question(self, question: str) -> str | None:
+         """Asks a clarifying question about the plan or the context."""
+         from ..llm import run_llm_query
+
+         prompt_text = (
+             "Based on our conversation so far, please answer my question.\n\n"
+             f"## My Question\n{question}"
+         )
+
+         if self.context:
+             prompt_text = (
+                 "Based on the provided context and our conversation so far, please answer my question.\n\n"
+                 f"## Code Context\n{self.context}\n\n---\n\n"
+                 f"## My Question\n{question}"
+             )
+
+         user_content = [{"type": "text", "text": prompt_text}]
+
+         if self.context_images:
+             for image_info in self.context_images:
+                 user_content.append({
+                     "type": "image_url",
+                     "image_url": {
+                         "url": f"data:{image_info['mime_type']};base64,{image_info['content_base64']}"
+                     }
+                 })
+
+         messages = self.planning_history + [{"role": "user", "content": user_content}]
+         response = run_llm_query(messages, self.args.model)
+
+         if response:
+             self.planning_history.append({"role": "assistant", "content": response})
+
+         return response
+
+     def refine_plan(self, feedback: str) -> bool:
+         """Refines the existing plan based on user feedback."""
+         from . import planner
+
+         new_plan_response = planner.generate_refined_plan(self.planning_history, feedback, self.args.model)
+         if new_plan_response:
+             parsed_plan = planner.parse_plan_from_response(new_plan_response)
+             if parsed_plan:
+                 self.planning_history.append({"role": "user", "content": feedback})
+                 self.planning_history.append({"role": "assistant", "content": new_plan_response})
+                 self.plan = parsed_plan
+                 return True
+         return False
+
+     def run_next_step(self, instruction_override: str | None = None) -> dict | None:
+         from . import executor
+
+         if not self.plan or self.current_step >= len(self.plan): return None
+         step_instruction = instruction_override or self.plan[self.current_step]
+         result = executor.execute_step(step_instruction, self.history, self.context, self.context_images, self.args.model)
+         if result: self.last_execution_result = result
+         return result
+
+     def run_all_remaining_steps(self) -> dict | None:
+         """Combines all remaining steps into a single execution call."""
+         from . import executor
+
+         if not self.plan or self.current_step >= len(self.plan): return None
+
+         remaining_steps = self.plan[self.current_step:]
+         if not remaining_steps: return None
+
+         formatted_steps = "\n".join(f"{i + 1}. {step}" for i, step in enumerate(remaining_steps))
+         combined_instruction = (
+             "Please execute the following remaining steps of the plan in a single pass. "
+             "Ensure you provide the full, final content for every file you modify.\n\n"
+             f"--- Remaining Steps ---\n{formatted_steps}"
+         )
+
+         result = executor.execute_step(combined_instruction, self.history, self.context, self.context_images, self.args.model)
+
+         if result:
+             self.last_execution_result = result
+             self.last_execution_result['is_multi_step'] = True
+         return result
+
+     def run_goal_directly(self) -> dict | None:
+         """Executes the user's goal directly without a plan."""
+         from . import executor
+
+         if not self.goal: return None
+
+         combined_instruction = (
+             "Please achieve the following goal in a single pass. "
+             "Ensure you provide the full, final content for every file you modify.\n\n"
+             f"--- Goal ---\n{self.goal}"
+         )
+
+         result = executor.execute_step(combined_instruction, self.history, self.context, self.context_images, self.args.model)
+
+         if result:
+             self.last_execution_result = result
+             self.last_execution_result['is_planless_run'] = True
+         return result
+
+     def approve_changes(self, files_to_approve: list[str]) -> bool:
+         """
+         Applies changes from the last execution, either fully or partially.
+         Returns True if all changes were applied, False otherwise.
+         """
+         from ..parser import paste_response_selectively, _parse_file_blocks
+
+         if not self.last_execution_result: return False
+
+         all_proposed_files = self.last_execution_result.get("summary", {}).get("modified", []) + \
+                              self.last_execution_result.get("summary", {}).get("created", [])
+
+         revert_state = []
+         parsed_blocks = _parse_file_blocks(self.last_execution_result["llm_response"])
+
+         for file_path_str in files_to_approve:
+             file_path = Path(file_path_str)
+             if file_path.exists():
+                 try:
+                     original_content = file_path.read_text(encoding="utf-8")
+                     revert_state.append({"file_path": file_path.as_posix(), "content": original_content, "action": "modify"})
+                 except Exception:
+                     pass
+             else:
+                 revert_state.append({"file_path": file_path.as_posix(), "content": None, "action": "create"})
+
+         self.last_revert_state = revert_state
+
+         paste_response_selectively(self.last_execution_result["llm_response"], files_to_approve)
+
+         is_multi_step = self.last_execution_result.get('is_multi_step', False)
+         is_planless_run = self.last_execution_result.get('is_planless_run', False)
+
+         if is_planless_run:
+             step_log_msg = "plan-less goal execution"
+         else:
+             step_log_msg = f"steps {self.current_step + 1}-{len(self.plan)}" if is_multi_step else f"step {self.current_step + 1}"
+
+         self.action_history.append(f"Approved {len(files_to_approve)} file(s) for {step_log_msg}.")
+
+         is_full_approval = len(files_to_approve) == len(all_proposed_files)
+
+         if is_full_approval:
+             instruction_used = self.last_execution_result.get("instruction")
+             if not instruction_used and not is_planless_run:
+                 instruction_used = self.plan[self.current_step]
+
+             user_prompt = f"Context attached.\n\n---\n\nMy task was: {instruction_used}"
+             self.history.append({"role": "user", "content": user_prompt})
+             self.history.append({"role": "assistant", "content": self.last_execution_result["llm_response"]})
+
+             if not is_planless_run:
+                 if is_multi_step:
+                     self.current_step = len(self.plan)
+                 else:
+                     self.current_step += 1
+
+             self.last_execution_result = None
+         else:
+             self.last_execution_result['approved_files'] = files_to_approve
+
+         return is_full_approval
+
+     def revert_last_approval(self) -> bool:
+         """Writes the stored original content back to the files from the last approval."""
+         if not self.last_revert_state:
+             return False
+
+         for state in self.last_revert_state:
+             file_path = Path(state["file_path"])
+             action = state["action"]
+
+             try:
+                 if action == "modify":
+                     file_path.write_text(state["content"], encoding="utf-8")
+                 elif action == "create":
+                     if file_path.exists():
+                         file_path.unlink()
+             except Exception as e:
+                 print(f"Warning: Could not revert {file_path}: {e}")
+
+         self.action_history.append("Reverted last approval.")
+         self.last_revert_state = []
+         return True
+
+     def retry_step(self, feedback: str) -> dict | None:
+         """
+         Retries the current step. If a partial approval occurred, it constructs
+         a more detailed prompt informing the LLM of what was approved and rejected.
+         """
+         from . import executor
+
+         is_planless_run = self.last_execution_result and self.last_execution_result.get('is_planless_run', False)
+
+         if not is_planless_run and (not self.plan or self.current_step >= len(self.plan)): return None
+         if is_planless_run and not self.goal: return None
+
+         is_multi_step = self.last_execution_result and self.last_execution_result.get('is_multi_step', False)
+
+         if is_planless_run:
+             original_instruction = f"to achieve the goal: {self.goal}"
+         else:
+             original_instruction = "to complete the rest of the plan" if is_multi_step else self.plan[self.current_step]
+
+         if self.last_execution_result and 'approved_files' in self.last_execution_result:
+             approved = self.last_execution_result['approved_files']
+             all_proposed = self.last_execution_result.get("summary", {}).get("modified", []) + \
+                            self.last_execution_result.get("summary", {}).get("created", [])
+             rejected = [f for f in all_proposed if f not in approved]
+
+             refined_instruction = (
+                 f"Your previous attempt was partially correct. I have **approved** the changes for the following files:\n"
+                 f"- {', '.join(Path(f).name for f in approved)}\n\n"
+                 f"However, I **rejected** the changes for these files:\n"
+                 f"- {', '.join(Path(f).name for f in rejected)}\n\n"
+                 f"Here is my feedback on the rejected files: {feedback}\n\n"
+                 f"Please provide a new, corrected version of **only the rejected files** based on this feedback.\n\n"
+                 f"---\n\nMy original overall instruction for this step was: {original_instruction}"
+             )
+         else:
+             refined_instruction = (
+                 f"My previous attempt was not correct. Here is my feedback: {feedback}\n\n"
+                 f"---\n\nMy original instruction was: {original_instruction}"
+             )
+
+         result = executor.execute_step(refined_instruction, self.history, self.context, self.context_images, self.args.model)
+         if result:
+             self.last_execution_result = result
+             if is_multi_step:
+                 self.last_execution_result['is_multi_step'] = True
+             if is_planless_run:
+                 self.last_execution_result['is_planless_run'] = True
+         return result
+
+     def reload_scopes(self, scopes_file_path: str):
+         from ..utils import load_from_py_file
+
+         try:
+             self.scopes = load_from_py_file(scopes_file_path, "scopes")
+         except FileNotFoundError:
+             self.scopes = {}
+         except Exception as e:
+             print(f"Warning: Could not reload scopes file: {e}")
+
+     def load_context_from_scope(self, scope_name: str) -> str:
+         from ..scopes.builder import build_context
+
+         context_object = build_context(scope_name, self.scopes, Path(".").resolve())
+         if context_object:
+             self.context = context_object.get("context")
+             self.context_files = context_object.get("files", [])
+             self.context_images = context_object.get("images", [])
+             return context_object.get("tree", "Context loaded.")
+         self.clear_context()
+         return f"⚠️ Could not build context for scope '{scope_name}'. No files found."
+
+     def add_files_and_rebuild_context(self, new_files: list[Path]) -> str:
+         from ..scopes.builder import build_context_from_files
+
+         current_files_set = set(self.context_files)
+         updated_files = sorted(list(current_files_set.union(set(new_files))))
+         context_object = build_context_from_files(updated_files, Path(".").resolve())
+         if context_object:
+             self.context = context_object.get("context")
+             self.context_files = context_object.get("files", [])
+             self.context_images = context_object.get("images", [])
+             return context_object.get("tree", "Context updated.")
+         return "⚠️ Failed to rebuild context with new files."
+
+     def add_context_from_scope(self, scope_name: str) -> str:
+         from ..scopes.builder import build_context
+
+         context_object = build_context(scope_name, self.scopes, Path(".").resolve())
+         if not context_object or not context_object.get("files"):
+             return f"⚠️ Scope '{scope_name}' resolved to zero files. Context is unchanged."
+         return self.add_files_and_rebuild_context(context_object.get("files", []))
+
+     def clear_context(self):
+         self.context = None
+         self.context_files = []
+         self.context_images = []
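Putting the pieces together, a sketch of the session lifecycle; the `args` namespace here is an assumption inferred from the only attribute `AgentSession` reads from it (`args.model`), and the model name is a placeholder:

```python
# Hypothetical lifecycle sketch; each create_plan/run_next_step call hits the LLM.
from types import SimpleNamespace
from patchllm.agent.session import AgentSession

args = SimpleNamespace(model="gpt-4o")  # assumed stand-in for the CLI's argparse namespace
session = AgentSession(args, scopes={}, recipes={})

session.set_goal("Refactor the CLI entrypoint")
if session.create_plan():                 # populates session.plan from the numbered response
    result = session.run_next_step()      # returns instruction/response/summary/diffs
    if result:
        proposed = [d["file_path"] for d in result["diffs"]]
        session.approve_changes(proposed) # writes files and records revert state
        # session.revert_last_approval()  # undoes the write-back if needed
```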
patchllm/cli/__init__.py ADDED
File without changes