patchllm 0.2.1__py3-none-any.whl → 1.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (54) hide show
  1. patchllm/__main__.py +0 -0
  2. patchllm/agent/__init__.py +0 -0
  3. patchllm/agent/actions.py +73 -0
  4. patchllm/agent/executor.py +57 -0
  5. patchllm/agent/planner.py +76 -0
  6. patchllm/agent/session.py +425 -0
  7. patchllm/cli/__init__.py +0 -0
  8. patchllm/cli/entrypoint.py +120 -0
  9. patchllm/cli/handlers.py +192 -0
  10. patchllm/cli/helpers.py +72 -0
  11. patchllm/interactive/__init__.py +0 -0
  12. patchllm/interactive/selector.py +100 -0
  13. patchllm/llm.py +39 -0
  14. patchllm/main.py +1 -283
  15. patchllm/parser.py +120 -64
  16. patchllm/patcher.py +118 -0
  17. patchllm/scopes/__init__.py +0 -0
  18. patchllm/scopes/builder.py +55 -0
  19. patchllm/scopes/constants.py +70 -0
  20. patchllm/scopes/helpers.py +147 -0
  21. patchllm/scopes/resolvers.py +82 -0
  22. patchllm/scopes/structure.py +64 -0
  23. patchllm/tui/__init__.py +0 -0
  24. patchllm/tui/completer.py +153 -0
  25. patchllm/tui/interface.py +703 -0
  26. patchllm/utils.py +19 -1
  27. patchllm/voice/__init__.py +0 -0
  28. patchllm/{listener.py → voice/listener.py} +8 -1
  29. patchllm-1.0.0.dist-info/METADATA +153 -0
  30. patchllm-1.0.0.dist-info/RECORD +51 -0
  31. patchllm-1.0.0.dist-info/entry_points.txt +2 -0
  32. {patchllm-0.2.1.dist-info → patchllm-1.0.0.dist-info}/top_level.txt +1 -0
  33. tests/__init__.py +0 -0
  34. tests/conftest.py +112 -0
  35. tests/test_actions.py +62 -0
  36. tests/test_agent.py +383 -0
  37. tests/test_completer.py +121 -0
  38. tests/test_context.py +140 -0
  39. tests/test_executor.py +60 -0
  40. tests/test_interactive.py +64 -0
  41. tests/test_parser.py +70 -0
  42. tests/test_patcher.py +71 -0
  43. tests/test_planner.py +53 -0
  44. tests/test_recipes.py +111 -0
  45. tests/test_scopes.py +47 -0
  46. tests/test_structure.py +48 -0
  47. tests/test_tui.py +397 -0
  48. tests/test_utils.py +31 -0
  49. patchllm/context.py +0 -238
  50. patchllm-0.2.1.dist-info/METADATA +0 -127
  51. patchllm-0.2.1.dist-info/RECORD +0 -12
  52. patchllm-0.2.1.dist-info/entry_points.txt +0 -2
  53. {patchllm-0.2.1.dist-info → patchllm-1.0.0.dist-info}/WHEEL +0 -0
  54. {patchllm-0.2.1.dist-info → patchllm-1.0.0.dist-info}/licenses/LICENSE +0 -0
tests/test_agent.py ADDED
@@ -0,0 +1,383 @@
1
+ import pytest
2
+ from pathlib import Path
3
+ import os
4
+ import json
5
+ from unittest.mock import patch, MagicMock
6
+
7
+ from patchllm.agent.session import AgentSession, CONFIG_FILE_PATH
8
+ from patchllm.utils import load_from_py_file
9
+
10
@pytest.fixture
def mock_args():
    """Provide a minimal argparse-style namespace exposing a `model` attribute."""
    class _Args:
        def __init__(self, model="default-model"):
            self.model = model

    return _Args()
16
+
17
def test_session_ask_question_about_plan(mock_args):
    """Asking about the plan routes through the planning history, not the context."""
    agent = AgentSession(args=mock_args, scopes={}, recipes={})
    agent.plan = ["step 1"]
    agent.planning_history = [{"role": "system", "content": "You are a planner."}]

    with patch('patchllm.llm.run_llm_query') as fake_query:
        fake_query.return_value = "This is the answer."
        answer = agent.ask_question("Why step 1?")

    assert answer == "This is the answer."
    # Only system + assistant entries remain; the user turn is not persisted.
    assert len(agent.planning_history) == 2

    fake_query.assert_called_once()
    outgoing = fake_query.call_args[0][0]
    question_text = outgoing[-1]['content'][0]['text']

    assert "My Question" in question_text
    assert "Why step 1?" in question_text
    assert "Code Context" not in question_text

    assert agent.planning_history[-1]['content'] == "This is the answer."
    # The plan itself must be untouched by a question.
    assert agent.plan == ["step 1"]
40
+
41
+
42
def test_session_ask_question_about_context(mock_args):
    """When context is loaded, ask_question embeds it into the prompt."""
    agent = AgentSession(args=mock_args, scopes={}, recipes={})
    agent.context = "<file_path:/app.py>..."
    agent.planning_history = [{"role": "system", "content": "You are a planner."}]

    with patch('patchllm.llm.run_llm_query') as fake_query:
        fake_query.return_value = "It's a web server."
        answer = agent.ask_question("What does app.py do?")

    assert answer == "It's a web server."
    assert len(agent.planning_history) == 2

    fake_query.assert_called_once()
    prompt_text = fake_query.call_args[0][0][-1]['content'][0]['text']

    # Both the loaded context and the question must appear in the prompt.
    for fragment in (
        "Code Context",
        "<file_path:/app.py>...",
        "My Question",
        "What does app.py do?",
    ):
        assert fragment in prompt_text
62
+
63
def test_session_ask_question_with_image(mock_args):
    """ask_question forwards attached images as data-URL image parts."""
    agent = AgentSession(args=mock_args, scopes={}, recipes={})
    agent.context = "Some text context"
    agent.context_images = [{"mime_type": "image/png", "content_base64": "base64string"}]

    with patch('patchllm.llm.run_llm_query') as fake_query:
        fake_query.return_value = "It's an image."
        agent.ask_question("What is this?")

    fake_query.assert_called_once()
    parts = fake_query.call_args[0][0][-1]['content']

    # Multimodal messages are a list: one text part followed by one image part.
    assert isinstance(parts, list)
    assert len(parts) == 2

    text_part, image_part = parts
    assert text_part['type'] == 'text'
    assert "What is this?" in text_part['text']

    assert image_part['type'] == 'image_url'
    assert image_part['image_url']['url'] == "data:image/png;base64,base64string"
90
+
91
def test_session_refine_plan(mock_args):
    """refine_plan replaces the plan with the planner's refined steps."""
    agent = AgentSession(args=mock_args, scopes={}, recipes={})
    agent.plan = ["old step 1"]
    agent.planning_history = [{"role": "system", "content": "Planner"}]

    with patch('patchllm.agent.planner.generate_refined_plan') as fake_refine:
        fake_refine.return_value = "1. new step 1\n2. new step 2"
        assert agent.refine_plan("Please add another step.") is True

    assert agent.plan == ["new step 1", "new step 2"]
    # History grows by the refinement exchange.
    assert len(agent.planning_history) == 3
    fake_refine.assert_called_once()
104
+
105
def test_session_load_and_save_settings(mock_args, tmp_path):
    """Settings persist through CONFIG_FILE_PATH and reload on construction.

    Fix: the original `os.chdir(tmp_path)` leaked into subsequent tests in
    the same pytest process; the cwd is now restored in a finally block.
    """
    previous_cwd = os.getcwd()
    os.chdir(tmp_path)
    try:
        session1 = AgentSession(args=mock_args, scopes={}, recipes={})
        session1.args.model = "new-saved-model"
        session1.save_settings()

        assert CONFIG_FILE_PATH.exists()
        with open(CONFIG_FILE_PATH, 'r') as f:
            data = json.load(f)
        assert data['model'] == "new-saved-model"

        # A fresh session picks up the persisted model name.
        session2 = AgentSession(args=mock_args, scopes={}, recipes={})
        assert session2.args.model == "new-saved-model"

        # With the config file removed the default model is kept.
        CONFIG_FILE_PATH.unlink()
        mock_args.model = "default-model"
        session3 = AgentSession(args=mock_args, scopes={}, recipes={})
        assert session3.args.model == "default-model"
    finally:
        os.chdir(previous_cwd)
123
+
124
+
125
def test_session_edit_plan_step(mock_args):
    """Editing a step is 1-indexed and rejects out-of-range indices."""
    agent = AgentSession(args=mock_args, scopes={}, recipes={})
    agent.plan = ["step 1", "step 2", "step 3"]

    assert agent.edit_plan_step(2, "step 2 edited") is True
    assert agent.plan == ["step 1", "step 2 edited", "step 3"]

    # Index 5 is past the end of the plan.
    assert agent.edit_plan_step(5, "invalid") is False
135
+
136
def test_session_remove_plan_step(mock_args):
    """Removing steps shrinks the plan and keeps current_step consistent."""
    agent = AgentSession(args=mock_args, scopes={}, recipes={})
    agent.plan = ["step 1", "step 2", "step 3"]
    agent.current_step = 2

    # Removing a step before the cursor shifts the cursor back.
    assert agent.remove_plan_step(1) is True
    assert agent.plan == ["step 2", "step 3"]
    assert agent.current_step == 1

    # Removing a step at/after the cursor leaves the cursor alone.
    assert agent.remove_plan_step(2) is True
    assert agent.plan == ["step 2"]
    assert agent.current_step == 1

    # Out-of-range removal fails.
    assert agent.remove_plan_step(5) is False
153
+
154
def test_session_add_plan_step(mock_args):
    """add_plan_step appends the new step to the end of the plan."""
    agent = AgentSession(args=mock_args, scopes={}, recipes={})
    agent.plan = ["step 1"]
    agent.add_plan_step("step 2")
    assert agent.plan == ["step 1", "step 2"]
159
+
160
def test_session_skip_step(mock_args):
    """skip_step advances the cursor, drops pending results, and stops at the end."""
    agent = AgentSession(args=mock_args, scopes={}, recipes={})
    agent.plan = ["step 1", "step 2"]
    agent.last_execution_result = {"diffs": []}

    assert agent.skip_step() is True
    assert agent.current_step == 1
    # Skipping discards any pending (unapproved) execution result.
    assert agent.last_execution_result is None

    agent.skip_step()
    assert agent.current_step == 2

    # No steps remain, so a further skip must fail.
    assert agent.skip_step() is False
175
+
176
def test_session_approve_changes_full(mock_args):
    """Approving every touched file applies the patch and advances the plan."""
    agent = AgentSession(args=mock_args, scopes={}, recipes={})
    agent.plan = ["do something"]
    response_text = "<file_path:/tmp/a.txt>\n```python\nprint('hello')\n```"
    agent.last_execution_result = {
        "instruction": "do something",
        "llm_response": response_text,
        "summary": {"modified": ["/tmp/a.txt"], "created": []},
    }

    with patch('patchllm.parser.paste_response_selectively') as fake_paste:
        assert agent.approve_changes(["/tmp/a.txt"]) is True
        fake_paste.assert_called_once_with(response_text, ["/tmp/a.txt"])

    assert agent.current_step == 1
    assert agent.last_execution_result is None
191
+
192
def test_session_approve_changes_partial(mock_args):
    """A partial approval applies only the chosen files and keeps the step open."""
    agent = AgentSession(args=mock_args, scopes={}, recipes={})
    agent.plan = ["do something"]
    response_text = "<file_path:/tmp/a.txt>\n```\n...\n```<file_path:/tmp/b.txt>\n```\n...\n```"
    agent.last_execution_result = {
        "instruction": "do something",
        "llm_response": response_text,
        "summary": {"modified": ["/tmp/a.txt", "/tmp/b.txt"], "created": []},
    }

    with patch('patchllm.parser.paste_response_selectively') as fake_paste:
        assert agent.approve_changes(["/tmp/a.txt"]) is False
        fake_paste.assert_called_once_with(response_text, ["/tmp/a.txt"])

    # The step is not advanced and the result records what was approved.
    assert agent.current_step == 0
    assert agent.last_execution_result is not None
    assert agent.last_execution_result['approved_files'] == ["/tmp/a.txt"]
208
+
209
def test_session_retry_step_after_partial_approval(mock_args):
    """Retrying after a partial approval tells the LLM what was kept vs. rejected."""
    agent = AgentSession(args=mock_args, scopes={}, recipes={})
    agent.plan = ["original instruction"]
    agent.last_execution_result = {
        "approved_files": ["/tmp/a.txt"],
        "summary": {"modified": ["/tmp/a.txt", "/tmp/b.txt"], "created": []},
    }

    with patch('patchllm.agent.executor.execute_step') as fake_exec:
        agent.retry_step("it was wrong")
        fake_exec.assert_called_once()
        prompt = fake_exec.call_args[0][0]

    # The refined instruction must mention the kept file, the rejected file,
    # the user's feedback, and the original instruction.
    for expected in (
        "I have **approved** the changes",
        "a.txt",
        "I **rejected** the changes",
        "b.txt",
        "feedback on the rejected files: it was wrong",
        "original overall instruction",
    ):
        assert expected in prompt
226
+
227
def test_session_retry_step(mock_args):
    """A plain retry folds the feedback into the original instruction."""
    agent = AgentSession(args=mock_args, scopes={}, recipes={})
    agent.plan = ["original instruction"]

    with patch('patchllm.agent.executor.execute_step') as fake_exec:
        agent.retry_step("it was wrong")
        fake_exec.assert_called_once()
        prompt = fake_exec.call_args[0][0]

    assert "feedback: it was wrong" in prompt
    assert "original instruction" in prompt
236
+
237
def test_session_serialization_and_deserialization(mock_args, temp_project):
    """to_dict/from_dict round-trips goal, plan, cursor, context, and history.

    Fix: the original `os.chdir(temp_project)` leaked the cwd change into
    later tests; it is now restored in a finally block.
    """
    previous_cwd = os.getcwd()
    os.chdir(temp_project)
    try:
        session1 = AgentSession(args=mock_args, scopes={}, recipes={})
        session1.set_goal("my goal")
        session1.plan = ["step 1", "step 2"]
        session1.current_step = 1
        session1.action_history = ["Goal set: my goal"]
        session1.last_revert_state = [{"file_path": "/tmp/a.txt", "content": "old", "action": "modify"}]
        file_path = temp_project / "main.py"
        file_path.write_text("content")
        session1.add_files_and_rebuild_context([file_path])

        session_data = session1.to_dict()

        # Rehydrate into a brand-new session and compare every field.
        session2 = AgentSession(args=mock_args, scopes={}, recipes={})
        session2.from_dict(session_data)

        assert session2.goal == session1.goal
        assert session2.plan == session1.plan
        assert session2.current_step == session1.current_step
        assert session2.context_files == session1.context_files
        assert "content" in session2.context
        assert session2.action_history == session1.action_history
        assert session2.last_revert_state == session1.last_revert_state
    finally:
        os.chdir(previous_cwd)
261
+
262
def test_session_action_history(mock_args):
    """Each noteworthy action appends exactly one entry to the history log."""
    agent = AgentSession(args=mock_args, scopes={}, recipes={})

    agent.set_goal("My test goal")
    assert len(agent.action_history) == 1

    with patch('patchllm.agent.planner.generate_plan_and_history') as fake_plan:
        fake_plan.return_value = ([{"role": "user", "content": ""}], "1. Do a thing")
        agent.create_plan()
    assert len(agent.action_history) == 2

    agent.last_execution_result = {"llm_response": "...", "summary": {"modified": ["/tmp/a.txt"], "created": []}}
    agent.approve_changes(["/tmp/a.txt"])
    assert len(agent.action_history) == 3
    assert "Approved 1 file(s)" in agent.action_history[2]

    agent.revert_last_approval()
    assert len(agent.action_history) == 4
    assert "Reverted" in agent.action_history[3]
280
+
281
def test_session_revert_last_approval(mock_args, tmp_path):
    """Reverting an approval restores modified files and deletes created ones.

    Fix: the original `os.chdir(tmp_path)` leaked the cwd change into later
    tests; it is now restored in a finally block.
    """
    previous_cwd = os.getcwd()
    os.chdir(tmp_path)
    try:
        file_to_modify = tmp_path / "test.py"
        original_content = "def hello_world():\n return 'original'"
        file_to_modify.write_text(original_content)

        file_to_create = tmp_path / "new_file.py"

        session = AgentSession(args=mock_args, scopes={}, recipes={})
        session.current_step = 0
        session.plan = ["Modify test.py and create new_file.py"]

        new_content_modify = "def hello_world():\n return 'modified'"
        new_content_create = "print('new file')"

        # An LLM response that modifies one file and creates another.
        llm_response = (
            f"<file_path:{file_to_modify.as_posix()}>\n```python\n{new_content_modify}\n```\n"
            f"<file_path:{file_to_create.as_posix()}>\n```python\n{new_content_create}\n```"
        )
        session.last_execution_result = {
            "llm_response": llm_response,
            "summary": {"modified": [file_to_modify.as_posix()], "created": [file_to_create.as_posix()]},
        }

        # Approving writes both files and records a revert snapshot per file.
        session.approve_changes([file_to_modify.as_posix(), file_to_create.as_posix()])
        assert file_to_modify.read_text() == new_content_modify
        assert file_to_create.read_text() == new_content_create
        assert len(session.last_revert_state) == 2

        assert session.revert_last_approval() is True

        # The modification is rolled back and the created file removed.
        assert file_to_modify.read_text() == original_content
        assert not file_to_create.exists()
        assert session.last_revert_state == []
    finally:
        os.chdir(previous_cwd)
314
+
315
def test_session_load_context_with_image(mock_args, temp_project):
    """Loading a scope that includes an image populates context_images.

    Fix: the original `os.chdir(temp_project)` leaked the cwd change into
    later tests; it is now restored in a finally block.
    """
    previous_cwd = os.getcwd()
    os.chdir(temp_project)
    try:
        session = AgentSession(args=mock_args, scopes={}, recipes={})

        # A dynamic @dir scope picks up every file in the project directory.
        session.load_context_from_scope(f"@dir:{temp_project.as_posix()}")

        assert session.context is not None
        assert "main.py" in session.context

        assert session.context_images is not None
        assert len(session.context_images) == 1
        assert session.context_images[0]["path"].name == "logo.png"
    finally:
        os.chdir(previous_cwd)
329
+
330
def test_session_run_goal_directly(mock_args):
    """Executing the goal without a plan marks the result as plan-less."""
    agent = AgentSession(args=mock_args, scopes={}, recipes={})
    agent.set_goal("my goal")

    with patch('patchllm.agent.executor.execute_step') as fake_exec:
        fake_exec.return_value = {"summary": {}}
        agent.run_goal_directly()

    fake_exec.assert_called_once()
    prompt = fake_exec.call_args[0][0]
    assert "achieve the following goal" in prompt
    assert "my goal" in prompt

    assert agent.last_execution_result is not None
    assert agent.last_execution_result['is_planless_run'] is True
345
+
346
def test_session_approve_changes_planless_run(mock_args):
    """Approving a plan-less run never advances the step counter."""
    agent = AgentSession(args=mock_args, scopes={}, recipes={})
    agent.set_goal("my goal")
    response_text = "<file_path:/a.txt>\n```\n...\n```"
    agent.last_execution_result = {
        "instruction": "...",
        "llm_response": response_text,
        "summary": {"modified": ["/a.txt"], "created": []},
        "is_planless_run": True,
    }

    with patch('patchllm.parser.paste_response_selectively'):
        fully_approved = agent.approve_changes(["/a.txt"])

    assert fully_approved is True
    # There is no plan cursor to advance for a plan-less run.
    assert agent.current_step == 0
    assert agent.last_execution_result is None
    assert "plan-less goal execution" in agent.action_history[-1]
365
+
366
def test_session_retry_step_planless_partial_approval(mock_args):
    """A plan-less retry after partial approval reports kept vs. rejected files."""
    agent = AgentSession(args=mock_args, scopes={}, recipes={})
    agent.set_goal("my goal")
    agent.last_execution_result = {
        "approved_files": ["a.txt"],
        "summary": {"modified": ["a.txt", "b.txt"], "created": []},
        "is_planless_run": True,
    }

    with patch('patchllm.agent.executor.execute_step') as fake_exec:
        agent.retry_step("feedback for b")

    fake_exec.assert_called_once()
    prompt = fake_exec.call_args[0][0]
    for expected in (
        "approved** the changes for the following files:\n- a.txt",
        "rejected** the changes for these files:\n- b.txt",
        "feedback on the rejected files: feedback for b",
        "achieve the goal: my goal",
    ):
        assert expected in prompt
@@ -0,0 +1,121 @@
1
import pytest

# Skip the whole module when prompt_toolkit is not installed.  The guard must
# run BEFORE the prompt_toolkit imports below; the original file called
# importorskip AFTER `from prompt_toolkit... import ...`, so a missing
# dependency raised ImportError at collection instead of skipping.
pytest.importorskip("prompt_toolkit")

from prompt_toolkit.document import Document
from prompt_toolkit.completion import Completion
from prompt_toolkit.formatted_text import to_plain_text

from patchllm.tui.completer import PatchLLMCompleter
10
+
11
@pytest.fixture
def completer():
    """A PatchLLMCompleter wired up with a couple of dummy scopes."""
    return PatchLLMCompleter({"base": {}, "js_files": {}})
16
+
17
def test_initial_state_completions(completer):
    """With no goal, plan, or context, only the entry commands are offered."""
    completer.set_session_state(has_goal=False, has_plan=False, has_pending_changes=False, can_revert=False, has_context=False)
    shown = {to_plain_text(c.display) for c in completer.get_completions(Document("/"), None)}

    for present in ("task - set goal", "context - set context", "menu - help"):
        assert present in shown

    # Planning and execution commands require state that does not exist yet.
    for absent in (
        "plan - generate or manage",
        "agent - run step",
        "agent - approve changes",
        "agent - revert last approval",
        "agent - ask question",
    ):
        assert absent not in shown
34
+
35
def test_has_context_state_completions(completer):
    """Loading context unlocks /ask but not planning or execution."""
    completer.set_session_state(has_goal=False, has_plan=False, has_pending_changes=False, can_revert=False, has_context=True)
    shown = {to_plain_text(c.display) for c in completer.get_completions(Document("/"), None)}

    for present in ("task - set goal", "context - set context", "agent - ask question"):
        assert present in shown

    for absent in ("plan - generate or manage", "agent - run step"):
        assert absent not in shown
51
+
52
def test_has_goal_state_completions(completer):
    """Setting a goal unlocks plan generation, but not execution yet."""
    completer.set_session_state(has_goal=True, has_plan=False, has_pending_changes=False, can_revert=False, has_context=False)
    shown = {to_plain_text(c.display) for c in completer.get_completions(Document("/"), None)}

    for present in ("task - set goal", "plan - generate or manage", "agent - ask question"):
        assert present in shown

    # Steps cannot run until a plan exists.
    assert "agent - run step" not in shown
65
+
66
def test_has_plan_state_completions(completer):
    """Having a plan enables run/skip/refine, but not approval."""
    completer.set_session_state(has_goal=True, has_plan=True, has_pending_changes=False, can_revert=False, has_context=False)
    shown = {to_plain_text(c.display) for c in completer.get_completions(Document("/"), None)}

    for present in (
        "agent - run step",
        "agent - skip step",
        "agent - ask question",
        "plan - refine with feedback",
    ):
        assert present in shown

    # Nothing has run yet, so there is nothing to approve.
    assert "agent - approve changes" not in shown
80
+
81
def test_pending_changes_state_completions(completer):
    """After a run produces pending changes, approve/diff/retry appear."""
    completer.set_session_state(has_goal=True, has_plan=True, has_pending_changes=True, can_revert=False, has_context=False)
    shown = {to_plain_text(c.display) for c in completer.get_completions(Document("/"), None)}

    # Commands from earlier states remain available.
    assert "agent - run step" in shown

    # Pending changes unlock the review commands.
    for present in ("agent - approve changes", "agent - view diff", "agent - retry with feedback"):
        assert present in shown
95
+
96
def test_can_revert_state_completions(completer):
    """Right after an approval there is something to revert but nothing to approve."""
    completer.set_session_state(has_goal=True, has_plan=True, has_pending_changes=False, can_revert=True, has_context=False)
    shown = {to_plain_text(c.display) for c in completer.get_completions(Document("/"), None)}

    assert "agent - revert last approval" in shown
    # With no pending (un-approved) changes, approve must not be offered.
    assert "agent - approve changes" not in shown
108
+
109
+
110
def test_completion_object_structure(completer):
    """Completions carry the command text plus display and meta strings."""
    completer.set_session_state(has_goal=False, has_plan=False, has_pending_changes=False, can_revert=False, has_context=False)
    matches = list(completer.get_completions(Document("/task"), None))

    task = next((c for c in matches if c.text == '/task'), None)

    assert task is not None
    assert task.text == "/task"
    assert to_plain_text(task.display) == "task - set goal"
    assert to_plain_text(task.display_meta) == "Sets the high-level goal for the agent."
tests/test_context.py ADDED
@@ -0,0 +1,140 @@
1
+ import os
2
+ import time
3
+ import subprocess
4
+ import textwrap
5
+ import pytest
6
+ import base64
7
+ # --- MODIFICATION: Changed to absolute imports ---
8
+ from patchllm.scopes.builder import build_context
9
+ from patchllm.utils import load_from_py_file
10
+ from patchllm.scopes.helpers import _format_context
11
+
12
+ # --- Static Scope Tests ---
13
+
14
def test_build_context_static_scope(temp_project, temp_scopes_file):
    """A static scope includes its own files and excludes everything else.

    Fix: the original `os.chdir(temp_project)` leaked the cwd change into
    later tests; it is now restored in a finally block.
    """
    scopes = load_from_py_file(temp_scopes_file, "scopes")
    previous_cwd = os.getcwd()
    os.chdir(temp_project)
    try:
        result = build_context("base", scopes, temp_project)
        assert result is not None
        context = result["context"]
        assert "main.py" in context
        assert "utils.py" in context
        assert "test_utils.py" not in context
        assert "component.js" not in context
    finally:
        os.chdir(previous_cwd)
24
+
25
def test_build_context_static_search_words(temp_project, temp_scopes_file):
    """A static scope with search words keeps only files matching the words.

    Fix: restore the cwd afterwards (the original chdir leaked).
    """
    scopes = load_from_py_file(temp_scopes_file, "scopes")
    previous_cwd = os.getcwd()
    os.chdir(temp_project)
    try:
        result = build_context("search_scope", scopes, temp_project)
        assert result is not None
        context = result["context"]
        assert "main.py" in context
        assert "utils.py" not in context
        assert "README.md" not in context
    finally:
        os.chdir(previous_cwd)
34
+
35
+ # --- Dynamic Scope Tests ---
36
+
37
def test_dynamic_scope_git_staged(git_project):
    """@git:staged sees only files that are staged in the index."""
    (git_project / "main.py").write_text("new content")
    subprocess.run(["git", "add", "main.py"], cwd=git_project, check=True)

    result = build_context("@git:staged", {}, git_project)

    assert result is not None
    staged_context = result["context"]
    assert "main.py" in staged_context
    assert "utils.py" not in staged_context
45
+
46
def test_dynamic_scope_git_unstaged(git_project):
    """@git:unstaged sees only files with uncommitted working-tree edits."""
    (git_project / "utils.py").write_text("unstaged changes")

    result = build_context("@git:unstaged", {}, git_project)

    assert result is not None
    assert "utils.py" in result["context"]
    assert "main.py" not in result["context"]
52
+
53
def test_dynamic_scope_recent(temp_project):
    """@recent surfaces recently touched files in the tree listing."""
    # The short sleep keeps main.py's mtime strictly newer than the other
    # fixture files on filesystems with coarse timestamp resolution.
    time.sleep(0.1)
    (temp_project / "main.py").touch()

    result = build_context("@recent", {}, temp_project)

    assert result is not None
    tree = result["tree"]
    assert "main.py" in tree
    assert len(tree.strip().split('\n')) >= 5
61
+
62
def test_dynamic_scope_search(temp_project):
    """@search:"word" keeps only files whose content matches the word.

    Fix: restore the cwd afterwards (the original chdir leaked).
    """
    previous_cwd = os.getcwd()
    os.chdir(temp_project)
    try:
        result = build_context('@search:"helper_function"', {}, temp_project)
        assert result is not None
        context = result["context"]
        assert "utils.py" in context
        assert "test_utils.py" in context
        assert "main.py" not in context
    finally:
        os.chdir(previous_cwd)
70
+
71
def test_dynamic_scope_error_traceback(temp_project):
    """@error:"..." extracts the files referenced by a Python traceback."""
    main_py = (temp_project / "main.py").as_posix()
    utils_py = (temp_project / "utils.py").as_posix()

    # Build a realistic traceback mentioning exactly two project files.
    traceback = textwrap.dedent(f'''
    Traceback (most recent call last):
    File "{main_py}", line 3, in <module>
    File "{utils_py}", line 5, in do_stuff
    ZeroDivisionError: division by zero
    ''').strip()
    scope_query = '@error:"' + traceback + '"'

    result = build_context(scope_query, {}, temp_project)

    assert result is not None
    context = result["context"]
    assert "main.py" in context
    assert "utils.py" in context
    assert "README.md" not in context
87
+
88
def test_dynamic_scope_related(temp_project):
    """@related:<file> pulls in the file plus the files related to it.

    Fix: restore the cwd afterwards (the original chdir leaked).
    """
    previous_cwd = os.getcwd()
    os.chdir(temp_project)
    try:
        result = build_context("@related:utils.py", {}, temp_project)
        assert result is not None
        context = result["context"]
        assert "utils.py" in context
        assert "test_utils.py" in context
        assert "main.py" not in context
    finally:
        os.chdir(previous_cwd)
96
+
97
def test_dynamic_scope_dir(temp_project):
    """@dir:<path> includes every file under that directory and nothing else.

    Fix: restore the cwd afterwards (the original chdir leaked).
    """
    previous_cwd = os.getcwd()
    os.chdir(temp_project)
    try:
        result = build_context("@dir:src", {}, temp_project)
        assert result is not None
        context = result["context"]
        assert "component.js" in context
        assert "styles.css" in context
        assert "main.py" not in context
    finally:
        os.chdir(previous_cwd)
105
+
106
def test_format_context_with_image(temp_project):
    """_format_context splits text files into the context string and images
    into base64 payloads.

    Fix: restore the cwd afterwards (the original chdir leaked).
    """
    previous_cwd = os.getcwd()
    os.chdir(temp_project)
    try:
        text_file = temp_project / "main.py"
        image_file = temp_project / "logo.png"

        result = _format_context([text_file, image_file], [], temp_project)
        assert result is not None

        # Text files land in the textual context...
        assert "<file_path:" in result["context"]
        assert "main.py" in result["context"]
        # ...while image bodies must not appear there.
        assert "<file_path:" + image_file.as_posix() not in result["context"]

        # Images are reported separately with their mime type.
        assert "images" in result
        assert len(result["images"]) == 1
        image_data = result["images"][0]
        assert image_data["path"] == image_file
        assert image_data["mime_type"] == "image/png"

        # The payload must be decodable base64.
        try:
            base64.b64decode(image_data["content_base64"])
        except Exception:
            pytest.fail("Image content is not valid base64")

        # Both inputs are echoed back in the files list.
        assert "files" in result
        assert len(result["files"]) == 2
        assert text_file in result["files"]
        assert image_file in result["files"]
    finally:
        os.chdir(previous_cwd)