ara-cli 0.1.9.69__py3-none-any.whl → 0.1.10.8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of ara-cli might be problematic. Click here for more details.
- ara_cli/__init__.py +18 -2
- ara_cli/__main__.py +248 -62
- ara_cli/ara_command_action.py +155 -86
- ara_cli/ara_config.py +226 -80
- ara_cli/ara_subcommands/__init__.py +0 -0
- ara_cli/ara_subcommands/autofix.py +26 -0
- ara_cli/ara_subcommands/chat.py +27 -0
- ara_cli/ara_subcommands/classifier_directory.py +16 -0
- ara_cli/ara_subcommands/common.py +100 -0
- ara_cli/ara_subcommands/create.py +75 -0
- ara_cli/ara_subcommands/delete.py +22 -0
- ara_cli/ara_subcommands/extract.py +22 -0
- ara_cli/ara_subcommands/fetch_templates.py +14 -0
- ara_cli/ara_subcommands/list.py +65 -0
- ara_cli/ara_subcommands/list_tags.py +25 -0
- ara_cli/ara_subcommands/load.py +48 -0
- ara_cli/ara_subcommands/prompt.py +136 -0
- ara_cli/ara_subcommands/read.py +47 -0
- ara_cli/ara_subcommands/read_status.py +20 -0
- ara_cli/ara_subcommands/read_user.py +20 -0
- ara_cli/ara_subcommands/reconnect.py +27 -0
- ara_cli/ara_subcommands/rename.py +22 -0
- ara_cli/ara_subcommands/scan.py +14 -0
- ara_cli/ara_subcommands/set_status.py +22 -0
- ara_cli/ara_subcommands/set_user.py +22 -0
- ara_cli/ara_subcommands/template.py +16 -0
- ara_cli/artefact_autofix.py +649 -68
- ara_cli/artefact_creator.py +8 -11
- ara_cli/artefact_deleter.py +2 -4
- ara_cli/artefact_fuzzy_search.py +22 -10
- ara_cli/artefact_link_updater.py +4 -4
- ara_cli/artefact_lister.py +29 -55
- ara_cli/artefact_models/artefact_data_retrieval.py +23 -0
- ara_cli/artefact_models/artefact_load.py +11 -3
- ara_cli/artefact_models/artefact_model.py +146 -39
- ara_cli/artefact_models/artefact_templates.py +70 -44
- ara_cli/artefact_models/businessgoal_artefact_model.py +23 -25
- ara_cli/artefact_models/epic_artefact_model.py +34 -26
- ara_cli/artefact_models/feature_artefact_model.py +203 -64
- ara_cli/artefact_models/keyfeature_artefact_model.py +21 -24
- ara_cli/artefact_models/serialize_helper.py +1 -1
- ara_cli/artefact_models/task_artefact_model.py +83 -15
- ara_cli/artefact_models/userstory_artefact_model.py +37 -27
- ara_cli/artefact_models/vision_artefact_model.py +23 -42
- ara_cli/artefact_reader.py +92 -91
- ara_cli/artefact_renamer.py +8 -4
- ara_cli/artefact_scan.py +66 -3
- ara_cli/chat.py +622 -162
- ara_cli/chat_agent/__init__.py +0 -0
- ara_cli/chat_agent/agent_communicator.py +62 -0
- ara_cli/chat_agent/agent_process_manager.py +211 -0
- ara_cli/chat_agent/agent_status_manager.py +73 -0
- ara_cli/chat_agent/agent_workspace_manager.py +76 -0
- ara_cli/commands/__init__.py +0 -0
- ara_cli/commands/command.py +7 -0
- ara_cli/commands/extract_command.py +15 -0
- ara_cli/commands/load_command.py +65 -0
- ara_cli/commands/load_image_command.py +34 -0
- ara_cli/commands/read_command.py +117 -0
- ara_cli/completers.py +144 -0
- ara_cli/directory_navigator.py +37 -4
- ara_cli/error_handler.py +134 -0
- ara_cli/file_classifier.py +6 -5
- ara_cli/file_lister.py +1 -1
- ara_cli/file_loaders/__init__.py +0 -0
- ara_cli/file_loaders/binary_file_loader.py +33 -0
- ara_cli/file_loaders/document_file_loader.py +34 -0
- ara_cli/file_loaders/document_reader.py +245 -0
- ara_cli/file_loaders/document_readers.py +233 -0
- ara_cli/file_loaders/file_loader.py +50 -0
- ara_cli/file_loaders/file_loaders.py +123 -0
- ara_cli/file_loaders/image_processor.py +89 -0
- ara_cli/file_loaders/markdown_reader.py +75 -0
- ara_cli/file_loaders/text_file_loader.py +187 -0
- ara_cli/global_file_lister.py +51 -0
- ara_cli/list_filter.py +1 -1
- ara_cli/output_suppressor.py +1 -1
- ara_cli/prompt_extractor.py +215 -88
- ara_cli/prompt_handler.py +521 -134
- ara_cli/prompt_rag.py +2 -2
- ara_cli/tag_extractor.py +83 -38
- ara_cli/template_loader.py +245 -0
- ara_cli/template_manager.py +18 -13
- ara_cli/templates/prompt-modules/commands/empty.commands.md +2 -12
- ara_cli/templates/prompt-modules/commands/extract_general.commands.md +12 -0
- ara_cli/templates/prompt-modules/commands/extract_markdown.commands.md +11 -0
- ara_cli/templates/prompt-modules/commands/extract_python.commands.md +13 -0
- ara_cli/templates/prompt-modules/commands/feature_add_or_modifiy_specified_behavior.commands.md +36 -0
- ara_cli/templates/prompt-modules/commands/feature_generate_initial_specified_bevahior.commands.md +53 -0
- ara_cli/templates/prompt-modules/commands/prompt_template_tech_stack_transformer.commands.md +95 -0
- ara_cli/templates/prompt-modules/commands/python_bug_fixing_code.commands.md +34 -0
- ara_cli/templates/prompt-modules/commands/python_generate_code.commands.md +27 -0
- ara_cli/templates/prompt-modules/commands/python_refactoring_code.commands.md +39 -0
- ara_cli/templates/prompt-modules/commands/python_step_definitions_generation_and_fixing.commands.md +40 -0
- ara_cli/templates/prompt-modules/commands/python_unittest_generation_and_fixing.commands.md +48 -0
- ara_cli/update_config_prompt.py +9 -3
- ara_cli/version.py +1 -1
- ara_cli-0.1.10.8.dist-info/METADATA +241 -0
- ara_cli-0.1.10.8.dist-info/RECORD +193 -0
- tests/test_ara_command_action.py +73 -59
- tests/test_ara_config.py +341 -36
- tests/test_artefact_autofix.py +1060 -0
- tests/test_artefact_link_updater.py +3 -3
- tests/test_artefact_lister.py +52 -132
- tests/test_artefact_renamer.py +2 -2
- tests/test_artefact_scan.py +327 -33
- tests/test_chat.py +2063 -498
- tests/test_file_classifier.py +24 -1
- tests/test_file_creator.py +3 -5
- tests/test_file_lister.py +1 -1
- tests/test_global_file_lister.py +131 -0
- tests/test_list_filter.py +2 -2
- tests/test_prompt_handler.py +746 -0
- tests/test_tag_extractor.py +19 -13
- tests/test_template_loader.py +192 -0
- tests/test_template_manager.py +5 -4
- tests/test_update_config_prompt.py +2 -2
- ara_cli/ara_command_parser.py +0 -327
- ara_cli/templates/prompt-modules/blueprints/complete_pytest_unittest.blueprint.md +0 -27
- ara_cli/templates/prompt-modules/blueprints/task_todo_list_implement_feature_BDD_way.blueprint.md +0 -30
- ara_cli/templates/prompt-modules/commands/artefact_classification.commands.md +0 -9
- ara_cli/templates/prompt-modules/commands/artefact_extension.commands.md +0 -17
- ara_cli/templates/prompt-modules/commands/artefact_formulation.commands.md +0 -14
- ara_cli/templates/prompt-modules/commands/behave_step_generation.commands.md +0 -102
- ara_cli/templates/prompt-modules/commands/code_generation_complex.commands.md +0 -20
- ara_cli/templates/prompt-modules/commands/code_generation_simple.commands.md +0 -13
- ara_cli/templates/prompt-modules/commands/error_fixing.commands.md +0 -20
- ara_cli/templates/prompt-modules/commands/feature_file_update.commands.md +0 -18
- ara_cli/templates/prompt-modules/commands/feature_formulation.commands.md +0 -43
- ara_cli/templates/prompt-modules/commands/js_code_generation_simple.commands.md +0 -13
- ara_cli/templates/prompt-modules/commands/refactoring.commands.md +0 -15
- ara_cli/templates/prompt-modules/commands/refactoring_analysis.commands.md +0 -9
- ara_cli/templates/prompt-modules/commands/reverse_engineer_feature_file.commands.md +0 -15
- ara_cli/templates/prompt-modules/commands/reverse_engineer_program_flow.commands.md +0 -19
- ara_cli/templates/template.businessgoal +0 -10
- ara_cli/templates/template.capability +0 -10
- ara_cli/templates/template.epic +0 -15
- ara_cli/templates/template.example +0 -6
- ara_cli/templates/template.feature +0 -26
- ara_cli/templates/template.issue +0 -14
- ara_cli/templates/template.keyfeature +0 -15
- ara_cli/templates/template.task +0 -6
- ara_cli/templates/template.userstory +0 -17
- ara_cli/templates/template.vision +0 -14
- ara_cli-0.1.9.69.dist-info/METADATA +0 -16
- ara_cli-0.1.9.69.dist-info/RECORD +0 -158
- tests/test_ara_autofix.py +0 -219
- {ara_cli-0.1.9.69.dist-info → ara_cli-0.1.10.8.dist-info}/WHEEL +0 -0
- {ara_cli-0.1.9.69.dist-info → ara_cli-0.1.10.8.dist-info}/entry_points.txt +0 -0
- {ara_cli-0.1.9.69.dist-info → ara_cli-0.1.10.8.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,746 @@
|
|
|
1
|
+
import pytest
|
|
2
|
+
import os
|
|
3
|
+
import shutil
|
|
4
|
+
import base64
|
|
5
|
+
import re
|
|
6
|
+
from unittest.mock import patch, MagicMock, mock_open, call
|
|
7
|
+
from pathlib import Path
|
|
8
|
+
|
|
9
|
+
from ara_cli import prompt_handler
|
|
10
|
+
from ara_cli.ara_config import ARAconfig, LLMConfigItem, ConfigManager
|
|
11
|
+
from ara_cli.classifier import Classifier
|
|
12
|
+
|
|
13
|
+
from langfuse.api.resources.commons.errors import NotFoundError
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
@pytest.fixture(autouse=True)
|
|
17
|
+
def mock_langfuse():
|
|
18
|
+
"""Mock Langfuse client to prevent network calls during tests."""
|
|
19
|
+
with patch.object(prompt_handler.LLMSingleton, 'langfuse', None):
|
|
20
|
+
mock_langfuse_instance = MagicMock()
|
|
21
|
+
|
|
22
|
+
# Mock the get_prompt method to raise NotFoundError (simulating prompt not found)
|
|
23
|
+
mock_langfuse_instance.get_prompt.side_effect = NotFoundError(
|
|
24
|
+
# status_code=404,
|
|
25
|
+
body={'message': "Prompt not found", 'error': 'LangfuseNotFoundError'}
|
|
26
|
+
)
|
|
27
|
+
|
|
28
|
+
# Mock the span context manager
|
|
29
|
+
mock_span = MagicMock()
|
|
30
|
+
mock_span.__enter__ = MagicMock(return_value=mock_span)
|
|
31
|
+
mock_span.__exit__ = MagicMock(return_value=None)
|
|
32
|
+
mock_langfuse_instance.start_as_current_span.return_value = mock_span
|
|
33
|
+
|
|
34
|
+
with patch.object(prompt_handler.LLMSingleton, 'langfuse', mock_langfuse_instance):
|
|
35
|
+
yield mock_langfuse_instance
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
@pytest.fixture
|
|
39
|
+
def mock_config():
|
|
40
|
+
"""Mocks a standard ARAconfig object for testing."""
|
|
41
|
+
config = ARAconfig(
|
|
42
|
+
ext_code_dirs=[{"code": "./src"}],
|
|
43
|
+
glossary_dir="./glossary",
|
|
44
|
+
doc_dir="./docs",
|
|
45
|
+
local_prompt_templates_dir="./ara/.araconfig/custom-prompt-modules",
|
|
46
|
+
ara_prompt_given_list_includes=["*.py", "*.md"],
|
|
47
|
+
llm_config={
|
|
48
|
+
"gpt-4o": LLMConfigItem(provider="openai", model="openai/gpt-4o", temperature=0.8, max_tokens=1024),
|
|
49
|
+
"o3-mini": LLMConfigItem(provider="openai", model="openai/o3-mini", temperature=0.9, max_tokens=2048),
|
|
50
|
+
},
|
|
51
|
+
default_llm="gpt-4o",
|
|
52
|
+
extraction_llm="o3-mini"
|
|
53
|
+
)
|
|
54
|
+
return config
|
|
55
|
+
|
|
56
|
+
|
|
57
|
+
@pytest.fixture(autouse=True)
|
|
58
|
+
def mock_config_manager(mock_config):
|
|
59
|
+
"""Patches ConfigManager to ensure it always returns the mock_config."""
|
|
60
|
+
with patch.object(ConfigManager, 'get_config') as mock_get_config:
|
|
61
|
+
mock_get_config.return_value = mock_config
|
|
62
|
+
yield mock_get_config
|
|
63
|
+
|
|
64
|
+
|
|
65
|
+
@pytest.fixture(autouse=True)
|
|
66
|
+
def reset_singleton():
|
|
67
|
+
"""Resets the LLMSingleton and ConfigManager before each test for isolation."""
|
|
68
|
+
prompt_handler.LLMSingleton._instance = None
|
|
69
|
+
prompt_handler.LLMSingleton._default_model = None
|
|
70
|
+
prompt_handler.LLMSingleton._extraction_model = None
|
|
71
|
+
ConfigManager.reset()
|
|
72
|
+
yield
|
|
73
|
+
ConfigManager.reset()
|
|
74
|
+
|
|
75
|
+
|
|
76
|
+
class TestLLMSingleton:
|
|
77
|
+
"""Tests the behavior of the LLMSingleton class."""
|
|
78
|
+
|
|
79
|
+
def test_get_instance_creates_with_default_model(self, mock_config_manager):
|
|
80
|
+
instance = prompt_handler.LLMSingleton.get_instance()
|
|
81
|
+
assert instance is not None
|
|
82
|
+
assert prompt_handler.LLMSingleton.get_default_model() == "gpt-4o"
|
|
83
|
+
assert prompt_handler.LLMSingleton.get_extraction_model() == "o3-mini"
|
|
84
|
+
assert instance.default_config_params['temperature'] == 0.8
|
|
85
|
+
assert instance.extraction_config_params['temperature'] == 0.9
|
|
86
|
+
|
|
87
|
+
def test_get_instance_creates_with_first_model_if_no_default(self, mock_config_manager, mock_config):
|
|
88
|
+
mock_config.default_llm = None
|
|
89
|
+
instance = prompt_handler.LLMSingleton.get_instance()
|
|
90
|
+
assert instance is not None
|
|
91
|
+
assert prompt_handler.LLMSingleton.get_default_model() == "gpt-4o"
|
|
92
|
+
|
|
93
|
+
def test_get_instance_no_extraction_llm_falls_back_to_default(self, mock_config_manager, mock_config):
|
|
94
|
+
mock_config.extraction_llm = None
|
|
95
|
+
instance = prompt_handler.LLMSingleton.get_instance()
|
|
96
|
+
assert instance is not None
|
|
97
|
+
assert prompt_handler.LLMSingleton.get_extraction_model() == "gpt-4o"
|
|
98
|
+
|
|
99
|
+
def test_get_instance_no_llm_config_raises_error(self, mock_config_manager, mock_config):
|
|
100
|
+
mock_config.llm_config = {}
|
|
101
|
+
mock_config.default_llm = None # This is crucial to hit the correct check
|
|
102
|
+
with pytest.raises(ValueError, match="No LLM configurations are defined in the configuration file."):
|
|
103
|
+
prompt_handler.LLMSingleton.get_instance()
|
|
104
|
+
|
|
105
|
+
def test_get_instance_constructor_raises_for_missing_extraction_config(self, mock_config_manager, mock_config):
|
|
106
|
+
mock_config.extraction_llm = "missing-model"
|
|
107
|
+
with pytest.raises(ValueError, match="No configuration found for the extraction model: missing-model"):
|
|
108
|
+
prompt_handler.LLMSingleton.get_instance()
|
|
109
|
+
|
|
110
|
+
def test_get_instance_returns_same_instance(self, mock_config_manager):
|
|
111
|
+
instance1 = prompt_handler.LLMSingleton.get_instance()
|
|
112
|
+
instance2 = prompt_handler.LLMSingleton.get_instance()
|
|
113
|
+
assert instance1 is instance2
|
|
114
|
+
|
|
115
|
+
def test_get_config_by_purpose(self, mock_config_manager):
|
|
116
|
+
default_params = prompt_handler.LLMSingleton.get_config_by_purpose('default')
|
|
117
|
+
extraction_params = prompt_handler.LLMSingleton.get_config_by_purpose('extraction')
|
|
118
|
+
assert default_params['model'] == 'openai/gpt-4o'
|
|
119
|
+
assert extraction_params['model'] == 'openai/o3-mini'
|
|
120
|
+
|
|
121
|
+
def test_set_default_model_switches_model(self, mock_config_manager):
|
|
122
|
+
initial_instance = prompt_handler.LLMSingleton.get_instance()
|
|
123
|
+
assert prompt_handler.LLMSingleton.get_default_model() == "gpt-4o"
|
|
124
|
+
|
|
125
|
+
new_instance = prompt_handler.LLMSingleton.set_default_model("o3-mini")
|
|
126
|
+
|
|
127
|
+
assert prompt_handler.LLMSingleton.get_default_model() == "o3-mini"
|
|
128
|
+
assert new_instance.default_config_params['temperature'] == 0.9
|
|
129
|
+
assert initial_instance is not new_instance
|
|
130
|
+
|
|
131
|
+
def test_set_default_model_to_same_model_does_nothing(self, mock_config_manager):
|
|
132
|
+
instance1 = prompt_handler.LLMSingleton.get_instance()
|
|
133
|
+
instance2 = prompt_handler.LLMSingleton.set_default_model("gpt-4o")
|
|
134
|
+
assert instance1 is instance2
|
|
135
|
+
|
|
136
|
+
def test_set_default_model_to_invalid_raises_error(self, mock_config_manager):
|
|
137
|
+
with pytest.raises(ValueError, match="No configuration found for the default model: invalid-model"):
|
|
138
|
+
prompt_handler.LLMSingleton.set_default_model("invalid-model")
|
|
139
|
+
|
|
140
|
+
def test_set_extraction_model_switches_model(self, mock_config_manager):
|
|
141
|
+
initial_instance = prompt_handler.LLMSingleton.get_instance()
|
|
142
|
+
new_instance = prompt_handler.LLMSingleton.set_extraction_model("gpt-4o")
|
|
143
|
+
assert prompt_handler.LLMSingleton.get_extraction_model() == "gpt-4o"
|
|
144
|
+
assert new_instance.extraction_config_params['temperature'] == 0.8
|
|
145
|
+
assert initial_instance is not new_instance
|
|
146
|
+
|
|
147
|
+
def test_set_extraction_model_to_same_model_does_nothing(self, mock_config_manager):
|
|
148
|
+
instance1 = prompt_handler.LLMSingleton.get_instance()
|
|
149
|
+
instance2 = prompt_handler.LLMSingleton.set_extraction_model("o3-mini")
|
|
150
|
+
assert instance1 is instance2
|
|
151
|
+
|
|
152
|
+
def test_set_extraction_model_to_invalid_raises_error(self, mock_config_manager):
|
|
153
|
+
with pytest.raises(ValueError, match="No configuration found for the extraction model: invalid-model"):
|
|
154
|
+
prompt_handler.LLMSingleton.set_extraction_model("invalid-model")
|
|
155
|
+
|
|
156
|
+
def test_get_default_model_initializes_if_needed(self, mock_config_manager):
|
|
157
|
+
assert prompt_handler.LLMSingleton._instance is None
|
|
158
|
+
model = prompt_handler.LLMSingleton.get_default_model()
|
|
159
|
+
assert model == "gpt-4o"
|
|
160
|
+
assert prompt_handler.LLMSingleton._instance is not None
|
|
161
|
+
|
|
162
|
+
def test_get_extraction_model_initializes_if_needed(self, mock_config_manager):
|
|
163
|
+
assert prompt_handler.LLMSingleton._instance is None
|
|
164
|
+
model = prompt_handler.LLMSingleton.get_extraction_model()
|
|
165
|
+
assert model == "o3-mini"
|
|
166
|
+
assert prompt_handler.LLMSingleton._instance is not None
|
|
167
|
+
|
|
168
|
+
|
|
169
|
+
class TestFileIO:
|
|
170
|
+
"""Tests file I/O helper functions."""
|
|
171
|
+
|
|
172
|
+
def test_write_and_read_string_from_file(self, tmp_path):
|
|
173
|
+
file_path = tmp_path / "test.txt"
|
|
174
|
+
test_string = "Hello World"
|
|
175
|
+
|
|
176
|
+
prompt_handler.write_string_to_file(file_path, test_string, 'w')
|
|
177
|
+
|
|
178
|
+
content = prompt_handler.read_string_from_file(file_path)
|
|
179
|
+
assert test_string in content
|
|
180
|
+
|
|
181
|
+
content_get = prompt_handler.get_file_content(file_path)
|
|
182
|
+
assert content.strip() == test_string
|
|
183
|
+
|
|
184
|
+
def test_get_partial_file_content(self, tmp_path):
|
|
185
|
+
file_path = tmp_path / "test.txt"
|
|
186
|
+
file_path.write_text("\n".join(f"Line {i}" for i in range(1, 21)))
|
|
187
|
+
|
|
188
|
+
content = prompt_handler.get_partial_file_content(str(file_path), "2:4,18:19")
|
|
189
|
+
expected = "Line 2\nLine 3\nLine 4\nLine 18\nLine 19\n"
|
|
190
|
+
assert content == expected
|
|
191
|
+
|
|
192
|
+
|
|
193
|
+
class TestCoreLogic:
|
|
194
|
+
"""Tests functions related to the main business logic."""
|
|
195
|
+
|
|
196
|
+
@pytest.fixture(autouse=True)
|
|
197
|
+
def setup_test_env(self, tmp_path):
|
|
198
|
+
"""Changes CWD to a temporary directory for test isolation."""
|
|
199
|
+
original_cwd = os.getcwd()
|
|
200
|
+
os.chdir(tmp_path)
|
|
201
|
+
yield
|
|
202
|
+
os.chdir(original_cwd)
|
|
203
|
+
|
|
204
|
+
@pytest.mark.parametrize("message, expected", [
|
|
205
|
+
({"content": "Hello"}, True),
|
|
206
|
+
({"content": " "}, False),
|
|
207
|
+
({"content": ""}, False),
|
|
208
|
+
({"content": "\n\t"}, False),
|
|
209
|
+
({"content": [{"type": "text", "text": " "}]}, False),
|
|
210
|
+
({"content": [{"type": "text", "text": "Valid text"}]}, True),
|
|
211
|
+
({"content": [{"type": "image_url"}, {"type": "text", "text": "More text"}]}, True),
|
|
212
|
+
({"content": []}, False),
|
|
213
|
+
({"content": 123}, False),
|
|
214
|
+
({}, False),
|
|
215
|
+
])
|
|
216
|
+
def test_is_valid_message(self, message, expected):
|
|
217
|
+
assert prompt_handler._is_valid_message(message) == expected
|
|
218
|
+
|
|
219
|
+
@patch('ara_cli.prompt_handler.litellm.completion')
|
|
220
|
+
def test_send_prompt(self, mock_completion, mock_config, mock_config_manager):
|
|
221
|
+
"""Tests that send_prompt uses the default LLM by default."""
|
|
222
|
+
mock_chunk = MagicMock()
|
|
223
|
+
mock_chunk.choices[0].delta.content = "test chunk"
|
|
224
|
+
mock_completion.return_value = [mock_chunk]
|
|
225
|
+
|
|
226
|
+
prompt = [{"role": "user", "content": "A test"}]
|
|
227
|
+
|
|
228
|
+
result = list(prompt_handler.send_prompt(prompt))
|
|
229
|
+
|
|
230
|
+
expected_params = mock_config.llm_config['gpt-4o'].model_dump(exclude_none=True)
|
|
231
|
+
del expected_params['provider']
|
|
232
|
+
|
|
233
|
+
mock_completion.assert_called_once_with(
|
|
234
|
+
messages=prompt, stream=True, **expected_params
|
|
235
|
+
)
|
|
236
|
+
assert len(result) == 1
|
|
237
|
+
assert result[0].choices[0].delta.content == "test chunk"
|
|
238
|
+
|
|
239
|
+
@patch('ara_cli.prompt_handler.litellm.completion')
|
|
240
|
+
def test_send_prompt_filters_invalid_messages(self, mock_completion, mock_config_manager):
|
|
241
|
+
prompt = [
|
|
242
|
+
{"role": "user", "content": "Valid message"},
|
|
243
|
+
{"role": "user", "content": " "},
|
|
244
|
+
{"role": "assistant", "content": "Another valid one"},
|
|
245
|
+
]
|
|
246
|
+
valid_prompt = [prompt[0], prompt[2]]
|
|
247
|
+
|
|
248
|
+
list(prompt_handler.send_prompt(prompt))
|
|
249
|
+
|
|
250
|
+
mock_completion.assert_called_once()
|
|
251
|
+
called_args = mock_completion.call_args[1]
|
|
252
|
+
assert called_args['messages'] == valid_prompt
|
|
253
|
+
|
|
254
|
+
@patch('ara_cli.prompt_handler.litellm.completion')
|
|
255
|
+
def test_send_prompt_uses_extraction_llm(self, mock_completion, mock_config, mock_config_manager):
|
|
256
|
+
"""Tests that send_prompt uses the extraction LLM when specified."""
|
|
257
|
+
mock_completion.return_value = []
|
|
258
|
+
prompt = [{"role": "user", "content": "Extract this"}]
|
|
259
|
+
|
|
260
|
+
list(prompt_handler.send_prompt(prompt, purpose='extraction'))
|
|
261
|
+
|
|
262
|
+
expected_params = mock_config.llm_config['o3-mini'].model_dump(exclude_none=True)
|
|
263
|
+
del expected_params['provider']
|
|
264
|
+
|
|
265
|
+
mock_completion.assert_called_once_with(
|
|
266
|
+
messages=prompt, stream=True, **expected_params
|
|
267
|
+
)
|
|
268
|
+
|
|
269
|
+
@patch('ara_cli.prompt_handler.send_prompt')
|
|
270
|
+
def test_describe_image(self, mock_send_prompt, tmp_path, mock_langfuse):
|
|
271
|
+
fake_image_path = tmp_path / "test.jpeg"
|
|
272
|
+
fake_image_content = b"fakeimagedata"
|
|
273
|
+
fake_image_path.write_bytes(fake_image_content)
|
|
274
|
+
|
|
275
|
+
mock_send_prompt.return_value = iter([])
|
|
276
|
+
|
|
277
|
+
# Ensure the langfuse mock is properly set up for this instance
|
|
278
|
+
instance = prompt_handler.LLMSingleton.get_instance()
|
|
279
|
+
instance.langfuse = mock_langfuse
|
|
280
|
+
|
|
281
|
+
prompt_handler.describe_image(fake_image_path)
|
|
282
|
+
|
|
283
|
+
mock_send_prompt.assert_called_once()
|
|
284
|
+
called_args, called_kwargs = mock_send_prompt.call_args
|
|
285
|
+
|
|
286
|
+
assert called_kwargs == {'purpose': 'extraction'}
|
|
287
|
+
message_content = called_args[0][0]['content']
|
|
288
|
+
assert message_content[0]['type'] == 'text'
|
|
289
|
+
assert message_content[1]['type'] == 'image_url'
|
|
290
|
+
|
|
291
|
+
encoded_image = base64.b64encode(fake_image_content).decode('utf-8')
|
|
292
|
+
expected_url = f"data:image/jpeg;base64,{encoded_image}"
|
|
293
|
+
assert message_content[1]['image_url']['url'] == expected_url
|
|
294
|
+
|
|
295
|
+
@patch('ara_cli.prompt_handler.send_prompt')
|
|
296
|
+
def test_describe_image_returns_response_text(self, mock_send_prompt, tmp_path, mock_langfuse):
|
|
297
|
+
fake_image_path = tmp_path / "test.gif"
|
|
298
|
+
fake_image_path.touch()
|
|
299
|
+
|
|
300
|
+
mock_chunk1 = MagicMock()
|
|
301
|
+
mock_chunk1.choices[0].delta.content = "This is "
|
|
302
|
+
mock_chunk2 = MagicMock()
|
|
303
|
+
mock_chunk2.choices[0].delta.content = "a description."
|
|
304
|
+
mock_chunk3 = MagicMock()
|
|
305
|
+
mock_chunk3.choices[0].delta.content = None # Test empty chunk
|
|
306
|
+
mock_send_prompt.return_value = iter([mock_chunk1, mock_chunk3, mock_chunk2])
|
|
307
|
+
|
|
308
|
+
# Ensure the langfuse mock is properly set up for this instance
|
|
309
|
+
instance = prompt_handler.LLMSingleton.get_instance()
|
|
310
|
+
instance.langfuse = mock_langfuse
|
|
311
|
+
|
|
312
|
+
description = prompt_handler.describe_image(fake_image_path)
|
|
313
|
+
assert description == "This is a description."
|
|
314
|
+
|
|
315
|
+
@patch('ara_cli.prompt_handler.Classifier.get_sub_directory', return_value="test_classifier")
|
|
316
|
+
def test_append_headings(self, mock_get_sub, tmp_path):
|
|
317
|
+
os.makedirs("ara/test_classifier/my_param.data", exist_ok=True)
|
|
318
|
+
log_file = tmp_path / "ara/test_classifier/my_param.data/test_classifier.prompt_log.md"
|
|
319
|
+
log_file.touch()
|
|
320
|
+
|
|
321
|
+
prompt_handler.append_headings("test_classifier", "my_param", "PROMPT")
|
|
322
|
+
assert "## PROMPT_1" in log_file.read_text()
|
|
323
|
+
|
|
324
|
+
prompt_handler.append_headings("test_classifier", "my_param", "PROMPT")
|
|
325
|
+
assert "## PROMPT_2" in log_file.read_text()
|
|
326
|
+
|
|
327
|
+
@patch('ara_cli.prompt_handler.Classifier.get_sub_directory', return_value="test_classifier")
|
|
328
|
+
def test_append_headings_creates_file_if_not_exists(self, mock_get_sub, tmp_path):
|
|
329
|
+
os.makedirs("ara/test_classifier/my_param.data", exist_ok=True)
|
|
330
|
+
log_file = tmp_path / "ara/test_classifier/my_param.data/test_classifier.prompt_log.md"
|
|
331
|
+
assert not log_file.exists()
|
|
332
|
+
|
|
333
|
+
prompt_handler.append_headings("test_classifier", "my_param", "HEADING")
|
|
334
|
+
assert log_file.exists()
|
|
335
|
+
assert "## HEADING_1" in log_file.read_text()
|
|
336
|
+
|
|
337
|
+
@patch('ara_cli.prompt_handler.Classifier.get_sub_directory', return_value="test_classifier")
|
|
338
|
+
def test_write_prompt_result(self, mock_get_sub, tmp_path):
|
|
339
|
+
os.makedirs("ara/test_classifier/my_param.data", exist_ok=True)
|
|
340
|
+
log_file = tmp_path / "ara/test_classifier/my_param.data/test_classifier.prompt_log.md"
|
|
341
|
+
|
|
342
|
+
prompt_handler.write_prompt_result("test_classifier", "my_param", "Test content")
|
|
343
|
+
assert "Test content" in log_file.read_text()
|
|
344
|
+
|
|
345
|
+
def test_prepend_system_prompt(self, mock_langfuse):
|
|
346
|
+
# Ensure the langfuse mock is properly set up for this instance
|
|
347
|
+
instance = prompt_handler.LLMSingleton.get_instance()
|
|
348
|
+
instance.langfuse = mock_langfuse
|
|
349
|
+
|
|
350
|
+
messages = [{"role": "user", "content": "Hi"}]
|
|
351
|
+
result = prompt_handler.prepend_system_prompt(messages)
|
|
352
|
+
assert len(result) == 2
|
|
353
|
+
assert result[0]['role'] == 'system'
|
|
354
|
+
assert result[1]['role'] == 'user'
|
|
355
|
+
|
|
356
|
+
@patch('logging.getLogger')
|
|
357
|
+
def test_append_images_to_message_logic(self, mock_get_logger):
|
|
358
|
+
# Test case 1: No images, should return original message
|
|
359
|
+
message_no_img = {"role": "user", "content": "Hello"}
|
|
360
|
+
result = prompt_handler.append_images_to_message(message_no_img, [])
|
|
361
|
+
assert result == {"role": "user", "content": "Hello"}
|
|
362
|
+
|
|
363
|
+
# Test case 2: Add images to a text-only message
|
|
364
|
+
message_with_text = {"role": "user", "content": "Describe these."}
|
|
365
|
+
images = [{"type": "image_url", "image_url": {"url": "data:..."}}]
|
|
366
|
+
result = prompt_handler.append_images_to_message(message_with_text, images)
|
|
367
|
+
expected_content = [
|
|
368
|
+
{"type": "text", "text": "Describe these."},
|
|
369
|
+
{"type": "image_url", "image_url": {"url": "data:..."}}
|
|
370
|
+
]
|
|
371
|
+
assert result["content"] == expected_content
|
|
372
|
+
|
|
373
|
+
# Test case 3: Add images to an existing list content
|
|
374
|
+
message_with_list = {"role": "user", "content": [{"type": "text", "text": "Initial text."}]}
|
|
375
|
+
result = prompt_handler.append_images_to_message(message_with_list, images)
|
|
376
|
+
expected_content_2 = [
|
|
377
|
+
{"type": "text", "text": "Initial text."},
|
|
378
|
+
{"type": "image_url", "image_url": {"url": "data:..."}}
|
|
379
|
+
]
|
|
380
|
+
assert result["content"] == expected_content_2
|
|
381
|
+
|
|
382
|
+
|
|
383
|
+
class TestFileOperations:
|
|
384
|
+
"""Tests for complex file operations and parsing."""
|
|
385
|
+
|
|
386
|
+
@pytest.fixture(autouse=True)
|
|
387
|
+
def setup_fs(self, tmp_path):
|
|
388
|
+
self.root = tmp_path
|
|
389
|
+
os.chdir(self.root)
|
|
390
|
+
yield
|
|
391
|
+
|
|
392
|
+
def test_write_template_files_to_config(self):
|
|
393
|
+
base_path = self.root / "templates"
|
|
394
|
+
(base_path / "rules").mkdir(parents=True)
|
|
395
|
+
(base_path / "rules" / "b.rules.md").touch()
|
|
396
|
+
(base_path / "rules" / "a.rules.md").touch()
|
|
397
|
+
|
|
398
|
+
m = mock_open()
|
|
399
|
+
with patch('builtins.open', m):
|
|
400
|
+
prompt_handler.write_template_files_to_config("rules", m(), str(base_path))
|
|
401
|
+
|
|
402
|
+
# Check that files were written in sorted order with correct spacing
|
|
403
|
+
calls = m().write.call_args_list
|
|
404
|
+
assert calls[0] == call(" - [] rules/a.rules.md\n")
|
|
405
|
+
assert calls[1] == call(" - [] rules/b.rules.md\n")
|
|
406
|
+
|
|
407
|
+
def test_find_files_with_endings(self):
|
|
408
|
+
(self.root / "a.rules.md").touch()
|
|
409
|
+
(self.root / "b.intention.md").touch()
|
|
410
|
+
(self.root / "c.rules.md").touch()
|
|
411
|
+
(self.root / "d.other.md").touch()
|
|
412
|
+
(self.root / "subdir").mkdir()
|
|
413
|
+
(self.root / "subdir" / "e.rules.md").touch()
|
|
414
|
+
|
|
415
|
+
endings = [".intention.md", ".rules.md"]
|
|
416
|
+
files = prompt_handler.find_files_with_endings(str(self.root), endings)
|
|
417
|
+
|
|
418
|
+
# Should only find files in the root, not subdir, and sorted by ending order
|
|
419
|
+
# Sort results to make test independent of filesystem list order
|
|
420
|
+
assert sorted(files) == sorted(["b.intention.md", "a.rules.md", "c.rules.md"])
|
|
421
|
+
|
|
422
|
+
def test_move_and_copy_files(self):
|
|
423
|
+
prompt_data = self.root / "prompt.data"
|
|
424
|
+
prompt_archive = self.root / "prompt.archive"
|
|
425
|
+
source_dir = self.root / "source"
|
|
426
|
+
prompt_data.mkdir()
|
|
427
|
+
prompt_archive.mkdir()
|
|
428
|
+
source_dir.mkdir()
|
|
429
|
+
|
|
430
|
+
source_file = source_dir / "new.rules.md"
|
|
431
|
+
source_file.write_text("new rules")
|
|
432
|
+
|
|
433
|
+
existing_file = prompt_data / "old.rules.md"
|
|
434
|
+
existing_file.write_text("old rules")
|
|
435
|
+
|
|
436
|
+
unrelated_source = source_dir / "unrelated.txt"
|
|
437
|
+
unrelated_source.touch()
|
|
438
|
+
|
|
439
|
+
missing_source = source_dir / "nonexistent.rules.md"
|
|
440
|
+
|
|
441
|
+
with patch('builtins.print') as mock_print:
|
|
442
|
+
# Test move and copy
|
|
443
|
+
prompt_handler.move_and_copy_files(str(source_file), str(prompt_data), str(prompt_archive))
|
|
444
|
+
assert not existing_file.exists()
|
|
445
|
+
assert (prompt_archive / "old.rules.md").exists()
|
|
446
|
+
assert (prompt_data / "new.rules.md").read_text() == "new rules"
|
|
447
|
+
|
|
448
|
+
# Test skipping unrelated files
|
|
449
|
+
prompt_handler.move_and_copy_files(str(unrelated_source), str(prompt_data), str(prompt_archive))
|
|
450
|
+
assert mock_print.call_args_list[-1] == call("File name unrelated.txt does not end with one of the specified patterns, skipping move and copy.")
|
|
451
|
+
|
|
452
|
+
# Test warning for missing source
|
|
453
|
+
prompt_handler.move_and_copy_files(str(missing_source), str(prompt_data), str(prompt_archive))
|
|
454
|
+
assert mock_print.call_args_list[-1] == call(f"WARNING: template {missing_source} does not exist.")
|
|
455
|
+
|
|
456
|
+
def test_extract_and_load_markdown_files_complex_hierarchy(self):
|
|
457
|
+
md_content = """
|
|
458
|
+
# L1
|
|
459
|
+
- [x] l1.md
|
|
460
|
+
## L2-A
|
|
461
|
+
- [x] l2a.md
|
|
462
|
+
### L3
|
|
463
|
+
- [] l3_unchecked.md
|
|
464
|
+
- [x] l3.md
|
|
465
|
+
## L2-B
|
|
466
|
+
- [x] l2b.md
|
|
467
|
+
# L1-Again
|
|
468
|
+
- [x] l1_again.md
|
|
469
|
+
"""
|
|
470
|
+
m = mock_open(read_data=md_content)
|
|
471
|
+
with patch('builtins.open', m):
|
|
472
|
+
paths = prompt_handler.extract_and_load_markdown_files("dummy_path")
|
|
473
|
+
|
|
474
|
+
expected = [
|
|
475
|
+
'L1/l1.md',
|
|
476
|
+
'L1/L2-A/l2a.md',
|
|
477
|
+
'L1/L2-A/L3/l3.md',
|
|
478
|
+
'L1/L2-B/l2b.md',
|
|
479
|
+
'L1-Again/l1_again.md',
|
|
480
|
+
]
|
|
481
|
+
assert paths == expected
|
|
482
|
+
|
|
483
|
+
@patch('ara_cli.prompt_handler.get_partial_file_content')
@patch('ara_cli.prompt_handler.get_file_content')
def test_load_givens(self, mock_get_content, mock_get_partial, tmp_path):
    """load_givens pulls full and partial text content plus base64-encoded image data."""
    # --- fixture files on disk ---
    md_config = tmp_path / "config.givens.md"
    text_file = tmp_path / "file.txt"
    image_file = tmp_path / "image.png"
    text_file.write_text("Full content")
    image_file.write_bytes(b"imagedata")

    md_config.write_text(f"""
# src
- [x] {text_file}
- [x] [1:2] {text_file}
# assets
- [x] {image_file}
""")

    # --- content-reading mocks ---
    mock_get_content.return_value = "Full content"
    mock_get_partial.return_value = "Partial content"

    extracted = [str(text_file), f"[1:2] {text_file}", str(image_file)]
    with patch('ara_cli.prompt_handler.extract_and_load_markdown_files', return_value=extracted):
        # The regex in load_givens is flawed, so we manually mock the extracted items
        assert re.match(r".*?\[(\d+:\d+(?:,\s*\d+:\d+)*)\]\s+(.+)", f"[1:2] {text_file}") is not None

        content, image_data = prompt_handler.load_givens(str(md_config))

    # Text content: full read once, partial read once with the "1:2" range.
    assert "Full content" in content
    assert "Partial content" in content
    mock_get_content.assert_called_once_with(str(text_file))
    mock_get_partial.assert_called_once_with(str(text_file), "1:2")

    # Image content: one entry, base64 of the raw bytes embedded in the data URL.
    assert len(image_data) == 1
    assert image_data[0]['type'] == 'image_url'
    encoded = base64.b64encode(b"imagedata").decode("utf-8")
    assert encoded in image_data[0]['image_url']['url']
    # NOTE(review): this assertion is vacuous ("" is a substring of anything);
    # the expected substring looks lost — confirm the intended value upstream.
    assert "" in content
|
|
526
|
+
|
|
527
|
+
@patch('ara_cli.prompt_handler.load_givens')
@patch('ara_cli.prompt_handler.get_file_content')
@patch('ara_cli.prompt_handler.find_files_with_endings')
def test_collect_file_content_by_extension(self, mock_find, mock_get, mock_load):
    """Rules files are read verbatim while givens files go through load_givens (text + images)."""
    prompt_data_path = "/fake/path"
    extensions = [".rules.md", ".prompt_givens.md"]

    # One hit per extension lookup, in order.
    mock_find.side_effect = [["rules.rules.md"], ["givens.prompt_givens.md"]]
    mock_get.return_value = "Rules content"
    mock_load.return_value = ("Givens content", ["image_data"])

    content, images = prompt_handler.collect_file_content_by_extension(prompt_data_path, extensions)

    expected_lookups = [call(prompt_data_path, [ext]) for ext in extensions]
    mock_find.assert_has_calls(expected_lookups)
    mock_get.assert_called_once_with(os.path.join(prompt_data_path, "rules.rules.md"))
    mock_load.assert_called_once_with(os.path.join(prompt_data_path, "givens.prompt_givens.md"))

    # Both sources end up in the aggregate content; image payloads pass through untouched.
    assert "Rules content" in content
    assert "Givens content" in content
    assert images == ["image_data"]
|
|
546
|
+
|
|
547
|
+
|
|
548
|
+
class TestArtefactAndTemplateHandling:
    """Tests functions that manage artefact and template files."""

    @pytest.fixture(autouse=True)
    def setup_fs(self, tmp_path):
        """Run every test from a temp cwd with Classifier.get_sub_directory patched."""
        self.root = tmp_path
        self.mock_classifier = "my_artefact"
        self.mock_param = "my_param"
        os.chdir(self.root)

        # Patch stays active for the whole test; stopped on teardown below.
        self.classifier_patch = patch(
            'ara_cli.prompt_handler.Classifier.get_sub_directory',
            return_value=self.mock_classifier,
        )
        self.mock_get_sub_dir = self.classifier_patch.start()

        yield

        self.classifier_patch.stop()
|
|
564
|
+
|
|
565
|
+
def test_prompt_data_directory_creation(self):
    """The prompt.data directory is created under ara/<classifier>/<param>.data and returned."""
    returned = prompt_handler.prompt_data_directory_creation(self.mock_classifier, self.mock_param)

    expected_path = (
        self.root / "ara" / self.mock_classifier / f"{self.mock_param}.data" / "prompt.data"
    )
    assert os.path.exists(expected_path)
    assert Path(returned).resolve() == expected_path.resolve()
|
|
570
|
+
|
|
571
|
+
@patch('ara_cli.prompt_handler.generate_markdown_listing')
@patch('ara_cli.prompt_handler.ArtefactCreator')
def test_initialize_prompt_templates(self, mock_artefact_creator, mock_generate_listing, mock_config_manager):
    """initialize_prompt_templates creates prompt artefact files and two markdown listings."""
    # This side effect creates the file that the function expects to read
    def create_dummy_file(*args, **kwargs):
        file_path = args[2]
        Path(file_path).parent.mkdir(parents=True, exist_ok=True)
        Path(file_path).touch()

    mock_generate_listing.side_effect = create_dummy_file

    prompt_handler.initialize_prompt_templates(self.mock_classifier, self.mock_param)

    # Fix: removed dead locals (prompt_data_path / prompt_log_path) that were
    # computed here but never used by any assertion.
    mock_artefact_creator.return_value.create_artefact_prompt_files.assert_called_once()
    assert mock_generate_listing.call_count == 2
|
|
589
|
+
|
|
590
|
+
|
|
591
|
+
@patch('ara_cli.prompt_handler.generate_markdown_listing')
def test_generate_config_prompt_template_file(self, mock_generate_listing, mock_config_manager):
    """The template config listing covers both module dirs, blueprint globs, and the target file."""
    prompt_data_path = "prompt/data"

    with patch('ara_cli.prompt_handler.TemplatePathManager.get_template_base_path', return_value="/global/templates"):
        prompt_handler.generate_config_prompt_template_file(prompt_data_path, "config.md")

    mock_generate_listing.assert_called_once()
    listing_args = mock_generate_listing.call_args[0]

    scanned_dirs, patterns, output_file = listing_args[0], listing_args[1], listing_args[2]
    assert any("custom-prompt-modules" in d for d in scanned_dirs)
    assert any("prompt-modules" in d for d in scanned_dirs)
    assert "*.blueprint.md" in patterns
    assert output_file == os.path.join(prompt_data_path, "config.md")
|
|
603
|
+
|
|
604
|
+
@patch('ara_cli.prompt_handler.generate_markdown_listing')
def test_generate_config_prompt_givens_file(self, mock_generate_listing, mock_config_manager):
    """The givens config listing scans ara and ./src with the configured include patterns."""
    prompt_data_path = prompt_handler.prompt_data_directory_creation(self.mock_classifier, self.mock_param)

    prompt_handler.generate_config_prompt_givens_file(prompt_data_path, "config.givens.md")

    mock_generate_listing.assert_called_once()
    listing_args = mock_generate_listing.call_args[0]

    scanned_dirs = listing_args[0]
    assert "ara" in scanned_dirs
    assert "./src" in scanned_dirs
    assert listing_args[1] == ["*.py", "*.md"]
    assert listing_args[2] == os.path.join(prompt_data_path, "config.givens.md")
|
|
616
|
+
|
|
617
|
+
@patch('ara_cli.prompt_handler.generate_markdown_listing')
def test_generate_config_prompt_givens_file_marks_artefact(self, mock_generate_listing, mock_config_manager):
    """Only the requested artefact's checkbox is flipped to [x]; other entries stay unchecked."""
    prompt_data_path = Path(prompt_handler.prompt_data_directory_creation(self.mock_classifier, self.mock_param))
    config_path = prompt_data_path / "config.givens.md"
    artefact_to_mark = "file.py"

    # Stand-in for the real listing generator: write two unchecked entries.
    def create_fake_file(*args, **kwargs):
        with open(args[2], 'w') as f:
            f.write(f"- [] some_other_file.txt\n- [] {artefact_to_mark}\n")

    mock_generate_listing.side_effect = create_fake_file

    prompt_handler.generate_config_prompt_givens_file(
        str(prompt_data_path), "config.givens.md", artefact_to_mark=artefact_to_mark
    )

    content = config_path.read_text()
    assert f"- [x] {artefact_to_mark}" in content
    assert "- [] some_other_file.txt" in content
|
|
637
|
+
|
|
638
|
+
@patch('ara_cli.prompt_handler.extract_and_load_markdown_files')
@patch('ara_cli.prompt_handler.move_and_copy_files')
@patch('ara_cli.prompt_handler.TemplatePathManager.get_template_base_path', return_value="/global/templates")
def test_load_selected_prompt_templates(self, mock_base_path, mock_move, mock_extract, mock_config_manager):
    """Recognized template entries are moved/copied; unrecognized ones only produce a warning."""
    prompt_data_path = prompt_handler.prompt_data_directory_creation(self.mock_classifier, self.mock_param)
    config_file = Path(prompt_data_path) / "config.prompt_templates.md"
    config_file.touch()

    mock_extract.return_value = [
        "custom-prompt-modules/my_custom.rules.md",
        "prompt-modules/global.intention.md",
        "unrecognized/file.md"
    ]

    with patch('builtins.print') as mock_print:
        prompt_handler.load_selected_prompt_templates(self.mock_classifier, self.mock_param)

    # Fix: removed dead local `archive_path` (os.path.join result was never used).
    assert mock_move.call_count == 2
    mock_print.assert_any_call("WARNING: Unrecognized template type for item unrecognized/file.md.")
|
|
659
|
+
|
|
660
|
+
def test_load_selected_prompt_templates_no_config_file_warns_and_returns(self):
    """Without a config.prompt_templates.md the function warns once and does nothing else."""
    prompt_handler.prompt_data_directory_creation(self.mock_classifier, self.mock_param)

    with patch('builtins.print') as mock_print:
        prompt_handler.load_selected_prompt_templates(self.mock_classifier, self.mock_param)

    mock_print.assert_called_once_with("WARNING: config.prompt_templates.md does not exist.")
|
|
667
|
+
|
|
668
|
+
@patch('ara_cli.prompt_handler.send_prompt')
@patch('ara_cli.prompt_handler.collect_file_content_by_extension')
@patch('ara_cli.prompt_handler.append_images_to_message', side_effect=lambda msg, img: msg)  # Passthrough
def test_create_and_send_custom_prompt_handles_empty_chunks(self, mock_append, mock_collect, mock_send, tmp_path):
    """Streamed chunks whose delta content is None are dropped from the prompt log."""
    # Create the directory structure the function expects (cwd is tmp_path via the fixture).
    data_dir = Path(f"ara/{self.mock_classifier}/{self.mock_param}.data/prompt.data")
    data_dir.mkdir(parents=True, exist_ok=True)

    mock_collect.return_value = ("Test Content", [])

    # Stream: first chunk carries no content, second carries "response".
    chunk_with_text = MagicMock()
    chunk_with_text.choices[0].delta.content = "response"
    chunk_without_text = MagicMock()
    chunk_without_text.choices[0].delta.content = None
    mock_send.return_value = iter([chunk_without_text, chunk_with_text])

    log_file = tmp_path / "ara" / self.mock_classifier / f"{self.mock_param}.data" / f"{self.mock_classifier}.prompt_log.md"
    log_file.touch()

    prompt_handler.create_and_send_custom_prompt(self.mock_classifier, self.mock_param)

    logged = log_file.read_text()
    assert "response" in logged
    assert "None" not in logged
|
|
692
|
+
|
|
693
|
+
@patch('ara_cli.prompt_handler.send_prompt')
@patch('ara_cli.prompt_handler.collect_file_content_by_extension')
@patch('ara_cli.prompt_handler.append_images_to_message')
def test_create_and_send_custom_prompt(self, mock_append_images, mock_collect, mock_send, mock_config_manager):
    """The collected content is wrapped in one message dict, images appended, and the reply logged."""
    prompt_handler.prompt_data_directory_creation(self.mock_classifier, self.mock_param)

    mock_collect.return_value = ("### GIVENS\ncontent", [{"type": "image_url"}])

    # append_images_to_message returns a single dict, not a list of dicts.
    returned_message_dict = {'role': 'user', 'content': ['### GIVENS\ncontent', {'type': 'image_url'}]}
    mock_append_images.return_value = returned_message_dict

    llm_chunk = MagicMock(choices=[MagicMock(delta=MagicMock(content="llm response"))])
    mock_send.return_value = iter([llm_chunk])

    prompt_handler.create_and_send_custom_prompt(self.mock_classifier, self.mock_param)

    mock_collect.assert_called_once()

    # Assert that append_images_to_message was called with a single dict (the bug fix)
    mock_append_images.assert_called_once_with(
        {'role': 'user', 'content': '### GIVENS\ncontent'},
        [{'type': 'image_url'}]
    )

    # Assert that send_prompt was called with a list containing the dict returned from append_images_to_message
    mock_send.assert_called_once_with([returned_message_dict])

    log_file = self.root / "ara" / self.mock_classifier / f"{self.mock_param}.data" / f"{self.mock_classifier}.prompt_log.md"
    assert "llm response" in log_file.read_text()
|
|
722
|
+
|
|
723
|
+
@patch('ara_cli.global_file_lister.generate_global_markdown_listing')
def test_generate_config_prompt_global_givens_file(self, mock_global_lister, mock_config_manager, mock_config):
    """Tests that the global givens file is generated correctly when global_dirs are present."""
    prompt_data_path = self.root / "prompt/data"
    prompt_data_path.mkdir(parents=True)

    # Scenario 1: No global_dirs are configured, should return early and do nothing.
    mock_config.global_dirs = []
    prompt_handler.generate_config_prompt_global_givens_file(str(prompt_data_path), "global.md")
    mock_global_lister.assert_not_called()

    # Scenario 2: With global_dirs, should call the global lister with correct arguments.
    mock_config.global_dirs = [{"source_dir": "/global/src1"}, {"path": "/global/src2"}]
    mock_config.ara_prompt_given_list_includes = ["*.py", "*.md"]

    # Use patch to suppress print output during the test
    with patch('builtins.print'):
        prompt_handler.generate_config_prompt_global_givens_file(str(prompt_data_path), "global.md")

    mock_global_lister.assert_called_once()
    lister_args = mock_global_lister.call_args[0]
    assert lister_args[0] == ["/global/src1", "/global/src2"]
    assert lister_args[1] == ["*.py", "*.md"]
    assert lister_args[2] == os.path.join(prompt_data_path, "global.md")
|