ara-cli 0.1.9.86-py3-none-any.whl → 0.1.9.89-py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
- ara_cli/ara_config.py +69 -114
- ara_cli/prompt_handler.py +2 -5
- ara_cli/version.py +1 -1
- {ara_cli-0.1.9.86.dist-info → ara_cli-0.1.9.89.dist-info}/METADATA +1 -1
- {ara_cli-0.1.9.86.dist-info → ara_cli-0.1.9.89.dist-info}/RECORD +10 -9
- tests/test_ara_config.py +174 -281
- tests/test_prompt_handler.py +306 -0
- {ara_cli-0.1.9.86.dist-info → ara_cli-0.1.9.89.dist-info}/WHEEL +0 -0
- {ara_cli-0.1.9.86.dist-info → ara_cli-0.1.9.89.dist-info}/entry_points.txt +0 -0
- {ara_cli-0.1.9.86.dist-info → ara_cli-0.1.9.89.dist-info}/top_level.txt +0 -0
tests/test_prompt_handler.py
@@ -0,0 +1,306 @@
+import pytest
+import os
+import shutil
+import base64
+from unittest.mock import patch, MagicMock, mock_open, call
+from pathlib import Path
+
+from ara_cli import prompt_handler
+from ara_cli.ara_config import ARAconfig, LLMConfigItem
+
+@pytest.fixture
+def mock_config():
+    """Mocks a standard ARAconfig object for testing."""
+    config = ARAconfig(
+        ext_code_dirs=[{"source_dir": "./src"}],
+        glossary_dir="./glossary",
+        doc_dir="./docs",
+        local_prompt_templates_dir="./ara/.araconfig",
+        custom_prompt_templates_subdir="custom-prompt-modules",
+        ara_prompt_given_list_includes=["*.py"],
+        llm_config={
+            "gpt-4o": LLMConfigItem(provider="openai", model="openai/gpt-4o", temperature=0.8, max_tokens=1024, max_completion_tokens=None),
+            "o3-mini": LLMConfigItem(provider="openai", model="openai/o3-mini", temperature=0.9, max_tokens=2048, max_completion_tokens=None),
+        },
+        default_llm="gpt-4o"
+    )
+    return config
+
+@pytest.fixture
+def mock_config_manager(mock_config):
+    """Patches ConfigManager to ensure it always returns the mock_config."""
+    with patch('ara_cli.ara_config.ConfigManager.get_config') as mock_get_config:
+        mock_get_config.return_value = mock_config
+        yield mock_get_config
+
+@pytest.fixture(autouse=True)
+def reset_singleton():
+    """Resets the LLMSingleton before each test for isolation."""
+    prompt_handler.LLMSingleton._instance = None
+    prompt_handler.LLMSingleton._model = None
+    yield
+
+class TestLLMSingleton:
+    """Tests the behavior of the LLMSingleton class."""
+
+    def test_get_instance_creates_with_default_model(self, mock_config_manager):
+        instance = prompt_handler.LLMSingleton.get_instance()
+        assert instance is not None
+        assert prompt_handler.LLMSingleton.get_model() == "gpt-4o"
+        assert instance.config_parameters['temperature'] == 0.8
+
+    def test_get_instance_creates_with_first_model_if_no_default(self, mock_config_manager, mock_config):
+        mock_config.default_llm = None
+        instance = prompt_handler.LLMSingleton.get_instance()
+        assert instance is not None
+        assert prompt_handler.LLMSingleton.get_model() == "gpt-4o"
+
+    def test_get_instance_returns_same_instance(self, mock_config_manager):
+        instance1 = prompt_handler.LLMSingleton.get_instance()
+        instance2 = prompt_handler.LLMSingleton.get_instance()
+        assert instance1 is instance2
+
+    def test_set_model_switches_model(self, mock_config_manager):
+        initial_instance = prompt_handler.LLMSingleton.get_instance()
+        assert prompt_handler.LLMSingleton.get_model() == "gpt-4o"
+
+        with patch('builtins.print') as mock_print:
+            new_instance = prompt_handler.LLMSingleton.set_model("o3-mini")
+            mock_print.assert_called_with("Language model switched to 'o3-mini'")
+
+        assert prompt_handler.LLMSingleton.get_model() == "o3-mini"
+        assert new_instance.config_parameters['temperature'] == 0.9
+        assert initial_instance is not new_instance
+
+    def test_set_model_to_invalid_raises_error(self, mock_config_manager):
+        with pytest.raises(ValueError, match="No configuration found for the model: invalid-model"):
+            prompt_handler.LLMSingleton.set_model("invalid-model")
+
+    def test_get_model_initializes_if_needed(self, mock_config_manager):
+        assert prompt_handler.LLMSingleton._instance is None
+        model = prompt_handler.LLMSingleton.get_model()
+        assert model == "gpt-4o"
+        assert prompt_handler.LLMSingleton._instance is not None
+
+class TestFileIO:
+    """Tests file I/O helper functions."""
+
+    def test_write_and_read_string_from_file(self, tmp_path):
+        file_path = tmp_path / "test.txt"
+        test_string = "Hello World"
+
+        prompt_handler.write_string_to_file(file_path, test_string, 'w')
+
+        content = prompt_handler.read_string_from_file(file_path)
+        assert test_string in content
+
+        content_get = prompt_handler.get_file_content(file_path)
+        assert content == content_get
+
+
+class TestCoreLogic:
+    """Tests functions related to the main business logic."""
+
+    @patch('ara_cli.prompt_handler.litellm.completion')
+    @patch('ara_cli.prompt_handler.LLMSingleton.get_instance')
+    def test_send_prompt(self, mock_get_instance, mock_completion, mock_config):
+        mock_llm_instance = MagicMock()
+        mock_llm_instance.config_parameters = mock_config.llm_config['gpt-4o'].model_dump()
+        mock_get_instance.return_value = mock_llm_instance
+
+        mock_chunk = MagicMock()
+        mock_chunk.choices[0].delta.content = "test chunk"
+        mock_completion.return_value = [mock_chunk]
+
+        prompt = [{"role": "user", "content": "A test"}]
+
+        result = list(prompt_handler.send_prompt(prompt))
+
+        # Create expected parameters to match the actual implementation:
+        # send_prompt copies config_parameters and only removes 'provider'.
+        expected_params = mock_config.llm_config['gpt-4o'].model_dump()
+        if 'provider' in expected_params:
+            del expected_params['provider']
+
+        mock_completion.assert_called_once_with(
+            messages=prompt,
+            stream=True,
+            **expected_params
+        )
+        assert len(result) == 1
+        assert result[0].choices[0].delta.content == "test chunk"
+
+    @patch('ara_cli.prompt_handler.send_prompt')
+    def test_describe_image(self, mock_send_prompt, tmp_path):
+        fake_image_path = tmp_path / "test.png"
+        fake_image_content = b"fakeimagedata"
+        fake_image_path.write_bytes(fake_image_content)
+
+        mock_send_prompt.return_value = iter([])
+
+        prompt_handler.describe_image(fake_image_path)
+
+        mock_send_prompt.assert_called_once()
+        called_args = mock_send_prompt.call_args[0][0]
+
+        assert len(called_args) == 1
+        message_content = called_args[0]['content']
+        assert isinstance(message_content, list)
+        assert message_content[0]['type'] == 'text'
+        assert message_content[1]['type'] == 'image_url'
+
+        encoded_image = base64.b64encode(fake_image_content).decode('utf-8')
+        expected_url = f"data:image/png;base64,{encoded_image}"
+        assert message_content[1]['image_url']['url'] == expected_url
+
+    @patch('ara_cli.prompt_handler.Classifier.get_sub_directory', return_value="test_classifier")
+    def test_append_headings(self, mock_get_sub, tmp_path):
+        os.chdir(tmp_path)
+        os.makedirs("ara/test_classifier/my_param.data", exist_ok=True)
+
+        log_file = tmp_path / "ara/test_classifier/my_param.data/test_classifier.prompt_log.md"
+
+        prompt_handler.append_headings("test_classifier", "my_param", "PROMPT")
+        assert "## PROMPT_1" in log_file.read_text()
+
+        prompt_handler.append_headings("test_classifier", "my_param", "PROMPT")
+        assert "## PROMPT_2" in log_file.read_text()
+
+        prompt_handler.append_headings("test_classifier", "my_param", "RESULT")
+        assert "## RESULT_1" in log_file.read_text()
+
+class TestArtefactAndTemplateHandling:
+    """Tests functions that manage artefact and template files."""
+
+    @pytest.fixture(autouse=True)
+    def setup_fs(self, tmp_path):
+        self.root = tmp_path
+        os.chdir(self.root)
+        self.mock_classifier = "my_artefact"
+        self.mock_param = "my_param"
+
+        self.classifier_patch = patch('ara_cli.prompt_handler.Classifier.get_sub_directory', return_value=self.mock_classifier)
+        self.mock_get_sub_dir = self.classifier_patch.start()
+
+        yield
+
+        self.classifier_patch.stop()
+
+    def test_prompt_data_directory_creation(self):
+        path = prompt_handler.prompt_data_directory_creation(self.mock_classifier, self.mock_param)
+        expected_path = self.root / "ara" / self.mock_classifier / f"{self.mock_param}.data" / "prompt.data"
+        assert os.path.exists(expected_path)
+        assert Path(path).resolve() == expected_path.resolve()
+
+    @patch('ara_cli.prompt_handler.generate_markdown_listing')
+    def test_generate_config_prompt_givens_file(self, mock_generate_listing, mock_config_manager):
+        prompt_data_path = prompt_handler.prompt_data_directory_creation(self.mock_classifier, self.mock_param)
+
+        prompt_handler.generate_config_prompt_givens_file(prompt_data_path, "config.givens.md")
+
+        mock_generate_listing.assert_called_once()
+        args, _ = mock_generate_listing.call_args
+        assert "ara" in args[0]
+        assert "./src" in args[0]
+        assert "./docs" in args[0]
+        assert "./glossary" in args[0]
+        assert args[1] == ["*.py"]
+        assert args[2] == os.path.join(prompt_data_path, "config.givens.md")
+
+    @patch('ara_cli.prompt_handler.generate_markdown_listing')
+    def test_generate_config_prompt_givens_file_marks_artefact(self, mock_generate_listing, mock_config_manager):
+        prompt_data_path = Path(prompt_handler.prompt_data_directory_creation(self.mock_classifier, self.mock_param))
+        config_path = prompt_data_path / "config.givens.md"
+        artefact_to_mark = "file.py"
+
+        def create_fake_file(*args, **kwargs):
+            content = f"- [] some_other_file.txt\n- [] {artefact_to_mark}\n"
+            with open(args[2], 'w') as f:
+                f.write(content)
+
+        mock_generate_listing.side_effect = create_fake_file
+
+        prompt_handler.generate_config_prompt_givens_file(
+            str(prompt_data_path), "config.givens.md", artefact_to_mark=artefact_to_mark
+        )
+
+        content = config_path.read_text()
+        assert f"- [x] {artefact_to_mark}" in content
+        assert "- [] some_other_file.txt" in content
+
+    @patch('ara_cli.prompt_handler.extract_and_load_markdown_files')
+    @patch('ara_cli.prompt_handler.move_and_copy_files')
+    @patch('ara_cli.prompt_handler.TemplatePathManager.get_template_base_path', return_value="/global/templates")
+    def test_load_selected_prompt_templates(self, mock_base_path, mock_move, mock_extract, mock_config_manager):
+        prompt_data_path = prompt_handler.prompt_data_directory_creation(self.mock_classifier, self.mock_param)
+        config_file = Path(prompt_data_path) / "config.prompt_templates.md"
+        config_file.touch()
+
+        mock_extract.return_value = [
+            "custom-prompt-modules/my_custom.rules.md",
+            "prompt-modules/global.intention.md",
+            "unrecognized/file.md"
+        ]
+
+        prompt_handler.load_selected_prompt_templates(self.mock_classifier, self.mock_param)
+
+        archive_path = os.path.join(prompt_data_path, "prompt.archive")
+
+        assert mock_move.call_count == 2
+        expected_calls = [
+            call(
+                os.path.join(mock_config_manager.return_value.local_prompt_templates_dir, "custom-prompt-modules/my_custom.rules.md"),
+                prompt_data_path,
+                archive_path
+            ),
+            call(
+                os.path.join("/global/templates", "prompt-modules/global.intention.md"),
+                prompt_data_path,
+                archive_path
+            )
+        ]
+        mock_move.assert_has_calls(expected_calls, any_order=True)
+
+    def test_extract_and_load_markdown_files(self):
+        md_content = """
+# prompt-modules
+## a-category
+- [x] first.rules.md
+- [] second.rules.md
+# custom-prompt-modules
+- [x] custom.intention.md
+"""
+        m = mock_open(read_data=md_content)
+        with patch('builtins.open', m):
+            paths = prompt_handler.extract_and_load_markdown_files("dummy_path")
+
+        assert len(paths) == 2
+        assert 'prompt-modules/a-category/first.rules.md' in paths
+        assert 'custom-prompt-modules/custom.intention.md' in paths
+
+    @patch('ara_cli.prompt_handler.send_prompt')
+    @patch('ara_cli.prompt_handler.collect_file_content_by_extension')
+    @patch('ara_cli.prompt_handler.append_images_to_message')
+    def test_create_and_send_custom_prompt(self, mock_append_images, mock_collect, mock_send):
+        prompt_data_path = Path(prompt_handler.prompt_data_directory_creation(self.mock_classifier, self.mock_param))
+
+        mock_collect.return_value = ("### GIVENS\ncontent", [{"type": "image_url", "image_url": {}}])
+
+        final_message_list = [{"role": "user", "content": [{"type": "text", "text": "### GIVENS\ncontent"}, {"type": "image_url", "image_url": {}}]}]
+        mock_append_images.return_value = final_message_list
+
+        mock_send.return_value = iter([MagicMock(choices=[MagicMock(delta=MagicMock(content="llm response"))])])
+
+        prompt_handler.create_and_send_custom_prompt(self.mock_classifier, self.mock_param)
+
+        mock_collect.assert_called_once()
+        mock_append_images.assert_called_once()
+        mock_send.assert_called_once_with(final_message_list)
+
+        artefact_root = self.root / "ara" / self.mock_classifier
+        log_file = artefact_root / f"{self.mock_param}.data" / f"{self.mock_classifier}.prompt_log.md"
+
+        assert log_file.exists()
+        log_content = log_file.read_text()
+        assert "### GIVENS\ncontent" in log_content
+        assert "llm response" in log_content