ara-cli 0.1.13.3-py3-none-any.whl → 0.1.14.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ara_cli/__init__.py +1 -1
- ara_cli/ara_command_action.py +162 -112
- ara_cli/ara_config.py +1 -1
- ara_cli/ara_subcommands/convert.py +66 -2
- ara_cli/ara_subcommands/prompt.py +266 -106
- ara_cli/artefact_autofix.py +2 -2
- ara_cli/artefact_converter.py +152 -53
- ara_cli/artefact_creator.py +41 -17
- ara_cli/artefact_lister.py +3 -3
- ara_cli/artefact_models/artefact_model.py +1 -1
- ara_cli/artefact_models/artefact_templates.py +0 -9
- ara_cli/artefact_models/feature_artefact_model.py +8 -8
- ara_cli/artefact_reader.py +62 -43
- ara_cli/artefact_scan.py +39 -17
- ara_cli/chat.py +23 -15
- ara_cli/children_contribution_updater.py +737 -0
- ara_cli/classifier.py +34 -0
- ara_cli/commands/load_command.py +4 -3
- ara_cli/commands/load_image_command.py +1 -1
- ara_cli/commands/read_command.py +23 -27
- ara_cli/completers.py +24 -0
- ara_cli/error_handler.py +26 -11
- ara_cli/file_loaders/document_reader.py +0 -178
- ara_cli/file_loaders/factories/__init__.py +0 -0
- ara_cli/file_loaders/factories/document_reader_factory.py +32 -0
- ara_cli/file_loaders/factories/file_loader_factory.py +27 -0
- ara_cli/file_loaders/file_loader.py +1 -30
- ara_cli/file_loaders/loaders/__init__.py +0 -0
- ara_cli/file_loaders/{document_file_loader.py → loaders/document_file_loader.py} +1 -1
- ara_cli/file_loaders/loaders/text_file_loader.py +47 -0
- ara_cli/file_loaders/readers/__init__.py +0 -0
- ara_cli/file_loaders/readers/docx_reader.py +49 -0
- ara_cli/file_loaders/readers/excel_reader.py +27 -0
- ara_cli/file_loaders/{markdown_reader.py → readers/markdown_reader.py} +1 -1
- ara_cli/file_loaders/readers/odt_reader.py +59 -0
- ara_cli/file_loaders/readers/pdf_reader.py +54 -0
- ara_cli/file_loaders/readers/pptx_reader.py +104 -0
- ara_cli/file_loaders/tools/__init__.py +0 -0
- ara_cli/output_suppressor.py +53 -0
- ara_cli/prompt_handler.py +123 -17
- ara_cli/tag_extractor.py +8 -7
- ara_cli/version.py +1 -1
- {ara_cli-0.1.13.3.dist-info → ara_cli-0.1.14.0.dist-info}/METADATA +18 -12
- {ara_cli-0.1.13.3.dist-info → ara_cli-0.1.14.0.dist-info}/RECORD +58 -45
- {ara_cli-0.1.13.3.dist-info → ara_cli-0.1.14.0.dist-info}/WHEEL +1 -1
- tests/test_artefact_converter.py +1 -46
- tests/test_artefact_lister.py +11 -8
- tests/test_chat.py +4 -4
- tests/test_chat_givens_images.py +1 -1
- tests/test_children_contribution_updater.py +98 -0
- tests/test_document_loader_office.py +267 -0
- tests/test_prompt_handler.py +416 -214
- tests/test_setup_default_chat_prompt_mode.py +198 -0
- tests/test_tag_extractor.py +95 -49
- ara_cli/file_loaders/document_readers.py +0 -233
- ara_cli/file_loaders/file_loaders.py +0 -123
- ara_cli/file_loaders/text_file_loader.py +0 -187
- /ara_cli/file_loaders/{binary_file_loader.py → loaders/binary_file_loader.py} +0 -0
- /ara_cli/file_loaders/{image_processor.py → tools/image_processor.py} +0 -0
- {ara_cli-0.1.13.3.dist-info → ara_cli-0.1.14.0.dist-info}/entry_points.txt +0 -0
- {ara_cli-0.1.13.3.dist-info → ara_cli-0.1.14.0.dist-info}/top_level.txt +0 -0
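Beyond the prompt-handler rework, the listing shows that ara_cli/file_loaders has been split into factories/, loaders/, readers/ and tools/ sub-packages, with one reader module per document format (docx, xlsx, odt, pdf, pptx, markdown) and a document_reader_factory alongside a file_loader_factory. The sketch below only illustrates the kind of extension-based dispatch such a layout suggests; apart from the module paths quoted from the listing, every name in it is a hypothetical placeholder, not ara-cli's actual API.

```python
# Hypothetical sketch of extension-based reader dispatch suggested by the new
# file_loaders/readers/ and file_loaders/factories/ layout; the function and
# dictionary names are illustrative assumptions, not ara-cli's real interfaces.
from pathlib import Path
from typing import Callable, Dict


def read_docx(path: Path) -> str: ...      # stand-in for readers/docx_reader.py
def read_pdf(path: Path) -> str: ...       # stand-in for readers/pdf_reader.py
def read_markdown(path: Path) -> str: ...  # stand-in for readers/markdown_reader.py


_READERS: Dict[str, Callable[[Path], str]] = {
    ".docx": read_docx,
    ".pdf": read_pdf,
    ".md": read_markdown,
}


def get_document_reader(path: Path) -> Callable[[Path], str]:
    """Factory-style lookup: pick a reader for the file's suffix or fail loudly."""
    try:
        return _READERS[path.suffix.lower()]
    except KeyError as exc:
        raise ValueError(f"No reader registered for {path.suffix!r}") from exc
```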
tests/test_prompt_handler.py (CHANGED)
```diff
@@ -16,22 +16,24 @@ from langfuse.api.resources.commons.errors import NotFoundError
 @pytest.fixture(autouse=True)
 def mock_langfuse():
     """Mock Langfuse client to prevent network calls during tests."""
-    with patch.object(prompt_handler.LLMSingleton,
+    with patch.object(prompt_handler.LLMSingleton, "langfuse", None):
         mock_langfuse_instance = MagicMock()
-
+
         # Mock the get_prompt method to raise NotFoundError (simulating prompt not found)
         mock_langfuse_instance.get_prompt.side_effect = NotFoundError(
-            # status_code=404,
-            body={
+            # status_code=404,
+            body={"message": "Prompt not found", "error": "LangfuseNotFoundError"}
         )
-
+
         # Mock the span context manager
         mock_span = MagicMock()
         mock_span.__enter__ = MagicMock(return_value=mock_span)
         mock_span.__exit__ = MagicMock(return_value=None)
         mock_langfuse_instance.start_as_current_span.return_value = mock_span
-
-        with patch.object(
+
+        with patch.object(
+            prompt_handler.LLMSingleton, "langfuse", mock_langfuse_instance
+        ):
             yield mock_langfuse_instance
 
 
@@ -45,11 +47,21 @@ def mock_config():
         local_prompt_templates_dir="./ara/.araconfig/custom-prompt-modules",
         ara_prompt_given_list_includes=["*.py", "*.md"],
         llm_config={
-            "gpt-4o": LLMConfigItem(
-
+            "gpt-4o": LLMConfigItem(
+                provider="openai",
+                model="openai/gpt-4o",
+                temperature=0.8,
+                max_tokens=1024,
+            ),
+            "o3-mini": LLMConfigItem(
+                provider="openai",
+                model="openai/o3-mini",
+                temperature=0.9,
+                max_tokens=2048,
+            ),
         },
         default_llm="gpt-4o",
-        extraction_llm="o3-mini"
+        extraction_llm="o3-mini",
     )
     return config
 
```
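The reworked mock_config fixture above implies the shape of the new LLM configuration: each llm_config entry carries provider, model, temperature and max_tokens, and the top-level default_llm and extraction_llm names select which entry serves ordinary prompts versus extraction calls. A minimal sketch of that structure as plain Python data follows; the field names and example values mirror the fixture, while how ara-cli persists or validates this on disk is not shown in the diff and is left out here.

```python
# Shape of the LLM configuration implied by the mock_config fixture above.
# Only the field names and example values come from the fixture; storage and
# validation details are intentionally omitted because the diff does not show them.
llm_config = {
    "gpt-4o": {
        "provider": "openai",
        "model": "openai/gpt-4o",
        "temperature": 0.8,
        "max_tokens": 1024,
    },
    "o3-mini": {
        "provider": "openai",
        "model": "openai/o3-mini",
        "temperature": 0.9,
        "max_tokens": 2048,
    },
}
default_llm = "gpt-4o"      # used for regular prompts
extraction_llm = "o3-mini"  # used when send_prompt(..., purpose="extraction")
```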
```diff
@@ -57,7 +69,7 @@ def mock_config():
 @pytest.fixture(autouse=True)
 def mock_config_manager(mock_config):
     """Patches ConfigManager to ensure it always returns the mock_config."""
-    with patch.object(ConfigManager,
+    with patch.object(ConfigManager, "get_config") as mock_get_config:
         mock_get_config.return_value = mock_config
         yield mock_get_config
 
@@ -81,30 +93,44 @@ class TestLLMSingleton:
         assert instance is not None
         assert prompt_handler.LLMSingleton.get_default_model() == "gpt-4o"
         assert prompt_handler.LLMSingleton.get_extraction_model() == "o3-mini"
-        assert instance.default_config_params[
-        assert instance.extraction_config_params[
+        assert instance.default_config_params["temperature"] == 0.8
+        assert instance.extraction_config_params["temperature"] == 0.9
 
-    def test_get_instance_creates_with_first_model_if_no_default(
+    def test_get_instance_creates_with_first_model_if_no_default(
+        self, mock_config_manager, mock_config
+    ):
         mock_config.default_llm = None
         instance = prompt_handler.LLMSingleton.get_instance()
         assert instance is not None
         assert prompt_handler.LLMSingleton.get_default_model() == "gpt-4o"
 
-    def test_get_instance_no_extraction_llm_falls_back_to_default(
+    def test_get_instance_no_extraction_llm_falls_back_to_default(
+        self, mock_config_manager, mock_config
+    ):
         mock_config.extraction_llm = None
         instance = prompt_handler.LLMSingleton.get_instance()
         assert instance is not None
         assert prompt_handler.LLMSingleton.get_extraction_model() == "gpt-4o"
 
-    def test_get_instance_no_llm_config_raises_error(
+    def test_get_instance_no_llm_config_raises_error(
+        self, mock_config_manager, mock_config
+    ):
         mock_config.llm_config = {}
         mock_config.default_llm = None  # This is crucial to hit the correct check
-        with pytest.raises(
+        with pytest.raises(
+            ValueError,
+            match="No LLM configurations are defined in the configuration file.",
+        ):
             prompt_handler.LLMSingleton.get_instance()
 
-    def test_get_instance_constructor_raises_for_missing_extraction_config(
+    def test_get_instance_constructor_raises_for_missing_extraction_config(
+        self, mock_config_manager, mock_config
+    ):
         mock_config.extraction_llm = "missing-model"
-        with pytest.raises(
+        with pytest.raises(
+            ValueError,
+            match="No configuration found for the extraction model: missing-model",
+        ):
             prompt_handler.LLMSingleton.get_instance()
 
     def test_get_instance_returns_same_instance(self, mock_config_manager):
@@ -113,19 +139,21 @@ class TestLLMSingleton:
         assert instance1 is instance2
 
     def test_get_config_by_purpose(self, mock_config_manager):
-        default_params = prompt_handler.LLMSingleton.get_config_by_purpose(
-        extraction_params = prompt_handler.LLMSingleton.get_config_by_purpose(
-
-
+        default_params = prompt_handler.LLMSingleton.get_config_by_purpose("default")
+        extraction_params = prompt_handler.LLMSingleton.get_config_by_purpose(
+            "extraction"
+        )
+        assert default_params["model"] == "openai/gpt-4o"
+        assert extraction_params["model"] == "openai/o3-mini"
 
     def test_set_default_model_switches_model(self, mock_config_manager):
         initial_instance = prompt_handler.LLMSingleton.get_instance()
         assert prompt_handler.LLMSingleton.get_default_model() == "gpt-4o"
-
+
         new_instance = prompt_handler.LLMSingleton.set_default_model("o3-mini")
 
         assert prompt_handler.LLMSingleton.get_default_model() == "o3-mini"
-        assert new_instance.default_config_params[
+        assert new_instance.default_config_params["temperature"] == 0.9
         assert initial_instance is not new_instance
 
     def test_set_default_model_to_same_model_does_nothing(self, mock_config_manager):
@@ -134,14 +162,17 @@ class TestLLMSingleton:
         assert instance1 is instance2
 
     def test_set_default_model_to_invalid_raises_error(self, mock_config_manager):
-        with pytest.raises(
+        with pytest.raises(
+            ValueError,
+            match="No configuration found for the default model: invalid-model",
+        ):
             prompt_handler.LLMSingleton.set_default_model("invalid-model")
 
     def test_set_extraction_model_switches_model(self, mock_config_manager):
         initial_instance = prompt_handler.LLMSingleton.get_instance()
         new_instance = prompt_handler.LLMSingleton.set_extraction_model("gpt-4o")
         assert prompt_handler.LLMSingleton.get_extraction_model() == "gpt-4o"
-        assert new_instance.extraction_config_params[
+        assert new_instance.extraction_config_params["temperature"] == 0.8
         assert initial_instance is not new_instance
 
     def test_set_extraction_model_to_same_model_does_nothing(self, mock_config_manager):
@@ -150,7 +181,10 @@ class TestLLMSingleton:
         assert instance1 is instance2
 
     def test_set_extraction_model_to_invalid_raises_error(self, mock_config_manager):
-        with pytest.raises(
+        with pytest.raises(
+            ValueError,
+            match="No configuration found for the extraction model: invalid-model",
+        ):
             prompt_handler.LLMSingleton.set_extraction_model("invalid-model")
 
     def test_get_default_model_initializes_if_needed(self, mock_config_manager):
@@ -172,12 +206,12 @@ class TestFileIO:
     def test_write_and_read_string_from_file(self, tmp_path):
         file_path = tmp_path / "test.txt"
         test_string = "Hello World"
-
-        prompt_handler.write_string_to_file(file_path, test_string,
-
+
+        prompt_handler.write_string_to_file(file_path, test_string, "w")
+
         content = prompt_handler.read_string_from_file(file_path)
         assert test_string in content
-
+
         content_get = prompt_handler.get_file_content(file_path)
         assert content.strip() == test_string
 
@@ -192,7 +226,7 @@ class TestFileIO:
 
 class TestCoreLogic:
     """Tests functions related to the main business logic."""
-
+
     @pytest.fixture(autouse=True)
     def setup_test_env(self, tmp_path):
         """Changes CWD to a temporary directory for test isolation."""
@@ -201,43 +235,63 @@ class TestCoreLogic:
         yield
         os.chdir(original_cwd)
 
-    @pytest.mark.parametrize(
-
-
-
-
-
-
-
-
-
-
+    @pytest.mark.parametrize(
+        "message, expected",
+        [
+            ({"content": "Hello"}, True),
+            ({"content": " "}, False),
+            ({"content": ""}, False),
+            ({"content": "\n\t"}, False),
+            ({"content": [{"type": "text", "text": " "}]}, False),
+            ({"content": [{"type": "text", "text": "Valid text"}]}, True),
+            (
+                {
+                    "content": [
+                        {"type": "image_url"},
+                        {"type": "text", "text": "More text"},
+                    ]
+                },
+                True,
+            ),
+            ({"content": []}, False),
+            ({"content": 123}, False),
+            ({}, False),
+        ],
+    )
     def test_is_valid_message(self, message, expected):
         assert prompt_handler._is_valid_message(message) == expected
 
-    @patch(
-    def test_send_prompt(self,
+    @patch("ara_cli.prompt_handler._get_litellm")
+    def test_send_prompt(self, mock_get_litellm, mock_config, mock_config_manager):
         """Tests that send_prompt uses the default LLM by default."""
+        mock_litellm = MagicMock()
+        mock_get_litellm.return_value = mock_litellm
+
         mock_chunk = MagicMock()
         mock_chunk.choices[0].delta.content = "test chunk"
-
+        mock_litellm.completion.return_value = [mock_chunk]
 
         prompt = [{"role": "user", "content": "A test"}]
-
+
         result = list(prompt_handler.send_prompt(prompt))
 
-        expected_params = mock_config.llm_config[
-        del expected_params[
+        expected_params = mock_config.llm_config["gpt-4o"].model_dump(exclude_none=True)
+        del expected_params["provider"]
 
-
+        mock_litellm.completion.assert_called_once_with(
             messages=prompt, stream=True, **expected_params
         )
         assert len(result) == 1
         assert result[0].choices[0].delta.content == "test chunk"
 
-    @patch(
-    def test_send_prompt_filters_invalid_messages(
+    @patch("ara_cli.prompt_handler._get_litellm")
+    def test_send_prompt_filters_invalid_messages(
+        self, mock_get_litellm, mock_config_manager
+    ):
+        mock_litellm = MagicMock()
+        mock_get_litellm.return_value = mock_litellm
+        mock_litellm.completion.return_value = []
+
         prompt = [
             {"role": "user", "content": "Valid message"},
             {"role": "user", "content": " "},
@@ -247,53 +301,62 @@ class TestCoreLogic:
 
         list(prompt_handler.send_prompt(prompt))
 
-
-        called_args =
-        assert called_args[
+        mock_litellm.completion.assert_called_once()
+        called_args = mock_litellm.completion.call_args[1]
+        assert called_args["messages"] == valid_prompt
 
-    @patch(
-    def test_send_prompt_uses_extraction_llm(
+    @patch("ara_cli.prompt_handler._get_litellm")
+    def test_send_prompt_uses_extraction_llm(
+        self, mock_get_litellm, mock_config, mock_config_manager
+    ):
         """Tests that send_prompt uses the extraction LLM when specified."""
-
+        mock_litellm = MagicMock()
+        mock_get_litellm.return_value = mock_litellm
+        mock_litellm.completion.return_value = []
+
         prompt = [{"role": "user", "content": "Extract this"}]
-
-        list(prompt_handler.send_prompt(prompt, purpose='extraction'))
 
-
-
+        list(prompt_handler.send_prompt(prompt, purpose="extraction"))
+
+        expected_params = mock_config.llm_config["o3-mini"].model_dump(
+            exclude_none=True
+        )
+        del expected_params["provider"]
 
-
+        mock_litellm.completion.assert_called_once_with(
             messages=prompt, stream=True, **expected_params
         )
 
-    @patch(
+    @patch("ara_cli.prompt_handler.send_prompt")
     def test_describe_image(self, mock_send_prompt, tmp_path, mock_langfuse):
         fake_image_path = tmp_path / "test.jpeg"
         fake_image_content = b"fakeimagedata"
         fake_image_path.write_bytes(fake_image_content)
-
+
         mock_send_prompt.return_value = iter([])
-
+
         # Ensure the langfuse mock is properly set up for this instance
         instance = prompt_handler.LLMSingleton.get_instance()
         instance.langfuse = mock_langfuse
-
+
         prompt_handler.describe_image(fake_image_path)
-
+
         mock_send_prompt.assert_called_once()
         called_args, called_kwargs = mock_send_prompt.call_args
-
-        assert called_kwargs == {
-        message_content = called_args[0][0][
-        assert message_content[0][
-        assert message_content[1][
-
-        encoded_image = base64.b64encode(fake_image_content).decode(
+
+        assert called_kwargs == {"purpose": "extraction"}
+        message_content = called_args[0][0]["content"]
+        assert message_content[0]["type"] == "text"
+        assert message_content[1]["type"] == "image_url"
+
+        encoded_image = base64.b64encode(fake_image_content).decode("utf-8")
         expected_url = f"data:image/jpeg;base64,{encoded_image}"
-        assert message_content[1][
+        assert message_content[1]["image_url"]["url"] == expected_url
 
-    @patch(
-    def test_describe_image_returns_response_text(
+    @patch("ara_cli.prompt_handler.send_prompt")
+    def test_describe_image_returns_response_text(
+        self, mock_send_prompt, tmp_path, mock_langfuse
+    ):
         fake_image_path = tmp_path / "test.gif"
         fake_image_path.touch()
 
@@ -302,7 +365,7 @@ class TestCoreLogic:
         mock_chunk2 = MagicMock()
         mock_chunk2.choices[0].delta.content = "a description."
         mock_chunk3 = MagicMock()
-        mock_chunk3.choices[0].delta.content = None
+        mock_chunk3.choices[0].delta.content = None  # Test empty chunk
         mock_send_prompt.return_value = iter([mock_chunk1, mock_chunk3, mock_chunk2])
 
         # Ensure the langfuse mock is properly set up for this instance
@@ -312,48 +375,65 @@ class TestCoreLogic:
         description = prompt_handler.describe_image(fake_image_path)
         assert description == "This is a description."
 
-    @patch(
+    @patch(
+        "ara_cli.prompt_handler.Classifier.get_sub_directory",
+        return_value="test_classifier",
+    )
     def test_append_headings(self, mock_get_sub, tmp_path):
         os.makedirs("ara/test_classifier/my_param.data", exist_ok=True)
-        log_file =
+        log_file = (
+            tmp_path / "ara/test_classifier/my_param.data/test_classifier.prompt_log.md"
+        )
         log_file.touch()
 
         prompt_handler.append_headings("test_classifier", "my_param", "PROMPT")
         assert "## PROMPT_1" in log_file.read_text()
-
+
         prompt_handler.append_headings("test_classifier", "my_param", "PROMPT")
         assert "## PROMPT_2" in log_file.read_text()
 
-    @patch(
+    @patch(
+        "ara_cli.prompt_handler.Classifier.get_sub_directory",
+        return_value="test_classifier",
+    )
     def test_append_headings_creates_file_if_not_exists(self, mock_get_sub, tmp_path):
         os.makedirs("ara/test_classifier/my_param.data", exist_ok=True)
-        log_file =
+        log_file = (
+            tmp_path / "ara/test_classifier/my_param.data/test_classifier.prompt_log.md"
+        )
         assert not log_file.exists()
 
         prompt_handler.append_headings("test_classifier", "my_param", "HEADING")
         assert log_file.exists()
         assert "## HEADING_1" in log_file.read_text()
 
-    @patch(
+    @patch(
+        "ara_cli.prompt_handler.Classifier.get_sub_directory",
+        return_value="test_classifier",
+    )
     def test_write_prompt_result(self, mock_get_sub, tmp_path):
         os.makedirs("ara/test_classifier/my_param.data", exist_ok=True)
-        log_file =
+        log_file = (
+            tmp_path / "ara/test_classifier/my_param.data/test_classifier.prompt_log.md"
+        )
 
-        prompt_handler.write_prompt_result(
+        prompt_handler.write_prompt_result(
+            "test_classifier", "my_param", "Test content"
+        )
         assert "Test content" in log_file.read_text()
 
     def test_prepend_system_prompt(self, mock_langfuse):
         # Ensure the langfuse mock is properly set up for this instance
         instance = prompt_handler.LLMSingleton.get_instance()
         instance.langfuse = mock_langfuse
-
+
         messages = [{"role": "user", "content": "Hi"}]
         result = prompt_handler.prepend_system_prompt(messages)
         assert len(result) == 2
-        assert result[0][
-        assert result[1][
+        assert result[0]["role"] == "system"
+        assert result[1]["role"] == "user"
 
-    @patch(
+    @patch("logging.getLogger")
     def test_append_images_to_message_logic(self, mock_get_logger):
         # Test case 1: No images, should return original message
         message_no_img = {"role": "user", "content": "Hello"}
@@ -366,16 +446,19 @@ class TestCoreLogic:
         result = prompt_handler.append_images_to_message(message_with_text, images)
         expected_content = [
             {"type": "text", "text": "Describe these."},
-            {"type": "image_url", "image_url": {"url": "data:..."}}
+            {"type": "image_url", "image_url": {"url": "data:..."}},
        ]
         assert result["content"] == expected_content
-
+
         # Test case 3: Add images to an existing list content
-        message_with_list = {
+        message_with_list = {
+            "role": "user",
+            "content": [{"type": "text", "text": "Initial text."}],
+        }
         result = prompt_handler.append_images_to_message(message_with_list, images)
         expected_content_2 = [
             {"type": "text", "text": "Initial text."},
-            {"type": "image_url", "image_url": {"url": "data:..."}}
+            {"type": "image_url", "image_url": {"url": "data:..."}},
         ]
         assert result["content"] == expected_content_2
 
```
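test_append_images_to_message_logic pins down the observable behaviour of append_images_to_message: a message without images comes back unchanged, plain string content is promoted to a one-element text-part list, and the image parts are appended after the existing content. A small sketch consistent with those assertions is shown below; it illustrates the asserted behaviour and is not ara-cli's actual implementation.

```python
# Sketch consistent with test_append_images_to_message_logic above; an
# illustration of the asserted behaviour, not ara_cli's real implementation.
def append_images_to_message(message: dict, images: list) -> dict:
    if not images:
        return message  # case 1: no images, message returned unchanged
    content = message.get("content", "")
    if isinstance(content, str):
        # case 2: promote plain text to the multi-part content format
        content = [{"type": "text", "text": content}]
    # cases 2 and 3: image parts go after the existing parts
    message["content"] = list(content) + list(images)
    return message


message = {"role": "user", "content": "Describe these."}
images = [{"type": "image_url", "image_url": {"url": "data:..."}}]
assert append_images_to_message(message, images)["content"] == [
    {"type": "text", "text": "Describe these."},
    {"type": "image_url", "image_url": {"url": "data:..."}},
]
```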
```diff
@@ -394,9 +477,9 @@ class TestFileOperations:
         (base_path / "rules").mkdir(parents=True)
         (base_path / "rules" / "b.rules.md").touch()
         (base_path / "rules" / "a.rules.md").touch()
-
+
         m = mock_open()
-        with patch(
+        with patch("builtins.open", m):
             prompt_handler.write_template_files_to_config("rules", m(), str(base_path))
 
         # Check that files were written in sorted order with correct spacing
@@ -429,29 +512,39 @@ class TestFileOperations:
 
         source_file = source_dir / "new.rules.md"
         source_file.write_text("new rules")
-
+
         existing_file = prompt_data / "old.rules.md"
         existing_file.write_text("old rules")
 
         unrelated_source = source_dir / "unrelated.txt"
         unrelated_source.touch()
-
+
         missing_source = source_dir / "nonexistent.rules.md"
-
-        with patch(
+
+        with patch("builtins.print") as mock_print:
             # Test move and copy
-            prompt_handler.move_and_copy_files(
+            prompt_handler.move_and_copy_files(
+                str(source_file), str(prompt_data), str(prompt_archive)
+            )
             assert not existing_file.exists()
             assert (prompt_archive / "old.rules.md").exists()
             assert (prompt_data / "new.rules.md").read_text() == "new rules"
-
+
             # Test skipping unrelated files
-            prompt_handler.move_and_copy_files(
-
-
+            prompt_handler.move_and_copy_files(
+                str(unrelated_source), str(prompt_data), str(prompt_archive)
+            )
+            assert mock_print.call_args_list[-1] == call(
+                "File name unrelated.txt does not end with one of the specified patterns, skipping move and copy."
+            )
+
             # Test warning for missing source
-            prompt_handler.move_and_copy_files(
-
+            prompt_handler.move_and_copy_files(
+                str(missing_source), str(prompt_data), str(prompt_archive)
+            )
+            assert mock_print.call_args_list[-1] == call(
+                f"WARNING: template {missing_source} does not exist."
+            )
 
     def test_extract_and_load_markdown_files_complex_hierarchy(self):
         md_content = """
@@ -468,29 +561,29 @@ class TestFileOperations:
         - [x] l1_again.md
         """
         m = mock_open(read_data=md_content)
-        with patch(
+        with patch("builtins.open", m):
             paths = prompt_handler.extract_and_load_markdown_files("dummy_path")
-
+
         expected = [
-
-
-
-
-
+            "L1/l1.md",
+            "L1/L2-A/l2a.md",
+            "L1/L2-A/L3/l3.md",
+            "L1/L2-B/l2b.md",
+            "L1-Again/l1_again.md",
         ]
         assert paths == expected
 
-    @patch(
-    @patch(
+    @patch("ara_cli.prompt_handler.get_partial_file_content")
+    @patch("ara_cli.prompt_handler.get_file_content")
     def test_load_givens(self, mock_get_content, mock_get_partial, tmp_path):
         # Setup files
         md_config = tmp_path / "config.givens.md"
         text_file = tmp_path / "file.txt"
         image_file = tmp_path / "image.png"
-
+
         text_file.write_text("Full content")
         image_file.write_bytes(b"imagedata")
-
+
         md_content = f"""
         # src
         - [x] {text_file}
@@ -503,11 +596,16 @@ class TestFileOperations:
         # Mocks
         mock_get_content.return_value = "Full content"
         mock_get_partial.return_value = "Partial content"
-
+
         # Execute
-        with patch(
-
-
+        with patch(
+            "ara_cli.prompt_handler.extract_and_load_markdown_files",
+            return_value=[str(text_file), f"[1:2] {text_file}", str(image_file)],
+        ):
+            # The regex in load_givens is flawed, so we manually mock the extracted items
+            match = re.match(
+                r".*?\[(\d+:\d+(?:,\s*\d+:\d+)*)\]\s+(.+)", f"[1:2] {text_file}"
+            )
             assert match is not None
 
             content, image_data = prompt_handler.load_givens(str(md_config))
@@ -519,27 +617,35 @@ class TestFileOperations:
         mock_get_partial.assert_called_once_with(str(text_file), "1:2")
 
         assert len(image_data) == 1
-        assert image_data[0][
+        assert image_data[0]["type"] == "image_url"
         encoded = base64.b64encode(b"imagedata").decode("utf-8")
-        assert encoded in image_data[0][
+        assert encoded in image_data[0]["image_url"]["url"]
         assert f"" in content
 
-    @patch(
-    @patch(
-    @patch(
+    @patch("ara_cli.prompt_handler.load_givens")
+    @patch("ara_cli.prompt_handler.get_file_content")
+    @patch("ara_cli.prompt_handler.find_files_with_endings")
     def test_collect_file_content_by_extension(self, mock_find, mock_get, mock_load):
         prompt_data_path = "/fake/path"
         mock_find.side_effect = [["rules.rules.md"], ["givens.prompt_givens.md"]]
         mock_get.return_value = "Rules content"
         mock_load.return_value = ("Givens content", ["image_data"])
-
+
         extensions = [".rules.md", ".prompt_givens.md"]
-        content, images = prompt_handler.collect_file_content_by_extension(
+        content, images = prompt_handler.collect_file_content_by_extension(
+            prompt_data_path, extensions
+        )
+
+        mock_find.assert_has_calls(
+            [call(prompt_data_path, [ext]) for ext in extensions]
+        )
+        mock_get.assert_called_once_with(
+            os.path.join(prompt_data_path, "rules.rules.md")
+        )
+        mock_load.assert_called_once_with(
+            os.path.join(prompt_data_path, "givens.prompt_givens.md")
+        )
 
-        mock_find.assert_has_calls([call(prompt_data_path, [ext]) for ext in extensions])
-        mock_get.assert_called_once_with(os.path.join(prompt_data_path, "rules.rules.md"))
-        mock_load.assert_called_once_with(os.path.join(prompt_data_path, "givens.prompt_givens.md"))
-
         assert "Rules content" in content
         assert "Givens content" in content
         assert images == ["image_data"]
```
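test_load_givens above checks that a givens entry of the form `[1:2] /path/to/file` makes get_partial_file_content receive the range string "1:2", and it verifies the match with a regex that appears verbatim in the test. The snippet below only demonstrates that regex in isolation; the surrounding file handling is not reproduced.

```python
import re

# Range-extraction regex copied verbatim from the test above; the rest of this
# snippet only demonstrates how it splits an entry into ranges and a path.
GIVENS_RANGE_RE = r".*?\[(\d+:\d+(?:,\s*\d+:\d+)*)\]\s+(.+)"

match = re.match(GIVENS_RANGE_RE, "[1:2] /tmp/file.txt")
assert match is not None
assert match.group(1) == "1:2"            # range forwarded to get_partial_file_content
assert match.group(2) == "/tmp/file.txt"  # path of the given

# Comma-separated ranges on a single entry also match:
assert re.match(GIVENS_RANGE_RE, "[1:2, 10:20] notes.md").group(1) == "1:2, 10:20"
```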
```diff
@@ -554,23 +660,36 @@ class TestArtefactAndTemplateHandling:
         os.chdir(self.root)
         self.mock_classifier = "my_artefact"
         self.mock_param = "my_param"
-
-        self.classifier_patch = patch(
+
+        self.classifier_patch = patch(
+            "ara_cli.prompt_handler.Classifier.get_sub_directory",
+            return_value=self.mock_classifier,
+        )
         self.mock_get_sub_dir = self.classifier_patch.start()
-
+
         yield
-
+
         self.classifier_patch.stop()
 
     def test_prompt_data_directory_creation(self):
-        path = prompt_handler.prompt_data_directory_creation(
-
+        path = prompt_handler.prompt_data_directory_creation(
+            self.mock_classifier, self.mock_param
+        )
+        expected_path = (
+            self.root
+            / "ara"
+            / self.mock_classifier
+            / f"{self.mock_param}.data"
+            / "prompt.data"
+        )
         assert os.path.exists(expected_path)
         assert Path(path).resolve() == expected_path.resolve()
 
-    @patch(
-    @patch(
-    def test_initialize_prompt_templates(
+    @patch("ara_cli.prompt_handler.generate_markdown_listing")
+    @patch("ara_cli.prompt_handler.ArtefactCreator")
+    def test_initialize_prompt_templates(
+        self, mock_artefact_creator, mock_generate_listing, mock_config_manager
+    ):
         # This side effect creates the file that the function expects to read
         def create_dummy_file(*args, **kwargs):
             file_path = args[2]
@@ -578,21 +697,35 @@ class TestArtefactAndTemplateHandling:
             Path(file_path).touch()
 
         mock_generate_listing.side_effect = create_dummy_file
-
-        prompt_handler.initialize_prompt_templates(
-
-
+
+        prompt_handler.initialize_prompt_templates(
+            self.mock_classifier, self.mock_param
+        )
+
+        prompt_data_path = (
+            self.root
+            / "ara"
+            / self.mock_classifier
+            / f"{self.mock_param}.data"
+            / "prompt.data"
+        )
         prompt_log_path = prompt_data_path.parent
 
         mock_artefact_creator.return_value.create_artefact_prompt_files.assert_called_once()
         assert mock_generate_listing.call_count == 2
 
-
-
-
+    @patch("ara_cli.prompt_handler.generate_markdown_listing")
+    def test_generate_config_prompt_template_file(
+        self, mock_generate_listing, mock_config_manager
+    ):
         prompt_data_path = "prompt/data"
-        with patch(
-            prompt_handler.
+        with patch(
+            "ara_cli.prompt_handler.TemplatePathManager.get_template_base_path",
+            return_value="/global/templates",
+        ):
+            prompt_handler.generate_config_prompt_template_file(
+                prompt_data_path, "config.md"
+            )
 
         mock_generate_listing.assert_called_once()
         args, _ = mock_generate_listing.call_args
@@ -601,12 +734,18 @@ class TestArtefactAndTemplateHandling:
         assert "*.blueprint.md" in args[1]
         assert args[2] == os.path.join(prompt_data_path, "config.md")
 
-    @patch(
-    def test_generate_config_prompt_givens_file(
-
-
-        prompt_handler.
-
+    @patch("ara_cli.prompt_handler.generate_markdown_listing")
+    def test_generate_config_prompt_givens_file(
+        self, mock_generate_listing, mock_config_manager
+    ):
+        prompt_data_path = prompt_handler.prompt_data_directory_creation(
+            self.mock_classifier, self.mock_param
+        )
+
+        prompt_handler.generate_config_prompt_givens_file(
+            prompt_data_path, "config.givens.md"
+        )
+
         mock_generate_listing.assert_called_once()
         args, _ = mock_generate_listing.call_args
         assert "ara" in args[0]
@@ -614,15 +753,21 @@ class TestArtefactAndTemplateHandling:
         assert args[1] == ["*.py", "*.md"]
         assert args[2] == os.path.join(prompt_data_path, "config.givens.md")
 
-    @patch(
-    def test_generate_config_prompt_givens_file_marks_artefact(
-
+    @patch("ara_cli.prompt_handler.generate_markdown_listing")
+    def test_generate_config_prompt_givens_file_marks_artefact(
+        self, mock_generate_listing, mock_config_manager
+    ):
+        prompt_data_path = Path(
+            prompt_handler.prompt_data_directory_creation(
+                self.mock_classifier, self.mock_param
+            )
+        )
         config_path = prompt_data_path / "config.givens.md"
         artefact_to_mark = "file.py"
 
         def create_fake_file(*args, **kwargs):
             content = f"- [] some_other_file.txt\n- [] {artefact_to_mark}\n"
-            with open(args[2],
+            with open(args[2], "w") as f:
                 f.write(content)
 
         mock_generate_listing.side_effect = create_fake_file
@@ -630,117 +775,174 @@ class TestArtefactAndTemplateHandling:
         prompt_handler.generate_config_prompt_givens_file(
             str(prompt_data_path), "config.givens.md", artefact_to_mark=artefact_to_mark
         )
-
+
         content = config_path.read_text()
         assert f"- [x] {artefact_to_mark}" in content
         assert f"- [] some_other_file.txt" in content
 
-    @patch(
-    @patch(
-    @patch(
-
-
+    @patch("ara_cli.prompt_handler.extract_and_load_markdown_files")
+    @patch("ara_cli.prompt_handler.move_and_copy_files")
+    @patch(
+        "ara_cli.prompt_handler.TemplatePathManager.get_template_base_path",
+        return_value="/global/templates",
+    )
+    def test_load_selected_prompt_templates(
+        self, mock_base_path, mock_move, mock_extract, mock_config_manager
+    ):
+        prompt_data_path = prompt_handler.prompt_data_directory_creation(
+            self.mock_classifier, self.mock_param
+        )
         config_file = Path(prompt_data_path) / "config.prompt_templates.md"
         config_file.touch()
 
         mock_extract.return_value = [
             "custom-prompt-modules/my_custom.rules.md",
             "prompt-modules/global.intention.md",
-            "unrecognized/file.md"
+            "unrecognized/file.md",
         ]
-
-        with patch(
-            prompt_handler.load_selected_prompt_templates(
+
+        with patch("builtins.print") as mock_print:
+            prompt_handler.load_selected_prompt_templates(
+                self.mock_classifier, self.mock_param
+            )
 
         archive_path = os.path.join(prompt_data_path, "prompt.archive")
 
         assert mock_move.call_count == 2
-        mock_print.assert_any_call(
+        mock_print.assert_any_call(
+            "WARNING: Unrecognized template type for item unrecognized/file.md."
+        )
 
     def test_load_selected_prompt_templates_no_config_file_warns_and_returns(self):
-        prompt_handler.prompt_data_directory_creation(
-
-
-
-
-
-
-
-
-
-
+        prompt_handler.prompt_data_directory_creation(
+            self.mock_classifier, self.mock_param
+        )
+
+        with patch("builtins.print") as mock_print:
+            prompt_handler.load_selected_prompt_templates(
+                self.mock_classifier, self.mock_param
+            )
+
+        mock_print.assert_called_once_with(
+            "WARNING: config.prompt_templates.md does not exist."
+        )
+
+    @patch("ara_cli.prompt_handler.send_prompt")
+    @patch("ara_cli.prompt_handler.collect_file_content_by_extension")
+    @patch(
+        "ara_cli.prompt_handler.append_images_to_message",
+        side_effect=lambda msg, img: msg,
+    )  # Passthrough
+    def test_create_and_send_custom_prompt_handles_empty_chunks(
+        self, mock_append, mock_collect, mock_send, tmp_path
+    ):
         # Create the directory structure the function expects
-        prompt_data_path = Path(
+        prompt_data_path = Path(
+            f"ara/{self.mock_classifier}/{self.mock_param}.data/prompt.data"
+        )
         prompt_data_path.mkdir(parents=True, exist_ok=True)
 
         mock_collect.return_value = ("Test Content", [])
-
+
         mock_chunk_ok = MagicMock()
         mock_chunk_ok.choices[0].delta.content = "response"
         mock_chunk_empty = MagicMock()
         mock_chunk_empty.choices[0].delta.content = None
         mock_send.return_value = iter([mock_chunk_empty, mock_chunk_ok])
 
-        log_file =
+        log_file = (
+            tmp_path
+            / "ara"
+            / self.mock_classifier
+            / f"{self.mock_param}.data"
+            / f"{self.mock_classifier}.prompt_log.md"
+        )
         log_file.touch()
 
-        prompt_handler.create_and_send_custom_prompt(
+        prompt_handler.create_and_send_custom_prompt(
+            self.mock_classifier, self.mock_param
+        )
 
         log_content = log_file.read_text()
         assert "response" in log_content
         assert "None" not in log_content
 
-    @patch(
-    @patch(
-    @patch(
-    def test_create_and_send_custom_prompt(
-
+    @patch("ara_cli.prompt_handler.send_prompt")
+    @patch("ara_cli.prompt_handler.collect_file_content_by_extension")
+    @patch("ara_cli.prompt_handler.append_images_to_message")
+    def test_create_and_send_custom_prompt(
+        self, mock_append_images, mock_collect, mock_send, mock_config_manager
+    ):
+        prompt_handler.prompt_data_directory_creation(
+            self.mock_classifier, self.mock_param
+        )
 
         mock_collect.return_value = ("### GIVENS\ncontent", [{"type": "image_url"}])
-
+
         # append_images_to_message returns a single dict, not a list of dicts.
-        returned_message_dict = {
+        returned_message_dict = {
+            "role": "user",
+            "content": ["### GIVENS\ncontent", {"type": "image_url"}],
+        }
         mock_append_images.return_value = returned_message_dict
 
-        mock_send.return_value = iter(
+        mock_send.return_value = iter(
+            [MagicMock(choices=[MagicMock(delta=MagicMock(content="llm response"))])]
+        )
+
+        prompt_handler.create_and_send_custom_prompt(
+            self.mock_classifier, self.mock_param
+        )
 
-        prompt_handler.create_and_send_custom_prompt(self.mock_classifier, self.mock_param)
-
         mock_collect.assert_called_once()
 
         # Assert that append_images_to_message was called with a single dict (the bug fix)
         mock_append_images.assert_called_once_with(
-            {
-            [{'type': 'image_url'}]
+            {"role": "user", "content": "### GIVENS\ncontent"}, [{"type": "image_url"}]
         )
 
         # Assert that send_prompt was called with a list containing the dict returned from append_images_to_message
         mock_send.assert_called_once_with([returned_message_dict])
 
-        log_file =
+        log_file = (
+            self.root
+            / "ara"
+            / self.mock_classifier
+            / f"{self.mock_param}.data"
+            / f"{self.mock_classifier}.prompt_log.md"
+        )
         assert "llm response" in log_file.read_text()
 
-    @patch(
-    def test_generate_config_prompt_global_givens_file(
+    @patch("ara_cli.global_file_lister.generate_global_markdown_listing")
+    def test_generate_config_prompt_global_givens_file(
+        self, mock_global_lister, mock_config_manager, mock_config
+    ):
         """Tests that the global givens file is generated correctly when global_dirs are present."""
         prompt_data_path = self.root / "prompt/data"
         prompt_data_path.mkdir(parents=True)
-
+
         # Scenario 1: No global_dirs are configured, should return early and do nothing.
         mock_config.global_dirs = []
-        prompt_handler.generate_config_prompt_global_givens_file(
+        prompt_handler.generate_config_prompt_global_givens_file(
+            str(prompt_data_path), "global.md"
+        )
         mock_global_lister.assert_not_called()
-
+
         # Scenario 2: With global_dirs, should call the global lister with correct arguments.
-        mock_config.global_dirs = [
+        mock_config.global_dirs = [
+            {"source_dir": "/global/src1"},
+            {"path": "/global/src2"},
+        ]
         mock_config.ara_prompt_given_list_includes = ["*.py", "*.md"]
-
+
         # Use patch to suppress print output during the test
-        with patch(
-            prompt_handler.generate_config_prompt_global_givens_file(
-
+        with patch("builtins.print"):
+            prompt_handler.generate_config_prompt_global_givens_file(
+                str(prompt_data_path), "global.md"
+            )
+
         mock_global_lister.assert_called_once()
         args, _ = mock_global_lister.call_args
         assert args[0] == ["/global/src1", "/global/src2"]
         assert args[1] == ["*.py", "*.md"]
-        assert args[2] == os.path.join(prompt_data_path, "global.md")
+        assert args[2] == os.path.join(prompt_data_path, "global.md")
```