ara-cli 0.1.10.0__py3-none-any.whl → 0.1.13.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ara_cli/__init__.py +51 -6
- ara_cli/__main__.py +270 -103
- ara_cli/ara_command_action.py +106 -63
- ara_cli/ara_config.py +187 -128
- ara_cli/ara_subcommands/__init__.py +0 -0
- ara_cli/ara_subcommands/autofix.py +26 -0
- ara_cli/ara_subcommands/chat.py +27 -0
- ara_cli/ara_subcommands/classifier_directory.py +16 -0
- ara_cli/ara_subcommands/common.py +100 -0
- ara_cli/ara_subcommands/config.py +221 -0
- ara_cli/ara_subcommands/convert.py +43 -0
- ara_cli/ara_subcommands/create.py +75 -0
- ara_cli/ara_subcommands/delete.py +22 -0
- ara_cli/ara_subcommands/extract.py +22 -0
- ara_cli/ara_subcommands/fetch.py +41 -0
- ara_cli/ara_subcommands/fetch_agents.py +22 -0
- ara_cli/ara_subcommands/fetch_scripts.py +19 -0
- ara_cli/ara_subcommands/fetch_templates.py +19 -0
- ara_cli/ara_subcommands/list.py +139 -0
- ara_cli/ara_subcommands/list_tags.py +25 -0
- ara_cli/ara_subcommands/load.py +48 -0
- ara_cli/ara_subcommands/prompt.py +136 -0
- ara_cli/ara_subcommands/read.py +47 -0
- ara_cli/ara_subcommands/read_status.py +20 -0
- ara_cli/ara_subcommands/read_user.py +20 -0
- ara_cli/ara_subcommands/reconnect.py +27 -0
- ara_cli/ara_subcommands/rename.py +22 -0
- ara_cli/ara_subcommands/scan.py +14 -0
- ara_cli/ara_subcommands/set_status.py +22 -0
- ara_cli/ara_subcommands/set_user.py +22 -0
- ara_cli/ara_subcommands/template.py +16 -0
- ara_cli/artefact_autofix.py +154 -63
- ara_cli/artefact_converter.py +256 -0
- ara_cli/artefact_models/artefact_model.py +106 -25
- ara_cli/artefact_models/artefact_templates.py +20 -10
- ara_cli/artefact_models/epic_artefact_model.py +11 -2
- ara_cli/artefact_models/feature_artefact_model.py +31 -1
- ara_cli/artefact_models/userstory_artefact_model.py +15 -3
- ara_cli/artefact_scan.py +2 -2
- ara_cli/chat.py +283 -80
- ara_cli/chat_agent/__init__.py +0 -0
- ara_cli/chat_agent/agent_process_manager.py +155 -0
- ara_cli/chat_script_runner/__init__.py +0 -0
- ara_cli/chat_script_runner/script_completer.py +23 -0
- ara_cli/chat_script_runner/script_finder.py +41 -0
- ara_cli/chat_script_runner/script_lister.py +36 -0
- ara_cli/chat_script_runner/script_runner.py +36 -0
- ara_cli/chat_web_search/__init__.py +0 -0
- ara_cli/chat_web_search/web_search.py +263 -0
- ara_cli/commands/agent_run_command.py +98 -0
- ara_cli/commands/fetch_agents_command.py +106 -0
- ara_cli/commands/fetch_scripts_command.py +43 -0
- ara_cli/commands/fetch_templates_command.py +39 -0
- ara_cli/commands/fetch_templates_commands.py +39 -0
- ara_cli/commands/list_agents_command.py +39 -0
- ara_cli/commands/read_command.py +17 -4
- ara_cli/completers.py +180 -0
- ara_cli/constants.py +2 -0
- ara_cli/directory_navigator.py +37 -4
- ara_cli/file_loaders/text_file_loader.py +2 -2
- ara_cli/global_file_lister.py +5 -15
- ara_cli/llm_utils.py +58 -0
- ara_cli/prompt_chat.py +20 -4
- ara_cli/prompt_extractor.py +199 -76
- ara_cli/prompt_handler.py +160 -59
- ara_cli/tag_extractor.py +38 -18
- ara_cli/template_loader.py +3 -2
- ara_cli/template_manager.py +52 -21
- ara_cli/templates/global-scripts/hello_global.py +1 -0
- ara_cli/templates/prompt-modules/commands/add_scenarios_for_new_behaviour.feature_creation_agent.commands.md +1 -0
- ara_cli/templates/prompt-modules/commands/align_feature_with_implementation_changes.interview_agent.commands.md +1 -0
- ara_cli/templates/prompt-modules/commands/analyze_codebase_and_plan_tasks.interview_agent.commands.md +1 -0
- ara_cli/templates/prompt-modules/commands/choose_best_parent_artefact.interview_agent.commands.md +1 -0
- ara_cli/templates/prompt-modules/commands/create_tasks_from_artefact_content.interview_agent.commands.md +1 -0
- ara_cli/templates/prompt-modules/commands/create_tests_for_uncovered_modules.test_generation_agent.commands.md +1 -0
- ara_cli/templates/prompt-modules/commands/derive_features_from_video_description.feature_creation_agent.commands.md +1 -0
- ara_cli/templates/prompt-modules/commands/describe_agent_capabilities.agent.commands.md +1 -0
- ara_cli/templates/prompt-modules/commands/empty.commands.md +2 -12
- ara_cli/templates/prompt-modules/commands/execute_scoped_todos_in_task.interview_agent.commands.md +1 -0
- ara_cli/templates/prompt-modules/commands/explain_single_file_purpose.interview_agent.commands.md +1 -0
- ara_cli/templates/prompt-modules/commands/extract_file_information_bullets.interview_agent.commands.md +1 -0
- ara_cli/templates/prompt-modules/commands/extract_general.commands.md +12 -0
- ara_cli/templates/prompt-modules/commands/extract_markdown.commands.md +11 -0
- ara_cli/templates/prompt-modules/commands/extract_python.commands.md +13 -0
- ara_cli/templates/prompt-modules/commands/feature_add_or_modifiy_specified_behavior.commands.md +36 -0
- ara_cli/templates/prompt-modules/commands/feature_generate_initial_specified_bevahior.commands.md +53 -0
- ara_cli/templates/prompt-modules/commands/fix_failing_behave_step_definitions.interview_agent.commands.md +1 -0
- ara_cli/templates/prompt-modules/commands/fix_failing_pytest_tests.interview_agent.commands.md +1 -0
- ara_cli/templates/prompt-modules/commands/general_instruction_policy.commands.md +47 -0
- ara_cli/templates/prompt-modules/commands/generate_and_fix_pytest_tests.test_generation_agent.commands.md +1 -0
- ara_cli/templates/prompt-modules/commands/prompt_template_tech_stack_transformer.commands.md +95 -0
- ara_cli/templates/prompt-modules/commands/python_bug_fixing_code.commands.md +34 -0
- ara_cli/templates/prompt-modules/commands/python_generate_code.commands.md +27 -0
- ara_cli/templates/prompt-modules/commands/python_refactoring_code.commands.md +39 -0
- ara_cli/templates/prompt-modules/commands/python_step_definitions_generation_and_fixing.commands.md +40 -0
- ara_cli/templates/prompt-modules/commands/python_unittest_generation_and_fixing.commands.md +48 -0
- ara_cli/templates/prompt-modules/commands/suggest_next_story_child_tasks.interview_agent.commands.md +1 -0
- ara_cli/templates/prompt-modules/commands/summarize_or_transcribe_media.interview_agent.commands.md +1 -0
- ara_cli/templates/prompt-modules/commands/update_feature_to_match_implementation.feature_creation_agent.commands.md +1 -0
- ara_cli/templates/prompt-modules/commands/update_user_story_with_requirements.interview_agent.commands.md +1 -0
- ara_cli/version.py +1 -1
- {ara_cli-0.1.10.0.dist-info → ara_cli-0.1.13.3.dist-info}/METADATA +34 -1
- {ara_cli-0.1.10.0.dist-info → ara_cli-0.1.13.3.dist-info}/RECORD +123 -54
- tests/test_ara_command_action.py +31 -19
- tests/test_ara_config.py +177 -90
- tests/test_artefact_autofix.py +170 -97
- tests/test_artefact_autofix_integration.py +495 -0
- tests/test_artefact_converter.py +357 -0
- tests/test_artefact_extraction.py +564 -0
- tests/test_artefact_scan.py +1 -1
- tests/test_chat.py +162 -126
- tests/test_chat_givens_images.py +603 -0
- tests/test_chat_script_runner.py +454 -0
- tests/test_global_file_lister.py +1 -1
- tests/test_llm_utils.py +164 -0
- tests/test_prompt_chat.py +343 -0
- tests/test_prompt_extractor.py +683 -0
- tests/test_prompt_handler.py +12 -4
- tests/test_tag_extractor.py +19 -13
- tests/test_web_search.py +467 -0
- ara_cli/ara_command_parser.py +0 -605
- ara_cli/templates/prompt-modules/blueprints/complete_pytest_unittest.blueprint.md +0 -27
- ara_cli/templates/prompt-modules/blueprints/task_todo_list_implement_feature_BDD_way.blueprint.md +0 -30
- ara_cli/templates/prompt-modules/commands/artefact_classification.commands.md +0 -9
- ara_cli/templates/prompt-modules/commands/artefact_extension.commands.md +0 -17
- ara_cli/templates/prompt-modules/commands/artefact_formulation.commands.md +0 -14
- ara_cli/templates/prompt-modules/commands/behave_step_generation.commands.md +0 -102
- ara_cli/templates/prompt-modules/commands/code_generation_complex.commands.md +0 -20
- ara_cli/templates/prompt-modules/commands/code_generation_simple.commands.md +0 -13
- ara_cli/templates/prompt-modules/commands/error_fixing.commands.md +0 -20
- ara_cli/templates/prompt-modules/commands/feature_file_update.commands.md +0 -18
- ara_cli/templates/prompt-modules/commands/feature_formulation.commands.md +0 -43
- ara_cli/templates/prompt-modules/commands/js_code_generation_simple.commands.md +0 -13
- ara_cli/templates/prompt-modules/commands/refactoring.commands.md +0 -15
- ara_cli/templates/prompt-modules/commands/refactoring_analysis.commands.md +0 -9
- ara_cli/templates/prompt-modules/commands/reverse_engineer_feature_file.commands.md +0 -15
- ara_cli/templates/prompt-modules/commands/reverse_engineer_program_flow.commands.md +0 -19
- {ara_cli-0.1.10.0.dist-info → ara_cli-0.1.13.3.dist-info}/WHEEL +0 -0
- {ara_cli-0.1.10.0.dist-info → ara_cli-0.1.13.3.dist-info}/entry_points.txt +0 -0
- {ara_cli-0.1.10.0.dist-info → ara_cli-0.1.13.3.dist-info}/top_level.txt +0 -0
ara_cli/prompt_handler.py
CHANGED
@@ -60,7 +60,12 @@ class LLMSingleton:
         # Check if there was an authentication error
         stderr_output = captured_stderr.getvalue()
         if "Authentication error" in stderr_output:
-            warnings.warn(
+            warnings.warn(
+                "Invalid Langfuse credentials - prompt tracing disabled and using default prompts. "
+                "Set environment variables 'ARA_CLI_LANGFUSE_PUBLIC_KEY', 'ARA_CLI_LANGFUSE_SECRET_KEY', "
+                "'LANGFUSE_HOST' and restart application to use Langfuse capabilities",
+                UserWarning,
+            )

         LLMSingleton._default_model = default_model_id
         LLMSingleton._extraction_model = extraction_model_id
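The warning text above names the three environment variables that enable Langfuse-backed prompt tracing. A minimal sketch of wiring them up before the CLI starts (the variable names come from the warning; the key values and host URL are placeholders, not ara defaults):

```python
import os

# Placeholder values - substitute your own Langfuse project keys and host.
os.environ["ARA_CLI_LANGFUSE_PUBLIC_KEY"] = "pk-lf-..."
os.environ["ARA_CLI_LANGFUSE_SECRET_KEY"] = "sk-lf-..."
os.environ["LANGFUSE_HOST"] = "https://cloud.langfuse.com"  # or a self-hosted instance
```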
@@ -160,6 +165,50 @@ def _is_valid_message(message: dict) -> bool:
     return False


+def _norm(p: str) -> str:
+    """Normalize slashes and collapse .. segments."""
+    return os.path.normpath(p) if p else p
+
+
+def resolve_existing_path(rel_or_abs_path: str, anchor_dir: str) -> str:
+    """
+    Resolve a potentially relative path to an existing absolute path.
+
+    Strategy:
+    - If already absolute and exists -> return it.
+    - Else, try from the anchor_dir.
+    - Else, walk up parent directories from anchor_dir and try joining at each level.
+    - If nothing is found, return the normalized original (will fail later with clear message).
+    """
+    if not rel_or_abs_path:
+        return rel_or_abs_path
+
+    candidate = _norm(rel_or_abs_path)
+
+    if os.path.isabs(candidate) and os.path.exists(candidate):
+        return candidate
+
+    anchor_dir = os.path.abspath(anchor_dir or os.getcwd())
+
+    # Try from anchor dir directly
+    direct = _norm(os.path.join(anchor_dir, candidate))
+    if os.path.exists(direct):
+        return direct
+
+    # Walk parents
+    cur = anchor_dir
+    prev = None
+    while cur and cur != prev:
+        test = _norm(os.path.join(cur, candidate))
+        if os.path.exists(test):
+            return test
+        prev = cur
+        cur = os.path.dirname(cur)
+
+    # Give back normalized candidate; open() will raise, but at least path is clean
+    return candidate
+
+
 def send_prompt(prompt, purpose="default"):
     """Prepares and sends a prompt to the LLM, streaming the response."""
     chat_instance = LLMSingleton.get_instance()
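`resolve_existing_path` is the backbone of this release's path handling: it accepts the path as given, then tries it relative to an anchor directory, then walks that directory's parents until something exists on disk. A hedged usage sketch (the directory layout is invented; it assumes the helper is importable from `ara_cli.prompt_handler` as added above):

```python
import os
import tempfile

from ara_cli.prompt_handler import resolve_existing_path  # added in this version

# Invented layout: <root>/ara/specs/feature.md, with the anchor buried two levels deeper.
root = tempfile.mkdtemp()
os.makedirs(os.path.join(root, "ara", "specs", "deep", "nested"))
with open(os.path.join(root, "ara", "specs", "feature.md"), "w") as f:
    f.write("Feature: demo\n")

anchor = os.path.join(root, "ara", "specs", "deep", "nested")

# "ara/specs/feature.md" does not exist under the anchor itself, so the helper
# walks up the parents until it reaches <root>, where the joined path exists.
print(resolve_existing_path("ara/specs/feature.md", anchor))
# -> <root>/ara/specs/feature.md
```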
@@ -213,8 +262,6 @@ def describe_image(image_path: str) -> str:
     Returns:
         Text description of the image
     """
-    import base64
-
     with LLMSingleton.get_instance().langfuse.start_as_current_span(
         name="ara-cli/describe-image"
     ) as span:
@@ -234,14 +281,19 @@ def describe_image(image_path: str) -> str:
         # Fallback to default prompt if Langfuse prompt is not available
         if not describe_image_prompt:
             logging.info("Using default describe-image prompt.")
-            describe_image_prompt =
+            describe_image_prompt = (
+                "Please describe this image in detail. If it contains text, transcribe it exactly. "
+                "If it's a diagram or chart, explain its structure and content. If it's a photo or illustration, "
+                "describe what you see."
+            )

-        #
-
+        # Resolve and read the image
+        resolved_image_path = resolve_existing_path(image_path, os.getcwd())
+        with open(resolved_image_path, "rb") as image_file:
             base64_image = base64.b64encode(image_file.read()).decode("utf-8")

         # Determine image type
-        image_extension = os.path.splitext(
+        image_extension = os.path.splitext(resolved_image_path)[1].lower()
         mime_type = {
             ".png": "image/png",
             ".jpg": "image/jpeg",
@@ -256,7 +308,7 @@ def describe_image(image_path: str) -> str:
                 "content": [
                     {
                         "type": "text",
-                        "text":
+                        "text": describe_image_prompt,
                     },
                     {
                         "type": "image_url",
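For orientation: `describe_image` now sends the prompt text and the base64-encoded image together in one multimodal user message, with the MIME type picked from the file extension. A standalone sketch of that payload shape (not ara's exact function, but it mirrors the mapping and structure in the hunks above):

```python
import base64
import os


def build_image_message(image_path: str, prompt_text: str) -> dict:
    """Build one multimodal user message carrying prompt text plus an inline data-URI image."""
    with open(image_path, "rb") as image_file:
        base64_image = base64.b64encode(image_file.read()).decode("utf-8")

    # Same extension-to-MIME mapping used in the diff, defaulting to PNG.
    mime_type = {
        ".png": "image/png",
        ".jpg": "image/jpeg",
        ".jpeg": "image/jpeg",
        ".gif": "image/gif",
        ".bmp": "image/bmp",
    }.get(os.path.splitext(image_path)[1].lower(), "image/png")

    return {
        "role": "user",
        "content": [
            {"type": "text", "text": prompt_text},
            {
                "type": "image_url",
                "image_url": {"url": f"data:{mime_type};base64,{base64_image}"},
            },
        ],
    }
```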
@@ -288,7 +340,9 @@ def describe_image(image_path: str) -> str:
 def append_headings(classifier, param, heading_name):
     sub_directory = Classifier.get_sub_directory(classifier)

-    artefact_data_path =
+    artefact_data_path = _norm(
+        f"ara/{sub_directory}/{param}.data/{classifier}.prompt_log.md"
+    )

     # Check if the file exists, and if not, create an empty file
     if not os.path.exists(artefact_data_path):
@@ -309,15 +363,15 @@ def append_headings(classifier, param, heading_name):

 def write_prompt_result(classifier, param, text):
     sub_directory = Classifier.get_sub_directory(classifier)
-
-
-
+    artefact_data_path = _norm(
+        f"ara/{sub_directory}/{param}.data/{classifier}.prompt_log.md"
+    )
     write_string_to_file(artefact_data_path, text, "a")


 def prompt_data_directory_creation(classifier, parameter):
     sub_directory = Classifier.get_sub_directory(classifier)
-    prompt_data_path = f"ara/{sub_directory}/{parameter}.data/prompt.data"
+    prompt_data_path = _norm(f"ara/{sub_directory}/{parameter}.data/prompt.data")
     if not exists(prompt_data_path):
         makedirs(prompt_data_path)
     return prompt_data_path
@@ -347,9 +401,10 @@ def initialize_prompt_templates(classifier, parameter):
         artefact_to_mark=f"{parameter}.{classifier}",
     )

-
-
-
+    # Only once (was duplicated before)
+    generate_config_prompt_global_givens_file(
+        prompt_data_path, "config.prompt_global_givens.md"
+    )


 def write_template_files_to_config(template_type, config_file, base_template_path):
@@ -361,7 +416,7 @@ def write_template_files_to_config(template_type, config_file, base_template_path):

 def load_selected_prompt_templates(classifier, parameter):
     sub_directory = Classifier.get_sub_directory(classifier)
-    prompt_data_path = f"ara/{sub_directory}/{parameter}.data/prompt.data"
+    prompt_data_path = _norm(f"ara/{sub_directory}/{parameter}.data/prompt.data")
     config_file_path = os.path.join(prompt_data_path, "config.prompt_templates.md")

     if not os.path.exists(config_file_path):
@@ -464,7 +519,9 @@ def move_and_copy_files(source_path, prompt_data_path, prompt_archive_path):

 def extract_and_load_markdown_files(md_prompt_file_path):
     """
-    Extracts markdown files paths based on checked items and constructs proper paths
+    Extracts markdown files paths based on checked items and constructs proper paths
+    respecting markdown header hierarchy. **Returns normalized relative paths**
+    (not resolved), and resolution happens later relative to the config file dir.
     """
     header_stack = []
     path_accumulator = []
@@ -480,53 +537,73 @@ def extract_and_load_markdown_files(md_prompt_file_path):
             header_stack.append(header)
         elif "[x]" in line:
             relative_path = line.split("]")[-1].strip()
-
-
+            # Use os.path.join for OS-safe joining, then normalize
+            full_rel_path = os.path.join(*header_stack, relative_path) if header_stack else relative_path
+            path_accumulator.append(_norm(full_rel_path))
     return path_accumulator


 def load_givens(file_path):
+    """
+    Reads marked givens from a config markdown and returns:
+    - combined markdown content (including code fences / images)
+    - a list of image data dicts for the multimodal message
+    Paths inside the markdown are resolved robustly relative to the config file directory (and its parents).
+    """
     content = ""
     image_data_list = []
     markdown_items = extract_and_load_markdown_files(file_path)

-    # Only proceed and add the header if there are marked items to load.
     if not markdown_items:
         return "", []

     content = "### GIVENS\n\n"

+    anchor_dir = os.path.dirname(os.path.abspath(file_path))
+
     for item in markdown_items:
-
-
+        resolved = resolve_existing_path(item, anchor_dir)
+        # Keep the listing line readable, show the original relative item
+        content += item + "\n"
+
+        ext = os.path.splitext(resolved)[1].lower()
+
+        # Image branch
+        if ext in (".png", ".jpeg", ".jpg", ".gif", ".bmp"):
+            with open(resolved, "rb") as image_file:
                 base64_image = base64.b64encode(image_file.read()).decode("utf-8")
+
+            mime_type = {
+                ".png": "image/png",
+                ".jpg": "image/jpeg",
+                ".jpeg": "image/jpeg",
+                ".gif": "image/gif",
+                ".bmp": "image/bmp",
+            }.get(ext, "image/png")
+
             image_data_list.append(
                 {
                     "type": "image_url",
-                    "image_url": {"url": f"data:
+                    "image_url": {"url": f"data:{mime_type};base64,{base64_image}"},
                 }
             )
-
-            content += f"
-            # TODO re.match can not split the item with [] correctly and extract the line numbers
-            # TODO logic of subsections is not supported by the update algorithm of the config prompt givens updater
-            # TODO extract in lines of *.md files potential images and add them to the image list
+            # Also embed inline for the prompt markdown (use png as a neutral default for data URI)
+            content += f"\n"

+        else:
+            # Check if the item specifies line ranges: e.g. "[10:20,25:30] filePath"
             match = re.match(r".*?\[(\d+:\d+(?:,\s*\d+:\d+)*)\]\s+(.+)", item)
             if match:
                 line_ranges, file_name = match.groups()
-
-                content +=
+                resolved_sub = resolve_existing_path(file_name, anchor_dir)
+                content += "```\n"
+                content += get_partial_file_content(resolved_sub, line_ranges) + "\n"
                 content += "```\n\n"
             else:
-                content +=
-                content += get_file_content(
+                content += "```\n"
+                content += get_file_content(resolved) + "\n"
                 content += "```\n\n"
+
     return content, image_data_list


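The checked-item branch above turns the markdown header hierarchy into a relative path and only normalizes it; resolving against the config file's directory (and its parents) happens later in `load_givens`. A small sketch of just that joining step, with an invented header stack:

```python
import os

# Invented parser state: headers seen so far act as directory segments,
# and the checked "[x]" line names the file.
header_stack = ["ara", "features"]
relative_path = "login.feature"

full_rel_path = os.path.join(*header_stack, relative_path) if header_stack else relative_path
print(os.path.normpath(full_rel_path))
# -> ara/features/login.feature  (backslash-separated on Windows)
```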
@@ -535,7 +612,7 @@ def get_partial_file_content(file_name, line_ranges):
     Reads specific lines from a file based on the line ranges provided.

     Args:
-        file_name (str): The path to the file.
+        file_name (str): The path to the file (absolute or relative, already resolved by caller).
         line_ranges (str): A string representing the line ranges to read, e.g., '10:20,25:30'.

     Returns:
@@ -585,9 +662,7 @@ def prepend_system_prompt(message_list):
     # Fallback to default prompt if Langfuse prompt is not available
     if not system_prompt:
         logging.info("Using default system prompt.")
-        system_prompt =
-            "You are a helpful assistant that can process both text and images."
-        )
+        system_prompt = "You are a helpful assistant that can process both text and images."

     # Prepend the system prompt
     system_prompt_message = {"role": "system", "content": system_prompt}
@@ -597,6 +672,9 @@ def prepend_system_prompt(message_list):


 def append_images_to_message(message, image_data_list):
+    """
+    Appends image data list to a single message dict (NOT to a list).
+    """
     logger = logging.getLogger(__name__)

     logger.debug(
@@ -607,13 +685,17 @@ def append_images_to_message(message, image_data_list):
         logger.debug("No images to append, returning original message")
         return message

-    message_content = message
+    message_content = message.get("content")
     logger.debug(f"Original message content: {message_content}")

     if isinstance(message_content, str):
         message["content"] = [{"type": "text", "text": message_content}]

-    message["content"]
+    if isinstance(message["content"], list):
+        message["content"].extend(image_data_list)
+    else:
+        # If somehow content is not list or str, coerce to list
+        message["content"] = [{"type": "text", "text": str(message_content)}] + image_data_list

     logger.debug(f"Updated message content with {len(image_data_list)} images")

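The change above is the crux of the image-handling fix: `append_images_to_message` receives a single message dict, promotes a plain string `content` to the list form, and then extends that list with the image entries. A minimal sketch of the before/after shape (standalone; the data URI is a placeholder):

```python
message = {"role": "user", "content": "Describe the attached diagram."}
image_data_list = [
    {"type": "image_url", "image_url": {"url": "data:image/png;base64,..."}},
]

# A plain string becomes a one-element text list...
message["content"] = [{"type": "text", "text": message["content"]}]
# ...and the image entries are appended to that same list.
message["content"].extend(image_data_list)

# Resulting shape:
# {"role": "user",
#  "content": [{"type": "text", "text": "Describe the attached diagram."},
#              {"type": "image_url", "image_url": {"url": "data:image/png;base64,..."}}]}
```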
@@ -622,11 +704,20 @@ def append_images_to_message(message, image_data_list):

 def create_and_send_custom_prompt(classifier, parameter):
     sub_directory = Classifier.get_sub_directory(classifier)
-    prompt_data_path = f"ara/{sub_directory}/{parameter}.data/prompt.data"
+    prompt_data_path = _norm(f"ara/{sub_directory}/{parameter}.data/prompt.data")
     prompt_file_path_markdown = join(prompt_data_path, f"{classifier}.prompt.md")

-    extensions = [
-
+    extensions = [
+        ".blueprint.md",
+        ".rules.md",
+        ".prompt_givens.md",
+        ".prompt_global_givens.md",
+        ".intention.md",
+        ".commands.md",
+    ]
+    combined_content_markdown, image_data_list = collect_file_content_by_extension(
+        prompt_data_path, extensions
+    )

     with open(prompt_file_path_markdown, "w", encoding="utf-8") as file:
         file.write(combined_content_markdown)
@@ -635,14 +726,14 @@ def create_and_send_custom_prompt(classifier, parameter):
     append_headings(classifier, parameter, "prompt")
     write_prompt_result(classifier, parameter, prompt)

+    # Build message and append images correctly (fixed)
     message = {"role": "user", "content": combined_content_markdown}
-
+    message = append_images_to_message(message, image_data_list)
     message_list = [message]

-    message_list = append_images_to_message(message_list, image_data_list)
     append_headings(classifier, parameter, "result")

-    artefact_data_path = (
+    artefact_data_path = _norm(
         f"ara/{sub_directory}/{parameter}.data/{classifier}.prompt_log.md"
     )
     with open(artefact_data_path, "a", encoding="utf-8") as file:
@@ -652,7 +743,6 @@ def create_and_send_custom_prompt(classifier, parameter):
                 continue
             file.write(chunk_content)
             file.flush()
-    # write_prompt_result(classifier, parameter, response)


 def generate_config_prompt_template_file(
@@ -661,7 +751,8 @@ def generate_config_prompt_template_file(
     config_prompt_templates_path = os.path.join(
         prompt_data_path, config_prompt_templates_name
     )
-
+    # Use instance method consistently
+    config = ConfigManager().get_config()
     global_prompt_template_path = TemplatePathManager.get_template_base_path()
     dir_list = ["ara/.araconfig/custom-prompt-modules"] + [
         f"{os.path.join(global_prompt_template_path,'prompt-modules')}"
@@ -678,7 +769,7 @@ def generate_config_prompt_givens_file(
     config_prompt_givens_path = os.path.join(
         prompt_data_path, config_prompt_givens_name
     )
-    config = ConfigManager.get_config()
+    config = ConfigManager().get_config()
     dir_list = (
         ["ara"]
         + [path for d in config.ext_code_dirs for path in d.values()]
@@ -712,14 +803,24 @@ def generate_config_prompt_givens_file(
     with open(config_prompt_givens_path, "w", encoding="utf-8") as file:
         file.write("".join(updated_listing))

-
+
+def generate_config_prompt_global_givens_file(
+    prompt_data_path, config_prompt_givens_name, artefact_to_mark=None
+):
     from ara_cli.global_file_lister import generate_global_markdown_listing
-    config_prompt_givens_path = os.path.join(prompt_data_path, config_prompt_givens_name)
-    config = ConfigManager.get_config()

-
+    config_prompt_givens_path = os.path.join(
+        prompt_data_path, config_prompt_givens_name
+    )
+    config = ConfigManager().get_config()
+
+    if not hasattr(config, "global_dirs") or not config.global_dirs:
         return

     dir_list = [path for d in config.global_dirs for path in d.values()]
-    print(
-
+    print(
+        f"used {dir_list} for global prompt givens file listing with absolute paths"
+    )
+    generate_global_markdown_listing(
+        dir_list, config.ara_prompt_given_list_includes, config_prompt_givens_path
+    )
ara_cli/tag_extractor.py
CHANGED
@@ -6,18 +6,17 @@ from ara_cli.artefact_models.artefact_data_retrieval import (
     artefact_tags_retrieval,
 )

-
 class TagExtractor:
     def __init__(self, file_system=None):
         self.file_system = file_system or os

-    def filter_column(self,
+    def filter_column(self, tag_groups, filtered_artefacts):
         status_tags = {"to-do", "in-progress", "review", "done", "closed"}

         artefacts_to_process = self._get_artefacts_without_status_tags(
             filtered_artefacts, status_tags
         )
-        self._add_non_status_tags_to_set(
+        self._add_non_status_tags_to_set(tag_groups, artefacts_to_process, status_tags)

     def _get_artefacts_without_status_tags(self, filtered_artefacts, status_tags):
         artefacts_to_process = []
@@ -32,7 +31,7 @@ class TagExtractor:
         tags = artefact.tags + [artefact.status] if artefact.status else artefact.tags
         return set(tag for tag in tags if tag is not None)

-    def _add_non_status_tags_to_set(self,
+    def _add_non_status_tags_to_set(self, tag_groups, artefacts, status_tags):
         for artefact in artefacts:
             tags = [
                 tag for tag in (artefact.tags + [artefact.status]) if tag is not None
@@ -40,23 +39,45 @@ class TagExtractor:
             for tag in tags:
                 if self._is_skipped_tag(tag, status_tags):
                     continue
-
+                key = tag.lower()
+                if key not in tag_groups:
+                    tag_groups[key] = set()
+                tag_groups[key].add(tag)

     def _is_skipped_tag(self, tag, status_tags):
         return (
             tag in status_tags or tag.startswith("priority_") or tag.startswith("user_")
         )

-    def
+    def _collect_all_tags(self, artefact):
+        """Collect all tags from an artefact including user tags and author."""
+        all_tags = []
+        all_tags.extend(artefact.tags)
+
+        if artefact.status:
+            all_tags.append(artefact.status)
+
+        user_tags = [f"user_{tag}" for tag in artefact.users]
+        all_tags.extend(user_tags)
+
+        if hasattr(artefact, 'author') and artefact.author:
+            all_tags.append(artefact.author)
+
+        return [tag for tag in all_tags if tag is not None]
+
+    def _add_tags_to_groups(self, tag_groups, tags):
+        """Add tags to tag groups."""
+        for tag in tags:
+            key = tag.lower()
+            if key not in tag_groups:
+                tag_groups[key] = set()
+            tag_groups[key].add(tag)
+
+    def add_to_tags_set(self, tag_groups, filtered_artefacts):
         for artefact_list in filtered_artefacts.values():
             for artefact in artefact_list:
-
-
-                    tag
-                    for tag in (artefact.tags + [artefact.status] + user_tags)
-                    if tag is not None
-                ]
-                tags_set.update(tags)
+                all_tags = self._collect_all_tags(artefact)
+                self._add_tags_to_groups(tag_groups, all_tags)

     def extract_tags(
         self,
@@ -81,12 +102,11 @@ class TagExtractor:
             tag_retrieval=artefact_tags_retrieval,
         )

-
+        tag_groups = {}

         if filtered_extra_column:
-            self.filter_column(
+            self.filter_column(tag_groups, filtered_artefacts)
         else:
-            self.add_to_tags_set(
+            self.add_to_tags_set(tag_groups, filtered_artefacts)

-
-        return sorted_tags
+        return tag_groups
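Net effect of these changes: `extract_tags` no longer returns a flat sorted list but a `tag_groups` dict keyed by the lowercased tag, with a set of the original spellings as each value. A short sketch of the grouping (tag names are invented):

```python
tag_groups = {}

# Same grouping logic as _add_tags_to_groups, applied to a few invented tags.
for tag in ["Backend", "backend", "API", "user_alice", "in-progress"]:
    key = tag.lower()
    if key not in tag_groups:
        tag_groups[key] = set()
    tag_groups[key].add(tag)

print(tag_groups)
# -> {'backend': {'Backend', 'backend'}, 'api': {'API'},
#     'user_alice': {'user_alice'}, 'in-progress': {'in-progress'}}
```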
ara_cli/template_loader.py
CHANGED
@@ -4,6 +4,7 @@ import glob
 from ara_cli.template_manager import TemplatePathManager
 from ara_cli.ara_config import ConfigManager
 from ara_cli.directory_navigator import DirectoryNavigator
+from . import ROLE_PROMPT


 class TemplateLoader:
@@ -147,7 +148,7 @@ class TemplateLoader:
         # Direct file loading for CLI usage
         try:
             with open(file_path, 'r', encoding='utf-8') as template_file:
-                template_content = template_file.read()
+                template_content = template_file.read().replace('\r\n', '\n')

             # Add prompt tag if needed
             self._add_prompt_tag_if_needed(chat_file_path)
@@ -171,7 +172,7 @@ class TemplateLoader:
         with open(chat_file_path, 'r', encoding='utf-8') as file:
             lines = file.readlines()

-        prompt_tag = f"# {
+        prompt_tag = f"# {ROLE_PROMPT}:"
         if Chat.get_last_role_marker(lines) == prompt_tag:
             return
