ara-cli 0.1.10.0__py3-none-any.whl → 0.1.10.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of ara-cli might be problematic.

Files changed (49)
  1. ara_cli/__main__.py +252 -97
  2. ara_cli/ara_command_action.py +11 -6
  3. ara_cli/ara_subcommands/__init__.py +0 -0
  4. ara_cli/ara_subcommands/autofix.py +26 -0
  5. ara_cli/ara_subcommands/chat.py +27 -0
  6. ara_cli/ara_subcommands/classifier_directory.py +16 -0
  7. ara_cli/ara_subcommands/common.py +100 -0
  8. ara_cli/ara_subcommands/create.py +75 -0
  9. ara_cli/ara_subcommands/delete.py +22 -0
  10. ara_cli/ara_subcommands/extract.py +22 -0
  11. ara_cli/ara_subcommands/fetch_templates.py +14 -0
  12. ara_cli/ara_subcommands/list.py +65 -0
  13. ara_cli/ara_subcommands/list_tags.py +25 -0
  14. ara_cli/ara_subcommands/load.py +48 -0
  15. ara_cli/ara_subcommands/prompt.py +136 -0
  16. ara_cli/ara_subcommands/read.py +47 -0
  17. ara_cli/ara_subcommands/read_status.py +20 -0
  18. ara_cli/ara_subcommands/read_user.py +20 -0
  19. ara_cli/ara_subcommands/reconnect.py +27 -0
  20. ara_cli/ara_subcommands/rename.py +22 -0
  21. ara_cli/ara_subcommands/scan.py +14 -0
  22. ara_cli/ara_subcommands/set_status.py +22 -0
  23. ara_cli/ara_subcommands/set_user.py +22 -0
  24. ara_cli/ara_subcommands/template.py +16 -0
  25. ara_cli/artefact_autofix.py +44 -6
  26. ara_cli/artefact_models/artefact_model.py +106 -25
  27. ara_cli/artefact_models/artefact_templates.py +18 -9
  28. ara_cli/artefact_models/epic_artefact_model.py +11 -2
  29. ara_cli/artefact_models/feature_artefact_model.py +31 -1
  30. ara_cli/artefact_models/userstory_artefact_model.py +15 -3
  31. ara_cli/artefact_scan.py +2 -2
  32. ara_cli/chat.py +1 -19
  33. ara_cli/commands/read_command.py +17 -4
  34. ara_cli/completers.py +144 -0
  35. ara_cli/file_loaders/text_file_loader.py +2 -2
  36. ara_cli/prompt_extractor.py +97 -79
  37. ara_cli/prompt_handler.py +160 -59
  38. ara_cli/tag_extractor.py +38 -18
  39. ara_cli/template_loader.py +1 -1
  40. ara_cli/version.py +1 -1
  41. {ara_cli-0.1.10.0.dist-info → ara_cli-0.1.10.4.dist-info}/METADATA +2 -1
  42. {ara_cli-0.1.10.0.dist-info → ara_cli-0.1.10.4.dist-info}/RECORD +48 -26
  43. tests/test_artefact_scan.py +1 -1
  44. tests/test_prompt_handler.py +12 -4
  45. tests/test_tag_extractor.py +19 -13
  46. ara_cli/ara_command_parser.py +0 -605
  47. {ara_cli-0.1.10.0.dist-info → ara_cli-0.1.10.4.dist-info}/WHEEL +0 -0
  48. {ara_cli-0.1.10.0.dist-info → ara_cli-0.1.10.4.dist-info}/entry_points.txt +0 -0
  49. {ara_cli-0.1.10.0.dist-info → ara_cli-0.1.10.4.dist-info}/top_level.txt +0 -0
ara_cli/prompt_extractor.py CHANGED
@@ -8,20 +8,89 @@ from ara_cli.classifier import Classifier
  from ara_cli.directory_navigator import DirectoryNavigator
  from ara_cli.artefact_models.artefact_mapping import title_prefix_to_artefact_class

+ def _find_extract_token(tokens):
+     """Find the first token that needs to be processed."""
+     for token in tokens:
+         if token.type == 'fence' and token.content.strip().startswith("# [x] extract"):
+             return token
+     return None
+
+ def _extract_file_path(content_lines):
+     """Extract file path from content lines."""
+     if not content_lines:
+         return None
+     file_path_search = re.search(r"# filename: (.+)", content_lines[0])
+     return file_path_search.group(1).strip() if file_path_search else None
+
+ def _find_artefact_class(content_lines):
+     """Find the appropriate artefact class from content lines."""
+     for line in content_lines[:2]:
+         words = line.strip().split(' ')
+         if not words:
+             continue
+         first_word = words[0]
+         if first_word in title_prefix_to_artefact_class:
+             return title_prefix_to_artefact_class[first_word]
+     return None
+
+ def _process_file_extraction(file_path, code_content, force, write):
+     """Process file extraction logic."""
+     print(f"Filename extracted: {file_path}")
+     handle_existing_file(file_path, code_content, force, write)
+
+ def _process_artefact_extraction(artefact_class, content_lines, force, write):
+     """Process artefact extraction logic."""
+     artefact = artefact_class.deserialize('\n'.join(content_lines))
+     serialized_artefact = artefact.serialize()
+
+     original_directory = os.getcwd()
+     directory_navigator = DirectoryNavigator()
+     directory_navigator.navigate_to_target()
+
+     artefact_path = artefact.file_path
+     directory = os.path.dirname(artefact_path)
+     os.makedirs(directory, exist_ok=True)
+     handle_existing_file(artefact_path, serialized_artefact, force, write)
+
+     os.chdir(original_directory)
+
+ def _process_extraction_block(token_to_process, updated_content, force, write):
+     """Process a single extraction block."""
+     # Get the original block text for later replacement
+     source_lines = updated_content.split('\n')
+     start_line, end_line = token_to_process.map
+     original_block_text = '\n'.join(source_lines[start_line:end_line])
+
+     block_content = token_to_process.content
+     block_lines = block_content.split('\n')
+     content_lines_after_extract = block_lines[1:]
+
+     file_path = _extract_file_path(content_lines_after_extract)
+
+     if file_path:
+         code_content = '\n'.join(content_lines_after_extract[1:])
+         _process_file_extraction(file_path, code_content, force, write)
+     else:
+         artefact_class = _find_artefact_class(content_lines_after_extract)
+         if artefact_class:
+             _process_artefact_extraction(artefact_class, content_lines_after_extract, force, write)
+         else:
+             print("No filename or valid artefact found, skipping processing for this block.")

- def extract_code_blocks_md(markdown_text):
-     md = MarkdownIt()
-     tokens = md.parse(markdown_text)
-     code_blocks = [token.content for token in tokens if token.type == 'fence']
-     return code_blocks
-
+     # Update the main content by replacing the processed block text with a modified version
+     modified_block_text = original_block_text.replace("# [x] extract", "# [v] extract", 1)
+     return updated_content.replace(original_block_text, modified_block_text, 1)

  def extract_responses(document_path, relative_to_ara_root=False, force=False, write=False):
      print(f"Starting extraction from '{document_path}'")
      block_extraction_counter = 0

-     with open(document_path, 'r', encoding='utf-8', errors='replace') as file:
-         content = file.read()
+     try:
+         with open(document_path, 'r', encoding='utf-8', errors='replace') as file:
+             content = file.read()
+     except FileNotFoundError:
+         print(f"Error: File not found at '{document_path}'. Skipping extraction.")
+         return

      cwd = os.getcwd()
      if relative_to_ara_root:
@@ -29,71 +98,27 @@ def extract_responses(document_path, relative_to_ara_root=False, force=False, wr
          navigator.navigate_to_target()
          os.chdir('..')

-     code_blocks_found = extract_code_blocks_md(content)
      updated_content = content

-     for block in code_blocks_found:
-         block_lines = block.split('\n')
+     while True:
+         md = MarkdownIt()
+         tokens = md.parse(updated_content)

-         if "# [x] extract" not in block_lines[0]:
-             continue
-         print("Block found and processed.")
-
-         block_lines = block_lines[1:]
-
-         file_path_search = re.search(r"# filename: (.+)", block_lines[0])
-
-         if file_path_search:
-             file_path = file_path_search.group(1).strip()
-             print(f"Filename extracted: {file_path}")
-
-             block_lines = block_lines[1:] # Remove first line again after removing filename line
-             block = '\n'.join(block_lines)
-
-             handle_existing_file(file_path, block, force, write)
-             block_extraction_counter += 1
-
-             # Update the markdown content
-             updated_content = update_markdown(content, block, file_path)
-         else:
-             # Extract artefact
-             artefact_class = None
-             for line in block_lines[:2]:
-                 words = line.strip().split(' ')
-                 if not words:
-                     continue
-                 first_word = words[0]
-                 if first_word not in title_prefix_to_artefact_class:
-                     continue
-                 artefact_class = title_prefix_to_artefact_class[first_word]
-             if not artefact_class:
-                 print("No filename found, skipping this block.")
-                 continue
-             artefact = artefact_class.deserialize('\n'.join(block_lines))
-             serialized_artefact = artefact.serialize()
-
-             original_directory = os.getcwd()
-             directory_navigator = DirectoryNavigator()
-             directory_navigator.navigate_to_target()
-
-             artefact_path = artefact.file_path
-             directory = os.path.dirname(artefact_path)
-             os.makedirs(directory, exist_ok=True)
-             handle_existing_file(artefact_path, serialized_artefact, force, write)
+         token_to_process = _find_extract_token(tokens)
+         if not token_to_process:
+             break # No more blocks to process

-             os.chdir(original_directory)
+         block_extraction_counter += 1
+         print("Block found and processed.")

-             # TODO: make update_markdown work block by block instead of updating the whole document at once
-             block_extraction_counter += 1
-             updated_content = update_markdown(content, block, None)
+         updated_content = _process_extraction_block(token_to_process, updated_content, force, write)

      os.chdir(cwd)
-     # Save the updated markdown content
      with open(document_path, 'w', encoding='utf-8') as file:
          file.write(updated_content)

-     print(f"End of extraction. Found {block_extraction_counter} blocks.")
-
+     if block_extraction_counter > 0:
+         print(f"End of extraction. Found and processed {block_extraction_counter} blocks in '{os.path.basename(document_path)}'.")

  def modify_and_save_file(response, file_path):
      print(f"Debug: Modifying and saving file {file_path}")
@@ -119,11 +144,9 @@ def modify_and_save_file(response, file_path):
      except json.JSONDecodeError as ex:
          print(f"ERROR: Failed to decode JSON response: {ex}")

-
  def prompt_user_decision(prompt):
      return input(prompt)

-
  def determine_should_create(skip_query=False):
      if skip_query:
          return True
@@ -132,13 +155,14 @@ def determine_should_create(skip_query=False):
          return True
      return False

-
  def create_file_if_not_exist(filename, content, skip_query=False):
      try:
          if not os.path.exists(filename):
              if determine_should_create(skip_query):
                  # Ensure the directory exists
-                 os.makedirs(os.path.dirname(filename), exist_ok=True)
+                 dir_name = os.path.dirname(filename)
+                 if dir_name:
+                     os.makedirs(dir_name, exist_ok=True)

                  with open(filename, 'w', encoding='utf-8') as file:
                      file.write(content)
@@ -150,7 +174,6 @@ def create_file_if_not_exist(filename, content, skip_query=False):
          print(f"Error: {e}")
          print(f"Failed to create file {filename} due to an OS error")

-
  def create_prompt_for_file_modification(content_str, filename):
      if not os.path.exists(filename):
          print(f"WARNING: {filename} for merge prompt creation does not exist.")
@@ -181,11 +204,15 @@ def create_prompt_for_file_modification(content_str, filename):

      return prompt_text

-
  def handle_existing_file(filename, block_content, skip_query=False, write=False):
      if not os.path.isfile(filename):
          print(f"File {filename} does not exist, attempting to create")
+         # Ensure directory exists before writing
+         directory = os.path.dirname(filename)
+         if directory:
+             os.makedirs(directory, exist_ok=True)
          create_file_if_not_exist(filename, block_content, skip_query)
+
      elif write:
          print(f"File {filename} exists. Overwriting without LLM merge as requested.")
          try:
@@ -213,18 +240,9 @@ def handle_existing_file(filename, block_content, skip_query=False, write=False)
              response += content
          modify_and_save_file(response, filename)

-
  def extract_and_save_prompt_results(classifier, param, write=False):
      sub_directory = Classifier.get_sub_directory(classifier)
      prompt_log_file = f"ara/{sub_directory}/{param}.data/{classifier}.prompt_log.md"
      print(f"Extract marked sections from: {prompt_log_file}")

-     extract_responses(prompt_log_file, write=write)
-
-
- def update_markdown(original_content, block_content, filename):
-     """
-     Update the markdown content by changing the extract block from "# [x] extract" to "# [v] extract"
-     """
-     updated_content = original_content.replace("# [x] extract", "# [v] extract")
-     return updated_content
+     extract_responses(prompt_log_file, write=write)
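Note on the prompt_extractor rework: the old extract_code_blocks_md/update_markdown pair collected every fence up front and rewrote the whole document in one go, while the new helpers reparse the document on each pass and use the fence token's map to replace exactly one marked block at a time. A minimal sketch of the markdown-it-py behaviour the helpers rely on (the sample document and filename are made up for illustration):

```python
from markdown_it import MarkdownIt

# Build a tiny document with one marked fence block; the fence string is
# assembled programmatically so this example does not nest code fences.
fence = "`" * 3
sample = "\n".join([
    "Some prose.",
    "",
    fence,
    "# [x] extract",
    "# filename: hello.py",
    "print('hi')",
    fence,
    "",
])

for token in MarkdownIt().parse(sample):
    if token.type == "fence" and token.content.strip().startswith("# [x] extract"):
        start_line, end_line = token.map        # source line span of the whole fenced block
        print(start_line, end_line)             # 2 7 for this sample
        print(token.content.splitlines()[0])    # "# [x] extract" (marker line)
```

Because each pass flips only the first "# [x] extract" marker to "# [v] extract", already-processed blocks are skipped on the next parse and the while loop eventually terminates.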
ara_cli/prompt_handler.py CHANGED
@@ -60,7 +60,12 @@ class LLMSingleton:
          # Check if there was an authentication error
          stderr_output = captured_stderr.getvalue()
          if "Authentication error" in stderr_output:
-             warnings.warn("Invalid Langfuse credentials - prompt tracing disabled and using default prompts. Set environment variables 'ARA_CLI_LANGFUSE_PUBLIC_KEY', 'ARA_CLI_LANGFUSE_SECRET_KEY', 'LANGFUSE_HOST' and restart application to use Langfuse capabilities", UserWarning)
+             warnings.warn(
+                 "Invalid Langfuse credentials - prompt tracing disabled and using default prompts. "
+                 "Set environment variables 'ARA_CLI_LANGFUSE_PUBLIC_KEY', 'ARA_CLI_LANGFUSE_SECRET_KEY', "
+                 "'LANGFUSE_HOST' and restart application to use Langfuse capabilities",
+                 UserWarning,
+             )

          LLMSingleton._default_model = default_model_id
          LLMSingleton._extraction_model = extraction_model_id
@@ -160,6 +165,50 @@ def _is_valid_message(message: dict) -> bool:
      return False


+ def _norm(p: str) -> str:
+     """Normalize slashes and collapse .. segments."""
+     return os.path.normpath(p) if p else p
+
+
+ def resolve_existing_path(rel_or_abs_path: str, anchor_dir: str) -> str:
+     """
+     Resolve a potentially relative path to an existing absolute path.
+
+     Strategy:
+     - If already absolute and exists -> return it.
+     - Else, try from the anchor_dir.
+     - Else, walk up parent directories from anchor_dir and try joining at each level.
+     - If nothing is found, return the normalized original (will fail later with clear message).
+     """
+     if not rel_or_abs_path:
+         return rel_or_abs_path
+
+     candidate = _norm(rel_or_abs_path)
+
+     if os.path.isabs(candidate) and os.path.exists(candidate):
+         return candidate
+
+     anchor_dir = os.path.abspath(anchor_dir or os.getcwd())
+
+     # Try from anchor dir directly
+     direct = _norm(os.path.join(anchor_dir, candidate))
+     if os.path.exists(direct):
+         return direct
+
+     # Walk parents
+     cur = anchor_dir
+     prev = None
+     while cur and cur != prev:
+         test = _norm(os.path.join(cur, candidate))
+         if os.path.exists(test):
+             return test
+         prev = cur
+         cur = os.path.dirname(cur)
+
+     # Give back normalized candidate; open() will raise, but at least path is clean
+     return candidate
+
+
  def send_prompt(prompt, purpose="default"):
      """Prepares and sends a prompt to the LLM, streaming the response."""
      chat_instance = LLMSingleton.get_instance()
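Note: the parent-walk above means a relative path taken from a givens listing can still be opened when the anchor directory sits several levels below the project root. A rough usage sketch with hypothetical directories (resolve_existing_path is the helper added in this release):

```python
import os
import tempfile

from ara_cli.prompt_handler import resolve_existing_path

# Hypothetical layout: <project>/docs/img.png, anchored from a nested prompt.data dir.
project = tempfile.mkdtemp()
os.makedirs(os.path.join(project, "docs"))
open(os.path.join(project, "docs", "img.png"), "wb").close()

anchor = os.path.join(project, "ara", "features", "x.data", "prompt.data")
os.makedirs(anchor)

# "docs/img.png" does not exist under the anchor itself, so the helper walks
# up the parents until it reaches <project> and finds the file there.
print(resolve_existing_path("docs/img.png", anchor))  # -> <project>/docs/img.png
```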
@@ -213,8 +262,6 @@ def describe_image(image_path: str) -> str:
      Returns:
          Text description of the image
      """
-     import base64
-
      with LLMSingleton.get_instance().langfuse.start_as_current_span(
          name="ara-cli/describe-image"
      ) as span:
@@ -234,14 +281,19 @@ def describe_image(image_path: str) -> str:
          # Fallback to default prompt if Langfuse prompt is not available
          if not describe_image_prompt:
              logging.info("Using default describe-image prompt.")
-             describe_image_prompt = "Please describe this image in detail. If it contains text, transcribe it exactly. If it's a diagram or chart, explain its structure and content. If it's a photo or illustration, describe what you see."
+             describe_image_prompt = (
+                 "Please describe this image in detail. If it contains text, transcribe it exactly. "
+                 "If it's a diagram or chart, explain its structure and content. If it's a photo or illustration, "
+                 "describe what you see."
+             )

-         # Read and encode the image
-         with open(image_path, "rb") as image_file:
+         # Resolve and read the image
+         resolved_image_path = resolve_existing_path(image_path, os.getcwd())
+         with open(resolved_image_path, "rb") as image_file:
              base64_image = base64.b64encode(image_file.read()).decode("utf-8")

          # Determine image type
-         image_extension = os.path.splitext(image_path)[1].lower()
+         image_extension = os.path.splitext(resolved_image_path)[1].lower()
          mime_type = {
              ".png": "image/png",
              ".jpg": "image/jpeg",
@@ -256,7 +308,7 @@
                  "content": [
                      {
                          "type": "text",
-                         "text": "Please describe this image in detail. If it contains text, transcribe it exactly. If it's a diagram or chart, explain its structure and content. If it's a photo or illustration, describe what you see.",
+                         "text": describe_image_prompt,
                      },
                      {
                          "type": "image_url",
@@ -288,7 +340,9 @@
  def append_headings(classifier, param, heading_name):
      sub_directory = Classifier.get_sub_directory(classifier)

-     artefact_data_path = f"ara/{sub_directory}/{param}.data/{classifier}.prompt_log.md"
+     artefact_data_path = _norm(
+         f"ara/{sub_directory}/{param}.data/{classifier}.prompt_log.md"
+     )

      # Check if the file exists, and if not, create an empty file
      if not os.path.exists(artefact_data_path):
@@ -309,15 +363,15 @@

  def write_prompt_result(classifier, param, text):
      sub_directory = Classifier.get_sub_directory(classifier)
-
-     # TODO change absolute path to relative path with directory navigator
-     artefact_data_path = f"ara/{sub_directory}/{param}.data/{classifier}.prompt_log.md"
+     artefact_data_path = _norm(
+         f"ara/{sub_directory}/{param}.data/{classifier}.prompt_log.md"
+     )
      write_string_to_file(artefact_data_path, text, "a")


  def prompt_data_directory_creation(classifier, parameter):
      sub_directory = Classifier.get_sub_directory(classifier)
-     prompt_data_path = f"ara/{sub_directory}/{parameter}.data/prompt.data"
+     prompt_data_path = _norm(f"ara/{sub_directory}/{parameter}.data/prompt.data")
      if not exists(prompt_data_path):
          makedirs(prompt_data_path)
      return prompt_data_path
@@ -347,9 +401,10 @@
          artefact_to_mark=f"{parameter}.{classifier}",
      )

-     generate_config_prompt_global_givens_file(prompt_data_path, "config.prompt_global_givens.md")
-
-     generate_config_prompt_global_givens_file(prompt_data_path, "config.prompt_global_givens.md")
+     # Only once (was duplicated before)
+     generate_config_prompt_global_givens_file(
+         prompt_data_path, "config.prompt_global_givens.md"
+     )


  def write_template_files_to_config(template_type, config_file, base_template_path):
@@ -361,7 +416,7 @@ def write_template_files_to_config(template_type, config_file, base_template_pat

  def load_selected_prompt_templates(classifier, parameter):
      sub_directory = Classifier.get_sub_directory(classifier)
-     prompt_data_path = f"ara/{sub_directory}/{parameter}.data/prompt.data"
+     prompt_data_path = _norm(f"ara/{sub_directory}/{parameter}.data/prompt.data")
      config_file_path = os.path.join(prompt_data_path, "config.prompt_templates.md")

      if not os.path.exists(config_file_path):
@@ -464,7 +519,9 @@ def move_and_copy_files(source_path, prompt_data_path, prompt_archive_path):

  def extract_and_load_markdown_files(md_prompt_file_path):
      """
-     Extracts markdown files paths based on checked items and constructs proper paths respecting markdown header hierarchy.
+     Extracts markdown files paths based on checked items and constructs proper paths
+     respecting markdown header hierarchy. **Returns normalized relative paths**
+     (not resolved), and resolution happens later relative to the config file dir.
      """
      header_stack = []
      path_accumulator = []
@@ -480,53 +537,73 @@ def extract_and_load_markdown_files(md_prompt_file_path):
              header_stack.append(header)
          elif "[x]" in line:
              relative_path = line.split("]")[-1].strip()
-             full_path = os.path.join("/".join(header_stack), relative_path)
-             path_accumulator.append(full_path)
+             # Use os.path.join for OS-safe joining, then normalize
+             full_rel_path = os.path.join(*header_stack, relative_path) if header_stack else relative_path
+             path_accumulator.append(_norm(full_rel_path))
      return path_accumulator


  def load_givens(file_path):
+     """
+     Reads marked givens from a config markdown and returns:
+     - combined markdown content (including code fences / images)
+     - a list of image data dicts for the multimodal message
+     Paths inside the markdown are resolved robustly relative to the config file directory (and its parents).
+     """
      content = ""
      image_data_list = []
      markdown_items = extract_and_load_markdown_files(file_path)

-     # Only proceed and add the header if there are marked items to load.
      if not markdown_items:
          return "", []

      content = "### GIVENS\n\n"

+     anchor_dir = os.path.dirname(os.path.abspath(file_path))
+
      for item in markdown_items:
-         if item.lower().endswith((".png", ".jpeg", ".jpg")):
-             with open(item, "rb") as image_file:
+         resolved = resolve_existing_path(item, anchor_dir)
+         # Keep the listing line readable, show the original relative item
+         content += item + "\n"
+
+         ext = os.path.splitext(resolved)[1].lower()
+
+         # Image branch
+         if ext in (".png", ".jpeg", ".jpg", ".gif", ".bmp"):
+             with open(resolved, "rb") as image_file:
                  base64_image = base64.b64encode(image_file.read()).decode("utf-8")
+
+             mime_type = {
+                 ".png": "image/png",
+                 ".jpg": "image/jpeg",
+                 ".jpeg": "image/jpeg",
+                 ".gif": "image/gif",
+                 ".bmp": "image/bmp",
+             }.get(ext, "image/png")
+
              image_data_list.append(
                  {
                      "type": "image_url",
-                     "image_url": {"url": f"data:image/png;base64,{base64_image}"},
+                     "image_url": {"url": f"data:{mime_type};base64,{base64_image}"},
                  }
              )
-             content += item + "\n"
-             content += f"![{item}](data:image/png;base64,{base64_image})" + "\n"
-         else:
-             # Check if the item specifies line ranges
-             # TODO item has currently no trailing [] see extraction and handover method in extract and load
-             # item = f"[10:29] {item}"
-             # print(f"found {item}, check for subsection")
-             # TODO re.match can not split the item with [] correctly and extract the line numbers
-             # TODO logic of subsections is not supported by the update algorithm of the config prompt givens updater
-             # TODO extract in lines of *.md files potential images and add them to the image list
+             # Also embed inline for the prompt markdown (use png as a neutral default for data URI)
+             content += f"![{item}](data:{mime_type};base64,{base64_image})\n"

+         else:
+             # Check if the item specifies line ranges: e.g. "[10:20,25:30] filePath"
              match = re.match(r".*?\[(\d+:\d+(?:,\s*\d+:\d+)*)\]\s+(.+)", item)
              if match:
                  line_ranges, file_name = match.groups()
-                 content += file_name + "\n" + "```\n"
-                 content += get_partial_file_content(file_name, line_ranges) + "\n"
+                 resolved_sub = resolve_existing_path(file_name, anchor_dir)
+                 content += "```\n"
+                 content += get_partial_file_content(resolved_sub, line_ranges) + "\n"
                  content += "```\n\n"
              else:
-                 content += item + "\n" + "```\n"
-                 content += get_file_content(item) + "\n"
+                 content += "```\n"
+                 content += get_file_content(resolved) + "\n"
                  content += "```\n\n"
+
      return content, image_data_list


@@ -535,7 +612,7 @@ def get_partial_file_content(file_name, line_ranges):
      Reads specific lines from a file based on the line ranges provided.

      Args:
-         file_name (str): The path to the file.
+         file_name (str): The path to the file (absolute or relative, already resolved by caller).
          line_ranges (str): A string representing the line ranges to read, e.g., '10:20,25:30'.

      Returns:
@@ -585,9 +662,7 @@ def prepend_system_prompt(message_list):
      # Fallback to default prompt if Langfuse prompt is not available
      if not system_prompt:
          logging.info("Using default system prompt.")
-         system_prompt = (
-             "You are a helpful assistant that can process both text and images."
-         )
+         system_prompt = "You are a helpful assistant that can process both text and images."

      # Prepend the system prompt
      system_prompt_message = {"role": "system", "content": system_prompt}
@@ -597,6 +672,9 @@


  def append_images_to_message(message, image_data_list):
+     """
+     Appends image data list to a single message dict (NOT to a list).
+     """
      logger = logging.getLogger(__name__)

      logger.debug(
@@ -607,13 +685,17 @@
          logger.debug("No images to append, returning original message")
          return message

-     message_content = message["content"]
+     message_content = message.get("content")
      logger.debug(f"Original message content: {message_content}")

      if isinstance(message_content, str):
          message["content"] = [{"type": "text", "text": message_content}]

-     message["content"].extend(image_data_list)
+     if isinstance(message["content"], list):
+         message["content"].extend(image_data_list)
+     else:
+         # If somehow content is not list or str, coerce to list
+         message["content"] = [{"type": "text", "text": str(message_content)}] + image_data_list

      logger.debug(f"Updated message content with {len(image_data_list)} images")

@@ -622,11 +704,20 @@

  def create_and_send_custom_prompt(classifier, parameter):
      sub_directory = Classifier.get_sub_directory(classifier)
-     prompt_data_path = f"ara/{sub_directory}/{parameter}.data/prompt.data"
+     prompt_data_path = _norm(f"ara/{sub_directory}/{parameter}.data/prompt.data")
      prompt_file_path_markdown = join(prompt_data_path, f"{classifier}.prompt.md")

-     extensions = [".blueprint.md", ".rules.md", ".prompt_givens.md", ".prompt_global_givens.md", ".intention.md", ".commands.md"]
-     combined_content_markdown, image_data_list = collect_file_content_by_extension(prompt_data_path, extensions)
+     extensions = [
+         ".blueprint.md",
+         ".rules.md",
+         ".prompt_givens.md",
+         ".prompt_global_givens.md",
+         ".intention.md",
+         ".commands.md",
+     ]
+     combined_content_markdown, image_data_list = collect_file_content_by_extension(
+         prompt_data_path, extensions
+     )

      with open(prompt_file_path_markdown, "w", encoding="utf-8") as file:
          file.write(combined_content_markdown)
@@ -635,14 +726,14 @@
      append_headings(classifier, parameter, "prompt")
      write_prompt_result(classifier, parameter, prompt)

+     # Build message and append images correctly (fixed)
      message = {"role": "user", "content": combined_content_markdown}
-
+     message = append_images_to_message(message, image_data_list)
      message_list = [message]

-     message_list = append_images_to_message(message_list, image_data_list)
      append_headings(classifier, parameter, "result")

-     artefact_data_path = (
+     artefact_data_path = _norm(
          f"ara/{sub_directory}/{parameter}.data/{classifier}.prompt_log.md"
      )
      with open(artefact_data_path, "a", encoding="utf-8") as file:
@@ -652,7 +743,6 @@ def create_and_send_custom_prompt(classifier, parameter):
                  continue
              file.write(chunk_content)
              file.flush()
-     # write_prompt_result(classifier, parameter, response)


  def generate_config_prompt_template_file(
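Note: the hunk above fixes the call order in create_and_send_custom_prompt — previously the whole message_list was handed to append_images_to_message, which expects a single message dict. A small sketch of the corrected shape (content values are placeholders):

```python
from ara_cli.prompt_handler import append_images_to_message

message = {"role": "user", "content": "combined prompt markdown ..."}
image_data_list = [
    {"type": "image_url", "image_url": {"url": "data:image/png;base64,..."}},
]

# The helper promotes the plain string content to the list form and appends
# the image entries, so the final message looks like:
#   {"role": "user", "content": [{"type": "text", "text": "combined prompt markdown ..."},
#                                {"type": "image_url", ...}]}
message = append_images_to_message(message, image_data_list)
message_list = [message]
```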
@@ -661,7 +751,8 @@
      config_prompt_templates_path = os.path.join(
          prompt_data_path, config_prompt_templates_name
      )
-     config = ConfigManager.get_config()
+     # Use instance method consistently
+     config = ConfigManager().get_config()
      global_prompt_template_path = TemplatePathManager.get_template_base_path()
      dir_list = ["ara/.araconfig/custom-prompt-modules"] + [
          f"{os.path.join(global_prompt_template_path,'prompt-modules')}"
@@ -678,7 +769,7 @@ def generate_config_prompt_givens_file(
      config_prompt_givens_path = os.path.join(
          prompt_data_path, config_prompt_givens_name
      )
-     config = ConfigManager.get_config()
+     config = ConfigManager().get_config()
      dir_list = (
          ["ara"]
          + [path for d in config.ext_code_dirs for path in d.values()]
@@ -712,14 +803,24 @@
      with open(config_prompt_givens_path, "w", encoding="utf-8") as file:
          file.write("".join(updated_listing))

- def generate_config_prompt_global_givens_file(prompt_data_path, config_prompt_givens_name, artefact_to_mark=None):
+
+ def generate_config_prompt_global_givens_file(
+     prompt_data_path, config_prompt_givens_name, artefact_to_mark=None
+ ):
      from ara_cli.global_file_lister import generate_global_markdown_listing
-     config_prompt_givens_path = os.path.join(prompt_data_path, config_prompt_givens_name)
-     config = ConfigManager.get_config()

-     if not hasattr(config, 'global_dirs') or not config.global_dirs:
+     config_prompt_givens_path = os.path.join(
+         prompt_data_path, config_prompt_givens_name
+     )
+     config = ConfigManager().get_config()
+
+     if not hasattr(config, "global_dirs") or not config.global_dirs:
          return

      dir_list = [path for d in config.global_dirs for path in d.values()]
-     print(f"used {dir_list} for global prompt givens file listing with absolute paths")
-     generate_global_markdown_listing(dir_list, config.ara_prompt_given_list_includes, config_prompt_givens_path)
+     print(
+         f"used {dir_list} for global prompt givens file listing with absolute paths"
+     )
+     generate_global_markdown_listing(
+         dir_list, config.ara_prompt_given_list_includes, config_prompt_givens_path
+     )
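Note: load_givens previously hard-coded image/png for every embedded image; with the mapping above the data URI follows the file extension, falling back to PNG for unknown types. A standalone sketch of the same construction (the path is made up):

```python
import base64
import os

path = "docs/diagram.gif"  # hypothetical image referenced from a givens listing
ext = os.path.splitext(path)[1].lower()
mime_type = {
    ".png": "image/png",
    ".jpg": "image/jpeg",
    ".jpeg": "image/jpeg",
    ".gif": "image/gif",
    ".bmp": "image/bmp",
}.get(ext, "image/png")

with open(path, "rb") as image_file:
    base64_image = base64.b64encode(image_file.read()).decode("utf-8")

image_entry = {
    "type": "image_url",
    "image_url": {"url": f"data:{mime_type};base64,{base64_image}"},
}
```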