ara-cli 0.1.9.89__py3-none-any.whl → 0.1.9.92__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of ara-cli might be problematic.

ara_cli/ara_config.py CHANGED
@@ -49,6 +49,11 @@ class ARAconfig(BaseModel):
  temperature=1,
  max_completion_tokens=16000
  ),
+ "gpt-5-mini": LLMConfigItem(
+ provider="openai",
+ model="openai/gpt-5-mini-2025-08-07",
+ temperature=1
+ ),
  "gpt-4o": LLMConfigItem(
  provider="openai",
  model="openai/gpt-4o",
@@ -92,11 +97,12 @@ class ARAconfig(BaseModel):
  max_tokens=4000
  )
  })
- default_llm: Optional[str] = "gpt-5"
+ default_llm: Optional[str] = None
+ extraction_llm: Optional[str] = None

  @model_validator(mode='after')
  def check_critical_fields(self) -> 'ARAconfig':
- """Check for empty critical fields and use defaults if needed."""
+ """Check for empty critical fields and validate default_llm and extraction_llm."""
  critical_fields = {
  'ext_code_dirs': [{"source_dir": "./src"}, {"source_dir": "./tests"}],
  'local_ara_templates_dir': "./ara/.araconfig/templates/",
@@ -110,6 +116,30 @@ class ARAconfig(BaseModel):
  print(f"Warning: Value for '{field}' is missing or empty. Using default.")
  setattr(self, field, default_value)

+ if not self.llm_config:
+ print("Warning: 'llm_config' is empty. 'default_llm' and 'extraction_llm' cannot be set.")
+ self.default_llm = None
+ self.extraction_llm = None
+ return self
+
+ first_available_llm = next(iter(self.llm_config))
+
+ if not self.default_llm:
+ print(f"Warning: 'default_llm' is not set. Defaulting to the first available model: '{first_available_llm}'.")
+ self.default_llm = first_available_llm
+ elif self.default_llm not in self.llm_config:
+ print(f"Warning: The configured 'default_llm' ('{self.default_llm}') does not exist in 'llm_config'.")
+ print(f"-> Reverting to the first available model: '{first_available_llm}'.")
+ self.default_llm = first_available_llm
+
+ if not self.extraction_llm:
+ print(f"Warning: 'extraction_llm' is not set. Setting it to the same as 'default_llm': '{self.default_llm}'.")
+ self.extraction_llm = self.default_llm
+ elif self.extraction_llm not in self.llm_config:
+ print(f"Warning: The configured 'extraction_llm' ('{self.extraction_llm}') does not exist in 'llm_config'.")
+ print(f"-> Reverting to the 'default_llm' value: '{self.default_llm}'.")
+ self.extraction_llm = self.default_llm
+
  return self

  # Function to ensure the necessary directories exist
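Taken together, the validator changes mean the config no longer ships with a hard-coded "gpt-5" default: if default_llm is unset or names a model missing from llm_config, the first configured model is used, and extraction_llm falls back to default_llm. A minimal standalone sketch of those fallback rules, using plain dicts instead of the Pydantic model (illustration only, not ara-cli's own code):

def resolve_models(default_llm, extraction_llm, llm_config):
    # Mirrors check_critical_fields: unknown or unset names fall back.
    if not llm_config:
        return None, None
    first_available = next(iter(llm_config))
    if not default_llm or default_llm not in llm_config:
        default_llm = first_available      # e.g. a stale "gpt-5" entry reverts to the first configured model
    if not extraction_llm or extraction_llm not in llm_config:
        extraction_llm = default_llm       # extraction follows the default model
    return default_llm, extraction_llm

print(resolve_models("gpt-5", None, {"gpt-5-mini": {}, "gpt-4o": {}}))
# -> ('gpt-5-mini', 'gpt-5-mini')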
ara_cli/chat.py CHANGED
@@ -433,7 +433,7 @@ Start chatting (type 'HELP'/'h' for available commands, 'QUIT'/'q' to exit chat
  break

  is_file_document = any(file_name_lower.endswith(ext)
- for ext in document_type_extensions)
+ for ext in document_type_extensions)

  if is_file_document:
  return self.load_document_file(
@@ -532,7 +532,7 @@ Start chatting (type 'HELP'/'h' for available commands, 'QUIT'/'q' to exit chat
  return [x for x in glob.glob(text + '*')]

  def _retrieve_ara_config(self):
- from ara_cli.prompt_handler import ConfigManager
+ from ara_cli.ara_config import ConfigManager
  return ConfigManager().get_config()

  def _retrieve_llm_config(self):
@@ -625,7 +625,7 @@ Start chatting (type 'HELP'/'h' for available commands, 'QUIT'/'q' to exit chat
  @cmd2.with_category(CATEGORY_LLM_CONTROL)
  def do_CHOOSE_MODEL(self, model_name):
  from ara_cli.prompt_handler import LLMSingleton
- from ara_cli.ara_config import DEFAULT_CONFIG_LOCATION, save_data
+ from ara_cli.ara_config import DEFAULT_CONFIG_LOCATION, save_data, ConfigManager
  from ara_cli.directory_navigator import DirectoryNavigator

  original_dir = os.getcwd()
@@ -634,7 +634,11 @@ Start chatting (type 'HELP'/'h' for available commands, 'QUIT'/'q' to exit chat
  os.chdir('..')

  if not self._verify_llm_choice(model_name):
+ os.chdir(original_dir)
  return
+
+ ConfigManager.reset()
+ self.config = ConfigManager.get_config()
  self.config.default_llm = model_name
  save_data(filepath=DEFAULT_CONFIG_LOCATION, config=self.config)

@@ -642,11 +646,48 @@ Start chatting (type 'HELP'/'h' for available commands, 'QUIT'/'q' to exit chat

  os.chdir(original_dir)

+ @cmd2.with_category(CATEGORY_LLM_CONTROL)
+ def do_CHOOSE_EXTRACTION_MODEL(self, model_name):
+ """Choose the language model for extraction tasks."""
+ from ara_cli.ara_config import DEFAULT_CONFIG_LOCATION, save_data, ConfigManager
+ from ara_cli.directory_navigator import DirectoryNavigator
+
+ original_dir = os.getcwd()
+ navigator = DirectoryNavigator()
+ navigator.navigate_to_target()
+ os.chdir('..')
+
+ if not self._verify_llm_choice(model_name):
+ os.chdir(original_dir)
+ return
+
+ ConfigManager.reset()
+ self.config = ConfigManager.get_config()
+ self.config.extraction_llm = model_name
+ save_data(filepath=DEFAULT_CONFIG_LOCATION, config=self.config)
+ print(f"Extraction model switched to '{model_name}'")
+
+ os.chdir(original_dir)
+
  @cmd2.with_category(CATEGORY_LLM_CONTROL)
  def do_CURRENT_MODEL(self, _):
- from ara_cli.prompt_handler import LLMSingleton
+ """Displays the current default (reasoning) language model."""
+ from ara_cli.ara_config import ConfigManager
+
+ ConfigManager.reset()
+ config = self._retrieve_ara_config()
+
+ print(config.default_llm)

- print(LLMSingleton.get_model())
+ @cmd2.with_category(CATEGORY_LLM_CONTROL)
+ def do_CURRENT_EXTRACTION_MODEL(self, _):
+ """Displays the current extraction language model."""
+ from ara_cli.ara_config import ConfigManager
+
+ ConfigManager.reset()
+ config = self._retrieve_ara_config()
+
+ print(config.extraction_llm)

  def _complete_llms(self, text, line, begidx, endidx):
  llm_config = self._retrieve_llm_config()
@@ -662,6 +703,9 @@ Start chatting (type 'HELP'/'h' for available commands, 'QUIT'/'q' to exit chat
  def complete_CHOOSE_MODEL(self, text, line, begidx, endidx):
  return self._complete_llms(text, line, begidx, endidx)

+ def complete_CHOOSE_EXTRACTION_MODEL(self, text, line, begidx, endidx):
+ return self._complete_llms(text, line, begidx, endidx)
+
  @cmd2.with_category(CATEGORY_CHAT_CONTROL)
  def do_NEW(self, chat_name):
  """Create a new chat. Optionally provide a chat name in-line: NEW new_chat"""
@@ -859,3 +903,95 @@ Start chatting (type 'HELP'/'h' for available commands, 'QUIT'/'q' to exit chat
  completions = [classifier for classifier in classifiers if classifier.startswith(text)]

  return completions
+
+
+ def _get_plural_template_type(self, template_type: str) -> str:
+ """Determines the plural form of a template type."""
+ plurals = {"commands": "commands", "rules": "rules"}
+ return plurals.get(template_type, f"{template_type}s")
+
+ def _find_project_root(self) -> str | None:
+ """
+ Finds the project root by searching for an 'ara' directory,
+ starting from the chat file's directory and moving upwards.
+ """
+ current_dir = os.path.dirname(self.chat_name)
+ while True:
+ if os.path.isdir(os.path.join(current_dir, 'ara')):
+ return current_dir
+ parent_dir = os.path.dirname(current_dir)
+ if parent_dir == current_dir: # Reached the filesystem root
+ return None
+ current_dir = parent_dir
+
+ def _gather_templates_from_path(self, search_path: str, templates_set: set, prefix: str = ""):
+ """
+ Scans a given path for items and adds them to the provided set,
+ optionally prepending a prefix.
+ """
+ import glob
+ if not os.path.isdir(search_path):
+ return
+ for path in glob.glob(os.path.join(search_path, '*')):
+ templates_set.add(f"{prefix}{os.path.basename(path)}")
+
+ def _get_available_templates(self, template_type: str) -> list[str]:
+ """
+ Scans for available global and project-local custom templates.
+ This method safely searches for template files without changing the
+ current directory, making it safe for use in autocompleters.
+
+ Args:
+ template_type: The type of template to search for (e.g., 'rules').
+
+ Returns:
+ A sorted list of unique template names. Global templates are
+ prefixed with 'global/'.
+ """
+ from ara_cli.template_manager import TemplatePathManager
+
+ plural_type = self._get_plural_template_type(template_type)
+ templates = set()
+
+ # 1. Find Global Templates
+ try:
+ global_base_path = TemplatePathManager.get_template_base_path()
+ global_template_dir = os.path.join(global_base_path, "prompt-modules", plural_type)
+ self._gather_templates_from_path(global_template_dir, templates, prefix="global/")
+ except Exception:
+ pass # Silently ignore if global templates are not found
+
+ # 2. Find Local Custom Templates
+ try:
+ project_root = self._find_project_root()
+ if project_root:
+ local_templates_base = os.path.join(project_root, self.config.local_prompt_templates_dir)
+ custom_dir = os.path.join(local_templates_base, self.config.custom_prompt_templates_subdir, plural_type)
+ self._gather_templates_from_path(custom_dir, templates)
+ except Exception:
+ pass # Silently ignore if local templates cannot be resolved
+
+ return sorted(list(templates))
+
+ def _template_completer(self, text: str, template_type: str) -> list[str]:
+ """Generic completer for different template types."""
+ available_templates = self._get_available_templates(template_type)
+ if not text:
+ return available_templates
+ return [t for t in available_templates if t.startswith(text)]
+
+ def complete_LOAD_RULES(self, text, line, begidx, endidx):
+ """Completer for the LOAD_RULES command."""
+ return self._template_completer(text, "rules")
+
+ def complete_LOAD_INTENTION(self, text, line, begidx, endidx):
+ """Completer for the LOAD_INTENTION command."""
+ return self._template_completer(text, "intention")
+
+ def complete_LOAD_COMMANDS(self, text, line, begidx, endidx):
+ """Completer for the LOAD_COMMANDS command."""
+ return self._template_completer(text, "commands")
+
+ def complete_LOAD_BLUEPRINT(self, text, line, begidx, endidx):
+ """Completer for the LOAD_BLUEPRINT command."""
+ return self._template_completer(text, "blueprint")
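The new LOAD_* completers build their candidate list from whatever template files exist in the global and project-local directories, prefix the global ones with 'global/', and then filter by what the user has typed. A rough self-contained sketch of that gather-and-filter step (the directory layout and file name below are invented for the example):

import glob
import os
import tempfile

def gather(search_path, templates, prefix=""):
    # Same idea as _gather_templates_from_path: one entry per item found on disk.
    if not os.path.isdir(search_path):
        return
    for path in glob.glob(os.path.join(search_path, "*")):
        templates.add(f"{prefix}{os.path.basename(path)}")

with tempfile.TemporaryDirectory() as root:
    rules_dir = os.path.join(root, "prompt-modules", "rules")
    os.makedirs(rules_dir)
    open(os.path.join(rules_dir, "python.rules.md"), "w").close()

    found = set()
    gather(rules_dir, found, prefix="global/")
    typed = "glo"  # what the user has typed after LOAD_RULES
    print(sorted(t for t in found if t.startswith(typed)))  # ['global/python.rules.md']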
ara_cli/prompt_extractor.py CHANGED
@@ -192,7 +192,7 @@ def handle_existing_file(filename, block_content, skip_query=False):
  messages = [{"role": "user", "content": prompt_text}]
  response = ""

- for chunk in send_prompt(messages):
+ for chunk in send_prompt(messages, purpose='extraction'):
  content = chunk.choices[0].delta.content
  if content:
  response += content
ara_cli/prompt_handler.py CHANGED
@@ -71,29 +71,54 @@ def read_string_from_file(path):
  text = file.read()
  return text

+ def _get_model_config(purpose: str) -> dict:
+ """
+ Resolves model ID by purpose and returns its prepared configuration for LiteLLM.
+ """
+ config = ConfigManager().get_config()
+
+ # Determine which model to use based on the purpose
+ model_id = config.default_llm
+ if purpose == 'extraction':
+ # Use extraction_llm if it exists, otherwise fall back to default_llm
+ model_id = getattr(config, 'extraction_llm', config.default_llm)
+
+ selected_config = config.llm_config.get(str(model_id))
+
+ if not selected_config:
+ raise ValueError(f"No configuration found for model '{model_id}' used for '{purpose}'")
+
+ config_parameters = selected_config.model_dump(exclude_none=True)
+ config_parameters.pop("provider", None)
+
+ return config_parameters
+
+
+ def _is_valid_message(message: dict) -> bool:
+ """
+ Checks if a message in a prompt is valid (i.e., not empty).
+ It handles both string content and list content (for multimodal inputs).
+ """
+ content = message.get('content')
+
+ if isinstance(content, str):
+ return content.strip() != ''
+
+ if isinstance(content, list):
+ # For multimodal content, check if there's at least one non-empty text part.
+ return any(
+ item.get('type') == 'text' and item.get('text', '').strip() != ''
+ for item in content
+ )
+
+ return False
+
+
+ def send_prompt(prompt, purpose='default'):
+ """Prepares and sends a prompt to the LLM, streaming the response."""
+ config_parameters = _get_model_config(purpose)

- def send_prompt(prompt):
- chat = LLMSingleton.get_instance()
-
- config_parameters = chat.config_parameters.copy()
- if "provider" in config_parameters:
- del config_parameters["provider"]
-
- filtered_prompt = list(filter(
- lambda msg: (
- msg.get('content') and
- isinstance(msg['content'], list) and
- any(
- item.get('type') == 'text' and
- item.get('text', '').strip() != ''
- for item in msg['content']
- )
- ) or (
- isinstance(msg.get('content'), str) and
- msg['content'].strip() != ''
- ),
- prompt
- ))
+ filtered_prompt = [msg for msg in prompt if _is_valid_message(msg)]

  completion = litellm.completion(
  **config_parameters,
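The upshot of the prompt_handler.py changes is that callers now select a model by purpose instead of going through LLMSingleton. A small self-contained sketch of the lookup that _get_model_config performs, with a hand-written config dict standing in for ConfigManager (the dict shape is an assumption for illustration; no litellm call is made here):

config = {
    "default_llm": "gpt-4o",
    "extraction_llm": "gpt-5-mini",
    "llm_config": {
        "gpt-4o": {"provider": "openai", "model": "openai/gpt-4o", "temperature": 0.8},
        "gpt-5-mini": {"provider": "openai", "model": "openai/gpt-5-mini-2025-08-07", "temperature": 1},
    },
}

def model_parameters(purpose="default"):
    # 'extraction' resolves extraction_llm; any other purpose uses the default model.
    model_id = config["default_llm"]
    if purpose == "extraction":
        model_id = config.get("extraction_llm") or config["default_llm"]
    params = dict(config["llm_config"][model_id])
    params.pop("provider", None)  # provider is stripped before the parameters reach litellm.completion
    return params

print(model_parameters())              # {'model': 'openai/gpt-4o', 'temperature': 0.8}
print(model_parameters("extraction"))  # {'model': 'openai/gpt-5-mini-2025-08-07', 'temperature': 1}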
ara_cli/version.py CHANGED
@@ -1,2 +1,2 @@
  # version.py
- __version__ = "0.1.9.89" # fith parameter like .0 for local install test purposes only. official numbers should be 4 digit numbers
+ __version__ = "0.1.9.92" # fith parameter like .0 for local install test purposes only. official numbers should be 4 digit numbers
ara_cli-0.1.9.89.dist-info/METADATA → ara_cli-0.1.9.92.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: ara_cli
- Version: 0.1.9.89
+ Version: 0.1.9.92
  Summary: Powerful, open source command-line tool for managing, structuring and automating software development artifacts in line with Business-Driven Development (BDD) and AI-assisted processes
  Description-Content-Type: text/markdown
  Requires-Dist: litellm
ara_cli-0.1.9.89.dist-info/RECORD → ara_cli-0.1.9.92.dist-info/RECORD RENAMED
@@ -2,7 +2,7 @@ ara_cli/__init__.py,sha256=0zl7IegxTid26EBGLav_fXZ4CCIV3H5TfAoFQiOHjvg,148
  ara_cli/__main__.py,sha256=J5DCDLRZ6UcpYwM1-NkjaLo4PTetcSj2dB4HrrftkUw,2064
  ara_cli/ara_command_action.py,sha256=_LHE2V5hbJxN7ccYiptuPktRfbTnXmQEt_D_FxDBlBY,22456
  ara_cli/ara_command_parser.py,sha256=I-e9W-QwTIMKMzlHycSlCWCyBFQfiFYvGre1XsDbrFI,20573
- ara_cli/ara_config.py,sha256=w7GkDNy2Tx75LFZBu8J2KWAUMpk4F5jzuMhSjmvn948,7206
+ ara_cli/ara_config.py,sha256=5uBo_flNgZSk7B9lmyfvzWyxfIQzb13LbieCpJfdZJI,8765
  ara_cli/artefact_autofix.py,sha256=WVTiIR-jo4YKmmz4eS3qTFvl45W1YKwAk1XSuz9QX10,20015
  ara_cli/artefact_creator.py,sha256=0Ory6cB-Ahkw-BDNb8QHnTbp_OHGABdkb9bhwcEdcIc,6063
  ara_cli/artefact_deleter.py,sha256=Co4wwCH3yW8H9NrOq7_2p5571EeHr0TsfE-H8KqoOfY,1900
@@ -12,7 +12,7 @@ ara_cli/artefact_lister.py,sha256=jhk4n4eqp7hDIq07q43QzS7-36BM3OfZ4EABxCeOGcw,47
  ara_cli/artefact_reader.py,sha256=Pho0_Eqm7kD9CNbVMhKb6mkNM0I3iJiCJXbXmVp1DJU,7827
  ara_cli/artefact_renamer.py,sha256=Hnz_3zD9xxnBa1FHyUE6mIktLk_9ttP2rFRvQIkmz-o,4061
  ara_cli/artefact_scan.py,sha256=msPCm-vPWOAZ_e_z5GylXxq1MtNlmJ4zvKrsdOFCWF4,4813
- ara_cli/chat.py,sha256=qQINBi5VCzlZOcQqDqUJY0p6VlAPiAWwWICSe7fvcDQ,32540
+ ara_cli/chat.py,sha256=m_nMOCKDhjC4SPEDiG3Wu_0hLphe0ey0A4YuZV0LQxQ,38167
  ara_cli/classifier.py,sha256=zWskj7rBYdqYBGjksBm46iTgVU5IIf2PZsJr4qeiwVU,1878
  ara_cli/codefusionretriever.py,sha256=fCHgXdIBRzkVAnapX-KI2NQ44XbrrF4tEQmn5J6clUI,1980
  ara_cli/codehierachieretriever.py,sha256=Xd3EgEWWhkSf1TmTWtf8X5_YvyE_4B66nRrqarwSiTU,1182
@@ -24,14 +24,14 @@ ara_cli/filename_validator.py,sha256=Aw9PL8d5-Ymhp3EY6lDrUBk3cudaNqo1Uw5RzPpI1jA
  ara_cli/list_filter.py,sha256=qKGwwQsrWe7L5FbdxEbBYD1bbbi8c-RMypjXqXvLbgs,5291
  ara_cli/output_suppressor.py,sha256=nwiHaQLwabOjMoJOeUESBnZszGMxrQZfJ3N2OvahX7Y,389
  ara_cli/prompt_chat.py,sha256=kd_OINDQFit6jN04bb7mzgY259JBbRaTaNp9F-webkc,1346
- ara_cli/prompt_extractor.py,sha256=6xLGd4ZJHDKkamEUQcdRbKM3ilBtxBjp0X2o8wrvHb0,7732
- ara_cli/prompt_handler.py,sha256=5FoVCNmmzrS4hjHL4qKteQt2A5MIycoZStkJrVL5l_4,20136
+ ara_cli/prompt_extractor.py,sha256=Sk1aQkvn8W_YNcGhfChsbT19wUAWEJmOHTr3mveyQww,7754
+ ara_cli/prompt_handler.py,sha256=pCQ2LMYawGIMlWKI5EN6QdLn0UNEk-Eckkz4SVVHR10,21122
  ara_cli/prompt_rag.py,sha256=ydlhe4CUqz0jdzlY7jBbpKaf_5fjMrAZKnriKea3ZAg,7485
  ara_cli/run_file_lister.py,sha256=XbrrDTJXp1LFGx9Lv91SNsEHZPP-PyEMBF_P4btjbDA,2360
  ara_cli/tag_extractor.py,sha256=TGdaQOVnjy25R0zDsAifB67C5oom0Fwo24s0_fr5A_I,3151
  ara_cli/template_manager.py,sha256=YwrN6AYPpl6ZrW8BVQpVXx8yTRf-oNpJUIKeg4NAggs,6606
  ara_cli/update_config_prompt.py,sha256=Oy9vNTw6UhDohyTEfSKkqE5ifEMPlmWNYkKHgUrK_pY,4607
- ara_cli/version.py,sha256=NaGz6YoHedIfREfmsLD-XITUWBN2mNZImEJSC6EQf9g,146
+ ara_cli/version.py,sha256=NFSXE1c5XdqCXt75PBLQ12kGF9XoLW2MiAQbfCkmfto,146
  ara_cli/artefact_models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  ara_cli/artefact_models/artefact_load.py,sha256=IXzWxP-Q_j_oDGMno0m-OuXCQ7Vd5c_NctshGr4ROBw,621
  ara_cli/artefact_models/artefact_mapping.py,sha256=8aD0spBjkJ8toMAmFawc6UTUxB6-tEEViZXv2I-r88Q,1874
@@ -149,12 +149,12 @@ tests/test_file_classifier.py,sha256=kLWPiePu3F5mkVuI_lK_2QlLh2kXD_Mt2K8KZZ1fAnA
  tests/test_file_creator.py,sha256=D3G7MbgE0m8JmZihxnTryxLco6iZdbV--2CGc0L20FM,2109
  tests/test_file_lister.py,sha256=Q9HwhKKx540EPzTmfzOCnvtAgON0aMmpJE2eOe1J3EA,4324
  tests/test_list_filter.py,sha256=fJA3d_SdaOAUkE7jn68MOVS0THXGghy1fye_64Zvo1U,7964
- tests/test_prompt_handler.py,sha256=GJbKeipXAwKs-IpHlzaFBxB9_G3FlHwTpCGqfBNfSy8,13338
+ tests/test_prompt_handler.py,sha256=Ysxq2e6JFfNpj3bTRFNNThzpDNkAGDphwVf2Ysz2EK0,14980
  tests/test_tag_extractor.py,sha256=nSiAYlTKZ7TLAOtcJpwK5zTWHhFYU0tI5xKnivLc1dU,2712
  tests/test_template_manager.py,sha256=q-LMHRG4rHkD6ON6YW4cpZxUx9hul6Or8wVVRC2kb-8,4099
  tests/test_update_config_prompt.py,sha256=xsqj1WTn4BsG5Q2t-sNPfu7EoMURFcS-hfb5VSXUnJc,6765
- ara_cli-0.1.9.89.dist-info/METADATA,sha256=f0NXWsKYiDAZ1GCsswX9Y2bPbvztjI4LSOFx7zmatio,6739
- ara_cli-0.1.9.89.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- ara_cli-0.1.9.89.dist-info/entry_points.txt,sha256=v4h7MzysTgSIDYfEo3oj4Kz_8lzsRa3hq-KJHEcLVX8,45
- ara_cli-0.1.9.89.dist-info/top_level.txt,sha256=WM4cLHT5DYUaWzLtRj-gu3yVNFpGQ6lLRI3FMmC-38I,14
- ara_cli-0.1.9.89.dist-info/RECORD,,
+ ara_cli-0.1.9.92.dist-info/METADATA,sha256=9X9Ol5Xh8ApM3-XaY1O_FbfNNCjqdLErIR-y5zxQklw,6739
+ ara_cli-0.1.9.92.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ ara_cli-0.1.9.92.dist-info/entry_points.txt,sha256=v4h7MzysTgSIDYfEo3oj4Kz_8lzsRa3hq-KJHEcLVX8,45
+ ara_cli-0.1.9.92.dist-info/top_level.txt,sha256=WM4cLHT5DYUaWzLtRj-gu3yVNFpGQ6lLRI3FMmC-38I,14
+ ara_cli-0.1.9.92.dist-info/RECORD,,
tests/test_prompt_handler.py CHANGED
@@ -6,7 +6,7 @@ from unittest.mock import patch, MagicMock, mock_open, call
  from pathlib import Path

  from ara_cli import prompt_handler
- from ara_cli.ara_config import ARAconfig, LLMConfigItem
+ from ara_cli.ara_config import ARAconfig, LLMConfigItem, ConfigManager

  @pytest.fixture
  def mock_config():
@@ -19,26 +19,30 @@ def mock_config():
  custom_prompt_templates_subdir="custom-prompt-modules",
  ara_prompt_given_list_includes=["*.py"],
  llm_config={
- "gpt-4o": LLMConfigItem(provider="openai", model="openai/gpt-4o", temperature=0.8, max_tokens=1024, max_completion_tokens= None),
- "o3-mini": LLMConfigItem(provider="openai", model="openai/o3-mini", temperature=0.9, max_tokens=2048, max_completion_tokens= None),
+ "gpt-4o": LLMConfigItem(provider="openai", model="openai/gpt-4o", temperature=0.8, max_tokens=1024),
+ "o3-mini": LLMConfigItem(provider="openai", model="openai/o3-mini", temperature=0.9, max_tokens=2048),
  },
- default_llm="gpt-4o"
+ default_llm="gpt-4o",
+ extraction_llm="o3-mini"
  )
  return config

  @pytest.fixture
  def mock_config_manager(mock_config):
  """Patches ConfigManager to ensure it always returns the mock_config."""
- with patch('ara_cli.ara_config.ConfigManager.get_config') as mock_get_config:
+ with patch.object(ConfigManager, 'get_config') as mock_get_config:
  mock_get_config.return_value = mock_config
  yield mock_get_config

  @pytest.fixture(autouse=True)
  def reset_singleton():
- """Resets the LLMSingleton before each test for isolation."""
+ """Resets the LLMSingleton and ConfigManager before each test for isolation."""
  prompt_handler.LLMSingleton._instance = None
  prompt_handler.LLMSingleton._model = None
+ ConfigManager.reset()
  yield
+ ConfigManager.reset()
+

  class TestLLMSingleton:
  """Tests the behavior of the LLMSingleton class."""
@@ -100,14 +104,19 @@ class TestFileIO:

  class TestCoreLogic:
  """Tests functions related to the main business logic."""
+
+ @pytest.fixture(autouse=True)
+ def setup_test_env(self, tmp_path):
+ """Changes CWD to a temporary directory for test isolation."""
+ original_cwd = os.getcwd()
+ os.chdir(tmp_path)
+ yield
+ os.chdir(original_cwd)
+

  @patch('ara_cli.prompt_handler.litellm.completion')
- @patch('ara_cli.prompt_handler.LLMSingleton.get_instance')
- def test_send_prompt(self, mock_get_instance, mock_completion, mock_config):
- mock_llm_instance = MagicMock()
- mock_llm_instance.config_parameters = mock_config.llm_config['gpt-4o'].model_dump()
- mock_get_instance.return_value = mock_llm_instance
-
+ def test_send_prompt(self, mock_completion, mock_config, mock_config_manager):
+ """Tests that send_prompt uses the default LLM by default."""
  mock_chunk = MagicMock()
  mock_chunk.choices[0].delta.content = "test chunk"
  mock_completion.return_value = [mock_chunk]
@@ -116,9 +125,8 @@ class TestCoreLogic:

  result = list(prompt_handler.send_prompt(prompt))

- # Create expected parameters to match the actual implementation
- # The actual send_prompt function copies config_parameters and only removes 'provider'
- expected_params = mock_config.llm_config['gpt-4o'].model_dump()
+ # Check that the parameters for the default model ('gpt-4o') were used
+ expected_params = mock_config.llm_config['gpt-4o'].model_dump(exclude_none=True)
  if 'provider' in expected_params:
  del expected_params['provider']

@@ -130,6 +138,29 @@ class TestCoreLogic:
  assert len(result) == 1
  assert result[0].choices[0].delta.content == "test chunk"

+ @patch('ara_cli.prompt_handler.litellm.completion')
+ def test_send_prompt_uses_extraction_llm(self, mock_completion, mock_config, mock_config_manager):
+ """Tests that send_prompt uses the extraction LLM when specified."""
+ mock_chunk = MagicMock()
+ mock_chunk.choices[0].delta.content = "extraction chunk"
+ mock_completion.return_value = [mock_chunk]
+ prompt = [{"role": "user", "content": "Extract this"}]
+
+ # Call with the 'extraction' purpose
+ result = list(prompt_handler.send_prompt(prompt, purpose='extraction'))
+
+ # Check that the parameters for the extraction model ('o3-mini') were used
+ expected_params = mock_config.llm_config['o3-mini'].model_dump(exclude_none=True)
+ if 'provider' in expected_params:
+ del expected_params['provider']
+
+ mock_completion.assert_called_once_with(
+ messages=prompt,
+ stream=True,
+ **expected_params
+ )
+ assert result[0].choices[0].delta.content == "extraction chunk"
+
  @patch('ara_cli.prompt_handler.send_prompt')
  def test_describe_image(self, mock_send_prompt, tmp_path):
  fake_image_path = tmp_path / "test.png"
@@ -155,11 +186,14 @@ class TestCoreLogic:

  @patch('ara_cli.prompt_handler.Classifier.get_sub_directory', return_value="test_classifier")
  def test_append_headings(self, mock_get_sub, tmp_path):
- os.chdir(tmp_path)
+ # The autouse fixture already handles chdir, so we just use tmp_path for paths
  os.makedirs("ara/test_classifier/my_param.data", exist_ok=True)

  log_file = tmp_path / "ara/test_classifier/my_param.data/test_classifier.prompt_log.md"

+ # Create file first to avoid FileNotFoundError
+ log_file.touch()
+
  prompt_handler.append_headings("test_classifier", "my_param", "PROMPT")
  assert "## PROMPT_1" in log_file.read_text()

@@ -281,12 +315,15 @@ class TestArtefactAndTemplateHandling:
  @patch('ara_cli.prompt_handler.send_prompt')
  @patch('ara_cli.prompt_handler.collect_file_content_by_extension')
  @patch('ara_cli.prompt_handler.append_images_to_message')
- def test_create_and_send_custom_prompt(self, mock_append_images, mock_collect, mock_send):
+ def test_create_and_send_custom_prompt(self, mock_append_images, mock_collect, mock_send, mock_config_manager):
  prompt_data_path = Path(prompt_handler.prompt_data_directory_creation(self.mock_classifier, self.mock_param))

  mock_collect.return_value = ("### GIVENS\ncontent", [{"type": "image_url", "image_url": {}}])

- final_message_list = [{"role": "user", "content": [{"type": "text", "text": "### GIVENS\ncontent"}, {"type": "image_url", "image_url": {}}]}]
+ # The initial message list before appending images
+ initial_message_list = [{'role': 'user', 'content': '### GIVENS\ncontent'}]
+ # The final list after images are appended
+ final_message_list = [{'role': 'user', 'content': ['### GIVENS\ncontent', {'type': 'image_url', 'image_url': {}}]}]
  mock_append_images.return_value = final_message_list

  mock_send.return_value = iter([MagicMock(choices=[MagicMock(delta=MagicMock(content="llm response"))])])
@@ -294,7 +331,8 @@ class TestArtefactAndTemplateHandling:
  prompt_handler.create_and_send_custom_prompt(self.mock_classifier, self.mock_param)

  mock_collect.assert_called_once()
- mock_append_images.assert_called_once()
+ # The append function is called with the message list containing the text and the image data list
+ mock_append_images.assert_called_once_with([{'role': 'user', 'content': '### GIVENS\ncontent'}], [{'type': 'image_url', 'image_url': {}}])
  mock_send.assert_called_once_with(final_message_list)

  artefact_root = self.root / "ara" / self.mock_classifier