ara-cli 0.1.9.87__py3-none-any.whl → 0.1.9.91__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
ara_cli/ara_config.py CHANGED
@@ -14,6 +14,7 @@ class LLMConfigItem(BaseModel):
     model: str
     temperature: float = Field(ge=0.0, le=1.0)
     max_tokens: Optional[int] = None
+    max_completion_tokens: Optional[int] = None
 
 class ARAconfig(BaseModel):
     ext_code_dirs: List[Dict[str, str]] = Field(default_factory=lambda: [
@@ -42,50 +43,62 @@ class ARAconfig(BaseModel):
         "*.jpeg",
     ])
     llm_config: Dict[str, LLMConfigItem] = Field(default_factory=lambda: {
+        "gpt-5": LLMConfigItem(
+            provider="openai",
+            model="openai/gpt-5",
+            temperature=1,
+            max_completion_tokens=16000
+        ),
+        "gpt-5-mini": LLMConfigItem(
+            provider="openai",
+            model="openai/gpt-5-mini-2025-08-07",
+            temperature=1
+        ),
         "gpt-4o": LLMConfigItem(
             provider="openai",
             model="openai/gpt-4o",
             temperature=0.8,
-            max_tokens=16384
+            max_tokens=16000
         ),
         "gpt-4.1": LLMConfigItem(
             provider="openai",
             model="openai/gpt-4.1",
             temperature=0.8,
-            max_tokens=1024
+            max_tokens=16000
        ),
         "o3-mini": LLMConfigItem(
             provider="openai",
             model="openai/o3-mini",
             temperature=1.0,
-            max_tokens=1024
+            max_tokens=8000
         ),
         "opus-4": LLMConfigItem(
             provider="anthropic",
             model="anthropic/claude-opus-4-20250514",
-            temperature=0.8,
+            temperature=0.5,
             max_tokens=32000
         ),
         "sonnet-4": LLMConfigItem(
             provider="anthropic",
             model="anthropic/claude-sonnet-4-20250514",
-            temperature=0.8,
-            max_tokens=1024
+            temperature=0.5,
+            max_tokens=32000
         ),
         "together-ai-llama-2": LLMConfigItem(
             provider="together_ai",
             model="together_ai/togethercomputer/llama-2-70b",
             temperature=0.8,
-            max_tokens=1024
+            max_tokens=4000
         ),
         "groq-llama-3": LLMConfigItem(
             provider="groq",
             model="groq/llama3-70b-8192",
             temperature=0.8,
-            max_tokens=1024
+            max_tokens=4000
         )
     })
-    default_llm: Optional[str] = "gpt-4o"
+    default_llm: Optional[str] = "gpt-5"
+    extraction_llm: Optional[str] = "gpt-5-mini"
 
     @model_validator(mode='after')
     def check_critical_fields(self) -> 'ARAconfig':
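
Note that the new gpt-5 entries set max_completion_tokens rather than max_tokens, so the optional max_tokens field stays unset for them. A minimal sketch (not part of the package) of how these defaults look once prepared for LiteLLM, mirroring the exclude_none dump and provider stripping added in the prompt_handler.py diff below:

    from ara_cli.ara_config import ARAconfig

    config = ARAconfig()
    item = config.llm_config[config.default_llm]   # default_llm is now "gpt-5"
    params = item.model_dump(exclude_none=True)    # max_tokens=None is dropped
    params.pop("provider", None)                   # provider is routing info, not a LiteLLM kwarg
    # params == {'model': 'openai/gpt-5', 'temperature': 1.0, 'max_completion_tokens': 16000}
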
ara_cli/chat.py CHANGED
@@ -433,7 +433,7 @@ Start chatting (type 'HELP'/'h' for available commands, 'QUIT'/'q' to exit chat
                 break
 
             is_file_document = any(file_name_lower.endswith(ext)
-                               for ext in document_type_extensions)
+                                   for ext in document_type_extensions)
 
             if is_file_document:
                 return self.load_document_file(
@@ -532,7 +532,7 @@ Start chatting (type 'HELP'/'h' for available commands, 'QUIT'/'q' to exit chat
         return [x for x in glob.glob(text + '*')]
 
     def _retrieve_ara_config(self):
-        from ara_cli.prompt_handler import ConfigManager
+        from ara_cli.ara_config import ConfigManager
         return ConfigManager().get_config()
 
     def _retrieve_llm_config(self):
@@ -625,7 +625,7 @@ Start chatting (type 'HELP'/'h' for available commands, 'QUIT'/'q' to exit chat
     @cmd2.with_category(CATEGORY_LLM_CONTROL)
     def do_CHOOSE_MODEL(self, model_name):
         from ara_cli.prompt_handler import LLMSingleton
-        from ara_cli.ara_config import DEFAULT_CONFIG_LOCATION, save_data
+        from ara_cli.ara_config import DEFAULT_CONFIG_LOCATION, save_data, ConfigManager
         from ara_cli.directory_navigator import DirectoryNavigator
 
         original_dir = os.getcwd()
@@ -634,7 +634,11 @@ Start chatting (type 'HELP'/'h' for available commands, 'QUIT'/'q' to exit chat
         os.chdir('..')
 
         if not self._verify_llm_choice(model_name):
+            os.chdir(original_dir)
             return
+
+        ConfigManager.reset()
+        self.config = ConfigManager.get_config()
         self.config.default_llm = model_name
         save_data(filepath=DEFAULT_CONFIG_LOCATION, config=self.config)
 
@@ -642,11 +646,39 @@ Start chatting (type 'HELP'/'h' for available commands, 'QUIT'/'q' to exit chat
 
         os.chdir(original_dir)
 
+    @cmd2.with_category(CATEGORY_LLM_CONTROL)
+    def do_CHOOSE_EXTRACTION_MODEL(self, model_name):
+        """Choose the language model for extraction tasks."""
+        from ara_cli.ara_config import DEFAULT_CONFIG_LOCATION, save_data, ConfigManager
+        from ara_cli.directory_navigator import DirectoryNavigator
+
+        original_dir = os.getcwd()
+        navigator = DirectoryNavigator()
+        navigator.navigate_to_target()
+        os.chdir('..')
+
+        if not self._verify_llm_choice(model_name):
+            os.chdir(original_dir)
+            return
+
+        ConfigManager.reset()
+        self.config = ConfigManager.get_config()
+        self.config.extraction_llm = model_name
+        save_data(filepath=DEFAULT_CONFIG_LOCATION, config=self.config)
+        print(f"Extraction model switched to '{model_name}'")
+
+        os.chdir(original_dir)
+
     @cmd2.with_category(CATEGORY_LLM_CONTROL)
     def do_CURRENT_MODEL(self, _):
-        from ara_cli.prompt_handler import LLMSingleton
+        """Displays the current default and extraction language models."""
+        from ara_cli.ara_config import ConfigManager
 
-        print(LLMSingleton.get_model())
+        ConfigManager.reset()
+        config = self._retrieve_ara_config()
+
+        print(f"Current Reasoning LLM (default_llm): {config.default_llm}")
+        print(f"Current Extraction LLM (extraction_llm): {config.extraction_llm}")
 
     def _complete_llms(self, text, line, begidx, endidx):
         llm_config = self._retrieve_llm_config()
@@ -662,6 +694,9 @@ Start chatting (type 'HELP'/'h' for available commands, 'QUIT'/'q' to exit chat
     def complete_CHOOSE_MODEL(self, text, line, begidx, endidx):
         return self._complete_llms(text, line, begidx, endidx)
 
+    def complete_CHOOSE_EXTRACTION_MODEL(self, text, line, begidx, endidx):
+        return self._complete_llms(text, line, begidx, endidx)
+
     @cmd2.with_category(CATEGORY_CHAT_CONTROL)
     def do_NEW(self, chat_name):
         """Create a new chat. Optionally provide a chat name in-line: NEW new_chat"""
@@ -859,3 +894,95 @@ Start chatting (type 'HELP'/'h' for available commands, 'QUIT'/'q' to exit chat
         completions = [classifier for classifier in classifiers if classifier.startswith(text)]
 
         return completions
+
+
+    def _get_plural_template_type(self, template_type: str) -> str:
+        """Determines the plural form of a template type."""
+        plurals = {"commands": "commands", "rules": "rules"}
+        return plurals.get(template_type, f"{template_type}s")
+
+    def _find_project_root(self) -> str | None:
+        """
+        Finds the project root by searching for an 'ara' directory,
+        starting from the chat file's directory and moving upwards.
+        """
+        current_dir = os.path.dirname(self.chat_name)
+        while True:
+            if os.path.isdir(os.path.join(current_dir, 'ara')):
+                return current_dir
+            parent_dir = os.path.dirname(current_dir)
+            if parent_dir == current_dir:  # Reached the filesystem root
+                return None
+            current_dir = parent_dir
+
+    def _gather_templates_from_path(self, search_path: str, templates_set: set, prefix: str = ""):
+        """
+        Scans a given path for items and adds them to the provided set,
+        optionally prepending a prefix.
+        """
+        import glob
+        if not os.path.isdir(search_path):
+            return
+        for path in glob.glob(os.path.join(search_path, '*')):
+            templates_set.add(f"{prefix}{os.path.basename(path)}")
+
+    def _get_available_templates(self, template_type: str) -> list[str]:
+        """
+        Scans for available global and project-local custom templates.
+        This method safely searches for template files without changing the
+        current directory, making it safe for use in autocompleters.
+
+        Args:
+            template_type: The type of template to search for (e.g., 'rules').
+
+        Returns:
+            A sorted list of unique template names. Global templates are
+            prefixed with 'global/'.
+        """
+        from ara_cli.template_manager import TemplatePathManager
+
+        plural_type = self._get_plural_template_type(template_type)
+        templates = set()
+
+        # 1. Find Global Templates
+        try:
+            global_base_path = TemplatePathManager.get_template_base_path()
+            global_template_dir = os.path.join(global_base_path, "prompt-modules", plural_type)
+            self._gather_templates_from_path(global_template_dir, templates, prefix="global/")
+        except Exception:
+            pass  # Silently ignore if global templates are not found
+
+        # 2. Find Local Custom Templates
+        try:
+            project_root = self._find_project_root()
+            if project_root:
+                local_templates_base = os.path.join(project_root, self.config.local_prompt_templates_dir)
+                custom_dir = os.path.join(local_templates_base, self.config.custom_prompt_templates_subdir, plural_type)
+                self._gather_templates_from_path(custom_dir, templates)
+        except Exception:
+            pass  # Silently ignore if local templates cannot be resolved
+
+        return sorted(list(templates))
+
+    def _template_completer(self, text: str, template_type: str) -> list[str]:
+        """Generic completer for different template types."""
+        available_templates = self._get_available_templates(template_type)
+        if not text:
+            return available_templates
+        return [t for t in available_templates if t.startswith(text)]
+
+    def complete_LOAD_RULES(self, text, line, begidx, endidx):
+        """Completer for the LOAD_RULES command."""
+        return self._template_completer(text, "rules")
+
+    def complete_LOAD_INTENTION(self, text, line, begidx, endidx):
+        """Completer for the LOAD_INTENTION command."""
+        return self._template_completer(text, "intention")
+
+    def complete_LOAD_COMMANDS(self, text, line, begidx, endidx):
+        """Completer for the LOAD_COMMANDS command."""
+        return self._template_completer(text, "commands")
+
+    def complete_LOAD_BLUEPRINT(self, text, line, begidx, endidx):
+        """Completer for the LOAD_BLUEPRINT command."""
+        return self._template_completer(text, "blueprint")
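
Taken together, the new commands let the reasoning and extraction roles be inspected and switched independently from inside a chat. An illustrative session, assuming the 0.1.9.91 defaults (the output lines follow the print statements in the handlers above; the exact prompt display is omitted):

    CHOOSE_EXTRACTION_MODEL gpt-5-mini
    Extraction model switched to 'gpt-5-mini'
    CURRENT_MODEL
    Current Reasoning LLM (default_llm): gpt-5
    Current Extraction LLM (extraction_llm): gpt-5-mini
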
ara_cli/prompt_extractor.py CHANGED
@@ -192,7 +192,7 @@ def handle_existing_file(filename, block_content, skip_query=False):
     messages = [{"role": "user", "content": prompt_text}]
     response = ""
 
-    for chunk in send_prompt(messages):
+    for chunk in send_prompt(messages, purpose='extraction'):
         content = chunk.choices[0].delta.content
         if content:
             response += content
ara_cli/prompt_handler.py CHANGED
@@ -71,29 +71,54 @@ def read_string_from_file(path):
         text = file.read()
     return text
 
+def _get_model_config(purpose: str) -> dict:
+    """
+    Resolves model ID by purpose and returns its prepared configuration for LiteLLM.
+    """
+    config = ConfigManager().get_config()
+
+    # Determine which model to use based on the purpose
+    model_id = config.default_llm
+    if purpose == 'extraction':
+        # Use extraction_llm if it exists, otherwise fall back to default_llm
+        model_id = getattr(config, 'extraction_llm', config.default_llm)
+
+    selected_config = config.llm_config.get(str(model_id))
+
+    if not selected_config:
+        raise ValueError(f"No configuration found for model '{model_id}' used for '{purpose}'")
+
+    config_parameters = selected_config.model_dump(exclude_none=True)
+    config_parameters.pop("provider", None)
+
+    return config_parameters
+
+
+def _is_valid_message(message: dict) -> bool:
+    """
+    Checks if a message in a prompt is valid (i.e., not empty).
+    It handles both string content and list content (for multimodal inputs).
+    """
+    content = message.get('content')
+
+    if isinstance(content, str):
+        return content.strip() != ''
+
+    if isinstance(content, list):
+        # For multimodal content, check if there's at least one non-empty text part.
+        return any(
+            item.get('type') == 'text' and item.get('text', '').strip() != ''
+            for item in content
+        )
+
+    return False
+
+
+def send_prompt(prompt, purpose='default'):
+    """Prepares and sends a prompt to the LLM, streaming the response."""
+    config_parameters = _get_model_config(purpose)
 
-def send_prompt(prompt):
-    chat = LLMSingleton.get_instance()
-
-    config_parameters = chat.config_parameters.copy()
-    if "provider" in config_parameters:
-        del config_parameters["provider"]
-
-    filtered_prompt = list(filter(
-        lambda msg: (
-            msg.get('content') and
-            isinstance(msg['content'], list) and
-            any(
-                item.get('type') == 'text' and
-                item.get('text', '').strip() != ''
-                for item in msg['content']
-            )
-        ) or (
-            isinstance(msg.get('content'), str) and
-            msg['content'].strip() != ''
-        ),
-        prompt
-    ))
+    filtered_prompt = [msg for msg in prompt if _is_valid_message(msg)]
 
     completion = litellm.completion(
         **config_parameters,
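
The purpose parameter is the only change to send_prompt's public signature; existing callers keep the default-model behavior. A minimal caller sketch mirroring the streaming loop in the prompt_extractor.py hunk above (assumes provider credentials are configured for LiteLLM):

    from ara_cli.prompt_handler import send_prompt

    messages = [{"role": "user", "content": "Extract the code blocks."}]
    response = ""
    # purpose='extraction' resolves extraction_llm, falling back to default_llm if unset
    for chunk in send_prompt(messages, purpose='extraction'):
        content = chunk.choices[0].delta.content
        if content:
            response += content
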
ara_cli/version.py CHANGED
@@ -1,2 +1,2 @@
 # version.py
-__version__ = "0.1.9.87" # fith parameter like .0 for local install test purposes only. official numbers should be 4 digit numbers
+__version__ = "0.1.9.91" # fith parameter like .0 for local install test purposes only. official numbers should be 4 digit numbers
ara_cli-0.1.9.87.dist-info/METADATA → ara_cli-0.1.9.91.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: ara_cli
-Version: 0.1.9.87
+Version: 0.1.9.91
 Summary: Powerful, open source command-line tool for managing, structuring and automating software development artifacts in line with Business-Driven Development (BDD) and AI-assisted processes
 Description-Content-Type: text/markdown
 Requires-Dist: litellm
ara_cli-0.1.9.87.dist-info/RECORD → ara_cli-0.1.9.91.dist-info/RECORD CHANGED
@@ -2,7 +2,7 @@ ara_cli/__init__.py,sha256=0zl7IegxTid26EBGLav_fXZ4CCIV3H5TfAoFQiOHjvg,148
 ara_cli/__main__.py,sha256=J5DCDLRZ6UcpYwM1-NkjaLo4PTetcSj2dB4HrrftkUw,2064
 ara_cli/ara_command_action.py,sha256=_LHE2V5hbJxN7ccYiptuPktRfbTnXmQEt_D_FxDBlBY,22456
 ara_cli/ara_command_parser.py,sha256=I-e9W-QwTIMKMzlHycSlCWCyBFQfiFYvGre1XsDbrFI,20573
-ara_cli/ara_config.py,sha256=KVITofnYlIVyhf50qwUO5fu8vlxjDwRjPyKzqEhEC_M,6982
+ara_cli/ara_config.py,sha256=B_o6llPUNVOluOt1d-ioE7KZMa9wEuxONh2G9JyTYLQ,7410
 ara_cli/artefact_autofix.py,sha256=WVTiIR-jo4YKmmz4eS3qTFvl45W1YKwAk1XSuz9QX10,20015
 ara_cli/artefact_creator.py,sha256=0Ory6cB-Ahkw-BDNb8QHnTbp_OHGABdkb9bhwcEdcIc,6063
 ara_cli/artefact_deleter.py,sha256=Co4wwCH3yW8H9NrOq7_2p5571EeHr0TsfE-H8KqoOfY,1900
@@ -12,7 +12,7 @@ ara_cli/artefact_lister.py,sha256=jhk4n4eqp7hDIq07q43QzS7-36BM3OfZ4EABxCeOGcw,47
 ara_cli/artefact_reader.py,sha256=Pho0_Eqm7kD9CNbVMhKb6mkNM0I3iJiCJXbXmVp1DJU,7827
 ara_cli/artefact_renamer.py,sha256=Hnz_3zD9xxnBa1FHyUE6mIktLk_9ttP2rFRvQIkmz-o,4061
 ara_cli/artefact_scan.py,sha256=msPCm-vPWOAZ_e_z5GylXxq1MtNlmJ4zvKrsdOFCWF4,4813
-ara_cli/chat.py,sha256=qQINBi5VCzlZOcQqDqUJY0p6VlAPiAWwWICSe7fvcDQ,32540
+ara_cli/chat.py,sha256=Y_LSYvpntnoRikYSOI95rWPImA-Rr_xppNIhN-cjjUY,37966
 ara_cli/classifier.py,sha256=zWskj7rBYdqYBGjksBm46iTgVU5IIf2PZsJr4qeiwVU,1878
 ara_cli/codefusionretriever.py,sha256=fCHgXdIBRzkVAnapX-KI2NQ44XbrrF4tEQmn5J6clUI,1980
 ara_cli/codehierachieretriever.py,sha256=Xd3EgEWWhkSf1TmTWtf8X5_YvyE_4B66nRrqarwSiTU,1182
@@ -24,14 +24,14 @@ ara_cli/filename_validator.py,sha256=Aw9PL8d5-Ymhp3EY6lDrUBk3cudaNqo1Uw5RzPpI1jA
 ara_cli/list_filter.py,sha256=qKGwwQsrWe7L5FbdxEbBYD1bbbi8c-RMypjXqXvLbgs,5291
 ara_cli/output_suppressor.py,sha256=nwiHaQLwabOjMoJOeUESBnZszGMxrQZfJ3N2OvahX7Y,389
 ara_cli/prompt_chat.py,sha256=kd_OINDQFit6jN04bb7mzgY259JBbRaTaNp9F-webkc,1346
-ara_cli/prompt_extractor.py,sha256=6xLGd4ZJHDKkamEUQcdRbKM3ilBtxBjp0X2o8wrvHb0,7732
-ara_cli/prompt_handler.py,sha256=5FoVCNmmzrS4hjHL4qKteQt2A5MIycoZStkJrVL5l_4,20136
+ara_cli/prompt_extractor.py,sha256=Sk1aQkvn8W_YNcGhfChsbT19wUAWEJmOHTr3mveyQww,7754
+ara_cli/prompt_handler.py,sha256=pCQ2LMYawGIMlWKI5EN6QdLn0UNEk-Eckkz4SVVHR10,21122
 ara_cli/prompt_rag.py,sha256=ydlhe4CUqz0jdzlY7jBbpKaf_5fjMrAZKnriKea3ZAg,7485
 ara_cli/run_file_lister.py,sha256=XbrrDTJXp1LFGx9Lv91SNsEHZPP-PyEMBF_P4btjbDA,2360
 ara_cli/tag_extractor.py,sha256=TGdaQOVnjy25R0zDsAifB67C5oom0Fwo24s0_fr5A_I,3151
 ara_cli/template_manager.py,sha256=YwrN6AYPpl6ZrW8BVQpVXx8yTRf-oNpJUIKeg4NAggs,6606
 ara_cli/update_config_prompt.py,sha256=Oy9vNTw6UhDohyTEfSKkqE5ifEMPlmWNYkKHgUrK_pY,4607
-ara_cli/version.py,sha256=27waJFP_fluwJJNMwbe3_NlHJaeI6oPvAK2CQIhI63w,146
+ara_cli/version.py,sha256=B3OzxM9cfU7TOWcR4tcsGLFojByOhZd98cMMsL45DYc,146
 ara_cli/artefact_models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 ara_cli/artefact_models/artefact_load.py,sha256=IXzWxP-Q_j_oDGMno0m-OuXCQ7Vd5c_NctshGr4ROBw,621
 ara_cli/artefact_models/artefact_mapping.py,sha256=8aD0spBjkJ8toMAmFawc6UTUxB6-tEEViZXv2I-r88Q,1874
@@ -134,7 +134,7 @@ ara_cli/templates/specification_breakdown_files/template.technology.exploration.
 ara_cli/templates/specification_breakdown_files/template.technology.md,sha256=bySiksz-8xtq0Nnj4svqe2MgUftWrVkbK9AcrDUE3KY,952
 tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 tests/test_ara_command_action.py,sha256=JTLqXM9BSMlU33OQgrk_sZnoowFJZKZAx8q-st-wa34,25821
-tests/test_ara_config.py,sha256=gYqBB4Z7lB0PTSZrJRL7ekC0t9HZ_Rq3JF9EnOFmN5U,14280
+tests/test_ara_config.py,sha256=H5GwDbab0GMSa6IbHdruzmbsHy5Ia0xX0uteJdfZ9Rg,14277
 tests/test_artefact_autofix.py,sha256=pApZ-N0dW8Ujt-cNLbgvd4bhiIIK8oXb-saLf6QlA-8,25022
 tests/test_artefact_fuzzy_search.py,sha256=5Sh3_l9QK8-WHn6JpGPU1b6h4QEnl2JoMq1Tdp2cj1U,1261
 tests/test_artefact_link_updater.py,sha256=biqbEp2jCOz8giv72hu2P2hDfeJfJ9OrVGdAv5d9cK4,2191
@@ -149,12 +149,12 @@ tests/test_file_classifier.py,sha256=kLWPiePu3F5mkVuI_lK_2QlLh2kXD_Mt2K8KZZ1fAnA
 tests/test_file_creator.py,sha256=D3G7MbgE0m8JmZihxnTryxLco6iZdbV--2CGc0L20FM,2109
 tests/test_file_lister.py,sha256=Q9HwhKKx540EPzTmfzOCnvtAgON0aMmpJE2eOe1J3EA,4324
 tests/test_list_filter.py,sha256=fJA3d_SdaOAUkE7jn68MOVS0THXGghy1fye_64Zvo1U,7964
-tests/test_prompt_handler.py,sha256=7V_AwXd2co1krnx5RKZRK-hqXS50nq77mX-Yx_QO0w0,13084
+tests/test_prompt_handler.py,sha256=Ysxq2e6JFfNpj3bTRFNNThzpDNkAGDphwVf2Ysz2EK0,14980
 tests/test_tag_extractor.py,sha256=nSiAYlTKZ7TLAOtcJpwK5zTWHhFYU0tI5xKnivLc1dU,2712
 tests/test_template_manager.py,sha256=q-LMHRG4rHkD6ON6YW4cpZxUx9hul6Or8wVVRC2kb-8,4099
 tests/test_update_config_prompt.py,sha256=xsqj1WTn4BsG5Q2t-sNPfu7EoMURFcS-hfb5VSXUnJc,6765
-ara_cli-0.1.9.87.dist-info/METADATA,sha256=WKN4uhNXDHMDwwlxkCW5KbBV_2cbeixRkQYsLZNBdLY,6739
-ara_cli-0.1.9.87.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-ara_cli-0.1.9.87.dist-info/entry_points.txt,sha256=v4h7MzysTgSIDYfEo3oj4Kz_8lzsRa3hq-KJHEcLVX8,45
-ara_cli-0.1.9.87.dist-info/top_level.txt,sha256=WM4cLHT5DYUaWzLtRj-gu3yVNFpGQ6lLRI3FMmC-38I,14
-ara_cli-0.1.9.87.dist-info/RECORD,,
+ara_cli-0.1.9.91.dist-info/METADATA,sha256=V9fNcWjRmFt46pPhw1qNZKejVfobM82LT0s2kXmEaM4,6739
+ara_cli-0.1.9.91.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ara_cli-0.1.9.91.dist-info/entry_points.txt,sha256=v4h7MzysTgSIDYfEo3oj4Kz_8lzsRa3hq-KJHEcLVX8,45
+ara_cli-0.1.9.91.dist-info/top_level.txt,sha256=WM4cLHT5DYUaWzLtRj-gu3yVNFpGQ6lLRI3FMmC-38I,14
+ara_cli-0.1.9.91.dist-info/RECORD,,
tests/test_ara_config.py CHANGED
@@ -97,8 +97,8 @@ class TestARAconfig:
         config = ARAconfig()
         assert config.ext_code_dirs == [{"source_dir": "./src"}, {"source_dir": "./tests"}]
         assert config.glossary_dir == "./glossary"
-        assert config.default_llm == "gpt-4o"
-        assert "gpt-4o" in config.llm_config
+        assert config.default_llm == "gpt-5"
+        assert "gpt-5" in config.llm_config
 
     @patch('sys.stdout', new_callable=StringIO)
     def test_check_critical_fields_with_empty_list_reverts_to_default(self, mock_stdout):
@@ -228,7 +228,7 @@ class TestReadData:
         result = read_data("config.json")
 
         assert isinstance(result, ARAconfig)
-        assert result.default_llm == "gpt-4o"  # Should be the default config
+        assert result.default_llm == "gpt-5"  # Should be the default config
 
         output = mock_stdout.getvalue()
         assert "Error: Invalid JSON in configuration file" in output
tests/test_prompt_handler.py CHANGED
@@ -6,7 +6,7 @@ from unittest.mock import patch, MagicMock, mock_open, call
 from pathlib import Path
 
 from ara_cli import prompt_handler
-from ara_cli.ara_config import ARAconfig, LLMConfigItem
+from ara_cli.ara_config import ARAconfig, LLMConfigItem, ConfigManager
 
 @pytest.fixture
 def mock_config():
@@ -22,23 +22,27 @@ def mock_config():
             "gpt-4o": LLMConfigItem(provider="openai", model="openai/gpt-4o", temperature=0.8, max_tokens=1024),
             "o3-mini": LLMConfigItem(provider="openai", model="openai/o3-mini", temperature=0.9, max_tokens=2048),
         },
-        default_llm="gpt-4o"
+        default_llm="gpt-4o",
+        extraction_llm="o3-mini"
     )
     return config
 
 @pytest.fixture
 def mock_config_manager(mock_config):
     """Patches ConfigManager to ensure it always returns the mock_config."""
-    with patch('ara_cli.ara_config.ConfigManager.get_config') as mock_get_config:
+    with patch.object(ConfigManager, 'get_config') as mock_get_config:
         mock_get_config.return_value = mock_config
         yield mock_get_config
 
 @pytest.fixture(autouse=True)
 def reset_singleton():
-    """Resets the LLMSingleton before each test for isolation."""
+    """Resets the LLMSingleton and ConfigManager before each test for isolation."""
     prompt_handler.LLMSingleton._instance = None
     prompt_handler.LLMSingleton._model = None
+    ConfigManager.reset()
     yield
+    ConfigManager.reset()
+
 
 class TestLLMSingleton:
     """Tests the behavior of the LLMSingleton class."""
@@ -100,14 +104,19 @@ class TestFileIO:
 
 class TestCoreLogic:
     """Tests functions related to the main business logic."""
+
+    @pytest.fixture(autouse=True)
+    def setup_test_env(self, tmp_path):
+        """Changes CWD to a temporary directory for test isolation."""
+        original_cwd = os.getcwd()
+        os.chdir(tmp_path)
+        yield
+        os.chdir(original_cwd)
+
 
     @patch('ara_cli.prompt_handler.litellm.completion')
-    @patch('ara_cli.prompt_handler.LLMSingleton.get_instance')
-    def test_send_prompt(self, mock_get_instance, mock_completion, mock_config):
-        mock_llm_instance = MagicMock()
-        mock_llm_instance.config_parameters = mock_config.llm_config['gpt-4o'].model_dump()
-        mock_get_instance.return_value = mock_llm_instance
-
+    def test_send_prompt(self, mock_completion, mock_config, mock_config_manager):
+        """Tests that send_prompt uses the default LLM by default."""
         mock_chunk = MagicMock()
         mock_chunk.choices[0].delta.content = "test chunk"
         mock_completion.return_value = [mock_chunk]
@@ -116,8 +125,10 @@
 
         result = list(prompt_handler.send_prompt(prompt))
 
+        # Check that the parameters for the default model ('gpt-4o') were used
         expected_params = mock_config.llm_config['gpt-4o'].model_dump(exclude_none=True)
-        del expected_params['provider']
+        if 'provider' in expected_params:
+            del expected_params['provider']
 
         mock_completion.assert_called_once_with(
             messages=prompt,
@@ -127,6 +138,29 @@
         assert len(result) == 1
         assert result[0].choices[0].delta.content == "test chunk"
 
+    @patch('ara_cli.prompt_handler.litellm.completion')
+    def test_send_prompt_uses_extraction_llm(self, mock_completion, mock_config, mock_config_manager):
+        """Tests that send_prompt uses the extraction LLM when specified."""
+        mock_chunk = MagicMock()
+        mock_chunk.choices[0].delta.content = "extraction chunk"
+        mock_completion.return_value = [mock_chunk]
+        prompt = [{"role": "user", "content": "Extract this"}]
+
+        # Call with the 'extraction' purpose
+        result = list(prompt_handler.send_prompt(prompt, purpose='extraction'))
+
+        # Check that the parameters for the extraction model ('o3-mini') were used
+        expected_params = mock_config.llm_config['o3-mini'].model_dump(exclude_none=True)
+        if 'provider' in expected_params:
+            del expected_params['provider']
+
+        mock_completion.assert_called_once_with(
+            messages=prompt,
+            stream=True,
+            **expected_params
+        )
+        assert result[0].choices[0].delta.content == "extraction chunk"
+
     @patch('ara_cli.prompt_handler.send_prompt')
     def test_describe_image(self, mock_send_prompt, tmp_path):
         fake_image_path = tmp_path / "test.png"
@@ -152,11 +186,14 @@
 
     @patch('ara_cli.prompt_handler.Classifier.get_sub_directory', return_value="test_classifier")
     def test_append_headings(self, mock_get_sub, tmp_path):
-        os.chdir(tmp_path)
+        # The autouse fixture already handles chdir, so we just use tmp_path for paths
         os.makedirs("ara/test_classifier/my_param.data", exist_ok=True)
 
         log_file = tmp_path / "ara/test_classifier/my_param.data/test_classifier.prompt_log.md"
 
+        # Create file first to avoid FileNotFoundError
+        log_file.touch()
+
         prompt_handler.append_headings("test_classifier", "my_param", "PROMPT")
         assert "## PROMPT_1" in log_file.read_text()
 
@@ -278,12 +315,15 @@ class TestArtefactAndTemplateHandling:
     @patch('ara_cli.prompt_handler.send_prompt')
     @patch('ara_cli.prompt_handler.collect_file_content_by_extension')
    @patch('ara_cli.prompt_handler.append_images_to_message')
-    def test_create_and_send_custom_prompt(self, mock_append_images, mock_collect, mock_send):
+    def test_create_and_send_custom_prompt(self, mock_append_images, mock_collect, mock_send, mock_config_manager):
        prompt_data_path = Path(prompt_handler.prompt_data_directory_creation(self.mock_classifier, self.mock_param))
 
         mock_collect.return_value = ("### GIVENS\ncontent", [{"type": "image_url", "image_url": {}}])
 
-        final_message_list = [{"role": "user", "content": [{"type": "text", "text": "### GIVENS\ncontent"}, {"type": "image_url", "image_url": {}}]}]
+        # The initial message list before appending images
+        initial_message_list = [{'role': 'user', 'content': '### GIVENS\ncontent'}]
+        # The final list after images are appended
+        final_message_list = [{'role': 'user', 'content': ['### GIVENS\ncontent', {'type': 'image_url', 'image_url': {}}]}]
         mock_append_images.return_value = final_message_list
 
         mock_send.return_value = iter([MagicMock(choices=[MagicMock(delta=MagicMock(content="llm response"))])])
@@ -291,7 +331,8 @@
         prompt_handler.create_and_send_custom_prompt(self.mock_classifier, self.mock_param)
 
         mock_collect.assert_called_once()
-        mock_append_images.assert_called_once()
+        # The append function is called with the message list containing the text and the image data list
+        mock_append_images.assert_called_once_with([{'role': 'user', 'content': '### GIVENS\ncontent'}], [{'type': 'image_url', 'image_url': {}}])
         mock_send.assert_called_once_with(final_message_list)
 
         artefact_root = self.root / "ara" / self.mock_classifier