ara-cli 0.1.10.5__py3-none-any.whl → 0.1.14.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ara_cli/__init__.py +51 -6
- ara_cli/__main__.py +87 -75
- ara_cli/ara_command_action.py +189 -101
- ara_cli/ara_config.py +187 -128
- ara_cli/ara_subcommands/common.py +2 -2
- ara_cli/ara_subcommands/config.py +221 -0
- ara_cli/ara_subcommands/convert.py +107 -0
- ara_cli/ara_subcommands/fetch.py +41 -0
- ara_cli/ara_subcommands/fetch_agents.py +22 -0
- ara_cli/ara_subcommands/fetch_scripts.py +19 -0
- ara_cli/ara_subcommands/fetch_templates.py +15 -10
- ara_cli/ara_subcommands/list.py +97 -23
- ara_cli/ara_subcommands/prompt.py +266 -106
- ara_cli/artefact_autofix.py +117 -64
- ara_cli/artefact_converter.py +355 -0
- ara_cli/artefact_creator.py +41 -17
- ara_cli/artefact_lister.py +3 -3
- ara_cli/artefact_models/artefact_model.py +1 -1
- ara_cli/artefact_models/artefact_templates.py +0 -9
- ara_cli/artefact_models/feature_artefact_model.py +8 -8
- ara_cli/artefact_reader.py +62 -43
- ara_cli/artefact_scan.py +39 -17
- ara_cli/chat.py +300 -71
- ara_cli/chat_agent/__init__.py +0 -0
- ara_cli/chat_agent/agent_process_manager.py +155 -0
- ara_cli/chat_script_runner/__init__.py +0 -0
- ara_cli/chat_script_runner/script_completer.py +23 -0
- ara_cli/chat_script_runner/script_finder.py +41 -0
- ara_cli/chat_script_runner/script_lister.py +36 -0
- ara_cli/chat_script_runner/script_runner.py +36 -0
- ara_cli/chat_web_search/__init__.py +0 -0
- ara_cli/chat_web_search/web_search.py +263 -0
- ara_cli/children_contribution_updater.py +737 -0
- ara_cli/classifier.py +34 -0
- ara_cli/commands/agent_run_command.py +98 -0
- ara_cli/commands/fetch_agents_command.py +106 -0
- ara_cli/commands/fetch_scripts_command.py +43 -0
- ara_cli/commands/fetch_templates_command.py +39 -0
- ara_cli/commands/fetch_templates_commands.py +39 -0
- ara_cli/commands/list_agents_command.py +39 -0
- ara_cli/commands/load_command.py +4 -3
- ara_cli/commands/load_image_command.py +1 -1
- ara_cli/commands/read_command.py +23 -27
- ara_cli/completers.py +95 -35
- ara_cli/constants.py +2 -0
- ara_cli/directory_navigator.py +37 -4
- ara_cli/error_handler.py +26 -11
- ara_cli/file_loaders/document_reader.py +0 -178
- ara_cli/file_loaders/factories/__init__.py +0 -0
- ara_cli/file_loaders/factories/document_reader_factory.py +32 -0
- ara_cli/file_loaders/factories/file_loader_factory.py +27 -0
- ara_cli/file_loaders/file_loader.py +1 -30
- ara_cli/file_loaders/loaders/__init__.py +0 -0
- ara_cli/file_loaders/{document_file_loader.py → loaders/document_file_loader.py} +1 -1
- ara_cli/file_loaders/loaders/text_file_loader.py +47 -0
- ara_cli/file_loaders/readers/__init__.py +0 -0
- ara_cli/file_loaders/readers/docx_reader.py +49 -0
- ara_cli/file_loaders/readers/excel_reader.py +27 -0
- ara_cli/file_loaders/{markdown_reader.py → readers/markdown_reader.py} +1 -1
- ara_cli/file_loaders/readers/odt_reader.py +59 -0
- ara_cli/file_loaders/readers/pdf_reader.py +54 -0
- ara_cli/file_loaders/readers/pptx_reader.py +104 -0
- ara_cli/file_loaders/tools/__init__.py +0 -0
- ara_cli/llm_utils.py +58 -0
- ara_cli/output_suppressor.py +53 -0
- ara_cli/prompt_chat.py +20 -4
- ara_cli/prompt_extractor.py +47 -32
- ara_cli/prompt_handler.py +123 -17
- ara_cli/tag_extractor.py +8 -7
- ara_cli/template_loader.py +2 -1
- ara_cli/template_manager.py +52 -21
- ara_cli/templates/global-scripts/hello_global.py +1 -0
- ara_cli/templates/prompt-modules/commands/add_scenarios_for_new_behaviour.feature_creation_agent.commands.md +1 -0
- ara_cli/templates/prompt-modules/commands/align_feature_with_implementation_changes.interview_agent.commands.md +1 -0
- ara_cli/templates/prompt-modules/commands/analyze_codebase_and_plan_tasks.interview_agent.commands.md +1 -0
- ara_cli/templates/prompt-modules/commands/choose_best_parent_artefact.interview_agent.commands.md +1 -0
- ara_cli/templates/prompt-modules/commands/create_tasks_from_artefact_content.interview_agent.commands.md +1 -0
- ara_cli/templates/prompt-modules/commands/create_tests_for_uncovered_modules.test_generation_agent.commands.md +1 -0
- ara_cli/templates/prompt-modules/commands/derive_features_from_video_description.feature_creation_agent.commands.md +1 -0
- ara_cli/templates/prompt-modules/commands/describe_agent_capabilities.agent.commands.md +1 -0
- ara_cli/templates/prompt-modules/commands/empty.commands.md +2 -12
- ara_cli/templates/prompt-modules/commands/execute_scoped_todos_in_task.interview_agent.commands.md +1 -0
- ara_cli/templates/prompt-modules/commands/explain_single_file_purpose.interview_agent.commands.md +1 -0
- ara_cli/templates/prompt-modules/commands/extract_file_information_bullets.interview_agent.commands.md +1 -0
- ara_cli/templates/prompt-modules/commands/extract_general.commands.md +12 -0
- ara_cli/templates/prompt-modules/commands/extract_markdown.commands.md +11 -0
- ara_cli/templates/prompt-modules/commands/extract_python.commands.md +13 -0
- ara_cli/templates/prompt-modules/commands/feature_add_or_modifiy_specified_behavior.commands.md +36 -0
- ara_cli/templates/prompt-modules/commands/feature_generate_initial_specified_bevahior.commands.md +53 -0
- ara_cli/templates/prompt-modules/commands/fix_failing_behave_step_definitions.interview_agent.commands.md +1 -0
- ara_cli/templates/prompt-modules/commands/fix_failing_pytest_tests.interview_agent.commands.md +1 -0
- ara_cli/templates/prompt-modules/commands/general_instruction_policy.commands.md +47 -0
- ara_cli/templates/prompt-modules/commands/generate_and_fix_pytest_tests.test_generation_agent.commands.md +1 -0
- ara_cli/templates/prompt-modules/commands/prompt_template_tech_stack_transformer.commands.md +95 -0
- ara_cli/templates/prompt-modules/commands/python_bug_fixing_code.commands.md +34 -0
- ara_cli/templates/prompt-modules/commands/python_generate_code.commands.md +27 -0
- ara_cli/templates/prompt-modules/commands/python_refactoring_code.commands.md +39 -0
- ara_cli/templates/prompt-modules/commands/python_step_definitions_generation_and_fixing.commands.md +40 -0
- ara_cli/templates/prompt-modules/commands/python_unittest_generation_and_fixing.commands.md +48 -0
- ara_cli/templates/prompt-modules/commands/suggest_next_story_child_tasks.interview_agent.commands.md +1 -0
- ara_cli/templates/prompt-modules/commands/summarize_or_transcribe_media.interview_agent.commands.md +1 -0
- ara_cli/templates/prompt-modules/commands/update_feature_to_match_implementation.feature_creation_agent.commands.md +1 -0
- ara_cli/templates/prompt-modules/commands/update_user_story_with_requirements.interview_agent.commands.md +1 -0
- ara_cli/version.py +1 -1
- {ara_cli-0.1.10.5.dist-info → ara_cli-0.1.14.0.dist-info}/METADATA +49 -11
- ara_cli-0.1.14.0.dist-info/RECORD +253 -0
- {ara_cli-0.1.10.5.dist-info → ara_cli-0.1.14.0.dist-info}/WHEEL +1 -1
- tests/test_ara_command_action.py +31 -19
- tests/test_ara_config.py +177 -90
- tests/test_artefact_autofix.py +170 -97
- tests/test_artefact_autofix_integration.py +495 -0
- tests/test_artefact_converter.py +312 -0
- tests/test_artefact_extraction.py +564 -0
- tests/test_artefact_lister.py +11 -8
- tests/test_chat.py +166 -130
- tests/test_chat_givens_images.py +603 -0
- tests/test_chat_script_runner.py +454 -0
- tests/test_children_contribution_updater.py +98 -0
- tests/test_document_loader_office.py +267 -0
- tests/test_llm_utils.py +164 -0
- tests/test_prompt_chat.py +343 -0
- tests/test_prompt_extractor.py +683 -0
- tests/test_prompt_handler.py +416 -214
- tests/test_setup_default_chat_prompt_mode.py +198 -0
- tests/test_tag_extractor.py +95 -49
- tests/test_web_search.py +467 -0
- ara_cli/file_loaders/document_readers.py +0 -233
- ara_cli/file_loaders/file_loaders.py +0 -123
- ara_cli/file_loaders/text_file_loader.py +0 -187
- ara_cli/templates/prompt-modules/blueprints/complete_pytest_unittest.blueprint.md +0 -27
- ara_cli/templates/prompt-modules/blueprints/pytest_unittest_prompt.blueprint.md +0 -32
- ara_cli/templates/prompt-modules/blueprints/task_todo_list_implement_feature_BDD_way.blueprint.md +0 -30
- ara_cli/templates/prompt-modules/commands/artefact_classification.commands.md +0 -9
- ara_cli/templates/prompt-modules/commands/artefact_extension.commands.md +0 -17
- ara_cli/templates/prompt-modules/commands/artefact_formulation.commands.md +0 -14
- ara_cli/templates/prompt-modules/commands/behave_step_generation.commands.md +0 -102
- ara_cli/templates/prompt-modules/commands/code_generation_complex.commands.md +0 -20
- ara_cli/templates/prompt-modules/commands/code_generation_simple.commands.md +0 -13
- ara_cli/templates/prompt-modules/commands/error_fixing.commands.md +0 -20
- ara_cli/templates/prompt-modules/commands/feature_file_update.commands.md +0 -18
- ara_cli/templates/prompt-modules/commands/feature_formulation.commands.md +0 -43
- ara_cli/templates/prompt-modules/commands/js_code_generation_simple.commands.md +0 -13
- ara_cli/templates/prompt-modules/commands/refactoring.commands.md +0 -15
- ara_cli/templates/prompt-modules/commands/refactoring_analysis.commands.md +0 -9
- ara_cli/templates/prompt-modules/commands/reverse_engineer_feature_file.commands.md +0 -15
- ara_cli/templates/prompt-modules/commands/reverse_engineer_program_flow.commands.md +0 -19
- ara_cli-0.1.10.5.dist-info/RECORD +0 -194
- /ara_cli/file_loaders/{binary_file_loader.py → loaders/binary_file_loader.py} +0 -0
- /ara_cli/file_loaders/{image_processor.py → tools/image_processor.py} +0 -0
- {ara_cli-0.1.10.5.dist-info → ara_cli-0.1.14.0.dist-info}/entry_points.txt +0 -0
- {ara_cli-0.1.10.5.dist-info → ara_cli-0.1.14.0.dist-info}/top_level.txt +0 -0
ara_cli/ara_config.py
CHANGED
@@ -1,9 +1,9 @@
-from typing import List, Dict, Optional
+from typing import List, Dict, Optional
 from pydantic import BaseModel, ValidationError, Field, model_validator
 import json
 import os
-from os.path import exists, dirname
 from os import makedirs
+from os.path import exists, dirname
 from functools import lru_cache
 import sys
 import warnings
@@ -14,21 +14,92 @@ DEFAULT_CONFIG_LOCATION = "./ara/.araconfig/ara_config.json"
 class LLMConfigItem(BaseModel):
     provider: str
     model: str
-    temperature: float = Field(ge=0.0, le=
+    temperature: Optional[float] = Field(ge=0.0, le=2.0)
     max_tokens: Optional[int] = None
     max_completion_tokens: Optional[int] = None
 
 
+def get_default_llm_config() -> Dict[str, "LLMConfigItem"]:
+    """Returns the default LLM configuration."""
+    return {
+        "gpt-5.2": LLMConfigItem(
+            provider="openai",
+            model="openai/gpt-5.2",
+            temperature=1,
+            max_completion_tokens=16000,
+        ),
+        "gpt-5-mini": LLMConfigItem(
+            provider="openai", model="openai/gpt-5-mini", temperature=1
+        ),
+        "gpt-5-web": LLMConfigItem(
+            provider="openai",
+            model="openai/gpt-5-search-api",
+            temperature=None,
+            max_completion_tokens=16000,
+        ),
+        "gpt-4o": LLMConfigItem(
+            provider="openai",
+            model="openai/gpt-4o",
+            temperature=0.8,
+            max_tokens=16000,
+        ),
+        "gpt-4o-search-preview": LLMConfigItem(
+            provider="openai",
+            model="openai/gpt-4o-search-preview",
+            temperature=None,
+            max_tokens=None,
+            max_completion_tokens=None,
+        ),
+        "opus-4.5-advanced": LLMConfigItem(
+            provider="anthropic",
+            model="anthropic/claude-opus-4-5-20251101",
+            temperature=0.5,
+            max_tokens=32000,
+        ),
+        "opus-4.1-exceptional": LLMConfigItem(
+            provider="anthropic",
+            model="anthropic/claude-opus-4-1-20250805",
+            temperature=0.5,
+            max_tokens=32000,
+        ),
+        "sonnet-4.5-coding": LLMConfigItem(
+            provider="anthropic",
+            model="anthropic/claude-sonnet-4-5-20250929",
+            temperature=0.5,
+            max_tokens=32000,
+        ),
+        "haiku-4-5": LLMConfigItem(
+            provider="anthropic",
+            model="anthropic/claude-haiku-4-5-20251001",
+            temperature=0.8,
+            max_tokens=32000,
+        ),
+        "together-ai-llama-2": LLMConfigItem(
+            provider="together_ai",
+            model="together_ai/togethercomputer/llama-2-70b",
+            temperature=0.8,
+            max_tokens=4000,
+        ),
+        "groq-llama-3": LLMConfigItem(
+            provider="groq",
+            model="groq/llama3-70b-8192",
+            temperature=0.8,
+            max_tokens=4000,
+        ),
+    }
+
+
 class ARAconfig(BaseModel):
-    ext_code_dirs: List[Dict[str, str]] = Field(
-        {"source_dir": "./src"},
-
-    ])
+    ext_code_dirs: List[Dict[str, str]] = Field(
+        default_factory=lambda: [{"source_dir": "./src"}, {"source_dir": "./tests"}]
+    )
     global_dirs: Optional[List[Dict[str, str]]] = Field(default=[])
     glossary_dir: str = "./glossary"
     doc_dir: str = "./docs"
     local_prompt_templates_dir: str = "./ara/.araconfig"
     custom_prompt_templates_subdir: Optional[str] = "custom-prompt-modules"
+    local_scripts_dir: str = "./ara/.araconfig"
+    custom_scripts_subdir: Optional[str] = "custom-scripts"
     local_ara_templates_dir: str = "./ara/.araconfig/templates/"
     ara_prompt_given_list_includes: List[str] = Field(
         default_factory=lambda: [
@@ -48,63 +119,26 @@ class ARAconfig(BaseModel):
             "*.jpeg",
         ]
     )
-    llm_config
-
-        "gpt-5": LLMConfigItem(
-            provider="openai",
-            model="openai/gpt-5",
-            temperature=1,
-            max_completion_tokens=16000,
-        ),
-        "gpt-5-mini": LLMConfigItem(
-            provider="openai", model="openai/gpt-5-mini-2025-08-07", temperature=1
-        ),
-        "gpt-4o": LLMConfigItem(
-            provider="openai",
-            model="openai/gpt-4o",
-            temperature=0.8,
-            max_tokens=16000,
-        ),
-        "gpt-4.1": LLMConfigItem(
-            provider="openai",
-            model="openai/gpt-4.1",
-            temperature=0.8,
-            max_tokens=16000,
-        ),
-        "o3-mini": LLMConfigItem(
-            provider="openai",
-            model="openai/o3-mini",
-            temperature=1.0,
-            max_tokens=8000,
-        ),
-        "opus-4": LLMConfigItem(
-            provider="anthropic",
-            model="anthropic/claude-opus-4-20250514",
-            temperature=0.5,
-            max_tokens=32000,
-        ),
-        "sonnet-4": LLMConfigItem(
-            provider="anthropic",
-            model="anthropic/claude-sonnet-4-20250514",
-            temperature=0.5,
-            max_tokens=32000,
-        ),
-        "together-ai-llama-2": LLMConfigItem(
-            provider="together_ai",
-            model="together_ai/togethercomputer/llama-2-70b",
-            temperature=0.8,
-            max_tokens=4000,
-        ),
-        "groq-llama-3": LLMConfigItem(
-            provider="groq",
-            model="groq/llama3-70b-8192",
-            temperature=0.8,
-            max_tokens=4000,
-        ),
-        }
-    )
+    # llm_config defaults to the standard set of models
+    llm_config: Dict[str, LLMConfigItem] = Field(default_factory=get_default_llm_config)
     default_llm: Optional[str] = None
     extraction_llm: Optional[str] = None
+    conversion_llm: Optional[str] = None
+
+    def _validate_llm_field(
+        self, field: str, fallback: str, missing_msg: str, invalid_msg: str
+    ):
+        """Helper to validate and set fallback for LLM fields."""
+        value = getattr(self, field)
+        if not value:
+            print(f"Warning: '{field}' is not set. {missing_msg}: '{fallback}'.")
+            setattr(self, field, fallback)
+        elif value not in self.llm_config:
+            print(
+                f"Warning: The configured '{field}' ('{value}') does not exist in 'llm_config'."
+            )
+            print(f"-> Reverting to {invalid_msg}: '{fallback}'.")
+            setattr(self, field, fallback)
 
     @model_validator(mode="after")
     def check_critical_fields(self) -> "ARAconfig":
@@ -113,12 +147,12 @@
             "ext_code_dirs": [{"source_dir": "./src"}, {"source_dir": "./tests"}],
             "local_ara_templates_dir": "./ara/.araconfig/templates/",
             "local_prompt_templates_dir": "./ara/.araconfig",
+            "local_scripts_dir": "./ara/.araconfig",
             "glossary_dir": "./glossary",
         }
 
         for field, default_value in critical_fields.items():
-
-            if not current_value:
+            if not getattr(self, field):
                 print(
                     f"Warning: Value for '{field}' is missing or empty. Using default."
                 )
@@ -132,33 +166,25 @@
             self.extraction_llm = None
             return self
 
-
+        first_available = next(iter(self.llm_config))
+        self._validate_llm_field(
+            "default_llm",
+            first_available,
+            "Defaulting to the first available model",
+            "the first available model",
+        )
 
-
-
-
-
-            self.default_llm = first_available_llm
-        elif self.default_llm not in self.llm_config:
-            print(
-                f"Warning: The configured 'default_llm' ('{self.default_llm}') does not exist in 'llm_config'."
-            )
-            print(
-                f"-> Reverting to the first available model: '{first_available_llm}'."
-            )
-            self.default_llm = first_available_llm
+        # Now used as fallback for others
+        fallback_val = self.default_llm
+        fallback_missing_msg = "Setting it to the same as 'default_llm'"
+        fallback_invalid_msg = "the 'default_llm' value"
 
-
-
-
-
-
-
-            print(
-                f"Warning: The configured 'extraction_llm' ('{self.extraction_llm}') does not exist in 'llm_config'."
-            )
-            print(f"-> Reverting to the 'default_llm' value: '{self.default_llm}'.")
-            self.extraction_llm = self.default_llm
+        self._validate_llm_field(
+            "extraction_llm", fallback_val, fallback_missing_msg, fallback_invalid_msg
+        )
+        self._validate_llm_field(
+            "conversion_llm", fallback_val, fallback_missing_msg, fallback_invalid_msg
+        )
 
         return self
 
@@ -186,71 +212,101 @@ def handle_unrecognized_keys(data: dict) -> dict:
 
 
 # Function to read the JSON file and return an ARAconfig model
-
-
-
-
-
-
-
+
+
+def _create_default_config(filepath: str) -> ARAconfig:
+    """Create and save a default configuration."""
+    print(f"Configuration file not found. Creating a default one at '{filepath}'.")
+    default_config = ARAconfig(llm_config=get_default_llm_config())
+    save_data(filepath, default_config)
+    print("Please review the default configuration and re-run your command.")
+    sys.exit(0)
+
+
+def _load_json_config(filepath: str) -> dict:
+    """Load and parse JSON configuration file."""
 
     def warn_on_duplicate_llm_dict_key(ordered_pairs):
         """Reject duplicate keys."""
         d = {}
         for k, v in ordered_pairs:
             if k in d:
-                warnings.warn(
+                warnings.warn(
+                    f"Duplicate LLM configuration identifier '{k}'. The previous entry will be removed.",
+                    UserWarning,
+                )
             d[k] = v
         return d
 
+    with open(filepath, "r", encoding="utf-8") as file:
+        content = file.read()
+    return json.loads(content, object_pairs_hook=warn_on_duplicate_llm_dict_key)
+
+
+def _correct_validation_errors(data: dict, errors: list) -> ARAconfig:
+    """Correct validation errors and return a valid config."""
+    print("--- Configuration Error Detected ---")
+    print(
+        "Some settings in your configuration file are invalid. Attempting to fix them."
+    )
+
+    corrected_data = data.copy()
+    defaults = ARAconfig(llm_config=get_default_llm_config()).model_dump()
+    error_fields = {err["loc"][0] for err in errors if err["loc"]}
+
+    for field_name in error_fields:
+        print(
+            f"-> Field '{field_name}' is invalid and will be reverted to its default value."
+        )
+        if field_name in corrected_data:
+            corrected_data[field_name] = defaults.get(field_name)
+
+    print("--- End of Error Report ---")
+    return ARAconfig(**corrected_data)
+
+
+@lru_cache(maxsize=1)
+def read_data(filepath: str) -> ARAconfig:
+    """
+    Reads, validates, and repairs the configuration file.
+    If the file doesn't exist, it creates a default one.
+    If the file is invalid, it corrects only the broken parts.
+    """
     ensure_directory_exists(dirname(filepath))
 
     if not exists(filepath):
-
-        default_config = ARAconfig()
-        save_data(filepath, default_config)
-        print("Please review the default configuration and re-run your command.")
-        sys.exit(0)
+        return _create_default_config(filepath)
 
     try:
-
-            content = file.read()
-        data = json.loads(content, object_pairs_hook=warn_on_duplicate_llm_dict_key)
+        data = _load_json_config(filepath)
     except json.JSONDecodeError as e:
         print(f"Error: Invalid JSON in configuration file: {e}")
         print("Creating a new configuration with defaults...")
-        default_config = ARAconfig()
+        default_config = ARAconfig(llm_config=get_default_llm_config())
         save_data(filepath, default_config)
         return default_config
 
     data = handle_unrecognized_keys(data)
 
+    needs_save = False
+    if "llm_config" not in data or not data.get("llm_config"):
+        print(
+            "Info: 'llm_config' is missing or empty. Populating with default LLM configurations."
+        )
+        data["llm_config"] = {
+            k: v.model_dump() for k, v in get_default_llm_config().items()
+        }
+        needs_save = True
+
     try:
         config = ARAconfig(**data)
-
+        if needs_save:
+            save_data(filepath, config)
         return config
     except ValidationError as e:
-
-        print(
-            "Some settings in your configuration file are invalid. Attempting to fix them."
-        )
-
-        corrected_data = data.copy()
-        defaults = ARAconfig().model_dump()
-
-        error_fields = {err["loc"][0] for err in e.errors() if err["loc"]}
-
-        for field_name in error_fields:
-            print(f"-> Field '{field_name}' is invalid and will be reverted to its default value.")
-            if field_name in corrected_data:
-                corrected_data[field_name] = defaults.get(field_name)
-
-        print("--- End of Error Report ---")
-
-        final_config = ARAconfig(**corrected_data)
+        final_config = _correct_validation_errors(data, e.errors())
         save_data(filepath, final_config)
         print(f"Configuration has been corrected and saved to '{filepath}'.")
-
        return final_config
 
 
@@ -266,9 +322,12 @@ class ConfigManager:
     _config_instance = None
 
     @classmethod
-    def get_config(cls, filepath=
+    def get_config(cls, filepath=None) -> ARAconfig:
+        if filepath:
+            return read_data(filepath)
+
         if cls._config_instance is None:
-            cls._config_instance = read_data(
+            cls._config_instance = read_data(DEFAULT_CONFIG_LOCATION)
         return cls._config_instance
 
     @classmethod
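Taken together, these hunks move the default model catalogue into `get_default_llm_config()`, add the `conversion_llm` and script-directory settings, and route all LLM fallback handling through `_validate_llm_field`. A minimal sketch of the resulting behaviour, assuming ara-cli 0.1.14.0 is installed and the `model_validator` runs on construction as the hunks above show (the exact printed warnings are omitted):

```python
# Sketch only (not part of the package): exercising the reworked defaults
# and fallback validation from ara_cli/ara_config.py in 0.1.14.0.
from ara_cli.ara_config import ARAconfig, get_default_llm_config

cfg = ARAconfig(
    llm_config=get_default_llm_config(),
    default_llm="not-a-configured-model",  # unknown key -> reverted by the validator
)

print(cfg.default_llm)     # first key of llm_config ("gpt-5.2" in the new defaults)
print(cfg.extraction_llm)  # unset -> falls back to default_llm
print(cfg.conversion_llm)  # new field in 0.1.14.0, also falls back to default_llm
```
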
ara_cli/ara_subcommands/common.py
CHANGED
@@ -2,13 +2,13 @@ from typing import Optional
 from enum import Enum
 import typer
 from ara_cli.classifier import Classifier
-from ara_cli.
+from ara_cli.constants import VALID_ASPECTS
 from ara_cli.completers import DynamicCompleters
 
 
 # Get classifiers and aspects
 classifiers = Classifier.ordered_classifiers()
-aspects =
+aspects = VALID_ASPECTS
 
 
 # Create enums for better type safety
ara_cli/ara_subcommands/config.py
ADDED
@@ -0,0 +1,221 @@
+"""
+Config subcommand for ara-cli.
+Provides commands for managing ara configuration.
+"""
+import typer
+import os
+from typing import Optional
+
+config_app = typer.Typer(
+    help="Manage ara configuration",
+    no_args_is_help=True,
+)
+
+
+def _show_config_status(config_path: str, current_data: dict) -> None:
+    """Display current configuration status."""
+    import typer
+    typer.echo("Current configuration status:")
+    typer.echo(f"  Config file: {config_path}")
+    typer.echo(
+        f"  LLM configs: {len(current_data.get('llm_config', {}))} models defined")
+    typer.echo(f"  Default LLM: {current_data.get('default_llm', 'not set')}")
+    typer.echo(
+        f"  Extraction LLM: {current_data.get('extraction_llm', 'not set')}")
+    typer.echo("")
+    typer.echo("Use flags to reset specific parts:")
+    typer.echo("  --all            Reset everything")
+    typer.echo("  --llm-config     Reset LLM configurations")
+    typer.echo("  --default-llm    Reset default LLM selection")
+    typer.echo("  --paths          Reset directory paths")
+
+
+def _prepare_changes(
+    all_config: bool, llm_config: bool, default_llm: bool,
+    extraction_llm: bool, paths: bool, current_data: dict, defaults: dict
+) -> tuple[list, dict]:
+    """Prepare the list of changes and new data."""
+    changes = []
+    new_data = current_data.copy()
+
+    if all_config:
+        changes.append("All configuration values")
+        return changes, defaults.copy()
+
+    if llm_config:
+        changes.append("llm_config (LLM configurations)")
+        new_data["llm_config"] = defaults["llm_config"]
+
+    if default_llm:
+        first_llm = next(
+            iter(new_data.get("llm_config", defaults["llm_config"])))
+        changes.append(f"default_llm -> '{first_llm}'")
+        new_data["default_llm"] = first_llm
+
+    if extraction_llm:
+        target_llm = new_data.get("default_llm") or next(
+            iter(new_data.get("llm_config", defaults["llm_config"])))
+        changes.append(f"extraction_llm -> '{target_llm}'")
+        new_data["extraction_llm"] = target_llm
+
+    if paths:
+        path_fields = [
+            "ext_code_dirs", "global_dirs", "glossary_dir", "doc_dir",
+            "local_prompt_templates_dir", "local_scripts_dir", "local_ara_templates_dir",
+        ]
+        for field in path_fields:
+            if field in defaults:
+                new_data[field] = defaults[field]
+        changes.append(
+            "Directory paths (ext_code_dirs, glossary_dir, doc_dir, etc.)")
+
+    return changes, new_data
+
+
+def _apply_config_changes(config_path: str, new_data: dict) -> None:
+    """Validate and save configuration changes."""
+    import typer
+    from ara_cli.ara_config import ARAconfig, save_data, ConfigManager
+
+    validated_config = ARAconfig(**new_data)
+    save_data(config_path, validated_config)
+    ConfigManager.reset()
+
+    typer.echo("")
+    typer.echo("✓ Configuration reset successfully.")
+    typer.echo(f"  Saved to: {config_path}")
+
+
+@config_app.command("reset")
+def reset_config(
+    all_config: bool = typer.Option(
+        False, "--all", "-a", help="Reset entire configuration to defaults"),
+    llm_config: bool = typer.Option(
+        False, "--llm-config", help="Reset only llm_config to defaults"),
+    default_llm: bool = typer.Option(
+        False, "--default-llm", help="Reset only default_llm to first available LLM"),
+    extraction_llm: bool = typer.Option(
+        False, "--extraction-llm", help="Reset only extraction_llm to match default_llm"),
+    paths: bool = typer.Option(
+        False, "--paths", help="Reset directory paths to defaults"),
+    dry_run: bool = typer.Option(
+        False, "--dry-run", help="Show what would be reset without making changes"),
+    yes: bool = typer.Option(False, "--yes", "-y",
+                             help="Skip confirmation prompt"),
+):
+    """
+    Reset ara configuration to default values.
+
+    If no flags are specified, shows current configuration status.
+    Use specific flags to reset only certain parts of the configuration.
+
+    Examples:
+        ara config reset --llm-config       # Reset only LLM configurations
+        ara config reset --all              # Reset everything to defaults
+        ara config reset --paths --dry-run  # Preview path reset without applying
+    """
+    from ara_cli.ara_config import DEFAULT_CONFIG_LOCATION, ARAconfig, get_default_llm_config
+    import json
+
+    config_path = DEFAULT_CONFIG_LOCATION
+
+    if not os.path.exists(config_path):
+        typer.echo(f"Configuration file not found at '{config_path}'.")
+        typer.echo("Run any ara command to create a default configuration.")
+        raise typer.Exit(1)
+
+    try:
+        with open(config_path, "r", encoding="utf-8") as f:
+            current_data = json.load(f)
+    except json.JSONDecodeError as e:
+        typer.echo(f"Error reading configuration: {e}")
+        raise typer.Exit(1)
+
+    no_flags = not any(
+        [all_config, llm_config, default_llm, extraction_llm, paths])
+    if no_flags:
+        _show_config_status(config_path, current_data)
+        return
+
+    default_config = ARAconfig(llm_config=get_default_llm_config())
+    defaults = default_config.model_dump()
+
+    changes, new_data = _prepare_changes(
+        all_config, llm_config, default_llm, extraction_llm, paths, current_data, defaults
+    )
+
+    typer.echo("The following will be reset to defaults:")
+    for change in changes:
+        typer.echo(f"  • {change}")
+
+    if dry_run:
+        typer.echo("")
+        typer.echo("[Dry run - no changes made]")
+        return
+
+    if not yes:
+        typer.echo("")
+        if not typer.confirm("Proceed with reset?"):
+            typer.echo("Reset cancelled.")
+            raise typer.Exit(0)
+
+    try:
+        _apply_config_changes(config_path, new_data)
+    except Exception as e:
+        typer.echo(f"Error saving configuration: {e}", err=True)
+        raise typer.Exit(1)
+
+
+@config_app.command("show")
+def show_config(
+    llm_only: bool = typer.Option(
+        False,
+        "--llm",
+        help="Show only LLM configurations"
+    ),
+    json_output: bool = typer.Option(
+        False,
+        "--json",
+        help="Output as JSON"
+    ),
+):
+    """
+    Show current ara configuration.
+
+    Examples:
+        ara config show          # Show full configuration
+        ara config show --llm    # Show only LLM configurations
+        ara config show --json   # Output as JSON
+    """
+    from ara_cli.ara_config import DEFAULT_CONFIG_LOCATION
+    import json
+
+    config_path = DEFAULT_CONFIG_LOCATION
+
+    if not os.path.exists(config_path):
+        typer.echo(f"Configuration file not found at '{config_path}'.")
+        raise typer.Exit(1)
+
+    with open(config_path, "r", encoding="utf-8") as f:
+        config_data = json.load(f)
+
+    if llm_only:
+        output_data = {
+            "llm_config": config_data.get("llm_config", {}),
+            "default_llm": config_data.get("default_llm"),
+            "extraction_llm": config_data.get("extraction_llm"),
+        }
+    else:
+        output_data = config_data
+
+    if json_output:
+        typer.echo(json.dumps(output_data, indent=2))
+    else:
+        typer.echo(f"Configuration file: {config_path}")
+        typer.echo("")
+        typer.echo(json.dumps(output_data, indent=2))
+
+
+def register(app: typer.Typer):
+    """Register the config command group with the main app."""
+    app.add_typer(config_app, name="config")