ara-cli 0.1.10.5__py3-none-any.whl → 0.1.14.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ara_cli/__init__.py +51 -6
- ara_cli/__main__.py +87 -75
- ara_cli/ara_command_action.py +189 -101
- ara_cli/ara_config.py +187 -128
- ara_cli/ara_subcommands/common.py +2 -2
- ara_cli/ara_subcommands/config.py +221 -0
- ara_cli/ara_subcommands/convert.py +107 -0
- ara_cli/ara_subcommands/fetch.py +41 -0
- ara_cli/ara_subcommands/fetch_agents.py +22 -0
- ara_cli/ara_subcommands/fetch_scripts.py +19 -0
- ara_cli/ara_subcommands/fetch_templates.py +15 -10
- ara_cli/ara_subcommands/list.py +97 -23
- ara_cli/ara_subcommands/prompt.py +266 -106
- ara_cli/artefact_autofix.py +117 -64
- ara_cli/artefact_converter.py +355 -0
- ara_cli/artefact_creator.py +41 -17
- ara_cli/artefact_lister.py +3 -3
- ara_cli/artefact_models/artefact_model.py +1 -1
- ara_cli/artefact_models/artefact_templates.py +0 -9
- ara_cli/artefact_models/feature_artefact_model.py +8 -8
- ara_cli/artefact_reader.py +62 -43
- ara_cli/artefact_scan.py +39 -17
- ara_cli/chat.py +300 -71
- ara_cli/chat_agent/__init__.py +0 -0
- ara_cli/chat_agent/agent_process_manager.py +155 -0
- ara_cli/chat_script_runner/__init__.py +0 -0
- ara_cli/chat_script_runner/script_completer.py +23 -0
- ara_cli/chat_script_runner/script_finder.py +41 -0
- ara_cli/chat_script_runner/script_lister.py +36 -0
- ara_cli/chat_script_runner/script_runner.py +36 -0
- ara_cli/chat_web_search/__init__.py +0 -0
- ara_cli/chat_web_search/web_search.py +263 -0
- ara_cli/children_contribution_updater.py +737 -0
- ara_cli/classifier.py +34 -0
- ara_cli/commands/agent_run_command.py +98 -0
- ara_cli/commands/fetch_agents_command.py +106 -0
- ara_cli/commands/fetch_scripts_command.py +43 -0
- ara_cli/commands/fetch_templates_command.py +39 -0
- ara_cli/commands/fetch_templates_commands.py +39 -0
- ara_cli/commands/list_agents_command.py +39 -0
- ara_cli/commands/load_command.py +4 -3
- ara_cli/commands/load_image_command.py +1 -1
- ara_cli/commands/read_command.py +23 -27
- ara_cli/completers.py +95 -35
- ara_cli/constants.py +2 -0
- ara_cli/directory_navigator.py +37 -4
- ara_cli/error_handler.py +26 -11
- ara_cli/file_loaders/document_reader.py +0 -178
- ara_cli/file_loaders/factories/__init__.py +0 -0
- ara_cli/file_loaders/factories/document_reader_factory.py +32 -0
- ara_cli/file_loaders/factories/file_loader_factory.py +27 -0
- ara_cli/file_loaders/file_loader.py +1 -30
- ara_cli/file_loaders/loaders/__init__.py +0 -0
- ara_cli/file_loaders/{document_file_loader.py → loaders/document_file_loader.py} +1 -1
- ara_cli/file_loaders/loaders/text_file_loader.py +47 -0
- ara_cli/file_loaders/readers/__init__.py +0 -0
- ara_cli/file_loaders/readers/docx_reader.py +49 -0
- ara_cli/file_loaders/readers/excel_reader.py +27 -0
- ara_cli/file_loaders/{markdown_reader.py → readers/markdown_reader.py} +1 -1
- ara_cli/file_loaders/readers/odt_reader.py +59 -0
- ara_cli/file_loaders/readers/pdf_reader.py +54 -0
- ara_cli/file_loaders/readers/pptx_reader.py +104 -0
- ara_cli/file_loaders/tools/__init__.py +0 -0
- ara_cli/llm_utils.py +58 -0
- ara_cli/output_suppressor.py +53 -0
- ara_cli/prompt_chat.py +20 -4
- ara_cli/prompt_extractor.py +47 -32
- ara_cli/prompt_handler.py +123 -17
- ara_cli/tag_extractor.py +8 -7
- ara_cli/template_loader.py +2 -1
- ara_cli/template_manager.py +52 -21
- ara_cli/templates/global-scripts/hello_global.py +1 -0
- ara_cli/templates/prompt-modules/commands/add_scenarios_for_new_behaviour.feature_creation_agent.commands.md +1 -0
- ara_cli/templates/prompt-modules/commands/align_feature_with_implementation_changes.interview_agent.commands.md +1 -0
- ara_cli/templates/prompt-modules/commands/analyze_codebase_and_plan_tasks.interview_agent.commands.md +1 -0
- ara_cli/templates/prompt-modules/commands/choose_best_parent_artefact.interview_agent.commands.md +1 -0
- ara_cli/templates/prompt-modules/commands/create_tasks_from_artefact_content.interview_agent.commands.md +1 -0
- ara_cli/templates/prompt-modules/commands/create_tests_for_uncovered_modules.test_generation_agent.commands.md +1 -0
- ara_cli/templates/prompt-modules/commands/derive_features_from_video_description.feature_creation_agent.commands.md +1 -0
- ara_cli/templates/prompt-modules/commands/describe_agent_capabilities.agent.commands.md +1 -0
- ara_cli/templates/prompt-modules/commands/empty.commands.md +2 -12
- ara_cli/templates/prompt-modules/commands/execute_scoped_todos_in_task.interview_agent.commands.md +1 -0
- ara_cli/templates/prompt-modules/commands/explain_single_file_purpose.interview_agent.commands.md +1 -0
- ara_cli/templates/prompt-modules/commands/extract_file_information_bullets.interview_agent.commands.md +1 -0
- ara_cli/templates/prompt-modules/commands/extract_general.commands.md +12 -0
- ara_cli/templates/prompt-modules/commands/extract_markdown.commands.md +11 -0
- ara_cli/templates/prompt-modules/commands/extract_python.commands.md +13 -0
- ara_cli/templates/prompt-modules/commands/feature_add_or_modifiy_specified_behavior.commands.md +36 -0
- ara_cli/templates/prompt-modules/commands/feature_generate_initial_specified_bevahior.commands.md +53 -0
- ara_cli/templates/prompt-modules/commands/fix_failing_behave_step_definitions.interview_agent.commands.md +1 -0
- ara_cli/templates/prompt-modules/commands/fix_failing_pytest_tests.interview_agent.commands.md +1 -0
- ara_cli/templates/prompt-modules/commands/general_instruction_policy.commands.md +47 -0
- ara_cli/templates/prompt-modules/commands/generate_and_fix_pytest_tests.test_generation_agent.commands.md +1 -0
- ara_cli/templates/prompt-modules/commands/prompt_template_tech_stack_transformer.commands.md +95 -0
- ara_cli/templates/prompt-modules/commands/python_bug_fixing_code.commands.md +34 -0
- ara_cli/templates/prompt-modules/commands/python_generate_code.commands.md +27 -0
- ara_cli/templates/prompt-modules/commands/python_refactoring_code.commands.md +39 -0
- ara_cli/templates/prompt-modules/commands/python_step_definitions_generation_and_fixing.commands.md +40 -0
- ara_cli/templates/prompt-modules/commands/python_unittest_generation_and_fixing.commands.md +48 -0
- ara_cli/templates/prompt-modules/commands/suggest_next_story_child_tasks.interview_agent.commands.md +1 -0
- ara_cli/templates/prompt-modules/commands/summarize_or_transcribe_media.interview_agent.commands.md +1 -0
- ara_cli/templates/prompt-modules/commands/update_feature_to_match_implementation.feature_creation_agent.commands.md +1 -0
- ara_cli/templates/prompt-modules/commands/update_user_story_with_requirements.interview_agent.commands.md +1 -0
- ara_cli/version.py +1 -1
- {ara_cli-0.1.10.5.dist-info → ara_cli-0.1.14.0.dist-info}/METADATA +49 -11
- ara_cli-0.1.14.0.dist-info/RECORD +253 -0
- {ara_cli-0.1.10.5.dist-info → ara_cli-0.1.14.0.dist-info}/WHEEL +1 -1
- tests/test_ara_command_action.py +31 -19
- tests/test_ara_config.py +177 -90
- tests/test_artefact_autofix.py +170 -97
- tests/test_artefact_autofix_integration.py +495 -0
- tests/test_artefact_converter.py +312 -0
- tests/test_artefact_extraction.py +564 -0
- tests/test_artefact_lister.py +11 -8
- tests/test_chat.py +166 -130
- tests/test_chat_givens_images.py +603 -0
- tests/test_chat_script_runner.py +454 -0
- tests/test_children_contribution_updater.py +98 -0
- tests/test_document_loader_office.py +267 -0
- tests/test_llm_utils.py +164 -0
- tests/test_prompt_chat.py +343 -0
- tests/test_prompt_extractor.py +683 -0
- tests/test_prompt_handler.py +416 -214
- tests/test_setup_default_chat_prompt_mode.py +198 -0
- tests/test_tag_extractor.py +95 -49
- tests/test_web_search.py +467 -0
- ara_cli/file_loaders/document_readers.py +0 -233
- ara_cli/file_loaders/file_loaders.py +0 -123
- ara_cli/file_loaders/text_file_loader.py +0 -187
- ara_cli/templates/prompt-modules/blueprints/complete_pytest_unittest.blueprint.md +0 -27
- ara_cli/templates/prompt-modules/blueprints/pytest_unittest_prompt.blueprint.md +0 -32
- ara_cli/templates/prompt-modules/blueprints/task_todo_list_implement_feature_BDD_way.blueprint.md +0 -30
- ara_cli/templates/prompt-modules/commands/artefact_classification.commands.md +0 -9
- ara_cli/templates/prompt-modules/commands/artefact_extension.commands.md +0 -17
- ara_cli/templates/prompt-modules/commands/artefact_formulation.commands.md +0 -14
- ara_cli/templates/prompt-modules/commands/behave_step_generation.commands.md +0 -102
- ara_cli/templates/prompt-modules/commands/code_generation_complex.commands.md +0 -20
- ara_cli/templates/prompt-modules/commands/code_generation_simple.commands.md +0 -13
- ara_cli/templates/prompt-modules/commands/error_fixing.commands.md +0 -20
- ara_cli/templates/prompt-modules/commands/feature_file_update.commands.md +0 -18
- ara_cli/templates/prompt-modules/commands/feature_formulation.commands.md +0 -43
- ara_cli/templates/prompt-modules/commands/js_code_generation_simple.commands.md +0 -13
- ara_cli/templates/prompt-modules/commands/refactoring.commands.md +0 -15
- ara_cli/templates/prompt-modules/commands/refactoring_analysis.commands.md +0 -9
- ara_cli/templates/prompt-modules/commands/reverse_engineer_feature_file.commands.md +0 -15
- ara_cli/templates/prompt-modules/commands/reverse_engineer_program_flow.commands.md +0 -19
- ara_cli-0.1.10.5.dist-info/RECORD +0 -194
- /ara_cli/file_loaders/{binary_file_loader.py → loaders/binary_file_loader.py} +0 -0
- /ara_cli/file_loaders/{image_processor.py → tools/image_processor.py} +0 -0
- {ara_cli-0.1.10.5.dist-info → ara_cli-0.1.14.0.dist-info}/entry_points.txt +0 -0
- {ara_cli-0.1.10.5.dist-info → ara_cli-0.1.14.0.dist-info}/top_level.txt +0 -0

ara_cli/chat_agent/agent_process_manager.py
@@ -0,0 +1,155 @@
import os

try:
    import pty
except ImportError:
    pty = None
import sys
import stat
from ara_cli.error_handler import AraError, ErrorLevel


class AgentProcessManager:
    """
    Manages the lifecycle of binary, interactive agents.
    This manager is designed to run agents that require full TTY control,
    handing over the terminal to the agent process until it exits.
    """

    def __init__(self, chat_instance=None):
        self.chat_instance = chat_instance
        self.agent_process = None

    def get_agent_path(self, agent_name):
        """
        Constructs the full path to the agent binary by reliably finding
        the project root.
        """
        if not self.chat_instance:
            raise AraError("Chat instance is not available to find project root.")

        base_dir = self.chat_instance._find_project_root()
        if not base_dir:
            raise AraError(
                "Could not find project root from chat instance. Is this a valid ara project?"
            )

        return os.path.join(base_dir, "ara", ".araconfig", "agents", agent_name)

    def _ensure_executable(self, agent_path):
        """
        Validates existence and ensures the binary is executable.
        """
        if (agent_path != "--help") and (not os.path.exists(agent_path)):
            raise AraError(f"Agent binary not found at: {agent_path}")

        if not os.path.isfile(agent_path):
            raise AraError(f"Agent path does not point to a file: {agent_path}")

        if not os.access(agent_path, os.X_OK):
            try:
                os.chmod(agent_path, os.stat(agent_path).st_mode | stat.S_IEXEC)
                print(f"Made agent binary executable: {agent_path}")
            except Exception as e:
                raise AraError(
                    f"Agent binary is not executable and could not be changed: {agent_path}. Error: {e}"
                )

    def _print_session_banner(self, agent_name, is_start=True):
        """
        Handles UI printing for start and end of sessions.
        """
        print("\n" + "=" * 50)
        if is_start:
            print(f"Starting interactive agent: {agent_name}")
            print("You are now in an interactive session with the agent.")
            print(
                "To exit, use the agent's own exit command (e.g., '/quit', '/exit', or Ctrl+C)."
            )
            print("You will be returned to the 'ara>' prompt after the agent exits.")
        else:
            print("Returned to ara-cli prompt.")
        print("=" * 50 + ("\n" if is_start else ""))

        sys.stdout.flush()
        sys.stderr.flush()

    def _handle_process_exit(self, agent_name, return_code):
        """
        Analyzes the exit status code.
        """
        print("\n" + "=" * 50)

        if pty:
            if os.WIFEXITED(return_code):
                exit_code = os.WEXITSTATUS(return_code)
                status_msg = (
                    f"Agent '{agent_name}' finished successfully."
                    if exit_code == 0
                    else f"Agent '{agent_name}' exited with code: {exit_code}."
                )
                print(status_msg)
            elif os.WIFSIGNALED(return_code):
                signal_num = os.WTERMSIG(return_code)
                print(f"Agent '{agent_name}' terminated by signal: {signal_num}.")
            else:
                print(
                    f"Agent '{agent_name}' exited with an unexpected status: {return_code}."
                )
        else:
            # Windows/Non-pty fallback: return_code is the actual exit code
            status_msg = (
                f"Agent '{agent_name}' finished successfully."
                if return_code == 0
                else f"Agent '{agent_name}' exited with code: {return_code}."
            )
            print(status_msg)

    def run_agent(self, agent_name, agent_args):
        """
        Finds, validates, and runs a binary agent using a pseudo-terminal (pty).
        Refactored to maintain low CCN.
        """
        if os.name == "nt":
            raise AraError(
                "Agent execution is not supported on Windows platforms.",
                level=ErrorLevel.WARNING,
            )

        agent_path = self.get_agent_path(agent_name)

        # Validation Logic Extracted
        self._ensure_executable(agent_path)

        command = [agent_path] + agent_args

        # UI Logic Extracted
        self._print_session_banner(agent_name, is_start=True)

        try:
            # Execution Logic
            if pty:
                return_code = pty.spawn(command)
            else:
                # Fallback if pty is missing on non-Windows (unlikely but safe)
                import subprocess

                return_code = subprocess.call(command)

            # Exit Status Logic Extracted
            self._handle_process_exit(agent_name, return_code)

        except FileNotFoundError:
            raise AraError(f"Failed to execute. Command not found: {agent_path}")
        except Exception as e:
            raise AraError(f"An error occurred while trying to run the agent: {e}")
        finally:
            self._print_session_banner(agent_name, is_start=False)

    def cleanup_agent_process(self):
        """
        Placeholder for cleanup.
        """
        if hasattr(self, "chat_instance") and self.chat_instance:
            self.chat_instance.prompt = "ara> "
        pass
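
A minimal usage sketch of the new agent runner, assuming a stand-in chat object (the real one comes from ara_cli.chat and provides _find_project_root); the stub class, project path, and agent name below are illustrative only:

from ara_cli.chat_agent.agent_process_manager import AgentProcessManager


class _StubChat:
    """Hypothetical stand-in exposing the single hook the manager needs."""
    prompt = "ara> "

    def _find_project_root(self):
        # Assumed to return the directory that contains the 'ara' folder.
        return "/path/to/project"


manager = AgentProcessManager(chat_instance=_StubChat())
# Resolves <project>/ara/.araconfig/agents/<agent_name>, checks and fixes the
# executable bit, hands the terminal to the agent via pty.spawn, and prints a
# closing banner once the agent exits.
manager.run_agent("my_agent", ["--help"])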

ara_cli/chat_script_runner/__init__.py
File without changes

ara_cli/chat_script_runner/script_completer.py
@@ -0,0 +1,23 @@
from ara_cli.chat_script_runner.script_lister import ScriptLister

class ScriptCompleter:
    def __init__(self):
        self.script_lister = ScriptLister()

    def __call__(self, text, line, begidx, endidx):
        if line.startswith("rpy global/"):
            return self._complete_global_scripts(text)

        return self._complete_all_scripts(text)

    def _complete_all_scripts(self, text):
        all_scripts = self.script_lister.get_all_scripts()
        if not text:
            return all_scripts
        return [s for s in all_scripts if s.startswith(text)]

    def _complete_global_scripts(self, text):
        global_scripts = self.script_lister.get_global_scripts()
        if not text:
            return global_scripts
        return [s for s in global_scripts if s.startswith(text)]

ara_cli/chat_script_runner/script_finder.py
@@ -0,0 +1,41 @@
import os
from ara_cli.ara_config import ConfigManager


class ScriptFinder:
    def __init__(self):
        config = ConfigManager.get_config()
        # Convert to absolute path NOW, before any chdir operations
        # This ensures scripts are found after Chat.start() changes cwd
        self.local_prompt_templates_dir = os.path.abspath(
            config.local_prompt_templates_dir
        )

    def get_custom_scripts_dir(self):
        return os.path.join(self.local_prompt_templates_dir, "custom-scripts")

    def get_global_scripts_dir(self):
        return os.path.join(self.local_prompt_templates_dir, "global-scripts")

    def find_script(self, script_name: str) -> str | None:
        # Handle explicit global path for backward compatibility or specific cases
        if script_name.startswith("global/"):
            script_path = os.path.join(
                self.get_global_scripts_dir(), script_name.replace("global/", ""))
            if os.path.exists(script_path):
                return script_path
            return None

        # 1. Search in custom-scripts first (allows overriding global scripts)
        custom_script_path = os.path.join(
            self.get_custom_scripts_dir(), script_name)
        if os.path.exists(custom_script_path):
            return custom_script_path

        # 2. If not found in custom, fall back to global-scripts
        global_script_path = os.path.join(
            self.get_global_scripts_dir(), script_name)
        if os.path.exists(global_script_path):
            return global_script_path

        return None

ara_cli/chat_script_runner/script_lister.py
@@ -0,0 +1,36 @@
import os
import glob
from ara_cli.chat_script_runner.script_finder import ScriptFinder


class ScriptLister:
    def __init__(self):
        self.script_finder = ScriptFinder()

    def get_all_scripts(self):
        custom_scripts = self.get_custom_scripts()
        global_scripts = self.get_global_scripts()

        # Custom scripts without prefix, global scripts with 'global/' prefix
        prefixed_global_scripts = [f"global/{s}" for s in global_scripts]

        # Return a single, sorted list for autocompletion
        return sorted(custom_scripts + prefixed_global_scripts)

    def get_custom_scripts(self):
        custom_scripts_dir = self.script_finder.get_custom_scripts_dir()
        if not custom_scripts_dir or not os.path.isdir(custom_scripts_dir):
            return []
        return [
            os.path.basename(f)
            for f in glob.glob(os.path.join(custom_scripts_dir, "*.py"))
        ]

    def get_global_scripts(self):
        global_scripts_dir = self.script_finder.get_global_scripts_dir()
        if not global_scripts_dir or not os.path.isdir(global_scripts_dir):
            return []
        return [
            os.path.basename(f)
            for f in glob.glob(os.path.join(global_scripts_dir, "*.py"))
        ]

ara_cli/chat_script_runner/script_runner.py
@@ -0,0 +1,36 @@
import os
import subprocess
from ara_cli.chat_script_runner.script_finder import ScriptFinder
from ara_cli.chat_script_runner.script_lister import ScriptLister

class ScriptRunner:
    def __init__(self, chat_instance):
        self.chat_instance = chat_instance
        self.script_finder = ScriptFinder()
        self.script_lister = ScriptLister()

    def run_script(self, script_name: str, args: list[str] = None):
        script_path = self.script_finder.find_script(script_name)
        if not script_path:
            return f"Script '{script_name}' not found."

        command = ["python", script_path]
        if args:
            command.extend(args)

        try:
            result = subprocess.run(
                command,
                capture_output=True,
                text=True,
                check=True,
            )
            return result.stdout
        except subprocess.CalledProcessError as e:
            return f"Error running script: {e}\n{e.stderr}"

    def get_available_scripts(self):
        return self.script_lister.get_all_scripts()

    def get_global_scripts(self):
        return self.script_lister.get_global_scripts()
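
A quick sketch of how these three chat_script_runner classes work together, assuming an initialized ara project whose prompt-templates directory contains custom-scripts/ and global-scripts/ subfolders (the hello_global.py name and its argument below are illustrative):

from ara_cli.chat_script_runner.script_runner import ScriptRunner

# chat_instance is stored on the runner but not needed for run_script itself.
runner = ScriptRunner(chat_instance=None)

# Custom scripts are listed by bare name, global ones with a 'global/' prefix.
print(runner.get_available_scripts())

# Resolution order: custom-scripts first, then global-scripts; an explicit
# 'global/' prefix bypasses the custom directory entirely.
print(runner.run_script("hello_global.py"))
print(runner.run_script("global/hello_global.py", ["--verbose"]))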

ara_cli/chat_web_search/__init__.py
File without changes

ara_cli/chat_web_search/web_search.py
@@ -0,0 +1,263 @@
"""
Native web search implementation for OpenAI and Anthropic.
Uses the default LLM from ara_config to determine which provider to use.
Includes source citations at the end of search results.

OpenAI API Compatibility:
- Responses API: Uses `web_search` tool with models like gpt-5, o4-mini
- Chat Completions: Uses specialized models gpt-5-search-api, gpt-4o-search-preview
"""
import os
from typing import Generator, Tuple, Optional, List, Dict

from ara_cli.prompt_handler import LLMSingleton
from ara_cli.error_handler import AraError


# OpenAI models that support web search via Responses API
OPENAI_RESPONSES_API_MODELS = [
    "gpt-5", "gpt-5.1", "gpt-5.2", "o3", "o4-mini",
    "openai/gpt-5", "openai/gpt-5.1", "openai/gpt-5.2", "openai/o3", "openai/o4-mini",
]

# OpenAI models that use Chat Completions API with built-in search
OPENAI_CHAT_COMPLETIONS_SEARCH_MODELS = [
    "gpt-5-search-api", "gpt-4o-search-preview", "gpt-4o-mini-search-preview",
    "openai/gpt-5-search-api", "openai/gpt-4o-search-preview", "openai/gpt-4o-mini-search-preview",
]

OPENAI_WEB_SEARCH_MODELS = OPENAI_RESPONSES_API_MODELS + \
    OPENAI_CHAT_COMPLETIONS_SEARCH_MODELS

ANTHROPIC_WEB_SEARCH_MODELS = [
    "claude-sonnet-4-5-20250929", "claude-sonnet-4-20250514",
    "claude-haiku-4-5-20251001", "claude-3-5-haiku-latest",
    "claude-opus-4-5-20251101", "claude-opus-4-1-20250805", "claude-opus-4-20250514",
    "anthropic/claude-sonnet-4-5-20250929", "anthropic/claude-sonnet-4-20250514",
    "anthropic/claude-haiku-4-5-20251001",
    "anthropic/claude-3-5-haiku-latest", "anthropic/claude-opus-4-5-20251101",
    "anthropic/claude-opus-4-1-20250805", "anthropic/claude-opus-4-20250514",
]


def is_web_search_supported(model: str) -> Tuple[bool, Optional[str]]:
    """Check if the model supports web search and return the provider."""
    if model in OPENAI_WEB_SEARCH_MODELS:
        return True, "openai"
    if model in ANTHROPIC_WEB_SEARCH_MODELS:
        return True, "anthropic"
    return False, None


def get_supported_models_message(model: str) -> str:
    """Return a message listing all supported web search models."""
    return (
        f"Web search is not supported by the current default model: {model}\n"
        "Please choose one of the following models:\n"
        "==OpenAI (Responses API)==\n"
        "\tgpt-5, gpt-5.1, gpt-5.2, o3, o4-mini\n"
        "==OpenAI (Chat Completions)==\n"
        "\tgpt-5-search-api, gpt-4o-search-preview, gpt-4o-mini-search-preview\n"
        "==Anthropic==\n"
        "\tclaude-sonnet-4-5-20250929, claude-sonnet-4-20250514\n"
        "\tclaude-haiku-4-5-20251001, claude-3-5-haiku-latest\n"
        "\tclaude-opus-4-5-20251101, claude-opus-4-1-20250805, claude-opus-4-20250514\n"
        "\nNote: Models can be prefixed with 'openai/' or 'anthropic/' for LiteLLM format.\n"
    )


def _get_raw_model_name(model: str) -> str:
    """Strip provider prefix from model name if present."""
    for prefix in ("openai/", "anthropic/"):
        if model.startswith(prefix):
            return model[len(prefix):]
    return model


def _deduplicate_citations(citations: List[Dict[str, str]]) -> List[Dict[str, str]]:
    """Remove duplicate citations by URL, preserving order."""
    seen_urls = set()
    unique = []
    for citation in citations:
        url = citation.get("url", "")
        if url and url not in seen_urls:
            seen_urls.add(url)
            unique.append(citation)
    return unique


def _format_citations(citations: List[Dict[str, str]]) -> str:
    """Format a list of citations into a markdown string."""
    unique_citations = _deduplicate_citations(citations)
    if not unique_citations:
        return ""

    lines = ["\n\n---\n**Sources:**"]
    for i, citation in enumerate(unique_citations, 1):
        title = citation.get("title", "Untitled")
        url = citation.get("url", "")
        line = f"{i}. [{title}]({url})" if url else f"{i}. {title}"
        lines.append(line)

    return "\n".join(lines) + "\n"


# Mock classes for litellm response format compatibility
class _MockDelta:
    def __init__(self, content: str):
        self.content = content


class _MockChoice:
    def __init__(self, content: str):
        self.delta = _MockDelta(content)


class _MockChunk:
    def __init__(self, content: str):
        self.choices = [_MockChoice(content)]


def _create_chunk(content: str) -> _MockChunk:
    """Create a mock chunk that matches litellm response format."""
    return _MockChunk(content)


def _extract_openai_citations(response) -> List[Dict[str, str]]:
    """Extract citations from OpenAI Responses API response."""
    citations = []
    output = getattr(response, 'output', None)
    if not output:
        return citations

    for output_item in output:
        if getattr(output_item, 'type', None) != 'message':
            continue
        content = getattr(output_item, 'content', [])
        for content_item in content:
            annotations = getattr(content_item, 'annotations', [])
            for annotation in annotations:
                if getattr(annotation, 'type', None) == 'url_citation':
                    citations.append({
                        "title": getattr(annotation, 'title', 'Source'),
                        "url": getattr(annotation, 'url', ''),
                    })
    return citations


def _perform_openai_chat_completions_search(client, raw_model: str, query: str) -> Generator:
    """Perform web search using Chat Completions API."""
    response = client.chat.completions.create(
        model=raw_model,
        messages=[{"role": "user", "content": query}],
        stream=True,
    )
    for chunk in response:
        if chunk.choices and chunk.choices[0].delta.content:
            yield _create_chunk(chunk.choices[0].delta.content)


def _perform_openai_responses_api_search(client, raw_model: str, query: str) -> Generator:
    """Perform web search using Responses API with web_search tool."""
    response = client.responses.create(
        model=raw_model,
        tools=[{"type": "web_search"}],
        input=query,
    )

    output_text = getattr(response, 'output_text', None)
    if output_text:
        yield _create_chunk(output_text)

    citations = _extract_openai_citations(response)
    citations_text = _format_citations(citations)
    if citations_text:
        yield _create_chunk(citations_text)


def perform_openai_web_search(query: str, model: str) -> Generator:
    """Perform web search using OpenAI's API."""
    from openai import OpenAI

    client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
    raw_model = _get_raw_model_name(model)

    if model in OPENAI_CHAT_COMPLETIONS_SEARCH_MODELS:
        yield from _perform_openai_chat_completions_search(client, raw_model, query)
    else:
        yield from _perform_openai_responses_api_search(client, raw_model, query)


def _extract_anthropic_text_citations(content_block) -> List[Dict[str, str]]:
    """Extract citations from Anthropic text block."""
    citations = []
    block_citations = getattr(content_block, 'citations', None)
    if not block_citations:
        return citations

    for citation in block_citations:
        if hasattr(citation, 'url'):
            citations.append({
                "title": getattr(citation, 'title', 'Source'),
                "url": citation.url,
            })
    return citations


def _extract_anthropic_search_results(content_block) -> List[Dict[str, str]]:
    """Extract citations from Anthropic web search tool result."""
    citations = []
    content = getattr(content_block, 'content', [])

    for result in content:
        if getattr(result, 'type', None) == "web_search_result":
            citations.append({
                "title": getattr(result, 'title', 'Source'),
                "url": getattr(result, 'url', ''),
            })
    return citations


def perform_anthropic_web_search(query: str, model: str) -> Generator:
    """Perform web search using Anthropic's Messages API."""
    import anthropic

    client = anthropic.Anthropic(api_key=os.getenv("ANTHROPIC_API_KEY"))
    raw_model = _get_raw_model_name(model)
    citations = []

    response = client.messages.create(
        model=raw_model,
        max_tokens=4096,
        tools=[{"type": "web_search_20250305",
                "name": "web_search", "max_uses": 5}],
        messages=[{"role": "user", "content": query}],
    )

    for content_block in response.content:
        if content_block.type == "text":
            yield _create_chunk(content_block.text)
            citations.extend(_extract_anthropic_text_citations(content_block))
        elif content_block.type == "web_search_tool_result":
            citations.extend(_extract_anthropic_search_results(content_block))

    citations_text = _format_citations(citations)
    if citations_text:
        yield _create_chunk(citations_text)


def perform_web_search_completion(query: str) -> Generator:
    """Performs a web search using the appropriate provider based on default LLM."""
    chat_instance = LLMSingleton.get_instance()
    config_parameters = chat_instance.get_config_by_purpose("default")
    model = config_parameters.get("model")

    is_supported, provider = is_web_search_supported(model)

    if not is_supported:
        raise AraError(get_supported_models_message(model))

    if provider == "openai":
        yield from perform_openai_web_search(query, model)
    elif provider == "anthropic":
        yield from perform_anthropic_web_search(query, model)