alita-sdk 0.3.257__py3-none-any.whl → 0.3.584__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of alita-sdk might be problematic.
- alita_sdk/cli/__init__.py +10 -0
- alita_sdk/cli/__main__.py +17 -0
- alita_sdk/cli/agent/__init__.py +5 -0
- alita_sdk/cli/agent/default.py +258 -0
- alita_sdk/cli/agent_executor.py +155 -0
- alita_sdk/cli/agent_loader.py +215 -0
- alita_sdk/cli/agent_ui.py +228 -0
- alita_sdk/cli/agents.py +3794 -0
- alita_sdk/cli/callbacks.py +647 -0
- alita_sdk/cli/cli.py +168 -0
- alita_sdk/cli/config.py +306 -0
- alita_sdk/cli/context/__init__.py +30 -0
- alita_sdk/cli/context/cleanup.py +198 -0
- alita_sdk/cli/context/manager.py +731 -0
- alita_sdk/cli/context/message.py +285 -0
- alita_sdk/cli/context/strategies.py +289 -0
- alita_sdk/cli/context/token_estimation.py +127 -0
- alita_sdk/cli/formatting.py +182 -0
- alita_sdk/cli/input_handler.py +419 -0
- alita_sdk/cli/inventory.py +1073 -0
- alita_sdk/cli/mcp_loader.py +315 -0
- alita_sdk/cli/toolkit.py +327 -0
- alita_sdk/cli/toolkit_loader.py +85 -0
- alita_sdk/cli/tools/__init__.py +43 -0
- alita_sdk/cli/tools/approval.py +224 -0
- alita_sdk/cli/tools/filesystem.py +1751 -0
- alita_sdk/cli/tools/planning.py +389 -0
- alita_sdk/cli/tools/terminal.py +414 -0
- alita_sdk/community/__init__.py +72 -12
- alita_sdk/community/inventory/__init__.py +236 -0
- alita_sdk/community/inventory/config.py +257 -0
- alita_sdk/community/inventory/enrichment.py +2137 -0
- alita_sdk/community/inventory/extractors.py +1469 -0
- alita_sdk/community/inventory/ingestion.py +3172 -0
- alita_sdk/community/inventory/knowledge_graph.py +1457 -0
- alita_sdk/community/inventory/parsers/__init__.py +218 -0
- alita_sdk/community/inventory/parsers/base.py +295 -0
- alita_sdk/community/inventory/parsers/csharp_parser.py +907 -0
- alita_sdk/community/inventory/parsers/go_parser.py +851 -0
- alita_sdk/community/inventory/parsers/html_parser.py +389 -0
- alita_sdk/community/inventory/parsers/java_parser.py +593 -0
- alita_sdk/community/inventory/parsers/javascript_parser.py +629 -0
- alita_sdk/community/inventory/parsers/kotlin_parser.py +768 -0
- alita_sdk/community/inventory/parsers/markdown_parser.py +362 -0
- alita_sdk/community/inventory/parsers/python_parser.py +604 -0
- alita_sdk/community/inventory/parsers/rust_parser.py +858 -0
- alita_sdk/community/inventory/parsers/swift_parser.py +832 -0
- alita_sdk/community/inventory/parsers/text_parser.py +322 -0
- alita_sdk/community/inventory/parsers/yaml_parser.py +370 -0
- alita_sdk/community/inventory/patterns/__init__.py +61 -0
- alita_sdk/community/inventory/patterns/ast_adapter.py +380 -0
- alita_sdk/community/inventory/patterns/loader.py +348 -0
- alita_sdk/community/inventory/patterns/registry.py +198 -0
- alita_sdk/community/inventory/presets.py +535 -0
- alita_sdk/community/inventory/retrieval.py +1403 -0
- alita_sdk/community/inventory/toolkit.py +173 -0
- alita_sdk/community/inventory/toolkit_utils.py +176 -0
- alita_sdk/community/inventory/visualize.py +1370 -0
- alita_sdk/configurations/__init__.py +11 -0
- alita_sdk/configurations/ado.py +148 -2
- alita_sdk/configurations/azure_search.py +1 -1
- alita_sdk/configurations/bigquery.py +1 -1
- alita_sdk/configurations/bitbucket.py +94 -2
- alita_sdk/configurations/browser.py +18 -0
- alita_sdk/configurations/carrier.py +19 -0
- alita_sdk/configurations/confluence.py +130 -1
- alita_sdk/configurations/delta_lake.py +1 -1
- alita_sdk/configurations/figma.py +76 -5
- alita_sdk/configurations/github.py +65 -1
- alita_sdk/configurations/gitlab.py +81 -0
- alita_sdk/configurations/google_places.py +17 -0
- alita_sdk/configurations/jira.py +103 -0
- alita_sdk/configurations/openapi.py +323 -0
- alita_sdk/configurations/postman.py +1 -1
- alita_sdk/configurations/qtest.py +72 -3
- alita_sdk/configurations/report_portal.py +115 -0
- alita_sdk/configurations/salesforce.py +19 -0
- alita_sdk/configurations/service_now.py +1 -12
- alita_sdk/configurations/sharepoint.py +167 -0
- alita_sdk/configurations/sonar.py +18 -0
- alita_sdk/configurations/sql.py +20 -0
- alita_sdk/configurations/testio.py +101 -0
- alita_sdk/configurations/testrail.py +88 -0
- alita_sdk/configurations/xray.py +94 -1
- alita_sdk/configurations/zephyr_enterprise.py +94 -1
- alita_sdk/configurations/zephyr_essential.py +95 -0
- alita_sdk/runtime/clients/artifact.py +21 -4
- alita_sdk/runtime/clients/client.py +458 -67
- alita_sdk/runtime/clients/mcp_discovery.py +342 -0
- alita_sdk/runtime/clients/mcp_manager.py +262 -0
- alita_sdk/runtime/clients/sandbox_client.py +352 -0
- alita_sdk/runtime/langchain/_constants_bkup.py +1318 -0
- alita_sdk/runtime/langchain/assistant.py +183 -43
- alita_sdk/runtime/langchain/constants.py +647 -1
- alita_sdk/runtime/langchain/document_loaders/AlitaDocxMammothLoader.py +315 -3
- alita_sdk/runtime/langchain/document_loaders/AlitaExcelLoader.py +209 -31
- alita_sdk/runtime/langchain/document_loaders/AlitaImageLoader.py +1 -1
- alita_sdk/runtime/langchain/document_loaders/AlitaJSONLinesLoader.py +77 -0
- alita_sdk/runtime/langchain/document_loaders/AlitaJSONLoader.py +10 -3
- alita_sdk/runtime/langchain/document_loaders/AlitaMarkdownLoader.py +66 -0
- alita_sdk/runtime/langchain/document_loaders/AlitaPDFLoader.py +79 -10
- alita_sdk/runtime/langchain/document_loaders/AlitaPowerPointLoader.py +52 -15
- alita_sdk/runtime/langchain/document_loaders/AlitaPythonLoader.py +9 -0
- alita_sdk/runtime/langchain/document_loaders/AlitaTableLoader.py +1 -4
- alita_sdk/runtime/langchain/document_loaders/AlitaTextLoader.py +15 -2
- alita_sdk/runtime/langchain/document_loaders/ImageParser.py +30 -0
- alita_sdk/runtime/langchain/document_loaders/constants.py +189 -41
- alita_sdk/runtime/langchain/interfaces/llm_processor.py +4 -2
- alita_sdk/runtime/langchain/langraph_agent.py +493 -105
- alita_sdk/runtime/langchain/utils.py +118 -8
- alita_sdk/runtime/llms/preloaded.py +2 -6
- alita_sdk/runtime/models/mcp_models.py +61 -0
- alita_sdk/runtime/skills/__init__.py +91 -0
- alita_sdk/runtime/skills/callbacks.py +498 -0
- alita_sdk/runtime/skills/discovery.py +540 -0
- alita_sdk/runtime/skills/executor.py +610 -0
- alita_sdk/runtime/skills/input_builder.py +371 -0
- alita_sdk/runtime/skills/models.py +330 -0
- alita_sdk/runtime/skills/registry.py +355 -0
- alita_sdk/runtime/skills/skill_runner.py +330 -0
- alita_sdk/runtime/toolkits/__init__.py +28 -0
- alita_sdk/runtime/toolkits/application.py +14 -4
- alita_sdk/runtime/toolkits/artifact.py +25 -9
- alita_sdk/runtime/toolkits/datasource.py +13 -6
- alita_sdk/runtime/toolkits/mcp.py +782 -0
- alita_sdk/runtime/toolkits/planning.py +178 -0
- alita_sdk/runtime/toolkits/skill_router.py +238 -0
- alita_sdk/runtime/toolkits/subgraph.py +11 -6
- alita_sdk/runtime/toolkits/tools.py +314 -70
- alita_sdk/runtime/toolkits/vectorstore.py +11 -5
- alita_sdk/runtime/tools/__init__.py +24 -0
- alita_sdk/runtime/tools/application.py +16 -4
- alita_sdk/runtime/tools/artifact.py +367 -33
- alita_sdk/runtime/tools/data_analysis.py +183 -0
- alita_sdk/runtime/tools/function.py +100 -4
- alita_sdk/runtime/tools/graph.py +81 -0
- alita_sdk/runtime/tools/image_generation.py +218 -0
- alita_sdk/runtime/tools/llm.py +1032 -177
- alita_sdk/runtime/tools/loop.py +3 -1
- alita_sdk/runtime/tools/loop_output.py +3 -1
- alita_sdk/runtime/tools/mcp_inspect_tool.py +284 -0
- alita_sdk/runtime/tools/mcp_remote_tool.py +181 -0
- alita_sdk/runtime/tools/mcp_server_tool.py +3 -1
- alita_sdk/runtime/tools/planning/__init__.py +36 -0
- alita_sdk/runtime/tools/planning/models.py +246 -0
- alita_sdk/runtime/tools/planning/wrapper.py +607 -0
- alita_sdk/runtime/tools/router.py +2 -1
- alita_sdk/runtime/tools/sandbox.py +375 -0
- alita_sdk/runtime/tools/skill_router.py +776 -0
- alita_sdk/runtime/tools/tool.py +3 -1
- alita_sdk/runtime/tools/vectorstore.py +69 -65
- alita_sdk/runtime/tools/vectorstore_base.py +163 -90
- alita_sdk/runtime/utils/AlitaCallback.py +137 -21
- alita_sdk/runtime/utils/constants.py +5 -1
- alita_sdk/runtime/utils/mcp_client.py +492 -0
- alita_sdk/runtime/utils/mcp_oauth.py +361 -0
- alita_sdk/runtime/utils/mcp_sse_client.py +434 -0
- alita_sdk/runtime/utils/mcp_tools_discovery.py +124 -0
- alita_sdk/runtime/utils/streamlit.py +41 -14
- alita_sdk/runtime/utils/toolkit_utils.py +28 -9
- alita_sdk/runtime/utils/utils.py +48 -0
- alita_sdk/tools/__init__.py +135 -37
- alita_sdk/tools/ado/__init__.py +2 -2
- alita_sdk/tools/ado/repos/__init__.py +16 -19
- alita_sdk/tools/ado/repos/repos_wrapper.py +12 -20
- alita_sdk/tools/ado/test_plan/__init__.py +27 -8
- alita_sdk/tools/ado/test_plan/test_plan_wrapper.py +56 -28
- alita_sdk/tools/ado/wiki/__init__.py +28 -12
- alita_sdk/tools/ado/wiki/ado_wrapper.py +114 -40
- alita_sdk/tools/ado/work_item/__init__.py +28 -12
- alita_sdk/tools/ado/work_item/ado_wrapper.py +95 -11
- alita_sdk/tools/advanced_jira_mining/__init__.py +13 -8
- alita_sdk/tools/aws/delta_lake/__init__.py +15 -11
- alita_sdk/tools/aws/delta_lake/tool.py +5 -1
- alita_sdk/tools/azure_ai/search/__init__.py +14 -8
- alita_sdk/tools/base/tool.py +5 -1
- alita_sdk/tools/base_indexer_toolkit.py +454 -110
- alita_sdk/tools/bitbucket/__init__.py +28 -19
- alita_sdk/tools/bitbucket/api_wrapper.py +285 -27
- alita_sdk/tools/bitbucket/cloud_api_wrapper.py +5 -5
- alita_sdk/tools/browser/__init__.py +41 -16
- alita_sdk/tools/browser/crawler.py +3 -1
- alita_sdk/tools/browser/utils.py +15 -6
- alita_sdk/tools/carrier/__init__.py +18 -17
- alita_sdk/tools/carrier/backend_reports_tool.py +8 -4
- alita_sdk/tools/carrier/excel_reporter.py +8 -4
- alita_sdk/tools/chunkers/__init__.py +3 -1
- alita_sdk/tools/chunkers/code/codeparser.py +1 -1
- alita_sdk/tools/chunkers/sematic/json_chunker.py +2 -1
- alita_sdk/tools/chunkers/sematic/markdown_chunker.py +97 -6
- alita_sdk/tools/chunkers/sematic/proposal_chunker.py +1 -1
- alita_sdk/tools/chunkers/universal_chunker.py +270 -0
- alita_sdk/tools/cloud/aws/__init__.py +12 -7
- alita_sdk/tools/cloud/azure/__init__.py +12 -7
- alita_sdk/tools/cloud/gcp/__init__.py +12 -7
- alita_sdk/tools/cloud/k8s/__init__.py +12 -7
- alita_sdk/tools/code/linter/__init__.py +10 -8
- alita_sdk/tools/code/loaders/codesearcher.py +3 -2
- alita_sdk/tools/code/sonar/__init__.py +21 -13
- alita_sdk/tools/code_indexer_toolkit.py +199 -0
- alita_sdk/tools/confluence/__init__.py +22 -14
- alita_sdk/tools/confluence/api_wrapper.py +197 -58
- alita_sdk/tools/confluence/loader.py +14 -2
- alita_sdk/tools/custom_open_api/__init__.py +12 -5
- alita_sdk/tools/elastic/__init__.py +11 -8
- alita_sdk/tools/elitea_base.py +546 -64
- alita_sdk/tools/figma/__init__.py +60 -11
- alita_sdk/tools/figma/api_wrapper.py +1400 -167
- alita_sdk/tools/figma/figma_client.py +73 -0
- alita_sdk/tools/figma/toon_tools.py +2748 -0
- alita_sdk/tools/github/__init__.py +18 -17
- alita_sdk/tools/github/api_wrapper.py +9 -26
- alita_sdk/tools/github/github_client.py +81 -12
- alita_sdk/tools/github/schemas.py +2 -1
- alita_sdk/tools/github/tool.py +5 -1
- alita_sdk/tools/gitlab/__init__.py +19 -13
- alita_sdk/tools/gitlab/api_wrapper.py +256 -80
- alita_sdk/tools/gitlab_org/__init__.py +14 -10
- alita_sdk/tools/google/bigquery/__init__.py +14 -13
- alita_sdk/tools/google/bigquery/tool.py +5 -1
- alita_sdk/tools/google_places/__init__.py +21 -11
- alita_sdk/tools/jira/__init__.py +22 -11
- alita_sdk/tools/jira/api_wrapper.py +315 -168
- alita_sdk/tools/keycloak/__init__.py +11 -8
- alita_sdk/tools/localgit/__init__.py +9 -3
- alita_sdk/tools/localgit/local_git.py +62 -54
- alita_sdk/tools/localgit/tool.py +5 -1
- alita_sdk/tools/memory/__init__.py +38 -14
- alita_sdk/tools/non_code_indexer_toolkit.py +7 -2
- alita_sdk/tools/ocr/__init__.py +11 -8
- alita_sdk/tools/openapi/__init__.py +491 -106
- alita_sdk/tools/openapi/api_wrapper.py +1357 -0
- alita_sdk/tools/openapi/tool.py +20 -0
- alita_sdk/tools/pandas/__init__.py +20 -12
- alita_sdk/tools/pandas/api_wrapper.py +40 -45
- alita_sdk/tools/pandas/dataframe/generator/base.py +3 -1
- alita_sdk/tools/postman/__init__.py +11 -11
- alita_sdk/tools/postman/api_wrapper.py +19 -8
- alita_sdk/tools/postman/postman_analysis.py +8 -1
- alita_sdk/tools/pptx/__init__.py +11 -10
- alita_sdk/tools/qtest/__init__.py +22 -14
- alita_sdk/tools/qtest/api_wrapper.py +1784 -88
- alita_sdk/tools/rally/__init__.py +13 -10
- alita_sdk/tools/report_portal/__init__.py +23 -16
- alita_sdk/tools/salesforce/__init__.py +22 -16
- alita_sdk/tools/servicenow/__init__.py +21 -16
- alita_sdk/tools/servicenow/api_wrapper.py +1 -1
- alita_sdk/tools/sharepoint/__init__.py +17 -14
- alita_sdk/tools/sharepoint/api_wrapper.py +179 -39
- alita_sdk/tools/sharepoint/authorization_helper.py +191 -1
- alita_sdk/tools/sharepoint/utils.py +8 -2
- alita_sdk/tools/slack/__init__.py +13 -8
- alita_sdk/tools/sql/__init__.py +22 -19
- alita_sdk/tools/sql/api_wrapper.py +71 -23
- alita_sdk/tools/testio/__init__.py +21 -13
- alita_sdk/tools/testrail/__init__.py +13 -11
- alita_sdk/tools/testrail/api_wrapper.py +214 -46
- alita_sdk/tools/utils/__init__.py +28 -4
- alita_sdk/tools/utils/content_parser.py +241 -55
- alita_sdk/tools/utils/text_operations.py +254 -0
- alita_sdk/tools/vector_adapters/VectorStoreAdapter.py +83 -27
- alita_sdk/tools/xray/__init__.py +18 -14
- alita_sdk/tools/xray/api_wrapper.py +58 -113
- alita_sdk/tools/yagmail/__init__.py +9 -3
- alita_sdk/tools/zephyr/__init__.py +12 -7
- alita_sdk/tools/zephyr_enterprise/__init__.py +16 -9
- alita_sdk/tools/zephyr_enterprise/api_wrapper.py +30 -15
- alita_sdk/tools/zephyr_essential/__init__.py +16 -10
- alita_sdk/tools/zephyr_essential/api_wrapper.py +297 -54
- alita_sdk/tools/zephyr_essential/client.py +6 -4
- alita_sdk/tools/zephyr_scale/__init__.py +13 -8
- alita_sdk/tools/zephyr_scale/api_wrapper.py +39 -31
- alita_sdk/tools/zephyr_squad/__init__.py +12 -7
- {alita_sdk-0.3.257.dist-info → alita_sdk-0.3.584.dist-info}/METADATA +184 -37
- alita_sdk-0.3.584.dist-info/RECORD +452 -0
- alita_sdk-0.3.584.dist-info/entry_points.txt +2 -0
- alita_sdk/tools/bitbucket/tools.py +0 -304
- alita_sdk-0.3.257.dist-info/RECORD +0 -343
- {alita_sdk-0.3.257.dist-info → alita_sdk-0.3.584.dist-info}/WHEEL +0 -0
- {alita_sdk-0.3.257.dist-info → alita_sdk-0.3.584.dist-info}/licenses/LICENSE +0 -0
- {alita_sdk-0.3.257.dist-info → alita_sdk-0.3.584.dist-info}/top_level.txt +0 -0
alita_sdk/cli/agents.py
ADDED
@@ -0,0 +1,3794 @@
+"""
+Agent commands for Alita CLI.
+
+Provides commands to work with agents interactively or in handoff mode,
+supporting both platform agents and local agent definition files.
+"""
+
+import asyncio
+import click
+import json
+import logging
+import sqlite3
+import sys
+import re
+from typing import Optional, Dict, Any, List
+from pathlib import Path
+from datetime import datetime
+import yaml
+
+from rich.console import Console
+from rich.panel import Panel
+from rich.table import Table
+from rich.markdown import Markdown
+from rich import box
+from rich.text import Text
+from rich.status import Status
+from rich.live import Live
+
+from .cli import get_client
+# Import from refactored modules
+from .agent_ui import print_welcome, print_help, display_output, extract_output_from_result
+from .agent_loader import load_agent_definition
+from .agent_executor import create_llm_instance, create_agent_executor, create_agent_executor_with_mcp
+from .toolkit_loader import load_toolkit_config, load_toolkit_configs
+from .callbacks import create_cli_callback, CLICallbackHandler
+from .input_handler import get_input_handler, styled_input, styled_selection_input
+# Context management for chat history
+from .context import CLIContextManager, CLIMessage, purge_old_sessions as purge_context_sessions
+
+logger = logging.getLogger(__name__)
+
+# Create a rich console for beautiful output
+console = Console()
+
+
+def resolve_toolkit_config_path(config_path_str: str, test_file: Path, test_cases_dir: Path) -> Optional[str]:
+    """
+    Resolve toolkit configuration file path from test case.
+
+    Tries multiple locations in order:
+    1. Absolute path
+    2. Relative to test case file directory
+    3. Relative to test cases directory
+    4. Relative to workspace root
+
+    Args:
+        config_path_str: Config path from test case
+        test_file: Path to the test case file
+        test_cases_dir: Path to test cases directory
+
+    Returns:
+        Absolute path to config file if found, None otherwise
+    """
+    if not config_path_str:
+        return None
+
+    # Normalize path separators
+    config_path_str = config_path_str.replace('\\', '/')
+
+    # Try absolute path first
+    config_path = Path(config_path_str)
+    if config_path.is_absolute() and config_path.exists():
+        return str(config_path)
+
+    # Try relative to test case file directory
+    config_path = test_file.parent / config_path_str
+    if config_path.exists():
+        return str(config_path)
+
+    # Try relative to test_cases_dir
+    config_path = test_cases_dir / config_path_str
+    if config_path.exists():
+        return str(config_path)
+
+    # Try relative to workspace root
+    workspace_root = Path.cwd()
+    config_path = workspace_root / config_path_str
+    if config_path.exists():
+        return str(config_path)
+
+    return None
+
+
+def parse_test_case(test_case_path: str) -> Dict[str, Any]:
+    """
+    Parse a test case markdown file to extract configuration, steps, and expectations.
+
+    Args:
+        test_case_path: Path to the test case markdown file
+
+    Returns:
+        Dictionary containing:
+        - name: Test case name
+        - objective: Test objective
+        - config_path: Path to toolkit config file
+        - generate_test_data: Boolean flag indicating if test data generation is needed (default: True)
+        - test_data_config: Dictionary of test data configuration from table
+        - prerequisites: Pre-requisites section text
+        - variables: List of variable placeholders found (e.g., {{TEST_PR_NUMBER}})
+        - steps: List of test steps with their descriptions
+        - expectations: List of expectations/assertions
+    """
+    path = Path(test_case_path)
+    if not path.exists():
+        raise FileNotFoundError(f"Test case not found: {test_case_path}")
+
+    content = path.read_text(encoding='utf-8')
+
+    # Extract test case name from the first heading
+    name_match = re.search(r'^#\s+(.+)$', content, re.MULTILINE)
+    name = name_match.group(1) if name_match else path.stem
+
+    # Extract objective
+    objective_match = re.search(r'##\s+Objective\s*\n\n(.+?)(?=\n\n##|\Z)', content, re.DOTALL)
+    objective = objective_match.group(1).strip() if objective_match else ""
+
+    # Extract config path and generateTestData flag
+    config_section_match = re.search(r'##\s+Config\s*\n\n(.+?)(?=\n\n##|\Z)', content, re.DOTALL)
+    config_path = None
+    generate_test_data = True  # Default to True if not specified
+
+    if config_section_match:
+        config_section = config_section_match.group(1)
+        # Extract path
+        path_match = re.search(r'path:\s*(.+?)(?=\n|$)', config_section, re.MULTILINE)
+        if path_match:
+            config_path = path_match.group(1).strip()
+
+        # Extract generateTestData flag
+        gen_data_match = re.search(r'generateTestData\s*:\s*(true|false)', config_section, re.IGNORECASE)
+        if gen_data_match:
+            generate_test_data = gen_data_match.group(1).lower() == 'true'
+
+    # Extract Test Data Configuration table
+    test_data_config = {}
+    config_section_match = re.search(r'##\s+Test Data Configuration\s*\n(.+?)(?=\n##|\Z)', content, re.DOTALL)
+    if config_section_match:
+        config_section = config_section_match.group(1)
+        # Parse markdown table (format: | Parameter | Value | Description |)
+        table_rows = re.findall(r'\|\s*\*\*([^*]+)\*\*\s*\|\s*`?([^|`]+)`?\s*\|', config_section)
+        for param, value in table_rows:
+            test_data_config[param.strip()] = value.strip()
+
+    # Extract Pre-requisites section
+    prerequisites = ""
+    prereq_match = re.search(r'##\s+Pre-requisites\s*\n\n(.+?)(?=\n\n##|\Z)', content, re.DOTALL)
+    if prereq_match:
+        prerequisites = prereq_match.group(1).strip()
+
+    # Find all variable placeholders ({{VARIABLE_NAME}})
+    variables = list(set(re.findall(r'\{\{([A-Z_]+)\}\}', content)))
+
+    # Extract test steps and expectations
+    steps = []
+    expectations = []
+
+    # Find all Step sections
+    step_pattern = r'###\s+Step\s+(\d+):\s+(.+?)\n\n(.+?)(?=\n\n###|\n\n##|\Z)'
+    for step_match in re.finditer(step_pattern, content, re.DOTALL):
+        step_num = step_match.group(1)
+        step_title = step_match.group(2).strip()
+        step_content = step_match.group(3).strip()
+
+        # Extract the actual instruction (first paragraph before "Expectation:")
+        instruction_match = re.search(r'(.+?)(?=\n\n\*\*Expectation:\*\*|\Z)', step_content, re.DOTALL)
+        instruction = instruction_match.group(1).strip() if instruction_match else step_content
+
+        # Extract expectation if present
+        expectation_match = re.search(r'\*\*Expectation:\*\*\s+(.+)', step_content, re.DOTALL)
+        expectation = expectation_match.group(1).strip() if expectation_match else None
+
+        steps.append({
+            'number': int(step_num),
+            'title': step_title,
+            'instruction': instruction,
+            'expectation': expectation
+        })
+
+        if expectation:
+            expectations.append({
+                'step': int(step_num),
+                'description': expectation
+            })
+
+    return {
+        'name': name,
+        'objective': objective,
+        'config_path': config_path,
+        'generate_test_data': generate_test_data,
+        'test_data_config': test_data_config,
+        'prerequisites': prerequisites,
+        'variables': variables,
+        'steps': steps,
+        'expectations': expectations
+    }
+
+
+def validate_test_output(output: str, expectation: str) -> tuple[bool, str]:
+    """
+    Validate test output against expectations.
+
+    Args:
+        output: The actual output from the agent
+        expectation: The expected result description
+
+    Returns:
+        Tuple of (passed: bool, details: str)
+    """
+    # Simple keyword-based validation
+    # Extract key phrases from expectation
+
+    # Common patterns in expectations
+    if "contains" in expectation.lower():
+        # Extract what should be contained
+        contains_match = re.search(r'contains.*?["`]([^"`]+)["`]', expectation, re.IGNORECASE)
+        if contains_match:
+            expected_text = contains_match.group(1)
+            if expected_text in output:
+                return True, f"Output contains expected text: '{expected_text}'"
+            else:
+                return False, f"Output does not contain expected text: '{expected_text}'"
+
+    if "without errors" in expectation.lower() or "runs without errors" in expectation.lower():
+        # Check for common error indicators
+        error_indicators = ['error', 'exception', 'failed', 'traceback']
+        has_error = any(indicator in output.lower() for indicator in error_indicators)
+        if not has_error:
+            return True, "Execution completed without errors"
+        else:
+            return False, "Execution encountered errors"
+
+    # Default: assume pass if output is non-empty
+    if output and len(output.strip()) > 0:
+        return True, "Output generated successfully"
+
+    return False, "No output generated"
+
+
+def _build_bulk_data_gen_prompt(parsed_test_cases: list) -> str:
+    """Build consolidated requirements text for bulk test data generation."""
+    requirements = []
+    for idx, tc in enumerate(parsed_test_cases, 1):
+        test_case = tc['data']
+        test_file = tc['file']
+
+        parts = [f"Test Case #{idx}: {test_case['name']}", f"File: {test_file.name}", ""]
+
+        if test_case.get('test_data_config'):
+            parts.append("Test Data Configuration:")
+            for param, value in test_case['test_data_config'].items():
+                parts.append(f" - {param}: {value}")
+
+        if test_case.get('prerequisites'):
+            parts.append(f"\nPre-requisites:\n{test_case['prerequisites']}")
+
+        if test_case.get('variables'):
+            parts.append(f"\nVariables to generate: {', '.join(test_case['variables'])}")
+
+        requirements.append("\n".join(parts))
+
+    return f"""{'='*60}
+
+{chr(10).join(requirements)}
+
+{'='*60}"""
+
+
+def _build_single_test_execution_prompt(test_case_info: dict, test_number: int) -> str:
+    """Build execution prompt for a single test case."""
+    test_case = test_case_info['data']
+    test_file = test_case_info['file']
+
+    parts = [
+        f"\n{'='*80}",
+        f"TEST CASE #{test_number}: {test_case['name']}",
+        f"File: {test_file.name}",
+        f"{'='*80}",
+        "\nList all the tools you have in your environment. Execute the following steps in sequential order and report results:"
+    ]
+
+    if test_case['steps']:
+        for step in test_case['steps']:
+            parts.append(f"\nStep {step['number']}: {step['title']}")
+            parts.append(step['instruction'])
+    else:
+        parts.append("\n(No steps defined)")
+
+    return "\n".join(parts)
+
+
+def _build_single_test_validation_prompt(test_case_info: dict, test_number: int, execution_output: str) -> str:
+    """Build validation prompt for a single test case."""
+    test_case = test_case_info['data']
+
+    parts = [
+        "Review the test execution results and validate this test case and provide the output in JSON format.\n",
+        f"\nTest Case #{test_number}: {test_case['name']}"
+    ]
+
+    if test_case['steps']:
+        for step in test_case['steps']:
+            parts.append(f" Step {step['number']}: {step['title']}")
+            if step['expectation']:
+                parts.append(f" Expected: {step['expectation']}")
+
+    parts.append(f"\n\nActual Execution Results:\n{execution_output}\n")
+
+    # Escape quotes in test name for valid JSON in prompt
+    escaped_test_name = test_case['name'].replace('"', '\\"')
+
+    parts.append(f"""\nBased on the execution results above, validate this test case.
+
+Respond ONLY with valid JSON in this EXACT format (no additional text before or after):
+{{
+    "test_number": {test_number},
+    "test_name": "{escaped_test_name}",
+    "steps": [
+        {{"step_number": 1, "title": "<step title>", "passed": true/false, "details": "<brief explanation>"}},
+        {{"step_number": 2, "title": "<step title>", "passed": true/false, "details": "<brief explanation>"}}
+    ]
+}}
+
+IMPORTANT: Return ONLY the JSON object. Do not include any explanatory text before or after the JSON.""")
+
+    return "\n".join(parts)
+
+
+def _extract_json_from_text(text: str) -> dict:
+    """Extract JSON object from text using brace counting."""
+    start_idx = text.find('{')
+    if start_idx == -1:
+        raise ValueError("No JSON found in text")
+
+    brace_count = 0
+    end_idx = -1
+    for i, char in enumerate(text[start_idx:], start=start_idx):
+        if char == '{':
+            brace_count += 1
+        elif char == '}':
+            brace_count -= 1
+            if brace_count == 0:
+                end_idx = i + 1
+                break
+
+    if end_idx == -1:
+        raise ValueError("Could not find matching closing brace")
+
+    return json.loads(text[start_idx:end_idx])
+
+
+def _create_fallback_result_for_test(test_case: dict, test_file: Path, reason: str = 'Validation failed') -> dict:
+    """Create a fallback result for a single test case with detailed step information.
+
+    Args:
+        test_case: Parsed test case data
+        test_file: Path to test case file
+        reason: Reason for fallback
+
+    Returns:
+        Fallback test result dict with step details
+    """
+    fallback_steps = []
+    for step_info in test_case.get('steps', []):
+        fallback_steps.append({
+            'step_number': step_info['number'],
+            'title': step_info['title'],
+            'passed': False,
+            'details': reason
+        })
+
+    return {
+        'title': test_case['name'],
+        'passed': False,
+        'file': test_file.name,
+        'step_results': fallback_steps,
+        'validation_error': reason
+    }
+
+
+def _cleanup_executor_cache(cache: Dict[str, tuple], cache_name: str = "executor") -> None:
+    """Clean up executor cache resources.
+
+    Args:
+        cache: Dictionary of cached executors
+        cache_name: Name of cache for logging
+    """
+    console.print(f"[dim]Cleaning up {cache_name} cache...[/dim]")
+    for cache_key, cached_items in cache.items():
+        try:
+            # Extract memory from tuple (second element)
+            memory = cached_items[1] if len(cached_items) > 1 else None
+
+            # Close SQLite memory connection
+            if memory and hasattr(memory, 'conn') and memory.conn:
+                memory.conn.close()
+        except Exception as e:
+            logger.debug(f"Error cleaning up {cache_name} cache for {cache_key}: {e}")
+
+
+def _create_executor_from_cache(cache: Dict[str, tuple], cache_key: str,
+                                client, agent_def: Dict, toolkit_config_path: Optional[str],
+                                config, model: Optional[str], temperature: Optional[float],
+                                max_tokens: Optional[int], work_dir: Optional[str]) -> tuple:
+    """Get or create executor from cache.
+
+    Args:
+        cache: Executor cache dictionary
+        cache_key: Key for caching
+        client: API client
+        agent_def: Agent definition
+        toolkit_config_path: Path to toolkit config
+        config: CLI configuration
+        model: Model override
+        temperature: Temperature override
+        max_tokens: Max tokens override
+        work_dir: Working directory
+
+    Returns:
+        Tuple of (agent_executor, memory, mcp_session_manager)
+    """
+    if cache_key in cache:
+        return cache[cache_key]
+
+    # Create new executor
+    from langgraph.checkpoint.sqlite import SqliteSaver
+    import sqlite3
+
+    memory = SqliteSaver(sqlite3.connect(":memory:", check_same_thread=False))
+    toolkit_config_tuple = (toolkit_config_path,) if toolkit_config_path else ()
+
+    agent_executor, mcp_session_manager, _, _, _, _, _ = _setup_local_agent_executor(
+        client, agent_def, toolkit_config_tuple, config, model, temperature,
+        max_tokens, memory, work_dir
+    )
+
+    # Cache the executor
+    cached_tuple = (agent_executor, memory, mcp_session_manager)
+    cache[cache_key] = cached_tuple
+    return cached_tuple
+
+
+def _print_validation_diagnostics(validation_output: str) -> None:
+    """Print diagnostic information for validation output.
+
+    Args:
+        validation_output: The validation output to diagnose
+    """
+    console.print(f"\n[bold red]🔍 Diagnostic Information:[/bold red]")
+    console.print(f"[dim]Output length: {len(validation_output)} characters[/dim]")
+
+    # Check for key JSON elements
+    has_json = '{' in validation_output and '}' in validation_output
+    has_fields = 'test_number' in validation_output and 'steps' in validation_output
+
+    console.print(f"[dim]Has JSON structure: {has_json}[/dim]")
+    console.print(f"[dim]Has required fields: {has_fields}[/dim]")
+
+    # Show relevant excerpt
+    if len(validation_output) > 400:
+        console.print(f"\n[red]First 200 chars:[/red] [dim]{validation_output[:200]}[/dim]")
+        console.print(f"[red]Last 200 chars:[/red] [dim]{validation_output[-200:]}[/dim]")
+    else:
+        console.print(f"\n[red]Full output:[/red] [dim]{validation_output}[/dim]")
+
+
+def _get_alita_system_prompt(config) -> str:
+    """
+    Get the Alita system prompt from user config or fallback to default.
+
+    Checks for $ALITA_DIR/agents/default.agent.md first, then falls back
+    to the built-in DEFAULT_PROMPT.
+
+    Returns:
+        The system prompt string for Alita
+    """
+    from .agent.default import DEFAULT_PROMPT
+
+    # Check for user-customized prompt
+    custom_prompt_path = Path(config.agents_dir) / 'default.agent.md'
+
+    if custom_prompt_path.exists():
+        try:
+            content = custom_prompt_path.read_text(encoding='utf-8')
+            # Parse the agent.md file - extract system_prompt from frontmatter or use content
+            if content.startswith('---'):
+                # Has YAML frontmatter, try to parse
+                try:
+                    parts = content.split('---', 2)
+                    if len(parts) >= 3:
+                        frontmatter = yaml.safe_load(parts[1])
+                        body = parts[2].strip()
+                        # Use system_prompt from frontmatter if present, otherwise use body
+                        return frontmatter.get('system_prompt', body) if frontmatter else body
+                except Exception:
+                    pass
+            # No frontmatter or parsing failed, use entire content as prompt
+            return content.strip()
+        except Exception as e:
+            logger.debug(f"Failed to load custom Alita prompt from {custom_prompt_path}: {e}")
+
+    return DEFAULT_PROMPT
+
+
+def _get_inventory_system_prompt(config) -> str:
+    """
+    Get the Inventory agent system prompt from user config or fallback to default.
+
+    Checks for $ALITA_DIR/agents/inventory.agent.md first, then falls back
+    to the default prompt with inventory-specific instructions.
+
+    Returns:
+        The system prompt string for Inventory agent
+    """
+    from .agent.default import DEFAULT_PROMPT
+
+    # Check for user-customized prompt
+    custom_prompt_path = Path(config.agents_dir) / 'inventory.agent.md'
+
+    if custom_prompt_path.exists():
+        try:
+            content = custom_prompt_path.read_text(encoding='utf-8')
+            # Parse the agent.md file - extract system_prompt from frontmatter or use content
+            if content.startswith('---'):
+                try:
+                    parts = content.split('---', 2)
+                    if len(parts) >= 3:
+                        frontmatter = yaml.safe_load(parts[1])
+                        body = parts[2].strip()
+                        return frontmatter.get('system_prompt', body) if frontmatter else body
+                except Exception:
+                    pass
+            return content.strip()
+        except Exception as e:
+            logger.debug(f"Failed to load custom Inventory prompt from {custom_prompt_path}: {e}")
+
+    # Use default prompt + inventory toolkit instructions
+    inventory_context = """
+
+## Inventory Knowledge Graph
+
+You have access to the Inventory toolkit for querying a knowledge graph of software entities and relationships.
+Use these tools to help users understand their codebase:
+
+- **search_entities**: Find entities by name, type, or path patterns
+- **get_entity**: Get full details of a specific entity
+- **get_relationships**: Find relationships from/to an entity
+- **impact_analysis**: Analyze what depends on an entity (useful for change impact)
+- **get_graph_stats**: Get statistics about the knowledge graph
+
+When answering questions about the codebase, use these tools to provide accurate, citation-backed answers.
+"""
+    return DEFAULT_PROMPT + inventory_context
+
+
+def _resolve_inventory_path(path: str, work_dir: Optional[str] = None) -> Optional[str]:
+    """
+    Resolve an inventory/knowledge graph file path.
+
+    Tries locations in order:
+    1. Absolute path
+    2. Relative to current working directory (or work_dir if provided)
+    3. Relative to .alita/inventory/ in current directory
+    4. Relative to .alita/inventory/ in work_dir (if different)
+
+    Args:
+        path: The path to resolve (can be relative or absolute)
+        work_dir: Optional workspace directory to check
+
+    Returns:
+        Absolute path to the file if found, None otherwise
+    """
+    # Expand user home directory
+    path = str(Path(path).expanduser())
+
+    # Try absolute path first
+    if Path(path).is_absolute() and Path(path).exists():
+        return str(Path(path).resolve())
+
+    # Try relative to current working directory
+    cwd = Path.cwd()
+    cwd_path = cwd / path
+    if cwd_path.exists():
+        return str(cwd_path.resolve())
+
+    # Try .alita/inventory/ in current directory
+    alita_inventory_path = cwd / '.alita' / 'inventory' / path
+    if alita_inventory_path.exists():
+        return str(alita_inventory_path.resolve())
+
+    # If work_dir is different from cwd, try there too
+    if work_dir:
+        work_path = Path(work_dir)
+        if work_path != cwd:
+            # Try relative to work_dir
+            work_rel_path = work_path / path
+            if work_rel_path.exists():
+                return str(work_rel_path.resolve())
+
+            # Try .alita/inventory/ in work_dir
+            work_alita_path = work_path / '.alita' / 'inventory' / path
+            if work_alita_path.exists():
+                return str(work_alita_path.resolve())
+
+    return None
+
+
+def _build_inventory_config(path: str, work_dir: Optional[str] = None) -> Optional[Dict[str, Any]]:
+    """
+    Build an inventory toolkit configuration from a file path.
+
+    The toolkit name is derived from the filename (stem).
+    All available tools are included.
+
+    Args:
+        path: Path to the knowledge graph JSON file
+        work_dir: Optional workspace directory for path resolution
+
+    Returns:
+        Toolkit configuration dict if file found, None otherwise
+    """
+    # Resolve the path
+    resolved_path = _resolve_inventory_path(path, work_dir)
+    if not resolved_path:
+        return None
+
+    # Validate it's a JSON file
+    if not resolved_path.endswith('.json'):
+        return None
+
+    # Validate file exists and is readable
+    try:
+        with open(resolved_path, 'r') as f:
+            # Just check it's valid JSON
+            json.load(f)
+    except (IOError, json.JSONDecodeError):
+        return None
+
+    # Extract toolkit name from filename (e.g., 'alita' from 'alita.json')
+    toolkit_name = Path(resolved_path).stem
+
+    # Build configuration with all available tools
+    from .toolkit_loader import INVENTORY_TOOLS
+
+    return {
+        'type': 'inventory',
+        'toolkit_name': toolkit_name,
+        'graph_path': resolved_path,
+        'base_directory': work_dir,
+        'selected_tools': INVENTORY_TOOLS,
+    }
+
+
+def _get_inventory_json_files(work_dir: Optional[str] = None) -> List[str]:
+    """
+    Get list of .json files for inventory path completion.
+
+    Searches:
+    1. Current working directory (*.json files)
+    2. .alita/inventory/ directory (*.json files)
+    3. work_dir and work_dir/.alita/inventory/ if different from cwd
+
+    Args:
+        work_dir: Optional workspace directory
+
+    Returns:
+        List of relative or display paths for completion
+    """
+    suggestions = []
+    seen = set()
+
+    cwd = Path.cwd()
+
+    # Current directory .json files
+    for f in cwd.glob('*.json'):
+        if f.name not in seen:
+            suggestions.append(f.name)
+            seen.add(f.name)
+
+    # .alita/inventory/ directory
+    alita_inv = cwd / '.alita' / 'inventory'
+    if alita_inv.exists():
+        for f in alita_inv.glob('*.json'):
+            display = f'.alita/inventory/{f.name}'
+            if display not in seen:
+                suggestions.append(display)
+                seen.add(display)
+
+    # work_dir if different
+    if work_dir:
+        work_path = Path(work_dir)
+        if work_path != cwd:
+            for f in work_path.glob('*.json'):
+                if f.name not in seen:
+                    suggestions.append(f.name)
+                    seen.add(f.name)
+
+            work_alita_inv = work_path / '.alita' / 'inventory'
+            if work_alita_inv.exists():
+                for f in work_alita_inv.glob('*.json'):
+                    display = f'.alita/inventory/{f.name}'
+                    if display not in seen:
+                        suggestions.append(display)
+                        seen.add(display)
+
+    return sorted(suggestions)
+
+
+def _load_mcp_tools(agent_def: Dict[str, Any], mcp_config_path: str) -> List[Dict[str, Any]]:
+    """Load MCP tools from agent definition with tool-level filtering.
+
+    Args:
+        agent_def: Agent definition dictionary containing mcps list
+        mcp_config_path: Path to mcp.json configuration file (workspace-level)
+
+    Returns:
+        List of toolkit configurations for MCP servers
+    """
+    from .mcp_loader import load_mcp_tools
+    return load_mcp_tools(agent_def, mcp_config_path)
+
+
+def _setup_local_agent_executor(client, agent_def: Dict[str, Any], toolkit_config: tuple,
+                                config, model: Optional[str], temperature: Optional[float],
+                                max_tokens: Optional[int], memory, allowed_directories: Optional[List[str]],
+                                plan_state: Optional[Dict] = None):
+    """Setup local agent executor with all configurations.
+
+    Args:
+        allowed_directories: List of allowed directories for filesystem access.
+            First directory is the primary/base directory.
+
+    Returns:
+        Tuple of (agent_executor, mcp_session_manager, llm, llm_model, filesystem_tools, terminal_tools, planning_tools)
+    """
+    # Load toolkit configs
+    toolkit_configs = load_toolkit_configs(agent_def, toolkit_config)
+
+    # Load MCP tools
+    mcp_toolkit_configs = _load_mcp_tools(agent_def, config.mcp_config_path)
+    toolkit_configs.extend(mcp_toolkit_configs)
+
+    # Create LLM instance
+    llm, llm_model, llm_temperature, llm_max_tokens = create_llm_instance(
+        client, model, agent_def, temperature, max_tokens
+    )
+
+    # Add filesystem tools if directories are provided
+    filesystem_tools = None
+    terminal_tools = None
+    if allowed_directories:
+        from .tools import get_filesystem_tools, get_terminal_tools
+        preset = agent_def.get('filesystem_tools_preset')
+        include_tools = agent_def.get('filesystem_tools_include')
+        exclude_tools = agent_def.get('filesystem_tools_exclude')
+
+        # First directory is the primary base directory
+        base_dir = allowed_directories[0]
+        extra_dirs = allowed_directories[1:] if len(allowed_directories) > 1 else None
+        filesystem_tools = get_filesystem_tools(base_dir, include_tools, exclude_tools, preset, extra_dirs)
+
+        # Terminal tools use primary directory as cwd
+        terminal_tools = get_terminal_tools(base_dir)
+
+        tool_count = len(filesystem_tools) + len(terminal_tools)
+        if len(allowed_directories) == 1:
+            access_msg = f"✓ Granted filesystem & terminal access to: {base_dir} ({tool_count} tools)"
+        else:
+            access_msg = f"✓ Granted filesystem & terminal access to {len(allowed_directories)} directories ({tool_count} tools)"
+        if preset:
+            access_msg += f" [preset: {preset}]"
+        if include_tools:
+            access_msg += f" [include: {', '.join(include_tools)}]"
+        if exclude_tools:
+            access_msg += f" [exclude: {', '.join(exclude_tools)}]"
+        console.print(f"[dim]{access_msg}[/dim]")
+
+    # Add planning tools (always available)
+    planning_tools = None
+    plan_state_obj = None
+    if plan_state is not None:
+        from .tools import get_planning_tools, PlanState
+        # Create a plan callback to update the dict when plan changes
+        def plan_callback(state: PlanState):
+            plan_state['title'] = state.title
+            plan_state['steps'] = state.to_dict()['steps']
+            plan_state['session_id'] = state.session_id
+
+        # Get session_id from plan_state dict if provided
+        session_id = plan_state.get('session_id')
+        planning_tools, plan_state_obj = get_planning_tools(
+            plan_state=None,
+            plan_callback=plan_callback,
+            session_id=session_id
+        )
+        console.print(f"[dim]✓ Planning tools enabled ({len(planning_tools)} tools) [session: {plan_state_obj.session_id}][/dim]")
+
+    # Check if we have tools
+    has_tools = bool(agent_def.get('tools') or toolkit_configs or filesystem_tools or terminal_tools or planning_tools)
+    has_mcp = any(tc.get('toolkit_type') == 'mcp' for tc in toolkit_configs)
+
+    if not has_tools:
+        return None, None, llm, llm_model, filesystem_tools, terminal_tools, planning_tools
+
+    # Create agent executor with or without MCP
+    mcp_session_manager = None
+    if has_mcp:
+        # Create persistent event loop for MCP tools
+        from alita_sdk.runtime.tools.llm import LLMNode
+        if not hasattr(LLMNode, '_persistent_loop') or \
+                LLMNode._persistent_loop is None or \
+                LLMNode._persistent_loop.is_closed():
+            LLMNode._persistent_loop = asyncio.new_event_loop()
+            console.print("[dim]Created persistent event loop for MCP tools[/dim]")
+
+        # Load MCP tools using persistent loop
+        loop = LLMNode._persistent_loop
+        asyncio.set_event_loop(loop)
+        agent_executor, mcp_session_manager = loop.run_until_complete(
+            create_agent_executor_with_mcp(
+                client, agent_def, toolkit_configs,
+                llm, llm_model, llm_temperature, llm_max_tokens, memory,
+                filesystem_tools=filesystem_tools,
+                terminal_tools=terminal_tools,
+                planning_tools=planning_tools
+            )
+        )
+    else:
+        agent_executor = create_agent_executor(
+            client, agent_def, toolkit_configs,
+            llm, llm_model, llm_temperature, llm_max_tokens, memory,
+            filesystem_tools=filesystem_tools,
+            terminal_tools=terminal_tools,
+            planning_tools=planning_tools
+        )
+
+    return agent_executor, mcp_session_manager, llm, llm_model, filesystem_tools, terminal_tools, planning_tools
+
+
+def _select_model_interactive(client) -> Optional[Dict[str, Any]]:
+    """
+    Show interactive menu to select a model from available models.
+
+    Returns:
+        Selected model info dict or None if cancelled
+    """
+    console.print("\n🔧 [bold cyan]Select a model:[/bold cyan]\n")
+
+    try:
+        # Use the new get_available_models API
+        models = client.get_available_models()
+        if not models:
+            console.print("[yellow]No models available from the platform.[/yellow]")
+            return None
+
+        # Build models list - API returns items[].name
+        models_list = []
+        for model in models:
+            model_name = model.get('name')
+            if model_name:
+                models_list.append({
+                    'name': model_name,
+                    'id': model.get('id'),
+                    'model_data': model
+                })
+
+        if not models_list:
+            console.print("[yellow]No models found.[/yellow]")
+            return None
+
+        # Display models with numbers
+        table = Table(show_header=True, header_style="bold cyan", box=box.SIMPLE)
+        table.add_column("#", style="dim", width=4)
+        table.add_column("Model", style="cyan")
+
+        for i, model in enumerate(models_list, 1):
+            table.add_row(str(i), model['name'])
+
+        console.print(table)
+        console.print(f"\n[dim]0. Cancel[/dim]")
+
+        # Get user selection using styled input
+        while True:
+            try:
+                choice = styled_selection_input("Select model number")
+
+                if choice == '0':
+                    return None
+
+                idx = int(choice) - 1
+                if 0 <= idx < len(models_list):
+                    selected = models_list[idx]
+                    console.print(f"✓ [green]Selected:[/green] [bold]{selected['name']}[/bold]")
+                    return selected
+                else:
+                    console.print(f"[yellow]Invalid selection. Please enter a number between 0 and {len(models_list)}[/yellow]")
+            except ValueError:
+                console.print("[yellow]Please enter a valid number[/yellow]")
+            except (KeyboardInterrupt, EOFError):
+                return None
+
+    except Exception as e:
+        console.print(f"[red]Error fetching models: {e}[/red]")
+        return None
+
+
+def _select_mcp_interactive(config) -> Optional[Dict[str, Any]]:
+    """
+    Show interactive menu to select an MCP server from mcp.json.
+
+    Returns:
+        Selected MCP server config dict or None if cancelled
+    """
+    from .mcp_loader import load_mcp_config
+
+    console.print("\n🔌 [bold cyan]Select an MCP server to add:[/bold cyan]\n")
+
+    mcp_config = load_mcp_config(config.mcp_config_path)
+    mcp_servers = mcp_config.get('mcpServers', {})
+
+    if not mcp_servers:
+        console.print(f"[yellow]No MCP servers found in {config.mcp_config_path}[/yellow]")
+        return None
+
+    servers_list = list(mcp_servers.items())
+
+    # Display servers with numbers
+    table = Table(show_header=True, header_style="bold cyan", box=box.SIMPLE)
+    table.add_column("#", style="dim", width=4)
+    table.add_column("Server", style="cyan")
+    table.add_column("Type", style="dim")
+    table.add_column("Command/URL", style="dim")
+
+    for i, (name, server_config) in enumerate(servers_list, 1):
+        server_type = server_config.get('type', 'stdio')
+        cmd_or_url = server_config.get('url') or server_config.get('command', '')
+        table.add_row(str(i), name, server_type, cmd_or_url[:40])
+
+    console.print(table)
+    console.print(f"\n[dim]0. Cancel[/dim]")
+
+    # Get user selection using styled input
+    while True:
+        try:
+            choice = styled_selection_input("Select MCP server number")
+
+            if choice == '0':
+                return None
+
+            idx = int(choice) - 1
+            if 0 <= idx < len(servers_list):
+                name, server_config = servers_list[idx]
+                console.print(f"✓ [green]Selected:[/green] [bold]{name}[/bold]")
+                return {'name': name, 'config': server_config}
+            else:
+                console.print(f"[yellow]Invalid selection. Please enter a number between 0 and {len(servers_list)}[/yellow]")
+        except ValueError:
+            console.print("[yellow]Please enter a valid number[/yellow]")
+        except (KeyboardInterrupt, EOFError):
+            return None
+
+
+def _select_toolkit_interactive(config) -> Optional[Dict[str, Any]]:
+    """
+    Show interactive menu to select a toolkit from $ALITA_DIR/tools.
+
+    Returns:
+        Selected toolkit config dict or None if cancelled
+    """
+    console.print("\n🧰 [bold cyan]Select a toolkit to add:[/bold cyan]\n")
+
+    tools_dir = Path(config.tools_dir)
+
+    if not tools_dir.exists():
+        console.print(f"[yellow]Tools directory not found: {tools_dir}[/yellow]")
+        return None
+
+    # Find all toolkit config files
+    toolkit_files = []
+    for pattern in ['*.json', '*.yaml', '*.yml']:
+        toolkit_files.extend(tools_dir.glob(pattern))
+
+    if not toolkit_files:
+        console.print(f"[yellow]No toolkit configurations found in {tools_dir}[/yellow]")
+        return None
+
+    # Load toolkit info
+    toolkits_list = []
+    for file_path in toolkit_files:
+        try:
+            config_data = load_toolkit_config(str(file_path))
+            toolkits_list.append({
+                'file': str(file_path),
+                'name': config_data.get('toolkit_name') or config_data.get('name') or file_path.stem,
+                'type': config_data.get('toolkit_type') or config_data.get('type', 'unknown'),
+                'config': config_data
+            })
+        except Exception as e:
+            logger.debug(f"Failed to load toolkit config {file_path}: {e}")
+
+    if not toolkits_list:
+        console.print(f"[yellow]No valid toolkit configurations found in {tools_dir}[/yellow]")
+        return None
+
+    # Display toolkits with numbers
+    table = Table(show_header=True, header_style="bold cyan", box=box.SIMPLE)
+    table.add_column("#", style="dim", width=4)
+    table.add_column("Toolkit", style="cyan")
+    table.add_column("Type", style="dim")
+    table.add_column("File", style="dim")
+
+    for i, toolkit in enumerate(toolkits_list, 1):
+        table.add_row(str(i), toolkit['name'], toolkit['type'], Path(toolkit['file']).name)
+
+    console.print(table)
+    console.print(f"\n[dim]0. Cancel[/dim]")
+
+    # Get user selection using styled input
+    while True:
+        try:
+            choice = styled_selection_input("Select toolkit number")
+
+            if choice == '0':
+                return None
+
+            idx = int(choice) - 1
+            if 0 <= idx < len(toolkits_list):
+                selected = toolkits_list[idx]
+                console.print(f"✓ [green]Selected:[/green] [bold]{selected['name']}[/bold]")
+                return selected
+            else:
+                console.print(f"[yellow]Invalid selection. Please enter a number between 0 and {len(toolkits_list)}[/yellow]")
+        except ValueError:
+            console.print("[yellow]Please enter a valid number[/yellow]")
+        except (KeyboardInterrupt, EOFError):
+            return None
+
+
+def _list_available_toolkits(config) -> List[str]:
+    """
+    List names of all available toolkits in $ALITA_DIR/tools.
+
+    Returns:
+        List of toolkit names
+    """
+    tools_dir = Path(config.tools_dir)
+
+    if not tools_dir.exists():
+        return []
+
+    toolkit_names = []
+    for pattern in ['*.json', '*.yaml', '*.yml']:
+        for file_path in tools_dir.glob(pattern):
+            try:
+                config_data = load_toolkit_config(str(file_path))
+                name = config_data.get('toolkit_name') or config_data.get('name') or file_path.stem
+                toolkit_names.append(name)
+            except Exception:
+                pass
+
+    return toolkit_names
+
+
+def _find_toolkit_by_name(config, toolkit_name: str) -> Optional[Dict[str, Any]]:
+    """
+    Find a toolkit by name in $ALITA_DIR/tools.
+
+    Args:
+        config: CLI configuration
|
+
toolkit_name: Name of the toolkit to find (case-insensitive)
|
|
1080
|
+
|
|
1081
|
+
Returns:
|
|
1082
|
+
Toolkit config dict or None if not found
|
|
1083
|
+
"""
|
|
1084
|
+
tools_dir = Path(config.tools_dir)
|
|
1085
|
+
|
|
1086
|
+
if not tools_dir.exists():
|
|
1087
|
+
return None
|
|
1088
|
+
|
|
1089
|
+
toolkit_name_lower = toolkit_name.lower()
|
|
1090
|
+
|
|
1091
|
+
for pattern in ['*.json', '*.yaml', '*.yml']:
|
|
1092
|
+
for file_path in tools_dir.glob(pattern):
|
|
1093
|
+
try:
|
|
1094
|
+
config_data = load_toolkit_config(str(file_path))
|
|
1095
|
+
name = config_data.get('toolkit_name') or config_data.get('name') or file_path.stem
|
|
1096
|
+
|
|
1097
|
+
# Match by name (case-insensitive) or file stem
|
|
1098
|
+
if name.lower() == toolkit_name_lower or file_path.stem.lower() == toolkit_name_lower:
|
|
1099
|
+
return {
|
|
1100
|
+
'file': str(file_path),
|
|
1101
|
+
'name': name,
|
|
1102
|
+
'type': config_data.get('toolkit_type') or config_data.get('type', 'unknown'),
|
|
1103
|
+
'config': config_data
|
|
1104
|
+
}
|
|
1105
|
+
except Exception:
|
|
1106
|
+
pass
|
|
1107
|
+
|
|
1108
|
+
return None
|
|
1109
|
+
|
|
1110
|
+
|
|
1111
|
+
def _select_agent_interactive(client, config) -> Optional[str]:
|
|
1112
|
+
"""
|
|
1113
|
+
Show interactive menu to select an agent from platform and local agents.
|
|
1114
|
+
|
|
1115
|
+
Returns:
|
|
1116
|
+
Agent source (name/id for platform, file path for local, '__direct__' for direct chat,
|
|
1117
|
+
'__inventory__' for inventory agent) or None if cancelled
|
|
1118
|
+
"""
|
|
1119
|
+
from .config import CLIConfig
|
|
1120
|
+
|
|
1121
|
+
console.print("\n🤖 [bold cyan]Select an agent to chat with:[/bold cyan]\n")
|
|
1122
|
+
|
|
1123
|
+
# Built-in agents
|
|
1124
|
+
console.print(f"1. [[bold]💬 Alita[/bold]] [cyan]Chat directly with LLM (no agent)[/cyan]")
|
|
1125
|
+
console.print(f" [dim]Direct conversation with the model without agent configuration[/dim]")
|
|
1126
|
+
console.print(f"2. [[bold]📊 Inventory[/bold]] [cyan]Knowledge graph builder agent[/cyan]")
|
|
1127
|
+
console.print(f" [dim]Build inventories from connected toolkits (use --toolkit-config to add sources)[/dim]")
|
|
1128
|
+
|
|
1129
|
+
agents_list = []
|
|
1130
|
+
|
|
1131
|
+
# Load platform agents
|
|
1132
|
+
try:
|
|
1133
|
+
platform_agents = client.get_list_of_apps()
|
|
1134
|
+
for agent in platform_agents:
|
|
1135
|
+
agents_list.append({
|
|
1136
|
+
'type': 'platform',
|
|
1137
|
+
'name': agent['name'],
|
|
1138
|
+
'source': agent['name'],
|
|
1139
|
+
'description': agent.get('description', '')[:60]
|
|
1140
|
+
})
|
|
1141
|
+
except Exception as e:
|
|
1142
|
+
logger.debug(f"Failed to load platform agents: {e}")
|
|
1143
|
+
|
|
1144
|
+
# Load local agents
|
|
1145
|
+
agents_dir = config.agents_dir
|
|
1146
|
+
search_dir = Path(agents_dir)
|
|
1147
|
+
|
|
1148
|
+
if search_dir.exists():
|
|
1149
|
+
for pattern in ['*.agent.md', '*.agent.yaml', '*.agent.yml', '*.agent.json']:
|
|
1150
|
+
for file_path in search_dir.rglob(pattern):
|
|
1151
|
+
try:
|
|
1152
|
+
agent_def = load_agent_definition(str(file_path))
|
|
1153
|
+
agents_list.append({
|
|
1154
|
+
'type': 'local',
|
|
1155
|
+
'name': agent_def.get('name', file_path.stem),
|
|
1156
|
+
'source': str(file_path),
|
|
1157
|
+
'description': agent_def.get('description', '')[:60]
|
|
1158
|
+
})
|
|
1159
|
+
except Exception as e:
|
|
1160
|
+
logger.debug(f"Failed to load {file_path}: {e}")
|
|
1161
|
+
|
|
1162
|
+
# Display agents with numbers using rich (starting from 3 since 1-2 are built-in)
|
|
1163
|
+
for i, agent in enumerate(agents_list, 3):
|
|
1164
|
+
agent_type = "📦 Platform" if agent['type'] == 'platform' else "📁 Local"
|
|
1165
|
+
console.print(f"{i}. [[bold]{agent_type}[/bold]] [cyan]{agent['name']}[/cyan]")
|
|
1166
|
+
if agent['description']:
|
|
1167
|
+
console.print(f" [dim]{agent['description']}[/dim]")
|
|
1168
|
+
|
|
1169
|
+
console.print(f"\n[dim]0. Cancel[/dim]")
|
|
1170
|
+
|
|
1171
|
+
# Get user selection using styled input
|
|
1172
|
+
while True:
|
|
1173
|
+
try:
|
|
1174
|
+
choice = styled_selection_input("Select agent number")
|
|
1175
|
+
|
|
1176
|
+
if choice == '0':
|
|
1177
|
+
return None
|
|
1178
|
+
|
|
1179
|
+
if choice == '1':
|
|
1180
|
+
console.print(f"✓ [green]Selected:[/green] [bold]Alita[/bold]")
|
|
1181
|
+
return '__direct__'
|
|
1182
|
+
|
|
1183
|
+
if choice == '2':
|
|
1184
|
+
console.print(f"✓ [green]Selected:[/green] [bold]Inventory[/bold]")
|
|
1185
|
+
return '__inventory__'
|
|
1186
|
+
|
|
1187
|
+
idx = int(choice) - 3 # Offset by 3 since 1-2 are built-in agents
|
|
1188
|
+
if 0 <= idx < len(agents_list):
|
|
1189
|
+
selected = agents_list[idx]
|
|
1190
|
+
console.print(f"✓ [green]Selected:[/green] [bold]{selected['name']}[/bold]")
|
|
1191
|
+
return selected['source']
|
|
1192
|
+
else:
|
|
1193
|
+
console.print(f"[yellow]Invalid selection. Please enter a number between 0 and {len(agents_list) + 2}[/yellow]")
|
|
1194
|
+
except ValueError:
|
|
1195
|
+
console.print("[yellow]Please enter a valid number[/yellow]")
|
|
1196
|
+
except (KeyboardInterrupt, EOFError):
|
|
1197
|
+
console.print("\n[dim]Cancelled.[/dim]")
|
|
1198
|
+
return None
|
|
1199
|
+
|
|
1200
|
+
|
|
1201
|
+
@click.group()
|
|
1202
|
+
def agent():
|
|
1203
|
+
"""Agent testing and interaction commands."""
|
|
1204
|
+
pass
|
|
1205
|
+
|
|
1206
|
+
|
|
1207
|
+
@agent.command('list')
|
|
1208
|
+
@click.option('--local', is_flag=True, help='List local agent definition files')
|
|
1209
|
+
@click.option('--directory', default=None, help='Directory to search for local agents (defaults to AGENTS_DIR from .env)')
|
|
1210
|
+
@click.pass_context
|
|
1211
|
+
def agent_list(ctx, local: bool, directory: Optional[str]):
|
|
1212
|
+
"""
|
|
1213
|
+
List available agents.
|
|
1214
|
+
|
|
1215
|
+
By default, lists agents from the platform.
|
|
1216
|
+
Use --local to list agent definition files in the local directory.
|
|
1217
|
+
"""
|
|
1218
|
+
formatter = ctx.obj['formatter']
|
|
1219
|
+
config = ctx.obj['config']
|
|
1220
|
+
|
|
1221
|
+
try:
|
|
1222
|
+
if local:
|
|
1223
|
+
# List local agent definition files
|
|
1224
|
+
if directory is None:
|
|
1225
|
+
directory = config.agents_dir
|
|
1226
|
+
search_dir = Path(directory)
|
|
1227
|
+
|
|
1228
|
+
if not search_dir.exists():
|
|
1229
|
+
console.print(f"[red]Directory not found: {directory}[/red]")
|
|
1230
|
+
return
|
|
1231
|
+
|
|
1232
|
+
agents = []
|
|
1233
|
+
|
|
1234
|
+
# Find agent definition files
|
|
1235
|
+
for pattern in ['*.agent.md', '*.agent.yaml', '*.agent.yml', '*.agent.json']:
|
|
1236
|
+
for file_path in search_dir.rglob(pattern):
|
|
1237
|
+
try:
|
|
1238
|
+
agent_def = load_agent_definition(str(file_path))
|
|
1239
|
+
# Use relative path if already relative, otherwise make it relative to cwd
|
|
1240
|
+
try:
|
|
1241
|
+
display_path = str(file_path.relative_to(Path.cwd()))
|
|
1242
|
+
except ValueError:
|
|
1243
|
+
display_path = str(file_path)
|
|
1244
|
+
|
|
1245
|
+
agents.append({
|
|
1246
|
+
'name': agent_def.get('name', file_path.stem),
|
|
1247
|
+
'file': display_path,
|
|
1248
|
+
'description': agent_def.get('description', '')[:80]
|
|
1249
|
+
})
|
|
1250
|
+
except Exception as e:
|
|
1251
|
+
logger.debug(f"Failed to load {file_path}: {e}")
|
|
1252
|
+
|
|
1253
|
+
if not agents:
|
|
1254
|
+
console.print(f"\n[yellow]No agent definition files found in {directory}[/yellow]")
|
|
1255
|
+
return
|
|
1256
|
+
|
|
1257
|
+
# Display local agents in a table
|
|
1258
|
+
table = Table(
|
|
1259
|
+
title=f"Local Agent Definitions in {directory}",
|
|
1260
|
+
show_header=True,
|
|
1261
|
+
header_style="bold cyan",
|
|
1262
|
+
border_style="cyan",
|
|
1263
|
+
box=box.ROUNDED
|
|
1264
|
+
)
|
|
1265
|
+
table.add_column("Name", style="bold cyan", no_wrap=True)
|
|
1266
|
+
table.add_column("File", style="dim")
|
|
1267
|
+
table.add_column("Description", style="white")
|
|
1268
|
+
|
|
1269
|
+
for agent_info in sorted(agents, key=lambda x: x['name']):
|
|
1270
|
+
table.add_row(
|
|
1271
|
+
agent_info['name'],
|
|
1272
|
+
agent_info['file'],
|
|
1273
|
+
agent_info['description'] or "-"
|
|
1274
|
+
)
|
|
1275
|
+
|
|
1276
|
+
console.print("\n")
|
|
1277
|
+
console.print(table)
|
|
1278
|
+
console.print(f"\n[green]Total: {len(agents)} local agents[/green]")
|
|
1279
|
+
|
|
1280
|
+
else:
|
|
1281
|
+
# List platform agents
|
|
1282
|
+
client = get_client(ctx)
|
|
1283
|
+
|
|
1284
|
+
agents = client.get_list_of_apps()
|
|
1285
|
+
|
|
1286
|
+
if formatter.__class__.__name__ == 'JSONFormatter':
|
|
1287
|
+
click.echo(formatter._dump({'agents': agents, 'total': len(agents)}))
|
|
1288
|
+
else:
|
|
1289
|
+
table = Table(
|
|
1290
|
+
title="Available Platform Agents",
|
|
1291
|
+
show_header=True,
|
|
1292
|
+
header_style="bold cyan",
|
|
1293
|
+
border_style="cyan",
|
|
1294
|
+
box=box.ROUNDED
|
|
1295
|
+
)
|
|
1296
|
+
table.add_column("ID", style="yellow", no_wrap=True)
|
|
1297
|
+
table.add_column("Name", style="bold cyan")
|
|
1298
|
+
table.add_column("Description", style="white")
|
|
1299
|
+
|
|
1300
|
+
for agent_info in agents:
|
|
1301
|
+
table.add_row(
|
|
1302
|
+
str(agent_info['id']),
|
|
1303
|
+
agent_info['name'],
|
|
1304
|
+
agent_info.get('description', '')[:80] or "-"
|
|
1305
|
+
)
|
|
1306
|
+
|
|
1307
|
+
console.print("\n")
|
|
1308
|
+
console.print(table)
|
|
1309
|
+
console.print(f"\n[green]Total: {len(agents)} agents[/green]")
|
|
1310
|
+
|
|
1311
|
+
except Exception as e:
|
|
1312
|
+
logger.exception("Failed to list agents")
|
|
1313
|
+
error_panel = Panel(
|
|
1314
|
+
str(e),
|
|
1315
|
+
title="Error",
|
|
1316
|
+
border_style="red",
|
|
1317
|
+
box=box.ROUNDED
|
|
1318
|
+
)
|
|
1319
|
+
console.print(error_panel, style="red")
|
|
1320
|
+
raise click.Abort()
|
|
1321
|
+
|
|
1322
|
+
|
|
1323
|
+
@agent.command('show')
|
|
1324
|
+
@click.argument('agent_source')
|
|
1325
|
+
@click.option('--version', help='Agent version (for platform agents)')
|
|
1326
|
+
@click.pass_context
|
|
1327
|
+
def agent_show(ctx, agent_source: str, version: Optional[str]):
|
|
1328
|
+
"""
|
|
1329
|
+
Show agent details.
|
|
1330
|
+
|
|
1331
|
+
AGENT_SOURCE can be:
|
|
1332
|
+
- Platform agent ID or name (e.g., "123" or "my-agent")
|
|
1333
|
+
- Path to local agent file (e.g., ".github/agents/sdk-dev.agent.md")
|
|
1334
|
+
"""
|
|
1335
|
+
formatter = ctx.obj['formatter']
|
|
1336
|
+
|
|
1337
|
+
try:
|
|
1338
|
+
# Check if it's a file path
|
|
1339
|
+
if Path(agent_source).exists():
|
|
1340
|
+
# Local agent file
|
|
1341
|
+
agent_def = load_agent_definition(agent_source)
|
|
1342
|
+
|
|
1343
|
+
if formatter.__class__.__name__ == 'JSONFormatter':
|
|
1344
|
+
click.echo(formatter._dump(agent_def))
|
|
1345
|
+
else:
|
|
1346
|
+
# Create details panel
|
|
1347
|
+
details = Text()
|
|
1348
|
+
details.append("File: ", style="bold")
|
|
1349
|
+
details.append(f"{agent_source}\n", style="cyan")
|
|
1350
|
+
|
|
1351
|
+
if agent_def.get('description'):
|
|
1352
|
+
details.append("\nDescription: ", style="bold")
|
|
1353
|
+
details.append(f"{agent_def['description']}\n", style="white")
|
|
1354
|
+
|
|
1355
|
+
if agent_def.get('model'):
|
|
1356
|
+
details.append("Model: ", style="bold")
|
|
1357
|
+
details.append(f"{agent_def['model']}\n", style="cyan")
|
|
1358
|
+
|
|
1359
|
+
if agent_def.get('tools'):
|
|
1360
|
+
details.append("Tools: ", style="bold")
|
|
1361
|
+
details.append(f"{', '.join(agent_def['tools'])}\n", style="cyan")
|
|
1362
|
+
|
|
1363
|
+
if agent_def.get('temperature') is not None:
|
|
1364
|
+
details.append("Temperature: ", style="bold")
|
|
1365
|
+
details.append(f"{agent_def['temperature']}\n", style="cyan")
|
|
1366
|
+
|
|
1367
|
+
panel = Panel(
|
|
1368
|
+
details,
|
|
1369
|
+
title=f"Local Agent: {agent_def.get('name', 'Unknown')}",
|
|
1370
|
+
title_align="left",
|
|
1371
|
+
border_style="cyan",
|
|
1372
|
+
box=box.ROUNDED
|
|
1373
|
+
)
|
|
1374
|
+
console.print("\n")
|
|
1375
|
+
console.print(panel)
|
|
1376
|
+
|
|
1377
|
+
if agent_def.get('system_prompt'):
|
|
1378
|
+
console.print("\n[bold]System Prompt:[/bold]")
|
|
1379
|
+
console.print(Panel(agent_def['system_prompt'][:500] + "...", border_style="dim", box=box.ROUNDED))
|
|
1380
|
+
|
|
1381
|
+
else:
|
|
1382
|
+
# Platform agent
|
|
1383
|
+
client = get_client(ctx)
|
|
1384
|
+
|
|
1385
|
+
# Try to find agent by ID or name
|
|
1386
|
+
agents = client.get_list_of_apps()
|
|
1387
|
+
|
|
1388
|
+
agent = None
|
|
1389
|
+
try:
|
|
1390
|
+
agent_id = int(agent_source)
|
|
1391
|
+
agent = next((a for a in agents if a['id'] == agent_id), None)
|
|
1392
|
+
except ValueError:
|
|
1393
|
+
agent = next((a for a in agents if a['name'] == agent_source), None)
|
|
1394
|
+
|
|
1395
|
+
if not agent:
|
|
1396
|
+
raise click.ClickException(f"Agent '{agent_source}' not found")
|
|
1397
|
+
|
|
1398
|
+
# Get details
|
|
1399
|
+
details = client.get_app_details(agent['id'])
|
|
1400
|
+
|
|
1401
|
+
if formatter.__class__.__name__ == 'JSONFormatter':
|
|
1402
|
+
click.echo(formatter._dump(details))
|
|
1403
|
+
else:
|
|
1404
|
+
# Create platform agent details panel
|
|
1405
|
+
content = Text()
|
|
1406
|
+
content.append("ID: ", style="bold")
|
|
1407
|
+
content.append(f"{details['id']}\n", style="yellow")
|
|
1408
|
+
|
|
1409
|
+
if details.get('description'):
|
|
1410
|
+
content.append("\nDescription: ", style="bold")
|
|
1411
|
+
content.append(f"{details['description']}\n", style="white")
|
|
1412
|
+
|
|
1413
|
+
panel = Panel(
|
|
1414
|
+
content,
|
|
1415
|
+
title=f"Agent: {details['name']}",
|
|
1416
|
+
title_align="left",
|
|
1417
|
+
border_style="cyan",
|
|
1418
|
+
box=box.ROUNDED
|
|
1419
|
+
)
|
|
1420
|
+
console.print("\n")
|
|
1421
|
+
console.print(panel)
|
|
1422
|
+
|
|
1423
|
+
# Display versions in a table
|
|
1424
|
+
if details.get('versions'):
|
|
1425
|
+
console.print("\n[bold]Versions:[/bold]")
|
|
1426
|
+
versions_table = Table(box=box.ROUNDED, border_style="dim")
|
|
1427
|
+
versions_table.add_column("Name", style="cyan")
|
|
1428
|
+
versions_table.add_column("ID", style="yellow")
|
|
1429
|
+
for ver in details.get('versions', []):
|
|
1430
|
+
versions_table.add_row(ver['name'], str(ver['id']))
|
|
1431
|
+
console.print(versions_table)
|
|
1432
|
+
|
|
1433
|
+
except click.ClickException:
|
|
1434
|
+
raise
|
|
1435
|
+
except Exception as e:
|
|
1436
|
+
logger.exception("Failed to show agent details")
|
|
1437
|
+
error_panel = Panel(
|
|
1438
|
+
str(e),
|
|
1439
|
+
title="Error",
|
|
1440
|
+
border_style="red",
|
|
1441
|
+
box=box.ROUNDED
|
|
1442
|
+
)
|
|
1443
|
+
console.print(error_panel, style="red")
|
|
1444
|
+
raise click.Abort()
|
|
1445
|
+
|
|
1446
|
+
|
|
1447
|
+
@agent.command('chat')
|
|
1448
|
+
@click.argument('agent_source', required=False)
|
|
1449
|
+
@click.option('--version', help='Agent version (for platform agents)')
|
|
1450
|
+
@click.option('--toolkit-config', multiple=True, type=click.Path(exists=True),
|
|
1451
|
+
help='Toolkit configuration files (can specify multiple)')
|
|
1452
|
+
@click.option('--inventory', 'inventory_path', type=str,
|
|
1453
|
+
help='Load inventory/knowledge graph from JSON file (e.g., alita.json or .alita/inventory/alita.json)')
|
|
1454
|
+
@click.option('--thread-id', help='Continue existing conversation thread')
|
|
1455
|
+
@click.option('--model', help='Override LLM model')
|
|
1456
|
+
@click.option('--temperature', type=float, help='Override temperature')
|
|
1457
|
+
@click.option('--max-tokens', type=int, help='Override max tokens')
|
|
1458
|
+
@click.option('--dir', 'work_dir', type=click.Path(exists=True, file_okay=False, dir_okay=True),
|
|
1459
|
+
help='Grant agent filesystem access to this directory')
|
|
1460
|
+
@click.option('--verbose', '-v', type=click.Choice(['quiet', 'default', 'debug']), default='default',
|
|
1461
|
+
help='Output verbosity level: quiet (final output only), default (tool calls + outputs), debug (all including LLM calls)')
|
|
1462
|
+
@click.option('--recursion-limit', type=int, default=50,
|
|
1463
|
+
help='Maximum number of tool execution steps per turn')
|
|
1464
|
+
@click.pass_context
|
|
1465
|
+
def agent_chat(ctx, agent_source: Optional[str], version: Optional[str],
|
|
1466
|
+
toolkit_config: tuple, inventory_path: Optional[str], thread_id: Optional[str],
|
|
1467
|
+
model: Optional[str], temperature: Optional[float],
|
|
1468
|
+
max_tokens: Optional[int], work_dir: Optional[str],
|
|
1469
|
+
verbose: str, recursion_limit: Optional[int]):
|
|
1470
|
+
"""Start interactive chat with an agent.
|
|
1471
|
+
|
|
1472
|
+
\b
|
|
1473
|
+
Examples:
|
|
1474
|
+
alita chat # Interactive agent selection
|
|
1475
|
+
alita chat my-agent # Chat with platform agent
|
|
1476
|
+
alita chat ./agent.md # Chat with local agent file
|
|
1477
|
+
alita chat --inventory alita.json
|
|
1478
|
+
alita chat my-agent --dir ./src
|
|
1479
|
+
alita chat my-agent --thread-id abc123
|
|
1480
|
+
alita chat my-agent -v quiet # Hide tool calls
|
|
1481
|
+
alita chat my-agent -v debug # Show all LLM calls
|
|
1482
|
+
alita chat __inventory__ --toolkit-config jira.json
|
|
1483
|
+
"""
|
|
1484
|
+
formatter = ctx.obj['formatter']
|
|
1485
|
+
config = ctx.obj['config']
|
|
1486
|
+
client = get_client(ctx)
|
|
1487
|
+
|
|
1488
|
+
# Setup verbose level
|
|
1489
|
+
show_verbose = verbose != 'quiet'
|
|
1490
|
+
debug_mode = verbose == 'debug'
|
|
1491
|
+
|
|
1492
|
+
try:
|
|
1493
|
+
# If no agent specified, start with direct chat by default
|
|
1494
|
+
if not agent_source:
|
|
1495
|
+
agent_source = '__direct__'
|
|
1496
|
+
|
|
1497
|
+
# Check for built-in agent modes
|
|
1498
|
+
is_direct = agent_source == '__direct__'
|
|
1499
|
+
is_inventory = agent_source == '__inventory__'
|
|
1500
|
+
is_builtin = is_direct or is_inventory
|
|
1501
|
+
is_local = not is_builtin and Path(agent_source).exists()
|
|
1502
|
+
|
|
1503
|
+
# Get defaults from config
|
|
1504
|
+
default_model = config.default_model or 'gpt-4o'
|
|
1505
|
+
default_temperature = config.default_temperature if config.default_temperature is not None else 0.1
|
|
1506
|
+
default_max_tokens = config.default_max_tokens or 4096
|
|
1507
|
+
|
|
1508
|
+
# Initialize variables for dynamic updates
|
|
1509
|
+
current_model = model
|
|
1510
|
+
current_temperature = temperature
|
|
1511
|
+
current_max_tokens = max_tokens
|
|
1512
|
+
added_mcp_configs = []
|
|
1513
|
+
added_toolkit_configs = list(toolkit_config) if toolkit_config else []
|
|
1514
|
+
mcp_session_manager = None
|
|
1515
|
+
llm = None
|
|
1516
|
+
agent_executor = None
|
|
1517
|
+
agent_def = {}
|
|
1518
|
+
filesystem_tools = None
|
|
1519
|
+
terminal_tools = None
|
|
1520
|
+
planning_tools = None
|
|
1521
|
+
plan_state = None
|
|
1522
|
+
|
|
1523
|
+
# Handle --inventory option: add inventory toolkit config at startup
|
|
1524
|
+
if inventory_path:
|
|
1525
|
+
inventory_config = _build_inventory_config(inventory_path, work_dir)
|
|
1526
|
+
if inventory_config:
|
|
1527
|
+
added_toolkit_configs.append(inventory_config)
|
|
1528
|
+
console.print(f"[dim]✓ Loading inventory: {inventory_config['toolkit_name']} ({inventory_config['graph_path']})[/dim]")
|
|
1529
|
+
else:
|
|
1530
|
+
console.print(f"[yellow]Warning: Inventory file not found: {inventory_path}[/yellow]")
|
|
1531
|
+
console.print("[dim]Searched in current directory and .alita/inventory/[/dim]")
|
|
1532
|
+
|
|
1533
|
+
# Approval mode: 'always' (confirm each tool), 'auto' (no confirmation), 'yolo' (no safety checks)
|
|
1534
|
+
approval_mode = 'always'
|
|
1535
|
+
allowed_directories = [work_dir] if work_dir else [] # Track allowed directories for /dir command
|
|
1536
|
+
current_agent_file = agent_source if is_local else None # Track agent file for /reload command
|
|
1537
|
+
|
|
1538
|
+
if is_direct:
|
|
1539
|
+
# Direct chat mode - no agent, just LLM with Alita instructions
|
|
1540
|
+
agent_name = "Alita"
|
|
1541
|
+
agent_type = "Direct LLM"
|
|
1542
|
+
alita_prompt = _get_alita_system_prompt(config)
|
|
1543
|
+
agent_def = {
|
|
1544
|
+
'model': model or default_model,
|
|
1545
|
+
'temperature': temperature if temperature is not None else default_temperature,
|
|
1546
|
+
'max_tokens': max_tokens or default_max_tokens,
|
|
1547
|
+
'system_prompt': alita_prompt
|
|
1548
|
+
}
|
|
1549
|
+
elif is_inventory:
|
|
1550
|
+
# Inventory agent mode - knowledge graph builder with inventory toolkit
|
|
1551
|
+
agent_name = "Inventory"
|
|
1552
|
+
agent_type = "Built-in Agent"
|
|
1553
|
+
inventory_prompt = _get_inventory_system_prompt(config)
|
|
1554
|
+
agent_def = {
|
|
1555
|
+
'name': 'inventory-agent',
|
|
1556
|
+
'model': model or default_model,
|
|
1557
|
+
'temperature': temperature if temperature is not None else 0.3,
|
|
1558
|
+
'max_tokens': max_tokens or default_max_tokens,
|
|
1559
|
+
'system_prompt': inventory_prompt,
|
|
1560
|
+
# Include inventory toolkit by default
|
|
1561
|
+
'toolkit_configs': [
|
|
1562
|
+
{'type': 'inventory', 'graph_path': './knowledge_graph.json'}
|
|
1563
|
+
]
|
|
1564
|
+
}
|
|
1565
|
+
elif is_local:
|
|
1566
|
+
agent_def = load_agent_definition(agent_source)
|
|
1567
|
+
agent_name = agent_def.get('name', Path(agent_source).stem)
|
|
1568
|
+
agent_type = "Local Agent"
|
|
1569
|
+
else:
|
|
1570
|
+
# Platform agent - find it
|
|
1571
|
+
agents = client.get_list_of_apps()
|
|
1572
|
+
agent = None
|
|
1573
|
+
|
|
1574
|
+
try:
|
|
1575
|
+
agent_id = int(agent_source)
|
|
1576
|
+
agent = next((a for a in agents if a['id'] == agent_id), None)
|
|
1577
|
+
except ValueError:
|
|
1578
|
+
agent = next((a for a in agents if a['name'] == agent_source), None)
|
|
1579
|
+
|
|
1580
|
+
if not agent:
|
|
1581
|
+
raise click.ClickException(f"Agent '{agent_source}' not found")
|
|
1582
|
+
|
|
1583
|
+
agent_name = agent['name']
|
|
1584
|
+
agent_type = "Platform Agent"
|
|
1585
|
+
|
|
1586
|
+
# Get model and temperature for welcome banner
|
|
1587
|
+
llm_model_display = current_model or agent_def.get('model', default_model)
|
|
1588
|
+
llm_temperature_display = current_temperature if current_temperature is not None else agent_def.get('temperature', default_temperature)
|
|
1589
|
+
|
|
1590
|
+
# Print nice welcome banner
|
|
1591
|
+
print_welcome(agent_name, llm_model_display, llm_temperature_display, approval_mode)
|
|
1592
|
+
|
|
1593
|
+
# Initialize conversation
|
|
1594
|
+
chat_history = []
|
|
1595
|
+
|
|
1596
|
+
# Initialize session for persistence (memory + plan)
|
|
1597
|
+
from .tools import generate_session_id, create_session_memory, save_session_metadata, to_portable_path
|
|
1598
|
+
current_session_id = generate_session_id()
|
|
1599
|
+
plan_state = {'session_id': current_session_id}
|
|
1600
|
+
|
|
1601
|
+
# Create persistent memory for agent (stored in session directory)
|
|
1602
|
+
memory = create_session_memory(current_session_id)
|
|
1603
|
+
|
|
1604
|
+
# Save session metadata with agent source for session resume
|
|
1605
|
+
agent_source_portable = to_portable_path(current_agent_file) if current_agent_file else None
|
|
1606
|
+
# Filter out transient inventory configs (dicts) - only save file paths
|
|
1607
|
+
serializable_toolkit_configs = [tc for tc in added_toolkit_configs if isinstance(tc, str)]
|
|
1608
|
+
# Extract inventory graph path if present
|
|
1609
|
+
inventory_graph = None
|
|
1610
|
+
for tc in added_toolkit_configs:
|
|
1611
|
+
if isinstance(tc, dict) and tc.get('type') == 'inventory':
|
|
1612
|
+
inventory_graph = tc.get('graph_path')
|
|
1613
|
+
break
|
|
1614
|
+
save_session_metadata(current_session_id, {
|
|
1615
|
+
'agent_name': agent_name,
|
|
1616
|
+
'agent_type': agent_type if 'agent_type' in dir() else 'Direct LLM',
|
|
1617
|
+
'agent_source': agent_source_portable,
|
|
1618
|
+
'model': llm_model_display,
|
|
1619
|
+
'temperature': llm_temperature_display,
|
|
1620
|
+
'work_dir': work_dir,
|
|
1621
|
+
'is_direct': is_direct,
|
|
1622
|
+
'is_local': is_local,
|
|
1623
|
+
'is_inventory': is_inventory,
|
|
1624
|
+
'added_toolkit_configs': serializable_toolkit_configs,
|
|
1625
|
+
'inventory_graph': inventory_graph,
|
|
1626
|
+
'added_mcps': [m if isinstance(m, str) else m.get('name') for m in agent_def.get('mcps', [])],
|
|
1627
|
+
})
|
|
1628
|
+
console.print(f"[dim]Session: {current_session_id}[/dim]")
|
|
1629
|
+
|
|
1630
|
+
# Initialize context manager for chat history management
|
|
1631
|
+
context_config = config.context_management
|
|
1632
|
+
ctx_manager = CLIContextManager(
|
|
1633
|
+
session_id=current_session_id,
|
|
1634
|
+
max_context_tokens=context_config.get('max_context_tokens', 8000),
|
|
1635
|
+
preserve_recent=context_config.get('preserve_recent_messages', 5),
|
|
1636
|
+
pruning_method=context_config.get('pruning_method', 'oldest_first'),
|
|
1637
|
+
enable_summarization=context_config.get('enable_summarization', True),
|
|
1638
|
+
summary_trigger_ratio=context_config.get('summary_trigger_ratio', 0.8),
|
|
1639
|
+
summaries_limit=context_config.get('summaries_limit_count', 5),
|
|
1640
|
+
llm=None # Will be set after LLM creation
|
|
1641
|
+
)
|
|
1642
|
+
|
|
1643
|
+
# Purge old sessions on startup (cleanup task)
|
|
1644
|
+
try:
|
|
1645
|
+
purge_context_sessions(
|
|
1646
|
+
sessions_dir=config.sessions_dir,
|
|
1647
|
+
max_age_days=context_config.get('session_max_age_days', 30),
|
|
1648
|
+
max_sessions=context_config.get('max_sessions', 100)
|
|
1649
|
+
)
|
|
1650
|
+
except Exception as e:
|
|
1651
|
+
logger.debug(f"Session cleanup failed: {e}")
|
|
1652
|
+
|
|
1653
|
+
# Create agent executor
|
|
1654
|
+
if is_direct or is_local or is_inventory:
|
|
1655
|
+
# Setup local agent executor (handles all config, tools, MCP, etc.)
|
|
1656
|
+
try:
|
|
1657
|
+
agent_executor, mcp_session_manager, llm, llm_model, filesystem_tools, terminal_tools, planning_tools = _setup_local_agent_executor(
|
|
1658
|
+
client, agent_def, tuple(added_toolkit_configs), config, current_model, current_temperature, current_max_tokens, memory, work_dir, plan_state
|
|
1659
|
+
)
|
|
1660
|
+
except Exception:
|
|
1661
|
+
return
|
|
1662
|
+
else:
|
|
1663
|
+
# Platform agent
|
|
1664
|
+
details = client.get_app_details(agent['id'])
|
|
1665
|
+
|
|
1666
|
+
if version:
|
|
1667
|
+
version_obj = next((v for v in details['versions'] if v['name'] == version), None)
|
|
1668
|
+
if not version_obj:
|
|
1669
|
+
raise click.ClickException(f"Version '{version}' not found")
|
|
1670
|
+
version_id = version_obj['id']
|
|
1671
|
+
else:
|
|
1672
|
+
# Use first version
|
|
1673
|
+
version_id = details['versions'][0]['id']
|
|
1674
|
+
|
|
1675
|
+
# Display configuration
|
|
1676
|
+
console.print()
|
|
1677
|
+
console.print("✓ [green]Connected to platform agent[/green]")
|
|
1678
|
+
console.print()
|
|
1679
|
+
|
|
1680
|
+
agent_executor = client.application(
|
|
1681
|
+
application_id=agent['id'],
|
|
1682
|
+
application_version_id=version_id,
|
|
1683
|
+
memory=memory,
|
|
1684
|
+
chat_history=chat_history
|
|
1685
|
+
)
|
|
1686
|
+
llm = None # Platform agents don't use direct LLM
|
|
1687
|
+
|
|
1688
|
+
# Set LLM on context manager for summarization
|
|
1689
|
+
if llm is not None:
|
|
1690
|
+
ctx_manager.llm = llm
|
|
1691
|
+
|
|
1692
|
+
# Initialize input handler for readline support
|
|
1693
|
+
input_handler = get_input_handler()
|
|
1694
|
+
|
|
1695
|
+
# Set up toolkit names callback for tab completion
|
|
1696
|
+
from .input_handler import set_toolkit_names_callback, set_inventory_files_callback
|
|
1697
|
+
set_toolkit_names_callback(lambda: _list_available_toolkits(config))
|
|
1698
|
+
|
|
1699
|
+
# Set up inventory files callback for /inventory tab completion
|
|
1700
|
+
set_inventory_files_callback(lambda: _get_inventory_json_files(allowed_directories[0] if allowed_directories else None))
|
|
1701
|
+
|
|
1702
|
+
# Interactive chat loop
|
|
1703
|
+
while True:
|
|
1704
|
+
try:
|
|
1705
|
+
# Get context info for the UI indicator
|
|
1706
|
+
context_info = ctx_manager.get_context_info()
|
|
1707
|
+
|
|
1708
|
+
# Get input with styled prompt (prompt is part of input() for proper readline handling)
|
|
1709
|
+
user_input = styled_input(context_info=context_info).strip()
|
|
1710
|
+
|
|
1711
|
+
if not user_input:
|
|
1712
|
+
continue
|
|
1713
|
+
|
|
1714
|
+
# Handle commands
|
|
1715
|
+
if user_input.lower() in ['exit', 'quit']:
|
|
1716
|
+
# Save final session state before exiting
|
|
1717
|
+
try:
|
|
1718
|
+
from .tools import update_session_metadata, to_portable_path
|
|
1719
|
+
update_session_metadata(current_session_id, {
|
|
1720
|
+
'agent_source': to_portable_path(current_agent_file) if current_agent_file else None,
|
|
1721
|
+
'model': current_model or llm_model_display,
|
|
1722
|
+
'temperature': current_temperature if current_temperature is not None else llm_temperature_display,
|
|
1723
|
+
'allowed_directories': allowed_directories,
|
|
1724
|
+
'added_toolkit_configs': list(added_toolkit_configs),
|
|
1725
|
+
'added_mcps': [m if isinstance(m, str) else m.get('name') for m in agent_def.get('mcps', [])],
|
|
1726
|
+
})
|
|
1727
|
+
except Exception as e:
|
|
1728
|
+
logger.debug(f"Failed to save session state on exit: {e}")
|
|
1729
|
+
console.print("\n[bold cyan]👋 Goodbye![/bold cyan]\n")
|
|
1730
|
+
break
|
|
1731
|
+
|
|
1732
|
+
if user_input == '/clear':
|
|
1733
|
+
chat_history = []
|
|
1734
|
+
ctx_manager.clear()
|
|
1735
|
+
console.print("[green]✓ Conversation history cleared.[/green]")
|
|
1736
|
+
continue
|
|
1737
|
+
|
|
1738
|
+
if user_input == '/history':
|
|
1739
|
+
if not chat_history:
|
|
1740
|
+
console.print("[yellow]No conversation history yet.[/yellow]")
|
|
1741
|
+
else:
|
|
1742
|
+
console.print("\n[bold cyan]── Conversation History ──[/bold cyan]")
|
|
1743
|
+
for i, msg in enumerate(chat_history, 1):
|
|
1744
|
+
role = msg.get('role', 'unknown')
|
|
1745
|
+
content = msg.get('content', '')
|
|
1746
|
+
role_color = 'blue' if role == 'user' else 'green'
|
|
1747
|
+
included_marker = "" if ctx_manager.is_message_included(i - 1) else " [dim](pruned)[/dim]"
|
|
1748
|
+
console.print(f"\n[bold {role_color}]{i}. {role.upper()}:[/bold {role_color}] {content[:100]}...{included_marker}")
|
|
1749
|
+
continue
|
|
1750
|
+
|
|
1751
|
+
if user_input == '/save':
|
|
1752
|
+
console.print("[yellow]Save to file (default: conversation.json):[/yellow] ", end="")
|
|
1753
|
+
filename = input().strip()
|
|
1754
|
+
filename = filename or "conversation.json"
|
|
1755
|
+
with open(filename, 'w') as f:
|
|
1756
|
+
json.dump({'history': chat_history}, f, indent=2)
|
|
1757
|
+
console.print(f"[green]✓ Conversation saved to {filename}[/green]")
|
|
1758
|
+
continue
|
|
1759
|
+
|
|
1760
|
+
if user_input == '/help':
|
|
1761
|
+
print_help()
|
|
1762
|
+
continue
|
|
1763
|
+
|
|
1764
|
+
# /model command - switch model
|
|
1765
|
+
if user_input == '/model':
|
|
1766
|
+
if not (is_direct or is_local):
|
|
1767
|
+
console.print("[yellow]Model switching is only available for local agents and direct chat.[/yellow]")
|
|
1768
|
+
continue
|
|
1769
|
+
|
|
1770
|
+
selected_model = _select_model_interactive(client)
|
|
1771
|
+
if selected_model:
|
|
1772
|
+
current_model = selected_model['name']
|
|
1773
|
+
agent_def['model'] = current_model
|
|
1774
|
+
|
|
1775
|
+
# Recreate LLM and agent executor - use session memory to preserve history
|
|
1776
|
+
from .tools import create_session_memory, update_session_metadata
|
|
1777
|
+
memory = create_session_memory(current_session_id)
|
|
1778
|
+
try:
|
|
1779
|
+
agent_executor, mcp_session_manager, llm, llm_model, filesystem_tools, terminal_tools, planning_tools = _setup_local_agent_executor(
|
|
1780
|
+
client, agent_def, tuple(added_toolkit_configs), config, current_model, current_temperature, current_max_tokens, memory, allowed_directories, plan_state
|
|
1781
|
+
)
|
|
1782
|
+
# Persist model change to session
|
|
1783
|
+
update_session_metadata(current_session_id, {
|
|
1784
|
+
'model': current_model,
|
|
1785
|
+
'temperature': current_temperature if current_temperature is not None else agent_def.get('temperature', 0.7)
|
|
1786
|
+
})
|
|
1787
|
+
console.print(Panel(
|
|
1788
|
+
f"[cyan]ℹ Model switched to [bold]{current_model}[/bold]. Agent state reset, chat history preserved.[/cyan]",
|
|
1789
|
+
border_style="cyan",
|
|
1790
|
+
box=box.ROUNDED
|
|
1791
|
+
))
|
|
1792
|
+
except Exception as e:
|
|
1793
|
+
console.print(f"[red]Error switching model: {e}[/red]")
|
|
1794
|
+
continue
|
|
1795
|
+
|
|
1796
|
+
# /reload command - reload agent definition from file
|
|
1797
|
+
if user_input == '/reload':
|
|
1798
|
+
if not is_local:
|
|
1799
|
+
if is_direct or is_inventory:
|
|
1800
|
+
console.print("[yellow]Cannot reload built-in agent mode - no agent file to reload.[/yellow]")
|
|
1801
|
+
else:
|
|
1802
|
+
console.print("[yellow]Reload is only available for local agents (file-based).[/yellow]")
|
|
1803
|
+
continue
|
|
1804
|
+
|
|
1805
|
+
if not current_agent_file or not Path(current_agent_file).exists():
|
|
1806
|
+
console.print("[red]Agent file not found. Cannot reload.[/red]")
|
|
1807
|
+
continue
|
|
1808
|
+
|
|
1809
|
+
try:
|
|
1810
|
+
# Reload agent definition from file
|
|
1811
|
+
new_agent_def = load_agent_definition(current_agent_file)
|
|
1812
|
+
|
|
1813
|
+
# Preserve runtime additions (MCPs, tools added via commands)
|
|
1814
|
+
if 'mcps' in agent_def and agent_def['mcps']:
|
|
1815
|
+
# Merge MCPs: file MCPs + runtime added MCPs
|
|
1816
|
+
file_mcps = new_agent_def.get('mcps', [])
|
|
1817
|
+
for mcp in agent_def['mcps']:
|
|
1818
|
+
mcp_name = mcp if isinstance(mcp, str) else mcp.get('name')
|
|
1819
|
+
file_mcp_names = [m if isinstance(m, str) else m.get('name') for m in file_mcps]
|
|
1820
|
+
if mcp_name not in file_mcp_names:
|
|
1821
|
+
file_mcps.append(mcp)
|
|
1822
|
+
new_agent_def['mcps'] = file_mcps
|
|
1823
|
+
|
|
1824
|
+
# Update agent_def with new values (preserving model/temp overrides)
|
|
1825
|
+
old_system_prompt = agent_def.get('system_prompt', '')
|
|
1826
|
+
new_system_prompt = new_agent_def.get('system_prompt', '')
|
|
1827
|
+
|
|
1828
|
+
agent_def.update(new_agent_def)
|
|
1829
|
+
|
|
1830
|
+
# Restore runtime overrides
|
|
1831
|
+
if current_model:
|
|
1832
|
+
agent_def['model'] = current_model
|
|
1833
|
+
if current_temperature is not None:
|
|
1834
|
+
agent_def['temperature'] = current_temperature
|
|
1835
|
+
if current_max_tokens:
|
|
1836
|
+
agent_def['max_tokens'] = current_max_tokens
|
|
1837
|
+
|
|
1838
|
+
# Recreate agent executor with reloaded definition
|
|
1839
|
+
from .tools import create_session_memory
|
|
1840
|
+
memory = create_session_memory(current_session_id)
|
|
1841
|
+
agent_executor, mcp_session_manager, llm, llm_model, filesystem_tools, terminal_tools, planning_tools = _setup_local_agent_executor(
|
|
1842
|
+
client, agent_def, tuple(added_toolkit_configs), config, current_model, current_temperature, current_max_tokens, memory, allowed_directories, plan_state
|
|
1843
|
+
)
|
|
1844
|
+
|
|
1845
|
+
# Show what changed
|
|
1846
|
+
prompt_changed = old_system_prompt != new_system_prompt
|
|
1847
|
+
agent_name = agent_def.get('name', Path(current_agent_file).stem)
|
|
1848
|
+
|
|
1849
|
+
if prompt_changed:
|
|
1850
|
+
console.print(Panel(
|
|
1851
|
+
f"[green]✓ Reloaded agent: [bold]{agent_name}[/bold][/green]\n"
|
|
1852
|
+
f"[dim]System prompt updated ({len(new_system_prompt)} chars)[/dim]",
|
|
1853
|
+
border_style="green",
|
|
1854
|
+
box=box.ROUNDED
|
|
1855
|
+
))
|
|
1856
|
+
else:
|
|
1857
|
+
console.print(Panel(
|
|
1858
|
+
f"[cyan]ℹ Reloaded agent: [bold]{agent_name}[/bold][/cyan]\n"
|
|
1859
|
+
f"[dim]No changes detected in system prompt[/dim]",
|
|
1860
|
+
border_style="cyan",
|
|
1861
|
+
box=box.ROUNDED
|
|
1862
|
+
))
|
|
1863
|
+
except Exception as e:
|
|
1864
|
+
console.print(f"[red]Error reloading agent: {e}[/red]")
|
|
1865
|
+
continue
|
|
1866
|
+
|
|
1867
|
+
# /add_mcp command - add MCP server
|
|
1868
|
+
if user_input == '/add_mcp':
|
|
1869
|
+
if not (is_direct or is_local or is_inventory):
|
|
1870
|
+
console.print("[yellow]Adding MCP is only available for local agents and built-in agents.[/yellow]")
|
|
1871
|
+
continue
|
|
1872
|
+
|
|
1873
|
+
selected_mcp = _select_mcp_interactive(config)
|
|
1874
|
+
if selected_mcp:
|
|
1875
|
+
mcp_name = selected_mcp['name']
|
|
1876
|
+
# Add MCP to agent definition
|
|
1877
|
+
if 'mcps' not in agent_def:
|
|
1878
|
+
agent_def['mcps'] = []
|
|
1879
|
+
if mcp_name not in [m if isinstance(m, str) else m.get('name') for m in agent_def.get('mcps', [])]:
|
|
1880
|
+
agent_def['mcps'].append(mcp_name)
|
|
1881
|
+
|
|
1882
|
+
# Recreate agent executor with new MCP - use session memory to preserve history
|
|
1883
|
+
from .tools import create_session_memory, update_session_metadata
|
|
1884
|
+
memory = create_session_memory(current_session_id)
|
|
1885
|
+
try:
|
|
1886
|
+
agent_executor, mcp_session_manager, llm, llm_model, filesystem_tools, terminal_tools, planning_tools = _setup_local_agent_executor(
|
|
1887
|
+
client, agent_def, tuple(added_toolkit_configs), config, current_model, current_temperature, current_max_tokens, memory, allowed_directories, plan_state
|
|
1888
|
+
)
|
|
1889
|
+
# Persist added MCPs to session
|
|
1890
|
+
update_session_metadata(current_session_id, {
|
|
1891
|
+
'added_mcps': [m if isinstance(m, str) else m.get('name') for m in agent_def.get('mcps', [])]
|
|
1892
|
+
})
|
|
1893
|
+
console.print(Panel(
|
|
1894
|
+
f"[cyan]ℹ Added MCP: [bold]{mcp_name}[/bold]. Agent state reset, chat history preserved.[/cyan]",
|
|
1895
|
+
border_style="cyan",
|
|
1896
|
+
box=box.ROUNDED
|
|
1897
|
+
))
|
|
1898
|
+
except Exception as e:
|
|
1899
|
+
console.print(f"[red]Error adding MCP: {e}[/red]")
|
|
1900
|
+
continue
|
|
1901
|
+
|
|
1902
|
+
# /add_toolkit command - add toolkit
|
|
1903
|
+
if user_input == '/add_toolkit' or user_input.startswith('/add_toolkit '):
|
|
1904
|
+
if not (is_direct or is_local or is_inventory):
|
|
1905
|
+
console.print("[yellow]Adding toolkit is only available for local agents and built-in agents.[/yellow]")
|
|
1906
|
+
continue
|
|
1907
|
+
|
|
1908
|
+
parts = user_input.split(maxsplit=1)
|
|
1909
|
+
if len(parts) == 2:
|
|
1910
|
+
# Direct toolkit selection by name
|
|
1911
|
+
toolkit_name_arg = parts[1].strip()
|
|
1912
|
+
selected_toolkit = _find_toolkit_by_name(config, toolkit_name_arg)
|
|
1913
|
+
if not selected_toolkit:
|
|
1914
|
+
console.print(f"[yellow]Toolkit '{toolkit_name_arg}' not found.[/yellow]")
|
|
1915
|
+
# Show available toolkits
|
|
1916
|
+
available = _list_available_toolkits(config)
|
|
1917
|
+
if available:
|
|
1918
|
+
console.print(f"[dim]Available toolkits: {', '.join(available)}[/dim]")
|
|
1919
|
+
continue
|
|
1920
|
+
else:
|
|
1921
|
+
# Interactive selection
|
|
1922
|
+
selected_toolkit = _select_toolkit_interactive(config)
|
|
1923
|
+
|
|
1924
|
+
if selected_toolkit:
|
|
1925
|
+
toolkit_name = selected_toolkit['name']
|
|
1926
|
+
toolkit_file = selected_toolkit['file']
|
|
1927
|
+
|
|
1928
|
+
# Add toolkit config path
|
|
1929
|
+
if toolkit_file not in added_toolkit_configs:
|
|
1930
|
+
added_toolkit_configs.append(toolkit_file)
|
|
1931
|
+
|
|
1932
|
+
# Recreate agent executor with new toolkit - use session memory to preserve history
|
|
1933
|
+
from .tools import create_session_memory, update_session_metadata
|
|
1934
|
+
memory = create_session_memory(current_session_id)
|
|
1935
|
+
try:
|
|
1936
|
+
agent_executor, mcp_session_manager, llm, llm_model, filesystem_tools, terminal_tools, planning_tools = _setup_local_agent_executor(
|
|
1937
|
+
client, agent_def, tuple(added_toolkit_configs), config, current_model, current_temperature, current_max_tokens, memory, allowed_directories, plan_state
|
|
1938
|
+
)
|
|
1939
|
+
# Persist added toolkits to session
|
|
1940
|
+
update_session_metadata(current_session_id, {
|
|
1941
|
+
'added_toolkit_configs': list(added_toolkit_configs)
|
|
1942
|
+
})
|
|
1943
|
+
console.print(Panel(
|
|
1944
|
+
f"[cyan]ℹ Added toolkit: [bold]{toolkit_name}[/bold]. Agent state reset, chat history preserved.[/cyan]",
|
|
1945
|
+
border_style="cyan",
|
|
1946
|
+
box=box.ROUNDED
|
|
1947
|
+
))
|
|
1948
|
+
except Exception as e:
|
|
1949
|
+
console.print(f"[red]Error adding toolkit: {e}[/red]")
|
|
1950
|
+
continue
|
|
1951
|
+
|
|
1952
|
+
# /rm_mcp command - remove MCP server
|
|
1953
|
+
if user_input == '/rm_mcp' or user_input.startswith('/rm_mcp '):
|
|
1954
|
+
if not (is_direct or is_local or is_inventory):
|
|
1955
|
+
console.print("[yellow]Removing MCP is only available for local agents and built-in agents.[/yellow]")
|
|
1956
|
+
continue
|
|
1957
|
+
|
|
1958
|
+
current_mcps = agent_def.get('mcps', [])
|
|
1959
|
+
if not current_mcps:
|
|
1960
|
+
console.print("[yellow]No MCP servers are currently loaded.[/yellow]")
|
|
1961
|
+
continue
|
|
1962
|
+
|
|
1963
|
+
# Get list of MCP names
|
|
1964
|
+
mcp_names = [m if isinstance(m, str) else m.get('name') for m in current_mcps]
|
|
1965
|
+
|
|
1966
|
+
parts = user_input.split(maxsplit=1)
|
|
1967
|
+
if len(parts) == 2:
|
|
1968
|
+
# Direct removal by name
|
|
1969
|
+
mcp_name_to_remove = parts[1].strip()
|
|
1970
|
+
if mcp_name_to_remove not in mcp_names:
|
|
1971
|
+
console.print(f"[yellow]MCP '{mcp_name_to_remove}' not found.[/yellow]")
|
|
1972
|
+
console.print(f"[dim]Loaded MCPs: {', '.join(mcp_names)}[/dim]")
|
|
1973
|
+
continue
|
|
1974
|
+
else:
|
|
1975
|
+
# Interactive selection
|
|
1976
|
+
console.print("\n🔌 [bold cyan]Remove MCP Server[/bold cyan]\n")
|
|
1977
|
+
for i, name in enumerate(mcp_names, 1):
|
|
1978
|
+
console.print(f" [bold]{i}[/bold]. {name}")
|
|
1979
|
+
console.print(f" [bold]0[/bold]. [dim]Cancel[/dim]")
|
|
1980
|
+
console.print()
|
|
1981
|
+
|
|
1982
|
+
try:
|
|
1983
|
+
choice = int(input("Select MCP to remove: ").strip())
|
|
1984
|
+
if choice == 0:
|
|
1985
|
+
continue
|
|
1986
|
+
if 1 <= choice <= len(mcp_names):
|
|
1987
|
+
mcp_name_to_remove = mcp_names[choice - 1]
|
|
1988
|
+
else:
|
|
1989
|
+
console.print("[yellow]Invalid selection.[/yellow]")
|
|
1990
|
+
continue
|
|
1991
|
+
except (ValueError, KeyboardInterrupt):
|
|
1992
|
+
continue
|
|
1993
|
+
|
|
1994
|
+
# Remove the MCP
|
|
1995
|
+
agent_def['mcps'] = [m for m in current_mcps if (m if isinstance(m, str) else m.get('name')) != mcp_name_to_remove]
|
|
1996
|
+
|
|
1997
|
+
# Recreate agent executor without the MCP
|
|
1998
|
+
from .tools import create_session_memory, update_session_metadata
|
|
1999
|
+
memory = create_session_memory(current_session_id)
|
|
2000
|
+
try:
|
|
2001
|
+
agent_executor, mcp_session_manager, llm, llm_model, filesystem_tools, terminal_tools, planning_tools = _setup_local_agent_executor(
|
|
2002
|
+
client, agent_def, tuple(added_toolkit_configs), config, current_model, current_temperature, current_max_tokens, memory, allowed_directories, plan_state
|
|
2003
|
+
)
|
|
2004
|
+
# Persist updated MCPs to session
|
|
2005
|
+
update_session_metadata(current_session_id, {
|
|
2006
|
+
'added_mcps': [m if isinstance(m, str) else m.get('name') for m in agent_def.get('mcps', [])]
|
|
2007
|
+
})
|
|
2008
|
+
console.print(Panel(
|
|
2009
|
+
f"[cyan]ℹ Removed MCP: [bold]{mcp_name_to_remove}[/bold]. Agent state reset, chat history preserved.[/cyan]",
|
|
2010
|
+
border_style="cyan",
|
|
2011
|
+
box=box.ROUNDED
|
|
2012
|
+
))
|
|
2013
|
+
except Exception as e:
|
|
2014
|
+
console.print(f"[red]Error removing MCP: {e}[/red]")
|
|
2015
|
+
continue
|
|
2016
|
+
|
|
2017
|
+
# /rm_toolkit command - remove toolkit
|
|
2018
|
+
if user_input == '/rm_toolkit' or user_input.startswith('/rm_toolkit '):
|
|
2019
|
+
if not (is_direct or is_local or is_inventory):
|
|
2020
|
+
console.print("[yellow]Removing toolkit is only available for local agents and built-in agents.[/yellow]")
|
|
2021
|
+
continue
|
|
2022
|
+
|
|
2023
|
+
if not added_toolkit_configs:
|
|
2024
|
+
console.print("[yellow]No toolkits are currently loaded.[/yellow]")
|
|
2025
|
+
continue
|
|
2026
|
+
|
|
2027
|
+
# Get toolkit names from config files
|
|
2028
|
+
toolkit_info = [] # List of (name, file_path)
|
|
2029
|
+
for toolkit_file in added_toolkit_configs:
|
|
2030
|
+
try:
|
|
2031
|
+
with open(toolkit_file, 'r') as f:
|
|
2032
|
+
tk_config = json.load(f)
|
|
2033
|
+
tk_name = tk_config.get('toolkit_name', Path(toolkit_file).stem)
|
|
2034
|
+
toolkit_info.append((tk_name, toolkit_file))
|
|
2035
|
+
except Exception:
|
|
2036
|
+
toolkit_info.append((Path(toolkit_file).stem, toolkit_file))
|
|
2037
|
+
|
|
2038
|
+
parts = user_input.split(maxsplit=1)
|
|
2039
|
+
if len(parts) == 2:
|
|
2040
|
+
# Direct removal by name
|
|
2041
|
+
toolkit_name_to_remove = parts[1].strip()
|
|
2042
|
+
matching = [(name, path) for name, path in toolkit_info if name == toolkit_name_to_remove]
|
|
2043
|
+
if not matching:
|
|
2044
|
+
console.print(f"[yellow]Toolkit '{toolkit_name_to_remove}' not found.[/yellow]")
|
|
2045
|
+
console.print(f"[dim]Loaded toolkits: {', '.join(name for name, _ in toolkit_info)}[/dim]")
|
|
2046
|
+
continue
|
|
2047
|
+
toolkit_file_to_remove = matching[0][1]
|
|
2048
|
+
else:
|
|
2049
|
+
# Interactive selection
|
|
2050
|
+
console.print("\n🔧 [bold cyan]Remove Toolkit[/bold cyan]\n")
|
|
2051
|
+
for i, (name, _) in enumerate(toolkit_info, 1):
|
|
2052
|
+
console.print(f" [bold]{i}[/bold]. {name}")
|
|
2053
|
+
console.print(f" [bold]0[/bold]. [dim]Cancel[/dim]")
|
|
2054
|
+
console.print()
|
|
2055
|
+
|
|
2056
|
+
try:
|
|
2057
|
+
choice = int(input("Select toolkit to remove: ").strip())
|
|
2058
|
+
if choice == 0:
|
|
2059
|
+
continue
|
|
2060
|
+
if 1 <= choice <= len(toolkit_info):
|
|
2061
|
+
toolkit_name_to_remove, toolkit_file_to_remove = toolkit_info[choice - 1]
|
|
2062
|
+
else:
|
|
2063
|
+
console.print("[yellow]Invalid selection.[/yellow]")
|
|
2064
|
+
continue
|
|
2065
|
+
except (ValueError, KeyboardInterrupt):
|
|
2066
|
+
continue
|
|
2067
|
+
|
|
2068
|
+
# Remove the toolkit
|
|
2069
|
+
added_toolkit_configs.remove(toolkit_file_to_remove)
|
|
2070
|
+
|
|
2071
|
+
# Recreate agent executor without the toolkit
|
|
2072
|
+
from .tools import create_session_memory, update_session_metadata
|
|
2073
|
+
memory = create_session_memory(current_session_id)
|
|
2074
|
+
try:
|
|
2075
|
+
agent_executor, mcp_session_manager, llm, llm_model, filesystem_tools, terminal_tools, planning_tools = _setup_local_agent_executor(
|
|
2076
|
+
client, agent_def, tuple(added_toolkit_configs), config, current_model, current_temperature, current_max_tokens, memory, allowed_directories, plan_state
|
|
2077
|
+
)
|
|
2078
|
+
# Persist updated toolkits to session
|
|
2079
|
+
update_session_metadata(current_session_id, {
|
|
2080
|
+
'added_toolkit_configs': list(added_toolkit_configs)
|
|
2081
|
+
})
|
|
2082
|
+
console.print(Panel(
|
|
2083
|
+
f"[cyan]ℹ Removed toolkit: [bold]{toolkit_name_to_remove}[/bold]. Agent state reset, chat history preserved.[/cyan]",
|
|
2084
|
+
border_style="cyan",
|
|
2085
|
+
box=box.ROUNDED
|
|
2086
|
+
))
|
|
2087
|
+
except Exception as e:
|
|
2088
|
+
console.print(f"[red]Error removing toolkit: {e}[/red]")
|
|
2089
|
+
continue
|
|
2090
|
+
|
|
2091
|
+
# /mode command - set approval mode
|
|
2092
|
+
if user_input == '/mode' or user_input.startswith('/mode '):
|
|
2093
|
+
parts = user_input.split(maxsplit=1)
|
|
2094
|
+
if len(parts) == 1:
|
|
2095
|
+
# Show current mode and options
|
|
2096
|
+
mode_info = {
|
|
2097
|
+
'always': ('yellow', 'Confirm before each tool execution'),
|
|
2098
|
+
'auto': ('green', 'Execute tools without confirmation'),
|
|
2099
|
+
'yolo': ('red', 'No confirmations, skip safety warnings')
|
|
2100
|
+
}
|
|
2101
|
+
console.print("\n🔧 [bold cyan]Approval Mode:[/bold cyan]\n")
|
|
2102
|
+
for mode_name, (color, desc) in mode_info.items():
|
|
2103
|
+
marker = "●" if mode_name == approval_mode else "○"
|
|
2104
|
+
console.print(f" [{color}]{marker}[/{color}] [bold]{mode_name}[/bold] - {desc}")
|
|
2105
|
+
console.print(f"\n[dim]Usage: /mode <always|auto|yolo>[/dim]")
|
|
2106
|
+
else:
|
|
2107
|
+
new_mode = parts[1].lower().strip()
|
|
2108
|
+
if new_mode in ['always', 'auto', 'yolo']:
|
|
2109
|
+
approval_mode = new_mode
|
|
2110
|
+
mode_colors = {'always': 'yellow', 'auto': 'green', 'yolo': 'red'}
|
|
2111
|
+
console.print(f"✓ [green]Mode set to[/green] [{mode_colors[new_mode]}][bold]{new_mode}[/bold][/{mode_colors[new_mode]}]")
|
|
2112
|
+
else:
|
|
2113
|
+
console.print(f"[yellow]Unknown mode: {new_mode}. Use: always, auto, or yolo[/yellow]")
|
|
2114
|
+
continue
|
|
2115
|
+
|
|
2116
|
+
# /dir command - manage allowed directories
|
|
2117
|
+
if user_input == '/dir' or user_input.startswith('/dir '):
|
|
2118
|
+
parts = user_input.split()
|
|
2119
|
+
|
|
2120
|
+
if len(parts) == 1:
|
|
2121
|
+
# /dir - list all allowed directories
|
|
2122
|
+
if allowed_directories:
|
|
2123
|
+
console.print("📁 [bold cyan]Allowed directories:[/bold cyan]")
|
|
2124
|
+
for i, d in enumerate(allowed_directories):
|
|
2125
|
+
marker = "●" if i == 0 else "○"
|
|
2126
|
+
label = " [dim](primary)[/dim]" if i == 0 else ""
|
|
2127
|
+
console.print(f" {marker} {d}{label}")
|
|
2128
|
+
else:
|
|
2129
|
+
console.print("[yellow]No directories allowed.[/yellow]")
|
|
2130
|
+
console.print("[dim]Usage: /dir [add|rm|remove] /path/to/directory[/dim]")
|
|
2131
|
+
continue
|
|
2132
|
+
                    action = parts[1].lower()

                    # Handle /dir add /path or /dir /path (add is default)
                    if action in ['add', 'rm', 'remove']:
                        if len(parts) < 3:
                            console.print(f"[yellow]Missing path. Usage: /dir {action} /path/to/directory[/yellow]")
                            continue
                        dir_path = parts[2]
                    else:
                        # /dir /path - default to add
                        action = 'add'
                        dir_path = parts[1]

                    dir_path = str(Path(dir_path).expanduser().resolve())

                    if action == 'add':
                        if not Path(dir_path).exists():
                            console.print(f"[red]Directory not found: {dir_path}[/red]")
                            continue
                        if not Path(dir_path).is_dir():
                            console.print(f"[red]Not a directory: {dir_path}[/red]")
                            continue

                        if dir_path in allowed_directories:
                            console.print(f"[yellow]Directory already allowed: {dir_path}[/yellow]")
                            continue

                        allowed_directories.append(dir_path)

                        # Recreate agent executor with updated directories
                        if is_direct or is_local or is_inventory:
                            from .tools import create_session_memory
                            memory = create_session_memory(current_session_id)
                            try:
                                agent_executor, mcp_session_manager, llm, llm_model, filesystem_tools, terminal_tools, planning_tools = _setup_local_agent_executor(
                                    client, agent_def, tuple(added_toolkit_configs), config, current_model, current_temperature, current_max_tokens, memory, allowed_directories, plan_state
                                )
                                console.print(Panel(
                                    f"[cyan]✓ Added directory: [bold]{dir_path}[/bold]\n  Total allowed: {len(allowed_directories)}[/cyan]",
                                    border_style="cyan",
                                    box=box.ROUNDED
                                ))
                            except Exception as e:
                                allowed_directories.remove(dir_path)  # Rollback
                                console.print(f"[red]Error adding directory: {e}[/red]")
                        else:
                            console.print("[yellow]Directory mounting is only available for local agents and built-in agents.[/yellow]")

                    elif action in ['rm', 'remove']:
                        if dir_path not in allowed_directories:
                            console.print(f"[yellow]Directory not in allowed list: {dir_path}[/yellow]")
                            if allowed_directories:
                                console.print("[dim]Currently allowed:[/dim]")
                                for d in allowed_directories:
                                    console.print(f"[dim]  - {d}[/dim]")
                            continue

                        if len(allowed_directories) == 1:
                            console.print("[yellow]Cannot remove the last directory. Use /dir add first to add another.[/yellow]")
                            continue

                        allowed_directories.remove(dir_path)

                        # Recreate agent executor with updated directories
                        if is_direct or is_local or is_inventory:
                            from .tools import create_session_memory
                            memory = create_session_memory(current_session_id)
                            try:
                                agent_executor, mcp_session_manager, llm, llm_model, filesystem_tools, terminal_tools, planning_tools = _setup_local_agent_executor(
                                    client, agent_def, tuple(added_toolkit_configs), config, current_model, current_temperature, current_max_tokens, memory, allowed_directories, plan_state
                                )
                                console.print(Panel(
                                    f"[cyan]✓ Removed directory: [bold]{dir_path}[/bold]\n  Remaining: {len(allowed_directories)}[/cyan]",
                                    border_style="cyan",
                                    box=box.ROUNDED
                                ))
                            except Exception as e:
                                allowed_directories.append(dir_path)  # Rollback
                                console.print(f"[red]Error removing directory: {e}[/red]")
                        else:
                            console.print("[yellow]Directory mounting is only available for local agents and built-in agents.[/yellow]")
                    continue

                # /inventory command - load inventory/knowledge graph from path
                if user_input == '/inventory' or user_input.startswith('/inventory '):
                    if not (is_direct or is_local or is_inventory):
                        console.print("[yellow]Loading inventory is only available for local agents and built-in agents.[/yellow]")
                        continue

                    parts = user_input.split(maxsplit=1)
                    if len(parts) == 1:
                        # Show current inventory and available files
                        current_inventory = None
                        for tc in added_toolkit_configs:
                            if isinstance(tc, dict) and tc.get('type') == 'inventory':
                                current_inventory = tc.get('graph_path')
                                break
                            elif isinstance(tc, str):
                                try:
                                    with open(tc, 'r') as f:
                                        cfg = json.load(f)
                                        if cfg.get('type') == 'inventory':
                                            current_inventory = cfg.get('graph_path')
                                            break
                                except Exception:
                                    pass

                        if current_inventory:
                            console.print(f"📊 [bold cyan]Current inventory:[/bold cyan] {current_inventory}")
                        else:
                            console.print("[yellow]No inventory loaded.[/yellow]")

                        # Show available .json files
                        primary_dir = allowed_directories[0] if allowed_directories else None
                        available = _get_inventory_json_files(primary_dir)
                        if available:
                            console.print(f"[dim]Available files: {', '.join(available[:10])}")
                            if len(available) > 10:
                                console.print(f"[dim]  ... and {len(available) - 10} more[/dim]")
                        console.print("[dim]Usage: /inventory <path/to/graph.json>[/dim]")
                    else:
                        inventory_path = parts[1].strip()

                        # Build inventory config from path
                        primary_dir = allowed_directories[0] if allowed_directories else None
                        inventory_config = _build_inventory_config(inventory_path, primary_dir)
                        if not inventory_config:
                            console.print(f"[red]Inventory file not found: {inventory_path}[/red]")
                            # Show search locations
                            console.print("[dim]Searched in:[/dim]")
                            console.print(f"[dim]  - {Path.cwd()}[/dim]")
                            console.print(f"[dim]  - {Path.cwd() / '.alita' / 'inventory'}[/dim]")
                            if primary_dir:
                                console.print(f"[dim]  - {primary_dir}[/dim]")
                                console.print(f"[dim]  - {Path(primary_dir) / '.alita' / 'inventory'}[/dim]")
                            continue

                        # Remove any existing inventory toolkit configs
                        new_toolkit_configs = []
                        removed_inventory = None
                        for tc in added_toolkit_configs:
                            if isinstance(tc, dict) and tc.get('type') == 'inventory':
                                removed_inventory = tc.get('toolkit_name', 'inventory')
                                continue  # Skip existing inventory
                            elif isinstance(tc, str):
                                try:
                                    with open(tc, 'r') as f:
                                        cfg = json.load(f)
                                        if cfg.get('type') == 'inventory':
                                            removed_inventory = cfg.get('toolkit_name', Path(tc).stem)
                                            continue  # Skip existing inventory
                                except Exception:
                                    pass
                            new_toolkit_configs.append(tc)

                        # Add new inventory config
                        new_toolkit_configs.append(inventory_config)
                        added_toolkit_configs = new_toolkit_configs

                        # Recreate agent executor with new inventory
                        from .tools import create_session_memory, update_session_metadata
                        memory = create_session_memory(current_session_id)
                        try:
                            agent_executor, mcp_session_manager, llm, llm_model, filesystem_tools, terminal_tools, planning_tools = _setup_local_agent_executor(
                                client, agent_def, tuple(added_toolkit_configs), config, current_model, current_temperature, current_max_tokens, memory, allowed_directories, plan_state
                            )
                            # Persist updated toolkits to session (exclude transient inventory configs)
                            serializable_configs = [tc for tc in added_toolkit_configs if isinstance(tc, str)]
                            update_session_metadata(current_session_id, {
                                'added_toolkit_configs': serializable_configs,
                                'inventory_graph': inventory_config.get('graph_path')  # Save just the graph path
                            })

                            toolkit_name = inventory_config['toolkit_name']
                            graph_path = inventory_config['graph_path']
                            if removed_inventory:
                                console.print(Panel(
                                    f"[cyan]ℹ Replaced inventory [bold]{removed_inventory}[/bold] with [bold]{toolkit_name}[/bold]\n"
                                    f"  Graph: {graph_path}[/cyan]",
                                    border_style="cyan",
                                    box=box.ROUNDED
                                ))
                            else:
                                console.print(Panel(
                                    f"[cyan]✓ Loaded inventory: [bold]{toolkit_name}[/bold]\n"
                                    f"  Graph: {graph_path}[/cyan]",
                                    border_style="cyan",
                                    box=box.ROUNDED
                                ))
                        except Exception as e:
                            console.print(f"[red]Error loading inventory: {e}[/red]")
                    continue

                # /session command - list or resume sessions
                if user_input == '/session' or user_input.startswith('/session '):
                    from .tools import list_sessions, PlanState
                    parts = user_input.split(maxsplit=2)

                    if len(parts) == 1 or parts[1] == 'list':
                        # List all sessions with plans
                        sessions = list_sessions()
                        if not sessions:
                            console.print("[dim]No saved sessions found.[/dim]")
                            console.print("[dim]Sessions are created when you start chatting.[/dim]")
                        else:
                            console.print("\n📋 [bold cyan]Saved Sessions:[/bold cyan]\n")
                            from datetime import datetime
                            for i, sess in enumerate(sessions[:10], 1):  # Show last 10
                                modified = datetime.fromtimestamp(sess['modified']).strftime('%Y-%m-%d %H:%M')

                                # Build session info line
                                agent_info = sess.get('agent_name', 'unknown')
                                model_info = sess.get('model', '')
                                if model_info:
                                    agent_info = f"{agent_info} ({model_info})"

                                # Check if this is current session
                                is_current = sess['session_id'] == current_session_id
                                current_marker = " [green]◀ current[/green]" if is_current else ""

                                # Plan progress if available
                                if sess.get('steps_total', 0) > 0:
                                    progress = f"[{sess['steps_completed']}/{sess['steps_total']}]"
                                    status = "✓" if sess['steps_completed'] == sess['steps_total'] else "○"
                                    plan_info = f" - {sess.get('title', 'Untitled')} {progress}"
                                else:
                                    status = "●"
                                    plan_info = ""

                                console.print(f"  {status} [cyan]{sess['session_id']}[/cyan]{plan_info}")
                                console.print(f"     [dim]{agent_info} • {modified}[/dim]{current_marker}")
                            console.print(f"\n[dim]Usage: /session resume <session_id>[/dim]")

                    elif parts[1] == 'resume' and len(parts) > 2:
                        session_id = parts[2].strip()
                        from .tools import load_session_metadata, create_session_memory, from_portable_path

                        # Check if session exists (either plan or metadata)
                        loaded_state = PlanState.load(session_id)
                        session_metadata = load_session_metadata(session_id)

                        if loaded_state or session_metadata:
                            # Update current session to use this session_id
                            current_session_id = session_id

                            # Restore memory from session SQLite (reuses existing memory.db file)
                            memory = create_session_memory(session_id)

                            # Update plan state if available
                            if loaded_state:
                                plan_state.update(loaded_state.to_dict())
                                resume_info = f"\n\n{loaded_state.render()}"
                            else:
                                plan_state['session_id'] = session_id
                                resume_info = ""

                            # Restore agent source and reload agent definition if available
                            restored_agent = False
                            if session_metadata:
                                agent_source = session_metadata.get('agent_source')
                                if agent_source:
                                    agent_file_path = from_portable_path(agent_source)
                                    if Path(agent_file_path).exists():
                                        try:
                                            agent_def = load_agent_definition(agent_file_path)
                                            current_agent_file = agent_file_path
                                            agent_name = agent_def.get('name', Path(agent_file_path).stem)
                                            is_local = True
                                            is_direct = False
                                            restored_agent = True
                                        except Exception as e:
                                            console.print(f"[yellow]Warning: Could not reload agent from {agent_source}: {e}[/yellow]")

                                # Restore added toolkit configs
                                restored_toolkit_configs = session_metadata.get('added_toolkit_configs', [])
                                if restored_toolkit_configs:
                                    added_toolkit_configs.clear()
                                    added_toolkit_configs.extend(restored_toolkit_configs)

                                # Restore added MCPs to agent_def
                                restored_mcps = session_metadata.get('added_mcps', [])
                                if restored_mcps and restored_agent:
                                    if 'mcps' not in agent_def:
                                        agent_def['mcps'] = []
                                    for mcp_name in restored_mcps:
                                        if mcp_name not in [m if isinstance(m, str) else m.get('name') for m in agent_def.get('mcps', [])]:
                                            agent_def['mcps'].append(mcp_name)

                                # Restore model/temperature overrides
                                if session_metadata.get('model'):
                                    current_model = session_metadata['model']
                                    if restored_agent:
                                        agent_def['model'] = current_model
                                if session_metadata.get('temperature') is not None:
                                    current_temperature = session_metadata['temperature']
                                    if restored_agent:
                                        agent_def['temperature'] = current_temperature

                                # Restore allowed directories
                                if session_metadata.get('allowed_directories'):
                                    allowed_directories = session_metadata['allowed_directories']
                                elif session_metadata.get('work_dir'):
                                    # Backward compatibility with old sessions
                                    allowed_directories = [session_metadata['work_dir']]

                            # Reinitialize context manager with resumed session_id to load chat history
                            ctx_manager = CLIContextManager(
                                session_id=session_id,
                                max_context_tokens=context_config.get('max_context_tokens', 8000),
                                preserve_recent=context_config.get('preserve_recent_messages', 5),
                                pruning_method=context_config.get('pruning_method', 'oldest_first'),
                                enable_summarization=context_config.get('enable_summarization', True),
                                summary_trigger_ratio=context_config.get('summary_trigger_ratio', 0.8),
                                summaries_limit=context_config.get('summaries_limit_count', 5),
                                llm=llm if 'llm' in dir() else None
                            )

                            # Show session info
                            agent_info = session_metadata.get('agent_name', 'unknown') if session_metadata else 'unknown'
                            model_info = session_metadata.get('model', '') if session_metadata else ''

                            console.print(Panel(
                                f"[green]✓ Resumed session:[/green] [bold]{session_id}[/bold]\n"
                                f"[dim]Agent: {agent_info}" + (f" • Model: {model_info}" if model_info else "") + f"[/dim]"
                                f"{resume_info}",
                                border_style="green",
                                box=box.ROUNDED
                            ))

                            # Display restored chat history
                            chat_history_export = ctx_manager.export_chat_history(include_only=False)
                            if chat_history_export:
                                preserve_recent = context_config.get('preserve_recent_messages', 5)
                                total_messages = len(chat_history_export)

                                if total_messages > preserve_recent:
                                    console.print(f"\n[dim]... {total_messages - preserve_recent} earlier messages in context[/dim]")
                                    messages_to_show = chat_history_export[-preserve_recent:]
                                else:
                                    messages_to_show = chat_history_export

                                for msg in messages_to_show:
                                    role = msg.get('role', 'user')
                                    content = msg.get('content', '')[:200]  # Truncate for display
                                    if len(msg.get('content', '')) > 200:
                                        content += '...'
                                    role_color = 'cyan' if role == 'user' else 'green'
                                    role_label = 'You' if role == 'user' else 'Assistant'
                                    console.print(f"[dim][{role_color}]{role_label}:[/{role_color}] {content}[/dim]")
                                console.print()

                            # Recreate agent executor with restored tools if we have a local/built-in agent
                            if (is_direct or is_local or is_inventory) and restored_agent:
                                try:
                                    agent_executor, mcp_session_manager, llm, llm_model, filesystem_tools, terminal_tools, planning_tools = _setup_local_agent_executor(
                                        client, agent_def, tuple(added_toolkit_configs), config, current_model, current_temperature, current_max_tokens, memory, allowed_directories, plan_state
                                    )
                                    ctx_manager.llm = llm  # Update LLM for summarization

                                    # Warn about MCP state loss
                                    if restored_mcps:
                                        console.print("[yellow]Note: MCP connections re-initialized (stateful server state like browser sessions are lost)[/yellow]")
                                except Exception as e:
                                    console.print(f"[red]Error recreating agent executor: {e}[/red]")
                                    console.print("[yellow]Session state loaded but agent not fully restored. Some tools may not work.[/yellow]")
                            elif is_direct or is_local or is_inventory:
                                # Just update planning tools if we couldn't restore agent
                                try:
                                    from .tools import get_planning_tools
                                    if loaded_state:
                                        planning_tools, _ = get_planning_tools(loaded_state)
                                except Exception as e:
                                    console.print(f"[yellow]Warning: Could not reload planning tools: {e}[/yellow]")
                        else:
                            console.print(f"[red]Session not found: {session_id}[/red]")
                    else:
                        console.print("[dim]Usage: /session [list] or /session resume <session_id>[/dim]")
                    continue

                # /agent command - switch to a different agent
                if user_input == '/agent':
                    selected_agent = _select_agent_interactive(client, config)
                    if selected_agent and selected_agent != '__direct__' and selected_agent != '__inventory__':
                        # Load the new agent
                        new_is_local = Path(selected_agent).exists()

                        if new_is_local:
                            agent_def = load_agent_definition(selected_agent)
                            agent_name = agent_def.get('name', Path(selected_agent).stem)
                            agent_type = "Local Agent"
                            is_local = True
                            is_direct = False
                            is_inventory = False
                            current_agent_file = selected_agent  # Track for /reload
                        else:
                            # Platform agent
                            agents = client.get_list_of_apps()
                            new_agent = None
                            try:
                                agent_id = int(selected_agent)
                                new_agent = next((a for a in agents if a['id'] == agent_id), None)
                            except ValueError:
                                new_agent = next((a for a in agents if a['name'] == selected_agent), None)

                            if new_agent:
                                agent_name = new_agent['name']
                                agent_type = "Platform Agent"
                                is_local = False
                                is_direct = False
                                current_agent_file = None  # No file for platform agents

                                # Setup platform agent
                                details = client.get_app_details(new_agent['id'])
                                version_id = details['versions'][0]['id']
                                agent_executor = client.application(
                                    application_id=new_agent['id'],
                                    application_version_id=version_id,
                                    memory=memory,
                                    chat_history=chat_history
                                )
                                console.print(Panel(
                                    f"[cyan]ℹ Switched to agent: [bold]{agent_name}[/bold] ({agent_type}). Chat history preserved.[/cyan]",
                                    border_style="cyan",
                                    box=box.ROUNDED
                                ))
                                continue

                        # For local agents, recreate executor
                        if new_is_local:
                            from .tools import create_session_memory
                            memory = create_session_memory(current_session_id)
                            added_toolkit_configs = []
                            try:
                                agent_executor, mcp_session_manager, llm, llm_model, filesystem_tools, terminal_tools, planning_tools = _setup_local_agent_executor(
                                    client, agent_def, tuple(added_toolkit_configs), config, current_model, current_temperature, current_max_tokens, memory, allowed_directories, plan_state
                                )
                                console.print(Panel(
                                    f"[cyan]ℹ Switched to agent: [bold]{agent_name}[/bold] ({agent_type}). Agent state reset, chat history preserved.[/cyan]",
                                    border_style="cyan",
                                    box=box.ROUNDED
                                ))
                            except Exception as e:
                                console.print(f"[red]Error switching agent: {e}[/red]")
                    elif selected_agent == '__direct__':
                        # Switch back to direct mode
                        is_direct = True
                        is_local = False
                        is_inventory = False
                        current_agent_file = None  # No file for direct mode
                        agent_name = "Alita"
                        agent_type = "Direct LLM"
                        alita_prompt = _get_alita_system_prompt(config)
                        agent_def = {
                            'model': current_model or default_model,
                            'temperature': current_temperature if current_temperature is not None else default_temperature,
                            'max_tokens': current_max_tokens or default_max_tokens,
                            'system_prompt': alita_prompt
                        }
                        from .tools import create_session_memory
                        memory = create_session_memory(current_session_id)
                        try:
                            agent_executor, mcp_session_manager, llm, llm_model, filesystem_tools, terminal_tools, planning_tools = _setup_local_agent_executor(
                                client, agent_def, tuple(added_toolkit_configs), config, current_model, current_temperature, current_max_tokens, memory, allowed_directories, plan_state
                            )
                            console.print(Panel(
                                f"[cyan]ℹ Switched to [bold]Alita[/bold]. Agent state reset, chat history preserved.[/cyan]",
                                border_style="cyan",
                                box=box.ROUNDED
                            ))
                        except Exception as e:
                            console.print(f"[red]Error switching to direct mode: {e}[/red]")
                    elif selected_agent == '__inventory__':
                        # Switch to inventory mode
                        is_direct = False
                        is_local = False
                        is_inventory = True
                        current_agent_file = None  # No file for inventory mode
                        agent_name = "Inventory"
                        agent_type = "Built-in Agent"
                        inventory_prompt = _get_inventory_system_prompt(config)
                        agent_def = {
                            'name': 'inventory-agent',
                            'model': current_model or default_model,
                            'temperature': current_temperature if current_temperature is not None else 0.3,
                            'max_tokens': current_max_tokens or default_max_tokens,
                            'system_prompt': inventory_prompt,
                            'toolkit_configs': [
                                {'type': 'inventory', 'graph_path': './knowledge_graph.json'}
                            ]
                        }
                        from .tools import create_session_memory
                        memory = create_session_memory(current_session_id)
                        try:
                            agent_executor, mcp_session_manager, llm, llm_model, filesystem_tools, terminal_tools, planning_tools = _setup_local_agent_executor(
                                client, agent_def, tuple(added_toolkit_configs), config, current_model, current_temperature, current_max_tokens, memory, allowed_directories, plan_state
                            )
                            console.print(Panel(
                                f"[cyan]ℹ Switched to [bold]Inventory[/bold] agent. Use /add_toolkit to add source toolkits.[/cyan]",
                                border_style="cyan",
                                box=box.ROUNDED
                            ))
                        except Exception as e:
                            console.print(f"[red]Error switching to inventory mode: {e}[/red]")
                    continue

                # Execute agent
                # Track if history was already added during continuation handling
                history_already_added = False
                original_user_input = user_input  # Preserve for history tracking

                if (is_direct or is_local or is_inventory) and agent_executor is None:
                    # Local agent without tools: use direct LLM call with streaming
                    system_prompt = agent_def.get('system_prompt', '')
                    messages = []
                    if system_prompt:
                        messages.append({"role": "system", "content": system_prompt})

                    # Build pruned context from context manager
                    context_messages = ctx_manager.build_context()
                    for msg in context_messages:
                        messages.append(msg)

                    # Add user message
                    messages.append({"role": "user", "content": user_input})

                    try:
                        # Try streaming if available
                        if hasattr(llm, 'stream'):
                            output_chunks = []
                            first_chunk = True

                            # Show spinner until first token arrives
                            status = console.status("[yellow]Thinking...[/yellow]", spinner="dots")
                            status.start()

                            # Stream the response token by token
                            for chunk in llm.stream(messages):
                                if hasattr(chunk, 'content'):
                                    token = chunk.content
                                else:
                                    token = str(chunk)

                                if token:
                                    # Stop spinner and show agent name on first token
                                    if first_chunk:
                                        status.stop()
                                        console.print(f"\n[bold bright_cyan]{agent_name}:[/bold bright_cyan]\n", end="")
                                        first_chunk = False

                                    console.print(token, end="", markup=False)
                                    output_chunks.append(token)

                            # Stop status if still running (no tokens received)
                            if first_chunk:
                                status.stop()
                                console.print(f"\n[bold bright_cyan]{agent_name}:[/bold bright_cyan]\n", end="")

                            output = ''.join(output_chunks)
                            console.print()  # New line after streaming
                        else:
                            # Fallback to non-streaming with spinner
                            with console.status("[yellow]Thinking...[/yellow]", spinner="dots"):
                                response = llm.invoke(messages)
                                if hasattr(response, 'content'):
                                    output = response.content
                                else:
                                    output = str(response)

                            # Display response after spinner stops
                            console.print(f"\n[bold bright_cyan]{agent_name}:[/bold bright_cyan]")
                            if any(marker in output for marker in ['```', '**', '##', '- ', '* ']):
                                console.print(Markdown(output))
                            else:
                                console.print(output)
                    except Exception as e:
                        console.print(f"\n[red]✗ Error: {e}[/red]\n")
                    continue
                else:
                    # Agent with tools or platform agent: use agent executor
                    # Setup callback for verbose output
                    from langchain_core.runnables import RunnableConfig
                    from langgraph.errors import GraphRecursionError

                    # Initialize invoke_config with thread_id for checkpointing
                    # This ensures the same thread is used across continuations
                    invoke_config = RunnableConfig(
                        configurable={"thread_id": current_session_id}
                    )
                    # always proceed with continuation enabled
                    invoke_config["should_continue"] = True
                    # Set recursion limit for tool executions
                    logger.debug(f"Setting tool steps limit to {recursion_limit}")
                    invoke_config["recursion_limit"] = recursion_limit
                    cli_callback = None
                    if show_verbose:
                        cli_callback = create_cli_callback(verbose=True, debug=debug_mode)
                        invoke_config["callbacks"] = [cli_callback]

                    # Track recursion continuation state
                    continue_from_recursion = False
                    recursion_attempts = 0
                    tool_limit_attempts = 0  # Track tool limit continuation attempts
                    max_recursion_continues = 5  # Prevent infinite continuation loops
                    output = None  # Initialize output before loop
                    result = None  # Initialize result before loop

                    while True:
                        try:
                            # Always start with a thinking spinner
                            status = console.status("[yellow]Thinking...[/yellow]", spinner="dots")
                            status.start()

                            # Pass status to callback so it can stop it when tool calls start
                            if cli_callback:
                                cli_callback.status = status

                            try:
                                result = agent_executor.invoke(
                                    {
                                        "input": [user_input] if not is_local else user_input,
                                        "chat_history": ctx_manager.build_context()
                                    },
                                    config=invoke_config
                                )
                            finally:
                                # Make sure spinner is stopped
                                try:
                                    status.stop()
                                except Exception:
                                    pass

                            # Extract output from result
                            if result is not None:
                                output = extract_output_from_result(result)

                            # Check if max tool iterations were reached and prompt user
                            if output and "Maximum tool execution iterations" in output and "reached" in output:
                                tool_limit_attempts += 1

                                console.print()
                                console.print(Panel(
                                    f"[yellow]⚠ Tool execution limit reached[/yellow]\n\n"
                                    f"The agent has executed the maximum number of tool calls in a single turn.\n"
                                    f"This usually happens with complex tasks that require many sequential operations.\n\n"
                                    f"[dim]Attempt {tool_limit_attempts}/{max_recursion_continues}[/dim]",
                                    title="Tool Limit Reached",
                                    border_style="yellow",
                                    box=box.ROUNDED
                                ))

                                if tool_limit_attempts >= max_recursion_continues:
                                    console.print("[red]Maximum continuation attempts reached. Please break down your request into smaller tasks.[/red]")
                                    break

                                console.print("\nWhat would you like to do?")
                                console.print("  [bold cyan]c[/bold cyan] - Continue execution (tell agent to resume)")
                                console.print("  [bold cyan]s[/bold cyan] - Stop and keep partial results")
                                console.print("  [bold cyan]n[/bold cyan] - Start a new request")
                                console.print()

                                try:
                                    choice = input_handler.get_input("Choice [c/s/n]: ").strip().lower()
                                except (KeyboardInterrupt, EOFError):
                                    choice = 's'

                                if choice == 'c':
                                    # Continue - send a follow-up message to resume
                                    console.print("\n[cyan]Continuing execution...[/cyan]\n")

                                    # Clean up the output - remove the tool limit warning message
                                    clean_output = output
                                    if "Maximum tool execution iterations" in output:
                                        # Strip the warning from the end of the output
                                        lines = output.split('\n')
                                        clean_lines = [l for l in lines if "Maximum tool execution iterations" not in l and "Stopping tool execution" not in l]
                                        clean_output = '\n'.join(clean_lines).strip()

                                    # Add current output to history first (without the warning)
                                    # Use original user input for first continuation, current for subsequent
                                    history_input = original_user_input if not history_already_added else user_input
                                    if clean_output:
                                        chat_history.append({"role": "user", "content": history_input})
                                        chat_history.append({"role": "assistant", "content": clean_output})
                                        ctx_manager.add_message("user", history_input)
                                        ctx_manager.add_message("assistant", clean_output)
                                        history_already_added = True

                                    # CRITICAL: Use a new thread_id when continuing to avoid corrupted
                                    # checkpoint state. The tool limit may have left the checkpoint with
                                    # an AIMessage containing tool_calls without corresponding ToolMessages.
                                    # Using a new thread_id starts fresh with our clean context manager state.
                                    import uuid
                                    continuation_thread_id = f"{current_session_id}-cont-{uuid.uuid4().hex[:8]}"
                                    invoke_config = RunnableConfig(
                                        configurable={"thread_id": continuation_thread_id}
                                    )
                                    invoke_config["should_continue"] = True
                                    invoke_config["recursion_limit"] = recursion_limit
                                    if cli_callback:
                                        invoke_config["callbacks"] = [cli_callback]

                                    # Set new input to continue with a more explicit continuation message
                                    # Include context about the task limit to help the agent understand
                                    user_input = (
                                        "The previous response was interrupted due to reaching the tool execution limit. "
                                        "Continue from where you left off and complete the remaining steps of the original task. "
                                        "Focus on what still needs to be done - do not repeat completed work."
                                    )
                                    continue  # Retry the invoke in this inner loop

                                elif choice == 's':
                                    console.print("\n[yellow]Stopped. Partial work has been completed.[/yellow]")
                                    break  # Exit retry loop and show output

                                else:  # 'n' or anything else
                                    console.print("\n[dim]Skipped. Enter a new request.[/dim]")
                                    output = None
                                    break  # Exit retry loop

                            # Success - exit the retry loop
                            break

                        except GraphRecursionError as e:
                            recursion_attempts += 1
                            step_limit = getattr(e, 'recursion_limit', 25)

                            console.print()
                            console.print(Panel(
                                f"[yellow]⚠ Step limit reached ({step_limit} steps)[/yellow]\n\n"
                                f"The agent has executed the maximum number of steps allowed.\n"
                                f"This usually happens with complex tasks that require many tool calls.\n\n"
                                f"[dim]Attempt {recursion_attempts}/{max_recursion_continues}[/dim]",
                                title="Step Limit Reached",
                                border_style="yellow",
                                box=box.ROUNDED
                            ))

                            if recursion_attempts >= max_recursion_continues:
                                console.print("[red]Maximum continuation attempts reached. Please break down your request into smaller tasks.[/red]")
                                output = f"[Step limit reached after {recursion_attempts} continuation attempts. The task may be too complex - please break it into smaller steps.]"
                                break

                            # Prompt user for action
                            console.print("\nWhat would you like to do?")
                            console.print("  [bold cyan]c[/bold cyan] - Continue execution (agent will resume from checkpoint)")
                            console.print("  [bold cyan]s[/bold cyan] - Stop and get partial results")
                            console.print("  [bold cyan]n[/bold cyan] - Start a new request")
                            console.print()

                            try:
                                choice = input_handler.get_input("Choice [c/s/n]: ").strip().lower()
                            except (KeyboardInterrupt, EOFError):
                                choice = 's'

                            if choice == 'c':
                                # Continue - Use a new thread_id to avoid corrupted checkpoint state.
                                # GraphRecursionError may have left the checkpoint with an AIMessage
                                # containing tool_calls without corresponding ToolMessages.
                                # Using a new thread_id starts fresh with our clean context manager state.
                                continue_from_recursion = True
                                console.print("\n[cyan]Continuing with fresh context...[/cyan]\n")

                                # Add current progress to history if we have it
                                # (GraphRecursionError doesn't give us partial output, but context may have been updated)
                                history_input = original_user_input if not history_already_added else user_input
                                ctx_manager.add_message("user", history_input)
                                ctx_manager.add_message("assistant", "[Previous task interrupted - continuing...]")
                                history_already_added = True

                                # Create new thread_id to avoid corrupted checkpoint
                                import uuid
                                continuation_thread_id = f"{current_session_id}-cont-{uuid.uuid4().hex[:8]}"
                                invoke_config = RunnableConfig(
                                    configurable={"thread_id": continuation_thread_id}
                                )
                                if cli_callback:
                                    invoke_config["callbacks"] = [cli_callback]

                                # More explicit continuation message
                                user_input = (
                                    "The previous response was interrupted due to reaching the step limit. "
                                    "Continue from where you left off and complete the remaining steps of the original task. "
                                    "Focus on what still needs to be done - do not repeat completed work."
                                )
                                continue  # Retry the invoke

                            elif choice == 's':
                                # Stop and try to extract partial results
                                console.print("\n[yellow]Stopped. Attempting to extract partial results...[/yellow]")
                                output = "[Task stopped due to step limit. Partial work may have been completed - check any files or state that were modified.]"
                                break

                            else:  # 'n' or anything else
                                console.print("\n[dim]Skipped. Enter a new request.[/dim]")
                                output = None
                                break

                    # Skip chat history update if we bailed out (no result)
                    if output is None:
                        continue

                    # Display response in a clear format
                    console.print()  # Add spacing
                    console.print(f"[bold bright_cyan]{agent_name}:[/bold bright_cyan]")
                    console.print()  # Add spacing before response
                    if any(marker in output for marker in ['```', '**', '##', '- ', '* ']):
                        console.print(Markdown(output))
                    else:
                        console.print(output)
                    console.print()  # Add spacing after response

                    # Update chat history and context manager (skip if already added during continuation)
                    if not history_already_added:
                        chat_history.append({"role": "user", "content": original_user_input})
                        chat_history.append({"role": "assistant", "content": output})

                        # Add messages to context manager for token tracking and pruning
                        ctx_manager.add_message("user", original_user_input)
                        ctx_manager.add_message("assistant", output)
                    else:
                        # During continuation, add the final response with continuation message
                        chat_history.append({"role": "user", "content": user_input})
                        chat_history.append({"role": "assistant", "content": output})
                        ctx_manager.add_message("user", user_input)
                        ctx_manager.add_message("assistant", output)

            except KeyboardInterrupt:
                console.print("\n\n[yellow]Interrupted. Type 'exit' to quit or continue chatting.[/yellow]")
                continue
            except EOFError:
                # Save final session state before exiting
                try:
                    from .tools import update_session_metadata, to_portable_path
                    update_session_metadata(current_session_id, {
                        'agent_source': to_portable_path(current_agent_file) if current_agent_file else None,
                        'model': current_model or llm_model_display,
                        'temperature': current_temperature if current_temperature is not None else llm_temperature_display,
                        'allowed_directories': allowed_directories,
                        'added_toolkit_configs': list(added_toolkit_configs),
                        'added_mcps': [m if isinstance(m, str) else m.get('name') for m in agent_def.get('mcps', [])],
                    })
                except Exception as e:
                    logger.debug(f"Failed to save session state on exit: {e}")
                console.print("\n\n[bold cyan]Goodbye! 👋[/bold cyan]")
                break

    except click.ClickException:
        raise
    except Exception as e:
        logger.exception("Failed to start chat")
        error_panel = Panel(
            str(e),
            title="Error",
            border_style="red",
            box=box.ROUNDED
        )
        console.print(error_panel, style="red")
        raise click.Abort()


@agent.command('run')
@click.argument('agent_source')
@click.argument('message')
@click.option('--version', help='Agent version (for platform agents)')
@click.option('--toolkit-config', multiple=True, type=click.Path(exists=True),
              help='Toolkit configuration files')
@click.option('--model', help='Override LLM model')
@click.option('--temperature', type=float, help='Override temperature')
@click.option('--max-tokens', type=int, help='Override max tokens')
@click.option('--save-thread', help='Save thread ID to file for continuation')
@click.option('--dir', 'work_dir', type=click.Path(exists=True, file_okay=False, dir_okay=True),
              help='Grant agent filesystem access to this directory')
@click.option('--verbose', '-v', type=click.Choice(['quiet', 'default', 'debug']), default='default',
              help='Output verbosity level: quiet (final output only), default (tool calls + outputs), debug (all including LLM calls)')
@click.pass_context
def agent_run(ctx, agent_source: str, message: str, version: Optional[str],
              toolkit_config: tuple, model: Optional[str],
              temperature: Optional[float], max_tokens: Optional[int],
              save_thread: Optional[str], work_dir: Optional[str],
              verbose: str):
    """Run agent with a single message (handoff mode).

    \b
    AGENT_SOURCE can be:
    - Platform agent ID or name
    - Path to local agent file

    MESSAGE is the input message to send to the agent.

    \b
    Examples:
        alita run my-agent "What is the status of JIRA-123?"
        alita run ./agent.md "Create a new toolkit for Stripe API"
        alita -o json run my-agent "Search for bugs" --toolkit-config jira.json
        alita run my-agent "Analyze code" --dir ./myproject
        alita run my-agent "Start task" --save-thread thread.txt
        alita run my-agent "Query" -v quiet
        alita run my-agent "Query" -v debug
    """
    formatter = ctx.obj['formatter']
    client = get_client(ctx)

    # Setup verbose level
    show_verbose = verbose != 'quiet'
    debug_mode = verbose == 'debug'

    try:
        # Load agent
        is_local = Path(agent_source).exists()

        if is_local:
            agent_def = load_agent_definition(agent_source)
            agent_name = agent_def.get('name', Path(agent_source).stem)

            # Create memory for agent
            from langgraph.checkpoint.sqlite import SqliteSaver
            memory = SqliteSaver(sqlite3.connect(":memory:", check_same_thread=False))

            # Setup local agent executor (reuses same logic as agent_chat)
            try:
                agent_executor, mcp_session_manager, llm, llm_model, filesystem_tools, terminal_tools, planning_tools = _setup_local_agent_executor(
                    client, agent_def, toolkit_config, ctx.obj['config'], model, temperature, max_tokens, memory, work_dir, {}
                )
            except Exception as e:
                error_panel = Panel(
                    f"Failed to setup agent: {e}",
                    title="Error",
                    border_style="red",
                    box=box.ROUNDED
                )
                console.print(error_panel, style="red")
                raise click.Abort()

            # Execute agent
            if agent_executor:
                # Setup callback for verbose output
                from langchain_core.runnables import RunnableConfig
                from langgraph.errors import GraphRecursionError

                invoke_config = None
                if show_verbose:
                    cli_callback = create_cli_callback(verbose=True, debug=debug_mode)
                    invoke_config = RunnableConfig(callbacks=[cli_callback])

                try:
                    # Execute with spinner for non-JSON output
                    if formatter.__class__.__name__ == 'JSONFormatter':
                        # JSON output: always quiet, no callbacks
                        with console.status("[yellow]Processing...[/yellow]", spinner="dots"):
                            result = agent_executor.invoke({
                                "input": message,
                                "chat_history": []
                            })

                        click.echo(formatter._dump({
                            'agent': agent_name,
                            'message': message,
                            'response': extract_output_from_result(result),
                            'full_result': result
                        }))
                    else:
                        # Show status only when not verbose (verbose shows its own progress)
                        if not show_verbose:
                            with console.status("[yellow]Processing...[/yellow]", spinner="dots"):
                                result = agent_executor.invoke(
                                    {
                                        "input": message,
                                        "chat_history": []
                                    },
                                    config=invoke_config
                                )
                        else:
                            console.print()  # Add spacing before tool calls
                            result = agent_executor.invoke(
                                {
                                    "input": message,
                                    "chat_history": []
                                },
                                config=invoke_config
                            )

                        # Extract and display output
                        output = extract_output_from_result(result)
                        display_output(agent_name, message, output)

                except GraphRecursionError as e:
                    step_limit = getattr(e, 'recursion_limit', 25)
                    console.print()
                    console.print(Panel(
                        f"[yellow]⚠ Step limit reached ({step_limit} steps)[/yellow]\n\n"
                        f"The agent exceeded the maximum number of steps.\n"
                        f"This task may be too complex for a single run.\n\n"
                        f"[bold]Suggestions:[/bold]\n"
                        f"• Use [cyan]alita agent chat[/cyan] for interactive continuation\n"
                        f"• Break the task into smaller, focused requests\n"
                        f"• Check if partial work was completed (files created, etc.)",
                        title="Step Limit Reached",
                        border_style="yellow",
                        box=box.ROUNDED
                    ))
                    if formatter.__class__.__name__ == 'JSONFormatter':
                        click.echo(formatter._dump({
                            'agent': agent_name,
                            'message': message,
                            'error': 'step_limit_reached',
                            'step_limit': step_limit,
                            'response': f'Step limit of {step_limit} reached. Task may be too complex.'
                        }))
            else:
                # Simple LLM mode without tools
                system_prompt = agent_def.get('system_prompt', '')
                messages = []
                if system_prompt:
                    messages.append({"role": "system", "content": system_prompt})
                messages.append({"role": "user", "content": message})

                # Execute with spinner for non-JSON output
                if formatter.__class__.__name__ == 'JSONFormatter':
                    response = llm.invoke(messages)
                    if hasattr(response, 'content'):
                        output = response.content
                    else:
                        output = str(response)

                    click.echo(formatter._dump({
                        'agent': agent_name,
                        'message': message,
                        'response': output
                    }))
                else:
                    # Show spinner while executing
                    with console.status("[yellow]Processing...[/yellow]", spinner="dots"):
                        response = llm.invoke(messages)
                        if hasattr(response, 'content'):
                            output = response.content
                        else:
                            output = str(response)

                    # Display output
                    display_output(agent_name, message, output)

        else:
            # Platform agent
            agents = client.get_list_of_apps()
            agent = None

            try:
                agent_id = int(agent_source)
                agent = next((a for a in agents if a['id'] == agent_id), None)
            except ValueError:
                agent = next((a for a in agents if a['name'] == agent_source), None)

            if not agent:
                raise click.ClickException(f"Agent '{agent_source}' not found")

            # Get version
            details = client.get_app_details(agent['id'])

            if version:
                version_obj = next((v for v in details['versions'] if v['name'] == version), None)
                if not version_obj:
                    raise click.ClickException(f"Version '{version}' not found")
                version_id = version_obj['id']
            else:
                version_id = details['versions'][0]['id']

            # Load toolkit configs from CLI options
            toolkit_configs = []
            if toolkit_config:
                for config_path in toolkit_config:
                    toolkit_configs.append(load_toolkit_config(config_path))

            # Create memory
            from langgraph.checkpoint.sqlite import SqliteSaver
            memory = SqliteSaver(sqlite3.connect(":memory:", check_same_thread=False))

            # Create agent executor
            agent_executor = client.application(
                application_id=agent['id'],
                application_version_id=version_id,
                memory=memory
            )

            # Setup callback for verbose output
            from langchain_core.runnables import RunnableConfig
            from langgraph.errors import GraphRecursionError

            invoke_config = None
            if show_verbose:
                cli_callback = create_cli_callback(verbose=True, debug=debug_mode)
                invoke_config = RunnableConfig(callbacks=[cli_callback])

            try:
                # Execute with spinner for non-JSON output
                if formatter.__class__.__name__ == 'JSONFormatter':
                    result = agent_executor.invoke({
                        "input": [message],
                        "chat_history": []
                    })

                    click.echo(formatter._dump({
                        'agent': agent['name'],
                        'message': message,
                        'response': result.get('output', ''),
                        'full_result': result
                    }))
                else:
                    # Show status only when not verbose
                    if not show_verbose:
                        with console.status("[yellow]Processing...[/yellow]", spinner="dots"):
                            result = agent_executor.invoke(
                                {
                                    "input": [message],
                                    "chat_history": []
                                },
                                config=invoke_config
                            )
                    else:
                        console.print()  # Add spacing before tool calls
                        result = agent_executor.invoke(
                            {
                                "input": [message],
                                "chat_history": []
                            },
                            config=invoke_config
                        )

                    # Display output
                    response = result.get('output', 'No response')
                    display_output(agent['name'], message, response)

                # Save thread if requested
                if save_thread:
                    thread_data = {
                        'agent_id': agent['id'],
                        'agent_name': agent['name'],
                        'version_id': version_id,
                        'thread_id': result.get('thread_id'),
                        'last_message': message
                    }
                    with open(save_thread, 'w') as f:
                        json.dump(thread_data, f, indent=2)
                    logger.info(f"Thread saved to {save_thread}")

            except GraphRecursionError as e:
                step_limit = getattr(e, 'recursion_limit', 25)
                console.print()
                console.print(Panel(
                    f"[yellow]⚠ Step limit reached ({step_limit} steps)[/yellow]\n\n"
                    f"The agent exceeded the maximum number of steps.\n"
                    f"This task may be too complex for a single run.\n\n"
                    f"[bold]Suggestions:[/bold]\n"
                    f"• Use [cyan]alita agent chat[/cyan] for interactive continuation\n"
                    f"• Break the task into smaller, focused requests\n"
                    f"• Check if partial work was completed (files created, etc.)",
                    title="Step Limit Reached",
                    border_style="yellow",
                    box=box.ROUNDED
                ))
                if formatter.__class__.__name__ == 'JSONFormatter':
                    click.echo(formatter._dump({
                        'agent': agent['name'],
                        'message': message,
                        'error': 'step_limit_reached',
                        'step_limit': step_limit,
                        'response': f'Step limit of {step_limit} reached. Task may be too complex.'
                    }))

    except click.ClickException:
        raise
    except Exception as e:
        logger.exception("Failed to run agent")
        error_panel = Panel(
            str(e),
            title="Error",
            border_style="red",
            box=box.ROUNDED
        )
        console.print(error_panel, style="red")
        raise click.Abort()


@agent.command('execute-test-cases')
@click.argument('agent_source')
@click.option('--test-cases-dir', required=True, type=click.Path(exists=True, file_okay=False, dir_okay=True),
              help='Directory containing test case files')
@click.option('--results-dir', required=True, type=click.Path(file_okay=False, dir_okay=True),
              help='Directory where test results will be saved')
@click.option('--test-case', 'test_case_files', multiple=True,
              help='Specific test case file(s) to execute (e.g., TC-001.md). Can specify multiple times. If not specified, executes all test cases.')
@click.option('--model', help='Override LLM model')
@click.option('--temperature', type=float, help='Override temperature')
@click.option('--max-tokens', type=int, help='Override max tokens')
@click.option('--dir', 'work_dir', type=click.Path(exists=True, file_okay=False, dir_okay=True),
              help='Grant agent filesystem access to this directory')
@click.option('--data-generator', type=click.Path(exists=True),
              help='Path to test data generator agent definition file')
@click.option('--validator', type=click.Path(exists=True),
              help='Path to test validator agent definition file (default: .alita/agents/test-validator.agent.md)')
@click.option('--skip-data-generation', is_flag=True,
              help='Skip test data generation step')
@click.pass_context
def execute_test_cases(ctx, agent_source: str, test_cases_dir: str, results_dir: str,
                       test_case_files: tuple, model: Optional[str], temperature: Optional[float],
                       max_tokens: Optional[int], work_dir: Optional[str],
                       data_generator: Optional[str], validator: Optional[str],
                       skip_data_generation: bool):
    """
    Execute test cases from a directory and save results.

    This command:
    1. (Optional) Executes test data generator agent to provision test data
    2. Scans TEST_CASES_DIR for test case markdown files (TC-*.md)
    3. For each test case:
       - Parses the test case to extract config, steps, and expectations
       - Loads the agent with the toolkit config specified in the test case
       - Executes each test step
       - Validates output against expectations
       - Generates a test result file
    4. Saves all results to RESULTS_DIR

    AGENT_SOURCE: Path to agent definition file (e.g., .github/agents/test-runner.agent.md)

    \b
    Examples:
        alita execute-test-cases ./agent.json --test-cases-dir ./tests --results-dir ./results
        alita execute-test-cases ./agent.json --test-cases-dir ./tests --results-dir ./results \
            --data-generator ./data-gen.json
        alita execute-test-cases ./agent.json --test-cases-dir ./tests --results-dir ./results \
            --test-case TC-001.md --test-case TC-002.md
        alita execute-test-cases ./agent.json --test-cases-dir ./tests --results-dir ./results \
            --skip-data-generation --model gpt-4o
    """
    # Import dependencies at function start
    import sqlite3
    import uuid
    from langgraph.checkpoint.sqlite import SqliteSaver

    config = ctx.obj['config']
    client = get_client(ctx)

    try:
        # Load agent definition
        if not Path(agent_source).exists():
            raise click.ClickException(f"Agent definition not found: {agent_source}")

        agent_def = load_agent_definition(agent_source)
        agent_name = agent_def.get('name', Path(agent_source).stem)

        # Find all test case files (recursively search subdirectories)
        test_cases_path = Path(test_cases_dir)

        # Filter test cases based on --test-case options
        if test_case_files:
            # User specified specific test case files
            test_case_files_set = set(test_case_files)
            all_test_cases = sorted(test_cases_path.rglob('TC-*.md'))
            test_case_files_list = [
                tc for tc in all_test_cases
                if tc.name in test_case_files_set
            ]

            # Check if all specified files were found
            found_names = {tc.name for tc in test_case_files_list}
            not_found = test_case_files_set - found_names
            if not_found:
                console.print(f"[yellow]⚠ Warning: Test case files not found: {', '.join(not_found)}[/yellow]")
        else:
            # Execute all test cases
            test_case_files_list = sorted(test_cases_path.rglob('TC-*.md'))

        if not test_case_files_list:
            if test_case_files:
                console.print(f"[yellow]No matching test case files found in {test_cases_dir}[/yellow]")
            else:
                console.print(f"[yellow]No test case files found in {test_cases_dir}[/yellow]")
            return

        console.print(f"\n[bold cyan]🧪 Test Execution Started[/bold cyan]")
        console.print(f"Agent: [bold]{agent_name}[/bold]")
        console.print(f"Test Cases: {len(test_case_files_list)}")
        if test_case_files:
            console.print(f"Selected: [cyan]{', '.join(test_case_files)}[/cyan]")
        console.print(f"Results Directory: {results_dir}\n")

        data_gen_def = None
        if data_generator and not skip_data_generation:
            try:
                data_gen_def = load_agent_definition(data_generator)
                data_gen_name = data_gen_def.get('name', Path(data_generator).stem)
                console.print(f"Data Generator Agent: [bold]{data_gen_name}[/bold]\n")
            except Exception as e:
                console.print(f"[yellow]⚠ Warning: Failed to setup data generator: {e}[/yellow]")
                console.print("[yellow]Continuing with test execution...[/yellow]\n")
                logger.debug(f"Data generator setup error: {e}", exc_info=True)

        # Load validator agent definition
        validator_def = None
        validator_agent_name = "Default Validator"

        # Try to load validator from specified path or default location
        validator_path = validator
        if not validator_path:
            # Default to .alita/agents/test-validator.agent.md
            default_validator = Path.cwd() / '.alita' / 'agents' / 'test-validator.agent.md'
            if default_validator.exists():
                validator_path = str(default_validator)

        if validator_path and Path(validator_path).exists():
            try:
                validator_def = load_agent_definition(validator_path)
                validator_agent_name = validator_def.get('name', Path(validator_path).stem)
                console.print(f"Validator Agent: [bold]{validator_agent_name}[/bold]")
                console.print(f"[dim]Using: {validator_path}[/dim]\n")
            except Exception as e:
                console.print(f"[yellow]⚠ Warning: Failed to load validator agent: {e}[/yellow]")
                console.print(f"[yellow]Will use test runner agent for validation[/yellow]\n")
                logger.debug(f"Validator load error: {e}", exc_info=True)
        else:
            console.print(f"[dim]No validator agent specified, using test runner agent for validation[/dim]\n")

        # Store bulk data generation chat history to pass to test executors
        bulk_gen_chat_history = []

        # Parse all test cases upfront for bulk data generation
|
|
3458
|
+
parsed_test_cases = []
|
|
3459
|
+
for test_file in test_case_files_list:
|
|
3460
|
+
try:
|
|
3461
|
+
test_case = parse_test_case(str(test_file))
|
|
3462
|
+
parsed_test_cases.append({
|
|
3463
|
+
'file': test_file,
|
|
3464
|
+
'data': test_case
|
|
3465
|
+
})
|
|
3466
|
+
except Exception as e:
|
|
3467
|
+
console.print(f"[yellow]⚠ Warning: Failed to parse {test_file.name}: {e}[/yellow]")
|
|
3468
|
+
logger.debug(f"Parse error for {test_file.name}: {e}", exc_info=True)
|
|
3469
|
+
|
|
3470
|
+
# Filter test cases that need data generation
|
|
3471
|
+
test_cases_needing_data_gen = [
|
|
3472
|
+
tc for tc in parsed_test_cases
|
|
3473
|
+
if tc['data'].get('generate_test_data', True)
|
|
3474
|
+
]
|
|
3475
|
+
|
|
3476
|
+
# Bulk test data generation (if enabled)
|
|
3477
|
+
if data_gen_def and not skip_data_generation and test_cases_needing_data_gen:
|
|
3478
|
+
console.print(f"\n[bold yellow]🔧 Bulk Test Data Generation[/bold yellow]")
|
|
3479
|
+
console.print(f"Generating test data for {len(test_cases_needing_data_gen)} test cases...\n")
|
|
3480
|
+
console.print(f"[dim]Skipping {len(parsed_test_cases) - len(test_cases_needing_data_gen)} test cases with generateTestData: false[/dim]\n")
|
|
3481
|
+
|
|
3482
|
+
bulk_data_gen_prompt = _build_bulk_data_gen_prompt(test_cases_needing_data_gen)
|
|
3483
|
+
|
|
3484
|
+
console.print(f"Executing test data generation prompt {bulk_data_gen_prompt}\n")
|
|
3485
|
+
|
|
3486
|
+
try:
|
|
3487
|
+
# Setup data generator agent
|
|
3488
|
+
bulk_memory = SqliteSaver(sqlite3.connect(":memory:", check_same_thread=False))
|
|
3489
|
+
|
|
3490
|
+
# Use first test case's config or empty tuple
|
|
3491
|
+
first_config_path = None
|
|
3492
|
+
if parsed_test_cases:
|
|
3493
|
+
first_tc = parsed_test_cases[0]
|
|
3494
|
+
first_config_path = resolve_toolkit_config_path(
|
|
3495
|
+
first_tc['data'].get('config_path', ''),
|
|
3496
|
+
first_tc['file'],
|
|
3497
|
+
test_cases_path
|
|
3498
|
+
)
|
|
3499
|
+
|
|
3500
|
+
data_gen_config_tuple = (first_config_path,) if first_config_path else ()
|
|
3501
|
+
data_gen_executor, _, _, _, _, _, _ = _setup_local_agent_executor(
|
|
3502
|
+
client, data_gen_def, data_gen_config_tuple, config,
|
|
3503
|
+
model, temperature, max_tokens, bulk_memory, work_dir
|
|
3504
|
+
)
|
|
3505
|
+
|
|
3506
|
+
if data_gen_executor:
|
|
3507
|
+
with console.status("[yellow]Generating test data for all test cases...[/yellow]", spinner="dots"):
|
|
3508
|
+
bulk_gen_result = data_gen_executor.invoke({
|
|
3509
|
+
"input": bulk_data_gen_prompt,
|
|
3510
|
+
"chat_history": []
|
|
3511
|
+
})
|
|
3512
|
+
bulk_gen_output = extract_output_from_result(bulk_gen_result)
|
|
3513
|
+
console.print(f"[green]✓ Bulk test data generation completed[/green]")
|
|
3514
|
+
console.print(f"[dim]{bulk_gen_output}...[/dim]\n")
|
|
3515
|
+
|
|
3516
|
+
# Store chat history from data generation to pass to test executors
|
|
3517
|
+
bulk_gen_chat_history = [
|
|
3518
|
+
{"role": "user", "content": bulk_data_gen_prompt},
|
|
3519
|
+
{"role": "assistant", "content": bulk_gen_output}
|
|
3520
|
+
]
|
|
3521
|
+
else:
|
|
3522
|
+
console.print(f"[yellow]⚠ Warning: Data generator has no executor[/yellow]\n")
|
|
3523
|
+
except Exception as e:
|
|
3524
|
+
console.print(f"[yellow]⚠ Warning: Bulk data generation failed: {e}[/yellow]")
|
|
3525
|
+
console.print("[yellow]Continuing with test execution...[/yellow]\n")
|
|
3526
|
+
logger.debug(f"Bulk data generation error: {e}", exc_info=True)
|
|
3527
|
+
|
|
3528
|
+
# Execute test cases sequentially with executor caching
|
|
3529
|
+
if not parsed_test_cases:
|
|
3530
|
+
console.print("[yellow]No test cases to execute[/yellow]")
|
|
3531
|
+
return
|
|
3532
|
+
|
|
3533
|
+
console.print(f"\n[bold yellow]📋 Executing test cases sequentially...[/bold yellow]\n")
|
|
3534
|
+
|
|
3535
|
+
# Show data generation context availability
|
|
3536
|
+
if bulk_gen_chat_history:
|
|
3537
|
+
console.print(f"[dim]✓ Data generation history available ({len(bulk_gen_chat_history)} messages) - shared with all test cases[/dim]\n")
|
|
3538
|
+
else:
|
|
3539
|
+
console.print(f"[dim]ℹ No data generation history (skipped or disabled)[/dim]\n")
|
|
3540
|
+
|
|
3541
|
+
# Executor cache: key = toolkit_config_path, value = (agent_executor, memory, mcp_session_manager)
|
|
3542
|
+
executor_cache = {}
|
|
3543
|
+
|
|
3544
|
+
# Validation executor cache: separate isolated executors for validation
|
|
3545
|
+
# key = toolkit_config_path, value = (agent_executor, memory, mcp_session_manager)
|
|
3546
|
+
validation_executor_cache = {}
|
|
3547
|
+
|
|
3548
|
+
# Execute each test case sequentially
|
|
3549
|
+
test_results = []
|
|
3550
|
+
total_tests = len(parsed_test_cases)
|
|
3551
|
+
|
|
3552
|
+
for idx, tc_info in enumerate(parsed_test_cases, 1):
|
|
3553
|
+
test_case = tc_info['data']
|
|
3554
|
+
test_file = tc_info['file']
|
|
3555
|
+
test_name = test_case['name']
|
|
3556
|
+
|
|
3557
|
+
# Display progress
|
|
3558
|
+
console.print(f"[bold cyan]Test Case {idx}/{total_tests} - {test_name}[/bold cyan]")
|
|
3559
|
+
|
|
3560
|
+
try:
|
|
3561
|
+
# Resolve toolkit config path for this test case
|
|
3562
|
+
toolkit_config_path = resolve_toolkit_config_path(
|
|
3563
|
+
test_case.get('config_path', ''),
|
|
3564
|
+
test_file,
|
|
3565
|
+
test_cases_path
|
|
3566
|
+
)
|
|
3567
|
+
|
|
3568
|
+
# Use cache key (None if no config)
|
|
3569
|
+
cache_key = toolkit_config_path if toolkit_config_path else '__no_config__'
|
|
3570
|
+
thread_id = f"test_case_{idx}_{uuid.uuid4().hex[:8]}"
|
|
3571
|
+
|
|
3572
|
+
# Get or create executor from cache
|
|
3573
|
+
agent_executor, memory, mcp_session_manager = _create_executor_from_cache(
|
|
3574
|
+
executor_cache, cache_key, client, agent_def, toolkit_config_path,
|
|
3575
|
+
config, model, temperature, max_tokens, work_dir
|
|
3576
|
+
)
|
|
3577
|
+
|
|
3578
|
+
# Build execution prompt for single test case
|
|
3579
|
+
execution_prompt = _build_single_test_execution_prompt(tc_info, idx)
|
|
3580
|
+
console.print(f"[dim]Executing with {len(bulk_gen_chat_history)} history messages[/dim]")
|
|
3581
|
+
|
|
3582
|
+
# Execute test case
|
|
3583
|
+
execution_output = ""
|
|
3584
|
+
if agent_executor:
|
|
3585
|
+
with console.status(f"[yellow]Executing test case...[/yellow]", spinner="dots"):
|
|
3586
|
+
exec_result = agent_executor.invoke({
|
|
3587
|
+
"input": execution_prompt,
|
|
3588
|
+
"chat_history": bulk_gen_chat_history # ONLY data gen history, no accumulation
|
|
3589
|
+
}, config={"configurable": {"thread_id": thread_id}})
|
|
3590
|
+
execution_output = extract_output_from_result(exec_result)
|
|
3591
|
+
|
|
3592
|
+
console.print(f"[green]✓ Test case executed[/green]")
|
|
3593
|
+
console.print(f"[dim]{execution_output}[/dim]\n")
|
|
3594
|
+
|
|
3595
|
+
# No history accumulation - each test case is independent
|
|
3596
|
+
else:
|
|
3597
|
+
console.print(f"[red]✗ No agent executor available[/red]")
|
|
3598
|
+
# Create fallback result for this test
|
|
3599
|
+
test_results.append({
|
|
3600
|
+
'title': test_name,
|
|
3601
|
+
'passed': False,
|
|
3602
|
+
'file': test_file.name,
|
|
3603
|
+
'step_results': []
|
|
3604
|
+
})
|
|
3605
|
+
continue
|
|
3606
|
+
|
|
3607
|
+
# Validate test case using ISOLATED validation executor
|
|
3608
|
+
validation_prompt = _build_single_test_validation_prompt(tc_info, idx, execution_output)
|
|
3609
|
+
|
|
3610
|
+
console.print(f"[bold yellow]🔍 Validating test case (isolated context)...[/bold yellow]")
|
|
3611
|
+
|
|
3612
|
+
# Create or retrieve isolated validation executor
|
|
3613
|
+
validation_cache_key = f"{cache_key}_validation"
|
|
3614
|
+
validation_agent_def = validator_def if validator_def else agent_def
|
|
3615
|
+
|
|
3616
|
+
validation_executor, validation_memory, validation_mcp_session = _create_executor_from_cache(
|
|
3617
|
+
validation_executor_cache, validation_cache_key, client, validation_agent_def,
|
|
3618
|
+
toolkit_config_path, config, model, temperature, max_tokens, work_dir
|
|
3619
|
+
)
|
|
3620
|
+
|
|
3621
|
+
if validation_cache_key not in validation_executor_cache:
|
|
3622
|
+
console.print(f"[dim]Created new isolated validation executor[/dim]")
|
|
3623
|
+
else:
|
|
3624
|
+
console.print(f"[dim]Using cached validation executor[/dim]")
|
|
3625
|
+
|
|
3626
|
+
# For validation, use a separate thread with NO chat history (isolated from data gen)
|
|
3627
|
+
# This prevents the agent from using tools and encourages direct JSON output
|
|
3628
|
+
validation_thread_id = f"validation_{idx}_{uuid.uuid4().hex[:8]}"
|
|
3629
|
+
|
|
3630
|
+
validation_output = ""
|
|
3631
|
+
if validation_executor:
|
|
3632
|
+
with console.status(f"[yellow]Validating test case...[/yellow]", spinner="dots"):
|
|
3633
|
+
validation_result = validation_executor.invoke({
|
|
3634
|
+
"input": validation_prompt,
|
|
3635
|
+
"chat_history": [] # ISOLATED: No data gen history for validation
|
|
3636
|
+
}, {"configurable": {"thread_id": validation_thread_id}})
|
|
3637
|
+
|
|
3638
|
+
validation_output = extract_output_from_result(validation_result)
|
|
3639
|
+
else:
|
|
3640
|
+
console.print(f"[red]✗ No validation executor available[/red]")
|
|
3641
|
+
validation_output = "{}"
|
|
3642
|
+
|
|
3643
|
+
console.print(f"[bold cyan]Full LLM Validation Response:[/bold cyan]")
|
|
3644
|
+
console.print(f"[dim]{validation_output}[/dim]\n")
|
|
3645
|
+
|
|
3646
|
+
# No history update - validation is isolated from test execution
|
|
3647
|
+
|
|
3648
|
+
# Parse validation JSON
|
|
3649
|
+
try:
|
|
3650
|
+
validation_json = _extract_json_from_text(validation_output)
|
|
3651
|
+
step_results = validation_json.get('steps', [])
|
|
3652
|
+
|
|
3653
|
+
# Determine if test passed (all steps must pass)
|
|
3654
|
+
test_passed = all(step.get('passed', False) for step in step_results) if step_results else False
|
|
3655
|
+
|
|
3656
|
+
if test_passed:
|
|
3657
|
+
console.print(f"[bold green]✅ Test PASSED: {test_name}[/bold green]")
|
|
3658
|
+
else:
|
|
3659
|
+
console.print(f"[bold red]❌ Test FAILED: {test_name}[/bold red]")
|
|
3660
|
+
|
|
3661
|
+
# Display individual step results
|
|
3662
|
+
for step_result in step_results:
|
|
3663
|
+
step_num = step_result.get('step_number')
|
|
3664
|
+
step_title = step_result.get('title', '')
|
|
3665
|
+
passed = step_result.get('passed', False)
|
|
3666
|
+
details = step_result.get('details', '')
|
|
3667
|
+
|
|
3668
|
+
if passed:
|
|
3669
|
+
console.print(f" [green]✓ Step {step_num}: {step_title}[/green]")
|
|
3670
|
+
console.print(f" [dim]{details}[/dim]")
|
|
3671
|
+
else:
|
|
3672
|
+
console.print(f" [red]✗ Step {step_num}: {step_title}[/red]")
|
|
3673
|
+
console.print(f" [dim]{details}[/dim]")
|
|
3674
|
+
|
|
3675
|
+
console.print()
|
|
3676
|
+
|
|
3677
|
+
# Store result
|
|
3678
|
+
test_results.append({
|
|
3679
|
+
'title': test_name,
|
|
3680
|
+
'passed': test_passed,
|
|
3681
|
+
'file': test_file.name,
|
|
3682
|
+
'step_results': step_results
|
|
3683
|
+
})
|
|
3684
|
+
|
|
3685
|
+
except Exception as e:
|
|
3686
|
+
logger.debug(f"Validation parsing failed for {test_name}: {e}", exc_info=True)
|
|
3687
|
+
console.print(f"[yellow]⚠ Warning: Could not parse validation results for {test_name}[/yellow]")
|
|
3688
|
+
console.print(f"[yellow]Error: {str(e)}[/yellow]")
|
|
3689
|
+
|
|
3690
|
+
# Enhanced diagnostic output
|
|
3691
|
+
_print_validation_diagnostics(validation_output)
|
|
3692
|
+
|
|
3693
|
+
# Generate fallback result using helper function
|
|
3694
|
+
console.print(f"\n[yellow]🔄 Generating fallback validation result...[/yellow]")
|
|
3695
|
+
fallback_result = _create_fallback_result_for_test(
|
|
3696
|
+
test_case,
|
|
3697
|
+
test_file,
|
|
3698
|
+
f'Validation failed - could not parse validator output: {str(e)}'
|
|
3699
|
+
)
|
|
3700
|
+
console.print(f"[dim]Created {len(fallback_result['step_results'])} fallback step results[/dim]\n")
|
|
3701
|
+
|
|
3702
|
+
test_results.append(fallback_result)
|
|
3703
|
+
console.print()
|
|
3704
|
+
|
|
3705
|
+
except Exception as e:
|
|
3706
|
+
logger.debug(f"Test execution failed for {test_name}: {e}", exc_info=True)
|
|
3707
|
+
console.print(f"[red]✗ Test execution failed: {e}[/red]")
|
|
3708
|
+
|
|
3709
|
+
# Create fallback result using helper function
|
|
3710
|
+
fallback_result = _create_fallback_result_for_test(
|
|
3711
|
+
test_case,
|
|
3712
|
+
test_file,
|
|
3713
|
+
f'Test execution failed: {str(e)}'
|
|
3714
|
+
)
|
|
3715
|
+
test_results.append(fallback_result)
|
|
3716
|
+
console.print()
|
|
3717
|
+
|
|
3718
|
+
# Cleanup: Close executor cache resources
|
|
3719
|
+
_cleanup_executor_cache(executor_cache, "executor")
|
|
3720
|
+
_cleanup_executor_cache(validation_executor_cache, "validation executor")
|
|
3721
|
+
|
|
3722
|
+
# Calculate totals
|
|
3723
|
+
total_tests = len(test_results)
|
|
3724
|
+
passed_tests = sum(1 for r in test_results if r['passed'])
|
|
3725
|
+
failed_tests = total_tests - passed_tests
|
|
3726
|
+
|
|
3727
|
+
# Generate summary report
|
|
3728
|
+
console.print(f"\n[bold]{'='*60}[/bold]")
|
|
3729
|
+
console.print(f"[bold cyan]📊 Test Execution Summary[/bold cyan]")
|
|
3730
|
+
console.print(f"[bold]{'='*60}[/bold]\n")
|
|
3731
|
+
|
|
3732
|
+
summary_table = Table(box=box.ROUNDED, border_style="cyan")
|
|
3733
|
+
summary_table.add_column("Metric", style="bold")
|
|
3734
|
+
summary_table.add_column("Value", justify="right")
|
|
3735
|
+
|
|
3736
|
+
summary_table.add_row("Total Tests", str(total_tests))
|
|
3737
|
+
summary_table.add_row("Passed", f"[green]{passed_tests}[/green]")
|
|
3738
|
+
summary_table.add_row("Failed", f"[red]{failed_tests}[/red]")
|
|
3739
|
+
|
|
3740
|
+
if total_tests > 0:
|
|
3741
|
+
pass_rate = (passed_tests / total_tests) * 100
|
|
3742
|
+
summary_table.add_row("Pass Rate", f"{pass_rate:.1f}%")
|
|
3743
|
+
|
|
3744
|
+
console.print(summary_table)
|
|
3745
|
+
|
|
3746
|
+
# Generate structured JSON report
|
|
3747
|
+
overall_result = "pass" if failed_tests == 0 else "fail"
|
|
3748
|
+
|
|
3749
|
+
structured_report = {
|
|
3750
|
+
"test_cases": [
|
|
3751
|
+
{
|
|
3752
|
+
"title": r['title'],
|
|
3753
|
+
"passed": r['passed'],
|
|
3754
|
+
"steps": r.get('step_results', [])
|
|
3755
|
+
}
|
|
3756
|
+
for r in test_results
|
|
3757
|
+
],
|
|
3758
|
+
"overall_result": overall_result,
|
|
3759
|
+
"summary": {
|
|
3760
|
+
"total_tests": total_tests,
|
|
3761
|
+
"passed": passed_tests,
|
|
3762
|
+
"failed": failed_tests,
|
|
3763
|
+
"pass_rate": f"{pass_rate:.1f}%" if total_tests > 0 else "0%"
|
|
3764
|
+
},
|
|
3765
|
+
"timestamp": datetime.now().isoformat()
|
|
3766
|
+
}
|
|
3767
|
+
|
|
3768
|
+
# Save structured report
|
|
3769
|
+
results_path = Path(results_dir)
|
|
3770
|
+
results_path.mkdir(parents=True, exist_ok=True)
|
|
3771
|
+
summary_file = results_path / "test_execution_summary.json"
|
|
3772
|
+
|
|
3773
|
+
console.print(f"\n[bold yellow]💾 Saving test execution summary...[/bold yellow]")
|
|
3774
|
+
with open(summary_file, 'w') as f:
|
|
3775
|
+
json.dump(structured_report, f, indent=2)
|
|
3776
|
+
console.print(f"[green]✓ Summary saved to {summary_file}[/green]\n")
|
|
3777
|
+
|
|
3778
|
+
# Exit with error code if any tests failed
|
|
3779
|
+
if failed_tests > 0:
|
|
3780
|
+
sys.exit(1)
|
|
3781
|
+
|
|
3782
|
+
except click.ClickException:
|
|
3783
|
+
raise
|
|
3784
|
+
except Exception as e:
|
|
3785
|
+
logger.exception("Failed to execute test cases")
|
|
3786
|
+
error_panel = Panel(
|
|
3787
|
+
str(e),
|
|
3788
|
+
title="Error",
|
|
3789
|
+
border_style="red",
|
|
3790
|
+
box=box.ROUNDED
|
|
3791
|
+
)
|
|
3792
|
+
console.print(error_panel, style="red")
|
|
3793
|
+
raise click.Abort()
|
|
3794
|
+
|
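Note: the test_execution_summary.json written by this command mirrors the structured_report dictionary built above (test_cases, overall_result, summary, timestamp). Below is a minimal sketch of reading that report from a follow-up script; the ./results path is taken from the docstring examples, and the snippet itself is illustrative only, not part of the package.

import json
from pathlib import Path

# Assumes the command was run with --results-dir ./results, as in the docstring examples above
summary_file = Path("./results") / "test_execution_summary.json"
report = json.loads(summary_file.read_text())

# Keys mirror structured_report assembled in execute_test_cases
print(f"Overall: {report['overall_result']} ({report['summary']['pass_rate']})")
for case in report["test_cases"]:
    status = "PASS" if case["passed"] else "FAIL"
    print(f"{status}: {case['title']} ({len(case['steps'])} steps)")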