alita-sdk 0.3.462__py3-none-any.whl → 0.3.627__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- alita_sdk/cli/agent/__init__.py +5 -0
- alita_sdk/cli/agent/default.py +258 -0
- alita_sdk/cli/agent_executor.py +15 -3
- alita_sdk/cli/agent_loader.py +56 -8
- alita_sdk/cli/agent_ui.py +93 -31
- alita_sdk/cli/agents.py +2274 -230
- alita_sdk/cli/callbacks.py +96 -25
- alita_sdk/cli/cli.py +10 -1
- alita_sdk/cli/config.py +162 -9
- alita_sdk/cli/context/__init__.py +30 -0
- alita_sdk/cli/context/cleanup.py +198 -0
- alita_sdk/cli/context/manager.py +731 -0
- alita_sdk/cli/context/message.py +285 -0
- alita_sdk/cli/context/strategies.py +289 -0
- alita_sdk/cli/context/token_estimation.py +127 -0
- alita_sdk/cli/input_handler.py +419 -0
- alita_sdk/cli/inventory.py +1073 -0
- alita_sdk/cli/testcases/__init__.py +94 -0
- alita_sdk/cli/testcases/data_generation.py +119 -0
- alita_sdk/cli/testcases/discovery.py +96 -0
- alita_sdk/cli/testcases/executor.py +84 -0
- alita_sdk/cli/testcases/logger.py +85 -0
- alita_sdk/cli/testcases/parser.py +172 -0
- alita_sdk/cli/testcases/prompts.py +91 -0
- alita_sdk/cli/testcases/reporting.py +125 -0
- alita_sdk/cli/testcases/setup.py +108 -0
- alita_sdk/cli/testcases/test_runner.py +282 -0
- alita_sdk/cli/testcases/utils.py +39 -0
- alita_sdk/cli/testcases/validation.py +90 -0
- alita_sdk/cli/testcases/workflow.py +196 -0
- alita_sdk/cli/toolkit.py +14 -17
- alita_sdk/cli/toolkit_loader.py +35 -5
- alita_sdk/cli/tools/__init__.py +36 -2
- alita_sdk/cli/tools/approval.py +224 -0
- alita_sdk/cli/tools/filesystem.py +910 -64
- alita_sdk/cli/tools/planning.py +389 -0
- alita_sdk/cli/tools/terminal.py +414 -0
- alita_sdk/community/__init__.py +72 -12
- alita_sdk/community/inventory/__init__.py +236 -0
- alita_sdk/community/inventory/config.py +257 -0
- alita_sdk/community/inventory/enrichment.py +2137 -0
- alita_sdk/community/inventory/extractors.py +1469 -0
- alita_sdk/community/inventory/ingestion.py +3172 -0
- alita_sdk/community/inventory/knowledge_graph.py +1457 -0
- alita_sdk/community/inventory/parsers/__init__.py +218 -0
- alita_sdk/community/inventory/parsers/base.py +295 -0
- alita_sdk/community/inventory/parsers/csharp_parser.py +907 -0
- alita_sdk/community/inventory/parsers/go_parser.py +851 -0
- alita_sdk/community/inventory/parsers/html_parser.py +389 -0
- alita_sdk/community/inventory/parsers/java_parser.py +593 -0
- alita_sdk/community/inventory/parsers/javascript_parser.py +629 -0
- alita_sdk/community/inventory/parsers/kotlin_parser.py +768 -0
- alita_sdk/community/inventory/parsers/markdown_parser.py +362 -0
- alita_sdk/community/inventory/parsers/python_parser.py +604 -0
- alita_sdk/community/inventory/parsers/rust_parser.py +858 -0
- alita_sdk/community/inventory/parsers/swift_parser.py +832 -0
- alita_sdk/community/inventory/parsers/text_parser.py +322 -0
- alita_sdk/community/inventory/parsers/yaml_parser.py +370 -0
- alita_sdk/community/inventory/patterns/__init__.py +61 -0
- alita_sdk/community/inventory/patterns/ast_adapter.py +380 -0
- alita_sdk/community/inventory/patterns/loader.py +348 -0
- alita_sdk/community/inventory/patterns/registry.py +198 -0
- alita_sdk/community/inventory/presets.py +535 -0
- alita_sdk/community/inventory/retrieval.py +1403 -0
- alita_sdk/community/inventory/toolkit.py +173 -0
- alita_sdk/community/inventory/toolkit_utils.py +176 -0
- alita_sdk/community/inventory/visualize.py +1370 -0
- alita_sdk/configurations/__init__.py +1 -1
- alita_sdk/configurations/ado.py +141 -20
- alita_sdk/configurations/bitbucket.py +0 -3
- alita_sdk/configurations/confluence.py +76 -42
- alita_sdk/configurations/figma.py +76 -0
- alita_sdk/configurations/gitlab.py +17 -5
- alita_sdk/configurations/openapi.py +329 -0
- alita_sdk/configurations/qtest.py +72 -1
- alita_sdk/configurations/report_portal.py +96 -0
- alita_sdk/configurations/sharepoint.py +148 -0
- alita_sdk/configurations/testio.py +83 -0
- alita_sdk/runtime/clients/artifact.py +3 -3
- alita_sdk/runtime/clients/client.py +353 -48
- alita_sdk/runtime/clients/sandbox_client.py +0 -21
- alita_sdk/runtime/langchain/_constants_bkup.py +1318 -0
- alita_sdk/runtime/langchain/assistant.py +123 -26
- alita_sdk/runtime/langchain/constants.py +642 -1
- alita_sdk/runtime/langchain/document_loaders/AlitaExcelLoader.py +103 -60
- alita_sdk/runtime/langchain/document_loaders/AlitaJSONLinesLoader.py +77 -0
- alita_sdk/runtime/langchain/document_loaders/AlitaJSONLoader.py +6 -3
- alita_sdk/runtime/langchain/document_loaders/AlitaPowerPointLoader.py +226 -7
- alita_sdk/runtime/langchain/document_loaders/AlitaTextLoader.py +5 -2
- alita_sdk/runtime/langchain/document_loaders/constants.py +12 -7
- alita_sdk/runtime/langchain/langraph_agent.py +279 -73
- alita_sdk/runtime/langchain/utils.py +82 -15
- alita_sdk/runtime/llms/preloaded.py +2 -6
- alita_sdk/runtime/skills/__init__.py +91 -0
- alita_sdk/runtime/skills/callbacks.py +498 -0
- alita_sdk/runtime/skills/discovery.py +540 -0
- alita_sdk/runtime/skills/executor.py +610 -0
- alita_sdk/runtime/skills/input_builder.py +371 -0
- alita_sdk/runtime/skills/models.py +330 -0
- alita_sdk/runtime/skills/registry.py +355 -0
- alita_sdk/runtime/skills/skill_runner.py +330 -0
- alita_sdk/runtime/toolkits/__init__.py +7 -0
- alita_sdk/runtime/toolkits/application.py +21 -9
- alita_sdk/runtime/toolkits/artifact.py +15 -5
- alita_sdk/runtime/toolkits/datasource.py +13 -6
- alita_sdk/runtime/toolkits/mcp.py +139 -251
- alita_sdk/runtime/toolkits/mcp_config.py +1048 -0
- alita_sdk/runtime/toolkits/planning.py +178 -0
- alita_sdk/runtime/toolkits/skill_router.py +238 -0
- alita_sdk/runtime/toolkits/subgraph.py +251 -6
- alita_sdk/runtime/toolkits/tools.py +238 -32
- alita_sdk/runtime/toolkits/vectorstore.py +11 -5
- alita_sdk/runtime/tools/__init__.py +3 -1
- alita_sdk/runtime/tools/application.py +20 -6
- alita_sdk/runtime/tools/artifact.py +511 -28
- alita_sdk/runtime/tools/data_analysis.py +183 -0
- alita_sdk/runtime/tools/function.py +43 -15
- alita_sdk/runtime/tools/image_generation.py +50 -44
- alita_sdk/runtime/tools/llm.py +852 -67
- alita_sdk/runtime/tools/loop.py +3 -1
- alita_sdk/runtime/tools/loop_output.py +3 -1
- alita_sdk/runtime/tools/mcp_remote_tool.py +25 -10
- alita_sdk/runtime/tools/mcp_server_tool.py +7 -6
- alita_sdk/runtime/tools/planning/__init__.py +36 -0
- alita_sdk/runtime/tools/planning/models.py +246 -0
- alita_sdk/runtime/tools/planning/wrapper.py +607 -0
- alita_sdk/runtime/tools/router.py +2 -4
- alita_sdk/runtime/tools/sandbox.py +9 -6
- alita_sdk/runtime/tools/skill_router.py +776 -0
- alita_sdk/runtime/tools/tool.py +3 -1
- alita_sdk/runtime/tools/vectorstore.py +7 -2
- alita_sdk/runtime/tools/vectorstore_base.py +51 -11
- alita_sdk/runtime/utils/AlitaCallback.py +137 -21
- alita_sdk/runtime/utils/constants.py +5 -1
- alita_sdk/runtime/utils/mcp_client.py +492 -0
- alita_sdk/runtime/utils/mcp_oauth.py +202 -5
- alita_sdk/runtime/utils/mcp_sse_client.py +36 -7
- alita_sdk/runtime/utils/mcp_tools_discovery.py +124 -0
- alita_sdk/runtime/utils/serialization.py +155 -0
- alita_sdk/runtime/utils/streamlit.py +6 -10
- alita_sdk/runtime/utils/toolkit_utils.py +16 -5
- alita_sdk/runtime/utils/utils.py +36 -0
- alita_sdk/tools/__init__.py +113 -29
- alita_sdk/tools/ado/repos/__init__.py +51 -33
- alita_sdk/tools/ado/repos/repos_wrapper.py +148 -89
- alita_sdk/tools/ado/test_plan/__init__.py +25 -9
- alita_sdk/tools/ado/test_plan/test_plan_wrapper.py +23 -1
- alita_sdk/tools/ado/utils.py +1 -18
- alita_sdk/tools/ado/wiki/__init__.py +25 -8
- alita_sdk/tools/ado/wiki/ado_wrapper.py +291 -22
- alita_sdk/tools/ado/work_item/__init__.py +26 -9
- alita_sdk/tools/ado/work_item/ado_wrapper.py +56 -3
- alita_sdk/tools/advanced_jira_mining/__init__.py +11 -8
- alita_sdk/tools/aws/delta_lake/__init__.py +13 -9
- alita_sdk/tools/aws/delta_lake/tool.py +5 -1
- alita_sdk/tools/azure_ai/search/__init__.py +11 -8
- alita_sdk/tools/azure_ai/search/api_wrapper.py +1 -1
- alita_sdk/tools/base/tool.py +5 -1
- alita_sdk/tools/base_indexer_toolkit.py +170 -45
- alita_sdk/tools/bitbucket/__init__.py +17 -12
- alita_sdk/tools/bitbucket/api_wrapper.py +59 -11
- alita_sdk/tools/bitbucket/cloud_api_wrapper.py +49 -35
- alita_sdk/tools/browser/__init__.py +5 -4
- alita_sdk/tools/carrier/__init__.py +5 -6
- alita_sdk/tools/carrier/backend_reports_tool.py +6 -6
- alita_sdk/tools/carrier/run_ui_test_tool.py +6 -6
- alita_sdk/tools/carrier/ui_reports_tool.py +5 -5
- alita_sdk/tools/chunkers/__init__.py +3 -1
- alita_sdk/tools/chunkers/code/treesitter/treesitter.py +37 -13
- alita_sdk/tools/chunkers/sematic/json_chunker.py +1 -0
- alita_sdk/tools/chunkers/sematic/markdown_chunker.py +97 -6
- alita_sdk/tools/chunkers/universal_chunker.py +270 -0
- alita_sdk/tools/cloud/aws/__init__.py +10 -7
- alita_sdk/tools/cloud/azure/__init__.py +10 -7
- alita_sdk/tools/cloud/gcp/__init__.py +10 -7
- alita_sdk/tools/cloud/k8s/__init__.py +10 -7
- alita_sdk/tools/code/linter/__init__.py +10 -8
- alita_sdk/tools/code/loaders/codesearcher.py +3 -2
- alita_sdk/tools/code/sonar/__init__.py +10 -7
- alita_sdk/tools/code_indexer_toolkit.py +73 -23
- alita_sdk/tools/confluence/__init__.py +21 -15
- alita_sdk/tools/confluence/api_wrapper.py +78 -23
- alita_sdk/tools/confluence/loader.py +4 -2
- alita_sdk/tools/custom_open_api/__init__.py +12 -5
- alita_sdk/tools/elastic/__init__.py +11 -8
- alita_sdk/tools/elitea_base.py +493 -30
- alita_sdk/tools/figma/__init__.py +58 -11
- alita_sdk/tools/figma/api_wrapper.py +1235 -143
- alita_sdk/tools/figma/figma_client.py +73 -0
- alita_sdk/tools/figma/toon_tools.py +2748 -0
- alita_sdk/tools/github/__init__.py +13 -14
- alita_sdk/tools/github/github_client.py +224 -100
- alita_sdk/tools/github/graphql_client_wrapper.py +119 -33
- alita_sdk/tools/github/schemas.py +14 -5
- alita_sdk/tools/github/tool.py +5 -1
- alita_sdk/tools/github/tool_prompts.py +9 -22
- alita_sdk/tools/gitlab/__init__.py +15 -11
- alita_sdk/tools/gitlab/api_wrapper.py +207 -41
- alita_sdk/tools/gitlab_org/__init__.py +10 -8
- alita_sdk/tools/gitlab_org/api_wrapper.py +63 -64
- alita_sdk/tools/google/bigquery/__init__.py +13 -12
- alita_sdk/tools/google/bigquery/tool.py +5 -1
- alita_sdk/tools/google_places/__init__.py +10 -8
- alita_sdk/tools/google_places/api_wrapper.py +1 -1
- alita_sdk/tools/jira/__init__.py +17 -11
- alita_sdk/tools/jira/api_wrapper.py +91 -40
- alita_sdk/tools/keycloak/__init__.py +11 -8
- alita_sdk/tools/localgit/__init__.py +9 -3
- alita_sdk/tools/localgit/local_git.py +62 -54
- alita_sdk/tools/localgit/tool.py +5 -1
- alita_sdk/tools/memory/__init__.py +11 -3
- alita_sdk/tools/non_code_indexer_toolkit.py +1 -0
- alita_sdk/tools/ocr/__init__.py +11 -8
- alita_sdk/tools/openapi/__init__.py +490 -114
- alita_sdk/tools/openapi/api_wrapper.py +1368 -0
- alita_sdk/tools/openapi/tool.py +20 -0
- alita_sdk/tools/pandas/__init__.py +20 -12
- alita_sdk/tools/pandas/api_wrapper.py +38 -25
- alita_sdk/tools/pandas/dataframe/generator/base.py +3 -1
- alita_sdk/tools/postman/__init__.py +11 -11
- alita_sdk/tools/pptx/__init__.py +10 -9
- alita_sdk/tools/pptx/pptx_wrapper.py +1 -1
- alita_sdk/tools/qtest/__init__.py +30 -10
- alita_sdk/tools/qtest/api_wrapper.py +430 -13
- alita_sdk/tools/rally/__init__.py +10 -8
- alita_sdk/tools/rally/api_wrapper.py +1 -1
- alita_sdk/tools/report_portal/__init__.py +12 -9
- alita_sdk/tools/salesforce/__init__.py +10 -9
- alita_sdk/tools/servicenow/__init__.py +17 -14
- alita_sdk/tools/servicenow/api_wrapper.py +1 -1
- alita_sdk/tools/sharepoint/__init__.py +10 -8
- alita_sdk/tools/sharepoint/api_wrapper.py +4 -4
- alita_sdk/tools/slack/__init__.py +10 -8
- alita_sdk/tools/slack/api_wrapper.py +2 -2
- alita_sdk/tools/sql/__init__.py +11 -9
- alita_sdk/tools/testio/__init__.py +10 -8
- alita_sdk/tools/testrail/__init__.py +11 -8
- alita_sdk/tools/testrail/api_wrapper.py +1 -1
- alita_sdk/tools/utils/__init__.py +9 -4
- alita_sdk/tools/utils/content_parser.py +77 -3
- alita_sdk/tools/utils/text_operations.py +410 -0
- alita_sdk/tools/utils/tool_prompts.py +79 -0
- alita_sdk/tools/vector_adapters/VectorStoreAdapter.py +17 -13
- alita_sdk/tools/xray/__init__.py +12 -9
- alita_sdk/tools/yagmail/__init__.py +9 -3
- alita_sdk/tools/zephyr/__init__.py +9 -7
- alita_sdk/tools/zephyr_enterprise/__init__.py +11 -8
- alita_sdk/tools/zephyr_essential/__init__.py +10 -8
- alita_sdk/tools/zephyr_essential/api_wrapper.py +30 -13
- alita_sdk/tools/zephyr_essential/client.py +2 -2
- alita_sdk/tools/zephyr_scale/__init__.py +11 -9
- alita_sdk/tools/zephyr_scale/api_wrapper.py +2 -2
- alita_sdk/tools/zephyr_squad/__init__.py +10 -8
- {alita_sdk-0.3.462.dist-info → alita_sdk-0.3.627.dist-info}/METADATA +147 -7
- alita_sdk-0.3.627.dist-info/RECORD +468 -0
- alita_sdk-0.3.627.dist-info/entry_points.txt +2 -0
- alita_sdk-0.3.462.dist-info/RECORD +0 -384
- alita_sdk-0.3.462.dist-info/entry_points.txt +0 -2
- {alita_sdk-0.3.462.dist-info → alita_sdk-0.3.627.dist-info}/WHEEL +0 -0
- {alita_sdk-0.3.462.dist-info → alita_sdk-0.3.627.dist-info}/licenses/LICENSE +0 -0
- {alita_sdk-0.3.462.dist-info → alita_sdk-0.3.627.dist-info}/top_level.txt +0 -0
alita_sdk/cli/agents.py
CHANGED

@@ -11,8 +11,10 @@ import json
 import logging
 import sqlite3
 import sys
+import re
 from typing import Optional, Dict, Any, List
 from pathlib import Path
+from datetime import datetime
 import yaml
 
 from rich.console import Console
@@ -31,12 +33,288 @@ from .agent_loader import load_agent_definition
 from .agent_executor import create_llm_instance, create_agent_executor, create_agent_executor_with_mcp
 from .toolkit_loader import load_toolkit_config, load_toolkit_configs
 from .callbacks import create_cli_callback, CLICallbackHandler
+from .input_handler import get_input_handler, styled_input, styled_selection_input
+# Context management for chat history
+from .context import CLIContextManager, CLIMessage, purge_old_sessions as purge_context_sessions
+# Test execution utilities
+from .testcases import (
+    parse_test_case,
+    resolve_toolkit_config_path,
+    build_bulk_data_gen_prompt,
+    build_single_test_execution_prompt,
+    build_single_test_validation_prompt,
+    extract_json_from_text,
+    create_fallback_result_for_test,
+    print_validation_diagnostics,
+    TestLogCapture,
+    create_executor_from_cache,
+    cleanup_executor_cache,
+    extract_toolkit_name,
+    # New helper functions
+    load_test_runner_agent,
+    load_data_generator_agent,
+    load_validator_agent,
+    discover_test_case_files,
+    validate_test_case_files,
+    print_test_execution_header,
+    execute_bulk_data_generation,
+    execute_single_test_case,
+    validate_single_test_case,
+    generate_summary_report,
+    save_structured_report,
+    print_test_execution_summary,
+    # Workflow orchestration
+    parse_all_test_cases,
+    filter_test_cases_needing_data_gen,
+    execute_all_test_cases,
+)
 
 logger = logging.getLogger(__name__)
 
 # Create a rich console for beautiful output
 console = Console()
 
+def _get_alita_system_prompt(config) -> str:
+    """
+    Get the Alita system prompt from user config or fallback to default.
+
+    Checks for $ALITA_DIR/agents/default.agent.md first, then falls back
+    to the built-in DEFAULT_PROMPT.
+
+    Returns:
+        The system prompt string for Alita
+    """
+    from .agent.default import DEFAULT_PROMPT
+
+    # Check for user-customized prompt
+    custom_prompt_path = Path(config.agents_dir) / 'default.agent.md'
+
+    if custom_prompt_path.exists():
+        try:
+            content = custom_prompt_path.read_text(encoding='utf-8')
+            # Parse the agent.md file - extract system_prompt from frontmatter or use content
+            if content.startswith('---'):
+                # Has YAML frontmatter, try to parse
+                try:
+                    parts = content.split('---', 2)
+                    if len(parts) >= 3:
+                        frontmatter = yaml.safe_load(parts[1])
+                        body = parts[2].strip()
+                        # Use system_prompt from frontmatter if present, otherwise use body
+                        return frontmatter.get('system_prompt', body) if frontmatter else body
+                except Exception:
+                    pass
+            # No frontmatter or parsing failed, use entire content as prompt
+            return content.strip()
+        except Exception as e:
+            logger.debug(f"Failed to load custom Alita prompt from {custom_prompt_path}: {e}")
+
+    return DEFAULT_PROMPT
+
+
+def _get_inventory_system_prompt(config) -> str:
+    """
+    Get the Inventory agent system prompt from user config or fallback to default.
+
+    Checks for $ALITA_DIR/agents/inventory.agent.md first, then falls back
+    to the default prompt with inventory-specific instructions.
+
+    Returns:
+        The system prompt string for Inventory agent
+    """
+    from .agent.default import DEFAULT_PROMPT
+
+    # Check for user-customized prompt
+    custom_prompt_path = Path(config.agents_dir) / 'inventory.agent.md'
+
+    if custom_prompt_path.exists():
+        try:
+            content = custom_prompt_path.read_text(encoding='utf-8')
+            # Parse the agent.md file - extract system_prompt from frontmatter or use content
+            if content.startswith('---'):
+                try:
+                    parts = content.split('---', 2)
+                    if len(parts) >= 3:
+                        frontmatter = yaml.safe_load(parts[1])
+                        body = parts[2].strip()
+                        return frontmatter.get('system_prompt', body) if frontmatter else body
+                except Exception:
+                    pass
+            return content.strip()
+        except Exception as e:
+            logger.debug(f"Failed to load custom Inventory prompt from {custom_prompt_path}: {e}")
+
+    # Use default prompt + inventory toolkit instructions
+    inventory_context = """
+
+## Inventory Knowledge Graph
+
+You have access to the Inventory toolkit for querying a knowledge graph of software entities and relationships.
+Use these tools to help users understand their codebase:
+
+- **search_entities**: Find entities by name, type, or path patterns
+- **get_entity**: Get full details of a specific entity
+- **get_relationships**: Find relationships from/to an entity
+- **impact_analysis**: Analyze what depends on an entity (useful for change impact)
+- **get_graph_stats**: Get statistics about the knowledge graph
+
+When answering questions about the codebase, use these tools to provide accurate, citation-backed answers.
+"""
+    return DEFAULT_PROMPT + inventory_context
+
+
+def _resolve_inventory_path(path: str, work_dir: Optional[str] = None) -> Optional[str]:
+    """
+    Resolve an inventory/knowledge graph file path.
+
+    Tries locations in order:
+    1. Absolute path
+    2. Relative to current working directory (or work_dir if provided)
+    3. Relative to .alita/inventory/ in current directory
+    4. Relative to .alita/inventory/ in work_dir (if different)
+
+    Args:
+        path: The path to resolve (can be relative or absolute)
+        work_dir: Optional workspace directory to check
+
+    Returns:
+        Absolute path to the file if found, None otherwise
+    """
+    # Expand user home directory
+    path = str(Path(path).expanduser())
+
+    # Try absolute path first
+    if Path(path).is_absolute() and Path(path).exists():
+        return str(Path(path).resolve())
+
+    # Try relative to current working directory
+    cwd = Path.cwd()
+    cwd_path = cwd / path
+    if cwd_path.exists():
+        return str(cwd_path.resolve())
+
+    # Try .alita/inventory/ in current directory
+    alita_inventory_path = cwd / '.alita' / 'inventory' / path
+    if alita_inventory_path.exists():
+        return str(alita_inventory_path.resolve())
+
+    # If work_dir is different from cwd, try there too
+    if work_dir:
+        work_path = Path(work_dir)
+        if work_path != cwd:
+            # Try relative to work_dir
+            work_rel_path = work_path / path
+            if work_rel_path.exists():
+                return str(work_rel_path.resolve())
+
+            # Try .alita/inventory/ in work_dir
+            work_alita_path = work_path / '.alita' / 'inventory' / path
+            if work_alita_path.exists():
+                return str(work_alita_path.resolve())
+
+    return None
+
+
+def _build_inventory_config(path: str, work_dir: Optional[str] = None) -> Optional[Dict[str, Any]]:
+    """
+    Build an inventory toolkit configuration from a file path.
+
+    The toolkit name is derived from the filename (stem).
+    All available tools are included.
+
+    Args:
+        path: Path to the knowledge graph JSON file
+        work_dir: Optional workspace directory for path resolution
+
+    Returns:
+        Toolkit configuration dict if file found, None otherwise
+    """
+    # Resolve the path
+    resolved_path = _resolve_inventory_path(path, work_dir)
+    if not resolved_path:
+        return None
+
+    # Validate it's a JSON file
+    if not resolved_path.endswith('.json'):
+        return None
+
+    # Validate file exists and is readable
+    try:
+        with open(resolved_path, 'r') as f:
+            # Just check it's valid JSON
+            json.load(f)
+    except (IOError, json.JSONDecodeError):
+        return None
+
+    # Extract toolkit name from filename (e.g., 'alita' from 'alita.json')
+    toolkit_name = Path(resolved_path).stem
+
+    # Build configuration with all available tools
+    from .toolkit_loader import INVENTORY_TOOLS
+
+    return {
+        'type': 'inventory',
+        'toolkit_name': toolkit_name,
+        'graph_path': resolved_path,
+        'base_directory': work_dir,
+        'selected_tools': INVENTORY_TOOLS,
+    }
+
+
+def _get_inventory_json_files(work_dir: Optional[str] = None) -> List[str]:
+    """
+    Get list of .json files for inventory path completion.
+
+    Searches:
+    1. Current working directory (*.json files)
+    2. .alita/inventory/ directory (*.json files)
+    3. work_dir and work_dir/.alita/inventory/ if different from cwd
+
+    Args:
+        work_dir: Optional workspace directory
+
+    Returns:
+        List of relative or display paths for completion
+    """
+    suggestions = []
+    seen = set()
+
+    cwd = Path.cwd()
+
+    # Current directory .json files
+    for f in cwd.glob('*.json'):
+        if f.name not in seen:
+            suggestions.append(f.name)
+            seen.add(f.name)
+
+    # .alita/inventory/ directory
+    alita_inv = cwd / '.alita' / 'inventory'
+    if alita_inv.exists():
+        for f in alita_inv.glob('*.json'):
+            display = f'.alita/inventory/{f.name}'
+            if display not in seen:
+                suggestions.append(display)
+                seen.add(display)
+
+    # work_dir if different
+    if work_dir:
+        work_path = Path(work_dir)
+        if work_path != cwd:
+            for f in work_path.glob('*.json'):
+                if f.name not in seen:
+                    suggestions.append(f.name)
+                    seen.add(f.name)
+
+            work_alita_inv = work_path / '.alita' / 'inventory'
+            if work_alita_inv.exists():
+                for f in work_alita_inv.glob('*.json'):
+                    display = f'.alita/inventory/{f.name}'
+                    if display not in seen:
+                        suggestions.append(display)
+                        seen.add(display)
+
+    return sorted(suggestions)
+
 
 
 def _load_mcp_tools(agent_def: Dict[str, Any], mcp_config_path: str) -> List[Dict[str, Any]]:
     """Load MCP tools from agent definition with tool-level filtering.
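Aside: the `_get_alita_system_prompt` / `_get_inventory_system_prompt` helpers above split an optional `---`-delimited YAML frontmatter block off the agent markdown file and prefer its `system_prompt` key over the body. A minimal standalone sketch of that parsing logic (the sample file content below is hypothetical, not something shipped with the package):

```python
# Sketch of the frontmatter handling used by _get_alita_system_prompt above.
# The sample content is made up; only the '---' split and the 'system_prompt'
# key mirror the logic shown in the hunk.
import yaml

sample = """---
name: default
system_prompt: You are Alita, a helpful CLI coding assistant.
---
Fallback body used when the frontmatter has no system_prompt key.
"""

def extract_prompt(content: str) -> str:
    if content.startswith('---'):
        parts = content.split('---', 2)
        if len(parts) >= 3:
            frontmatter = yaml.safe_load(parts[1])
            body = parts[2].strip()
            return frontmatter.get('system_prompt', body) if frontmatter else body
    return content.strip()

print(extract_prompt(sample))  # -> You are Alita, a helpful CLI coding assistant.
```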
@@ -54,11 +332,16 @@ def _load_mcp_tools(agent_def: Dict[str, Any], mcp_config_path: str) -> List[Dic
 
 def _setup_local_agent_executor(client, agent_def: Dict[str, Any], toolkit_config: tuple,
                                 config, model: Optional[str], temperature: Optional[float],
-                                max_tokens: Optional[int], memory,
+                                max_tokens: Optional[int], memory, allowed_directories: Optional[List[str]],
+                                plan_state: Optional[Dict] = None):
     """Setup local agent executor with all configurations.
 
+    Args:
+        allowed_directories: List of allowed directories for filesystem access.
+            First directory is the primary/base directory.
+
     Returns:
-        Tuple of (agent_executor, mcp_session_manager, llm, llm_model, filesystem_tools)
+        Tuple of (agent_executor, mcp_session_manager, llm, llm_model, filesystem_tools, terminal_tools, planning_tools)
     """
     # Load toolkit configs
     toolkit_configs = load_toolkit_configs(agent_def, toolkit_config)
@@ -72,17 +355,28 @@ def _setup_local_agent_executor(client, agent_def: Dict[str, Any], toolkit_confi
         client, model, agent_def, temperature, max_tokens
     )
 
-    # Add filesystem tools if
+    # Add filesystem tools if directories are provided
     filesystem_tools = None
-
-
+    terminal_tools = None
+    if allowed_directories:
+        from .tools import get_filesystem_tools, get_terminal_tools
         preset = agent_def.get('filesystem_tools_preset')
         include_tools = agent_def.get('filesystem_tools_include')
         exclude_tools = agent_def.get('filesystem_tools_exclude')
-        filesystem_tools = get_filesystem_tools(work_dir, include_tools, exclude_tools, preset)
 
-
-
+        # First directory is the primary base directory
+        base_dir = allowed_directories[0]
+        extra_dirs = allowed_directories[1:] if len(allowed_directories) > 1 else None
+        filesystem_tools = get_filesystem_tools(base_dir, include_tools, exclude_tools, preset, extra_dirs)
+
+        # Terminal tools use primary directory as cwd
+        terminal_tools = get_terminal_tools(base_dir)
+
+        tool_count = len(filesystem_tools) + len(terminal_tools)
+        if len(allowed_directories) == 1:
+            access_msg = f"✓ Granted filesystem & terminal access to: {base_dir} ({tool_count} tools)"
+        else:
+            access_msg = f"✓ Granted filesystem & terminal access to {len(allowed_directories)} directories ({tool_count} tools)"
         if preset:
             access_msg += f" [preset: {preset}]"
         if include_tools:
@@ -91,12 +385,32 @@ def _setup_local_agent_executor(client, agent_def: Dict[str, Any], toolkit_confi
             access_msg += f" [exclude: {', '.join(exclude_tools)}]"
         console.print(f"[dim]{access_msg}[/dim]")
 
+    # Add planning tools (always available)
+    planning_tools = None
+    plan_state_obj = None
+    if plan_state is not None:
+        from .tools import get_planning_tools, PlanState
+        # Create a plan callback to update the dict when plan changes
+        def plan_callback(state: PlanState):
+            plan_state['title'] = state.title
+            plan_state['steps'] = state.to_dict()['steps']
+            plan_state['session_id'] = state.session_id
+
+        # Get session_id from plan_state dict if provided
+        session_id = plan_state.get('session_id')
+        planning_tools, plan_state_obj = get_planning_tools(
+            plan_state=None,
+            plan_callback=plan_callback,
+            session_id=session_id
+        )
+        console.print(f"[dim]✓ Planning tools enabled ({len(planning_tools)} tools) [session: {plan_state_obj.session_id}][/dim]")
+
     # Check if we have tools
-    has_tools = bool(agent_def.get('tools') or toolkit_configs or filesystem_tools)
+    has_tools = bool(agent_def.get('tools') or toolkit_configs or filesystem_tools or terminal_tools or planning_tools)
     has_mcp = any(tc.get('toolkit_type') == 'mcp' for tc in toolkit_configs)
 
     if not has_tools:
-        return None, None, llm, llm_model, filesystem_tools
+        return None, None, llm, llm_model, filesystem_tools, terminal_tools, planning_tools
 
     # Create agent executor with or without MCP
     mcp_session_manager = None
@@ -116,17 +430,283 @@ def _setup_local_agent_executor(client, agent_def: Dict[str, Any], toolkit_confi
             create_agent_executor_with_mcp(
                 client, agent_def, toolkit_configs,
                 llm, llm_model, llm_temperature, llm_max_tokens, memory,
-                filesystem_tools=filesystem_tools
+                filesystem_tools=filesystem_tools,
+                terminal_tools=terminal_tools,
+                planning_tools=planning_tools
             )
         )
     else:
         agent_executor = create_agent_executor(
            client, agent_def, toolkit_configs,
            llm, llm_model, llm_temperature, llm_max_tokens, memory,
-           filesystem_tools=filesystem_tools
+           filesystem_tools=filesystem_tools,
+           terminal_tools=terminal_tools,
+           planning_tools=planning_tools
        )
 
-    return agent_executor, mcp_session_manager, llm, llm_model, filesystem_tools
+    return agent_executor, mcp_session_manager, llm, llm_model, filesystem_tools, terminal_tools, planning_tools
+
+
+def _select_model_interactive(client) -> Optional[Dict[str, Any]]:
+    """
+    Show interactive menu to select a model from available models.
+
+    Returns:
+        Selected model info dict or None if cancelled
+    """
+    console.print("\n🔧 [bold cyan]Select a model:[/bold cyan]\n")
+
+    try:
+        # Use the new get_available_models API
+        models = client.get_available_models()
+        if not models:
+            console.print("[yellow]No models available from the platform.[/yellow]")
+            return None
+
+        # Build models list - API returns items[].name
+        models_list = []
+        for model in models:
+            model_name = model.get('name')
+            if model_name:
+                models_list.append({
+                    'name': model_name,
+                    'id': model.get('id'),
+                    'model_data': model
+                })
+
+        if not models_list:
+            console.print("[yellow]No models found.[/yellow]")
+            return None
+
+        # Display models with numbers
+        table = Table(show_header=True, header_style="bold cyan", box=box.SIMPLE)
+        table.add_column("#", style="dim", width=4)
+        table.add_column("Model", style="cyan")
+
+        for i, model in enumerate(models_list, 1):
+            table.add_row(str(i), model['name'])
+
+        console.print(table)
+        console.print(f"\n[dim]0. Cancel[/dim]")
+
+        # Get user selection using styled input
+        while True:
+            try:
+                choice = styled_selection_input("Select model number")
+
+                if choice == '0':
+                    return None
+
+                idx = int(choice) - 1
+                if 0 <= idx < len(models_list):
+                    selected = models_list[idx]
+                    console.print(f"✓ [green]Selected:[/green] [bold]{selected['name']}[/bold]")
+                    return selected
+                else:
+                    console.print(f"[yellow]Invalid selection. Please enter a number between 0 and {len(models_list)}[/yellow]")
+            except ValueError:
+                console.print("[yellow]Please enter a valid number[/yellow]")
+            except (KeyboardInterrupt, EOFError):
+                return None
+
+    except Exception as e:
+        console.print(f"[red]Error fetching models: {e}[/red]")
+        return None
+
+
+def _select_mcp_interactive(config) -> Optional[Dict[str, Any]]:
+    """
+    Show interactive menu to select an MCP server from mcp.json.
+
+    Returns:
+        Selected MCP server config dict or None if cancelled
+    """
+    from .mcp_loader import load_mcp_config
+
+    console.print("\n🔌 [bold cyan]Select an MCP server to add:[/bold cyan]\n")
+
+    mcp_config = load_mcp_config(config.mcp_config_path)
+    mcp_servers = mcp_config.get('mcpServers', {})
+
+    if not mcp_servers:
+        console.print(f"[yellow]No MCP servers found in {config.mcp_config_path}[/yellow]")
+        return None
+
+    servers_list = list(mcp_servers.items())
+
+    # Display servers with numbers
+    table = Table(show_header=True, header_style="bold cyan", box=box.SIMPLE)
+    table.add_column("#", style="dim", width=4)
+    table.add_column("Server", style="cyan")
+    table.add_column("Type", style="dim")
+    table.add_column("Command/URL", style="dim")
+
+    for i, (name, server_config) in enumerate(servers_list, 1):
+        server_type = server_config.get('type', 'stdio')
+        cmd_or_url = server_config.get('url') or server_config.get('command', '')
+        table.add_row(str(i), name, server_type, cmd_or_url[:40])
+
+    console.print(table)
+    console.print(f"\n[dim]0. Cancel[/dim]")
+
+    # Get user selection using styled input
+    while True:
+        try:
+            choice = styled_selection_input("Select MCP server number")
+
+            if choice == '0':
+                return None
+
+            idx = int(choice) - 1
+            if 0 <= idx < len(servers_list):
+                name, server_config = servers_list[idx]
+                console.print(f"✓ [green]Selected:[/green] [bold]{name}[/bold]")
+                return {'name': name, 'config': server_config}
+            else:
+                console.print(f"[yellow]Invalid selection. Please enter a number between 0 and {len(servers_list)}[/yellow]")
+        except ValueError:
+            console.print("[yellow]Please enter a valid number[/yellow]")
+        except (KeyboardInterrupt, EOFError):
+            return None
+
+
+def _select_toolkit_interactive(config) -> Optional[Dict[str, Any]]:
+    """
+    Show interactive menu to select a toolkit from $ALITA_DIR/tools.
+
+    Returns:
+        Selected toolkit config dict or None if cancelled
+    """
+    console.print("\n🧰 [bold cyan]Select a toolkit to add:[/bold cyan]\n")
+
+    tools_dir = Path(config.tools_dir)
+
+    if not tools_dir.exists():
+        console.print(f"[yellow]Tools directory not found: {tools_dir}[/yellow]")
+        return None
+
+    # Find all toolkit config files
+    toolkit_files = []
+    for pattern in ['*.json', '*.yaml', '*.yml']:
+        toolkit_files.extend(tools_dir.glob(pattern))
+
+    if not toolkit_files:
+        console.print(f"[yellow]No toolkit configurations found in {tools_dir}[/yellow]")
+        return None
+
+    # Load toolkit info
+    toolkits_list = []
+    for file_path in toolkit_files:
+        try:
+            config_data = load_toolkit_config(str(file_path))
+            toolkits_list.append({
+                'file': str(file_path),
+                'name': config_data.get('toolkit_name') or config_data.get('name') or file_path.stem,
+                'type': config_data.get('toolkit_type') or config_data.get('type', 'unknown'),
+                'config': config_data
+            })
+        except Exception as e:
+            logger.debug(f"Failed to load toolkit config {file_path}: {e}")
+
+    if not toolkits_list:
+        console.print(f"[yellow]No valid toolkit configurations found in {tools_dir}[/yellow]")
+        return None
+
+    # Display toolkits with numbers
+    table = Table(show_header=True, header_style="bold cyan", box=box.SIMPLE)
+    table.add_column("#", style="dim", width=4)
+    table.add_column("Toolkit", style="cyan")
+    table.add_column("Type", style="dim")
+    table.add_column("File", style="dim")
+
+    for i, toolkit in enumerate(toolkits_list, 1):
+        table.add_row(str(i), toolkit['name'], toolkit['type'], Path(toolkit['file']).name)
+
+    console.print(table)
+    console.print(f"\n[dim]0. Cancel[/dim]")
+
+    # Get user selection using styled input
+    while True:
+        try:
+            choice = styled_selection_input("Select toolkit number")
+
+            if choice == '0':
+                return None
+
+            idx = int(choice) - 1
+            if 0 <= idx < len(toolkits_list):
+                selected = toolkits_list[idx]
+                console.print(f"✓ [green]Selected:[/green] [bold]{selected['name']}[/bold]")
+                return selected
+            else:
+                console.print(f"[yellow]Invalid selection. Please enter a number between 0 and {len(toolkits_list)}[/yellow]")
+        except ValueError:
+            console.print("[yellow]Please enter a valid number[/yellow]")
+        except (KeyboardInterrupt, EOFError):
+            return None
+
+
+def _list_available_toolkits(config) -> List[str]:
+    """
+    List names of all available toolkits in $ALITA_DIR/tools.
+
+    Returns:
+        List of toolkit names
+    """
+    tools_dir = Path(config.tools_dir)
+
+    if not tools_dir.exists():
+        return []
+
+    toolkit_names = []
+    for pattern in ['*.json', '*.yaml', '*.yml']:
+        for file_path in tools_dir.glob(pattern):
+            try:
+                config_data = load_toolkit_config(str(file_path))
+                name = config_data.get('toolkit_name') or config_data.get('name') or file_path.stem
+                toolkit_names.append(name)
+            except Exception:
+                pass
+
+    return toolkit_names
+
+
+def _find_toolkit_by_name(config, toolkit_name: str) -> Optional[Dict[str, Any]]:
+    """
+    Find a toolkit by name in $ALITA_DIR/tools.
+
+    Args:
+        config: CLI configuration
+        toolkit_name: Name of the toolkit to find (case-insensitive)
+
+    Returns:
+        Toolkit config dict or None if not found
+    """
+    tools_dir = Path(config.tools_dir)
+
+    if not tools_dir.exists():
+        return None
+
+    toolkit_name_lower = toolkit_name.lower()
+
+    for pattern in ['*.json', '*.yaml', '*.yml']:
+        for file_path in tools_dir.glob(pattern):
+            try:
+                config_data = load_toolkit_config(str(file_path))
+                name = config_data.get('toolkit_name') or config_data.get('name') or file_path.stem
+
+                # Match by name (case-insensitive) or file stem
+                if name.lower() == toolkit_name_lower or file_path.stem.lower() == toolkit_name_lower:
+                    return {
+                        'file': str(file_path),
+                        'name': name,
+                        'type': config_data.get('toolkit_type') or config_data.get('type', 'unknown'),
+                        'config': config_data
+                    }
+            except Exception:
+                pass
+
+    return None
 
 
 def _select_agent_interactive(client, config) -> Optional[str]:
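Aside: `_select_toolkit_interactive` and `_find_toolkit_by_name` above only inspect a handful of keys when scanning `$ALITA_DIR/tools`: `toolkit_name`/`name` and `toolkit_type`/`type`, falling back to the file stem. A hypothetical minimal config illustrating just those fields (any settings a real toolkit additionally needs are omitted here):

```python
# Hypothetical minimal toolkit config (e.g. the parsed content of
# $ALITA_DIR/tools/jira.json), showing only the keys the selectors read.
minimal_toolkit_config = {
    "toolkit_name": "jira",  # matched case-insensitively, falling back to the file stem
    "toolkit_type": "jira",  # shown in the "Type" column of the selection table
}
```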
@@ -134,12 +714,19 @@ def _select_agent_interactive(client, config) -> Optional[str]:
     Show interactive menu to select an agent from platform and local agents.
 
     Returns:
-        Agent source (name/id for platform, file path for local
+        Agent source (name/id for platform, file path for local, '__direct__' for direct chat,
+        '__inventory__' for inventory agent) or None if cancelled
     """
     from .config import CLIConfig
 
     console.print("\n🤖 [bold cyan]Select an agent to chat with:[/bold cyan]\n")
 
+    # Built-in agents
+    console.print(f"1. [[bold]💬 Alita[/bold]] [cyan]Chat directly with LLM (no agent)[/cyan]")
+    console.print(f" [dim]Direct conversation with the model without agent configuration[/dim]")
+    console.print(f"2. [[bold]📊 Inventory[/bold]] [cyan]Knowledge graph builder agent[/cyan]")
+    console.print(f" [dim]Build inventories from connected toolkits (use --toolkit-config to add sources)[/dim]")
+
     agents_list = []
 
     # Load platform agents
@@ -173,12 +760,8 @@ def _select_agent_interactive(client, config) -> Optional[str]:
         except Exception as e:
             logger.debug(f"Failed to load {file_path}: {e}")
 
-
-
-        return None
-
-    # Display agents with numbers using rich
-    for i, agent in enumerate(agents_list, 1):
+    # Display agents with numbers using rich (starting from 3 since 1-2 are built-in)
+    for i, agent in enumerate(agents_list, 3):
         agent_type = "📦 Platform" if agent['type'] == 'platform' else "📁 Local"
         console.print(f"{i}. [[bold]{agent_type}[/bold]] [cyan]{agent['name']}[/cyan]")
         if agent['description']:
@@ -186,25 +769,33 @@ def _select_agent_interactive(client, config) -> Optional[str]:
 
     console.print(f"\n[dim]0. Cancel[/dim]")
 
-    # Get user selection
+    # Get user selection using styled input
     while True:
         try:
-            choice =
+            choice = styled_selection_input("Select agent number")
 
             if choice == '0':
                 return None
 
-
+            if choice == '1':
+                console.print(f"✓ [green]Selected:[/green] [bold]Alita[/bold]")
+                return '__direct__'
+
+            if choice == '2':
+                console.print(f"✓ [green]Selected:[/green] [bold]Inventory[/bold]")
+                return '__inventory__'
+
+            idx = int(choice) - 3  # Offset by 3 since 1-2 are built-in agents
             if 0 <= idx < len(agents_list):
                 selected = agents_list[idx]
-                console.print(f"
+                console.print(f"✓ [green]Selected:[/green] [bold]{selected['name']}[/bold]")
                 return selected['source']
             else:
-                console.print(f"[yellow]Invalid selection. Please enter a number between 0 and {len(agents_list)}[/yellow]")
+                console.print(f"[yellow]Invalid selection. Please enter a number between 0 and {len(agents_list) + 2}[/yellow]")
         except ValueError:
             console.print("[yellow]Please enter a valid number[/yellow]")
         except (KeyboardInterrupt, EOFError):
-            console.print("\n
+            console.print("\n[dim]Cancelled.[/dim]")
             return None
 
 
@@ -374,6 +965,10 @@ def agent_show(ctx, agent_source: str, version: Optional[str]):
         details.append("Temperature: ", style="bold")
         details.append(f"{agent_def['temperature']}\n", style="cyan")
 
+    if agent_def.get('persona'):
+        details.append("Persona: ", style="bold")
+        details.append(f"{agent_def['persona']}\n", style="cyan")
+
     panel = Panel(
         details,
         title=f"Local Agent: {agent_def.get('name', 'Unknown')}",
@@ -459,6 +1054,8 @@ def agent_show(ctx, agent_source: str, version: Optional[str]):
 @click.option('--version', help='Agent version (for platform agents)')
 @click.option('--toolkit-config', multiple=True, type=click.Path(exists=True),
               help='Toolkit configuration files (can specify multiple)')
+@click.option('--inventory', 'inventory_path', type=str,
+              help='Load inventory/knowledge graph from JSON file (e.g., alita.json or .alita/inventory/alita.json)')
 @click.option('--thread-id', help='Continue existing conversation thread')
 @click.option('--model', help='Override LLM model')
 @click.option('--temperature', type=float, help='Override temperature')
@@ -467,49 +1064,27 @@ def agent_show(ctx, agent_source: str, version: Optional[str]):
               help='Grant agent filesystem access to this directory')
 @click.option('--verbose', '-v', type=click.Choice(['quiet', 'default', 'debug']), default='default',
               help='Output verbosity level: quiet (final output only), default (tool calls + outputs), debug (all including LLM calls)')
+@click.option('--recursion-limit', type=int, default=50,
+              help='Maximum number of tool execution steps per turn')
 @click.pass_context
 def agent_chat(ctx, agent_source: Optional[str], version: Optional[str],
-               toolkit_config: tuple, thread_id: Optional[str],
+               toolkit_config: tuple, inventory_path: Optional[str], thread_id: Optional[str],
                model: Optional[str], temperature: Optional[float],
                max_tokens: Optional[int], work_dir: Optional[str],
-               verbose: str):
-    """
-    Start interactive chat with an agent.
-
-    If AGENT_SOURCE is not provided, shows an interactive menu to select from
-    available agents (both platform and local).
-
-    AGENT_SOURCE can be:
-    - Platform agent ID or name
-    - Path to local agent file
+               verbose: str, recursion_limit: Optional[int]):
+    """Start interactive chat with an agent.
 
+    \b
     Examples:
-
-
-
-
-
-
-
-
-
-
-        # With toolkit configurations
-        alita-cli agent chat my-agent \\
-            --toolkit-config jira-config.json \\
-            --toolkit-config github-config.json
-
-        # With filesystem access
-        alita-cli agent chat my-agent --dir ./workspace
-
-        # Continue previous conversation
-        alita-cli agent chat my-agent --thread-id abc123
-
-        # Quiet mode (hide tool calls and thinking)
-        alita-cli agent chat my-agent --verbose quiet
-
-        # Debug mode (show all including LLM calls)
-        alita-cli agent chat my-agent --verbose debug
+        alita chat                          # Interactive agent selection
+        alita chat my-agent                 # Chat with platform agent
+        alita chat ./agent.md               # Chat with local agent file
+        alita chat --inventory alita.json
+        alita chat my-agent --dir ./src
+        alita chat my-agent --thread-id abc123
+        alita chat my-agent -v quiet        # Hide tool calls
+        alita chat my-agent -v debug        # Show all LLM calls
+        alita chat __inventory__ --toolkit-config jira.json
     """
     formatter = ctx.obj['formatter']
     config = ctx.obj['config']
@@ -520,17 +1095,79 @@ def agent_chat(ctx, agent_source: Optional[str], version: Optional[str],
     debug_mode = verbose == 'debug'
 
     try:
-        # If no agent specified,
+        # If no agent specified, start with direct chat by default
         if not agent_source:
-            agent_source =
-            if not agent_source:
-                console.print("[yellow]No agent selected. Exiting.[/yellow]")
-                return
+            agent_source = '__direct__'
 
-        #
-
+        # Check for built-in agent modes
+        is_direct = agent_source == '__direct__'
+        is_inventory = agent_source == '__inventory__'
+        is_builtin = is_direct or is_inventory
+        is_local = not is_builtin and Path(agent_source).exists()
 
-
+        # Get defaults from config
+        default_model = config.default_model or 'gpt-4o'
+        default_temperature = config.default_temperature if config.default_temperature is not None else 0.1
+        default_max_tokens = config.default_max_tokens or 4096
+
+        # Initialize variables for dynamic updates
+        current_model = model
+        current_temperature = temperature
+        current_max_tokens = max_tokens
+        added_mcp_configs = []
+        added_toolkit_configs = list(toolkit_config) if toolkit_config else []
+        mcp_session_manager = None
+        llm = None
+        agent_executor = None
+        agent_def = {}
+        filesystem_tools = None
+        terminal_tools = None
+        planning_tools = None
+        plan_state = None
+
+        # Handle --inventory option: add inventory toolkit config at startup
+        if inventory_path:
+            inventory_config = _build_inventory_config(inventory_path, work_dir)
+            if inventory_config:
+                added_toolkit_configs.append(inventory_config)
+                console.print(f"[dim]✓ Loading inventory: {inventory_config['toolkit_name']} ({inventory_config['graph_path']})[/dim]")
+            else:
+                console.print(f"[yellow]Warning: Inventory file not found: {inventory_path}[/yellow]")
+                console.print("[dim]Searched in current directory and .alita/inventory/[/dim]")
+
+        # Approval mode: 'always' (confirm each tool), 'auto' (no confirmation), 'yolo' (no safety checks)
+        approval_mode = 'always'
+        allowed_directories = [work_dir] if work_dir else []  # Track allowed directories for /dir command
+        current_agent_file = agent_source if is_local else None  # Track agent file for /reload command
+
+        if is_direct:
+            # Direct chat mode - no agent, just LLM with Alita instructions
+            agent_name = "Alita"
+            agent_type = "Direct LLM"
+            alita_prompt = _get_alita_system_prompt(config)
+            agent_def = {
+                'model': model or default_model,
+                'temperature': temperature if temperature is not None else default_temperature,
+                'max_tokens': max_tokens or default_max_tokens,
+                'system_prompt': alita_prompt
+            }
+        elif is_inventory:
+            # Inventory agent mode - knowledge graph builder with inventory toolkit
+            agent_name = "Inventory"
+            agent_type = "Built-in Agent"
+            inventory_prompt = _get_inventory_system_prompt(config)
+            agent_def = {
+                'name': 'inventory-agent',
+                'model': model or default_model,
+                'temperature': temperature if temperature is not None else 0.3,
+                'max_tokens': max_tokens or default_max_tokens,
+                'system_prompt': inventory_prompt,
+                # Include inventory toolkit by default
+                'toolkit_configs': [
+                    {'type': 'inventory', 'graph_path': './knowledge_graph.json'}
+                ]
+            }
+        elif is_local:
             agent_def = load_agent_definition(agent_source)
             agent_name = agent_def.get('name', Path(agent_source).stem)
             agent_type = "Local Agent"
@@ -551,32 +1188,79 @@ def agent_chat(ctx, agent_source: Optional[str], version: Optional[str],
|
|
|
551
1188
|
agent_name = agent['name']
|
|
552
1189
|
agent_type = "Platform Agent"
|
|
553
1190
|
|
|
1191
|
+
# Get model and temperature for welcome banner
|
|
1192
|
+
llm_model_display = current_model or agent_def.get('model', default_model)
|
|
1193
|
+
llm_temperature_display = current_temperature if current_temperature is not None else agent_def.get('temperature', default_temperature)
|
|
1194
|
+
|
|
554
1195
|
# Print nice welcome banner
|
|
555
|
-
print_welcome(agent_name,
|
|
1196
|
+
print_welcome(agent_name, llm_model_display, llm_temperature_display, approval_mode)
|
|
556
1197
|
|
|
557
1198
|
# Initialize conversation
|
|
558
1199
|
chat_history = []
|
|
559
1200
|
|
|
560
|
-
#
|
|
561
|
-
from
|
|
562
|
-
|
|
1201
|
+
# Initialize session for persistence (memory + plan)
|
|
1202
|
+
from .tools import generate_session_id, create_session_memory, save_session_metadata, to_portable_path
|
|
1203
|
+
current_session_id = generate_session_id()
|
|
1204
|
+
plan_state = {'session_id': current_session_id}
|
|
1205
|
+
|
|
1206
|
+
# Create persistent memory for agent (stored in session directory)
|
|
1207
|
+
memory = create_session_memory(current_session_id)
|
|
1208
|
+
|
|
1209
|
+
# Save session metadata with agent source for session resume
|
|
1210
|
+
agent_source_portable = to_portable_path(current_agent_file) if current_agent_file else None
|
|
1211
|
+
# Filter out transient inventory configs (dicts) - only save file paths
|
|
1212
|
+
serializable_toolkit_configs = [tc for tc in added_toolkit_configs if isinstance(tc, str)]
|
|
1213
|
+
# Extract inventory graph path if present
|
|
1214
|
+
inventory_graph = None
|
|
1215
|
+
for tc in added_toolkit_configs:
|
|
1216
|
+
if isinstance(tc, dict) and tc.get('type') == 'inventory':
|
|
1217
|
+
inventory_graph = tc.get('graph_path')
|
|
1218
|
+
break
|
|
1219
|
+
save_session_metadata(current_session_id, {
|
|
1220
|
+
'agent_name': agent_name,
|
|
1221
|
+
'agent_type': agent_type if 'agent_type' in dir() else 'Direct LLM',
|
|
1222
|
+
'agent_source': agent_source_portable,
|
|
1223
|
+
'model': llm_model_display,
|
|
1224
|
+
'temperature': llm_temperature_display,
|
|
1225
|
+
'work_dir': work_dir,
|
|
1226
|
+
'is_direct': is_direct,
|
|
1227
|
+
'is_local': is_local,
|
|
1228
|
+
'is_inventory': is_inventory,
|
|
1229
|
+
'added_toolkit_configs': serializable_toolkit_configs,
|
|
1230
|
+
'inventory_graph': inventory_graph,
|
|
1231
|
+
'added_mcps': [m if isinstance(m, str) else m.get('name') for m in agent_def.get('mcps', [])],
|
|
1232
|
+
})
|
|
1233
|
+
console.print(f"[dim]Session: {current_session_id}[/dim]")
|
|
1234
|
+
|
|
1235
|
+
# Initialize context manager for chat history management
|
|
1236
|
+
context_config = config.context_management
|
|
1237
|
+
ctx_manager = CLIContextManager(
|
|
1238
|
+
session_id=current_session_id,
|
|
1239
|
+
max_context_tokens=context_config.get('max_context_tokens', 8000),
|
|
1240
|
+
preserve_recent=context_config.get('preserve_recent_messages', 5),
|
|
1241
|
+
pruning_method=context_config.get('pruning_method', 'oldest_first'),
|
|
1242
|
+
enable_summarization=context_config.get('enable_summarization', True),
|
|
1243
|
+
summary_trigger_ratio=context_config.get('summary_trigger_ratio', 0.8),
|
|
1244
|
+
summaries_limit=context_config.get('summaries_limit_count', 5),
|
|
1245
|
+
llm=None # Will be set after LLM creation
|
|
1246
|
+
)
|
|
1247
|
+
|
|
1248
|
+
# Purge old sessions on startup (cleanup task)
|
|
1249
|
+
try:
|
|
1250
|
+
purge_context_sessions(
|
|
1251
|
+
sessions_dir=config.sessions_dir,
|
|
1252
|
+
max_age_days=context_config.get('session_max_age_days', 30),
|
|
1253
|
+
max_sessions=context_config.get('max_sessions', 100)
|
|
1254
|
+
)
|
|
1255
|
+
except Exception as e:
|
|
1256
|
+
logger.debug(f"Session cleanup failed: {e}")
|
|
563
1257
|
|
|
564
1258
|
# Create agent executor
|
|
565
|
-
if is_local:
|
|
566
|
-
# Display configuration
|
|
567
|
-
llm_model_display = model or agent_def.get('model', 'gpt-4o')
|
|
568
|
-
llm_temperature_display = temperature if temperature is not None else agent_def.get('temperature', 0.7)
|
|
569
|
-
console.print()
|
|
570
|
-
console.print(f"✓ [green]Using model:[/green] [bold]{llm_model_display}[/bold]")
|
|
571
|
-
console.print(f"✓ [green]Temperature:[/green] [bold]{llm_temperature_display}[/bold]")
|
|
572
|
-
if agent_def.get('tools'):
|
|
573
|
-
console.print(f"✓ [green]Tools:[/green] [bold]{', '.join(agent_def['tools'])}[/bold]")
|
|
574
|
-
console.print()
|
|
575
|
-
|
|
1259
|
+
if is_direct or is_local or is_inventory:
|
|
576
1260
|
# Setup local agent executor (handles all config, tools, MCP, etc.)
|
|
577
1261
|
try:
|
|
578
|
-
agent_executor, mcp_session_manager, llm, llm_model, filesystem_tools = _setup_local_agent_executor(
|
|
579
|
-
client, agent_def,
|
|
1262
|
+
agent_executor, mcp_session_manager, llm, llm_model, filesystem_tools, terminal_tools, planning_tools = _setup_local_agent_executor(
|
|
1263
|
+
client, agent_def, tuple(added_toolkit_configs), config, current_model, current_temperature, current_max_tokens, memory, work_dir, plan_state
|
|
580
1264
|
)
|
|
581
1265
|
except Exception:
|
|
582
1266
|
return
|
|
@@ -606,23 +1290,53 @@ def agent_chat(ctx, agent_source: Optional[str], version: Optional[str],
|
|
|
606
1290
|
)
|
|
607
1291
|
llm = None # Platform agents don't use direct LLM
|
|
608
1292
|
|
|
1293
|
+
# Set LLM on context manager for summarization
|
|
1294
|
+
if llm is not None:
|
|
1295
|
+
ctx_manager.llm = llm
|
|
1296
|
+
|
|
1297
|
+
# Initialize input handler for readline support
|
|
1298
|
+
input_handler = get_input_handler()
|
|
1299
|
+
|
|
1300
|
+
# Set up toolkit names callback for tab completion
|
|
1301
|
+
from .input_handler import set_toolkit_names_callback, set_inventory_files_callback
|
|
1302
|
+
set_toolkit_names_callback(lambda: _list_available_toolkits(config))
|
|
1303
|
+
|
|
1304
|
+
# Set up inventory files callback for /inventory tab completion
|
|
1305
|
+
set_inventory_files_callback(lambda: _get_inventory_json_files(allowed_directories[0] if allowed_directories else None))
|
|
1306
|
+
|
|
609
1307
|
# Interactive chat loop
|
|
610
1308
|
while True:
|
|
611
1309
|
try:
|
|
612
|
-
#
|
|
613
|
-
|
|
614
|
-
|
|
1310
|
+
# Get context info for the UI indicator
|
|
1311
|
+
context_info = ctx_manager.get_context_info()
|
|
1312
|
+
|
|
1313
|
+
# Get input with styled prompt (prompt is part of input() for proper readline handling)
|
|
1314
|
+
user_input = styled_input(context_info=context_info).strip()
|
|
615
1315
|
|
|
616
1316
|
if not user_input:
|
|
617
1317
|
continue
|
|
618
1318
|
|
|
619
1319
|
# Handle commands
|
|
620
1320
|
if user_input.lower() in ['exit', 'quit']:
|
|
1321
|
+
# Save final session state before exiting
|
|
1322
|
+
try:
|
|
1323
|
+
from .tools import update_session_metadata, to_portable_path
|
|
1324
|
+
update_session_metadata(current_session_id, {
|
|
1325
|
+
'agent_source': to_portable_path(current_agent_file) if current_agent_file else None,
|
|
1326
|
+
'model': current_model or llm_model_display,
|
|
1327
|
+
'temperature': current_temperature if current_temperature is not None else llm_temperature_display,
|
|
1328
|
+
'allowed_directories': allowed_directories,
|
|
1329
|
+
'added_toolkit_configs': list(added_toolkit_configs),
|
|
1330
|
+
'added_mcps': [m if isinstance(m, str) else m.get('name') for m in agent_def.get('mcps', [])],
|
|
1331
|
+
})
|
|
1332
|
+
except Exception as e:
|
|
1333
|
+
logger.debug(f"Failed to save session state on exit: {e}")
|
|
621
1334
|
console.print("\n[bold cyan]👋 Goodbye![/bold cyan]\n")
|
|
622
1335
|
break
|
|
623
1336
|
|
|
624
1337
|
if user_input == '/clear':
|
|
625
1338
|
chat_history = []
|
|
1339
|
+
ctx_manager.clear()
|
|
626
1340
|
console.print("[green]✓ Conversation history cleared.[/green]")
|
|
627
1341
|
continue
|
|
628
1342
|
|
|
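The context manager configured earlier in this hunk combines a token budget (`max_context_tokens`), a `preserve_recent_messages` floor, and an `oldest_first` pruning method; `/clear` resets it alongside `chat_history`. The sketch below illustrates the idea behind that pruning strategy only. The four-characters-per-token estimate and the function names are assumptions for the sketch, not the estimator in `alita_sdk/cli/context`.

```python
# Toy illustration of "oldest_first" pruning under a token budget.
def estimate_tokens(text: str) -> int:
    # Crude heuristic: roughly four characters per token (assumption for the sketch).
    return max(1, len(text) // 4)

def prune_oldest_first(messages, max_context_tokens=8000, preserve_recent=5):
    """Drop the oldest messages until the history fits the budget,
    but never drop the last `preserve_recent` messages."""
    kept = list(messages)

    def total(msgs):
        return sum(estimate_tokens(m["content"]) for m in msgs)

    while len(kept) > preserve_recent and total(kept) > max_context_tokens:
        kept.pop(0)  # oldest message goes first
    return kept

history = [{"role": "user", "content": "x" * 20000},
           {"role": "assistant", "content": "short answer"},
           {"role": "user", "content": "follow-up"}]
print(len(prune_oldest_first(history, max_context_tokens=100, preserve_recent=2)))  # -> 2
```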
@@ -635,7 +1349,8 @@ def agent_chat(ctx, agent_source: Optional[str], version: Optional[str],
|
|
|
635
1349
|
role = msg.get('role', 'unknown')
|
|
636
1350
|
content = msg.get('content', '')
|
|
637
1351
|
role_color = 'blue' if role == 'user' else 'green'
|
|
638
|
-
|
|
1352
|
+
included_marker = "" if ctx_manager.is_message_included(i - 1) else " [dim](pruned)[/dim]"
|
|
1353
|
+
console.print(f"\n[bold {role_color}]{i}. {role.upper()}:[/bold {role_color}] {content[:100]}...{included_marker}")
|
|
639
1354
|
continue
|
|
640
1355
|
|
|
641
1356
|
if user_input == '/save':
|
|
@@ -651,16 +1366,895 @@ def agent_chat(ctx, agent_source: Optional[str], version: Optional[str],
|
|
|
651
1366
|
print_help()
|
|
652
1367
|
continue
|
|
653
1368
|
|
|
654
|
-
#
|
|
655
|
-
if
|
|
656
|
-
|
|
657
|
-
|
|
1369
|
+
# /model command - switch model
|
|
1370
|
+
if user_input == '/model':
|
|
1371
|
+
if not (is_direct or is_local):
|
|
1372
|
+
console.print("[yellow]Model switching is only available for local agents and direct chat.[/yellow]")
|
|
1373
|
+
continue
|
|
1374
|
+
|
|
1375
|
+
selected_model = _select_model_interactive(client)
|
|
1376
|
+
if selected_model:
|
|
1377
|
+
current_model = selected_model['name']
|
|
1378
|
+
agent_def['model'] = current_model
|
|
1379
|
+
|
|
1380
|
+
# Recreate LLM and agent executor - use session memory to preserve history
|
|
1381
|
+
from .tools import create_session_memory, update_session_metadata
|
|
1382
|
+
memory = create_session_memory(current_session_id)
|
|
1383
|
+
try:
|
|
1384
|
+
agent_executor, mcp_session_manager, llm, llm_model, filesystem_tools, terminal_tools, planning_tools = _setup_local_agent_executor(
|
|
1385
|
+
client, agent_def, tuple(added_toolkit_configs), config, current_model, current_temperature, current_max_tokens, memory, allowed_directories, plan_state
|
|
1386
|
+
)
|
|
1387
|
+
# Persist model change to session
|
|
1388
|
+
update_session_metadata(current_session_id, {
|
|
1389
|
+
'model': current_model,
|
|
1390
|
+
'temperature': current_temperature if current_temperature is not None else agent_def.get('temperature', 0.7)
|
|
1391
|
+
})
|
|
1392
|
+
console.print(Panel(
|
|
1393
|
+
f"[cyan]ℹ Model switched to [bold]{current_model}[/bold]. Agent state reset, chat history preserved.[/cyan]",
|
|
1394
|
+
border_style="cyan",
|
|
1395
|
+
box=box.ROUNDED
|
|
1396
|
+
))
|
|
1397
|
+
except Exception as e:
|
|
1398
|
+
console.print(f"[red]Error switching model: {e}[/red]")
|
|
1399
|
+
continue
|
|
1400
|
+
|
|
1401
|
+
# /reload command - reload agent definition from file
|
|
1402
|
+
if user_input == '/reload':
|
|
1403
|
+
if not is_local:
|
|
1404
|
+
if is_direct or is_inventory:
|
|
1405
|
+
console.print("[yellow]Cannot reload built-in agent mode - no agent file to reload.[/yellow]")
|
|
1406
|
+
else:
|
|
1407
|
+
console.print("[yellow]Reload is only available for local agents (file-based).[/yellow]")
|
|
1408
|
+
continue
|
|
1409
|
+
|
|
1410
|
+
if not current_agent_file or not Path(current_agent_file).exists():
|
|
1411
|
+
console.print("[red]Agent file not found. Cannot reload.[/red]")
|
|
1412
|
+
continue
|
|
1413
|
+
|
|
1414
|
+
try:
|
|
1415
|
+
# Reload agent definition from file
|
|
1416
|
+
new_agent_def = load_agent_definition(current_agent_file)
|
|
1417
|
+
|
|
1418
|
+
# Preserve runtime additions (MCPs, tools added via commands)
|
|
1419
|
+
if 'mcps' in agent_def and agent_def['mcps']:
|
|
1420
|
+
# Merge MCPs: file MCPs + runtime added MCPs
|
|
1421
|
+
file_mcps = new_agent_def.get('mcps', [])
|
|
1422
|
+
for mcp in agent_def['mcps']:
|
|
1423
|
+
mcp_name = mcp if isinstance(mcp, str) else mcp.get('name')
|
|
1424
|
+
file_mcp_names = [m if isinstance(m, str) else m.get('name') for m in file_mcps]
|
|
1425
|
+
if mcp_name not in file_mcp_names:
|
|
1426
|
+
file_mcps.append(mcp)
|
|
1427
|
+
new_agent_def['mcps'] = file_mcps
|
|
1428
|
+
|
|
1429
|
+
# Update agent_def with new values (preserving model/temp overrides)
|
|
1430
|
+
old_system_prompt = agent_def.get('system_prompt', '')
|
|
1431
|
+
new_system_prompt = new_agent_def.get('system_prompt', '')
|
|
1432
|
+
|
|
1433
|
+
agent_def.update(new_agent_def)
|
|
1434
|
+
|
|
1435
|
+
# Restore runtime overrides
|
|
1436
|
+
if current_model:
|
|
1437
|
+
agent_def['model'] = current_model
|
|
1438
|
+
if current_temperature is not None:
|
|
1439
|
+
agent_def['temperature'] = current_temperature
|
|
1440
|
+
if current_max_tokens:
|
|
1441
|
+
agent_def['max_tokens'] = current_max_tokens
|
|
1442
|
+
|
|
1443
|
+
# Recreate agent executor with reloaded definition
|
|
1444
|
+
from .tools import create_session_memory
|
|
1445
|
+
memory = create_session_memory(current_session_id)
|
|
1446
|
+
agent_executor, mcp_session_manager, llm, llm_model, filesystem_tools, terminal_tools, planning_tools = _setup_local_agent_executor(
|
|
1447
|
+
client, agent_def, tuple(added_toolkit_configs), config, current_model, current_temperature, current_max_tokens, memory, allowed_directories, plan_state
|
|
1448
|
+
)
|
|
1449
|
+
|
|
1450
|
+
# Show what changed
|
|
1451
|
+
prompt_changed = old_system_prompt != new_system_prompt
|
|
1452
|
+
agent_name = agent_def.get('name', Path(current_agent_file).stem)
|
|
1453
|
+
|
|
1454
|
+
if prompt_changed:
|
|
1455
|
+
console.print(Panel(
|
|
1456
|
+
f"[green]✓ Reloaded agent: [bold]{agent_name}[/bold][/green]\n"
|
|
1457
|
+
f"[dim]System prompt updated ({len(new_system_prompt)} chars)[/dim]",
|
|
1458
|
+
border_style="green",
|
|
1459
|
+
box=box.ROUNDED
|
|
1460
|
+
))
|
|
1461
|
+
else:
|
|
1462
|
+
console.print(Panel(
|
|
1463
|
+
f"[cyan]ℹ Reloaded agent: [bold]{agent_name}[/bold][/cyan]\n"
|
|
1464
|
+
f"[dim]No changes detected in system prompt[/dim]",
|
|
1465
|
+
border_style="cyan",
|
|
1466
|
+
box=box.ROUNDED
|
|
1467
|
+
))
|
|
1468
|
+
except Exception as e:
|
|
1469
|
+
console.print(f"[red]Error reloading agent: {e}[/red]")
|
|
1470
|
+
continue
|
|
1471
|
+
|
|
1472
|
+
# /add_mcp command - add MCP server
|
|
1473
|
+
if user_input == '/add_mcp':
|
|
1474
|
+
if not (is_direct or is_local or is_inventory):
|
|
1475
|
+
console.print("[yellow]Adding MCP is only available for local agents and built-in agents.[/yellow]")
|
|
1476
|
+
continue
|
|
1477
|
+
|
|
1478
|
+
selected_mcp = _select_mcp_interactive(config)
|
|
1479
|
+
if selected_mcp:
|
|
1480
|
+
mcp_name = selected_mcp['name']
|
|
1481
|
+
# Add MCP to agent definition
|
|
1482
|
+
if 'mcps' not in agent_def:
|
|
1483
|
+
agent_def['mcps'] = []
|
|
1484
|
+
if mcp_name not in [m if isinstance(m, str) else m.get('name') for m in agent_def.get('mcps', [])]:
|
|
1485
|
+
agent_def['mcps'].append(mcp_name)
|
|
1486
|
+
|
|
1487
|
+
# Recreate agent executor with new MCP - use session memory to preserve history
|
|
1488
|
+
from .tools import create_session_memory, update_session_metadata
|
|
1489
|
+
memory = create_session_memory(current_session_id)
|
|
1490
|
+
try:
|
|
1491
|
+
agent_executor, mcp_session_manager, llm, llm_model, filesystem_tools, terminal_tools, planning_tools = _setup_local_agent_executor(
|
|
1492
|
+
client, agent_def, tuple(added_toolkit_configs), config, current_model, current_temperature, current_max_tokens, memory, allowed_directories, plan_state
|
|
1493
|
+
)
|
|
1494
|
+
# Persist added MCPs to session
|
|
1495
|
+
update_session_metadata(current_session_id, {
|
|
1496
|
+
'added_mcps': [m if isinstance(m, str) else m.get('name') for m in agent_def.get('mcps', [])]
|
|
1497
|
+
})
|
|
1498
|
+
console.print(Panel(
|
|
1499
|
+
f"[cyan]ℹ Added MCP: [bold]{mcp_name}[/bold]. Agent state reset, chat history preserved.[/cyan]",
|
|
1500
|
+
border_style="cyan",
|
|
1501
|
+
box=box.ROUNDED
|
|
1502
|
+
))
|
|
1503
|
+
except Exception as e:
|
|
1504
|
+
console.print(f"[red]Error adding MCP: {e}[/red]")
|
|
1505
|
+
continue
|
|
1506
|
+
|
|
1507
|
+
# /add_toolkit command - add toolkit
|
|
1508
|
+
if user_input == '/add_toolkit' or user_input.startswith('/add_toolkit '):
|
|
1509
|
+
if not (is_direct or is_local or is_inventory):
|
|
1510
|
+
console.print("[yellow]Adding toolkit is only available for local agents and built-in agents.[/yellow]")
|
|
1511
|
+
continue
|
|
1512
|
+
|
|
1513
|
+
parts = user_input.split(maxsplit=1)
|
|
1514
|
+
if len(parts) == 2:
|
|
1515
|
+
# Direct toolkit selection by name
|
|
1516
|
+
toolkit_name_arg = parts[1].strip()
|
|
1517
|
+
selected_toolkit = _find_toolkit_by_name(config, toolkit_name_arg)
|
|
1518
|
+
if not selected_toolkit:
|
|
1519
|
+
console.print(f"[yellow]Toolkit '{toolkit_name_arg}' not found.[/yellow]")
|
|
1520
|
+
# Show available toolkits
|
|
1521
|
+
available = _list_available_toolkits(config)
|
|
1522
|
+
if available:
|
|
1523
|
+
console.print(f"[dim]Available toolkits: {', '.join(available)}[/dim]")
|
|
1524
|
+
continue
|
|
1525
|
+
else:
|
|
1526
|
+
# Interactive selection
|
|
1527
|
+
selected_toolkit = _select_toolkit_interactive(config)
|
|
1528
|
+
|
|
1529
|
+
if selected_toolkit:
|
|
1530
|
+
toolkit_name = selected_toolkit['name']
|
|
1531
|
+
toolkit_file = selected_toolkit['file']
|
|
1532
|
+
|
|
1533
|
+
# Add toolkit config path
|
|
1534
|
+
if toolkit_file not in added_toolkit_configs:
|
|
1535
|
+
added_toolkit_configs.append(toolkit_file)
|
|
1536
|
+
|
|
1537
|
+
# Recreate agent executor with new toolkit - use session memory to preserve history
|
|
1538
|
+
from .tools import create_session_memory, update_session_metadata
|
|
1539
|
+
memory = create_session_memory(current_session_id)
|
|
1540
|
+
try:
|
|
1541
|
+
agent_executor, mcp_session_manager, llm, llm_model, filesystem_tools, terminal_tools, planning_tools = _setup_local_agent_executor(
|
|
1542
|
+
client, agent_def, tuple(added_toolkit_configs), config, current_model, current_temperature, current_max_tokens, memory, allowed_directories, plan_state
|
|
1543
|
+
)
|
|
1544
|
+
# Persist added toolkits to session
|
|
1545
|
+
update_session_metadata(current_session_id, {
|
|
1546
|
+
'added_toolkit_configs': list(added_toolkit_configs)
|
|
1547
|
+
})
|
|
1548
|
+
console.print(Panel(
|
|
1549
|
+
f"[cyan]ℹ Added toolkit: [bold]{toolkit_name}[/bold]. Agent state reset, chat history preserved.[/cyan]",
|
|
1550
|
+
border_style="cyan",
|
|
1551
|
+
box=box.ROUNDED
|
|
1552
|
+
))
|
|
1553
|
+
except Exception as e:
|
|
1554
|
+
console.print(f"[red]Error adding toolkit: {e}[/red]")
|
|
1555
|
+
continue
|
|
1556
|
+
|
|
1557
|
+
# /rm_mcp command - remove MCP server
|
|
1558
|
+
if user_input == '/rm_mcp' or user_input.startswith('/rm_mcp '):
|
|
1559
|
+
if not (is_direct or is_local or is_inventory):
|
|
1560
|
+
console.print("[yellow]Removing MCP is only available for local agents and built-in agents.[/yellow]")
|
|
1561
|
+
continue
|
|
1562
|
+
|
|
1563
|
+
current_mcps = agent_def.get('mcps', [])
|
|
1564
|
+
if not current_mcps:
|
|
1565
|
+
console.print("[yellow]No MCP servers are currently loaded.[/yellow]")
|
|
1566
|
+
continue
|
|
1567
|
+
|
|
1568
|
+
# Get list of MCP names
|
|
1569
|
+
mcp_names = [m if isinstance(m, str) else m.get('name') for m in current_mcps]
|
|
1570
|
+
|
|
1571
|
+
parts = user_input.split(maxsplit=1)
|
|
1572
|
+
if len(parts) == 2:
|
|
1573
|
+
# Direct removal by name
|
|
1574
|
+
mcp_name_to_remove = parts[1].strip()
|
|
1575
|
+
if mcp_name_to_remove not in mcp_names:
|
|
1576
|
+
console.print(f"[yellow]MCP '{mcp_name_to_remove}' not found.[/yellow]")
|
|
1577
|
+
console.print(f"[dim]Loaded MCPs: {', '.join(mcp_names)}[/dim]")
|
|
1578
|
+
continue
|
|
1579
|
+
else:
|
|
1580
|
+
# Interactive selection
|
|
1581
|
+
console.print("\n🔌 [bold cyan]Remove MCP Server[/bold cyan]\n")
|
|
1582
|
+
for i, name in enumerate(mcp_names, 1):
|
|
1583
|
+
console.print(f" [bold]{i}[/bold]. {name}")
|
|
1584
|
+
console.print(f" [bold]0[/bold]. [dim]Cancel[/dim]")
|
|
1585
|
+
console.print()
|
|
1586
|
+
|
|
1587
|
+
try:
|
|
1588
|
+
choice = int(input("Select MCP to remove: ").strip())
|
|
1589
|
+
if choice == 0:
|
|
1590
|
+
continue
|
|
1591
|
+
if 1 <= choice <= len(mcp_names):
|
|
1592
|
+
mcp_name_to_remove = mcp_names[choice - 1]
|
|
1593
|
+
else:
|
|
1594
|
+
console.print("[yellow]Invalid selection.[/yellow]")
|
|
1595
|
+
continue
|
|
1596
|
+
except (ValueError, KeyboardInterrupt):
|
|
1597
|
+
continue
|
|
1598
|
+
|
|
1599
|
+
# Remove the MCP
|
|
1600
|
+
agent_def['mcps'] = [m for m in current_mcps if (m if isinstance(m, str) else m.get('name')) != mcp_name_to_remove]
|
|
1601
|
+
|
|
1602
|
+
# Recreate agent executor without the MCP
|
|
1603
|
+
from .tools import create_session_memory, update_session_metadata
|
|
1604
|
+
memory = create_session_memory(current_session_id)
|
|
1605
|
+
try:
|
|
1606
|
+
agent_executor, mcp_session_manager, llm, llm_model, filesystem_tools, terminal_tools, planning_tools = _setup_local_agent_executor(
|
|
1607
|
+
client, agent_def, tuple(added_toolkit_configs), config, current_model, current_temperature, current_max_tokens, memory, allowed_directories, plan_state
|
|
1608
|
+
)
|
|
1609
|
+
# Persist updated MCPs to session
|
|
1610
|
+
update_session_metadata(current_session_id, {
|
|
1611
|
+
'added_mcps': [m if isinstance(m, str) else m.get('name') for m in agent_def.get('mcps', [])]
|
|
1612
|
+
})
|
|
1613
|
+
console.print(Panel(
|
|
1614
|
+
f"[cyan]ℹ Removed MCP: [bold]{mcp_name_to_remove}[/bold]. Agent state reset, chat history preserved.[/cyan]",
|
|
1615
|
+
border_style="cyan",
|
|
1616
|
+
box=box.ROUNDED
|
|
1617
|
+
))
|
|
1618
|
+
except Exception as e:
|
|
1619
|
+
console.print(f"[red]Error removing MCP: {e}[/red]")
|
|
1620
|
+
continue
|
|
1621
|
+
|
|
1622
|
+
# /rm_toolkit command - remove toolkit
|
|
1623
|
+
if user_input == '/rm_toolkit' or user_input.startswith('/rm_toolkit '):
|
|
1624
|
+
if not (is_direct or is_local or is_inventory):
|
|
1625
|
+
console.print("[yellow]Removing toolkit is only available for local agents and built-in agents.[/yellow]")
|
|
1626
|
+
continue
|
|
1627
|
+
|
|
1628
|
+
if not added_toolkit_configs:
|
|
1629
|
+
console.print("[yellow]No toolkits are currently loaded.[/yellow]")
|
|
1630
|
+
continue
|
|
1631
|
+
|
|
1632
|
+
# Get toolkit names from config files
|
|
1633
|
+
toolkit_info = [] # List of (name, file_path)
|
|
1634
|
+
for toolkit_file in added_toolkit_configs:
|
|
1635
|
+
try:
|
|
1636
|
+
with open(toolkit_file, 'r') as f:
|
|
1637
|
+
tk_config = json.load(f)
|
|
1638
|
+
tk_name = tk_config.get('toolkit_name', Path(toolkit_file).stem)
|
|
1639
|
+
toolkit_info.append((tk_name, toolkit_file))
|
|
1640
|
+
except Exception:
|
|
1641
|
+
toolkit_info.append((Path(toolkit_file).stem, toolkit_file))
|
|
1642
|
+
|
|
1643
|
+
parts = user_input.split(maxsplit=1)
|
|
1644
|
+
if len(parts) == 2:
|
|
1645
|
+
# Direct removal by name
|
|
1646
|
+
toolkit_name_to_remove = parts[1].strip()
|
|
1647
|
+
matching = [(name, path) for name, path in toolkit_info if name == toolkit_name_to_remove]
|
|
1648
|
+
if not matching:
|
|
1649
|
+
console.print(f"[yellow]Toolkit '{toolkit_name_to_remove}' not found.[/yellow]")
|
|
1650
|
+
console.print(f"[dim]Loaded toolkits: {', '.join(name for name, _ in toolkit_info)}[/dim]")
|
|
1651
|
+
continue
|
|
1652
|
+
toolkit_file_to_remove = matching[0][1]
|
|
1653
|
+
else:
|
|
1654
|
+
# Interactive selection
|
|
1655
|
+
console.print("\n🔧 [bold cyan]Remove Toolkit[/bold cyan]\n")
|
|
1656
|
+
for i, (name, _) in enumerate(toolkit_info, 1):
|
|
1657
|
+
console.print(f" [bold]{i}[/bold]. {name}")
|
|
1658
|
+
console.print(f" [bold]0[/bold]. [dim]Cancel[/dim]")
|
|
1659
|
+
console.print()
|
|
1660
|
+
|
|
1661
|
+
try:
|
|
1662
|
+
choice = int(input("Select toolkit to remove: ").strip())
|
|
1663
|
+
if choice == 0:
|
|
1664
|
+
continue
|
|
1665
|
+
if 1 <= choice <= len(toolkit_info):
|
|
1666
|
+
toolkit_name_to_remove, toolkit_file_to_remove = toolkit_info[choice - 1]
|
|
1667
|
+
else:
|
|
1668
|
+
console.print("[yellow]Invalid selection.[/yellow]")
|
|
1669
|
+
continue
|
|
1670
|
+
except (ValueError, KeyboardInterrupt):
|
|
1671
|
+
continue
|
|
1672
|
+
|
|
1673
|
+
# Remove the toolkit
|
|
1674
|
+
added_toolkit_configs.remove(toolkit_file_to_remove)
|
|
1675
|
+
|
|
1676
|
+
# Recreate agent executor without the toolkit
|
|
1677
|
+
from .tools import create_session_memory, update_session_metadata
|
|
1678
|
+
memory = create_session_memory(current_session_id)
|
|
1679
|
+
try:
|
|
1680
|
+
agent_executor, mcp_session_manager, llm, llm_model, filesystem_tools, terminal_tools, planning_tools = _setup_local_agent_executor(
|
|
1681
|
+
client, agent_def, tuple(added_toolkit_configs), config, current_model, current_temperature, current_max_tokens, memory, allowed_directories, plan_state
|
|
1682
|
+
)
|
|
1683
|
+
# Persist updated toolkits to session
|
|
1684
|
+
update_session_metadata(current_session_id, {
|
|
1685
|
+
'added_toolkit_configs': list(added_toolkit_configs)
|
|
1686
|
+
})
|
|
1687
|
+
console.print(Panel(
|
|
1688
|
+
f"[cyan]ℹ Removed toolkit: [bold]{toolkit_name_to_remove}[/bold]. Agent state reset, chat history preserved.[/cyan]",
|
|
1689
|
+
border_style="cyan",
|
|
1690
|
+
box=box.ROUNDED
|
|
1691
|
+
))
|
|
1692
|
+
except Exception as e:
|
|
1693
|
+
console.print(f"[red]Error removing toolkit: {e}[/red]")
|
|
1694
|
+
continue
|
|
1695
|
+
|
|
1696
|
+
# /mode command - set approval mode
|
|
1697
|
+
if user_input == '/mode' or user_input.startswith('/mode '):
|
|
1698
|
+
parts = user_input.split(maxsplit=1)
|
|
1699
|
+
if len(parts) == 1:
|
|
1700
|
+
# Show current mode and options
|
|
1701
|
+
mode_info = {
|
|
1702
|
+
'always': ('yellow', 'Confirm before each tool execution'),
|
|
1703
|
+
'auto': ('green', 'Execute tools without confirmation'),
|
|
1704
|
+
'yolo': ('red', 'No confirmations, skip safety warnings')
|
|
1705
|
+
}
|
|
1706
|
+
console.print("\n🔧 [bold cyan]Approval Mode:[/bold cyan]\n")
|
|
1707
|
+
for mode_name, (color, desc) in mode_info.items():
|
|
1708
|
+
marker = "●" if mode_name == approval_mode else "○"
|
|
1709
|
+
console.print(f" [{color}]{marker}[/{color}] [bold]{mode_name}[/bold] - {desc}")
|
|
1710
|
+
console.print(f"\n[dim]Usage: /mode <always|auto|yolo>[/dim]")
|
|
1711
|
+
else:
|
|
1712
|
+
new_mode = parts[1].lower().strip()
|
|
1713
|
+
if new_mode in ['always', 'auto', 'yolo']:
|
|
1714
|
+
approval_mode = new_mode
|
|
1715
|
+
mode_colors = {'always': 'yellow', 'auto': 'green', 'yolo': 'red'}
|
|
1716
|
+
console.print(f"✓ [green]Mode set to[/green] [{mode_colors[new_mode]}][bold]{new_mode}[/bold][/{mode_colors[new_mode]}]")
|
|
1717
|
+
else:
|
|
1718
|
+
console.print(f"[yellow]Unknown mode: {new_mode}. Use: always, auto, or yolo[/yellow]")
|
|
1719
|
+
continue
|
|
1720
|
+
|
|
1721
|
+
# /dir command - manage allowed directories
|
|
1722
|
+
if user_input == '/dir' or user_input.startswith('/dir '):
|
|
1723
|
+
parts = user_input.split()
|
|
1724
|
+
|
|
1725
|
+
if len(parts) == 1:
|
|
1726
|
+
# /dir - list all allowed directories
|
|
1727
|
+
if allowed_directories:
|
|
1728
|
+
console.print("📁 [bold cyan]Allowed directories:[/bold cyan]")
|
|
1729
|
+
for i, d in enumerate(allowed_directories):
|
|
1730
|
+
marker = "●" if i == 0 else "○"
|
|
1731
|
+
label = " [dim](primary)[/dim]" if i == 0 else ""
|
|
1732
|
+
console.print(f" {marker} {d}{label}")
|
|
1733
|
+
else:
|
|
1734
|
+
console.print("[yellow]No directories allowed.[/yellow]")
|
|
1735
|
+
console.print("[dim]Usage: /dir [add|rm|remove] /path/to/directory[/dim]")
|
|
1736
|
+
continue
|
|
1737
|
+
|
|
1738
|
+
action = parts[1].lower()
|
|
1739
|
+
|
|
1740
|
+
# Handle /dir add /path or /dir /path (add is default)
|
|
1741
|
+
if action in ['add', 'rm', 'remove']:
|
|
1742
|
+
if len(parts) < 3:
|
|
1743
|
+
console.print(f"[yellow]Missing path. Usage: /dir {action} /path/to/directory[/yellow]")
|
|
1744
|
+
continue
|
|
1745
|
+
dir_path = parts[2]
|
|
1746
|
+
else:
|
|
1747
|
+
# /dir /path - default to add
|
|
1748
|
+
action = 'add'
|
|
1749
|
+
dir_path = parts[1]
|
|
1750
|
+
|
|
1751
|
+
dir_path = str(Path(dir_path).expanduser().resolve())
|
|
1752
|
+
|
|
1753
|
+
if action == 'add':
|
|
1754
|
+
if not Path(dir_path).exists():
|
|
1755
|
+
console.print(f"[red]Directory not found: {dir_path}[/red]")
|
|
1756
|
+
continue
|
|
1757
|
+
if not Path(dir_path).is_dir():
|
|
1758
|
+
console.print(f"[red]Not a directory: {dir_path}[/red]")
|
|
1759
|
+
continue
|
|
1760
|
+
|
|
1761
|
+
if dir_path in allowed_directories:
|
|
1762
|
+
console.print(f"[yellow]Directory already allowed: {dir_path}[/yellow]")
|
|
1763
|
+
continue
|
|
1764
|
+
|
|
1765
|
+
allowed_directories.append(dir_path)
|
|
1766
|
+
|
|
1767
|
+
# Recreate agent executor with updated directories
|
|
1768
|
+
if is_direct or is_local or is_inventory:
|
|
1769
|
+
from .tools import create_session_memory
|
|
1770
|
+
memory = create_session_memory(current_session_id)
|
|
1771
|
+
try:
|
|
1772
|
+
agent_executor, mcp_session_manager, llm, llm_model, filesystem_tools, terminal_tools, planning_tools = _setup_local_agent_executor(
|
|
1773
|
+
client, agent_def, tuple(added_toolkit_configs), config, current_model, current_temperature, current_max_tokens, memory, allowed_directories, plan_state
|
|
1774
|
+
)
|
|
1775
|
+
console.print(Panel(
|
|
1776
|
+
f"[cyan]✓ Added directory: [bold]{dir_path}[/bold]\n Total allowed: {len(allowed_directories)}[/cyan]",
|
|
1777
|
+
border_style="cyan",
|
|
1778
|
+
box=box.ROUNDED
|
|
1779
|
+
))
|
|
1780
|
+
except Exception as e:
|
|
1781
|
+
allowed_directories.remove(dir_path) # Rollback
|
|
1782
|
+
console.print(f"[red]Error adding directory: {e}[/red]")
|
|
1783
|
+
else:
|
|
1784
|
+
console.print("[yellow]Directory mounting is only available for local agents and built-in agents.[/yellow]")
|
|
1785
|
+
|
|
1786
|
+
elif action in ['rm', 'remove']:
|
|
1787
|
+
if dir_path not in allowed_directories:
|
|
1788
|
+
console.print(f"[yellow]Directory not in allowed list: {dir_path}[/yellow]")
|
|
1789
|
+
if allowed_directories:
|
|
1790
|
+
console.print("[dim]Currently allowed:[/dim]")
|
|
1791
|
+
for d in allowed_directories:
|
|
1792
|
+
console.print(f"[dim] - {d}[/dim]")
|
|
1793
|
+
continue
|
|
1794
|
+
|
|
1795
|
+
if len(allowed_directories) == 1:
|
|
1796
|
+
console.print("[yellow]Cannot remove the last directory. Use /dir add first to add another.[/yellow]")
|
|
1797
|
+
continue
|
|
1798
|
+
|
|
1799
|
+
allowed_directories.remove(dir_path)
|
|
1800
|
+
|
|
1801
|
+
# Recreate agent executor with updated directories
|
|
1802
|
+
if is_direct or is_local or is_inventory:
|
|
1803
|
+
from .tools import create_session_memory
|
|
1804
|
+
memory = create_session_memory(current_session_id)
|
|
1805
|
+
try:
|
|
1806
|
+
agent_executor, mcp_session_manager, llm, llm_model, filesystem_tools, terminal_tools, planning_tools = _setup_local_agent_executor(
|
|
1807
|
+
client, agent_def, tuple(added_toolkit_configs), config, current_model, current_temperature, current_max_tokens, memory, allowed_directories, plan_state
|
|
1808
|
+
)
|
|
1809
|
+
console.print(Panel(
|
|
1810
|
+
f"[cyan]✓ Removed directory: [bold]{dir_path}[/bold]\n Remaining: {len(allowed_directories)}[/cyan]",
|
|
1811
|
+
border_style="cyan",
|
|
1812
|
+
box=box.ROUNDED
|
|
1813
|
+
))
|
|
1814
|
+
except Exception as e:
|
|
1815
|
+
allowed_directories.append(dir_path) # Rollback
|
|
1816
|
+
console.print(f"[red]Error removing directory: {e}[/red]")
|
|
1817
|
+
else:
|
|
1818
|
+
console.print("[yellow]Directory mounting is only available for local agents and built-in agents.[/yellow]")
|
|
1819
|
+
continue
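The `/dir add` path above normalizes the argument with `expanduser().resolve()`, rejects missing or non-directory paths, and de-duplicates before extending `allowed_directories`. A compact standalone version of that validation, kept as a sketch rather than the CLI's code, is shown below.

```python
# Standalone sketch of the /dir add validation above: normalize, verify, de-duplicate.
from pathlib import Path
from typing import List, Optional

def add_allowed_directory(allowed: List[str], raw_path: str) -> Optional[str]:
    """Return the normalized path if it was added, otherwise None (reason printed)."""
    path = Path(raw_path).expanduser().resolve()
    if not path.exists():
        print(f"Directory not found: {path}")
        return None
    if not path.is_dir():
        print(f"Not a directory: {path}")
        return None
    normalized = str(path)
    if normalized in allowed:
        print(f"Directory already allowed: {normalized}")
        return None
    allowed.append(normalized)
    return normalized
```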
|
|
1820
|
+
|
|
1821
|
+
# /inventory command - load inventory/knowledge graph from path
|
|
1822
|
+
if user_input == '/inventory' or user_input.startswith('/inventory '):
|
|
1823
|
+
if not (is_direct or is_local or is_inventory):
|
|
1824
|
+
console.print("[yellow]Loading inventory is only available for local agents and built-in agents.[/yellow]")
|
|
1825
|
+
continue
|
|
1826
|
+
|
|
1827
|
+
parts = user_input.split(maxsplit=1)
|
|
1828
|
+
if len(parts) == 1:
|
|
1829
|
+
# Show current inventory and available files
|
|
1830
|
+
current_inventory = None
|
|
1831
|
+
for tc in added_toolkit_configs:
|
|
1832
|
+
if isinstance(tc, dict) and tc.get('type') == 'inventory':
|
|
1833
|
+
current_inventory = tc.get('graph_path')
|
|
1834
|
+
break
|
|
1835
|
+
elif isinstance(tc, str):
|
|
1836
|
+
try:
|
|
1837
|
+
with open(tc, 'r') as f:
|
|
1838
|
+
cfg = json.load(f)
|
|
1839
|
+
if cfg.get('type') == 'inventory':
|
|
1840
|
+
current_inventory = cfg.get('graph_path')
|
|
1841
|
+
break
|
|
1842
|
+
except Exception:
|
|
1843
|
+
pass
|
|
1844
|
+
|
|
1845
|
+
if current_inventory:
|
|
1846
|
+
console.print(f"📊 [bold cyan]Current inventory:[/bold cyan] {current_inventory}")
|
|
1847
|
+
else:
|
|
1848
|
+
console.print("[yellow]No inventory loaded.[/yellow]")
|
|
1849
|
+
|
|
1850
|
+
# Show available .json files
|
|
1851
|
+
primary_dir = allowed_directories[0] if allowed_directories else None
|
|
1852
|
+
available = _get_inventory_json_files(primary_dir)
|
|
1853
|
+
if available:
|
|
1854
|
+
console.print(f"[dim]Available files: {', '.join(available[:10])}")
|
|
1855
|
+
if len(available) > 10:
|
|
1856
|
+
console.print(f"[dim] ... and {len(available) - 10} more[/dim]")
|
|
1857
|
+
console.print("[dim]Usage: /inventory <path/to/graph.json>[/dim]")
|
|
1858
|
+
else:
|
|
1859
|
+
inventory_path = parts[1].strip()
|
|
1860
|
+
|
|
1861
|
+
# Build inventory config from path
|
|
1862
|
+
primary_dir = allowed_directories[0] if allowed_directories else None
|
|
1863
|
+
inventory_config = _build_inventory_config(inventory_path, primary_dir)
|
|
1864
|
+
if not inventory_config:
|
|
1865
|
+
console.print(f"[red]Inventory file not found: {inventory_path}[/red]")
|
|
1866
|
+
# Show search locations
|
|
1867
|
+
console.print("[dim]Searched in:[/dim]")
|
|
1868
|
+
console.print(f"[dim] - {Path.cwd()}[/dim]")
|
|
1869
|
+
console.print(f"[dim] - {Path.cwd() / '.alita' / 'inventory'}[/dim]")
|
|
1870
|
+
if primary_dir:
|
|
1871
|
+
console.print(f"[dim] - {primary_dir}[/dim]")
|
|
1872
|
+
console.print(f"[dim] - {Path(primary_dir) / '.alita' / 'inventory'}[/dim]")
|
|
1873
|
+
continue
|
|
1874
|
+
|
|
1875
|
+
# Remove any existing inventory toolkit configs
|
|
1876
|
+
new_toolkit_configs = []
|
|
1877
|
+
removed_inventory = None
|
|
1878
|
+
for tc in added_toolkit_configs:
|
|
1879
|
+
if isinstance(tc, dict) and tc.get('type') == 'inventory':
|
|
1880
|
+
removed_inventory = tc.get('toolkit_name', 'inventory')
|
|
1881
|
+
continue # Skip existing inventory
|
|
1882
|
+
elif isinstance(tc, str):
|
|
1883
|
+
try:
|
|
1884
|
+
with open(tc, 'r') as f:
|
|
1885
|
+
cfg = json.load(f)
|
|
1886
|
+
if cfg.get('type') == 'inventory':
|
|
1887
|
+
removed_inventory = cfg.get('toolkit_name', Path(tc).stem)
|
|
1888
|
+
continue # Skip existing inventory
|
|
1889
|
+
except Exception:
|
|
1890
|
+
pass
|
|
1891
|
+
new_toolkit_configs.append(tc)
|
|
1892
|
+
|
|
1893
|
+
# Add new inventory config
|
|
1894
|
+
new_toolkit_configs.append(inventory_config)
|
|
1895
|
+
added_toolkit_configs = new_toolkit_configs
|
|
1896
|
+
|
|
1897
|
+
# Recreate agent executor with new inventory
|
|
1898
|
+
from .tools import create_session_memory, update_session_metadata
|
|
1899
|
+
memory = create_session_memory(current_session_id)
|
|
1900
|
+
try:
|
|
1901
|
+
agent_executor, mcp_session_manager, llm, llm_model, filesystem_tools, terminal_tools, planning_tools = _setup_local_agent_executor(
|
|
1902
|
+
client, agent_def, tuple(added_toolkit_configs), config, current_model, current_temperature, current_max_tokens, memory, allowed_directories, plan_state
|
|
1903
|
+
)
|
|
1904
|
+
# Persist updated toolkits to session (exclude transient inventory configs)
|
|
1905
|
+
serializable_configs = [tc for tc in added_toolkit_configs if isinstance(tc, str)]
|
|
1906
|
+
update_session_metadata(current_session_id, {
|
|
1907
|
+
'added_toolkit_configs': serializable_configs,
|
|
1908
|
+
'inventory_graph': inventory_config.get('graph_path') # Save just the graph path
|
|
1909
|
+
})
|
|
1910
|
+
|
|
1911
|
+
toolkit_name = inventory_config['toolkit_name']
|
|
1912
|
+
graph_path = inventory_config['graph_path']
|
|
1913
|
+
if removed_inventory:
|
|
1914
|
+
console.print(Panel(
|
|
1915
|
+
f"[cyan]ℹ Replaced inventory [bold]{removed_inventory}[/bold] with [bold]{toolkit_name}[/bold]\n"
|
|
1916
|
+
f" Graph: {graph_path}[/cyan]",
|
|
1917
|
+
border_style="cyan",
|
|
1918
|
+
box=box.ROUNDED
|
|
1919
|
+
))
|
|
1920
|
+
else:
|
|
1921
|
+
console.print(Panel(
|
|
1922
|
+
f"[cyan]✓ Loaded inventory: [bold]{toolkit_name}[/bold]\n"
|
|
1923
|
+
f" Graph: {graph_path}[/cyan]",
|
|
1924
|
+
border_style="cyan",
|
|
1925
|
+
box=box.ROUNDED
|
|
1926
|
+
))
|
|
1927
|
+
except Exception as e:
|
|
1928
|
+
console.print(f"[red]Error loading inventory: {e}[/red]")
|
|
1929
|
+
continue
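`/inventory <path>` above resolves the graph file against several locations (judging by the "Searched in" hints: the current directory, `./.alita/inventory`, the primary allowed directory, and its `.alita/inventory` subfolder) and wraps the result in a config with `type`, `toolkit_name`, and `graph_path`. The sketch below approximates `_build_inventory_config` from those hints; the search order and `toolkit_name` convention are assumptions and the real helper may differ.

```python
# Approximation of the inventory config lookup hinted at by the "Searched in" messages above.
from pathlib import Path
from typing import Optional

def build_inventory_config(graph_path: str, primary_dir: Optional[str] = None) -> Optional[dict]:
    candidates = [Path.cwd(), Path.cwd() / ".alita" / "inventory"]
    if primary_dir:
        candidates += [Path(primary_dir), Path(primary_dir) / ".alita" / "inventory"]
    requested = Path(graph_path).expanduser()
    for base in candidates:
        resolved = requested if requested.is_absolute() else base / requested
        if resolved.is_file():
            return {
                'type': 'inventory',
                'toolkit_name': resolved.stem,           # assumed naming convention
                'graph_path': str(resolved.resolve()),
            }
    return None  # caller prints the searched locations and continues
```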
|
|
1930
|
+
|
|
1931
|
+
# /session command - list or resume sessions
|
|
1932
|
+
if user_input == '/session' or user_input.startswith('/session '):
|
|
1933
|
+
from .tools import list_sessions, PlanState
|
|
1934
|
+
parts = user_input.split(maxsplit=2)
|
|
1935
|
+
|
|
1936
|
+
if len(parts) == 1 or parts[1] == 'list':
|
|
1937
|
+
# List all sessions with plans
|
|
1938
|
+
sessions = list_sessions()
|
|
1939
|
+
if not sessions:
|
|
1940
|
+
console.print("[dim]No saved sessions found.[/dim]")
|
|
1941
|
+
console.print("[dim]Sessions are created when you start chatting.[/dim]")
|
|
1942
|
+
else:
|
|
1943
|
+
console.print("\n📋 [bold cyan]Saved Sessions:[/bold cyan]\n")
|
|
1944
|
+
from datetime import datetime
|
|
1945
|
+
for i, sess in enumerate(sessions[:10], 1): # Show last 10
|
|
1946
|
+
modified = datetime.fromtimestamp(sess['modified']).strftime('%Y-%m-%d %H:%M')
|
|
1947
|
+
|
|
1948
|
+
# Build session info line
|
|
1949
|
+
agent_info = sess.get('agent_name', 'unknown')
|
|
1950
|
+
model_info = sess.get('model', '')
|
|
1951
|
+
if model_info:
|
|
1952
|
+
agent_info = f"{agent_info} ({model_info})"
|
|
1953
|
+
|
|
1954
|
+
# Check if this is current session
|
|
1955
|
+
is_current = sess['session_id'] == current_session_id
|
|
1956
|
+
current_marker = " [green]◀ current[/green]" if is_current else ""
|
|
1957
|
+
|
|
1958
|
+
# Plan progress if available
|
|
1959
|
+
if sess.get('steps_total', 0) > 0:
|
|
1960
|
+
progress = f"[{sess['steps_completed']}/{sess['steps_total']}]"
|
|
1961
|
+
status = "✓" if sess['steps_completed'] == sess['steps_total'] else "○"
|
|
1962
|
+
plan_info = f" - {sess.get('title', 'Untitled')} {progress}"
|
|
1963
|
+
else:
|
|
1964
|
+
status = "●"
|
|
1965
|
+
plan_info = ""
|
|
1966
|
+
|
|
1967
|
+
console.print(f" {status} [cyan]{sess['session_id']}[/cyan]{plan_info}")
|
|
1968
|
+
console.print(f" [dim]{agent_info} • {modified}[/dim]{current_marker}")
|
|
1969
|
+
console.print(f"\n[dim]Usage: /session resume <session_id>[/dim]")
|
|
1970
|
+
|
|
1971
|
+
elif parts[1] == 'resume' and len(parts) > 2:
|
|
1972
|
+
session_id = parts[2].strip()
|
|
1973
|
+
from .tools import load_session_metadata, create_session_memory, from_portable_path
|
|
1974
|
+
|
|
1975
|
+
# Check if session exists (either plan or metadata)
|
|
1976
|
+
loaded_state = PlanState.load(session_id)
|
|
1977
|
+
session_metadata = load_session_metadata(session_id)
|
|
1978
|
+
|
|
1979
|
+
if loaded_state or session_metadata:
|
|
1980
|
+
# Update current session to use this session_id
|
|
1981
|
+
current_session_id = session_id
|
|
1982
|
+
|
|
1983
|
+
# Restore memory from session SQLite (reuses existing memory.db file)
|
|
1984
|
+
memory = create_session_memory(session_id)
|
|
1985
|
+
|
|
1986
|
+
# Update plan state if available
|
|
1987
|
+
if loaded_state:
|
|
1988
|
+
plan_state.update(loaded_state.to_dict())
|
|
1989
|
+
resume_info = f"\n\n{loaded_state.render()}"
|
|
1990
|
+
else:
|
|
1991
|
+
plan_state['session_id'] = session_id
|
|
1992
|
+
resume_info = ""
|
|
1993
|
+
|
|
1994
|
+
# Restore agent source and reload agent definition if available
|
|
1995
|
+
restored_agent = False
|
|
1996
|
+
if session_metadata:
|
|
1997
|
+
agent_source = session_metadata.get('agent_source')
|
|
1998
|
+
if agent_source:
|
|
1999
|
+
agent_file_path = from_portable_path(agent_source)
|
|
2000
|
+
if Path(agent_file_path).exists():
|
|
2001
|
+
try:
|
|
2002
|
+
agent_def = load_agent_definition(agent_file_path)
|
|
2003
|
+
current_agent_file = agent_file_path
|
|
2004
|
+
agent_name = agent_def.get('name', Path(agent_file_path).stem)
|
|
2005
|
+
is_local = True
|
|
2006
|
+
is_direct = False
|
|
2007
|
+
restored_agent = True
|
|
2008
|
+
except Exception as e:
|
|
2009
|
+
console.print(f"[yellow]Warning: Could not reload agent from {agent_source}: {e}[/yellow]")
|
|
2010
|
+
|
|
2011
|
+
# Restore added toolkit configs
|
|
2012
|
+
restored_toolkit_configs = session_metadata.get('added_toolkit_configs', [])
|
|
2013
|
+
if restored_toolkit_configs:
|
|
2014
|
+
added_toolkit_configs.clear()
|
|
2015
|
+
added_toolkit_configs.extend(restored_toolkit_configs)
|
|
2016
|
+
|
|
2017
|
+
# Restore added MCPs to agent_def
|
|
2018
|
+
restored_mcps = session_metadata.get('added_mcps', [])
|
|
2019
|
+
if restored_mcps and restored_agent:
|
|
2020
|
+
if 'mcps' not in agent_def:
|
|
2021
|
+
agent_def['mcps'] = []
|
|
2022
|
+
for mcp_name in restored_mcps:
|
|
2023
|
+
if mcp_name not in [m if isinstance(m, str) else m.get('name') for m in agent_def.get('mcps', [])]:
|
|
2024
|
+
agent_def['mcps'].append(mcp_name)
|
|
2025
|
+
|
|
2026
|
+
# Restore model/temperature overrides
|
|
2027
|
+
if session_metadata.get('model'):
|
|
2028
|
+
current_model = session_metadata['model']
|
|
2029
|
+
if restored_agent:
|
|
2030
|
+
agent_def['model'] = current_model
|
|
2031
|
+
if session_metadata.get('temperature') is not None:
|
|
2032
|
+
current_temperature = session_metadata['temperature']
|
|
2033
|
+
if restored_agent:
|
|
2034
|
+
agent_def['temperature'] = current_temperature
|
|
2035
|
+
|
|
2036
|
+
# Restore allowed directories
|
|
2037
|
+
if session_metadata.get('allowed_directories'):
|
|
2038
|
+
allowed_directories = session_metadata['allowed_directories']
|
|
2039
|
+
elif session_metadata.get('work_dir'):
|
|
2040
|
+
# Backward compatibility with old sessions
|
|
2041
|
+
allowed_directories = [session_metadata['work_dir']]
|
|
2042
|
+
|
|
2043
|
+
# Reinitialize context manager with resumed session_id to load chat history
|
|
2044
|
+
ctx_manager = CLIContextManager(
|
|
2045
|
+
session_id=session_id,
|
|
2046
|
+
max_context_tokens=context_config.get('max_context_tokens', 8000),
|
|
2047
|
+
preserve_recent=context_config.get('preserve_recent_messages', 5),
|
|
2048
|
+
pruning_method=context_config.get('pruning_method', 'oldest_first'),
|
|
2049
|
+
enable_summarization=context_config.get('enable_summarization', True),
|
|
2050
|
+
summary_trigger_ratio=context_config.get('summary_trigger_ratio', 0.8),
|
|
2051
|
+
summaries_limit=context_config.get('summaries_limit_count', 5),
|
|
2052
|
+
llm=llm if 'llm' in dir() else None
|
|
2053
|
+
)
|
|
2054
|
+
|
|
2055
|
+
# Show session info
|
|
2056
|
+
agent_info = session_metadata.get('agent_name', 'unknown') if session_metadata else 'unknown'
|
|
2057
|
+
model_info = session_metadata.get('model', '') if session_metadata else ''
|
|
2058
|
+
|
|
2059
|
+
console.print(Panel(
|
|
2060
|
+
f"[green]✓ Resumed session:[/green] [bold]{session_id}[/bold]\n"
|
|
2061
|
+
f"[dim]Agent: {agent_info}" + (f" • Model: {model_info}" if model_info else "") + f"[/dim]"
|
|
2062
|
+
f"{resume_info}",
|
|
2063
|
+
border_style="green",
|
|
2064
|
+
box=box.ROUNDED
|
|
2065
|
+
))
|
|
2066
|
+
|
|
2067
|
+
# Display restored chat history
|
|
2068
|
+
chat_history_export = ctx_manager.export_chat_history(include_only=False)
|
|
2069
|
+
if chat_history_export:
|
|
2070
|
+
preserve_recent = context_config.get('preserve_recent_messages', 5)
|
|
2071
|
+
total_messages = len(chat_history_export)
|
|
2072
|
+
|
|
2073
|
+
if total_messages > preserve_recent:
|
|
2074
|
+
console.print(f"\n[dim]... {total_messages - preserve_recent} earlier messages in context[/dim]")
|
|
2075
|
+
messages_to_show = chat_history_export[-preserve_recent:]
|
|
2076
|
+
else:
|
|
2077
|
+
messages_to_show = chat_history_export
|
|
2078
|
+
|
|
2079
|
+
for msg in messages_to_show:
|
|
2080
|
+
role = msg.get('role', 'user')
|
|
2081
|
+
content = msg.get('content', '')[:200] # Truncate for display
|
|
2082
|
+
if len(msg.get('content', '')) > 200:
|
|
2083
|
+
content += '...'
|
|
2084
|
+
role_color = 'cyan' if role == 'user' else 'green'
|
|
2085
|
+
role_label = 'You' if role == 'user' else 'Assistant'
|
|
2086
|
+
console.print(f"[dim][{role_color}]{role_label}:[/{role_color}] {content}[/dim]")
|
|
2087
|
+
console.print()
|
|
2088
|
+
|
|
2089
|
+
# Recreate agent executor with restored tools if we have a local/built-in agent
|
|
2090
|
+
if (is_direct or is_local or is_inventory) and restored_agent:
|
|
2091
|
+
try:
|
|
2092
|
+
agent_executor, mcp_session_manager, llm, llm_model, filesystem_tools, terminal_tools, planning_tools = _setup_local_agent_executor(
|
|
2093
|
+
client, agent_def, tuple(added_toolkit_configs), config, current_model, current_temperature, current_max_tokens, memory, allowed_directories, plan_state
|
|
2094
|
+
)
|
|
2095
|
+
ctx_manager.llm = llm # Update LLM for summarization
|
|
2096
|
+
|
|
2097
|
+
# Warn about MCP state loss
|
|
2098
|
+
if restored_mcps:
|
|
2099
|
+
console.print("[yellow]Note: MCP connections re-initialized (stateful server state like browser sessions are lost)[/yellow]")
|
|
2100
|
+
except Exception as e:
|
|
2101
|
+
console.print(f"[red]Error recreating agent executor: {e}[/red]")
|
|
2102
|
+
console.print("[yellow]Session state loaded but agent not fully restored. Some tools may not work.[/yellow]")
|
|
2103
|
+
elif is_direct or is_local or is_inventory:
|
|
2104
|
+
# Just update planning tools if we couldn't restore agent
|
|
2105
|
+
try:
|
|
2106
|
+
from .tools import get_planning_tools
|
|
2107
|
+
if loaded_state:
|
|
2108
|
+
planning_tools, _ = get_planning_tools(loaded_state)
|
|
2109
|
+
except Exception as e:
|
|
2110
|
+
console.print(f"[yellow]Warning: Could not reload planning tools: {e}[/yellow]")
|
|
2111
|
+
else:
|
|
2112
|
+
console.print(f"[red]Session not found: {session_id}[/red]")
|
|
2113
|
+
else:
|
|
2114
|
+
console.print("[dim]Usage: /session [list] or /session resume <session_id>[/dim]")
|
|
2115
|
+
continue
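`/session list` above renders each saved session with its plan progress (`[completed/total]`), agent name, model, and last-modified time, marking the current session. Below is a plain-text sketch of that rendering loop; the record keys mirror the hunk, while the output formatting is simplified (no Rich markup) and therefore only illustrative.

```python
# Plain-text sketch of the /session list rendering above (simplified, no Rich markup).
from datetime import datetime

def render_sessions(sessions, current_session_id, limit=10):
    for sess in sessions[:limit]:
        modified = datetime.fromtimestamp(sess['modified']).strftime('%Y-%m-%d %H:%M')
        agent = sess.get('agent_name', 'unknown')
        if sess.get('model'):
            agent = f"{agent} ({sess['model']})"
        if sess.get('steps_total', 0) > 0:
            status = "✓" if sess['steps_completed'] == sess['steps_total'] else "○"
            plan = f" - {sess.get('title', 'Untitled')} [{sess['steps_completed']}/{sess['steps_total']}]"
        else:
            status, plan = "●", ""
        marker = " ◀ current" if sess['session_id'] == current_session_id else ""
        print(f" {status} {sess['session_id']}{plan}")
        print(f"   {agent} • {modified}{marker}")
```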
|
|
2116
|
+
|
|
2117
|
+
# /agent command - switch to a different agent
|
|
2118
|
+
if user_input == '/agent':
|
|
2119
|
+
selected_agent = _select_agent_interactive(client, config)
|
|
2120
|
+
if selected_agent and selected_agent != '__direct__' and selected_agent != '__inventory__':
|
|
2121
|
+
# Load the new agent
|
|
2122
|
+
new_is_local = Path(selected_agent).exists()
|
|
2123
|
+
|
|
2124
|
+
if new_is_local:
|
|
2125
|
+
agent_def = load_agent_definition(selected_agent)
|
|
2126
|
+
agent_name = agent_def.get('name', Path(selected_agent).stem)
|
|
2127
|
+
agent_type = "Local Agent"
|
|
2128
|
+
is_local = True
|
|
2129
|
+
is_direct = False
|
|
2130
|
+
is_inventory = False
|
|
2131
|
+
current_agent_file = selected_agent # Track for /reload
|
|
2132
|
+
else:
|
|
2133
|
+
# Platform agent
|
|
2134
|
+
agents = client.get_list_of_apps()
|
|
2135
|
+
new_agent = None
|
|
2136
|
+
try:
|
|
2137
|
+
agent_id = int(selected_agent)
|
|
2138
|
+
new_agent = next((a for a in agents if a['id'] == agent_id), None)
|
|
2139
|
+
except ValueError:
|
|
2140
|
+
new_agent = next((a for a in agents if a['name'] == selected_agent), None)
|
|
2141
|
+
|
|
2142
|
+
if new_agent:
|
|
2143
|
+
agent_name = new_agent['name']
|
|
2144
|
+
agent_type = "Platform Agent"
|
|
2145
|
+
is_local = False
|
|
2146
|
+
is_direct = False
|
|
2147
|
+
current_agent_file = None # No file for platform agents
|
|
2148
|
+
|
|
2149
|
+
# Setup platform agent
|
|
2150
|
+
details = client.get_app_details(new_agent['id'])
|
|
2151
|
+
version_id = details['versions'][0]['id']
|
|
2152
|
+
agent_executor = client.application(
|
|
2153
|
+
application_id=new_agent['id'],
|
|
2154
|
+
application_version_id=version_id,
|
|
2155
|
+
memory=memory,
|
|
2156
|
+
chat_history=chat_history
|
|
2157
|
+
)
|
|
2158
|
+
console.print(Panel(
|
|
2159
|
+
f"[cyan]ℹ Switched to agent: [bold]{agent_name}[/bold] ({agent_type}). Chat history preserved.[/cyan]",
|
|
2160
|
+
border_style="cyan",
|
|
2161
|
+
box=box.ROUNDED
|
|
2162
|
+
))
|
|
2163
|
+
continue
|
|
2164
|
+
|
|
2165
|
+
# For local agents, recreate executor
|
|
2166
|
+
if new_is_local:
|
|
2167
|
+
from .tools import create_session_memory
|
|
2168
|
+
memory = create_session_memory(current_session_id)
|
|
2169
|
+
added_toolkit_configs = []
|
|
2170
|
+
try:
|
|
2171
|
+
agent_executor, mcp_session_manager, llm, llm_model, filesystem_tools, terminal_tools, planning_tools = _setup_local_agent_executor(
|
|
2172
|
+
client, agent_def, tuple(added_toolkit_configs), config, current_model, current_temperature, current_max_tokens, memory, allowed_directories, plan_state
|
|
2173
|
+
)
|
|
2174
|
+
console.print(Panel(
|
|
2175
|
+
f"[cyan]ℹ Switched to agent: [bold]{agent_name}[/bold] ({agent_type}). Agent state reset, chat history preserved.[/cyan]",
|
|
2176
|
+
border_style="cyan",
|
|
2177
|
+
box=box.ROUNDED
|
|
2178
|
+
))
|
|
2179
|
+
except Exception as e:
|
|
2180
|
+
console.print(f"[red]Error switching agent: {e}[/red]")
|
|
2181
|
+
elif selected_agent == '__direct__':
|
|
2182
|
+
# Switch back to direct mode
|
|
2183
|
+
is_direct = True
|
|
2184
|
+
is_local = False
|
|
2185
|
+
is_inventory = False
|
|
2186
|
+
current_agent_file = None # No file for direct mode
|
|
2187
|
+
agent_name = "Alita"
|
|
2188
|
+
agent_type = "Direct LLM"
|
|
2189
|
+
alita_prompt = _get_alita_system_prompt(config)
|
|
2190
|
+
agent_def = {
|
|
2191
|
+
'model': current_model or default_model,
|
|
2192
|
+
'temperature': current_temperature if current_temperature is not None else default_temperature,
|
|
2193
|
+
'max_tokens': current_max_tokens or default_max_tokens,
|
|
2194
|
+
'system_prompt': alita_prompt
|
|
2195
|
+
}
|
|
2196
|
+
from .tools import create_session_memory
|
|
2197
|
+
memory = create_session_memory(current_session_id)
|
|
2198
|
+
try:
|
|
2199
|
+
agent_executor, mcp_session_manager, llm, llm_model, filesystem_tools, terminal_tools, planning_tools = _setup_local_agent_executor(
|
|
2200
|
+
client, agent_def, tuple(added_toolkit_configs), config, current_model, current_temperature, current_max_tokens, memory, allowed_directories, plan_state
|
|
2201
|
+
)
|
|
2202
|
+
console.print(Panel(
|
|
2203
|
+
f"[cyan]ℹ Switched to [bold]Alita[/bold]. Agent state reset, chat history preserved.[/cyan]",
|
|
2204
|
+
border_style="cyan",
|
|
2205
|
+
box=box.ROUNDED
|
|
2206
|
+
))
|
|
2207
|
+
except Exception as e:
|
|
2208
|
+
console.print(f"[red]Error switching to direct mode: {e}[/red]")
|
|
2209
|
+
elif selected_agent == '__inventory__':
|
|
2210
|
+
# Switch to inventory mode
|
|
2211
|
+
is_direct = False
|
|
2212
|
+
is_local = False
|
|
2213
|
+
is_inventory = True
|
|
2214
|
+
current_agent_file = None # No file for inventory mode
|
|
2215
|
+
agent_name = "Inventory"
|
|
2216
|
+
agent_type = "Built-in Agent"
|
|
2217
|
+
inventory_prompt = _get_inventory_system_prompt(config)
|
|
2218
|
+
agent_def = {
|
|
2219
|
+
'name': 'inventory-agent',
|
|
2220
|
+
'model': current_model or default_model,
|
|
2221
|
+
'temperature': current_temperature if current_temperature is not None else 0.3,
|
|
2222
|
+
'max_tokens': current_max_tokens or default_max_tokens,
|
|
2223
|
+
'system_prompt': inventory_prompt,
|
|
2224
|
+
'toolkit_configs': [
|
|
2225
|
+
{'type': 'inventory', 'graph_path': './knowledge_graph.json'}
|
|
2226
|
+
]
|
|
2227
|
+
}
|
|
2228
|
+
from .tools import create_session_memory
|
|
2229
|
+
memory = create_session_memory(current_session_id)
|
|
2230
|
+
try:
|
|
2231
|
+
agent_executor, mcp_session_manager, llm, llm_model, filesystem_tools, terminal_tools, planning_tools = _setup_local_agent_executor(
|
|
2232
|
+
client, agent_def, tuple(added_toolkit_configs), config, current_model, current_temperature, current_max_tokens, memory, allowed_directories, plan_state
|
|
2233
|
+
)
|
|
2234
|
+
console.print(Panel(
|
|
2235
|
+
f"[cyan]ℹ Switched to [bold]Inventory[/bold] agent. Use /add_toolkit to add source toolkits.[/cyan]",
|
|
2236
|
+
border_style="cyan",
|
|
2237
|
+
box=box.ROUNDED
|
|
2238
|
+
))
|
|
2239
|
+
except Exception as e:
|
|
2240
|
+
console.print(f"[red]Error switching to inventory mode: {e}[/red]")
|
|
2241
|
+
continue
|
|
2242
|
+
|
|
2243
|
+
# Execute agent
|
|
2244
|
+
# Track if history was already added during continuation handling
|
|
2245
|
+
history_already_added = False
|
|
2246
|
+
original_user_input = user_input # Preserve for history tracking
|
|
2247
|
+
|
|
2248
|
+
if (is_direct or is_local or is_inventory) and agent_executor is None:
|
|
2249
|
+
# Local agent without tools: use direct LLM call with streaming
|
|
2250
|
+
system_prompt = agent_def.get('system_prompt', '')
|
|
658
2251
|
messages = []
|
|
659
2252
|
if system_prompt:
|
|
660
2253
|
messages.append({"role": "system", "content": system_prompt})
|
|
661
2254
|
|
|
662
|
-
#
|
|
663
|
-
|
|
2255
|
+
# Build pruned context from context manager
|
|
2256
|
+
context_messages = ctx_manager.build_context()
|
|
2257
|
+
for msg in context_messages:
|
|
664
2258
|
messages.append(msg)
|
|
665
2259
|
|
|
666
2260
|
# Add user message
|
|
@@ -722,50 +2316,268 @@ def agent_chat(ctx, agent_source: Optional[str], version: Optional[str],
|
|
|
722
2316
|
# Agent with tools or platform agent: use agent executor
|
|
723
2317
|
# Setup callback for verbose output
|
|
724
2318
|
from langchain_core.runnables import RunnableConfig
|
|
2319
|
+
from langgraph.errors import GraphRecursionError
|
|
725
2320
|
|
|
726
|
-
invoke_config
|
|
2321
|
+
# Initialize invoke_config with thread_id for checkpointing
|
|
2322
|
+
# This ensures the same thread is used across continuations
|
|
2323
|
+
invoke_config = RunnableConfig(
|
|
2324
|
+
configurable={"thread_id": current_session_id}
|
|
2325
|
+
)
|
|
2326
|
+
# always proceed with continuation enabled
|
|
2327
|
+
invoke_config["should_continue"] = True
|
|
2328
|
+
# Set recursion limit for tool executions
|
|
2329
|
+
logger.debug(f"Setting tool steps limit to {recursion_limit}")
|
|
2330
|
+
invoke_config["recursion_limit"] = recursion_limit
|
|
2331
|
+
cli_callback = None
|
|
727
2332
|
if show_verbose:
|
|
728
2333
|
cli_callback = create_cli_callback(verbose=True, debug=debug_mode)
|
|
729
|
-
invoke_config =
|
|
2334
|
+
invoke_config["callbacks"] = [cli_callback]
|
|
730
2335
|
|
|
731
|
-
#
|
|
732
|
-
|
|
733
|
-
|
|
734
|
-
|
|
735
|
-
|
|
736
|
-
|
|
737
|
-
|
|
738
|
-
|
|
739
|
-
|
|
740
|
-
|
|
741
|
-
|
|
742
|
-
|
|
743
|
-
|
|
744
|
-
|
|
745
|
-
|
|
746
|
-
|
|
747
|
-
|
|
748
|
-
|
|
749
|
-
|
|
750
|
-
|
|
751
|
-
|
|
752
|
-
|
|
2336
|
+
# Track recursion continuation state
|
|
2337
|
+
continue_from_recursion = False
|
|
2338
|
+
recursion_attempts = 0
|
|
2339
|
+
tool_limit_attempts = 0 # Track tool limit continuation attempts
|
|
2340
|
+
max_recursion_continues = 5 # Prevent infinite continuation loops
|
|
2341
|
+
output = None # Initialize output before loop
|
|
2342
|
+
result = None # Initialize result before loop
|
|
2343
|
+
|
|
2344
|
+
while True:
|
|
2345
|
+
try:
|
|
2346
|
+
# Always start with a thinking spinner
|
|
2347
|
+
status = console.status("[yellow]Thinking...[/yellow]", spinner="dots")
|
|
2348
|
+
status.start()
|
|
2349
|
+
|
|
2350
|
+
# Pass status to callback so it can stop it when tool calls start
|
|
2351
|
+
if cli_callback:
|
|
2352
|
+
cli_callback.status = status
|
|
2353
|
+
|
|
2354
|
+
try:
|
|
2355
|
+
result = agent_executor.invoke(
|
|
2356
|
+
{
|
|
2357
|
+
"input": [user_input] if not is_local else user_input,
|
|
2358
|
+
"chat_history": ctx_manager.build_context()
|
|
2359
|
+
},
|
|
2360
|
+
config=invoke_config
|
|
2361
|
+
)
|
|
2362
|
+
finally:
|
|
2363
|
+
# Make sure spinner is stopped
|
|
2364
|
+
try:
|
|
2365
|
+
status.stop()
|
|
2366
|
+
except Exception:
|
|
2367
|
+
pass
|
|
2368
|
+
|
|
2369
|
+
# Extract output from result
|
|
2370
|
+
if result is not None:
|
|
2371
|
+
output = extract_output_from_result(result)
|
|
2372
|
+
|
|
2373
|
+
# Check if max tool iterations were reached and prompt user
|
|
2374
|
+
if output and "Maximum tool execution iterations" in output and "reached" in output:
|
|
2375
|
+
tool_limit_attempts += 1
|
|
2376
|
+
|
|
2377
|
+
console.print()
|
|
2378
|
+
console.print(Panel(
|
|
2379
|
+
f"[yellow]⚠ Tool execution limit reached[/yellow]\n\n"
|
|
2380
|
+
f"The agent has executed the maximum number of tool calls in a single turn.\n"
|
|
2381
|
+
f"This usually happens with complex tasks that require many sequential operations.\n\n"
|
|
2382
|
+
f"[dim]Attempt {tool_limit_attempts}/{max_recursion_continues}[/dim]",
|
|
2383
|
+
title="Tool Limit Reached",
|
|
2384
|
+
border_style="yellow",
|
|
2385
|
+
box=box.ROUNDED
|
|
2386
|
+
))
|
|
2387
|
+
|
|
2388
|
+
if tool_limit_attempts >= max_recursion_continues:
|
|
2389
|
+
console.print("[red]Maximum continuation attempts reached. Please break down your request into smaller tasks.[/red]")
|
|
2390
|
+
break
|
|
2391
|
+
|
|
2392
|
+
console.print("\nWhat would you like to do?")
|
|
2393
|
+
console.print(" [bold cyan]c[/bold cyan] - Continue execution (tell agent to resume)")
|
|
2394
|
+
console.print(" [bold cyan]s[/bold cyan] - Stop and keep partial results")
|
|
2395
|
+
console.print(" [bold cyan]n[/bold cyan] - Start a new request")
|
|
2396
|
+
console.print()
|
|
2397
|
+
|
|
2398
|
+
try:
|
|
2399
|
+
choice = input_handler.get_input("Choice [c/s/n]: ").strip().lower()
|
|
2400
|
+
except (KeyboardInterrupt, EOFError):
|
|
2401
|
+
choice = 's'
|
|
2402
|
+
|
|
2403
|
+
if choice == 'c':
|
|
2404
|
+
# Continue - send a follow-up message to resume
|
|
2405
|
+
console.print("\n[cyan]Continuing execution...[/cyan]\n")
|
|
2406
|
+
|
|
2407
|
+
# Clean up the output - remove the tool limit warning message
|
|
2408
|
+
clean_output = output
|
|
2409
|
+
if "Maximum tool execution iterations" in output:
|
|
2410
|
+
# Strip the warning from the end of the output
|
|
2411
|
+
lines = output.split('\n')
|
|
2412
|
+
clean_lines = [l for l in lines if "Maximum tool execution iterations" not in l and "Stopping tool execution" not in l]
|
|
2413
|
+
clean_output = '\n'.join(clean_lines).strip()
|
|
2414
|
+
|
|
2415
|
+
# Add current output to history first (without the warning)
|
|
2416
|
+
# Use original user input for first continuation, current for subsequent
|
|
2417
|
+
history_input = original_user_input if not history_already_added else user_input
|
|
2418
|
+
if clean_output:
|
|
2419
|
+
chat_history.append({"role": "user", "content": history_input})
|
|
2420
|
+
chat_history.append({"role": "assistant", "content": clean_output})
|
|
2421
|
+
ctx_manager.add_message("user", history_input)
|
|
2422
|
+
ctx_manager.add_message("assistant", clean_output)
|
|
2423
|
+
history_already_added = True
|
|
2424
|
+
|
|
2425
|
+
# CRITICAL: Use a new thread_id when continuing to avoid corrupted
|
|
2426
|
+
# checkpoint state. The tool limit may have left the checkpoint with
|
|
2427
|
+
# an AIMessage containing tool_calls without corresponding ToolMessages.
|
|
2428
|
+
# Using a new thread_id starts fresh with our clean context manager state.
|
|
2429
|
+
import uuid
|
|
2430
|
+
continuation_thread_id = f"{current_session_id}-cont-{uuid.uuid4().hex[:8]}"
|
|
2431
|
+
invoke_config = RunnableConfig(
|
|
2432
|
+
configurable={"thread_id": continuation_thread_id}
|
|
2433
|
+
)
|
|
2434
|
+
invoke_config["should_continue"] = True
|
|
2435
|
+
invoke_config["recursion_limit"] = recursion_limit
|
|
2436
|
+
if cli_callback:
|
|
2437
|
+
invoke_config["callbacks"] = [cli_callback]
|
|
2438
|
+
|
|
2439
|
+
# Set new input to continue with a more explicit continuation message
|
|
2440
|
+
# Include context about the task limit to help the agent understand
|
|
2441
|
+
user_input = (
|
|
2442
|
+
"The previous response was interrupted due to reaching the tool execution limit. "
|
|
2443
|
+
"Continue from where you left off and complete the remaining steps of the original task. "
|
|
2444
|
+
"Focus on what still needs to be done - do not repeat completed work."
|
|
2445
|
+
)
|
|
2446
|
+
continue # Retry the invoke in this inner loop
|
|
2447
|
+
|
|
2448
|
+
elif choice == 's':
|
|
2449
|
+
console.print("\n[yellow]Stopped. Partial work has been completed.[/yellow]")
|
|
2450
|
+
break # Exit retry loop and show output
|
|
2451
|
+
|
|
2452
|
+
else: # 'n' or anything else
|
|
2453
|
+
console.print("\n[dim]Skipped. Enter a new request.[/dim]")
|
|
2454
|
+
output = None
|
|
2455
|
+
break # Exit retry loop
|
|
2456
|
+
|
|
2457
|
+
# Success - exit the retry loop
|
|
2458
|
+
break
|
|
2459
|
+
|
|
2460
|
+
except GraphRecursionError as e:
|
|
2461
|
+
recursion_attempts += 1
|
|
2462
|
+
step_limit = getattr(e, 'recursion_limit', 25)
|
|
2463
|
+
|
|
2464
|
+
console.print()
|
|
2465
|
+
console.print(Panel(
|
|
2466
|
+
f"[yellow]⚠ Step limit reached ({step_limit} steps)[/yellow]\n\n"
|
|
2467
|
+
f"The agent has executed the maximum number of steps allowed.\n"
|
|
2468
|
+
f"This usually happens with complex tasks that require many tool calls.\n\n"
|
|
2469
|
+
f"[dim]Attempt {recursion_attempts}/{max_recursion_continues}[/dim]",
|
|
2470
|
+
title="Step Limit Reached",
|
|
2471
|
+
border_style="yellow",
|
|
2472
|
+
box=box.ROUNDED
|
|
2473
|
+
))
|
|
2474
|
+
|
|
2475
|
+
if recursion_attempts >= max_recursion_continues:
|
|
2476
|
+
console.print("[red]Maximum continuation attempts reached. Please break down your request into smaller tasks.[/red]")
|
|
2477
|
+
output = f"[Step limit reached after {recursion_attempts} continuation attempts. The task may be too complex - please break it into smaller steps.]"
|
|
2478
|
+
break
|
|
2479
|
+
|
|
2480
|
+
# Prompt user for action
|
|
2481
|
+
console.print("\nWhat would you like to do?")
|
|
2482
|
+
console.print(" [bold cyan]c[/bold cyan] - Continue execution (agent will resume from checkpoint)")
|
|
2483
|
+
console.print(" [bold cyan]s[/bold cyan] - Stop and get partial results")
|
|
2484
|
+
console.print(" [bold cyan]n[/bold cyan] - Start a new request")
|
|
2485
|
+
console.print()
|
|
2486
|
+
|
|
2487
|
+
try:
|
|
2488
|
+
choice = input_handler.get_input("Choice [c/s/n]: ").strip().lower()
|
|
2489
|
+
except (KeyboardInterrupt, EOFError):
|
|
2490
|
+
choice = 's'
|
|
2491
|
+
|
|
2492
|
+
if choice == 'c':
|
|
2493
|
+
# Continue - Use a new thread_id to avoid corrupted checkpoint state.
|
|
2494
|
+
# GraphRecursionError may have left the checkpoint with an AIMessage
|
|
2495
|
+
# containing tool_calls without corresponding ToolMessages.
|
|
2496
|
+
# Using a new thread_id starts fresh with our clean context manager state.
|
|
2497
|
+
continue_from_recursion = True
|
|
2498
|
+
console.print("\n[cyan]Continuing with fresh context...[/cyan]\n")
|
|
2499
|
+
|
|
2500
|
+
# Add current progress to history if we have it
|
|
2501
|
+
# (GraphRecursionError doesn't give us partial output, but context may have been updated)
|
|
2502
|
+
history_input = original_user_input if not history_already_added else user_input
|
|
2503
|
+
ctx_manager.add_message("user", history_input)
|
|
2504
|
+
ctx_manager.add_message("assistant", "[Previous task interrupted - continuing...]")
|
|
2505
|
+
history_already_added = True
|
|
2506
|
+
|
|
2507
|
+
# Create new thread_id to avoid corrupted checkpoint
|
|
2508
|
+
import uuid
|
|
2509
|
+
continuation_thread_id = f"{current_session_id}-cont-{uuid.uuid4().hex[:8]}"
|
|
2510
|
+
invoke_config = RunnableConfig(
|
|
2511
|
+
configurable={"thread_id": continuation_thread_id}
|
|
2512
|
+
)
|
|
2513
|
+
if cli_callback:
|
|
2514
|
+
invoke_config["callbacks"] = [cli_callback]
|
|
2515
|
+
|
|
2516
|
+
# More explicit continuation message
|
|
2517
|
+
user_input = (
|
|
2518
|
+
"The previous response was interrupted due to reaching the step limit. "
|
|
2519
|
+
"Continue from where you left off and complete the remaining steps of the original task. "
|
|
2520
|
+
"Focus on what still needs to be done - do not repeat completed work."
|
|
2521
|
+
)
|
|
2522
|
+
continue # Retry the invoke
|
|
2523
|
+
|
|
2524
|
+
elif choice == 's':
|
|
2525
|
+
# Stop and try to extract partial results
|
|
2526
|
+
console.print("\n[yellow]Stopped. Attempting to extract partial results...[/yellow]")
|
|
2527
|
+
output = "[Task stopped due to step limit. Partial work may have been completed - check any files or state that were modified.]"
|
|
2528
|
+
break
|
|
2529
|
+
|
|
2530
|
+
else: # 'n' or anything else
|
|
2531
|
+
console.print("\n[dim]Skipped. Enter a new request.[/dim]")
|
|
2532
|
+
output = None
|
|
2533
|
+
break
|
|
753
2534
|
|
|
754
|
-
#
|
|
755
|
-
|
|
2535
|
+
# Skip chat history update if we bailed out (no result)
|
|
2536
|
+
if output is None:
|
|
2537
|
+
continue
|
|
2538
|
+
|
|
2539
|
+
# Display response in a clear format
|
|
2540
|
+
console.print() # Add spacing
|
|
2541
|
+
console.print(f"[bold bright_cyan]{agent_name}:[/bold bright_cyan]")
|
|
2542
|
+
console.print() # Add spacing before response
|
|
756
2543
|
if any(marker in output for marker in ['```', '**', '##', '- ', '* ']):
|
|
757
2544
|
console.print(Markdown(output))
|
|
758
2545
|
else:
|
|
759
2546
|
console.print(output)
|
|
2547
|
+
console.print() # Add spacing after response
|
|
760
2548
|
|
|
761
|
-
# Update chat history
|
|
762
|
-
|
|
763
|
-
|
|
2549
|
+
# Update chat history and context manager (skip if already added during continuation)
|
|
2550
|
+
if not history_already_added:
|
|
2551
|
+
chat_history.append({"role": "user", "content": original_user_input})
|
|
2552
|
+
chat_history.append({"role": "assistant", "content": output})
|
|
2553
|
+
|
|
2554
|
+
# Add messages to context manager for token tracking and pruning
|
|
2555
|
+
ctx_manager.add_message("user", original_user_input)
|
|
2556
|
+
ctx_manager.add_message("assistant", output)
|
|
2557
|
+
else:
|
|
2558
|
+
# During continuation, add the final response with continuation message
|
|
2559
|
+
chat_history.append({"role": "user", "content": user_input})
|
|
2560
|
+
chat_history.append({"role": "assistant", "content": output})
|
|
2561
|
+
ctx_manager.add_message("user", user_input)
|
|
2562
|
+
ctx_manager.add_message("assistant", output)
|
|
764
2563
|
|
|
765
2564
|
except KeyboardInterrupt:
|
|
766
2565
|
console.print("\n\n[yellow]Interrupted. Type 'exit' to quit or continue chatting.[/yellow]")
|
|
767
2566
|
continue
|
|
768
2567
|
except EOFError:
|
|
2568
|
+
# Save final session state before exiting
|
|
2569
|
+
try:
|
|
2570
|
+
from .tools import update_session_metadata, to_portable_path
|
|
2571
|
+
update_session_metadata(current_session_id, {
|
|
2572
|
+
'agent_source': to_portable_path(current_agent_file) if current_agent_file else None,
|
|
2573
|
+
'model': current_model or llm_model_display,
|
|
2574
|
+
'temperature': current_temperature if current_temperature is not None else llm_temperature_display,
|
|
2575
|
+
'allowed_directories': allowed_directories,
|
|
2576
|
+
'added_toolkit_configs': list(added_toolkit_configs),
|
|
2577
|
+
'added_mcps': [m if isinstance(m, str) else m.get('name') for m in agent_def.get('mcps', [])],
|
|
2578
|
+
})
|
|
2579
|
+
except Exception as e:
|
|
2580
|
+
logger.debug(f"Failed to save session state on exit: {e}")
|
|
769
2581
|
console.print("\n\n[bold cyan]Goodbye! 👋[/bold cyan]")
|
|
770
2582
|
break
|
|
771
2583
|
|
|
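The continuation branches above re-invoke the executor under a fresh `thread_id` rather than resuming the interrupted checkpoint. A minimal sketch of that pattern, assuming a LangGraph-style executor compiled with a checkpointer; `agent_executor` and the limit value are illustrative:

```python
# Minimal sketch of the fresh-thread continuation used above (names are illustrative).
import uuid

from langchain_core.runnables import RunnableConfig

continuation_thread_id = f"session-cont-{uuid.uuid4().hex[:8]}"
invoke_config = RunnableConfig(configurable={"thread_id": continuation_thread_id})
invoke_config["recursion_limit"] = 50  # assumed cap on graph steps for the retry

result = agent_executor.invoke(  # agent_executor is assumed to be the compiled agent graph
    {"input": "Continue from where you left off.", "chat_history": []},
    config=invoke_config,
)
```

Because the new thread has no checkpoint, the prior conversation is re-supplied through the context manager rather than recovered from the (possibly inconsistent) saved state.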
@@ -803,40 +2615,24 @@ def agent_run(ctx, agent_source: str, message: str, version: Optional[str],
               temperature: Optional[float], max_tokens: Optional[int],
               save_thread: Optional[str], work_dir: Optional[str],
               verbose: str):
-    """
-    Run agent with a single message (handoff mode).
+    """Run agent with a single message (handoff mode).
 
+    \b
     AGENT_SOURCE can be:
-
-
+    - Platform agent ID or name
+    - Path to local agent file
 
     MESSAGE is the input message to send to the agent.
 
+    \b
     Examples:
-
-
-
-
-
-
-
-
-        # With toolkit configs and JSON output
-        alita-cli --output json agent run my-agent "Search for bugs" \\
-            --toolkit-config jira-config.json
-
-        # With filesystem access
-        alita-cli agent run my-agent "Analyze the code in src/" --dir ./myproject
-
-        # Save thread for continuation
-        alita-cli agent run my-agent "Start task" \\
-            --save-thread thread.txt
-
-        # Quiet mode (hide tool calls and thinking)
-        alita-cli agent run my-agent "Query" --verbose quiet
-
-        # Debug mode (show all including LLM calls)
-        alita-cli agent run my-agent "Query" --verbose debug
+        alita run my-agent "What is the status of JIRA-123?"
+        alita run ./agent.md "Create a new toolkit for Stripe API"
+        alita -o json run my-agent "Search for bugs" --toolkit-config jira.json
+        alita run my-agent "Analyze code" --dir ./myproject
+        alita run my-agent "Start task" --save-thread thread.txt
+        alita run my-agent "Query" -v quiet
+        alita run my-agent "Query" -v debug
     """
     formatter = ctx.obj['formatter']
     client = get_client(ctx)
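The rewritten help text relies on click's `\b` escape: a paragraph that begins with `\b` is left unwrapped in `--help` output, which keeps each example command on its own line. A small self-contained sketch of that behavior:

```python
# Sketch of click's \b escape: the paragraph following \b keeps its line breaks
# in --help output instead of being rewrapped.
import click


@click.command()
def demo():
    """Run a demo.

    \b
    Examples:
        demo --fast
        demo --dry-run
    """
    click.echo("ok")


if __name__ == "__main__":
    demo()
```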
@@ -859,8 +2655,8 @@ def agent_run(ctx, agent_source: str, message: str, version: Optional[str],
 
     # Setup local agent executor (reuses same logic as agent_chat)
     try:
-        agent_executor, mcp_session_manager, llm, llm_model, filesystem_tools = _setup_local_agent_executor(
-            client, agent_def, toolkit_config, ctx.obj['config'], model, temperature, max_tokens, memory, work_dir
+        agent_executor, mcp_session_manager, llm, llm_model, filesystem_tools, terminal_tools, planning_tools = _setup_local_agent_executor(
+            client, agent_def, toolkit_config, ctx.obj['config'], model, temperature, max_tokens, memory, work_dir, {}
         )
     except Exception as e:
         error_panel = Panel(
@@ -876,31 +2672,42 @@ def agent_run(ctx, agent_source: str, message: str, version: Optional[str],
     if agent_executor:
         # Setup callback for verbose output
         from langchain_core.runnables import RunnableConfig
+        from langgraph.errors import GraphRecursionError
 
         invoke_config = None
         if show_verbose:
             cli_callback = create_cli_callback(verbose=True, debug=debug_mode)
             invoke_config = RunnableConfig(callbacks=[cli_callback])
 
-
-
-
-
-            result = agent_executor.invoke({
-                "input": message,
-                "chat_history": []
-            })
-
-            click.echo(formatter._dump({
-                'agent': agent_name,
-                'message': message,
-                'response': extract_output_from_result(result),
-                'full_result': result
-            }))
-        else:
-            # Show status only when not verbose (verbose shows its own progress)
-            if not show_verbose:
+        try:
+            # Execute with spinner for non-JSON output
+            if formatter.__class__.__name__ == 'JSONFormatter':
+                # JSON output: always quiet, no callbacks
                 with console.status("[yellow]Processing...[/yellow]", spinner="dots"):
+                    result = agent_executor.invoke({
+                        "input": message,
+                        "chat_history": []
+                    })
+
+                click.echo(formatter._dump({
+                    'agent': agent_name,
+                    'message': message,
+                    'response': extract_output_from_result(result),
+                    'full_result': result
+                }))
+            else:
+                # Show status only when not verbose (verbose shows its own progress)
+                if not show_verbose:
+                    with console.status("[yellow]Processing...[/yellow]", spinner="dots"):
+                        result = agent_executor.invoke(
+                            {
+                                "input": message,
+                                "chat_history": []
+                            },
+                            config=invoke_config
+                        )
+                else:
+                    console.print() # Add spacing before tool calls
                     result = agent_executor.invoke(
                         {
                             "input": message,
@@ -908,19 +2715,34 @@ def agent_run(ctx, agent_source: str, message: str, version: Optional[str],
                         },
                         config=invoke_config
                     )
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+            # Extract and display output
+            output = extract_output_from_result(result)
+            display_output(agent_name, message, output)
+
+        except GraphRecursionError as e:
+            step_limit = getattr(e, 'recursion_limit', 25)
+            console.print()
+            console.print(Panel(
+                f"[yellow]⚠ Step limit reached ({step_limit} steps)[/yellow]\n\n"
+                f"The agent exceeded the maximum number of steps.\n"
+                f"This task may be too complex for a single run.\n\n"
+                f"[bold]Suggestions:[/bold]\n"
+                f"• Use [cyan]alita agent chat[/cyan] for interactive continuation\n"
+                f"• Break the task into smaller, focused requests\n"
+                f"• Check if partial work was completed (files created, etc.)",
+                title="Step Limit Reached",
+                border_style="yellow",
+                box=box.ROUNDED
+            ))
+            if formatter.__class__.__name__ == 'JSONFormatter':
+                click.echo(formatter._dump({
+                    'agent': agent_name,
+                    'message': message,
+                    'error': 'step_limit_reached',
+                    'step_limit': step_limit,
+                    'response': f'Step limit of {step_limit} reached. Task may be too complex.'
+                }))
     else:
         # Simple LLM mode without tools
         system_prompt = agent_def.get('system_prompt', '')
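Both execution paths now catch `GraphRecursionError`, which LangGraph raises when a compiled graph exceeds the `recursion_limit` carried in the invoke config (25 steps by default). A minimal sketch of that pattern, with `agent_executor` and `message` standing in for the objects built above:

```python
# Minimal sketch: GraphRecursionError signals the graph hit its recursion_limit.
from langgraph.errors import GraphRecursionError

try:
    result = agent_executor.invoke(  # agent_executor/message are stand-ins from the surrounding code
        {"input": message, "chat_history": []},
        config={"recursion_limit": 25},
    )
except GraphRecursionError:
    result = {"output": "[Step limit reached - consider breaking the task into smaller steps]"}
```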
@@ -998,29 +2820,40 @@ def agent_run(ctx, agent_source: str, message: str, version: Optional[str],
 
         # Setup callback for verbose output
         from langchain_core.runnables import RunnableConfig
+        from langgraph.errors import GraphRecursionError
 
         invoke_config = None
         if show_verbose:
             cli_callback = create_cli_callback(verbose=True, debug=debug_mode)
             invoke_config = RunnableConfig(callbacks=[cli_callback])
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        try:
+            # Execute with spinner for non-JSON output
+            if formatter.__class__.__name__ == 'JSONFormatter':
+                result = agent_executor.invoke({
+                    "input": [message],
+                    "chat_history": []
+                })
+
+                click.echo(formatter._dump({
+                    'agent': agent['name'],
+                    'message': message,
+                    'response': result.get('output', ''),
+                    'full_result': result
+                }))
+            else:
+                # Show status only when not verbose
+                if not show_verbose:
+                    with console.status("[yellow]Processing...[/yellow]", spinner="dots"):
+                        result = agent_executor.invoke(
+                            {
+                                "input": [message],
+                                "chat_history": []
+                            },
+                            config=invoke_config
+                        )
+                else:
+                    console.print() # Add spacing before tool calls
                     result = agent_executor.invoke(
                         {
                             "input": [message],
@@ -1028,32 +2861,47 @@ def agent_run(ctx, agent_source: str, message: str, version: Optional[str],
                         },
                         config=invoke_config
                     )
-
-
-
-
-                "input": [message],
-                "chat_history": []
-            },
-            config=invoke_config
-        )
+
+            # Display output
+            response = result.get('output', 'No response')
+            display_output(agent['name'], message, response)
 
-            #
-
-
-
-
-
-
-
-
-            '
-
-
-
-
-
-
+            # Save thread if requested
+            if save_thread:
+                thread_data = {
+                    'agent_id': agent['id'],
+                    'agent_name': agent['name'],
+                    'version_id': version_id,
+                    'thread_id': result.get('thread_id'),
+                    'last_message': message
+                }
+                with open(save_thread, 'w') as f:
+                    json.dump(thread_data, f, indent=2)
+                logger.info(f"Thread saved to {save_thread}")
+
+        except GraphRecursionError as e:
+            step_limit = getattr(e, 'recursion_limit', 25)
+            console.print()
+            console.print(Panel(
+                f"[yellow]⚠ Step limit reached ({step_limit} steps)[/yellow]\n\n"
+                f"The agent exceeded the maximum number of steps.\n"
+                f"This task may be too complex for a single run.\n\n"
+                f"[bold]Suggestions:[/bold]\n"
+                f"• Use [cyan]alita agent chat[/cyan] for interactive continuation\n"
+                f"• Break the task into smaller, focused requests\n"
+                f"• Check if partial work was completed (files created, etc.)",
+                title="Step Limit Reached",
+                border_style="yellow",
+                box=box.ROUNDED
+            ))
+            if formatter.__class__.__name__ == 'JSONFormatter':
+                click.echo(formatter._dump({
+                    'agent': agent['name'],
+                    'message': message,
+                    'error': 'step_limit_reached',
+                    'step_limit': step_limit,
+                    'response': f'Step limit of {step_limit} reached. Task may be too complex.'
+                }))
 
     except click.ClickException:
         raise
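When `--save-thread` is passed, the command writes the small JSON descriptor shown above; a follow-up script can read it back to find the thread to resume. A sketch, assuming the descriptor was saved as `thread.txt`:

```python
# Sketch: read back a descriptor written by --save-thread (keys mirror the dict above).
import json

with open("thread.txt") as f:
    thread_data = json.load(f)

print(thread_data["agent_name"], thread_data.get("thread_id"))
```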
@@ -1067,3 +2915,199 @@ def agent_run(ctx, agent_source: str, message: str, version: Optional[str],
     )
     console.print(error_panel, style="red")
     raise click.Abort()
+
+
+@agent.command('execute-test-cases')
+@click.option(
+    '--agent_source',
+    '--agent-source',
+    'agent_source',
+    required=False,
+    default=str(Path('.alita') / 'agents' / 'test-runner.agent.md'),
+    show_default=True,
+    type=click.Path(exists=False, file_okay=True, dir_okay=False),
+    help='Path to test runner agent definition file'
+)
+@click.option('--test-cases-dir', required=True, type=click.Path(exists=True, file_okay=False, dir_okay=True),
+              help='Directory containing test case files')
+@click.option('--results-dir', required=False, default=str(Path('.alita') / 'tests' / 'results'),
+              type=click.Path(file_okay=False, dir_okay=True),
+              help='Directory where test results will be saved')
+@click.option('--test-case', 'test_case_files', multiple=True,
+              help='Specific test case file(s) to execute (e.g., TC-001.md). Can specify multiple times. If not specified, executes all test cases.')
+@click.option('--model', help='Override LLM model')
+@click.option('--temperature', type=float, help='Override temperature')
+@click.option('--max-tokens', type=int, help='Override max tokens')
+@click.option('--dir', 'work_dir', required=False, default=str(Path('.alita')),
+              type=click.Path(exists=True, file_okay=False, dir_okay=True),
+              help='Grant agent filesystem access to this directory')
+@click.option('--data-generator', required=False, default=str(Path('.alita') / 'agents' / 'test-data-generator.agent.md'),
+              type=click.Path(exists=True),
+              help='Path to test data generator agent definition file')
+@click.option('--validator', type=click.Path(exists=True),
+              help='Path to test validator agent definition file (default: .alita/agents/test-validator.agent.md)')
+@click.option('--skip-data-generation', is_flag=True,
+              help='Skip test data generation step')
+@click.option('--verbose', '-v', type=click.Choice(['quiet', 'default', 'debug']), default='default',
+              help='Output verbosity level: quiet (final output only), default (tool calls + outputs), debug (all including LLM calls)')
+@click.pass_context
+def execute_test_cases(ctx, agent_source: str, test_cases_dir: str, results_dir: str,
+                       test_case_files: tuple, model: Optional[str], temperature: Optional[float],
+                       max_tokens: Optional[int], work_dir: str,
+                       data_generator: str, validator: Optional[str],
+                       skip_data_generation: bool,
+                       verbose: str):
+    """
+    Execute test cases from a directory and save results.
+
+    This command:
+    1. (Optional) Executes test data generator agent to provision test data
+    2. Scans TEST_CASES_DIR for test case markdown files (TC-*.md)
+    3. For each test case:
+       - Parses the test case to extract config, steps, and expectations
+       - Loads the agent with the toolkit config specified in the test case
+       - Executes each test step
+       - Validates output against expectations
+       - Generates a test result file
+    4. Saves all results to RESULTS_DIR
+
+    --agent_source: Path to test runner agent definition file
+
+    \b
+    Examples:
+        alita agent execute-test-cases --test-cases-dir ./tests --results-dir ./results
+        alita agent execute-test-cases --agent_source ./agent.json --test-cases-dir ./tests --results-dir ./results \
+            --data-generator ./data-gen.json
+        alita agent execute-test-cases --agent_source ./agent.json --test-cases-dir ./tests --results-dir ./results \
+            --test-case TC-001.md --test-case TC-002.md
+        alita agent execute-test-cases --agent_source ./agent.json --test-cases-dir ./tests --results-dir ./results \
+            --skip-data-generation --model gpt-4o
+    """
+    # Import dependencies at function start
+    import sqlite3
+    import uuid
+    from langgraph.checkpoint.sqlite import SqliteSaver
+
+    config = ctx.obj['config']
+    client = get_client(ctx)
+
+    # Setup verbose level
+    show_verbose = verbose != 'quiet'
+    debug_mode = verbose == 'debug'
+
+    # Sanity-check committed defaults (should exist; fail early with a clear message if not)
+    if results_dir and not Path(results_dir).exists():
+        raise click.ClickException(
+            f"Results directory not found: {results_dir}. "
+            f"If you are running outside the repo root, pass --results-dir explicitly."
+        )
+
+    try:
+        # Load test runner agent
+        agent_def, agent_name = load_test_runner_agent(agent_source)
+
+        # Find and filter test case files
+        test_cases_path = Path(test_cases_dir)
+        test_case_files_list = discover_test_case_files(test_cases_dir, test_case_files)
+
+        # Validate that test cases were found
+        if not validate_test_case_files(test_case_files_list, test_cases_dir, test_case_files):
+            return
+
+        # Print execution header
+        print_test_execution_header(agent_name, test_case_files_list, test_case_files, results_dir)
+
+        # Load data generator agent (if applicable)
+        data_gen_def = load_data_generator_agent(data_generator, skip_data_generation)
+
+        # Load validator agent
+        validator_def, validator_agent_name, validator_path = load_validator_agent(validator)
+
+        # Store bulk data generation chat history to pass to test executors
+        bulk_gen_chat_history = []
+
+        # Parse all test cases upfront
+        parsed_test_cases = []
+        test_cases_needing_data_gen = []
+
+        # Create master log for entire test execution session
+        results_path = Path(results_dir)
+        session_name = f"test-execution-{test_cases_path.name}"
+
+        # Use the callbacks module console so tool-call panels are printed and captured.
+        from .callbacks import console as callbacks_console
+        with TestLogCapture(results_path, session_name, console=callbacks_console) as master_log:
+            # Write header information to log
+            master_log.print(f"\n[bold cyan]🧪 Test Execution Started[/bold cyan]")
+            master_log.print(f"Agent: [bold]{agent_name}[/bold]")
+            master_log.print(f"Test Cases: {len(test_case_files_list)}")
+            if test_case_files:
+                master_log.print(f"Selected: [cyan]{', '.join(test_case_files)}[/cyan]")
+            master_log.print(f"Results Directory: {results_dir}\n")
+
+            if data_gen_def:
+                data_gen_name = data_gen_def.get('name', Path(data_generator).stem if data_generator else 'Data Generator')
+                master_log.print(f"Data Generator Agent: [bold]{data_gen_name}[/bold]\n")
+
+            if validator_def:
+                master_log.print(f"Validator Agent: [bold]{validator_agent_name}[/bold]")
+                master_log.print(f"[dim]Using: {validator_path}[/dim]\n")
+            else:
+                master_log.print(f"[dim]No validator agent specified, using test runner agent for validation[/dim]\n")
+
+            # Parse all test cases
+            parsed_test_cases = parse_all_test_cases(test_case_files_list, master_log)
+            test_cases_needing_data_gen = filter_test_cases_needing_data_gen(parsed_test_cases)
+
+            # Bulk test data generation (if enabled)
+            if data_gen_def and not skip_data_generation and test_cases_needing_data_gen:
+                bulk_gen_chat_history = execute_bulk_data_generation(
+                    data_gen_def, test_cases_needing_data_gen, parsed_test_cases,
+                    test_cases_path, client, config, model, temperature, max_tokens,
+                    work_dir, master_log, _setup_local_agent_executor,
+                    verbose=show_verbose,
+                    debug=debug_mode,
+                )
+
+            # Execute all test cases
+            test_results = execute_all_test_cases(
+                parsed_test_cases, bulk_gen_chat_history, test_cases_path,
+                agent_def, validator_def, client, config, model, temperature,
+                max_tokens, work_dir, master_log, _setup_local_agent_executor,
+                verbose=show_verbose,
+                debug=debug_mode,
+            )
+
+        # End of master_log context - log file saved automatically
+
+        # Print test execution summary
+        print_test_execution_summary(test_results, results_dir, session_name)
+
+        # Save structured JSON report
+        log_file = None
+        toolkit_name = session_name.replace('test-execution-', '')
+        toolkit_dir = results_path / toolkit_name
+        log_files = sorted(toolkit_dir.glob(f"*{session_name}.txt")) if toolkit_dir.exists() else []
+        if log_files:
+            log_file = log_files[0]
+
+        save_structured_report(test_results, results_dir, log_file)
+
+        # Exit with error code if any tests failed
+        failed_tests = sum(1 for r in test_results if not r['passed'])
+        if failed_tests > 0:
+            sys.exit(1)
+
+    except click.ClickException:
+        raise
+    except Exception as e:
+        logger.exception("Failed to execute test cases")
+        error_panel = Panel(
+            str(e),
+            title="Error",
+            border_style="red",
+            box=box.ROUNDED
+        )
+        console.print(error_panel, style="red")
+        raise click.Abort()
+
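The new `execute-test-cases` command registers both `--agent_source` and `--agent-source` and binds them to a single parameter; click supports this by accepting several option strings ahead of the parameter name. A small sketch of that option pattern, using a hypothetical command name:

```python
# Sketch: one click option exposed under two spellings, bound to one parameter.
import click


@click.command()
@click.option('--agent_source', '--agent-source', 'agent_source',
              default='test-runner.agent.md', show_default=True,
              help='Path to the agent definition file')
def run_tests(agent_source):
    click.echo(f"Using agent definition: {agent_source}")


if __name__ == "__main__":
    run_tests()
```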