alita-sdk 0.3.379__py3-none-any.whl → 0.3.627__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (278)
  1. alita_sdk/cli/__init__.py +10 -0
  2. alita_sdk/cli/__main__.py +17 -0
  3. alita_sdk/cli/agent/__init__.py +5 -0
  4. alita_sdk/cli/agent/default.py +258 -0
  5. alita_sdk/cli/agent_executor.py +156 -0
  6. alita_sdk/cli/agent_loader.py +245 -0
  7. alita_sdk/cli/agent_ui.py +228 -0
  8. alita_sdk/cli/agents.py +3113 -0
  9. alita_sdk/cli/callbacks.py +647 -0
  10. alita_sdk/cli/cli.py +168 -0
  11. alita_sdk/cli/config.py +306 -0
  12. alita_sdk/cli/context/__init__.py +30 -0
  13. alita_sdk/cli/context/cleanup.py +198 -0
  14. alita_sdk/cli/context/manager.py +731 -0
  15. alita_sdk/cli/context/message.py +285 -0
  16. alita_sdk/cli/context/strategies.py +289 -0
  17. alita_sdk/cli/context/token_estimation.py +127 -0
  18. alita_sdk/cli/formatting.py +182 -0
  19. alita_sdk/cli/input_handler.py +419 -0
  20. alita_sdk/cli/inventory.py +1073 -0
  21. alita_sdk/cli/mcp_loader.py +315 -0
  22. alita_sdk/cli/testcases/__init__.py +94 -0
  23. alita_sdk/cli/testcases/data_generation.py +119 -0
  24. alita_sdk/cli/testcases/discovery.py +96 -0
  25. alita_sdk/cli/testcases/executor.py +84 -0
  26. alita_sdk/cli/testcases/logger.py +85 -0
  27. alita_sdk/cli/testcases/parser.py +172 -0
  28. alita_sdk/cli/testcases/prompts.py +91 -0
  29. alita_sdk/cli/testcases/reporting.py +125 -0
  30. alita_sdk/cli/testcases/setup.py +108 -0
  31. alita_sdk/cli/testcases/test_runner.py +282 -0
  32. alita_sdk/cli/testcases/utils.py +39 -0
  33. alita_sdk/cli/testcases/validation.py +90 -0
  34. alita_sdk/cli/testcases/workflow.py +196 -0
  35. alita_sdk/cli/toolkit.py +327 -0
  36. alita_sdk/cli/toolkit_loader.py +85 -0
  37. alita_sdk/cli/tools/__init__.py +43 -0
  38. alita_sdk/cli/tools/approval.py +224 -0
  39. alita_sdk/cli/tools/filesystem.py +1751 -0
  40. alita_sdk/cli/tools/planning.py +389 -0
  41. alita_sdk/cli/tools/terminal.py +414 -0
  42. alita_sdk/community/__init__.py +72 -12
  43. alita_sdk/community/inventory/__init__.py +236 -0
  44. alita_sdk/community/inventory/config.py +257 -0
  45. alita_sdk/community/inventory/enrichment.py +2137 -0
  46. alita_sdk/community/inventory/extractors.py +1469 -0
  47. alita_sdk/community/inventory/ingestion.py +3172 -0
  48. alita_sdk/community/inventory/knowledge_graph.py +1457 -0
  49. alita_sdk/community/inventory/parsers/__init__.py +218 -0
  50. alita_sdk/community/inventory/parsers/base.py +295 -0
  51. alita_sdk/community/inventory/parsers/csharp_parser.py +907 -0
  52. alita_sdk/community/inventory/parsers/go_parser.py +851 -0
  53. alita_sdk/community/inventory/parsers/html_parser.py +389 -0
  54. alita_sdk/community/inventory/parsers/java_parser.py +593 -0
  55. alita_sdk/community/inventory/parsers/javascript_parser.py +629 -0
  56. alita_sdk/community/inventory/parsers/kotlin_parser.py +768 -0
  57. alita_sdk/community/inventory/parsers/markdown_parser.py +362 -0
  58. alita_sdk/community/inventory/parsers/python_parser.py +604 -0
  59. alita_sdk/community/inventory/parsers/rust_parser.py +858 -0
  60. alita_sdk/community/inventory/parsers/swift_parser.py +832 -0
  61. alita_sdk/community/inventory/parsers/text_parser.py +322 -0
  62. alita_sdk/community/inventory/parsers/yaml_parser.py +370 -0
  63. alita_sdk/community/inventory/patterns/__init__.py +61 -0
  64. alita_sdk/community/inventory/patterns/ast_adapter.py +380 -0
  65. alita_sdk/community/inventory/patterns/loader.py +348 -0
  66. alita_sdk/community/inventory/patterns/registry.py +198 -0
  67. alita_sdk/community/inventory/presets.py +535 -0
  68. alita_sdk/community/inventory/retrieval.py +1403 -0
  69. alita_sdk/community/inventory/toolkit.py +173 -0
  70. alita_sdk/community/inventory/toolkit_utils.py +176 -0
  71. alita_sdk/community/inventory/visualize.py +1370 -0
  72. alita_sdk/configurations/__init__.py +1 -1
  73. alita_sdk/configurations/ado.py +141 -20
  74. alita_sdk/configurations/bitbucket.py +94 -2
  75. alita_sdk/configurations/confluence.py +130 -1
  76. alita_sdk/configurations/figma.py +76 -0
  77. alita_sdk/configurations/gitlab.py +91 -0
  78. alita_sdk/configurations/jira.py +103 -0
  79. alita_sdk/configurations/openapi.py +329 -0
  80. alita_sdk/configurations/qtest.py +72 -1
  81. alita_sdk/configurations/report_portal.py +96 -0
  82. alita_sdk/configurations/sharepoint.py +148 -0
  83. alita_sdk/configurations/testio.py +83 -0
  84. alita_sdk/configurations/testrail.py +88 -0
  85. alita_sdk/configurations/xray.py +93 -0
  86. alita_sdk/configurations/zephyr_enterprise.py +93 -0
  87. alita_sdk/configurations/zephyr_essential.py +75 -0
  88. alita_sdk/runtime/clients/artifact.py +3 -3
  89. alita_sdk/runtime/clients/client.py +388 -46
  90. alita_sdk/runtime/clients/mcp_discovery.py +342 -0
  91. alita_sdk/runtime/clients/mcp_manager.py +262 -0
  92. alita_sdk/runtime/clients/sandbox_client.py +8 -21
  93. alita_sdk/runtime/langchain/_constants_bkup.py +1318 -0
  94. alita_sdk/runtime/langchain/assistant.py +157 -39
  95. alita_sdk/runtime/langchain/constants.py +647 -1
  96. alita_sdk/runtime/langchain/document_loaders/AlitaDocxMammothLoader.py +315 -3
  97. alita_sdk/runtime/langchain/document_loaders/AlitaExcelLoader.py +103 -60
  98. alita_sdk/runtime/langchain/document_loaders/AlitaJSONLinesLoader.py +77 -0
  99. alita_sdk/runtime/langchain/document_loaders/AlitaJSONLoader.py +10 -4
  100. alita_sdk/runtime/langchain/document_loaders/AlitaPowerPointLoader.py +226 -7
  101. alita_sdk/runtime/langchain/document_loaders/AlitaTextLoader.py +5 -2
  102. alita_sdk/runtime/langchain/document_loaders/constants.py +40 -19
  103. alita_sdk/runtime/langchain/langraph_agent.py +405 -84
  104. alita_sdk/runtime/langchain/utils.py +106 -7
  105. alita_sdk/runtime/llms/preloaded.py +2 -6
  106. alita_sdk/runtime/models/mcp_models.py +61 -0
  107. alita_sdk/runtime/skills/__init__.py +91 -0
  108. alita_sdk/runtime/skills/callbacks.py +498 -0
  109. alita_sdk/runtime/skills/discovery.py +540 -0
  110. alita_sdk/runtime/skills/executor.py +610 -0
  111. alita_sdk/runtime/skills/input_builder.py +371 -0
  112. alita_sdk/runtime/skills/models.py +330 -0
  113. alita_sdk/runtime/skills/registry.py +355 -0
  114. alita_sdk/runtime/skills/skill_runner.py +330 -0
  115. alita_sdk/runtime/toolkits/__init__.py +31 -0
  116. alita_sdk/runtime/toolkits/application.py +29 -10
  117. alita_sdk/runtime/toolkits/artifact.py +20 -11
  118. alita_sdk/runtime/toolkits/datasource.py +13 -6
  119. alita_sdk/runtime/toolkits/mcp.py +783 -0
  120. alita_sdk/runtime/toolkits/mcp_config.py +1048 -0
  121. alita_sdk/runtime/toolkits/planning.py +178 -0
  122. alita_sdk/runtime/toolkits/skill_router.py +238 -0
  123. alita_sdk/runtime/toolkits/subgraph.py +251 -6
  124. alita_sdk/runtime/toolkits/tools.py +356 -69
  125. alita_sdk/runtime/toolkits/vectorstore.py +11 -5
  126. alita_sdk/runtime/tools/__init__.py +10 -3
  127. alita_sdk/runtime/tools/application.py +27 -6
  128. alita_sdk/runtime/tools/artifact.py +511 -28
  129. alita_sdk/runtime/tools/data_analysis.py +183 -0
  130. alita_sdk/runtime/tools/function.py +67 -35
  131. alita_sdk/runtime/tools/graph.py +10 -4
  132. alita_sdk/runtime/tools/image_generation.py +148 -46
  133. alita_sdk/runtime/tools/llm.py +1003 -128
  134. alita_sdk/runtime/tools/loop.py +3 -1
  135. alita_sdk/runtime/tools/loop_output.py +3 -1
  136. alita_sdk/runtime/tools/mcp_inspect_tool.py +284 -0
  137. alita_sdk/runtime/tools/mcp_remote_tool.py +181 -0
  138. alita_sdk/runtime/tools/mcp_server_tool.py +8 -5
  139. alita_sdk/runtime/tools/planning/__init__.py +36 -0
  140. alita_sdk/runtime/tools/planning/models.py +246 -0
  141. alita_sdk/runtime/tools/planning/wrapper.py +607 -0
  142. alita_sdk/runtime/tools/router.py +2 -4
  143. alita_sdk/runtime/tools/sandbox.py +65 -48
  144. alita_sdk/runtime/tools/skill_router.py +776 -0
  145. alita_sdk/runtime/tools/tool.py +3 -1
  146. alita_sdk/runtime/tools/vectorstore.py +9 -3
  147. alita_sdk/runtime/tools/vectorstore_base.py +70 -14
  148. alita_sdk/runtime/utils/AlitaCallback.py +137 -21
  149. alita_sdk/runtime/utils/constants.py +5 -1
  150. alita_sdk/runtime/utils/mcp_client.py +492 -0
  151. alita_sdk/runtime/utils/mcp_oauth.py +361 -0
  152. alita_sdk/runtime/utils/mcp_sse_client.py +434 -0
  153. alita_sdk/runtime/utils/mcp_tools_discovery.py +124 -0
  154. alita_sdk/runtime/utils/serialization.py +155 -0
  155. alita_sdk/runtime/utils/streamlit.py +40 -13
  156. alita_sdk/runtime/utils/toolkit_utils.py +30 -9
  157. alita_sdk/runtime/utils/utils.py +36 -0
  158. alita_sdk/tools/__init__.py +134 -35
  159. alita_sdk/tools/ado/repos/__init__.py +51 -32
  160. alita_sdk/tools/ado/repos/repos_wrapper.py +148 -89
  161. alita_sdk/tools/ado/test_plan/__init__.py +25 -9
  162. alita_sdk/tools/ado/test_plan/test_plan_wrapper.py +23 -1
  163. alita_sdk/tools/ado/utils.py +1 -18
  164. alita_sdk/tools/ado/wiki/__init__.py +25 -12
  165. alita_sdk/tools/ado/wiki/ado_wrapper.py +291 -22
  166. alita_sdk/tools/ado/work_item/__init__.py +26 -13
  167. alita_sdk/tools/ado/work_item/ado_wrapper.py +73 -11
  168. alita_sdk/tools/advanced_jira_mining/__init__.py +11 -8
  169. alita_sdk/tools/aws/delta_lake/__init__.py +13 -9
  170. alita_sdk/tools/aws/delta_lake/tool.py +5 -1
  171. alita_sdk/tools/azure_ai/search/__init__.py +11 -8
  172. alita_sdk/tools/azure_ai/search/api_wrapper.py +1 -1
  173. alita_sdk/tools/base/tool.py +5 -1
  174. alita_sdk/tools/base_indexer_toolkit.py +271 -84
  175. alita_sdk/tools/bitbucket/__init__.py +17 -11
  176. alita_sdk/tools/bitbucket/api_wrapper.py +59 -11
  177. alita_sdk/tools/bitbucket/cloud_api_wrapper.py +49 -35
  178. alita_sdk/tools/browser/__init__.py +5 -4
  179. alita_sdk/tools/carrier/__init__.py +5 -6
  180. alita_sdk/tools/carrier/backend_reports_tool.py +6 -6
  181. alita_sdk/tools/carrier/run_ui_test_tool.py +6 -6
  182. alita_sdk/tools/carrier/ui_reports_tool.py +5 -5
  183. alita_sdk/tools/chunkers/__init__.py +3 -1
  184. alita_sdk/tools/chunkers/code/treesitter/treesitter.py +37 -13
  185. alita_sdk/tools/chunkers/sematic/json_chunker.py +1 -0
  186. alita_sdk/tools/chunkers/sematic/markdown_chunker.py +97 -6
  187. alita_sdk/tools/chunkers/sematic/proposal_chunker.py +1 -1
  188. alita_sdk/tools/chunkers/universal_chunker.py +270 -0
  189. alita_sdk/tools/cloud/aws/__init__.py +10 -7
  190. alita_sdk/tools/cloud/azure/__init__.py +10 -7
  191. alita_sdk/tools/cloud/gcp/__init__.py +10 -7
  192. alita_sdk/tools/cloud/k8s/__init__.py +10 -7
  193. alita_sdk/tools/code/linter/__init__.py +10 -8
  194. alita_sdk/tools/code/loaders/codesearcher.py +3 -2
  195. alita_sdk/tools/code/sonar/__init__.py +11 -8
  196. alita_sdk/tools/code_indexer_toolkit.py +82 -22
  197. alita_sdk/tools/confluence/__init__.py +22 -16
  198. alita_sdk/tools/confluence/api_wrapper.py +107 -30
  199. alita_sdk/tools/confluence/loader.py +14 -2
  200. alita_sdk/tools/custom_open_api/__init__.py +12 -5
  201. alita_sdk/tools/elastic/__init__.py +11 -8
  202. alita_sdk/tools/elitea_base.py +493 -30
  203. alita_sdk/tools/figma/__init__.py +58 -11
  204. alita_sdk/tools/figma/api_wrapper.py +1235 -143
  205. alita_sdk/tools/figma/figma_client.py +73 -0
  206. alita_sdk/tools/figma/toon_tools.py +2748 -0
  207. alita_sdk/tools/github/__init__.py +14 -15
  208. alita_sdk/tools/github/github_client.py +224 -100
  209. alita_sdk/tools/github/graphql_client_wrapper.py +119 -33
  210. alita_sdk/tools/github/schemas.py +14 -5
  211. alita_sdk/tools/github/tool.py +5 -1
  212. alita_sdk/tools/github/tool_prompts.py +9 -22
  213. alita_sdk/tools/gitlab/__init__.py +16 -11
  214. alita_sdk/tools/gitlab/api_wrapper.py +218 -48
  215. alita_sdk/tools/gitlab_org/__init__.py +10 -9
  216. alita_sdk/tools/gitlab_org/api_wrapper.py +63 -64
  217. alita_sdk/tools/google/bigquery/__init__.py +13 -12
  218. alita_sdk/tools/google/bigquery/tool.py +5 -1
  219. alita_sdk/tools/google_places/__init__.py +11 -8
  220. alita_sdk/tools/google_places/api_wrapper.py +1 -1
  221. alita_sdk/tools/jira/__init__.py +17 -10
  222. alita_sdk/tools/jira/api_wrapper.py +92 -41
  223. alita_sdk/tools/keycloak/__init__.py +11 -8
  224. alita_sdk/tools/localgit/__init__.py +9 -3
  225. alita_sdk/tools/localgit/local_git.py +62 -54
  226. alita_sdk/tools/localgit/tool.py +5 -1
  227. alita_sdk/tools/memory/__init__.py +12 -4
  228. alita_sdk/tools/non_code_indexer_toolkit.py +1 -0
  229. alita_sdk/tools/ocr/__init__.py +11 -8
  230. alita_sdk/tools/openapi/__init__.py +491 -106
  231. alita_sdk/tools/openapi/api_wrapper.py +1368 -0
  232. alita_sdk/tools/openapi/tool.py +20 -0
  233. alita_sdk/tools/pandas/__init__.py +20 -12
  234. alita_sdk/tools/pandas/api_wrapper.py +38 -25
  235. alita_sdk/tools/pandas/dataframe/generator/base.py +3 -1
  236. alita_sdk/tools/postman/__init__.py +10 -9
  237. alita_sdk/tools/pptx/__init__.py +11 -10
  238. alita_sdk/tools/pptx/pptx_wrapper.py +1 -1
  239. alita_sdk/tools/qtest/__init__.py +31 -11
  240. alita_sdk/tools/qtest/api_wrapper.py +2135 -86
  241. alita_sdk/tools/rally/__init__.py +10 -9
  242. alita_sdk/tools/rally/api_wrapper.py +1 -1
  243. alita_sdk/tools/report_portal/__init__.py +12 -8
  244. alita_sdk/tools/salesforce/__init__.py +10 -8
  245. alita_sdk/tools/servicenow/__init__.py +17 -15
  246. alita_sdk/tools/servicenow/api_wrapper.py +1 -1
  247. alita_sdk/tools/sharepoint/__init__.py +10 -7
  248. alita_sdk/tools/sharepoint/api_wrapper.py +129 -38
  249. alita_sdk/tools/sharepoint/authorization_helper.py +191 -1
  250. alita_sdk/tools/sharepoint/utils.py +8 -2
  251. alita_sdk/tools/slack/__init__.py +10 -7
  252. alita_sdk/tools/slack/api_wrapper.py +2 -2
  253. alita_sdk/tools/sql/__init__.py +12 -9
  254. alita_sdk/tools/testio/__init__.py +10 -7
  255. alita_sdk/tools/testrail/__init__.py +11 -10
  256. alita_sdk/tools/testrail/api_wrapper.py +1 -1
  257. alita_sdk/tools/utils/__init__.py +9 -4
  258. alita_sdk/tools/utils/content_parser.py +103 -18
  259. alita_sdk/tools/utils/text_operations.py +410 -0
  260. alita_sdk/tools/utils/tool_prompts.py +79 -0
  261. alita_sdk/tools/vector_adapters/VectorStoreAdapter.py +30 -13
  262. alita_sdk/tools/xray/__init__.py +13 -9
  263. alita_sdk/tools/yagmail/__init__.py +9 -3
  264. alita_sdk/tools/zephyr/__init__.py +10 -7
  265. alita_sdk/tools/zephyr_enterprise/__init__.py +11 -7
  266. alita_sdk/tools/zephyr_essential/__init__.py +10 -7
  267. alita_sdk/tools/zephyr_essential/api_wrapper.py +30 -13
  268. alita_sdk/tools/zephyr_essential/client.py +2 -2
  269. alita_sdk/tools/zephyr_scale/__init__.py +11 -8
  270. alita_sdk/tools/zephyr_scale/api_wrapper.py +2 -2
  271. alita_sdk/tools/zephyr_squad/__init__.py +10 -7
  272. {alita_sdk-0.3.379.dist-info → alita_sdk-0.3.627.dist-info}/METADATA +154 -8
  273. alita_sdk-0.3.627.dist-info/RECORD +468 -0
  274. alita_sdk-0.3.627.dist-info/entry_points.txt +2 -0
  275. alita_sdk-0.3.379.dist-info/RECORD +0 -360
  276. {alita_sdk-0.3.379.dist-info → alita_sdk-0.3.627.dist-info}/WHEEL +0 -0
  277. {alita_sdk-0.3.379.dist-info → alita_sdk-0.3.627.dist-info}/licenses/LICENSE +0 -0
  278. {alita_sdk-0.3.379.dist-info → alita_sdk-0.3.627.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,3113 @@
1
+ """
2
+ Agent commands for Alita CLI.
3
+
4
+ Provides commands to work with agents interactively or in handoff mode,
5
+ supporting both platform agents and local agent definition files.
6
+ """
7
+
8
+ import asyncio
9
+ import click
10
+ import json
11
+ import logging
12
+ import sqlite3
13
+ import sys
14
+ import re
15
+ from typing import Optional, Dict, Any, List
16
+ from pathlib import Path
17
+ from datetime import datetime
18
+ import yaml
19
+
20
+ from rich.console import Console
21
+ from rich.panel import Panel
22
+ from rich.table import Table
23
+ from rich.markdown import Markdown
24
+ from rich import box
25
+ from rich.text import Text
26
+ from rich.status import Status
27
+ from rich.live import Live
28
+
29
+ from .cli import get_client
30
+ # Import from refactored modules
31
+ from .agent_ui import print_welcome, print_help, display_output, extract_output_from_result
32
+ from .agent_loader import load_agent_definition
33
+ from .agent_executor import create_llm_instance, create_agent_executor, create_agent_executor_with_mcp
34
+ from .toolkit_loader import load_toolkit_config, load_toolkit_configs
35
+ from .callbacks import create_cli_callback, CLICallbackHandler
36
+ from .input_handler import get_input_handler, styled_input, styled_selection_input
37
+ # Context management for chat history
38
+ from .context import CLIContextManager, CLIMessage, purge_old_sessions as purge_context_sessions
39
+ # Test execution utilities
40
+ from .testcases import (
41
+ parse_test_case,
42
+ resolve_toolkit_config_path,
43
+ build_bulk_data_gen_prompt,
44
+ build_single_test_execution_prompt,
45
+ build_single_test_validation_prompt,
46
+ extract_json_from_text,
47
+ create_fallback_result_for_test,
48
+ print_validation_diagnostics,
49
+ TestLogCapture,
50
+ create_executor_from_cache,
51
+ cleanup_executor_cache,
52
+ extract_toolkit_name,
53
+ # New helper functions
54
+ load_test_runner_agent,
55
+ load_data_generator_agent,
56
+ load_validator_agent,
57
+ discover_test_case_files,
58
+ validate_test_case_files,
59
+ print_test_execution_header,
60
+ execute_bulk_data_generation,
61
+ execute_single_test_case,
62
+ validate_single_test_case,
63
+ generate_summary_report,
64
+ save_structured_report,
65
+ print_test_execution_summary,
66
+ # Workflow orchestration
67
+ parse_all_test_cases,
68
+ filter_test_cases_needing_data_gen,
69
+ execute_all_test_cases,
70
+ )
71
+
72
+ logger = logging.getLogger(__name__)
73
+
74
+ # Create a rich console for beautiful output
75
+ console = Console()
76
+
77
+ def _get_alita_system_prompt(config) -> str:
78
+ """
79
+ Get the Alita system prompt from user config or fallback to default.
80
+
81
+ Checks for $ALITA_DIR/agents/default.agent.md first, then falls back
82
+ to the built-in DEFAULT_PROMPT.
83
+
84
+ Returns:
85
+ The system prompt string for Alita
86
+ """
87
+ from .agent.default import DEFAULT_PROMPT
88
+
89
+ # Check for user-customized prompt
90
+ custom_prompt_path = Path(config.agents_dir) / 'default.agent.md'
91
+
92
+ if custom_prompt_path.exists():
93
+ try:
94
+ content = custom_prompt_path.read_text(encoding='utf-8')
95
+ # Parse the agent.md file - extract system_prompt from frontmatter or use content
96
+ if content.startswith('---'):
97
+ # Has YAML frontmatter, try to parse
98
+ try:
99
+ parts = content.split('---', 2)
100
+ if len(parts) >= 3:
101
+ frontmatter = yaml.safe_load(parts[1])
102
+ body = parts[2].strip()
103
+ # Use system_prompt from frontmatter if present, otherwise use body
104
+ return frontmatter.get('system_prompt', body) if frontmatter else body
105
+ except Exception:
106
+ pass
107
+ # No frontmatter or parsing failed, use entire content as prompt
108
+ return content.strip()
109
+ except Exception as e:
110
+ logger.debug(f"Failed to load custom Alita prompt from {custom_prompt_path}: {e}")
111
+
112
+ return DEFAULT_PROMPT
113
+
114
+
115
+ def _get_inventory_system_prompt(config) -> str:
116
+ """
117
+ Get the Inventory agent system prompt from user config or fallback to default.
118
+
119
+ Checks for $ALITA_DIR/agents/inventory.agent.md first, then falls back
120
+ to the default prompt with inventory-specific instructions.
121
+
122
+ Returns:
123
+ The system prompt string for Inventory agent
124
+ """
125
+ from .agent.default import DEFAULT_PROMPT
126
+
127
+ # Check for user-customized prompt
128
+ custom_prompt_path = Path(config.agents_dir) / 'inventory.agent.md'
129
+
130
+ if custom_prompt_path.exists():
131
+ try:
132
+ content = custom_prompt_path.read_text(encoding='utf-8')
133
+ # Parse the agent.md file - extract system_prompt from frontmatter or use content
134
+ if content.startswith('---'):
135
+ try:
136
+ parts = content.split('---', 2)
137
+ if len(parts) >= 3:
138
+ frontmatter = yaml.safe_load(parts[1])
139
+ body = parts[2].strip()
140
+ return frontmatter.get('system_prompt', body) if frontmatter else body
141
+ except Exception:
142
+ pass
143
+ return content.strip()
144
+ except Exception as e:
145
+ logger.debug(f"Failed to load custom Inventory prompt from {custom_prompt_path}: {e}")
146
+
147
+ # Use default prompt + inventory toolkit instructions
148
+ inventory_context = """
149
+
150
+ ## Inventory Knowledge Graph
151
+
152
+ You have access to the Inventory toolkit for querying a knowledge graph of software entities and relationships.
153
+ Use these tools to help users understand their codebase:
154
+
155
+ - **search_entities**: Find entities by name, type, or path patterns
156
+ - **get_entity**: Get full details of a specific entity
157
+ - **get_relationships**: Find relationships from/to an entity
158
+ - **impact_analysis**: Analyze what depends on an entity (useful for change impact)
159
+ - **get_graph_stats**: Get statistics about the knowledge graph
160
+
161
+ When answering questions about the codebase, use these tools to provide accurate, citation-backed answers.
162
+ """
163
+ return DEFAULT_PROMPT + inventory_context
164
+
165
+
166
+ def _resolve_inventory_path(path: str, work_dir: Optional[str] = None) -> Optional[str]:
167
+ """
168
+ Resolve an inventory/knowledge graph file path.
169
+
170
+ Tries locations in order:
171
+ 1. Absolute path
172
+ 2. Relative to current working directory (or work_dir if provided)
173
+ 3. Relative to .alita/inventory/ in current directory
174
+ 4. Relative to .alita/inventory/ in work_dir (if different)
175
+
176
+ Args:
177
+ path: The path to resolve (can be relative or absolute)
178
+ work_dir: Optional workspace directory to check
179
+
180
+ Returns:
181
+ Absolute path to the file if found, None otherwise
182
+ """
183
+ # Expand user home directory
184
+ path = str(Path(path).expanduser())
185
+
186
+ # Try absolute path first
187
+ if Path(path).is_absolute() and Path(path).exists():
188
+ return str(Path(path).resolve())
189
+
190
+ # Try relative to current working directory
191
+ cwd = Path.cwd()
192
+ cwd_path = cwd / path
193
+ if cwd_path.exists():
194
+ return str(cwd_path.resolve())
195
+
196
+ # Try .alita/inventory/ in current directory
197
+ alita_inventory_path = cwd / '.alita' / 'inventory' / path
198
+ if alita_inventory_path.exists():
199
+ return str(alita_inventory_path.resolve())
200
+
201
+ # If work_dir is different from cwd, try there too
202
+ if work_dir:
203
+ work_path = Path(work_dir)
204
+ if work_path != cwd:
205
+ # Try relative to work_dir
206
+ work_rel_path = work_path / path
207
+ if work_rel_path.exists():
208
+ return str(work_rel_path.resolve())
209
+
210
+ # Try .alita/inventory/ in work_dir
211
+ work_alita_path = work_path / '.alita' / 'inventory' / path
212
+ if work_alita_path.exists():
213
+ return str(work_alita_path.resolve())
214
+
215
+ return None
216
+
217
+
218
+ def _build_inventory_config(path: str, work_dir: Optional[str] = None) -> Optional[Dict[str, Any]]:
219
+ """
220
+ Build an inventory toolkit configuration from a file path.
221
+
222
+ The toolkit name is derived from the filename (stem).
223
+ All available tools are included.
224
+
225
+ Args:
226
+ path: Path to the knowledge graph JSON file
227
+ work_dir: Optional workspace directory for path resolution
228
+
229
+ Returns:
230
+ Toolkit configuration dict if file found, None otherwise
231
+ """
232
+ # Resolve the path
233
+ resolved_path = _resolve_inventory_path(path, work_dir)
234
+ if not resolved_path:
235
+ return None
236
+
237
+ # Validate it's a JSON file
238
+ if not resolved_path.endswith('.json'):
239
+ return None
240
+
241
+ # Validate file exists and is readable
242
+ try:
243
+ with open(resolved_path, 'r') as f:
244
+ # Just check it's valid JSON
245
+ json.load(f)
246
+ except (IOError, json.JSONDecodeError):
247
+ return None
248
+
249
+ # Extract toolkit name from filename (e.g., 'alita' from 'alita.json')
250
+ toolkit_name = Path(resolved_path).stem
251
+
252
+ # Build configuration with all available tools
253
+ from .toolkit_loader import INVENTORY_TOOLS
254
+
255
+ return {
256
+ 'type': 'inventory',
257
+ 'toolkit_name': toolkit_name,
258
+ 'graph_path': resolved_path,
259
+ 'base_directory': work_dir,
260
+ 'selected_tools': INVENTORY_TOOLS,
261
+ }
262
+
263
+
264
+ def _get_inventory_json_files(work_dir: Optional[str] = None) -> List[str]:
265
+ """
266
+ Get list of .json files for inventory path completion.
267
+
268
+ Searches:
269
+ 1. Current working directory (*.json files)
270
+ 2. .alita/inventory/ directory (*.json files)
271
+ 3. work_dir and work_dir/.alita/inventory/ if different from cwd
272
+
273
+ Args:
274
+ work_dir: Optional workspace directory
275
+
276
+ Returns:
277
+ List of relative or display paths for completion
278
+ """
279
+ suggestions = []
280
+ seen = set()
281
+
282
+ cwd = Path.cwd()
283
+
284
+ # Current directory .json files
285
+ for f in cwd.glob('*.json'):
286
+ if f.name not in seen:
287
+ suggestions.append(f.name)
288
+ seen.add(f.name)
289
+
290
+ # .alita/inventory/ directory
291
+ alita_inv = cwd / '.alita' / 'inventory'
292
+ if alita_inv.exists():
293
+ for f in alita_inv.glob('*.json'):
294
+ display = f'.alita/inventory/{f.name}'
295
+ if display not in seen:
296
+ suggestions.append(display)
297
+ seen.add(display)
298
+
299
+ # work_dir if different
300
+ if work_dir:
301
+ work_path = Path(work_dir)
302
+ if work_path != cwd:
303
+ for f in work_path.glob('*.json'):
304
+ if f.name not in seen:
305
+ suggestions.append(f.name)
306
+ seen.add(f.name)
307
+
308
+ work_alita_inv = work_path / '.alita' / 'inventory'
309
+ if work_alita_inv.exists():
310
+ for f in work_alita_inv.glob('*.json'):
311
+ display = f'.alita/inventory/{f.name}'
312
+ if display not in seen:
313
+ suggestions.append(display)
314
+ seen.add(display)
315
+
316
+ return sorted(suggestions)
317
+
318
+
319
+ def _load_mcp_tools(agent_def: Dict[str, Any], mcp_config_path: str) -> List[Dict[str, Any]]:
320
+ """Load MCP tools from agent definition with tool-level filtering.
321
+
322
+ Args:
323
+ agent_def: Agent definition dictionary containing mcps list
324
+ mcp_config_path: Path to mcp.json configuration file (workspace-level)
325
+
326
+ Returns:
327
+ List of toolkit configurations for MCP servers
328
+ """
329
+ from .mcp_loader import load_mcp_tools
330
+ return load_mcp_tools(agent_def, mcp_config_path)
331
+
332
+
333
+ def _setup_local_agent_executor(client, agent_def: Dict[str, Any], toolkit_config: tuple,
334
+ config, model: Optional[str], temperature: Optional[float],
335
+ max_tokens: Optional[int], memory, allowed_directories: Optional[List[str]],
336
+ plan_state: Optional[Dict] = None):
337
+ """Setup local agent executor with all configurations.
338
+
339
+ Args:
340
+ allowed_directories: List of allowed directories for filesystem access.
341
+ First directory is the primary/base directory.
342
+
343
+ Returns:
344
+ Tuple of (agent_executor, mcp_session_manager, llm, llm_model, filesystem_tools, terminal_tools, planning_tools)
345
+ """
346
+ # Load toolkit configs
347
+ toolkit_configs = load_toolkit_configs(agent_def, toolkit_config)
348
+
349
+ # Load MCP tools
350
+ mcp_toolkit_configs = _load_mcp_tools(agent_def, config.mcp_config_path)
351
+ toolkit_configs.extend(mcp_toolkit_configs)
352
+
353
+ # Create LLM instance
354
+ llm, llm_model, llm_temperature, llm_max_tokens = create_llm_instance(
355
+ client, model, agent_def, temperature, max_tokens
356
+ )
357
+
358
+ # Add filesystem tools if directories are provided
359
+ filesystem_tools = None
360
+ terminal_tools = None
361
+ if allowed_directories:
362
+ from .tools import get_filesystem_tools, get_terminal_tools
363
+ preset = agent_def.get('filesystem_tools_preset')
364
+ include_tools = agent_def.get('filesystem_tools_include')
365
+ exclude_tools = agent_def.get('filesystem_tools_exclude')
366
+
367
+ # First directory is the primary base directory
368
+ base_dir = allowed_directories[0]
369
+ extra_dirs = allowed_directories[1:] if len(allowed_directories) > 1 else None
370
+ filesystem_tools = get_filesystem_tools(base_dir, include_tools, exclude_tools, preset, extra_dirs)
371
+
372
+ # Terminal tools use primary directory as cwd
373
+ terminal_tools = get_terminal_tools(base_dir)
374
+
375
+ tool_count = len(filesystem_tools) + len(terminal_tools)
376
+ if len(allowed_directories) == 1:
377
+ access_msg = f"✓ Granted filesystem & terminal access to: {base_dir} ({tool_count} tools)"
378
+ else:
379
+ access_msg = f"✓ Granted filesystem & terminal access to {len(allowed_directories)} directories ({tool_count} tools)"
380
+ if preset:
381
+ access_msg += f" [preset: {preset}]"
382
+ if include_tools:
383
+ access_msg += f" [include: {', '.join(include_tools)}]"
384
+ if exclude_tools:
385
+ access_msg += f" [exclude: {', '.join(exclude_tools)}]"
386
+ console.print(f"[dim]{access_msg}[/dim]")
387
+
388
+ # Add planning tools (always available)
389
+ planning_tools = None
390
+ plan_state_obj = None
391
+ if plan_state is not None:
392
+ from .tools import get_planning_tools, PlanState
393
+ # Create a plan callback to update the dict when plan changes
394
+ def plan_callback(state: PlanState):
395
+ plan_state['title'] = state.title
396
+ plan_state['steps'] = state.to_dict()['steps']
397
+ plan_state['session_id'] = state.session_id
398
+
399
+ # Get session_id from plan_state dict if provided
400
+ session_id = plan_state.get('session_id')
401
+ planning_tools, plan_state_obj = get_planning_tools(
402
+ plan_state=None,
403
+ plan_callback=plan_callback,
404
+ session_id=session_id
405
+ )
406
+ console.print(f"[dim]✓ Planning tools enabled ({len(planning_tools)} tools) [session: {plan_state_obj.session_id}][/dim]")
407
+
408
+ # Check if we have tools
409
+ has_tools = bool(agent_def.get('tools') or toolkit_configs or filesystem_tools or terminal_tools or planning_tools)
410
+ has_mcp = any(tc.get('toolkit_type') == 'mcp' for tc in toolkit_configs)
411
+
412
+ if not has_tools:
413
+ return None, None, llm, llm_model, filesystem_tools, terminal_tools, planning_tools
414
+
415
+ # Create agent executor with or without MCP
416
+ mcp_session_manager = None
417
+ if has_mcp:
418
+ # Create persistent event loop for MCP tools
419
+ from alita_sdk.runtime.tools.llm import LLMNode
420
+ if not hasattr(LLMNode, '_persistent_loop') or \
421
+ LLMNode._persistent_loop is None or \
422
+ LLMNode._persistent_loop.is_closed():
423
+ LLMNode._persistent_loop = asyncio.new_event_loop()
424
+ console.print("[dim]Created persistent event loop for MCP tools[/dim]")
425
+
426
+ # Load MCP tools using persistent loop
427
+ loop = LLMNode._persistent_loop
428
+ asyncio.set_event_loop(loop)
429
+ agent_executor, mcp_session_manager = loop.run_until_complete(
430
+ create_agent_executor_with_mcp(
431
+ client, agent_def, toolkit_configs,
432
+ llm, llm_model, llm_temperature, llm_max_tokens, memory,
433
+ filesystem_tools=filesystem_tools,
434
+ terminal_tools=terminal_tools,
435
+ planning_tools=planning_tools
436
+ )
437
+ )
438
+ else:
439
+ agent_executor = create_agent_executor(
440
+ client, agent_def, toolkit_configs,
441
+ llm, llm_model, llm_temperature, llm_max_tokens, memory,
442
+ filesystem_tools=filesystem_tools,
443
+ terminal_tools=terminal_tools,
444
+ planning_tools=planning_tools
445
+ )
446
+
447
+ return agent_executor, mcp_session_manager, llm, llm_model, filesystem_tools, terminal_tools, planning_tools
448
+
449
+
450
+ def _select_model_interactive(client) -> Optional[Dict[str, Any]]:
451
+ """
452
+ Show interactive menu to select a model from available models.
453
+
454
+ Returns:
455
+ Selected model info dict or None if cancelled
456
+ """
457
+ console.print("\n🔧 [bold cyan]Select a model:[/bold cyan]\n")
458
+
459
+ try:
460
+ # Use the new get_available_models API
461
+ models = client.get_available_models()
462
+ if not models:
463
+ console.print("[yellow]No models available from the platform.[/yellow]")
464
+ return None
465
+
466
+ # Build models list - API returns items[].name
467
+ models_list = []
468
+ for model in models:
469
+ model_name = model.get('name')
470
+ if model_name:
471
+ models_list.append({
472
+ 'name': model_name,
473
+ 'id': model.get('id'),
474
+ 'model_data': model
475
+ })
476
+
477
+ if not models_list:
478
+ console.print("[yellow]No models found.[/yellow]")
479
+ return None
480
+
481
+ # Display models with numbers
482
+ table = Table(show_header=True, header_style="bold cyan", box=box.SIMPLE)
483
+ table.add_column("#", style="dim", width=4)
484
+ table.add_column("Model", style="cyan")
485
+
486
+ for i, model in enumerate(models_list, 1):
487
+ table.add_row(str(i), model['name'])
488
+
489
+ console.print(table)
490
+ console.print(f"\n[dim]0. Cancel[/dim]")
491
+
492
+ # Get user selection using styled input
493
+ while True:
494
+ try:
495
+ choice = styled_selection_input("Select model number")
496
+
497
+ if choice == '0':
498
+ return None
499
+
500
+ idx = int(choice) - 1
501
+ if 0 <= idx < len(models_list):
502
+ selected = models_list[idx]
503
+ console.print(f"✓ [green]Selected:[/green] [bold]{selected['name']}[/bold]")
504
+ return selected
505
+ else:
506
+ console.print(f"[yellow]Invalid selection. Please enter a number between 0 and {len(models_list)}[/yellow]")
507
+ except ValueError:
508
+ console.print("[yellow]Please enter a valid number[/yellow]")
509
+ except (KeyboardInterrupt, EOFError):
510
+ return None
511
+
512
+ except Exception as e:
513
+ console.print(f"[red]Error fetching models: {e}[/red]")
514
+ return None
515
+
516
+
517
+ def _select_mcp_interactive(config) -> Optional[Dict[str, Any]]:
518
+ """
519
+ Show interactive menu to select an MCP server from mcp.json.
520
+
521
+ Returns:
522
+ Selected MCP server config dict or None if cancelled
523
+ """
524
+ from .mcp_loader import load_mcp_config
525
+
526
+ console.print("\n🔌 [bold cyan]Select an MCP server to add:[/bold cyan]\n")
527
+
528
+ mcp_config = load_mcp_config(config.mcp_config_path)
529
+ mcp_servers = mcp_config.get('mcpServers', {})
530
+
531
+ if not mcp_servers:
532
+ console.print(f"[yellow]No MCP servers found in {config.mcp_config_path}[/yellow]")
533
+ return None
534
+
535
+ servers_list = list(mcp_servers.items())
536
+
537
+ # Display servers with numbers
538
+ table = Table(show_header=True, header_style="bold cyan", box=box.SIMPLE)
539
+ table.add_column("#", style="dim", width=4)
540
+ table.add_column("Server", style="cyan")
541
+ table.add_column("Type", style="dim")
542
+ table.add_column("Command/URL", style="dim")
543
+
544
+ for i, (name, server_config) in enumerate(servers_list, 1):
545
+ server_type = server_config.get('type', 'stdio')
546
+ cmd_or_url = server_config.get('url') or server_config.get('command', '')
547
+ table.add_row(str(i), name, server_type, cmd_or_url[:40])
548
+
549
+ console.print(table)
550
+ console.print(f"\n[dim]0. Cancel[/dim]")
551
+
552
+ # Get user selection using styled input
553
+ while True:
554
+ try:
555
+ choice = styled_selection_input("Select MCP server number")
556
+
557
+ if choice == '0':
558
+ return None
559
+
560
+ idx = int(choice) - 1
561
+ if 0 <= idx < len(servers_list):
562
+ name, server_config = servers_list[idx]
563
+ console.print(f"✓ [green]Selected:[/green] [bold]{name}[/bold]")
564
+ return {'name': name, 'config': server_config}
565
+ else:
566
+ console.print(f"[yellow]Invalid selection. Please enter a number between 0 and {len(servers_list)}[/yellow]")
567
+ except ValueError:
568
+ console.print("[yellow]Please enter a valid number[/yellow]")
569
+ except (KeyboardInterrupt, EOFError):
570
+ return None
571
+
572
+
573
+ def _select_toolkit_interactive(config) -> Optional[Dict[str, Any]]:
574
+ """
575
+ Show interactive menu to select a toolkit from $ALITA_DIR/tools.
576
+
577
+ Returns:
578
+ Selected toolkit config dict or None if cancelled
579
+ """
580
+ console.print("\n🧰 [bold cyan]Select a toolkit to add:[/bold cyan]\n")
581
+
582
+ tools_dir = Path(config.tools_dir)
583
+
584
+ if not tools_dir.exists():
585
+ console.print(f"[yellow]Tools directory not found: {tools_dir}[/yellow]")
586
+ return None
587
+
588
+ # Find all toolkit config files
589
+ toolkit_files = []
590
+ for pattern in ['*.json', '*.yaml', '*.yml']:
591
+ toolkit_files.extend(tools_dir.glob(pattern))
592
+
593
+ if not toolkit_files:
594
+ console.print(f"[yellow]No toolkit configurations found in {tools_dir}[/yellow]")
595
+ return None
596
+
597
+ # Load toolkit info
598
+ toolkits_list = []
599
+ for file_path in toolkit_files:
600
+ try:
601
+ config_data = load_toolkit_config(str(file_path))
602
+ toolkits_list.append({
603
+ 'file': str(file_path),
604
+ 'name': config_data.get('toolkit_name') or config_data.get('name') or file_path.stem,
605
+ 'type': config_data.get('toolkit_type') or config_data.get('type', 'unknown'),
606
+ 'config': config_data
607
+ })
608
+ except Exception as e:
609
+ logger.debug(f"Failed to load toolkit config {file_path}: {e}")
610
+
611
+ if not toolkits_list:
612
+ console.print(f"[yellow]No valid toolkit configurations found in {tools_dir}[/yellow]")
613
+ return None
614
+
615
+ # Display toolkits with numbers
616
+ table = Table(show_header=True, header_style="bold cyan", box=box.SIMPLE)
617
+ table.add_column("#", style="dim", width=4)
618
+ table.add_column("Toolkit", style="cyan")
619
+ table.add_column("Type", style="dim")
620
+ table.add_column("File", style="dim")
621
+
622
+ for i, toolkit in enumerate(toolkits_list, 1):
623
+ table.add_row(str(i), toolkit['name'], toolkit['type'], Path(toolkit['file']).name)
624
+
625
+ console.print(table)
626
+ console.print(f"\n[dim]0. Cancel[/dim]")
627
+
628
+ # Get user selection using styled input
629
+ while True:
630
+ try:
631
+ choice = styled_selection_input("Select toolkit number")
632
+
633
+ if choice == '0':
634
+ return None
635
+
636
+ idx = int(choice) - 1
637
+ if 0 <= idx < len(toolkits_list):
638
+ selected = toolkits_list[idx]
639
+ console.print(f"✓ [green]Selected:[/green] [bold]{selected['name']}[/bold]")
640
+ return selected
641
+ else:
642
+ console.print(f"[yellow]Invalid selection. Please enter a number between 0 and {len(toolkits_list)}[/yellow]")
643
+ except ValueError:
644
+ console.print("[yellow]Please enter a valid number[/yellow]")
645
+ except (KeyboardInterrupt, EOFError):
646
+ return None
647
+
648
+
649
+ def _list_available_toolkits(config) -> List[str]:
650
+ """
651
+ List names of all available toolkits in $ALITA_DIR/tools.
652
+
653
+ Returns:
654
+ List of toolkit names
655
+ """
656
+ tools_dir = Path(config.tools_dir)
657
+
658
+ if not tools_dir.exists():
659
+ return []
660
+
661
+ toolkit_names = []
662
+ for pattern in ['*.json', '*.yaml', '*.yml']:
663
+ for file_path in tools_dir.glob(pattern):
664
+ try:
665
+ config_data = load_toolkit_config(str(file_path))
666
+ name = config_data.get('toolkit_name') or config_data.get('name') or file_path.stem
667
+ toolkit_names.append(name)
668
+ except Exception:
669
+ pass
670
+
671
+ return toolkit_names
672
+
673
+
674
+ def _find_toolkit_by_name(config, toolkit_name: str) -> Optional[Dict[str, Any]]:
675
+ """
676
+ Find a toolkit by name in $ALITA_DIR/tools.
677
+
678
+ Args:
679
+ config: CLI configuration
680
+ toolkit_name: Name of the toolkit to find (case-insensitive)
681
+
682
+ Returns:
683
+ Toolkit config dict or None if not found
684
+ """
685
+ tools_dir = Path(config.tools_dir)
686
+
687
+ if not tools_dir.exists():
688
+ return None
689
+
690
+ toolkit_name_lower = toolkit_name.lower()
691
+
692
+ for pattern in ['*.json', '*.yaml', '*.yml']:
693
+ for file_path in tools_dir.glob(pattern):
694
+ try:
695
+ config_data = load_toolkit_config(str(file_path))
696
+ name = config_data.get('toolkit_name') or config_data.get('name') or file_path.stem
697
+
698
+ # Match by name (case-insensitive) or file stem
699
+ if name.lower() == toolkit_name_lower or file_path.stem.lower() == toolkit_name_lower:
700
+ return {
701
+ 'file': str(file_path),
702
+ 'name': name,
703
+ 'type': config_data.get('toolkit_type') or config_data.get('type', 'unknown'),
704
+ 'config': config_data
705
+ }
706
+ except Exception:
707
+ pass
708
+
709
+ return None
710
+
711
+
712
+ def _select_agent_interactive(client, config) -> Optional[str]:
713
+ """
714
+ Show interactive menu to select an agent from platform and local agents.
715
+
716
+ Returns:
717
+ Agent source (name/id for platform, file path for local, '__direct__' for direct chat,
718
+ '__inventory__' for inventory agent) or None if cancelled
719
+ """
720
+ from .config import CLIConfig
721
+
722
+ console.print("\n🤖 [bold cyan]Select an agent to chat with:[/bold cyan]\n")
723
+
724
+ # Built-in agents
725
+ console.print(f"1. [[bold]💬 Alita[/bold]] [cyan]Chat directly with LLM (no agent)[/cyan]")
726
+ console.print(f" [dim]Direct conversation with the model without agent configuration[/dim]")
727
+ console.print(f"2. [[bold]📊 Inventory[/bold]] [cyan]Knowledge graph builder agent[/cyan]")
728
+ console.print(f" [dim]Build inventories from connected toolkits (use --toolkit-config to add sources)[/dim]")
729
+
730
+ agents_list = []
731
+
732
+ # Load platform agents
733
+ try:
734
+ platform_agents = client.get_list_of_apps()
735
+ for agent in platform_agents:
736
+ agents_list.append({
737
+ 'type': 'platform',
738
+ 'name': agent['name'],
739
+ 'source': agent['name'],
740
+ 'description': agent.get('description', '')[:60]
741
+ })
742
+ except Exception as e:
743
+ logger.debug(f"Failed to load platform agents: {e}")
744
+
745
+ # Load local agents
746
+ agents_dir = config.agents_dir
747
+ search_dir = Path(agents_dir)
748
+
749
+ if search_dir.exists():
750
+ for pattern in ['*.agent.md', '*.agent.yaml', '*.agent.yml', '*.agent.json']:
751
+ for file_path in search_dir.rglob(pattern):
752
+ try:
753
+ agent_def = load_agent_definition(str(file_path))
754
+ agents_list.append({
755
+ 'type': 'local',
756
+ 'name': agent_def.get('name', file_path.stem),
757
+ 'source': str(file_path),
758
+ 'description': agent_def.get('description', '')[:60]
759
+ })
760
+ except Exception as e:
761
+ logger.debug(f"Failed to load {file_path}: {e}")
762
+
763
+ # Display agents with numbers using rich (starting from 3 since 1-2 are built-in)
764
+ for i, agent in enumerate(agents_list, 3):
765
+ agent_type = "📦 Platform" if agent['type'] == 'platform' else "📁 Local"
766
+ console.print(f"{i}. [[bold]{agent_type}[/bold]] [cyan]{agent['name']}[/cyan]")
767
+ if agent['description']:
768
+ console.print(f" [dim]{agent['description']}[/dim]")
769
+
770
+ console.print(f"\n[dim]0. Cancel[/dim]")
771
+
772
+ # Get user selection using styled input
773
+ while True:
774
+ try:
775
+ choice = styled_selection_input("Select agent number")
776
+
777
+ if choice == '0':
778
+ return None
779
+
780
+ if choice == '1':
781
+ console.print(f"✓ [green]Selected:[/green] [bold]Alita[/bold]")
782
+ return '__direct__'
783
+
784
+ if choice == '2':
785
+ console.print(f"✓ [green]Selected:[/green] [bold]Inventory[/bold]")
786
+ return '__inventory__'
787
+
788
+ idx = int(choice) - 3 # Offset by 3 since 1-2 are built-in agents
789
+ if 0 <= idx < len(agents_list):
790
+ selected = agents_list[idx]
791
+ console.print(f"✓ [green]Selected:[/green] [bold]{selected['name']}[/bold]")
792
+ return selected['source']
793
+ else:
794
+ console.print(f"[yellow]Invalid selection. Please enter a number between 0 and {len(agents_list) + 2}[/yellow]")
795
+ except ValueError:
796
+ console.print("[yellow]Please enter a valid number[/yellow]")
797
+ except (KeyboardInterrupt, EOFError):
798
+ console.print("\n[dim]Cancelled.[/dim]")
799
+ return None
800
+
801
+
802
+ @click.group()
803
+ def agent():
804
+ """Agent testing and interaction commands."""
805
+ pass
806
+
807
+
808
+ @agent.command('list')
809
+ @click.option('--local', is_flag=True, help='List local agent definition files')
810
+ @click.option('--directory', default=None, help='Directory to search for local agents (defaults to AGENTS_DIR from .env)')
811
+ @click.pass_context
812
+ def agent_list(ctx, local: bool, directory: Optional[str]):
813
+ """
814
+ List available agents.
815
+
816
+ By default, lists agents from the platform.
817
+ Use --local to list agent definition files in the local directory.
818
+ """
819
+ formatter = ctx.obj['formatter']
820
+ config = ctx.obj['config']
821
+
822
+ try:
823
+ if local:
824
+ # List local agent definition files
825
+ if directory is None:
826
+ directory = config.agents_dir
827
+ search_dir = Path(directory)
828
+
829
+ if not search_dir.exists():
830
+ console.print(f"[red]Directory not found: {directory}[/red]")
831
+ return
832
+
833
+ agents = []
834
+
835
+ # Find agent definition files
836
+ for pattern in ['*.agent.md', '*.agent.yaml', '*.agent.yml', '*.agent.json']:
837
+ for file_path in search_dir.rglob(pattern):
838
+ try:
839
+ agent_def = load_agent_definition(str(file_path))
840
+ # Use relative path if already relative, otherwise make it relative to cwd
841
+ try:
842
+ display_path = str(file_path.relative_to(Path.cwd()))
843
+ except ValueError:
844
+ display_path = str(file_path)
845
+
846
+ agents.append({
847
+ 'name': agent_def.get('name', file_path.stem),
848
+ 'file': display_path,
849
+ 'description': agent_def.get('description', '')[:80]
850
+ })
851
+ except Exception as e:
852
+ logger.debug(f"Failed to load {file_path}: {e}")
853
+
854
+ if not agents:
855
+ console.print(f"\n[yellow]No agent definition files found in {directory}[/yellow]")
856
+ return
857
+
858
+ # Display local agents in a table
859
+ table = Table(
860
+ title=f"Local Agent Definitions in {directory}",
861
+ show_header=True,
862
+ header_style="bold cyan",
863
+ border_style="cyan",
864
+ box=box.ROUNDED
865
+ )
866
+ table.add_column("Name", style="bold cyan", no_wrap=True)
867
+ table.add_column("File", style="dim")
868
+ table.add_column("Description", style="white")
869
+
870
+ for agent_info in sorted(agents, key=lambda x: x['name']):
871
+ table.add_row(
872
+ agent_info['name'],
873
+ agent_info['file'],
874
+ agent_info['description'] or "-"
875
+ )
876
+
877
+ console.print("\n")
878
+ console.print(table)
879
+ console.print(f"\n[green]Total: {len(agents)} local agents[/green]")
880
+
881
+ else:
882
+ # List platform agents
883
+ client = get_client(ctx)
884
+
885
+ agents = client.get_list_of_apps()
886
+
887
+ if formatter.__class__.__name__ == 'JSONFormatter':
888
+ click.echo(formatter._dump({'agents': agents, 'total': len(agents)}))
889
+ else:
890
+ table = Table(
891
+ title="Available Platform Agents",
892
+ show_header=True,
893
+ header_style="bold cyan",
894
+ border_style="cyan",
895
+ box=box.ROUNDED
896
+ )
897
+ table.add_column("ID", style="yellow", no_wrap=True)
898
+ table.add_column("Name", style="bold cyan")
899
+ table.add_column("Description", style="white")
900
+
901
+ for agent_info in agents:
902
+ table.add_row(
903
+ str(agent_info['id']),
904
+ agent_info['name'],
905
+ agent_info.get('description', '')[:80] or "-"
906
+ )
907
+
908
+ console.print("\n")
909
+ console.print(table)
910
+ console.print(f"\n[green]Total: {len(agents)} agents[/green]")
911
+
912
+ except Exception as e:
913
+ logger.exception("Failed to list agents")
914
+ error_panel = Panel(
915
+ str(e),
916
+ title="Error",
917
+ border_style="red",
918
+ box=box.ROUNDED
919
+ )
920
+ console.print(error_panel, style="red")
921
+ raise click.Abort()
922
+
923
+
924
+ @agent.command('show')
925
+ @click.argument('agent_source')
926
+ @click.option('--version', help='Agent version (for platform agents)')
927
+ @click.pass_context
928
+ def agent_show(ctx, agent_source: str, version: Optional[str]):
929
+ """
930
+ Show agent details.
931
+
932
+ AGENT_SOURCE can be:
933
+ - Platform agent ID or name (e.g., "123" or "my-agent")
934
+ - Path to local agent file (e.g., ".github/agents/sdk-dev.agent.md")
935
+ """
936
+ formatter = ctx.obj['formatter']
937
+
938
+ try:
939
+ # Check if it's a file path
940
+ if Path(agent_source).exists():
941
+ # Local agent file
942
+ agent_def = load_agent_definition(agent_source)
943
+
944
+ if formatter.__class__.__name__ == 'JSONFormatter':
945
+ click.echo(formatter._dump(agent_def))
946
+ else:
947
+ # Create details panel
948
+ details = Text()
949
+ details.append("File: ", style="bold")
950
+ details.append(f"{agent_source}\n", style="cyan")
951
+
952
+ if agent_def.get('description'):
953
+ details.append("\nDescription: ", style="bold")
954
+ details.append(f"{agent_def['description']}\n", style="white")
955
+
956
+ if agent_def.get('model'):
957
+ details.append("Model: ", style="bold")
958
+ details.append(f"{agent_def['model']}\n", style="cyan")
959
+
960
+ if agent_def.get('tools'):
961
+ details.append("Tools: ", style="bold")
962
+ details.append(f"{', '.join(agent_def['tools'])}\n", style="cyan")
963
+
964
+ if agent_def.get('temperature') is not None:
965
+ details.append("Temperature: ", style="bold")
966
+ details.append(f"{agent_def['temperature']}\n", style="cyan")
967
+
968
+ if agent_def.get('persona'):
969
+ details.append("Persona: ", style="bold")
970
+ details.append(f"{agent_def['persona']}\n", style="cyan")
971
+
972
+ panel = Panel(
973
+ details,
974
+ title=f"Local Agent: {agent_def.get('name', 'Unknown')}",
975
+ title_align="left",
976
+ border_style="cyan",
977
+ box=box.ROUNDED
978
+ )
979
+ console.print("\n")
980
+ console.print(panel)
981
+
982
+ if agent_def.get('system_prompt'):
983
+ console.print("\n[bold]System Prompt:[/bold]")
984
+ console.print(Panel(agent_def['system_prompt'][:500] + "...", border_style="dim", box=box.ROUNDED))
985
+
986
+ else:
987
+ # Platform agent
988
+ client = get_client(ctx)
989
+
990
+ # Try to find agent by ID or name
991
+ agents = client.get_list_of_apps()
992
+
993
+ agent = None
994
+ try:
995
+ agent_id = int(agent_source)
996
+ agent = next((a for a in agents if a['id'] == agent_id), None)
997
+ except ValueError:
998
+ agent = next((a for a in agents if a['name'] == agent_source), None)
999
+
1000
+ if not agent:
1001
+ raise click.ClickException(f"Agent '{agent_source}' not found")
1002
+
1003
+ # Get details
1004
+ details = client.get_app_details(agent['id'])
1005
+
1006
+ if formatter.__class__.__name__ == 'JSONFormatter':
1007
+ click.echo(formatter._dump(details))
1008
+ else:
1009
+ # Create platform agent details panel
1010
+ content = Text()
1011
+ content.append("ID: ", style="bold")
1012
+ content.append(f"{details['id']}\n", style="yellow")
1013
+
1014
+ if details.get('description'):
1015
+ content.append("\nDescription: ", style="bold")
1016
+ content.append(f"{details['description']}\n", style="white")
1017
+
1018
+ panel = Panel(
1019
+ content,
1020
+ title=f"Agent: {details['name']}",
1021
+ title_align="left",
1022
+ border_style="cyan",
1023
+ box=box.ROUNDED
1024
+ )
1025
+ console.print("\n")
1026
+ console.print(panel)
1027
+
1028
+ # Display versions in a table
1029
+ if details.get('versions'):
1030
+ console.print("\n[bold]Versions:[/bold]")
1031
+ versions_table = Table(box=box.ROUNDED, border_style="dim")
1032
+ versions_table.add_column("Name", style="cyan")
1033
+ versions_table.add_column("ID", style="yellow")
1034
+ for ver in details.get('versions', []):
1035
+ versions_table.add_row(ver['name'], str(ver['id']))
1036
+ console.print(versions_table)
1037
+
1038
+ except click.ClickException:
1039
+ raise
1040
+ except Exception as e:
1041
+ logger.exception("Failed to show agent details")
1042
+ error_panel = Panel(
1043
+ str(e),
1044
+ title="Error",
1045
+ border_style="red",
1046
+ box=box.ROUNDED
1047
+ )
1048
+ console.print(error_panel, style="red")
1049
+ raise click.Abort()
1050
+
1051
+
1052
+ @agent.command('chat')
1053
+ @click.argument('agent_source', required=False)
1054
+ @click.option('--version', help='Agent version (for platform agents)')
1055
+ @click.option('--toolkit-config', multiple=True, type=click.Path(exists=True),
1056
+ help='Toolkit configuration files (can specify multiple)')
1057
+ @click.option('--inventory', 'inventory_path', type=str,
1058
+ help='Load inventory/knowledge graph from JSON file (e.g., alita.json or .alita/inventory/alita.json)')
1059
+ @click.option('--thread-id', help='Continue existing conversation thread')
1060
+ @click.option('--model', help='Override LLM model')
1061
+ @click.option('--temperature', type=float, help='Override temperature')
1062
+ @click.option('--max-tokens', type=int, help='Override max tokens')
1063
+ @click.option('--dir', 'work_dir', type=click.Path(exists=True, file_okay=False, dir_okay=True),
1064
+ help='Grant agent filesystem access to this directory')
1065
+ @click.option('--verbose', '-v', type=click.Choice(['quiet', 'default', 'debug']), default='default',
1066
+ help='Output verbosity level: quiet (final output only), default (tool calls + outputs), debug (all including LLM calls)')
1067
+ @click.option('--recursion-limit', type=int, default=50,
1068
+ help='Maximum number of tool execution steps per turn')
1069
+ @click.pass_context
1070
+ def agent_chat(ctx, agent_source: Optional[str], version: Optional[str],
1071
+ toolkit_config: tuple, inventory_path: Optional[str], thread_id: Optional[str],
1072
+ model: Optional[str], temperature: Optional[float],
1073
+ max_tokens: Optional[int], work_dir: Optional[str],
1074
+ verbose: str, recursion_limit: Optional[int]):
1075
+ """Start interactive chat with an agent.
1076
+
1077
+ \b
1078
+ Examples:
1079
+ alita chat # Interactive agent selection
1080
+ alita chat my-agent # Chat with platform agent
1081
+ alita chat ./agent.md # Chat with local agent file
1082
+ alita chat --inventory alita.json
1083
+ alita chat my-agent --dir ./src
1084
+ alita chat my-agent --thread-id abc123
1085
+ alita chat my-agent -v quiet # Hide tool calls
1086
+ alita chat my-agent -v debug # Show all LLM calls
1087
+ alita chat __inventory__ --toolkit-config jira.json
1088
+ """
1089
+ formatter = ctx.obj['formatter']
1090
+ config = ctx.obj['config']
1091
+ client = get_client(ctx)
1092
+
1093
+ # Setup verbose level
1094
+ show_verbose = verbose != 'quiet'
1095
+ debug_mode = verbose == 'debug'
1096
+
1097
+ try:
1098
+ # If no agent specified, start with direct chat by default
1099
+ if not agent_source:
1100
+ agent_source = '__direct__'
1101
+
1102
+ # Check for built-in agent modes
1103
+ is_direct = agent_source == '__direct__'
1104
+ is_inventory = agent_source == '__inventory__'
1105
+ is_builtin = is_direct or is_inventory
1106
+ is_local = not is_builtin and Path(agent_source).exists()
1107
+
1108
+ # Get defaults from config
1109
+ default_model = config.default_model or 'gpt-4o'
1110
+ default_temperature = config.default_temperature if config.default_temperature is not None else 0.1
1111
+ default_max_tokens = config.default_max_tokens or 4096
1112
+
1113
+ # Initialize variables for dynamic updates
1114
+ current_model = model
1115
+ current_temperature = temperature
1116
+ current_max_tokens = max_tokens
1117
+ added_mcp_configs = []
1118
+ added_toolkit_configs = list(toolkit_config) if toolkit_config else []
1119
+ mcp_session_manager = None
1120
+ llm = None
1121
+ agent_executor = None
1122
+ agent_def = {}
1123
+ filesystem_tools = None
1124
+ terminal_tools = None
1125
+ planning_tools = None
1126
+ plan_state = None
1127
+
1128
+ # Handle --inventory option: add inventory toolkit config at startup
1129
+ if inventory_path:
1130
+ inventory_config = _build_inventory_config(inventory_path, work_dir)
1131
+ if inventory_config:
1132
+ added_toolkit_configs.append(inventory_config)
1133
+ console.print(f"[dim]✓ Loading inventory: {inventory_config['toolkit_name']} ({inventory_config['graph_path']})[/dim]")
1134
+ else:
1135
+ console.print(f"[yellow]Warning: Inventory file not found: {inventory_path}[/yellow]")
1136
+ console.print("[dim]Searched in current directory and .alita/inventory/[/dim]")
1137
+
1138
+ # Approval mode: 'always' (confirm each tool), 'auto' (no confirmation), 'yolo' (no safety checks)
1139
+ approval_mode = 'always'
1140
+ allowed_directories = [work_dir] if work_dir else [] # Track allowed directories for /dir command
1141
+ current_agent_file = agent_source if is_local else None # Track agent file for /reload command
1142
+
1143
+ if is_direct:
1144
+ # Direct chat mode - no agent, just LLM with Alita instructions
1145
+ agent_name = "Alita"
1146
+ agent_type = "Direct LLM"
1147
+ alita_prompt = _get_alita_system_prompt(config)
1148
+ agent_def = {
1149
+ 'model': model or default_model,
1150
+ 'temperature': temperature if temperature is not None else default_temperature,
1151
+ 'max_tokens': max_tokens or default_max_tokens,
1152
+ 'system_prompt': alita_prompt
1153
+ }
1154
+ elif is_inventory:
1155
+ # Inventory agent mode - knowledge graph builder with inventory toolkit
1156
+ agent_name = "Inventory"
1157
+ agent_type = "Built-in Agent"
1158
+ inventory_prompt = _get_inventory_system_prompt(config)
1159
+ agent_def = {
1160
+ 'name': 'inventory-agent',
1161
+ 'model': model or default_model,
1162
+ 'temperature': temperature if temperature is not None else 0.3,
1163
+ 'max_tokens': max_tokens or default_max_tokens,
1164
+ 'system_prompt': inventory_prompt,
1165
+ # Include inventory toolkit by default
1166
+ 'toolkit_configs': [
1167
+ {'type': 'inventory', 'graph_path': './knowledge_graph.json'}
1168
+ ]
1169
+ }
1170
+ elif is_local:
1171
+ agent_def = load_agent_definition(agent_source)
1172
+ agent_name = agent_def.get('name', Path(agent_source).stem)
1173
+ agent_type = "Local Agent"
1174
+ else:
1175
+ # Platform agent - find it
1176
+ agents = client.get_list_of_apps()
1177
+ agent = None
1178
+
1179
+ try:
1180
+ agent_id = int(agent_source)
1181
+ agent = next((a for a in agents if a['id'] == agent_id), None)
1182
+ except ValueError:
1183
+ agent = next((a for a in agents if a['name'] == agent_source), None)
1184
+
1185
+ if not agent:
1186
+ raise click.ClickException(f"Agent '{agent_source}' not found")
1187
+
1188
+ agent_name = agent['name']
1189
+ agent_type = "Platform Agent"
1190
+
1191
+ # Get model and temperature for welcome banner
1192
+ llm_model_display = current_model or agent_def.get('model', default_model)
1193
+ llm_temperature_display = current_temperature if current_temperature is not None else agent_def.get('temperature', default_temperature)
1194
+
1195
+ # Print nice welcome banner
1196
+ print_welcome(agent_name, llm_model_display, llm_temperature_display, approval_mode)
1197
+
1198
+ # Initialize conversation
1199
+ chat_history = []
1200
+
1201
+ # Initialize session for persistence (memory + plan)
1202
+ from .tools import generate_session_id, create_session_memory, save_session_metadata, to_portable_path
1203
+ current_session_id = generate_session_id()
1204
+ plan_state = {'session_id': current_session_id}
1205
+
1206
+ # Create persistent memory for agent (stored in session directory)
1207
+ memory = create_session_memory(current_session_id)
1208
+
1209
+ # Save session metadata with agent source for session resume
1210
+ agent_source_portable = to_portable_path(current_agent_file) if current_agent_file else None
1211
+ # Filter out transient inventory configs (dicts) - only save file paths
1212
+ serializable_toolkit_configs = [tc for tc in added_toolkit_configs if isinstance(tc, str)]
1213
+ # Extract inventory graph path if present
1214
+ inventory_graph = None
1215
+ for tc in added_toolkit_configs:
1216
+ if isinstance(tc, dict) and tc.get('type') == 'inventory':
1217
+ inventory_graph = tc.get('graph_path')
1218
+ break
1219
+ save_session_metadata(current_session_id, {
1220
+ 'agent_name': agent_name,
1221
+ 'agent_type': agent_type,  # assigned on every branch above
1222
+ 'agent_source': agent_source_portable,
1223
+ 'model': llm_model_display,
1224
+ 'temperature': llm_temperature_display,
1225
+ 'work_dir': work_dir,
1226
+ 'is_direct': is_direct,
1227
+ 'is_local': is_local,
1228
+ 'is_inventory': is_inventory,
1229
+ 'added_toolkit_configs': serializable_toolkit_configs,
1230
+ 'inventory_graph': inventory_graph,
1231
+ 'added_mcps': [m if isinstance(m, str) else m.get('name') for m in agent_def.get('mcps', [])],
1232
+ })
1233
+ console.print(f"[dim]Session: {current_session_id}[/dim]")
1234
+
1235
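# --- Editor's sketch (illustrative only, not part of this diff) --------------
# The metadata round-trip performed by save_session_metadata /
# load_session_metadata boils down to one JSON document per session, holding
# enough state (agent source, model, temperature, toolkits, MCPs) to resume
# later.  The storage location and file name below are assumptions for
# illustration; the package's actual layout may differ.
import json
from pathlib import Path
from typing import Optional

SESSIONS_DIR = Path.home() / ".alita" / "sessions"   # assumed location

def save_session_metadata_sketch(session_id: str, metadata: dict) -> Path:
    session_dir = SESSIONS_DIR / session_id
    session_dir.mkdir(parents=True, exist_ok=True)
    path = session_dir / "metadata.json"
    path.write_text(json.dumps(metadata, indent=2))
    return path

def load_session_metadata_sketch(session_id: str) -> Optional[dict]:
    path = SESSIONS_DIR / session_id / "metadata.json"
    return json.loads(path.read_text()) if path.exists() else None
# ------------------------------------------------------------------------------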
+ # Initialize context manager for chat history management
1236
+ context_config = config.context_management
1237
+ ctx_manager = CLIContextManager(
1238
+ session_id=current_session_id,
1239
+ max_context_tokens=context_config.get('max_context_tokens', 8000),
1240
+ preserve_recent=context_config.get('preserve_recent_messages', 5),
1241
+ pruning_method=context_config.get('pruning_method', 'oldest_first'),
1242
+ enable_summarization=context_config.get('enable_summarization', True),
1243
+ summary_trigger_ratio=context_config.get('summary_trigger_ratio', 0.8),
1244
+ summaries_limit=context_config.get('summaries_limit_count', 5),
1245
+ llm=None # Will be set after LLM creation
1246
+ )
1247
+
1248
+ # Purge old sessions on startup (cleanup task)
1249
+ try:
1250
+ purge_context_sessions(
1251
+ sessions_dir=config.sessions_dir,
1252
+ max_age_days=context_config.get('session_max_age_days', 30),
1253
+ max_sessions=context_config.get('max_sessions', 100)
1254
+ )
1255
+ except Exception as e:
1256
+ logger.debug(f"Session cleanup failed: {e}")
1257
+
1258
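# --- Editor's sketch (illustrative only, not part of this diff) --------------
# The gist of the 'oldest_first' pruning configured above: keep the most recent
# `preserve_recent` messages unconditionally, then walk backwards adding older
# messages while they fit the token budget.  The 4-characters-per-token
# estimate is a common rough heuristic, not the package's actual token counter,
# and this function is a simplification of what CLIContextManager does.
def prune_oldest_first(messages: list[dict], max_tokens: int = 8000,
                       preserve_recent: int = 5) -> list[dict]:
    def estimate(msg: dict) -> int:
        # ~4 characters per token, as a rough heuristic
        return max(1, len(msg.get("content", "")) // 4)

    tail = messages[-preserve_recent:] if preserve_recent > 0 else []
    head = messages[:-preserve_recent] if preserve_recent > 0 else list(messages)
    kept = list(tail)                                  # recent messages always survive
    budget = max_tokens - sum(estimate(m) for m in kept)
    for msg in reversed(head):                         # newest of the older messages first
        cost = estimate(msg)
        if cost > budget:
            break                                      # everything older is pruned
        kept.insert(0, msg)
        budget -= cost
    return kept
# ------------------------------------------------------------------------------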
+ # Create agent executor
1259
+ if is_direct or is_local or is_inventory:
1260
+ # Setup local agent executor (handles all config, tools, MCP, etc.)
1261
+ try:
1262
+ agent_executor, mcp_session_manager, llm, llm_model, filesystem_tools, terminal_tools, planning_tools = _setup_local_agent_executor(
1263
+ client, agent_def, tuple(added_toolkit_configs), config, current_model, current_temperature, current_max_tokens, memory, work_dir, plan_state
1264
+ )
1265
+ except Exception:
1266
+ return
1267
+ else:
1268
+ # Platform agent
1269
+ details = client.get_app_details(agent['id'])
1270
+
1271
+ if version:
1272
+ version_obj = next((v for v in details['versions'] if v['name'] == version), None)
1273
+ if not version_obj:
1274
+ raise click.ClickException(f"Version '{version}' not found")
1275
+ version_id = version_obj['id']
1276
+ else:
1277
+ # Use first version
1278
+ version_id = details['versions'][0]['id']
1279
+
1280
+ # Display configuration
1281
+ console.print()
1282
+ console.print("✓ [green]Connected to platform agent[/green]")
1283
+ console.print()
1284
+
1285
+ agent_executor = client.application(
1286
+ application_id=agent['id'],
1287
+ application_version_id=version_id,
1288
+ memory=memory,
1289
+ chat_history=chat_history
1290
+ )
1291
+ llm = None # Platform agents don't use direct LLM
1292
+
1293
+ # Set LLM on context manager for summarization
1294
+ if llm is not None:
1295
+ ctx_manager.llm = llm
1296
+
1297
+ # Initialize input handler for readline support
1298
+ input_handler = get_input_handler()
1299
+
1300
+ # Set up toolkit names callback for tab completion
1301
+ from .input_handler import set_toolkit_names_callback, set_inventory_files_callback
1302
+ set_toolkit_names_callback(lambda: _list_available_toolkits(config))
1303
+
1304
+ # Set up inventory files callback for /inventory tab completion
1305
+ set_inventory_files_callback(lambda: _get_inventory_json_files(allowed_directories[0] if allowed_directories else None))
1306
+
1307
+ # Interactive chat loop
1308
+ while True:
1309
+ try:
1310
+ # Get context info for the UI indicator
1311
+ context_info = ctx_manager.get_context_info()
1312
+
1313
+ # Get input with styled prompt (prompt is part of input() for proper readline handling)
1314
+ user_input = styled_input(context_info=context_info).strip()
1315
+
1316
+ if not user_input:
1317
+ continue
1318
+
1319
+ # Handle commands
1320
+ if user_input.lower() in ['exit', 'quit']:
1321
+ # Save final session state before exiting
1322
+ try:
1323
+ from .tools import update_session_metadata, to_portable_path
1324
+ update_session_metadata(current_session_id, {
1325
+ 'agent_source': to_portable_path(current_agent_file) if current_agent_file else None,
1326
+ 'model': current_model or llm_model_display,
1327
+ 'temperature': current_temperature if current_temperature is not None else llm_temperature_display,
1328
+ 'allowed_directories': allowed_directories,
1329
+ 'added_toolkit_configs': list(added_toolkit_configs),
1330
+ 'added_mcps': [m if isinstance(m, str) else m.get('name') for m in agent_def.get('mcps', [])],
1331
+ })
1332
+ except Exception as e:
1333
+ logger.debug(f"Failed to save session state on exit: {e}")
1334
+ console.print("\n[bold cyan]👋 Goodbye![/bold cyan]\n")
1335
+ break
1336
+
1337
+ if user_input == '/clear':
1338
+ chat_history = []
1339
+ ctx_manager.clear()
1340
+ console.print("[green]✓ Conversation history cleared.[/green]")
1341
+ continue
1342
+
1343
+ if user_input == '/history':
1344
+ if not chat_history:
1345
+ console.print("[yellow]No conversation history yet.[/yellow]")
1346
+ else:
1347
+ console.print("\n[bold cyan]── Conversation History ──[/bold cyan]")
1348
+ for i, msg in enumerate(chat_history, 1):
1349
+ role = msg.get('role', 'unknown')
1350
+ content = msg.get('content', '')
1351
+ role_color = 'blue' if role == 'user' else 'green'
1352
+ included_marker = "" if ctx_manager.is_message_included(i - 1) else " [dim](pruned)[/dim]"
1353
+ console.print(f"\n[bold {role_color}]{i}. {role.upper()}:[/bold {role_color}] {content[:100]}...{included_marker}")
1354
+ continue
1355
+
1356
+ if user_input == '/save':
1357
+ console.print("[yellow]Save to file (default: conversation.json):[/yellow] ", end="")
1358
+ filename = input().strip()
1359
+ filename = filename or "conversation.json"
1360
+ with open(filename, 'w') as f:
1361
+ json.dump({'history': chat_history}, f, indent=2)
1362
+ console.print(f"[green]✓ Conversation saved to {filename}[/green]")
1363
+ continue
1364
+
1365
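# --- Editor's sketch (illustrative only, not part of this diff) --------------
# Shape of the file written by /save above: a single "history" key holding the
# role/content pairs accumulated during the chat.  Reading it back is plain
# JSON; the file name is whatever the user typed (default conversation.json).
import json

with open("conversation.json") as f:
    saved = json.load(f)
for msg in saved["history"]:               # [{"role": "user"|"assistant", "content": "..."}]
    print(f"{msg['role']}: {msg['content'][:80]}")
# ------------------------------------------------------------------------------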
+ if user_input == '/help':
1366
+ print_help()
1367
+ continue
1368
+
1369
+ # /model command - switch model
1370
+ if user_input == '/model':
1371
+ if not (is_direct or is_local):
1372
+ console.print("[yellow]Model switching is only available for local agents and direct chat.[/yellow]")
1373
+ continue
1374
+
1375
+ selected_model = _select_model_interactive(client)
1376
+ if selected_model:
1377
+ current_model = selected_model['name']
1378
+ agent_def['model'] = current_model
1379
+
1380
+ # Recreate LLM and agent executor - use session memory to preserve history
1381
+ from .tools import create_session_memory, update_session_metadata
1382
+ memory = create_session_memory(current_session_id)
1383
+ try:
1384
+ agent_executor, mcp_session_manager, llm, llm_model, filesystem_tools, terminal_tools, planning_tools = _setup_local_agent_executor(
1385
+ client, agent_def, tuple(added_toolkit_configs), config, current_model, current_temperature, current_max_tokens, memory, allowed_directories, plan_state
1386
+ )
1387
+ # Persist model change to session
1388
+ update_session_metadata(current_session_id, {
1389
+ 'model': current_model,
1390
+ 'temperature': current_temperature if current_temperature is not None else agent_def.get('temperature', 0.7)
1391
+ })
1392
+ console.print(Panel(
1393
+ f"[cyan]ℹ Model switched to [bold]{current_model}[/bold]. Agent state reset, chat history preserved.[/cyan]",
1394
+ border_style="cyan",
1395
+ box=box.ROUNDED
1396
+ ))
1397
+ except Exception as e:
1398
+ console.print(f"[red]Error switching model: {e}[/red]")
1399
+ continue
1400
+
1401
+ # /reload command - reload agent definition from file
1402
+ if user_input == '/reload':
1403
+ if not is_local:
1404
+ if is_direct or is_inventory:
1405
+ console.print("[yellow]Cannot reload built-in agent mode - no agent file to reload.[/yellow]")
1406
+ else:
1407
+ console.print("[yellow]Reload is only available for local agents (file-based).[/yellow]")
1408
+ continue
1409
+
1410
+ if not current_agent_file or not Path(current_agent_file).exists():
1411
+ console.print("[red]Agent file not found. Cannot reload.[/red]")
1412
+ continue
1413
+
1414
+ try:
1415
+ # Reload agent definition from file
1416
+ new_agent_def = load_agent_definition(current_agent_file)
1417
+
1418
+ # Preserve runtime additions (MCPs, tools added via commands)
1419
+ if 'mcps' in agent_def and agent_def['mcps']:
1420
+ # Merge MCPs: file MCPs + runtime added MCPs
1421
+ file_mcps = new_agent_def.get('mcps', [])
1422
+ for mcp in agent_def['mcps']:
1423
+ mcp_name = mcp if isinstance(mcp, str) else mcp.get('name')
1424
+ file_mcp_names = [m if isinstance(m, str) else m.get('name') for m in file_mcps]
1425
+ if mcp_name not in file_mcp_names:
1426
+ file_mcps.append(mcp)
1427
+ new_agent_def['mcps'] = file_mcps
1428
+
1429
+ # Update agent_def with new values (preserving model/temp overrides)
1430
+ old_system_prompt = agent_def.get('system_prompt', '')
1431
+ new_system_prompt = new_agent_def.get('system_prompt', '')
1432
+
1433
+ agent_def.update(new_agent_def)
1434
+
1435
+ # Restore runtime overrides
1436
+ if current_model:
1437
+ agent_def['model'] = current_model
1438
+ if current_temperature is not None:
1439
+ agent_def['temperature'] = current_temperature
1440
+ if current_max_tokens:
1441
+ agent_def['max_tokens'] = current_max_tokens
1442
+
1443
+ # Recreate agent executor with reloaded definition
1444
+ from .tools import create_session_memory
1445
+ memory = create_session_memory(current_session_id)
1446
+ agent_executor, mcp_session_manager, llm, llm_model, filesystem_tools, terminal_tools, planning_tools = _setup_local_agent_executor(
1447
+ client, agent_def, tuple(added_toolkit_configs), config, current_model, current_temperature, current_max_tokens, memory, allowed_directories, plan_state
1448
+ )
1449
+
1450
+ # Show what changed
1451
+ prompt_changed = old_system_prompt != new_system_prompt
1452
+ agent_name = agent_def.get('name', Path(current_agent_file).stem)
1453
+
1454
+ if prompt_changed:
1455
+ console.print(Panel(
1456
+ f"[green]✓ Reloaded agent: [bold]{agent_name}[/bold][/green]\n"
1457
+ f"[dim]System prompt updated ({len(new_system_prompt)} chars)[/dim]",
1458
+ border_style="green",
1459
+ box=box.ROUNDED
1460
+ ))
1461
+ else:
1462
+ console.print(Panel(
1463
+ f"[cyan]ℹ Reloaded agent: [bold]{agent_name}[/bold][/cyan]\n"
1464
+ f"[dim]No changes detected in system prompt[/dim]",
1465
+ border_style="cyan",
1466
+ box=box.ROUNDED
1467
+ ))
1468
+ except Exception as e:
1469
+ console.print(f"[red]Error reloading agent: {e}[/red]")
1470
+ continue
1471
+
1472
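# --- Editor's sketch (illustrative only, not part of this diff) --------------
# The /reload handler above repeats the "plain string or {'name': ...} dict"
# dance for MCP entries.  A hypothetical helper pair like this captures the
# same merge: runtime-added MCPs are appended to the ones declared in the agent
# file unless an entry with the same name is already present.
from typing import Optional

def mcp_name(entry) -> Optional[str]:
    return entry if isinstance(entry, str) else entry.get("name")

def merge_mcps(file_mcps: list, runtime_mcps: list) -> list:
    merged = list(file_mcps)
    known = {mcp_name(m) for m in merged}
    for m in runtime_mcps:
        if mcp_name(m) not in known:
            merged.append(m)
            known.add(mcp_name(m))
    return merged

# merge_mcps(["jira"], ["jira", {"name": "playwright"}])
# -> ["jira", {"name": "playwright"}]
# ------------------------------------------------------------------------------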
+ # /add_mcp command - add MCP server
1473
+ if user_input == '/add_mcp':
1474
+ if not (is_direct or is_local or is_inventory):
1475
+ console.print("[yellow]Adding MCP is only available for local agents and built-in agents.[/yellow]")
1476
+ continue
1477
+
1478
+ selected_mcp = _select_mcp_interactive(config)
1479
+ if selected_mcp:
1480
+ mcp_name = selected_mcp['name']
1481
+ # Add MCP to agent definition
1482
+ if 'mcps' not in agent_def:
1483
+ agent_def['mcps'] = []
1484
+ if mcp_name not in [m if isinstance(m, str) else m.get('name') for m in agent_def.get('mcps', [])]:
1485
+ agent_def['mcps'].append(mcp_name)
1486
+
1487
+ # Recreate agent executor with new MCP - use session memory to preserve history
1488
+ from .tools import create_session_memory, update_session_metadata
1489
+ memory = create_session_memory(current_session_id)
1490
+ try:
1491
+ agent_executor, mcp_session_manager, llm, llm_model, filesystem_tools, terminal_tools, planning_tools = _setup_local_agent_executor(
1492
+ client, agent_def, tuple(added_toolkit_configs), config, current_model, current_temperature, current_max_tokens, memory, allowed_directories, plan_state
1493
+ )
1494
+ # Persist added MCPs to session
1495
+ update_session_metadata(current_session_id, {
1496
+ 'added_mcps': [m if isinstance(m, str) else m.get('name') for m in agent_def.get('mcps', [])]
1497
+ })
1498
+ console.print(Panel(
1499
+ f"[cyan]ℹ Added MCP: [bold]{mcp_name}[/bold]. Agent state reset, chat history preserved.[/cyan]",
1500
+ border_style="cyan",
1501
+ box=box.ROUNDED
1502
+ ))
1503
+ except Exception as e:
1504
+ console.print(f"[red]Error adding MCP: {e}[/red]")
1505
+ continue
1506
+
1507
+ # /add_toolkit command - add toolkit
1508
+ if user_input == '/add_toolkit' or user_input.startswith('/add_toolkit '):
1509
+ if not (is_direct or is_local or is_inventory):
1510
+ console.print("[yellow]Adding toolkit is only available for local agents and built-in agents.[/yellow]")
1511
+ continue
1512
+
1513
+ parts = user_input.split(maxsplit=1)
1514
+ if len(parts) == 2:
1515
+ # Direct toolkit selection by name
1516
+ toolkit_name_arg = parts[1].strip()
1517
+ selected_toolkit = _find_toolkit_by_name(config, toolkit_name_arg)
1518
+ if not selected_toolkit:
1519
+ console.print(f"[yellow]Toolkit '{toolkit_name_arg}' not found.[/yellow]")
1520
+ # Show available toolkits
1521
+ available = _list_available_toolkits(config)
1522
+ if available:
1523
+ console.print(f"[dim]Available toolkits: {', '.join(available)}[/dim]")
1524
+ continue
1525
+ else:
1526
+ # Interactive selection
1527
+ selected_toolkit = _select_toolkit_interactive(config)
1528
+
1529
+ if selected_toolkit:
1530
+ toolkit_name = selected_toolkit['name']
1531
+ toolkit_file = selected_toolkit['file']
1532
+
1533
+ # Add toolkit config path
1534
+ if toolkit_file not in added_toolkit_configs:
1535
+ added_toolkit_configs.append(toolkit_file)
1536
+
1537
+ # Recreate agent executor with new toolkit - use session memory to preserve history
1538
+ from .tools import create_session_memory, update_session_metadata
1539
+ memory = create_session_memory(current_session_id)
1540
+ try:
1541
+ agent_executor, mcp_session_manager, llm, llm_model, filesystem_tools, terminal_tools, planning_tools = _setup_local_agent_executor(
1542
+ client, agent_def, tuple(added_toolkit_configs), config, current_model, current_temperature, current_max_tokens, memory, allowed_directories, plan_state
1543
+ )
1544
+ # Persist added toolkits to session
1545
+ update_session_metadata(current_session_id, {
1546
+ 'added_toolkit_configs': list(added_toolkit_configs)
1547
+ })
1548
+ console.print(Panel(
1549
+ f"[cyan]ℹ Added toolkit: [bold]{toolkit_name}[/bold]. Agent state reset, chat history preserved.[/cyan]",
1550
+ border_style="cyan",
1551
+ box=box.ROUNDED
1552
+ ))
1553
+ except Exception as e:
1554
+ console.print(f"[red]Error adding toolkit: {e}[/red]")
1555
+ continue
1556
+
1557
+ # /rm_mcp command - remove MCP server
1558
+ if user_input == '/rm_mcp' or user_input.startswith('/rm_mcp '):
1559
+ if not (is_direct or is_local or is_inventory):
1560
+ console.print("[yellow]Removing MCP is only available for local agents and built-in agents.[/yellow]")
1561
+ continue
1562
+
1563
+ current_mcps = agent_def.get('mcps', [])
1564
+ if not current_mcps:
1565
+ console.print("[yellow]No MCP servers are currently loaded.[/yellow]")
1566
+ continue
1567
+
1568
+ # Get list of MCP names
1569
+ mcp_names = [m if isinstance(m, str) else m.get('name') for m in current_mcps]
1570
+
1571
+ parts = user_input.split(maxsplit=1)
1572
+ if len(parts) == 2:
1573
+ # Direct removal by name
1574
+ mcp_name_to_remove = parts[1].strip()
1575
+ if mcp_name_to_remove not in mcp_names:
1576
+ console.print(f"[yellow]MCP '{mcp_name_to_remove}' not found.[/yellow]")
1577
+ console.print(f"[dim]Loaded MCPs: {', '.join(mcp_names)}[/dim]")
1578
+ continue
1579
+ else:
1580
+ # Interactive selection
1581
+ console.print("\n🔌 [bold cyan]Remove MCP Server[/bold cyan]\n")
1582
+ for i, name in enumerate(mcp_names, 1):
1583
+ console.print(f" [bold]{i}[/bold]. {name}")
1584
+ console.print(f" [bold]0[/bold]. [dim]Cancel[/dim]")
1585
+ console.print()
1586
+
1587
+ try:
1588
+ choice = int(input("Select MCP to remove: ").strip())
1589
+ if choice == 0:
1590
+ continue
1591
+ if 1 <= choice <= len(mcp_names):
1592
+ mcp_name_to_remove = mcp_names[choice - 1]
1593
+ else:
1594
+ console.print("[yellow]Invalid selection.[/yellow]")
1595
+ continue
1596
+ except (ValueError, KeyboardInterrupt):
1597
+ continue
1598
+
1599
+ # Remove the MCP
1600
+ agent_def['mcps'] = [m for m in current_mcps if (m if isinstance(m, str) else m.get('name')) != mcp_name_to_remove]
1601
+
1602
+ # Recreate agent executor without the MCP
1603
+ from .tools import create_session_memory, update_session_metadata
1604
+ memory = create_session_memory(current_session_id)
1605
+ try:
1606
+ agent_executor, mcp_session_manager, llm, llm_model, filesystem_tools, terminal_tools, planning_tools = _setup_local_agent_executor(
1607
+ client, agent_def, tuple(added_toolkit_configs), config, current_model, current_temperature, current_max_tokens, memory, allowed_directories, plan_state
1608
+ )
1609
+ # Persist updated MCPs to session
1610
+ update_session_metadata(current_session_id, {
1611
+ 'added_mcps': [m if isinstance(m, str) else m.get('name') for m in agent_def.get('mcps', [])]
1612
+ })
1613
+ console.print(Panel(
1614
+ f"[cyan]ℹ Removed MCP: [bold]{mcp_name_to_remove}[/bold]. Agent state reset, chat history preserved.[/cyan]",
1615
+ border_style="cyan",
1616
+ box=box.ROUNDED
1617
+ ))
1618
+ except Exception as e:
1619
+ console.print(f"[red]Error removing MCP: {e}[/red]")
1620
+ continue
1621
+
1622
+ # /rm_toolkit command - remove toolkit
1623
+ if user_input == '/rm_toolkit' or user_input.startswith('/rm_toolkit '):
1624
+ if not (is_direct or is_local or is_inventory):
1625
+ console.print("[yellow]Removing toolkit is only available for local agents and built-in agents.[/yellow]")
1626
+ continue
1627
+
1628
+ if not added_toolkit_configs:
1629
+ console.print("[yellow]No toolkits are currently loaded.[/yellow]")
1630
+ continue
1631
+
1632
+ # Get toolkit names from config files
1633
+ toolkit_info = [] # List of (name, file_path)
1634
+ for toolkit_file in added_toolkit_configs:
1635
+ try:
1636
+ with open(toolkit_file, 'r') as f:
1637
+ tk_config = json.load(f)
1638
+ tk_name = tk_config.get('toolkit_name', Path(toolkit_file).stem)
1639
+ toolkit_info.append((tk_name, toolkit_file))
1640
+ except Exception:
1641
+ toolkit_info.append((Path(toolkit_file).stem, toolkit_file))
1642
+
1643
+ parts = user_input.split(maxsplit=1)
1644
+ if len(parts) == 2:
1645
+ # Direct removal by name
1646
+ toolkit_name_to_remove = parts[1].strip()
1647
+ matching = [(name, path) for name, path in toolkit_info if name == toolkit_name_to_remove]
1648
+ if not matching:
1649
+ console.print(f"[yellow]Toolkit '{toolkit_name_to_remove}' not found.[/yellow]")
1650
+ console.print(f"[dim]Loaded toolkits: {', '.join(name for name, _ in toolkit_info)}[/dim]")
1651
+ continue
1652
+ toolkit_file_to_remove = matching[0][1]
1653
+ else:
1654
+ # Interactive selection
1655
+ console.print("\n🔧 [bold cyan]Remove Toolkit[/bold cyan]\n")
1656
+ for i, (name, _) in enumerate(toolkit_info, 1):
1657
+ console.print(f" [bold]{i}[/bold]. {name}")
1658
+ console.print(f" [bold]0[/bold]. [dim]Cancel[/dim]")
1659
+ console.print()
1660
+
1661
+ try:
1662
+ choice = int(input("Select toolkit to remove: ").strip())
1663
+ if choice == 0:
1664
+ continue
1665
+ if 1 <= choice <= len(toolkit_info):
1666
+ toolkit_name_to_remove, toolkit_file_to_remove = toolkit_info[choice - 1]
1667
+ else:
1668
+ console.print("[yellow]Invalid selection.[/yellow]")
1669
+ continue
1670
+ except (ValueError, KeyboardInterrupt):
1671
+ continue
1672
+
1673
+ # Remove the toolkit
1674
+ added_toolkit_configs.remove(toolkit_file_to_remove)
1675
+
1676
+ # Recreate agent executor without the toolkit
1677
+ from .tools import create_session_memory, update_session_metadata
1678
+ memory = create_session_memory(current_session_id)
1679
+ try:
1680
+ agent_executor, mcp_session_manager, llm, llm_model, filesystem_tools, terminal_tools, planning_tools = _setup_local_agent_executor(
1681
+ client, agent_def, tuple(added_toolkit_configs), config, current_model, current_temperature, current_max_tokens, memory, allowed_directories, plan_state
1682
+ )
1683
+ # Persist updated toolkits to session
1684
+ update_session_metadata(current_session_id, {
1685
+ 'added_toolkit_configs': list(added_toolkit_configs)
1686
+ })
1687
+ console.print(Panel(
1688
+ f"[cyan]ℹ Removed toolkit: [bold]{toolkit_name_to_remove}[/bold]. Agent state reset, chat history preserved.[/cyan]",
1689
+ border_style="cyan",
1690
+ box=box.ROUNDED
1691
+ ))
1692
+ except Exception as e:
1693
+ console.print(f"[red]Error removing toolkit: {e}[/red]")
1694
+ continue
1695
+
1696
+ # /mode command - set approval mode
1697
+ if user_input == '/mode' or user_input.startswith('/mode '):
1698
+ parts = user_input.split(maxsplit=1)
1699
+ if len(parts) == 1:
1700
+ # Show current mode and options
1701
+ mode_info = {
1702
+ 'always': ('yellow', 'Confirm before each tool execution'),
1703
+ 'auto': ('green', 'Execute tools without confirmation'),
1704
+ 'yolo': ('red', 'No confirmations, skip safety warnings')
1705
+ }
1706
+ console.print("\n🔧 [bold cyan]Approval Mode:[/bold cyan]\n")
1707
+ for mode_name, (color, desc) in mode_info.items():
1708
+ marker = "●" if mode_name == approval_mode else "○"
1709
+ console.print(f" [{color}]{marker}[/{color}] [bold]{mode_name}[/bold] - {desc}")
1710
+ console.print(f"\n[dim]Usage: /mode <always|auto|yolo>[/dim]")
1711
+ else:
1712
+ new_mode = parts[1].lower().strip()
1713
+ if new_mode in ['always', 'auto', 'yolo']:
1714
+ approval_mode = new_mode
1715
+ mode_colors = {'always': 'yellow', 'auto': 'green', 'yolo': 'red'}
1716
+ console.print(f"✓ [green]Mode set to[/green] [{mode_colors[new_mode]}][bold]{new_mode}[/bold][/{mode_colors[new_mode]}]")
1717
+ else:
1718
+ console.print(f"[yellow]Unknown mode: {new_mode}. Use: always, auto, or yolo[/yellow]")
1719
+ continue
1720
+
1721
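# --- Editor's sketch (illustrative only, not part of this diff) --------------
# How the three approval modes set above are typically interpreted before a
# tool call runs: 'always' confirms every execution, 'auto' runs without
# confirmation but may still stop on risky operations, 'yolo' skips all checks.
# This gate is hypothetical, for illustration; the package's real approval
# logic (including how "risky" is decided) may differ.
def should_run_tool(approval_mode: str, tool_name: str,
                    is_risky: bool, ask) -> bool:
    if approval_mode == "yolo":
        return True                                   # no confirmations, no safety checks
    if approval_mode == "auto":
        return not is_risky or ask(f"{tool_name} looks risky. Run it? [y/N] ")
    # 'always': confirm before each tool execution
    return ask(f"Run tool {tool_name}? [y/N] ")

# Example:
# should_run_tool("always", "terminal.run", False,
#                 ask=lambda prompt: input(prompt).strip().lower() == "y")
# ------------------------------------------------------------------------------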
+ # /dir command - manage allowed directories
1722
+ if user_input == '/dir' or user_input.startswith('/dir '):
1723
+ parts = user_input.split()
1724
+
1725
+ if len(parts) == 1:
1726
+ # /dir - list all allowed directories
1727
+ if allowed_directories:
1728
+ console.print("📁 [bold cyan]Allowed directories:[/bold cyan]")
1729
+ for i, d in enumerate(allowed_directories):
1730
+ marker = "●" if i == 0 else "○"
1731
+ label = " [dim](primary)[/dim]" if i == 0 else ""
1732
+ console.print(f" {marker} {d}{label}")
1733
+ else:
1734
+ console.print("[yellow]No directories allowed.[/yellow]")
1735
+ console.print("[dim]Usage: /dir [add|rm|remove] /path/to/directory[/dim]")
1736
+ continue
1737
+
1738
+ action = parts[1].lower()
1739
+
1740
+ # Handle /dir add /path or /dir /path (add is default)
1741
+ if action in ['add', 'rm', 'remove']:
1742
+ if len(parts) < 3:
1743
+ console.print(f"[yellow]Missing path. Usage: /dir {action} /path/to/directory[/yellow]")
1744
+ continue
1745
+ dir_path = parts[2]
1746
+ else:
1747
+ # /dir /path - default to add
1748
+ action = 'add'
1749
+ dir_path = parts[1]
1750
+
1751
+ dir_path = str(Path(dir_path).expanduser().resolve())
1752
+
1753
+ if action == 'add':
1754
+ if not Path(dir_path).exists():
1755
+ console.print(f"[red]Directory not found: {dir_path}[/red]")
1756
+ continue
1757
+ if not Path(dir_path).is_dir():
1758
+ console.print(f"[red]Not a directory: {dir_path}[/red]")
1759
+ continue
1760
+
1761
+ if dir_path in allowed_directories:
1762
+ console.print(f"[yellow]Directory already allowed: {dir_path}[/yellow]")
1763
+ continue
1764
+
1765
+ allowed_directories.append(dir_path)
1766
+
1767
+ # Recreate agent executor with updated directories
1768
+ if is_direct or is_local or is_inventory:
1769
+ from .tools import create_session_memory
1770
+ memory = create_session_memory(current_session_id)
1771
+ try:
1772
+ agent_executor, mcp_session_manager, llm, llm_model, filesystem_tools, terminal_tools, planning_tools = _setup_local_agent_executor(
1773
+ client, agent_def, tuple(added_toolkit_configs), config, current_model, current_temperature, current_max_tokens, memory, allowed_directories, plan_state
1774
+ )
1775
+ console.print(Panel(
1776
+ f"[cyan]✓ Added directory: [bold]{dir_path}[/bold]\n Total allowed: {len(allowed_directories)}[/cyan]",
1777
+ border_style="cyan",
1778
+ box=box.ROUNDED
1779
+ ))
1780
+ except Exception as e:
1781
+ allowed_directories.remove(dir_path) # Rollback
1782
+ console.print(f"[red]Error adding directory: {e}[/red]")
1783
+ else:
1784
+ console.print("[yellow]Directory mounting is only available for local agents and built-in agents.[/yellow]")
1785
+
1786
+ elif action in ['rm', 'remove']:
1787
+ if dir_path not in allowed_directories:
1788
+ console.print(f"[yellow]Directory not in allowed list: {dir_path}[/yellow]")
1789
+ if allowed_directories:
1790
+ console.print("[dim]Currently allowed:[/dim]")
1791
+ for d in allowed_directories:
1792
+ console.print(f"[dim] - {d}[/dim]")
1793
+ continue
1794
+
1795
+ if len(allowed_directories) == 1:
1796
+ console.print("[yellow]Cannot remove the last directory. Use /dir add first to add another.[/yellow]")
1797
+ continue
1798
+
1799
+ allowed_directories.remove(dir_path)
1800
+
1801
+ # Recreate agent executor with updated directories
1802
+ if is_direct or is_local or is_inventory:
1803
+ from .tools import create_session_memory
1804
+ memory = create_session_memory(current_session_id)
1805
+ try:
1806
+ agent_executor, mcp_session_manager, llm, llm_model, filesystem_tools, terminal_tools, planning_tools = _setup_local_agent_executor(
1807
+ client, agent_def, tuple(added_toolkit_configs), config, current_model, current_temperature, current_max_tokens, memory, allowed_directories, plan_state
1808
+ )
1809
+ console.print(Panel(
1810
+ f"[cyan]✓ Removed directory: [bold]{dir_path}[/bold]\n Remaining: {len(allowed_directories)}[/cyan]",
1811
+ border_style="cyan",
1812
+ box=box.ROUNDED
1813
+ ))
1814
+ except Exception as e:
1815
+ allowed_directories.append(dir_path) # Rollback
1816
+ console.print(f"[red]Error removing directory: {e}[/red]")
1817
+ else:
1818
+ console.print("[yellow]Directory mounting is only available for local agents and built-in agents.[/yellow]")
1819
+ continue
1820
+
1821
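# --- Editor's sketch (illustrative only, not part of this diff) --------------
# Why /dir matters: filesystem-style tools usually refuse paths outside the
# allowed roots.  A containment check along these lines (hypothetical, for
# illustration; the package's filesystem tools implement their own checks)
# mirrors the expanduser().resolve() normalization used above.
from pathlib import Path

def is_path_allowed(path: str, allowed_directories: list[str]) -> bool:
    target = Path(path).expanduser().resolve()
    for root in allowed_directories:
        root_path = Path(root).expanduser().resolve()
        if target == root_path or root_path in target.parents:
            return True
    return False

# is_path_allowed("./src/main.py", ["/home/me/project"])
# -> True only if the resolved path sits under /home/me/project
# ------------------------------------------------------------------------------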
+ # /inventory command - load inventory/knowledge graph from path
1822
+ if user_input == '/inventory' or user_input.startswith('/inventory '):
1823
+ if not (is_direct or is_local or is_inventory):
1824
+ console.print("[yellow]Loading inventory is only available for local agents and built-in agents.[/yellow]")
1825
+ continue
1826
+
1827
+ parts = user_input.split(maxsplit=1)
1828
+ if len(parts) == 1:
1829
+ # Show current inventory and available files
1830
+ current_inventory = None
1831
+ for tc in added_toolkit_configs:
1832
+ if isinstance(tc, dict) and tc.get('type') == 'inventory':
1833
+ current_inventory = tc.get('graph_path')
1834
+ break
1835
+ elif isinstance(tc, str):
1836
+ try:
1837
+ with open(tc, 'r') as f:
1838
+ cfg = json.load(f)
1839
+ if cfg.get('type') == 'inventory':
1840
+ current_inventory = cfg.get('graph_path')
1841
+ break
1842
+ except Exception:
1843
+ pass
1844
+
1845
+ if current_inventory:
1846
+ console.print(f"📊 [bold cyan]Current inventory:[/bold cyan] {current_inventory}")
1847
+ else:
1848
+ console.print("[yellow]No inventory loaded.[/yellow]")
1849
+
1850
+ # Show available .json files
1851
+ primary_dir = allowed_directories[0] if allowed_directories else None
1852
+ available = _get_inventory_json_files(primary_dir)
1853
+ if available:
1854
+ console.print(f"[dim]Available files: {', '.join(available[:10])}")
1855
+ if len(available) > 10:
1856
+ console.print(f"[dim] ... and {len(available) - 10} more[/dim]")
1857
+ console.print("[dim]Usage: /inventory <path/to/graph.json>[/dim]")
1858
+ else:
1859
+ inventory_path = parts[1].strip()
1860
+
1861
+ # Build inventory config from path
1862
+ primary_dir = allowed_directories[0] if allowed_directories else None
1863
+ inventory_config = _build_inventory_config(inventory_path, primary_dir)
1864
+ if not inventory_config:
1865
+ console.print(f"[red]Inventory file not found: {inventory_path}[/red]")
1866
+ # Show search locations
1867
+ console.print("[dim]Searched in:[/dim]")
1868
+ console.print(f"[dim] - {Path.cwd()}[/dim]")
1869
+ console.print(f"[dim] - {Path.cwd() / '.alita' / 'inventory'}[/dim]")
1870
+ if primary_dir:
1871
+ console.print(f"[dim] - {primary_dir}[/dim]")
1872
+ console.print(f"[dim] - {Path(primary_dir) / '.alita' / 'inventory'}[/dim]")
1873
+ continue
1874
+
1875
+ # Remove any existing inventory toolkit configs
1876
+ new_toolkit_configs = []
1877
+ removed_inventory = None
1878
+ for tc in added_toolkit_configs:
1879
+ if isinstance(tc, dict) and tc.get('type') == 'inventory':
1880
+ removed_inventory = tc.get('toolkit_name', 'inventory')
1881
+ continue # Skip existing inventory
1882
+ elif isinstance(tc, str):
1883
+ try:
1884
+ with open(tc, 'r') as f:
1885
+ cfg = json.load(f)
1886
+ if cfg.get('type') == 'inventory':
1887
+ removed_inventory = cfg.get('toolkit_name', Path(tc).stem)
1888
+ continue # Skip existing inventory
1889
+ except Exception:
1890
+ pass
1891
+ new_toolkit_configs.append(tc)
1892
+
1893
+ # Add new inventory config
1894
+ new_toolkit_configs.append(inventory_config)
1895
+ added_toolkit_configs = new_toolkit_configs
1896
+
1897
+ # Recreate agent executor with new inventory
1898
+ from .tools import create_session_memory, update_session_metadata
1899
+ memory = create_session_memory(current_session_id)
1900
+ try:
1901
+ agent_executor, mcp_session_manager, llm, llm_model, filesystem_tools, terminal_tools, planning_tools = _setup_local_agent_executor(
1902
+ client, agent_def, tuple(added_toolkit_configs), config, current_model, current_temperature, current_max_tokens, memory, allowed_directories, plan_state
1903
+ )
1904
+ # Persist updated toolkits to session (exclude transient inventory configs)
1905
+ serializable_configs = [tc for tc in added_toolkit_configs if isinstance(tc, str)]
1906
+ update_session_metadata(current_session_id, {
1907
+ 'added_toolkit_configs': serializable_configs,
1908
+ 'inventory_graph': inventory_config.get('graph_path') # Save just the graph path
1909
+ })
1910
+
1911
+ toolkit_name = inventory_config['toolkit_name']
1912
+ graph_path = inventory_config['graph_path']
1913
+ if removed_inventory:
1914
+ console.print(Panel(
1915
+ f"[cyan]ℹ Replaced inventory [bold]{removed_inventory}[/bold] with [bold]{toolkit_name}[/bold]\n"
1916
+ f" Graph: {graph_path}[/cyan]",
1917
+ border_style="cyan",
1918
+ box=box.ROUNDED
1919
+ ))
1920
+ else:
1921
+ console.print(Panel(
1922
+ f"[cyan]✓ Loaded inventory: [bold]{toolkit_name}[/bold]\n"
1923
+ f" Graph: {graph_path}[/cyan]",
1924
+ border_style="cyan",
1925
+ box=box.ROUNDED
1926
+ ))
1927
+ except Exception as e:
1928
+ console.print(f"[red]Error loading inventory: {e}[/red]")
1929
+ continue
1930
+
1931
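# --- Editor's sketch (illustrative only, not part of this diff) --------------
# Condensed view of what the /inventory handler above does with the toolkit
# config list: drop any existing inventory entry (either an inline dict or a
# JSON config file whose "type" is "inventory") and append the new one.  The
# file handling here is simplified for illustration.
import json
from pathlib import Path

def is_inventory_config(entry) -> bool:
    if isinstance(entry, dict):
        return entry.get("type") == "inventory"
    try:
        return json.loads(Path(entry).read_text()).get("type") == "inventory"
    except Exception:
        return False

def replace_inventory(configs: list, new_inventory: dict) -> list:
    kept = [c for c in configs if not is_inventory_config(c)]
    kept.append(new_inventory)   # e.g. {"type": "inventory", "toolkit_name": "repo",
    return kept                  #       "graph_path": "./knowledge_graph.json"}
# ------------------------------------------------------------------------------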
+ # /session command - list or resume sessions
1932
+ if user_input == '/session' or user_input.startswith('/session '):
1933
+ from .tools import list_sessions, PlanState
1934
+ parts = user_input.split(maxsplit=2)
1935
+
1936
+ if len(parts) == 1 or parts[1] == 'list':
1937
+ # List all sessions with plans
1938
+ sessions = list_sessions()
1939
+ if not sessions:
1940
+ console.print("[dim]No saved sessions found.[/dim]")
1941
+ console.print("[dim]Sessions are created when you start chatting.[/dim]")
1942
+ else:
1943
+ console.print("\n📋 [bold cyan]Saved Sessions:[/bold cyan]\n")
1944
+ from datetime import datetime
1945
+ for i, sess in enumerate(sessions[:10], 1): # Show last 10
1946
+ modified = datetime.fromtimestamp(sess['modified']).strftime('%Y-%m-%d %H:%M')
1947
+
1948
+ # Build session info line
1949
+ agent_info = sess.get('agent_name', 'unknown')
1950
+ model_info = sess.get('model', '')
1951
+ if model_info:
1952
+ agent_info = f"{agent_info} ({model_info})"
1953
+
1954
+ # Check if this is current session
1955
+ is_current = sess['session_id'] == current_session_id
1956
+ current_marker = " [green]◀ current[/green]" if is_current else ""
1957
+
1958
+ # Plan progress if available
1959
+ if sess.get('steps_total', 0) > 0:
1960
+ progress = f"[{sess['steps_completed']}/{sess['steps_total']}]"
1961
+ status = "✓" if sess['steps_completed'] == sess['steps_total'] else "○"
1962
+ plan_info = f" - {sess.get('title', 'Untitled')} {progress}"
1963
+ else:
1964
+ status = "●"
1965
+ plan_info = ""
1966
+
1967
+ console.print(f" {status} [cyan]{sess['session_id']}[/cyan]{plan_info}")
1968
+ console.print(f" [dim]{agent_info} • {modified}[/dim]{current_marker}")
1969
+ console.print(f"\n[dim]Usage: /session resume <session_id>[/dim]")
1970
+
1971
+ elif parts[1] == 'resume' and len(parts) > 2:
1972
+ session_id = parts[2].strip()
1973
+ from .tools import load_session_metadata, create_session_memory, from_portable_path
1974
+
1975
+ # Check if session exists (either plan or metadata)
1976
+ loaded_state = PlanState.load(session_id)
1977
+ session_metadata = load_session_metadata(session_id)
1978
+
1979
+ if loaded_state or session_metadata:
1980
+ # Update current session to use this session_id
1981
+ current_session_id = session_id
1982
+
1983
+ # Restore memory from session SQLite (reuses existing memory.db file)
1984
+ memory = create_session_memory(session_id)
1985
+
1986
+ # Update plan state if available
1987
+ if loaded_state:
1988
+ plan_state.update(loaded_state.to_dict())
1989
+ resume_info = f"\n\n{loaded_state.render()}"
1990
+ else:
1991
+ plan_state['session_id'] = session_id
1992
+ resume_info = ""
1993
+
1994
+ # Restore agent source and reload agent definition if available
1995
+ restored_agent = False
1996
+ if session_metadata:
1997
+ agent_source = session_metadata.get('agent_source')
1998
+ if agent_source:
1999
+ agent_file_path = from_portable_path(agent_source)
2000
+ if Path(agent_file_path).exists():
2001
+ try:
2002
+ agent_def = load_agent_definition(agent_file_path)
2003
+ current_agent_file = agent_file_path
2004
+ agent_name = agent_def.get('name', Path(agent_file_path).stem)
2005
+ is_local = True
2006
+ is_direct = False
2007
+ restored_agent = True
2008
+ except Exception as e:
2009
+ console.print(f"[yellow]Warning: Could not reload agent from {agent_source}: {e}[/yellow]")
2010
+
2011
+ # Restore added toolkit configs
2012
+ restored_toolkit_configs = session_metadata.get('added_toolkit_configs', [])
2013
+ if restored_toolkit_configs:
2014
+ added_toolkit_configs.clear()
2015
+ added_toolkit_configs.extend(restored_toolkit_configs)
2016
+
2017
+ # Restore added MCPs to agent_def
2018
+ restored_mcps = session_metadata.get('added_mcps', [])
2019
+ if restored_mcps and restored_agent:
2020
+ if 'mcps' not in agent_def:
2021
+ agent_def['mcps'] = []
2022
+ for mcp_name in restored_mcps:
2023
+ if mcp_name not in [m if isinstance(m, str) else m.get('name') for m in agent_def.get('mcps', [])]:
2024
+ agent_def['mcps'].append(mcp_name)
2025
+
2026
+ # Restore model/temperature overrides
2027
+ if session_metadata.get('model'):
2028
+ current_model = session_metadata['model']
2029
+ if restored_agent:
2030
+ agent_def['model'] = current_model
2031
+ if session_metadata.get('temperature') is not None:
2032
+ current_temperature = session_metadata['temperature']
2033
+ if restored_agent:
2034
+ agent_def['temperature'] = current_temperature
2035
+
2036
+ # Restore allowed directories
2037
+ if session_metadata.get('allowed_directories'):
2038
+ allowed_directories = session_metadata['allowed_directories']
2039
+ elif session_metadata.get('work_dir'):
2040
+ # Backward compatibility with old sessions
2041
+ allowed_directories = [session_metadata['work_dir']]
2042
+
2043
+ # Reinitialize context manager with resumed session_id to load chat history
2044
+ ctx_manager = CLIContextManager(
2045
+ session_id=session_id,
2046
+ max_context_tokens=context_config.get('max_context_tokens', 8000),
2047
+ preserve_recent=context_config.get('preserve_recent_messages', 5),
2048
+ pruning_method=context_config.get('pruning_method', 'oldest_first'),
2049
+ enable_summarization=context_config.get('enable_summarization', True),
2050
+ summary_trigger_ratio=context_config.get('summary_trigger_ratio', 0.8),
2051
+ summaries_limit=context_config.get('summaries_limit_count', 5),
2052
+ llm=llm  # always bound above (may be None for platform agents)
2053
+ )
2054
+
2055
+ # Show session info
2056
+ agent_info = session_metadata.get('agent_name', 'unknown') if session_metadata else 'unknown'
2057
+ model_info = session_metadata.get('model', '') if session_metadata else ''
2058
+
2059
+ console.print(Panel(
2060
+ f"[green]✓ Resumed session:[/green] [bold]{session_id}[/bold]\n"
2061
+ f"[dim]Agent: {agent_info}" + (f" • Model: {model_info}" if model_info else "") + f"[/dim]"
2062
+ f"{resume_info}",
2063
+ border_style="green",
2064
+ box=box.ROUNDED
2065
+ ))
2066
+
2067
+ # Display restored chat history
2068
+ chat_history_export = ctx_manager.export_chat_history(include_only=False)
2069
+ if chat_history_export:
2070
+ preserve_recent = context_config.get('preserve_recent_messages', 5)
2071
+ total_messages = len(chat_history_export)
2072
+
2073
+ if total_messages > preserve_recent:
2074
+ console.print(f"\n[dim]... {total_messages - preserve_recent} earlier messages in context[/dim]")
2075
+ messages_to_show = chat_history_export[-preserve_recent:]
2076
+ else:
2077
+ messages_to_show = chat_history_export
2078
+
2079
+ for msg in messages_to_show:
2080
+ role = msg.get('role', 'user')
2081
+ content = msg.get('content', '')[:200] # Truncate for display
2082
+ if len(msg.get('content', '')) > 200:
2083
+ content += '...'
2084
+ role_color = 'cyan' if role == 'user' else 'green'
2085
+ role_label = 'You' if role == 'user' else 'Assistant'
2086
+ console.print(f"[dim][{role_color}]{role_label}:[/{role_color}] {content}[/dim]")
2087
+ console.print()
2088
+
2089
+ # Recreate agent executor with restored tools if we have a local/built-in agent
2090
+ if (is_direct or is_local or is_inventory) and restored_agent:
2091
+ try:
2092
+ agent_executor, mcp_session_manager, llm, llm_model, filesystem_tools, terminal_tools, planning_tools = _setup_local_agent_executor(
2093
+ client, agent_def, tuple(added_toolkit_configs), config, current_model, current_temperature, current_max_tokens, memory, allowed_directories, plan_state
2094
+ )
2095
+ ctx_manager.llm = llm # Update LLM for summarization
2096
+
2097
+ # Warn about MCP state loss
2098
+ if restored_mcps:
2099
+ console.print("[yellow]Note: MCP connections re-initialized (stateful server state like browser sessions are lost)[/yellow]")
2100
+ except Exception as e:
2101
+ console.print(f"[red]Error recreating agent executor: {e}[/red]")
2102
+ console.print("[yellow]Session state loaded but agent not fully restored. Some tools may not work.[/yellow]")
2103
+ elif is_direct or is_local or is_inventory:
2104
+ # Just update planning tools if we couldn't restore agent
2105
+ try:
2106
+ from .tools import get_planning_tools
2107
+ if loaded_state:
2108
+ planning_tools, _ = get_planning_tools(loaded_state)
2109
+ except Exception as e:
2110
+ console.print(f"[yellow]Warning: Could not reload planning tools: {e}[/yellow]")
2111
+ else:
2112
+ console.print(f"[red]Session not found: {session_id}[/red]")
2113
+ else:
2114
+ console.print("[dim]Usage: /session [list] or /session resume <session_id>[/dim]")
2115
+ continue
2116
+
2117
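# --- Editor's sketch (illustrative only, not part of this diff) --------------
# The effective model/temperature used throughout this loop (and restored on
# /session resume above) follows a simple precedence, visible in expressions
# like `current_model or agent_def.get('model', default_model)`:
# runtime override -> agent definition -> configured default.  The defaults
# below mirror the fallbacks used earlier in this command.
from typing import Optional

def resolve_model(current_model: Optional[str], agent_def: dict,
                  default_model: str = "gpt-4o") -> str:
    return current_model or agent_def.get("model", default_model)

def resolve_temperature(current_temperature: Optional[float], agent_def: dict,
                        default_temperature: float = 0.1) -> float:
    if current_temperature is not None:
        return current_temperature
    return agent_def.get("temperature", default_temperature)

# resolve_model(None, {"model": "claude-3-5-sonnet"})  -> "claude-3-5-sonnet"
# resolve_temperature(0.7, {"temperature": 0.3})       -> 0.7 (runtime override wins)
# ------------------------------------------------------------------------------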
+ # /agent command - switch to a different agent
2118
+ if user_input == '/agent':
2119
+ selected_agent = _select_agent_interactive(client, config)
2120
+ if selected_agent and selected_agent != '__direct__' and selected_agent != '__inventory__':
2121
+ # Load the new agent
2122
+ new_is_local = Path(selected_agent).exists()
2123
+
2124
+ if new_is_local:
2125
+ agent_def = load_agent_definition(selected_agent)
2126
+ agent_name = agent_def.get('name', Path(selected_agent).stem)
2127
+ agent_type = "Local Agent"
2128
+ is_local = True
2129
+ is_direct = False
2130
+ is_inventory = False
2131
+ current_agent_file = selected_agent # Track for /reload
2132
+ else:
2133
+ # Platform agent
2134
+ agents = client.get_list_of_apps()
2135
+ new_agent = None
2136
+ try:
2137
+ agent_id = int(selected_agent)
2138
+ new_agent = next((a for a in agents if a['id'] == agent_id), None)
2139
+ except ValueError:
2140
+ new_agent = next((a for a in agents if a['name'] == selected_agent), None)
2141
+
2142
+ if new_agent:
2143
+ agent_name = new_agent['name']
2144
+ agent_type = "Platform Agent"
2145
+ is_local = False
2146
+ is_direct = False
2147
+ current_agent_file = None # No file for platform agents
2148
+
2149
+ # Setup platform agent
2150
+ details = client.get_app_details(new_agent['id'])
2151
+ version_id = details['versions'][0]['id']
2152
+ agent_executor = client.application(
2153
+ application_id=new_agent['id'],
2154
+ application_version_id=version_id,
2155
+ memory=memory,
2156
+ chat_history=chat_history
2157
+ )
2158
+ console.print(Panel(
2159
+ f"[cyan]ℹ Switched to agent: [bold]{agent_name}[/bold] ({agent_type}). Chat history preserved.[/cyan]",
2160
+ border_style="cyan",
2161
+ box=box.ROUNDED
2162
+ ))
2163
+ continue
2164
+
2165
+ # For local agents, recreate executor
2166
+ if new_is_local:
2167
+ from .tools import create_session_memory
2168
+ memory = create_session_memory(current_session_id)
2169
+ added_toolkit_configs = []
2170
+ try:
2171
+ agent_executor, mcp_session_manager, llm, llm_model, filesystem_tools, terminal_tools, planning_tools = _setup_local_agent_executor(
2172
+ client, agent_def, tuple(added_toolkit_configs), config, current_model, current_temperature, current_max_tokens, memory, allowed_directories, plan_state
2173
+ )
2174
+ console.print(Panel(
2175
+ f"[cyan]ℹ Switched to agent: [bold]{agent_name}[/bold] ({agent_type}). Agent state reset, chat history preserved.[/cyan]",
2176
+ border_style="cyan",
2177
+ box=box.ROUNDED
2178
+ ))
2179
+ except Exception as e:
2180
+ console.print(f"[red]Error switching agent: {e}[/red]")
2181
+ elif selected_agent == '__direct__':
2182
+ # Switch back to direct mode
2183
+ is_direct = True
2184
+ is_local = False
2185
+ is_inventory = False
2186
+ current_agent_file = None # No file for direct mode
2187
+ agent_name = "Alita"
2188
+ agent_type = "Direct LLM"
2189
+ alita_prompt = _get_alita_system_prompt(config)
2190
+ agent_def = {
2191
+ 'model': current_model or default_model,
2192
+ 'temperature': current_temperature if current_temperature is not None else default_temperature,
2193
+ 'max_tokens': current_max_tokens or default_max_tokens,
2194
+ 'system_prompt': alita_prompt
2195
+ }
2196
+ from .tools import create_session_memory
2197
+ memory = create_session_memory(current_session_id)
2198
+ try:
2199
+ agent_executor, mcp_session_manager, llm, llm_model, filesystem_tools, terminal_tools, planning_tools = _setup_local_agent_executor(
2200
+ client, agent_def, tuple(added_toolkit_configs), config, current_model, current_temperature, current_max_tokens, memory, allowed_directories, plan_state
2201
+ )
2202
+ console.print(Panel(
2203
+ f"[cyan]ℹ Switched to [bold]Alita[/bold]. Agent state reset, chat history preserved.[/cyan]",
2204
+ border_style="cyan",
2205
+ box=box.ROUNDED
2206
+ ))
2207
+ except Exception as e:
2208
+ console.print(f"[red]Error switching to direct mode: {e}[/red]")
2209
+ elif selected_agent == '__inventory__':
2210
+ # Switch to inventory mode
2211
+ is_direct = False
2212
+ is_local = False
2213
+ is_inventory = True
2214
+ current_agent_file = None # No file for inventory mode
2215
+ agent_name = "Inventory"
2216
+ agent_type = "Built-in Agent"
2217
+ inventory_prompt = _get_inventory_system_prompt(config)
2218
+ agent_def = {
2219
+ 'name': 'inventory-agent',
2220
+ 'model': current_model or default_model,
2221
+ 'temperature': current_temperature if current_temperature is not None else 0.3,
2222
+ 'max_tokens': current_max_tokens or default_max_tokens,
2223
+ 'system_prompt': inventory_prompt,
2224
+ 'toolkit_configs': [
2225
+ {'type': 'inventory', 'graph_path': './knowledge_graph.json'}
2226
+ ]
2227
+ }
2228
+ from .tools import create_session_memory
2229
+ memory = create_session_memory(current_session_id)
2230
+ try:
2231
+ agent_executor, mcp_session_manager, llm, llm_model, filesystem_tools, terminal_tools, planning_tools = _setup_local_agent_executor(
2232
+ client, agent_def, tuple(added_toolkit_configs), config, current_model, current_temperature, current_max_tokens, memory, allowed_directories, plan_state
2233
+ )
2234
+ console.print(Panel(
2235
+ f"[cyan]ℹ Switched to [bold]Inventory[/bold] agent. Use /add_toolkit to add source toolkits.[/cyan]",
2236
+ border_style="cyan",
2237
+ box=box.ROUNDED
2238
+ ))
2239
+ except Exception as e:
2240
+ console.print(f"[red]Error switching to inventory mode: {e}[/red]")
2241
+ continue
2242
+
2243
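# --- Editor's sketch (illustrative only, not part of this diff) --------------
# Minimal shape of the in-memory agent definitions used for the built-in modes,
# assembled from the defaults shown above.  The actual system prompt text comes
# from _get_alita_system_prompt / _get_inventory_system_prompt; the
# placeholders below are not real prompts.
direct_agent_def_example = {
    "model": "gpt-4o",            # default_model fallback
    "temperature": 0.1,           # default_temperature fallback
    "max_tokens": 4096,           # default_max_tokens fallback
    "system_prompt": "<Alita system prompt>",
}

inventory_agent_def_example = {
    "name": "inventory-agent",
    "model": "gpt-4o",
    "temperature": 0.3,           # inventory mode uses a slightly higher default
    "max_tokens": 4096,
    "system_prompt": "<Inventory system prompt>",
    "toolkit_configs": [{"type": "inventory", "graph_path": "./knowledge_graph.json"}],
}
# ------------------------------------------------------------------------------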
+ # Execute agent
2244
+ # Track if history was already added during continuation handling
2245
+ history_already_added = False
2246
+ original_user_input = user_input # Preserve for history tracking
2247
+
2248
+ if (is_direct or is_local or is_inventory) and agent_executor is None:
2249
+ # Local agent without tools: use direct LLM call with streaming
2250
+ system_prompt = agent_def.get('system_prompt', '')
2251
+ messages = []
2252
+ if system_prompt:
2253
+ messages.append({"role": "system", "content": system_prompt})
2254
+
2255
+ # Build pruned context from context manager
2256
+ context_messages = ctx_manager.build_context()
2257
+ for msg in context_messages:
2258
+ messages.append(msg)
2259
+
2260
+ # Add user message
2261
+ messages.append({"role": "user", "content": user_input})
2262
+
2263
+ try:
2264
+ # Try streaming if available
2265
+ if hasattr(llm, 'stream'):
2266
+ output_chunks = []
2267
+ first_chunk = True
2268
+
2269
+ # Show spinner until first token arrives
2270
+ status = console.status("[yellow]Thinking...[/yellow]", spinner="dots")
2271
+ status.start()
2272
+
2273
+ # Stream the response token by token
2274
+ for chunk in llm.stream(messages):
2275
+ if hasattr(chunk, 'content'):
2276
+ token = chunk.content
2277
+ else:
2278
+ token = str(chunk)
2279
+
2280
+ if token:
2281
+ # Stop spinner and show agent name on first token
2282
+ if first_chunk:
2283
+ status.stop()
2284
+ console.print(f"\n[bold bright_cyan]{agent_name}:[/bold bright_cyan]\n", end="")
2285
+ first_chunk = False
2286
+
2287
+ console.print(token, end="", markup=False)
2288
+ output_chunks.append(token)
2289
+
2290
+ # Stop status if still running (no tokens received)
2291
+ if first_chunk:
2292
+ status.stop()
2293
+ console.print(f"\n[bold bright_cyan]{agent_name}:[/bold bright_cyan]\n", end="")
2294
+
2295
+ output = ''.join(output_chunks)
2296
+ console.print() # New line after streaming
2297
+ else:
2298
+ # Fallback to non-streaming with spinner
2299
+ with console.status("[yellow]Thinking...[/yellow]", spinner="dots"):
2300
+ response = llm.invoke(messages)
2301
+ if hasattr(response, 'content'):
2302
+ output = response.content
2303
+ else:
2304
+ output = str(response)
2305
+
2306
+ # Display response after spinner stops
2307
+ console.print(f"\n[bold bright_cyan]{agent_name}:[/bold bright_cyan]")
2308
+ if any(marker in output for marker in ['```', '**', '##', '- ', '* ']):
2309
+ console.print(Markdown(output))
2310
+ else:
2311
+ console.print(output)
2312
+ except Exception as e:
2313
+ console.print(f"\n[red]✗ Error: {e}[/red]\n")
2314
+ continue
2315
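# --- Editor's sketch (illustrative only, not part of this diff) --------------
# The streaming path above, stripped of the CLI plumbing: LangChain chat models
# expose .stream(), which yields chunks whose .content holds the next token(s),
# with .invoke() as the non-streaming fallback.  ChatOpenAI is just an example
# stand-in here; any LangChain chat model with the same interface would do.
from langchain_openai import ChatOpenAI

llm = ChatOpenAI(model="gpt-4o", temperature=0.1)
messages = [
    {"role": "system", "content": "You are a terse assistant."},
    {"role": "user", "content": "Name three uses of a knowledge graph."},
]

if hasattr(llm, "stream"):
    for chunk in llm.stream(messages):
        token = chunk.content if hasattr(chunk, "content") else str(chunk)
        if token:
            print(token, end="", flush=True)
    print()
else:
    print(llm.invoke(messages).content)
# ------------------------------------------------------------------------------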
+ else:
2316
+ # Agent with tools or platform agent: use agent executor
2317
+ # Setup callback for verbose output
2318
+ from langchain_core.runnables import RunnableConfig
2319
+ from langgraph.errors import GraphRecursionError
2320
+
2321
+ # Initialize invoke_config with thread_id for checkpointing
2322
+ # This ensures the same thread is used across continuations
2323
+ invoke_config = RunnableConfig(
2324
+ configurable={"thread_id": current_session_id}
2325
+ )
2326
+ # always proceed with continuation enabled
2327
+ invoke_config["should_continue"] = True
2328
+ # Set recursion limit for tool executions
2329
+ logger.debug(f"Setting tool steps limit to {recursion_limit}")
2330
+ invoke_config["recursion_limit"] = recursion_limit
2331
+ cli_callback = None
2332
+ if show_verbose:
2333
+ cli_callback = create_cli_callback(verbose=True, debug=debug_mode)
2334
+ invoke_config["callbacks"] = [cli_callback]
2335
+
2336
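# --- Editor's sketch (illustrative only, not part of this diff) --------------
# The invoke config assembled above, in isolation.  "thread_id" under
# "configurable" keys LangGraph's checkpointer to this chat session,
# "recursion_limit" caps graph steps per turn, and "callbacks" carries the CLI
# progress callback.  "should_continue" is specific to this package and is not
# a standard LangChain/LangGraph key, so it is omitted here.
from typing import Optional
from langchain_core.runnables import RunnableConfig

def build_invoke_config(session_id: str, recursion_limit: int = 50,
                        callbacks: Optional[list] = None) -> RunnableConfig:
    cfg = RunnableConfig(configurable={"thread_id": session_id})
    cfg["recursion_limit"] = recursion_limit
    if callbacks:
        cfg["callbacks"] = callbacks
    return cfg

# build_invoke_config("session-1234", recursion_limit=50)
# ------------------------------------------------------------------------------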
+ # Track recursion continuation state
2337
+ continue_from_recursion = False
2338
+ recursion_attempts = 0
2339
+ tool_limit_attempts = 0 # Track tool limit continuation attempts
2340
+ max_recursion_continues = 5 # Prevent infinite continuation loops
2341
+ output = None # Initialize output before loop
2342
+ result = None # Initialize result before loop
2343
+
2344
+ while True:
2345
+ try:
2346
+ # Always start with a thinking spinner
2347
+ status = console.status("[yellow]Thinking...[/yellow]", spinner="dots")
2348
+ status.start()
2349
+
2350
+ # Pass status to callback so it can stop it when tool calls start
2351
+ if cli_callback:
2352
+ cli_callback.status = status
2353
+
2354
+ try:
2355
+ result = agent_executor.invoke(
2356
+ {
2357
+ "input": [user_input] if not is_local else user_input,
2358
+ "chat_history": ctx_manager.build_context()
2359
+ },
2360
+ config=invoke_config
2361
+ )
2362
+ finally:
2363
+ # Make sure spinner is stopped
2364
+ try:
2365
+ status.stop()
2366
+ except Exception:
2367
+ pass
2368
+
2369
+ # Extract output from result
2370
+ if result is not None:
2371
+ output = extract_output_from_result(result)
2372
+
2373
+ # Check if max tool iterations were reached and prompt user
2374
+ if output and "Maximum tool execution iterations" in output and "reached" in output:
2375
+ tool_limit_attempts += 1
2376
+
2377
+ console.print()
2378
+ console.print(Panel(
2379
+ f"[yellow]⚠ Tool execution limit reached[/yellow]\n\n"
2380
+ f"The agent has executed the maximum number of tool calls in a single turn.\n"
2381
+ f"This usually happens with complex tasks that require many sequential operations.\n\n"
2382
+ f"[dim]Attempt {tool_limit_attempts}/{max_recursion_continues}[/dim]",
2383
+ title="Tool Limit Reached",
2384
+ border_style="yellow",
2385
+ box=box.ROUNDED
2386
+ ))
2387
+
2388
+ if tool_limit_attempts >= max_recursion_continues:
2389
+ console.print("[red]Maximum continuation attempts reached. Please break down your request into smaller tasks.[/red]")
2390
+ break
2391
+
2392
+ console.print("\nWhat would you like to do?")
2393
+ console.print(" [bold cyan]c[/bold cyan] - Continue execution (tell agent to resume)")
2394
+ console.print(" [bold cyan]s[/bold cyan] - Stop and keep partial results")
2395
+ console.print(" [bold cyan]n[/bold cyan] - Start a new request")
2396
+ console.print()
2397
+
2398
+ try:
2399
+ choice = input_handler.get_input("Choice [c/s/n]: ").strip().lower()
2400
+ except (KeyboardInterrupt, EOFError):
2401
+ choice = 's'
2402
+
2403
+ if choice == 'c':
2404
+ # Continue - send a follow-up message to resume
2405
+ console.print("\n[cyan]Continuing execution...[/cyan]\n")
2406
+
2407
+ # Clean up the output - remove the tool limit warning message
2408
+ clean_output = output
2409
+ if "Maximum tool execution iterations" in output:
2410
+ # Strip the warning from the end of the output
2411
+ lines = output.split('\n')
2412
+ clean_lines = [l for l in lines if "Maximum tool execution iterations" not in l and "Stopping tool execution" not in l]
2413
+ clean_output = '\n'.join(clean_lines).strip()
2414
+
+ # Add current output to history first (without the warning)
+ # Use original user input for first continuation, current for subsequent
+ history_input = original_user_input if not history_already_added else user_input
+ if clean_output:
+ chat_history.append({"role": "user", "content": history_input})
+ chat_history.append({"role": "assistant", "content": clean_output})
+ ctx_manager.add_message("user", history_input)
+ ctx_manager.add_message("assistant", clean_output)
+ history_already_added = True
+
+ # CRITICAL: Use a new thread_id when continuing to avoid corrupted
+ # checkpoint state. The tool limit may have left the checkpoint with
+ # an AIMessage containing tool_calls without corresponding ToolMessages.
+ # Using a new thread_id starts fresh with our clean context manager state.
+ import uuid
+ continuation_thread_id = f"{current_session_id}-cont-{uuid.uuid4().hex[:8]}"
+ invoke_config = RunnableConfig(
+ configurable={"thread_id": continuation_thread_id}
+ )
+ invoke_config["should_continue"] = True
+ invoke_config["recursion_limit"] = recursion_limit
+ if cli_callback:
+ invoke_config["callbacks"] = [cli_callback]
+
+ # Set new input to continue with a more explicit continuation message
+ # Include context about the task limit to help the agent understand
+ user_input = (
+ "The previous response was interrupted due to reaching the tool execution limit. "
+ "Continue from where you left off and complete the remaining steps of the original task. "
+ "Focus on what still needs to be done - do not repeat completed work."
+ )
+ continue # Retry the invoke in this inner loop
+
+ elif choice == 's':
+ console.print("\n[yellow]Stopped. Partial work has been completed.[/yellow]")
+ break # Exit retry loop and show output
+
+ else: # 'n' or anything else
+ console.print("\n[dim]Skipped. Enter a new request.[/dim]")
+ output = None
+ break # Exit retry loop
+
+ # Success - exit the retry loop
+ break
+
+ except GraphRecursionError as e:
+ recursion_attempts += 1
+ step_limit = getattr(e, 'recursion_limit', 25)
+
+ console.print()
+ console.print(Panel(
+ f"[yellow]⚠ Step limit reached ({step_limit} steps)[/yellow]\n\n"
+ f"The agent has executed the maximum number of steps allowed.\n"
+ f"This usually happens with complex tasks that require many tool calls.\n\n"
+ f"[dim]Attempt {recursion_attempts}/{max_recursion_continues}[/dim]",
+ title="Step Limit Reached",
+ border_style="yellow",
+ box=box.ROUNDED
+ ))
+
+ if recursion_attempts >= max_recursion_continues:
+ console.print("[red]Maximum continuation attempts reached. Please break down your request into smaller tasks.[/red]")
+ output = f"[Step limit reached after {recursion_attempts} continuation attempts. The task may be too complex - please break it into smaller steps.]"
+ break
+
+ # Prompt user for action
+ console.print("\nWhat would you like to do?")
+ console.print(" [bold cyan]c[/bold cyan] - Continue execution (agent will resume with fresh context)")
+ console.print(" [bold cyan]s[/bold cyan] - Stop and get partial results")
+ console.print(" [bold cyan]n[/bold cyan] - Start a new request")
+ console.print()
+
+ try:
+ choice = input_handler.get_input("Choice [c/s/n]: ").strip().lower()
+ except (KeyboardInterrupt, EOFError):
+ choice = 's'
+
+ if choice == 'c':
+ # Continue - Use a new thread_id to avoid corrupted checkpoint state.
+ # GraphRecursionError may have left the checkpoint with an AIMessage
+ # containing tool_calls without corresponding ToolMessages.
+ # Using a new thread_id starts fresh with our clean context manager state.
+ continue_from_recursion = True
+ console.print("\n[cyan]Continuing with fresh context...[/cyan]\n")
+
+ # Add current progress to history if we have it
+ # (GraphRecursionError doesn't give us partial output, but context may have been updated)
+ history_input = original_user_input if not history_already_added else user_input
+ ctx_manager.add_message("user", history_input)
+ ctx_manager.add_message("assistant", "[Previous task interrupted - continuing...]")
2505
+ history_already_added = True
2506
+
2507
+ # Create new thread_id to avoid corrupted checkpoint
2508
+ import uuid
2509
+ continuation_thread_id = f"{current_session_id}-cont-{uuid.uuid4().hex[:8]}"
2510
+ invoke_config = RunnableConfig(
2511
+ configurable={"thread_id": continuation_thread_id}
2512
+ )
2513
+ if cli_callback:
2514
+ invoke_config["callbacks"] = [cli_callback]
2515
+
2516
+ # More explicit continuation message
2517
+ user_input = (
2518
+ "The previous response was interrupted due to reaching the step limit. "
2519
+ "Continue from where you left off and complete the remaining steps of the original task. "
2520
+ "Focus on what still needs to be done - do not repeat completed work."
2521
+ )
2522
+ continue # Retry the invoke
2523
+
2524
+ elif choice == 's':
2525
+ # Stop and try to extract partial results
2526
+ console.print("\n[yellow]Stopped. Attempting to extract partial results...[/yellow]")
2527
+ output = "[Task stopped due to step limit. Partial work may have been completed - check any files or state that were modified.]"
2528
+ break
2529
+
2530
+ else: # 'n' or anything else
2531
+ console.print("\n[dim]Skipped. Enter a new request.[/dim]")
2532
+ output = None
2533
+ break
2534
+
2535
+ # Skip chat history update if we bailed out (no result)
2536
+ if output is None:
2537
+ continue
2538
+
2539
+ # Display response in a clear format
+ console.print() # Add spacing
+ console.print(f"[bold bright_cyan]{agent_name}:[/bold bright_cyan]")
+ console.print() # Add spacing before response
+ if any(marker in output for marker in ['```', '**', '##', '- ', '* ']):
+ console.print(Markdown(output))
+ else:
+ console.print(output)
+ console.print() # Add spacing after response
+
+ # Update chat history and context manager (skip if already added during continuation)
+ if not history_already_added:
+ chat_history.append({"role": "user", "content": original_user_input})
+ chat_history.append({"role": "assistant", "content": output})
+
+ # Add messages to context manager for token tracking and pruning
+ ctx_manager.add_message("user", original_user_input)
+ ctx_manager.add_message("assistant", output)
+ else:
+ # During continuation, add the final response with continuation message
+ chat_history.append({"role": "user", "content": user_input})
+ chat_history.append({"role": "assistant", "content": output})
+ ctx_manager.add_message("user", user_input)
+ ctx_manager.add_message("assistant", output)
+
+ except KeyboardInterrupt:
+ console.print("\n\n[yellow]Interrupted. Type 'exit' to quit or continue chatting.[/yellow]")
+ continue
+ except EOFError:
+ # Save final session state before exiting
+ try:
+ from .tools import update_session_metadata, to_portable_path
+ update_session_metadata(current_session_id, {
+ 'agent_source': to_portable_path(current_agent_file) if current_agent_file else None,
+ 'model': current_model or llm_model_display,
+ 'temperature': current_temperature if current_temperature is not None else llm_temperature_display,
+ 'allowed_directories': allowed_directories,
+ 'added_toolkit_configs': list(added_toolkit_configs),
+ 'added_mcps': [m if isinstance(m, str) else m.get('name') for m in agent_def.get('mcps', [])],
+ })
+ except Exception as e:
+ logger.debug(f"Failed to save session state on exit: {e}")
+ console.print("\n\n[bold cyan]Goodbye! 👋[/bold cyan]")
+ break
+
+ except click.ClickException:
+ raise
+ except Exception as e:
+ logger.exception("Failed to start chat")
+ error_panel = Panel(
+ str(e),
+ title="Error",
+ border_style="red",
+ box=box.ROUNDED
+ )
+ console.print(error_panel, style="red")
+ raise click.Abort()
+
+
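The retry handling above re-invokes the graph on a fresh thread after a step-limit error rather than resuming a possibly corrupted checkpoint. The following is a minimal sketch of that pattern, assuming `agent_executor` is a LangGraph-compiled runnable that accepts `{"input", "chat_history"}` and a `RunnableConfig`, as in the surrounding CLI code; the helper name and `session_id` parameter are illustrative only, not part of the package.

```python
# Sketch of the "fresh thread_id on GraphRecursionError" continuation pattern.
# Assumes `agent_executor` behaves like the compiled graph used by the CLI above.
import uuid

from langchain_core.runnables import RunnableConfig
from langgraph.errors import GraphRecursionError


def invoke_with_continuation(agent_executor, user_input, session_id, max_continues=3):
    """Invoke an agent and, on step-limit errors, retry on a fresh thread."""
    attempts = 0
    config = RunnableConfig(configurable={"thread_id": session_id})
    while True:
        try:
            return agent_executor.invoke(
                {"input": user_input, "chat_history": []},
                config=config,
            )
        except GraphRecursionError:
            attempts += 1
            if attempts >= max_continues:
                raise
            # Start a new thread so a checkpoint left mid tool-call cannot
            # poison the next attempt, then ask the agent to pick up the task.
            fresh_thread = f"{session_id}-cont-{uuid.uuid4().hex[:8]}"
            config = RunnableConfig(configurable={"thread_id": fresh_thread})
            user_input = (
                "The previous response was interrupted by the step limit. "
                "Continue from where you left off; do not repeat completed work."
            )
```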
+ @agent.command('run')
+ @click.argument('agent_source')
+ @click.argument('message')
+ @click.option('--version', help='Agent version (for platform agents)')
+ @click.option('--toolkit-config', multiple=True, type=click.Path(exists=True),
+ help='Toolkit configuration files')
+ @click.option('--model', help='Override LLM model')
+ @click.option('--temperature', type=float, help='Override temperature')
+ @click.option('--max-tokens', type=int, help='Override max tokens')
+ @click.option('--save-thread', help='Save thread ID to file for continuation')
+ @click.option('--dir', 'work_dir', type=click.Path(exists=True, file_okay=False, dir_okay=True),
+ help='Grant agent filesystem access to this directory')
+ @click.option('--verbose', '-v', type=click.Choice(['quiet', 'default', 'debug']), default='default',
+ help='Output verbosity level: quiet (final output only), default (tool calls + outputs), debug (all including LLM calls)')
+ @click.pass_context
+ def agent_run(ctx, agent_source: str, message: str, version: Optional[str],
+ toolkit_config: tuple, model: Optional[str],
+ temperature: Optional[float], max_tokens: Optional[int],
+ save_thread: Optional[str], work_dir: Optional[str],
+ verbose: str):
+ """Run agent with a single message (handoff mode).
+
+ \b
+ AGENT_SOURCE can be:
+ - Platform agent ID or name
+ - Path to local agent file
+
+ MESSAGE is the input message to send to the agent.
+
+ \b
+ Examples:
+ alita run my-agent "What is the status of JIRA-123?"
+ alita run ./agent.md "Create a new toolkit for Stripe API"
+ alita -o json run my-agent "Search for bugs" --toolkit-config jira.json
+ alita run my-agent "Analyze code" --dir ./myproject
+ alita run my-agent "Start task" --save-thread thread.txt
+ alita run my-agent "Query" -v quiet
+ alita run my-agent "Query" -v debug
+ """
+ formatter = ctx.obj['formatter']
+ client = get_client(ctx)
+
+ # Setup verbose level
+ show_verbose = verbose != 'quiet'
+ debug_mode = verbose == 'debug'
+
+ try:
+ # Load agent
+ is_local = Path(agent_source).exists()
+
+ if is_local:
+ agent_def = load_agent_definition(agent_source)
+ agent_name = agent_def.get('name', Path(agent_source).stem)
+
+ # Create memory for agent
+ from langgraph.checkpoint.sqlite import SqliteSaver
+ memory = SqliteSaver(sqlite3.connect(":memory:", check_same_thread=False))
+
+ # Setup local agent executor (reuses same logic as agent_chat)
+ try:
+ agent_executor, mcp_session_manager, llm, llm_model, filesystem_tools, terminal_tools, planning_tools = _setup_local_agent_executor(
+ client, agent_def, toolkit_config, ctx.obj['config'], model, temperature, max_tokens, memory, work_dir, {}
+ )
+ except Exception as e:
+ error_panel = Panel(
+ f"Failed to setup agent: {e}",
+ title="Error",
+ border_style="red",
+ box=box.ROUNDED
+ )
+ console.print(error_panel, style="red")
+ raise click.Abort()
+
+ # Execute agent
+ if agent_executor:
+ # Setup callback for verbose output
+ from langchain_core.runnables import RunnableConfig
+ from langgraph.errors import GraphRecursionError
+
+ invoke_config = None
+ if show_verbose:
+ cli_callback = create_cli_callback(verbose=True, debug=debug_mode)
+ invoke_config = RunnableConfig(callbacks=[cli_callback])
+
+ try:
+ # Execute with spinner for non-JSON output
+ if formatter.__class__.__name__ == 'JSONFormatter':
+ # JSON output: always quiet, no callbacks
+ with console.status("[yellow]Processing...[/yellow]", spinner="dots"):
+ result = agent_executor.invoke({
+ "input": message,
+ "chat_history": []
+ })
+
+ click.echo(formatter._dump({
+ 'agent': agent_name,
+ 'message': message,
+ 'response': extract_output_from_result(result),
+ 'full_result': result
+ }))
+ else:
+ # Show status only when not verbose (verbose shows its own progress)
+ if not show_verbose:
+ with console.status("[yellow]Processing...[/yellow]", spinner="dots"):
+ result = agent_executor.invoke(
+ {
+ "input": message,
+ "chat_history": []
+ },
+ config=invoke_config
+ )
+ else:
+ console.print() # Add spacing before tool calls
+ result = agent_executor.invoke(
+ {
+ "input": message,
+ "chat_history": []
+ },
+ config=invoke_config
+ )
+
+ # Extract and display output
+ output = extract_output_from_result(result)
+ display_output(agent_name, message, output)
+
+ except GraphRecursionError as e:
+ step_limit = getattr(e, 'recursion_limit', 25)
+ console.print()
+ console.print(Panel(
+ f"[yellow]⚠ Step limit reached ({step_limit} steps)[/yellow]\n\n"
+ f"The agent exceeded the maximum number of steps.\n"
+ f"This task may be too complex for a single run.\n\n"
+ f"[bold]Suggestions:[/bold]\n"
+ f"• Use [cyan]alita agent chat[/cyan] for interactive continuation\n"
+ f"• Break the task into smaller, focused requests\n"
+ f"• Check if partial work was completed (files created, etc.)",
+ title="Step Limit Reached",
+ border_style="yellow",
+ box=box.ROUNDED
+ ))
+ if formatter.__class__.__name__ == 'JSONFormatter':
+ click.echo(formatter._dump({
+ 'agent': agent_name,
+ 'message': message,
+ 'error': 'step_limit_reached',
+ 'step_limit': step_limit,
+ 'response': f'Step limit of {step_limit} reached. Task may be too complex.'
+ }))
+ else:
+ # Simple LLM mode without tools
+ system_prompt = agent_def.get('system_prompt', '')
+ messages = []
+ if system_prompt:
+ messages.append({"role": "system", "content": system_prompt})
+ messages.append({"role": "user", "content": message})
+
+ # Execute with spinner for non-JSON output
+ if formatter.__class__.__name__ == 'JSONFormatter':
+ response = llm.invoke(messages)
+ if hasattr(response, 'content'):
+ output = response.content
+ else:
+ output = str(response)
+
+ click.echo(formatter._dump({
+ 'agent': agent_name,
+ 'message': message,
+ 'response': output
+ }))
+ else:
+ # Show spinner while executing
+ with console.status("[yellow]Processing...[/yellow]", spinner="dots"):
+ response = llm.invoke(messages)
+ if hasattr(response, 'content'):
+ output = response.content
+ else:
+ output = str(response)
+
+ # Display output
+ display_output(agent_name, message, output)
+
+ else:
+ # Platform agent
+ agents = client.get_list_of_apps()
+ agent = None
+
+ try:
+ agent_id = int(agent_source)
+ agent = next((a for a in agents if a['id'] == agent_id), None)
+ except ValueError:
+ agent = next((a for a in agents if a['name'] == agent_source), None)
+
+ if not agent:
+ raise click.ClickException(f"Agent '{agent_source}' not found")
+
+ # Get version
+ details = client.get_app_details(agent['id'])
+
+ if version:
+ version_obj = next((v for v in details['versions'] if v['name'] == version), None)
+ if not version_obj:
+ raise click.ClickException(f"Version '{version}' not found")
2800
+ version_id = version_obj['id']
2801
+ else:
2802
+ version_id = details['versions'][0]['id']
2803
+
2804
+ # Load toolkit configs from CLI options
2805
+ toolkit_configs = []
2806
+ if toolkit_config:
2807
+ for config_path in toolkit_config:
2808
+ toolkit_configs.append(load_toolkit_config(config_path))
2809
+
2810
+ # Create memory
2811
+ from langgraph.checkpoint.sqlite import SqliteSaver
2812
+ memory = SqliteSaver(sqlite3.connect(":memory:", check_same_thread=False))
2813
+
2814
+ # Create agent executor
2815
+ agent_executor = client.application(
2816
+ application_id=agent['id'],
2817
+ application_version_id=version_id,
2818
+ memory=memory
2819
+ )
2820
+
2821
+ # Setup callback for verbose output
2822
+ from langchain_core.runnables import RunnableConfig
2823
+ from langgraph.errors import GraphRecursionError
2824
+
2825
+ invoke_config = None
2826
+ if show_verbose:
2827
+ cli_callback = create_cli_callback(verbose=True, debug=debug_mode)
2828
+ invoke_config = RunnableConfig(callbacks=[cli_callback])
2829
+
2830
+ try:
2831
+ # Execute with spinner for non-JSON output
2832
+ if formatter.__class__.__name__ == 'JSONFormatter':
2833
+ result = agent_executor.invoke({
2834
+ "input": [message],
2835
+ "chat_history": []
2836
+ })
2837
+
2838
+ click.echo(formatter._dump({
2839
+ 'agent': agent['name'],
+ 'message': message,
+ 'response': result.get('output', ''),
+ 'full_result': result
+ }))
+ else:
+ # Show status only when not verbose
+ if not show_verbose:
+ with console.status("[yellow]Processing...[/yellow]", spinner="dots"):
+ result = agent_executor.invoke(
+ {
+ "input": [message],
+ "chat_history": []
+ },
+ config=invoke_config
+ )
+ else:
+ console.print() # Add spacing before tool calls
+ result = agent_executor.invoke(
+ {
+ "input": [message],
+ "chat_history": []
+ },
+ config=invoke_config
+ )
+
+ # Display output
+ response = result.get('output', 'No response')
+ display_output(agent['name'], message, response)
+
+ # Save thread if requested
+ if save_thread:
+ thread_data = {
+ 'agent_id': agent['id'],
+ 'agent_name': agent['name'],
+ 'version_id': version_id,
+ 'thread_id': result.get('thread_id'),
+ 'last_message': message
+ }
+ with open(save_thread, 'w') as f:
+ json.dump(thread_data, f, indent=2)
+ logger.info(f"Thread saved to {save_thread}")
+
+ except GraphRecursionError as e:
+ step_limit = getattr(e, 'recursion_limit', 25)
+ console.print()
+ console.print(Panel(
+ f"[yellow]⚠ Step limit reached ({step_limit} steps)[/yellow]\n\n"
+ f"The agent exceeded the maximum number of steps.\n"
+ f"This task may be too complex for a single run.\n\n"
+ f"[bold]Suggestions:[/bold]\n"
+ f"• Use [cyan]alita agent chat[/cyan] for interactive continuation\n"
+ f"• Break the task into smaller, focused requests\n"
+ f"• Check if partial work was completed (files created, etc.)",
+ title="Step Limit Reached",
+ border_style="yellow",
+ box=box.ROUNDED
+ ))
+ if formatter.__class__.__name__ == 'JSONFormatter':
+ click.echo(formatter._dump({
+ 'agent': agent['name'],
+ 'message': message,
+ 'error': 'step_limit_reached',
+ 'step_limit': step_limit,
+ 'response': f'Step limit of {step_limit} reached. Task may be too complex.'
+ }))
+
+ except click.ClickException:
+ raise
+ except Exception as e:
+ logger.exception("Failed to run agent")
+ error_panel = Panel(
+ str(e),
+ title="Error",
+ border_style="red",
+ box=box.ROUNDED
+ )
+ console.print(error_panel, style="red")
+ raise click.Abort()
+
+
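The run command above routes verbose output through `create_cli_callback()` and hands it to the graph via `RunnableConfig(callbacks=[...])`. The sketch below is a hypothetical stand-in for that kind of handler, meant only to illustrate the callback wiring; the real implementation lives in alita_sdk/cli/callbacks.py and is considerably richer, and the class and function names here are invented for illustration.

```python
# Minimal sketch of a tool-logging callback handler, assuming the standard
# langchain_core callback hooks; not the actual create_cli_callback() handler.
from langchain_core.callbacks import BaseCallbackHandler
from langchain_core.runnables import RunnableConfig


class SimpleToolLogger(BaseCallbackHandler):
    """Print tool invocations and their results as they happen."""

    def on_tool_start(self, serialized, input_str, **kwargs):
        name = (serialized or {}).get("name", "tool")
        print(f"-> {name}({input_str})")

    def on_tool_end(self, output, **kwargs):
        print(f"<- {output}")


def verbose_invoke_config() -> RunnableConfig:
    # Equivalent in spirit to RunnableConfig(callbacks=[create_cli_callback(...)])
    # in the command above; pass the result as config= when invoking the executor.
    return RunnableConfig(callbacks=[SimpleToolLogger()])
```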
+ @agent.command('execute-test-cases')
+ @click.option(
+ '--agent_source',
+ '--agent-source',
+ 'agent_source',
+ required=False,
+ default=str(Path('.alita') / 'agents' / 'test-runner.agent.md'),
+ show_default=True,
+ type=click.Path(exists=False, file_okay=True, dir_okay=False),
+ help='Path to test runner agent definition file'
+ )
+ @click.option('--test-cases-dir', required=True, type=click.Path(exists=True, file_okay=False, dir_okay=True),
+ help='Directory containing test case files')
+ @click.option('--results-dir', required=False, default=str(Path('.alita') / 'tests' / 'results'),
+ type=click.Path(file_okay=False, dir_okay=True),
+ help='Directory where test results will be saved')
+ @click.option('--test-case', 'test_case_files', multiple=True,
+ help='Specific test case file(s) to execute (e.g., TC-001.md). Can specify multiple times. If not specified, executes all test cases.')
+ @click.option('--model', help='Override LLM model')
+ @click.option('--temperature', type=float, help='Override temperature')
+ @click.option('--max-tokens', type=int, help='Override max tokens')
+ @click.option('--dir', 'work_dir', required=False, default=str(Path('.alita')),
+ type=click.Path(exists=True, file_okay=False, dir_okay=True),
+ help='Grant agent filesystem access to this directory')
+ @click.option('--data-generator', required=False, default=str(Path('.alita') / 'agents' / 'test-data-generator.agent.md'),
+ type=click.Path(exists=True),
+ help='Path to test data generator agent definition file')
+ @click.option('--validator', type=click.Path(exists=True),
+ help='Path to test validator agent definition file (default: .alita/agents/test-validator.agent.md)')
+ @click.option('--skip-data-generation', is_flag=True,
+ help='Skip test data generation step')
+ @click.option('--verbose', '-v', type=click.Choice(['quiet', 'default', 'debug']), default='default',
+ help='Output verbosity level: quiet (final output only), default (tool calls + outputs), debug (all including LLM calls)')
+ @click.pass_context
+ def execute_test_cases(ctx, agent_source: str, test_cases_dir: str, results_dir: str,
+ test_case_files: tuple, model: Optional[str], temperature: Optional[float],
+ max_tokens: Optional[int], work_dir: str,
+ data_generator: str, validator: Optional[str],
+ skip_data_generation: bool,
+ verbose: str):
+ """
+ Execute test cases from a directory and save results.
+
+ This command:
+ 1. (Optional) Executes test data generator agent to provision test data
+ 2. Scans TEST_CASES_DIR for test case markdown files (TC-*.md)
+ 3. For each test case:
+ - Parses the test case to extract config, steps, and expectations
+ - Loads the agent with the toolkit config specified in the test case
+ - Executes each test step
+ - Validates output against expectations
+ - Generates a test result file
+ 4. Saves all results to RESULTS_DIR
+
+ --agent_source: Path to test runner agent definition file
+
+ \b
+ Examples:
+ alita agent execute-test-cases --test-cases-dir ./tests --results-dir ./results
+ alita agent execute-test-cases --agent_source ./agent.json --test-cases-dir ./tests --results-dir ./results \
+ --data-generator ./data-gen.json
+ alita agent execute-test-cases --agent_source ./agent.json --test-cases-dir ./tests --results-dir ./results \
+ --test-case TC-001.md --test-case TC-002.md
+ alita agent execute-test-cases --agent_source ./agent.json --test-cases-dir ./tests --results-dir ./results \
+ --skip-data-generation --model gpt-4o
+ """
+ # Import dependencies at function start
+ import sqlite3
+ import uuid
+ from langgraph.checkpoint.sqlite import SqliteSaver
+
+ config = ctx.obj['config']
+ client = get_client(ctx)
+
+ # Setup verbose level
+ show_verbose = verbose != 'quiet'
+ debug_mode = verbose == 'debug'
+
+ # Sanity-check committed defaults (should exist; fail early with a clear message if not)
+ if results_dir and not Path(results_dir).exists():
+ raise click.ClickException(
+ f"Results directory not found: {results_dir}. "
+ f"If you are running outside the repo root, pass --results-dir explicitly."
+ )
+
+ try:
+ # Load test runner agent
+ agent_def, agent_name = load_test_runner_agent(agent_source)
+
+ # Find and filter test case files
+ test_cases_path = Path(test_cases_dir)
+ test_case_files_list = discover_test_case_files(test_cases_dir, test_case_files)
+
+ # Validate that test cases were found
+ if not validate_test_case_files(test_case_files_list, test_cases_dir, test_case_files):
+ return
+
+ # Print execution header
+ print_test_execution_header(agent_name, test_case_files_list, test_case_files, results_dir)
+
+ # Load data generator agent (if applicable)
+ data_gen_def = load_data_generator_agent(data_generator, skip_data_generation)
+
+ # Load validator agent
+ validator_def, validator_agent_name, validator_path = load_validator_agent(validator)
+
+ # Store bulk data generation chat history to pass to test executors
+ bulk_gen_chat_history = []
+
+ # Parse all test cases upfront
+ parsed_test_cases = []
+ test_cases_needing_data_gen = []
+
+ # Create master log for entire test execution session
+ results_path = Path(results_dir)
+ session_name = f"test-execution-{test_cases_path.name}"
+
+ # Use the callbacks module console so tool-call panels are printed and captured.
+ from .callbacks import console as callbacks_console
+ with TestLogCapture(results_path, session_name, console=callbacks_console) as master_log:
+ # Write header information to log
+ master_log.print(f"\n[bold cyan]🧪 Test Execution Started[/bold cyan]")
+ master_log.print(f"Agent: [bold]{agent_name}[/bold]")
+ master_log.print(f"Test Cases: {len(test_case_files_list)}")
+ if test_case_files:
+ master_log.print(f"Selected: [cyan]{', '.join(test_case_files)}[/cyan]")
+ master_log.print(f"Results Directory: {results_dir}\n")
+
+ if data_gen_def:
+ data_gen_name = data_gen_def.get('name', Path(data_generator).stem if data_generator else 'Data Generator')
+ master_log.print(f"Data Generator Agent: [bold]{data_gen_name}[/bold]\n")
+
+ if validator_def:
+ master_log.print(f"Validator Agent: [bold]{validator_agent_name}[/bold]")
+ master_log.print(f"[dim]Using: {validator_path}[/dim]\n")
+ else:
+ master_log.print(f"[dim]No validator agent specified, using test runner agent for validation[/dim]\n")
+
+ # Parse all test cases
+ parsed_test_cases = parse_all_test_cases(test_case_files_list, master_log)
+ test_cases_needing_data_gen = filter_test_cases_needing_data_gen(parsed_test_cases)
+
+ # Bulk test data generation (if enabled)
+ if data_gen_def and not skip_data_generation and test_cases_needing_data_gen:
+ bulk_gen_chat_history = execute_bulk_data_generation(
+ data_gen_def, test_cases_needing_data_gen, parsed_test_cases,
+ test_cases_path, client, config, model, temperature, max_tokens,
+ work_dir, master_log, _setup_local_agent_executor,
+ verbose=show_verbose,
+ debug=debug_mode,
+ )
+
+ # Execute all test cases
+ test_results = execute_all_test_cases(
+ parsed_test_cases, bulk_gen_chat_history, test_cases_path,
+ agent_def, validator_def, client, config, model, temperature,
+ max_tokens, work_dir, master_log, _setup_local_agent_executor,
+ verbose=show_verbose,
+ debug=debug_mode,
+ )
+
+ # End of master_log context - log file saved automatically
+
+ # Print test execution summary
+ print_test_execution_summary(test_results, results_dir, session_name)
+
+ # Save structured JSON report
+ log_file = None
+ toolkit_name = session_name.replace('test-execution-', '')
+ toolkit_dir = results_path / toolkit_name
+ log_files = sorted(toolkit_dir.glob(f"*{session_name}.txt")) if toolkit_dir.exists() else []
+ if log_files:
+ log_file = log_files[0]
+
+ save_structured_report(test_results, results_dir, log_file)
+
+ # Exit with error code if any tests failed
+ failed_tests = sum(1 for r in test_results if not r['passed'])
+ if failed_tests > 0:
+ sys.exit(1)
+
+ except click.ClickException:
+ raise
+ except Exception as e:
+ logger.exception("Failed to execute test cases")
+ error_panel = Panel(
+ str(e),
+ title="Error",
+ border_style="red",
+ box=box.ROUNDED
+ )
+ console.print(error_panel, style="red")
+ raise click.Abort()
+
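The command above relies on `discover_test_case_files()` to find `TC-*.md` files and honor `--test-case` selections. The following is a rough sketch of that behavior under those assumptions; the actual helper lives in alita_sdk/cli/testcases/discovery.py and may differ, and the function name below is marked as a sketch rather than the package API.

```python
# Illustrative sketch only: glob TC-*.md under the test cases directory, then
# narrow the list to any explicitly selected file names, as the command's
# docstring describes. Not the package's real discover_test_case_files().
from pathlib import Path
from typing import List, Sequence


def discover_test_case_files_sketch(test_cases_dir: str,
                                    selected: Sequence[str] = ()) -> List[Path]:
    """Return sorted TC-*.md files, optionally narrowed to selected names."""
    all_cases = sorted(Path(test_cases_dir).glob("TC-*.md"))
    if not selected:
        return all_cases
    wanted = set(selected)
    return [p for p in all_cases if p.name in wanted]


# Example: discover_test_case_files_sketch("./tests", ("TC-001.md", "TC-002.md"))
```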