alita-sdk 0.3.365__py3-none-any.whl → 0.3.462__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their public registry, and is provided for informational purposes only.

Potentially problematic release: this version of alita-sdk might be problematic.
Files changed (118)
  1. alita_sdk/cli/__init__.py +10 -0
  2. alita_sdk/cli/__main__.py +17 -0
  3. alita_sdk/cli/agent_executor.py +144 -0
  4. alita_sdk/cli/agent_loader.py +197 -0
  5. alita_sdk/cli/agent_ui.py +166 -0
  6. alita_sdk/cli/agents.py +1069 -0
  7. alita_sdk/cli/callbacks.py +576 -0
  8. alita_sdk/cli/cli.py +159 -0
  9. alita_sdk/cli/config.py +153 -0
  10. alita_sdk/cli/formatting.py +182 -0
  11. alita_sdk/cli/mcp_loader.py +315 -0
  12. alita_sdk/cli/toolkit.py +330 -0
  13. alita_sdk/cli/toolkit_loader.py +55 -0
  14. alita_sdk/cli/tools/__init__.py +9 -0
  15. alita_sdk/cli/tools/filesystem.py +905 -0
  16. alita_sdk/configurations/bitbucket.py +95 -0
  17. alita_sdk/configurations/confluence.py +96 -1
  18. alita_sdk/configurations/gitlab.py +79 -0
  19. alita_sdk/configurations/jira.py +103 -0
  20. alita_sdk/configurations/testrail.py +88 -0
  21. alita_sdk/configurations/xray.py +93 -0
  22. alita_sdk/configurations/zephyr_enterprise.py +93 -0
  23. alita_sdk/configurations/zephyr_essential.py +75 -0
  24. alita_sdk/runtime/clients/artifact.py +1 -1
  25. alita_sdk/runtime/clients/client.py +47 -10
  26. alita_sdk/runtime/clients/mcp_discovery.py +342 -0
  27. alita_sdk/runtime/clients/mcp_manager.py +262 -0
  28. alita_sdk/runtime/clients/sandbox_client.py +373 -0
  29. alita_sdk/runtime/langchain/assistant.py +70 -41
  30. alita_sdk/runtime/langchain/constants.py +6 -1
  31. alita_sdk/runtime/langchain/document_loaders/AlitaDocxMammothLoader.py +315 -3
  32. alita_sdk/runtime/langchain/document_loaders/AlitaJSONLoader.py +4 -1
  33. alita_sdk/runtime/langchain/document_loaders/constants.py +73 -100
  34. alita_sdk/runtime/langchain/langraph_agent.py +164 -38
  35. alita_sdk/runtime/langchain/utils.py +43 -7
  36. alita_sdk/runtime/models/mcp_models.py +61 -0
  37. alita_sdk/runtime/toolkits/__init__.py +24 -0
  38. alita_sdk/runtime/toolkits/application.py +8 -1
  39. alita_sdk/runtime/toolkits/artifact.py +5 -6
  40. alita_sdk/runtime/toolkits/mcp.py +895 -0
  41. alita_sdk/runtime/toolkits/tools.py +140 -50
  42. alita_sdk/runtime/tools/__init__.py +7 -2
  43. alita_sdk/runtime/tools/application.py +7 -0
  44. alita_sdk/runtime/tools/function.py +94 -5
  45. alita_sdk/runtime/tools/graph.py +10 -4
  46. alita_sdk/runtime/tools/image_generation.py +104 -8
  47. alita_sdk/runtime/tools/llm.py +204 -114
  48. alita_sdk/runtime/tools/mcp_inspect_tool.py +284 -0
  49. alita_sdk/runtime/tools/mcp_remote_tool.py +166 -0
  50. alita_sdk/runtime/tools/mcp_server_tool.py +3 -1
  51. alita_sdk/runtime/tools/sandbox.py +180 -79
  52. alita_sdk/runtime/tools/vectorstore.py +22 -21
  53. alita_sdk/runtime/tools/vectorstore_base.py +79 -26
  54. alita_sdk/runtime/utils/mcp_oauth.py +164 -0
  55. alita_sdk/runtime/utils/mcp_sse_client.py +405 -0
  56. alita_sdk/runtime/utils/streamlit.py +34 -3
  57. alita_sdk/runtime/utils/toolkit_utils.py +14 -4
  58. alita_sdk/runtime/utils/utils.py +1 -0
  59. alita_sdk/tools/__init__.py +48 -31
  60. alita_sdk/tools/ado/repos/__init__.py +1 -0
  61. alita_sdk/tools/ado/test_plan/__init__.py +1 -1
  62. alita_sdk/tools/ado/wiki/__init__.py +1 -5
  63. alita_sdk/tools/ado/work_item/__init__.py +1 -5
  64. alita_sdk/tools/ado/work_item/ado_wrapper.py +17 -8
  65. alita_sdk/tools/base_indexer_toolkit.py +194 -112
  66. alita_sdk/tools/bitbucket/__init__.py +1 -0
  67. alita_sdk/tools/chunkers/sematic/proposal_chunker.py +1 -1
  68. alita_sdk/tools/code/sonar/__init__.py +1 -1
  69. alita_sdk/tools/code_indexer_toolkit.py +15 -5
  70. alita_sdk/tools/confluence/__init__.py +2 -2
  71. alita_sdk/tools/confluence/api_wrapper.py +110 -63
  72. alita_sdk/tools/confluence/loader.py +10 -0
  73. alita_sdk/tools/elitea_base.py +22 -22
  74. alita_sdk/tools/github/__init__.py +2 -2
  75. alita_sdk/tools/gitlab/__init__.py +2 -1
  76. alita_sdk/tools/gitlab/api_wrapper.py +11 -7
  77. alita_sdk/tools/gitlab_org/__init__.py +1 -2
  78. alita_sdk/tools/google_places/__init__.py +2 -1
  79. alita_sdk/tools/jira/__init__.py +1 -0
  80. alita_sdk/tools/jira/api_wrapper.py +1 -1
  81. alita_sdk/tools/memory/__init__.py +1 -1
  82. alita_sdk/tools/non_code_indexer_toolkit.py +2 -2
  83. alita_sdk/tools/openapi/__init__.py +10 -1
  84. alita_sdk/tools/pandas/__init__.py +1 -1
  85. alita_sdk/tools/postman/__init__.py +2 -1
  86. alita_sdk/tools/postman/api_wrapper.py +18 -8
  87. alita_sdk/tools/postman/postman_analysis.py +8 -1
  88. alita_sdk/tools/pptx/__init__.py +2 -2
  89. alita_sdk/tools/qtest/__init__.py +3 -3
  90. alita_sdk/tools/qtest/api_wrapper.py +1708 -76
  91. alita_sdk/tools/rally/__init__.py +1 -2
  92. alita_sdk/tools/report_portal/__init__.py +1 -0
  93. alita_sdk/tools/salesforce/__init__.py +1 -0
  94. alita_sdk/tools/servicenow/__init__.py +2 -3
  95. alita_sdk/tools/sharepoint/__init__.py +1 -0
  96. alita_sdk/tools/sharepoint/api_wrapper.py +125 -34
  97. alita_sdk/tools/sharepoint/authorization_helper.py +191 -1
  98. alita_sdk/tools/sharepoint/utils.py +8 -2
  99. alita_sdk/tools/slack/__init__.py +1 -0
  100. alita_sdk/tools/sql/__init__.py +2 -1
  101. alita_sdk/tools/sql/api_wrapper.py +71 -23
  102. alita_sdk/tools/testio/__init__.py +1 -0
  103. alita_sdk/tools/testrail/__init__.py +1 -3
  104. alita_sdk/tools/utils/__init__.py +17 -0
  105. alita_sdk/tools/utils/content_parser.py +35 -24
  106. alita_sdk/tools/vector_adapters/VectorStoreAdapter.py +67 -21
  107. alita_sdk/tools/xray/__init__.py +2 -1
  108. alita_sdk/tools/zephyr/__init__.py +2 -1
  109. alita_sdk/tools/zephyr_enterprise/__init__.py +1 -0
  110. alita_sdk/tools/zephyr_essential/__init__.py +1 -0
  111. alita_sdk/tools/zephyr_scale/__init__.py +1 -0
  112. alita_sdk/tools/zephyr_squad/__init__.py +1 -0
  113. {alita_sdk-0.3.365.dist-info → alita_sdk-0.3.462.dist-info}/METADATA +8 -2
  114. {alita_sdk-0.3.365.dist-info → alita_sdk-0.3.462.dist-info}/RECORD +118 -93
  115. alita_sdk-0.3.462.dist-info/entry_points.txt +2 -0
  116. {alita_sdk-0.3.365.dist-info → alita_sdk-0.3.462.dist-info}/WHEEL +0 -0
  117. {alita_sdk-0.3.365.dist-info → alita_sdk-0.3.462.dist-info}/licenses/LICENSE +0 -0
  118. {alita_sdk-0.3.365.dist-info → alita_sdk-0.3.462.dist-info}/top_level.txt +0 -0
alita_sdk/cli/__init__.py
@@ -0,0 +1,10 @@
+ """
+ Alita SDK CLI - Command-line interface for testing agents and toolkits.
+
+ This module provides a CLI alternative to the Streamlit interface, enabling
+ direct terminal access for GitHub Copilot integration and automation workflows.
+ """
+
+ from .cli import cli
+
+ __all__ = ['cli']
alita_sdk/cli/__main__.py
@@ -0,0 +1,17 @@
+ """
+ Entry point for running the Alita CLI as a module.
+
+ Usage:
+     python -m alita_sdk.cli [command] [options]
+ """
+
+ # Suppress warnings before any imports
+ import warnings
+ warnings.filterwarnings('ignore', category=DeprecationWarning)
+ warnings.filterwarnings('ignore', category=UserWarning)
+ warnings.filterwarnings('ignore', message='Unverified HTTPS request')
+
+ from .cli import cli
+
+ if __name__ == '__main__':
+     cli()
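
The package init above re-exports `cli`, so the same entry point can also be driven programmatically rather than through `python -m alita_sdk.cli`. A minimal sketch, relying only on the import shown in this diff:

    # Call the CLI entry point exactly as __main__.py above does.
    from alita_sdk.cli import cli

    if __name__ == '__main__':
        cli()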
alita_sdk/cli/agent_executor.py
@@ -0,0 +1,144 @@
+ """
+ Agent executor creation and management.
+
+ Creates LLM instances and agent executors with support for MCP tools.
+ """
+
+ from typing import Optional, Dict, Any, List, Tuple
+ from rich.console import Console
+
+ from .agent_loader import build_agent_data_structure
+ from alita_sdk.runtime.langchain.assistant import Assistant
+
+ console = Console()
+
+
+ def create_llm_instance(client, model: Optional[str], agent_def: Dict[str, Any],
+                         temperature: Optional[float], max_tokens: Optional[int]):
+     """Create LLM instance with appropriate configuration."""
+     llm_model = model or agent_def.get('model', 'gpt-4o')
+     llm_temperature = temperature if temperature is not None else agent_def.get('temperature', 0.7)
+     llm_max_tokens = max_tokens or agent_def.get('max_tokens', 2000)
+
+     try:
+         llm = client.get_llm(
+             model_name=llm_model,
+             model_config={
+                 'temperature': llm_temperature,
+                 'max_tokens': llm_max_tokens
+             }
+         )
+         return llm, llm_model, llm_temperature, llm_max_tokens
+     except Exception as e:
+         console.print(f"\n✗ [red]Failed to create LLM instance:[/red] {e}")
+         console.print("[yellow]Hint: Make sure OPENAI_API_KEY or other LLM credentials are set[/yellow]")
+         raise
+
+
+ def _create_assistant(client, agent_data: Dict[str, Any], llm, memory, tools: List) -> Assistant:
+     """Create Assistant instance with given configuration.
+
+     Args:
+         client: Alita client instance
+         agent_data: Agent configuration data
+         llm: LLM instance
+         memory: Memory/checkpoint instance
+         tools: List of tools to add to agent
+
+     Returns:
+         Assistant instance
+     """
+     return Assistant(
+         alita=client,
+         data=agent_data,
+         client=llm,
+         chat_history=[],
+         app_type=agent_data.get('agent_type', 'react'),
+         tools=tools,
+         memory=memory,
+         store=None,
+         debug_mode=False,
+         mcp_tokens=None
+     )
+
+
+ def create_agent_executor(client, agent_def: Dict[str, Any], toolkit_configs: List[Dict[str, Any]],
+                           llm, llm_model: str, llm_temperature: float, llm_max_tokens: int, memory,
+                           filesystem_tools: Optional[List] = None, mcp_tools: Optional[List] = None):
+     """Create agent executor for local agents with tools (sync version).
+
+     Note: mcp_tools parameter is deprecated - use create_agent_executor_with_mcp for MCP support.
+     """
+     agent_data = build_agent_data_structure(
+         agent_def=agent_def,
+         toolkit_configs=toolkit_configs,
+         llm_model=llm_model,
+         llm_temperature=llm_temperature,
+         llm_max_tokens=llm_max_tokens
+     )
+
+     # Combine all tools
+     additional_tools = []
+     if filesystem_tools:
+         additional_tools.extend(filesystem_tools)
+     if mcp_tools:
+         additional_tools.extend(mcp_tools)
+
+     assistant = _create_assistant(client, agent_data, llm, memory, additional_tools)
+     return assistant.runnable()
+
+
+ async def create_agent_executor_with_mcp(
+     client,
+     agent_def: Dict[str, Any],
+     toolkit_configs: List[Dict[str, Any]],
+     llm,
+     llm_model: str,
+     llm_temperature: float,
+     llm_max_tokens: int,
+     memory,
+     filesystem_tools: Optional[List] = None
+ ) -> Tuple[Any, Optional[Any]]:
+     """Create agent executor with MCP tools using persistent sessions.
+
+     Returns:
+         Tuple of (agent_executor, mcp_session_manager) where session_manager must be kept alive
+         to maintain stateful MCP server state (e.g., Playwright browser sessions).
+
+     See: https://github.com/langchain-ai/langchain-mcp-adapters/issues/178
+     """
+     from .mcp_loader import load_mcp_tools_async
+
+     # Separate MCP toolkit configs from regular configs
+     mcp_configs = [tc for tc in toolkit_configs if tc.get('toolkit_type') == 'mcp']
+     regular_configs = [tc for tc in toolkit_configs if tc.get('toolkit_type') != 'mcp']
+
+     # Load MCP tools with persistent sessions
+     mcp_session_manager = None
+     mcp_tools = []
+     if mcp_configs:
+         console.print("\n[cyan]Loading MCP tools with persistent sessions...[/cyan]")
+         mcp_session_manager, mcp_tools = await load_mcp_tools_async(mcp_configs)
+         if mcp_tools:
+             console.print(f"[green]✓ Loaded {len(mcp_tools)} MCP tools with persistent sessions[/green]\n")
+
+     # Build agent data structure
+     agent_data = build_agent_data_structure(
+         agent_def=agent_def,
+         toolkit_configs=regular_configs,
+         llm_model=llm_model,
+         llm_temperature=llm_temperature,
+         llm_max_tokens=llm_max_tokens
+     )
+
+     # Combine all tools
+     additional_tools = []
+     if filesystem_tools:
+         additional_tools.extend(filesystem_tools)
+     if mcp_tools:
+         additional_tools.extend(mcp_tools)
+
+     assistant = _create_assistant(client, agent_data, llm, memory, additional_tools)
+
+     # Return agent and session manager (must be kept alive for stateful MCP tools)
+     return assistant.runnable(), mcp_session_manager
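
For orientation, here is a minimal sketch of how these helpers might be wired together from a script. Only the `create_llm_instance` and `create_agent_executor` signatures come from this diff; the client class name and constructor arguments, the environment variable names, the use of `MemorySaver` as the checkpointer, and the `invoke` payload shape are assumptions for illustration.

    import os

    from langgraph.checkpoint.memory import MemorySaver

    from alita_sdk.cli.agent_executor import create_llm_instance, create_agent_executor
    from alita_sdk.cli.agent_loader import load_agent_definition
    from alita_sdk.runtime.clients.client import AlitaClient  # module path from the file list above

    # Assumed constructor arguments and env var names - check clients/client.py for the actual signature.
    client = AlitaClient(
        base_url=os.environ["DEPLOYMENT_URL"],
        project_id=int(os.environ["PROJECT_ID"]),
        auth_token=os.environ["API_KEY"],
    )

    agent_def = load_agent_definition("reviewer.md")   # hypothetical local agent file
    toolkit_configs = agent_def.get("toolkit_configs", [])

    # Fall back to the agent definition's own model/temperature/max_tokens settings.
    llm, model, temperature, max_tokens = create_llm_instance(
        client, model=None, agent_def=agent_def, temperature=None, max_tokens=None
    )

    executor = create_agent_executor(
        client, agent_def, toolkit_configs,
        llm, model, temperature, max_tokens,
        memory=MemorySaver(),   # assumed: any LangGraph-style checkpointer
    )

    # The runnable comes from Assistant.runnable(); the input payload shape is an assumption.
    result = executor.invoke({"input": "Summarize the configured toolkits"},
                             config={"configurable": {"thread_id": "cli-session-1"}})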
alita_sdk/cli/agent_loader.py
@@ -0,0 +1,197 @@
+ """
+ Agent loading and definition management.
+
+ Handles loading agent definitions from various file formats (YAML, JSON, Markdown).
+ """
+
+ import json
+ import yaml
+ from pathlib import Path
+ from typing import Dict, Any
+
+ from .config import substitute_env_vars
+
+
+ def load_agent_definition(file_path: str) -> Dict[str, Any]:
+     """
+     Load agent definition from file.
+
+     Supports:
+     - YAML files (.yaml, .yml)
+     - JSON files (.json)
+     - Markdown files with YAML frontmatter (.md)
+
+     Args:
+         file_path: Path to agent definition file
+
+     Returns:
+         Dictionary with agent configuration
+     """
+     path = Path(file_path)
+
+     if not path.exists():
+         raise FileNotFoundError(f"Agent definition not found: {file_path}")
+
+     content = path.read_text()
+
+     # Handle markdown with YAML frontmatter
+     if path.suffix == '.md':
+         if content.startswith('---'):
+             parts = content.split('---', 2)
+             if len(parts) >= 3:
+                 frontmatter = yaml.safe_load(parts[1])
+                 system_prompt = parts[2].strip()
+
+                 # Apply environment variable substitution
+                 system_prompt = substitute_env_vars(system_prompt)
+
+                 return {
+                     'name': frontmatter.get('name', path.stem),
+                     'description': frontmatter.get('description', ''),
+                     'system_prompt': system_prompt,
+                     'model': frontmatter.get('model'),
+                     'tools': frontmatter.get('tools', []),
+                     'temperature': frontmatter.get('temperature'),
+                     'max_tokens': frontmatter.get('max_tokens'),
+                     'toolkit_configs': frontmatter.get('toolkit_configs', []),
+                     'filesystem_tools_preset': frontmatter.get('filesystem_tools_preset'),
+                     'filesystem_tools_include': frontmatter.get('filesystem_tools_include'),
+                     'filesystem_tools_exclude': frontmatter.get('filesystem_tools_exclude'),
+                     'mcps': frontmatter.get('mcps', [])
+                 }
+
+         # Plain markdown - use content as system prompt
+         return {
+             'name': path.stem,
+             'system_prompt': substitute_env_vars(content),
+         }
+
+     # Handle YAML
+     if path.suffix in ['.yaml', '.yml']:
+         content = substitute_env_vars(content)
+         config = yaml.safe_load(content)
+         if 'system_prompt' in config:
+             config['system_prompt'] = substitute_env_vars(config['system_prompt'])
+         return config
+
+     # Handle JSON
+     if path.suffix == '.json':
+         content = substitute_env_vars(content)
+         config = json.loads(content)
+         if 'system_prompt' in config:
+             config['system_prompt'] = substitute_env_vars(config['system_prompt'])
+         return config
+
+     raise ValueError(f"Unsupported file format: {path.suffix}")
+
+
+ def build_agent_data_structure(agent_def: Dict[str, Any], toolkit_configs: list,
+                                llm_model: str, llm_temperature: float, llm_max_tokens: int) -> Dict[str, Any]:
+     """
+     Convert a local agent definition to the data structure expected by the Assistant class.
+
+     This utility function bridges between simple agent definition formats (e.g., from markdown files)
+     and the structured format that the Assistant class requires internally.
+
+     Args:
+         agent_def: The agent definition loaded from a local file (markdown, YAML, or JSON)
+         toolkit_configs: List of toolkit configurations to be used by the agent
+         llm_model: The LLM model name (e.g., 'gpt-4o')
+         llm_temperature: Temperature setting for the model
+         llm_max_tokens: Maximum tokens for model responses
+
+     Returns:
+         A dictionary in the format expected by the Assistant constructor with keys:
+         - instructions: System prompt for the agent
+         - tools: List of tool/toolkit configurations
+         - variables: Agent variables (empty for local agents)
+         - meta: Metadata including step_limit and internal_tools
+         - llm_settings: Complete LLM configuration
+         - agent_type: Type of agent (react, openai, etc.)
+     """
+     # Import toolkit registry to validate configs
+     from alita_sdk.tools import AVAILABLE_TOOLS
+
+     # Build the tools list from agent definition and toolkit configs
+     tools = []
+     processed_toolkit_names = set()
+
+     # Validate and process toolkit configs through their Pydantic schemas
+     validated_toolkit_configs = []
+     for toolkit_config in toolkit_configs:
+         toolkit_type = toolkit_config.get('type')
+         if toolkit_type and toolkit_type in AVAILABLE_TOOLS:
+             try:
+                 toolkit_info = AVAILABLE_TOOLS[toolkit_type]
+                 if 'toolkit_class' in toolkit_info:
+                     toolkit_class = toolkit_info['toolkit_class']
+                     if hasattr(toolkit_class, 'toolkit_config_schema'):
+                         schema = toolkit_class.toolkit_config_schema()
+                         validated_config = schema(**toolkit_config)
+                         validated_dict = validated_config.model_dump()
+                         validated_dict['type'] = toolkit_config.get('type')
+                         validated_dict['toolkit_name'] = toolkit_config.get('toolkit_name')
+                         validated_toolkit_configs.append(validated_dict)
+                     else:
+                         validated_toolkit_configs.append(toolkit_config)
+                 else:
+                     validated_toolkit_configs.append(toolkit_config)
+             except Exception:
+                 validated_toolkit_configs.append(toolkit_config)
+         else:
+             validated_toolkit_configs.append(toolkit_config)
+
+     # Add tools from agent definition
+     for tool_name in agent_def.get('tools', []):
+         toolkit_config = next((tk for tk in validated_toolkit_configs if tk.get('toolkit_name') == tool_name), None)
+         if toolkit_config:
+             tools.append({
+                 'type': toolkit_config.get('type'),
+                 'toolkit_name': toolkit_config.get('toolkit_name'),
+                 'settings': toolkit_config,
+                 'selected_tools': toolkit_config.get('selected_tools', [])
+             })
+             processed_toolkit_names.add(tool_name)
+         else:
+             tools.append({
+                 'type': tool_name,
+                 'name': tool_name
+             })
+
+     # Add toolkit_configs that weren't already referenced
+     for toolkit_config in validated_toolkit_configs:
+         toolkit_name = toolkit_config.get('toolkit_name')
+         if toolkit_name and toolkit_name not in processed_toolkit_names:
+             tools.append({
+                 'type': toolkit_config.get('type'),
+                 'toolkit_name': toolkit_name,
+                 'settings': toolkit_config,
+                 'selected_tools': toolkit_config.get('selected_tools', [])
+             })
+
+     return {
+         'instructions': agent_def.get('system_prompt', ''),
+         'tools': tools,
+         'variables': [],
+         'meta': {
+             'step_limit': agent_def.get('step_limit', 25),
+             'internal_tools': agent_def.get('internal_tools', [])
+         },
+         'llm_settings': {
+             'model_name': llm_model,
+             'max_tokens': llm_max_tokens,
+             'temperature': llm_temperature,
+             'top_p': 1.0,
+             'top_k': 0,
+             'integration_uid': None,
+             'indexer_config': {
+                 'ai_model': 'langchain_openai.ChatOpenAI',
+                 'ai_model_params': {
+                     'model': llm_model,
+                     'temperature': llm_temperature,
+                     'max_tokens': llm_max_tokens
+                 }
+             }
+         },
+         'agent_type': agent_def.get('agent_type', 'react')
+     }
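
As a sketch of the markdown-with-frontmatter path above: the frontmatter keys mirror those read by `load_agent_definition`, while the file name, prompt text, and toolkit entry are illustrative only. Note that, per the code above, environment-variable substitution is applied to the markdown body (the system prompt) but not to the frontmatter.

    from pathlib import Path

    from alita_sdk.cli.agent_loader import load_agent_definition, build_agent_data_structure

    # Hypothetical agent definition file; keys match what load_agent_definition reads.
    Path("reviewer.md").write_text(
        "---\n"
        "name: code-reviewer\n"
        "description: Reviews merge requests\n"
        "model: gpt-4o\n"
        "temperature: 0.2\n"
        "tools: [github]\n"
        "toolkit_configs:\n"
        "  - toolkit_name: github\n"
        "    type: github\n"
        "    selected_tools: []\n"
        "---\n"
        "You are a meticulous code reviewer. Focus on correctness and security.\n"
    )

    agent_def = load_agent_definition("reviewer.md")
    agent_data = build_agent_data_structure(
        agent_def,
        toolkit_configs=agent_def["toolkit_configs"],
        llm_model=agent_def.get("model") or "gpt-4o",
        llm_temperature=agent_def.get("temperature") or 0.7,
        llm_max_tokens=agent_def.get("max_tokens") or 2000,
    )
    # agent_data now carries 'instructions', 'tools', 'llm_settings' and 'agent_type',
    # ready to be passed on via the executor helpers above.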
alita_sdk/cli/agent_ui.py
@@ -0,0 +1,166 @@
+ """
+ Agent UI and display utilities.
+
+ Rich console formatting for agent interactions.
+ """
+
+ from rich.console import Console, Group
+ from rich.panel import Panel
+ from rich.table import Table
+ from rich.markdown import Markdown
+ from rich import box
+ from rich.text import Text
+ from rich.align import Align
+ from rich.columns import Columns
+ from rich.rule import Rule
+
+ console = Console()
+
+ # ALITA ASCII Art Logo - block letters
+ ALITA_LOGO = [
+     " █████╗ ██╗ ██╗████████╗ █████╗ ",
+     "██╔══██╗██║ ██║╚══██╔══╝██╔══██╗",
+     "███████║██║ ██║ ██║ ███████║",
+     "██╔══██║██║ ██║ ██║ ██╔══██║",
+     "██║ ██║███████╗██║ ██║ ██║ ██║",
+     "╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝ ╚═╝ ╚═╝",
+ ]
+
+
+ def get_version():
+     """Get CLI version from package."""
+     try:
+         from importlib.metadata import version
+         return version('alita_sdk')
+     except Exception:
+         return "0.3.x"
+
+
+ def print_banner(agent_name: str, agent_type: str = "Local Agent"):
+     """Print a cool banner for the chat session with ASCII art."""
+
+     version = get_version()
+
+     # Create the main banner content
+     banner_text = Text()
+
+     # Add logo lines with cyan styling
+     for line in ALITA_LOGO:
+         banner_text.append(" " + line + "\n", style="bold cyan")
+
+     banner_text.append("\n")
+     banner_text.append(" CLI ", style="dim")
+     banner_text.append(f"v{version}\n\n", style="bold white")
+
+     # Agent info
+     banner_text.append(" ● ", style="bold green")
+     banner_text.append("Agent: ", style="bold white")
+     banner_text.append(f"{agent_name}\n", style="bold cyan")
+     banner_text.append(" ● ", style="bold green")
+     banner_text.append("Type: ", style="bold white")
+     banner_text.append(f"{agent_type}\n", style="cyan")
+
+     console.print()
+     console.print(Panel(
+         banner_text,
+         box=box.DOUBLE,
+         border_style="cyan",
+         padding=(0, 2),
+     ))
+
+
+ def print_help():
+     """Print help message with commands using rich table."""
+     table = Table(
+         show_header=True,
+         header_style="bold cyan",
+         border_style="dim",
+         box=box.SIMPLE,
+         padding=(0, 1),
+     )
+
+     table.add_column("Command", style="bold yellow", no_wrap=True, width=12)
+     table.add_column("Description", style="white")
+
+     table.add_row("/clear", "Clear conversation history")
+     table.add_row("/history", "Show conversation history")
+     table.add_row("/save", "Save conversation to file")
+     table.add_row("/help", "Show this help")
+     table.add_row("exit", "End conversation")
+
+     console.print(table)
+     console.print()
+
+
+ def print_welcome(agent_name: str, agent_type: str = "Local Agent"):
+     """Print combined welcome banner with logo, agent info, and help."""
+
+     version = get_version()
+
+     # Build the complete welcome message using Text objects
+     content = Text()
+
+     # Add logo lines with cyan styling
+     for line in ALITA_LOGO:
+         content.append(" " + line + "\n", style="bold cyan")
+
+     content.append(" CLI ", style="dim")
+     content.append(f"v{version}\n\n", style="bold white")
+
+     # Connection status
+     content.append(" ● ", style="bold green")
+     content.append("Agent: ", style="bold white")
+     content.append(f"{agent_name}\n", style="bold cyan")
+     content.append(" ● ", style="bold green")
+     content.append("Type: ", style="bold white")
+     content.append(f"{agent_type}\n\n", style="cyan")
+
+     # Quick help section
+     content.append(" Type a message to chat, or use these commands:\n\n", style="dim")
+
+     # Commands
+     content.append(" /help", style="bold yellow")
+     content.append(" Show all commands\n", style="white")
+     content.append(" /clear", style="bold yellow")
+     content.append(" Clear history\n", style="white")
+     content.append(" exit", style="bold yellow")
+     content.append(" End conversation\n", style="white")
+
+     console.print()
+     console.print(Panel(
+         content,
+         box=box.DOUBLE,
+         border_style="cyan",
+         padding=(0, 2),
+     ))
+     console.print()
+
+
+ def display_output(agent_name: str, message: str, output: str):
+     """Display agent output with markdown rendering if applicable."""
+     console.print(f"\n[bold cyan]🤖 Agent: {agent_name}[/bold cyan]\n")
+     console.print(f"[bold]Message:[/bold] {message}\n")
+     console.print("[bold]Response:[/bold]")
+     if any(marker in output for marker in ['```', '**', '##', '- ', '* ']):
+         console.print(Markdown(output))
+     else:
+         console.print(output)
+     console.print()
+
+
+ def extract_output_from_result(result) -> str:
+     """Extract output string from agent result (handles multiple formats)."""
+     if isinstance(result, dict):
+         # Try different keys that might contain the response
+         output = result.get('output')
+         if output is None and 'messages' in result:
+             # LangGraph format - get last message
+             messages = result['messages']
+             if messages and len(messages) > 0:
+                 last_msg = messages[-1]
+                 output = last_msg.content if hasattr(last_msg, 'content') else str(last_msg)
+         if output is None:
+             output = str(result)
+     else:
+         output = str(result)
+     return output
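
A short sketch of how these display helpers fit around an agent run; `result` below is a placeholder for whatever the executor returns (one of the dict shapes handled by `extract_output_from_result` above), and the agent name and message are illustrative.

    from alita_sdk.cli.agent_ui import print_welcome, display_output, extract_output_from_result

    print_welcome("code-reviewer", agent_type="Local Agent")

    # Placeholder result; in practice this would come from the agent executor's invoke().
    result = {"output": "No blocking issues found; two style suggestions noted."}

    output = extract_output_from_result(result)
    display_output("code-reviewer", "Review the latest changes", output)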