alita-sdk 0.3.449__py3-none-any.whl → 0.3.465__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of alita-sdk might be problematic. Click here for more details.

Files changed (74)
  1. alita_sdk/cli/__init__.py +10 -0
  2. alita_sdk/cli/__main__.py +17 -0
  3. alita_sdk/cli/agent/__init__.py +0 -0
  4. alita_sdk/cli/agent/default.py +176 -0
  5. alita_sdk/cli/agent_executor.py +155 -0
  6. alita_sdk/cli/agent_loader.py +197 -0
  7. alita_sdk/cli/agent_ui.py +218 -0
  8. alita_sdk/cli/agents.py +1911 -0
  9. alita_sdk/cli/callbacks.py +576 -0
  10. alita_sdk/cli/cli.py +159 -0
  11. alita_sdk/cli/config.py +164 -0
  12. alita_sdk/cli/formatting.py +182 -0
  13. alita_sdk/cli/input_handler.py +256 -0
  14. alita_sdk/cli/mcp_loader.py +315 -0
  15. alita_sdk/cli/toolkit.py +330 -0
  16. alita_sdk/cli/toolkit_loader.py +55 -0
  17. alita_sdk/cli/tools/__init__.py +36 -0
  18. alita_sdk/cli/tools/approval.py +224 -0
  19. alita_sdk/cli/tools/filesystem.py +905 -0
  20. alita_sdk/cli/tools/planning.py +403 -0
  21. alita_sdk/cli/tools/terminal.py +280 -0
  22. alita_sdk/runtime/clients/client.py +16 -1
  23. alita_sdk/runtime/langchain/constants.py +2 -1
  24. alita_sdk/runtime/langchain/langraph_agent.py +74 -20
  25. alita_sdk/runtime/langchain/utils.py +20 -4
  26. alita_sdk/runtime/toolkits/artifact.py +5 -6
  27. alita_sdk/runtime/toolkits/mcp.py +5 -2
  28. alita_sdk/runtime/toolkits/tools.py +1 -0
  29. alita_sdk/runtime/tools/function.py +19 -6
  30. alita_sdk/runtime/tools/llm.py +65 -7
  31. alita_sdk/runtime/tools/vectorstore_base.py +17 -2
  32. alita_sdk/runtime/utils/mcp_sse_client.py +64 -6
  33. alita_sdk/tools/ado/repos/__init__.py +1 -0
  34. alita_sdk/tools/ado/test_plan/__init__.py +1 -1
  35. alita_sdk/tools/ado/wiki/__init__.py +1 -5
  36. alita_sdk/tools/ado/work_item/__init__.py +1 -5
  37. alita_sdk/tools/base_indexer_toolkit.py +64 -8
  38. alita_sdk/tools/bitbucket/__init__.py +1 -0
  39. alita_sdk/tools/code/sonar/__init__.py +1 -1
  40. alita_sdk/tools/confluence/__init__.py +2 -2
  41. alita_sdk/tools/github/__init__.py +2 -2
  42. alita_sdk/tools/gitlab/__init__.py +2 -1
  43. alita_sdk/tools/gitlab_org/__init__.py +1 -2
  44. alita_sdk/tools/google_places/__init__.py +2 -1
  45. alita_sdk/tools/jira/__init__.py +1 -0
  46. alita_sdk/tools/memory/__init__.py +1 -1
  47. alita_sdk/tools/pandas/__init__.py +1 -1
  48. alita_sdk/tools/postman/__init__.py +2 -1
  49. alita_sdk/tools/pptx/__init__.py +2 -2
  50. alita_sdk/tools/qtest/__init__.py +3 -3
  51. alita_sdk/tools/qtest/api_wrapper.py +1235 -51
  52. alita_sdk/tools/rally/__init__.py +1 -2
  53. alita_sdk/tools/report_portal/__init__.py +1 -0
  54. alita_sdk/tools/salesforce/__init__.py +1 -0
  55. alita_sdk/tools/servicenow/__init__.py +2 -3
  56. alita_sdk/tools/sharepoint/__init__.py +1 -0
  57. alita_sdk/tools/sharepoint/api_wrapper.py +22 -2
  58. alita_sdk/tools/sharepoint/authorization_helper.py +17 -1
  59. alita_sdk/tools/slack/__init__.py +1 -0
  60. alita_sdk/tools/sql/__init__.py +2 -1
  61. alita_sdk/tools/testio/__init__.py +1 -0
  62. alita_sdk/tools/testrail/__init__.py +1 -3
  63. alita_sdk/tools/xray/__init__.py +2 -1
  64. alita_sdk/tools/zephyr/__init__.py +2 -1
  65. alita_sdk/tools/zephyr_enterprise/__init__.py +1 -0
  66. alita_sdk/tools/zephyr_essential/__init__.py +1 -0
  67. alita_sdk/tools/zephyr_scale/__init__.py +1 -0
  68. alita_sdk/tools/zephyr_squad/__init__.py +1 -0
  69. {alita_sdk-0.3.449.dist-info → alita_sdk-0.3.465.dist-info}/METADATA +145 -2
  70. {alita_sdk-0.3.449.dist-info → alita_sdk-0.3.465.dist-info}/RECORD +74 -52
  71. alita_sdk-0.3.465.dist-info/entry_points.txt +2 -0
  72. {alita_sdk-0.3.449.dist-info → alita_sdk-0.3.465.dist-info}/WHEEL +0 -0
  73. {alita_sdk-0.3.449.dist-info → alita_sdk-0.3.465.dist-info}/licenses/LICENSE +0 -0
  74. {alita_sdk-0.3.449.dist-info → alita_sdk-0.3.465.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,1911 @@
1
+ """
2
+ Agent commands for Alita CLI.
3
+
4
+ Provides commands to work with agents interactively or in handoff mode,
5
+ supporting both platform agents and local agent definition files.
6
+ """
7
+
8
+ import asyncio
9
+ import click
10
+ import json
11
+ import logging
12
+ import sqlite3
13
+ import sys
14
+ from typing import Optional, Dict, Any, List
15
+ from pathlib import Path
16
+ import yaml
17
+
18
+ from rich.console import Console
19
+ from rich.panel import Panel
20
+ from rich.table import Table
21
+ from rich.markdown import Markdown
22
+ from rich import box
23
+ from rich.text import Text
24
+ from rich.status import Status
25
+ from rich.live import Live
26
+
27
+ from .cli import get_client
28
+ # Import from refactored modules
29
+ from .agent_ui import print_welcome, print_help, display_output, extract_output_from_result
30
+ from .agent_loader import load_agent_definition
31
+ from .agent_executor import create_llm_instance, create_agent_executor, create_agent_executor_with_mcp
32
+ from .toolkit_loader import load_toolkit_config, load_toolkit_configs
33
+ from .callbacks import create_cli_callback, CLICallbackHandler
34
+ from .input_handler import get_input_handler, styled_input, styled_selection_input
35
+
36
+ logger = logging.getLogger(__name__)
37
+
38
+ # Create a rich console for beautiful output
39
+ console = Console()
40
+
41
+
42
def _get_alita_system_prompt(config) -> str:
    """
    Resolve the system prompt used for the agent-less "Alita" chat mode.

    A user-provided ``default.agent.md`` inside ``config.agents_dir`` takes
    precedence; when that file is missing or unreadable, the built-in
    DEFAULT_PROMPT is used. Files beginning with ``---`` are treated as
    having YAML frontmatter, whose ``system_prompt`` key wins over the body.

    Returns:
        The system prompt string for Alita
    """
    from .agent.default import DEFAULT_PROMPT

    override_path = Path(config.agents_dir) / 'default.agent.md'

    if not override_path.exists():
        return DEFAULT_PROMPT

    try:
        raw = override_path.read_text(encoding='utf-8')

        if raw.startswith('---'):
            # Looks like YAML frontmatter; prefer its system_prompt key.
            try:
                segments = raw.split('---', 2)
                if len(segments) >= 3:
                    meta = yaml.safe_load(segments[1])
                    remainder = segments[2].strip()
                    return meta.get('system_prompt', remainder) if meta else remainder
            except Exception:
                # Malformed frontmatter: fall back to using the raw file body.
                pass

        # No frontmatter (or it failed to parse): the whole file is the prompt.
        return raw.strip()
    except Exception as e:
        logger.debug(f"Failed to load custom Alita prompt from {override_path}: {e}")

    return DEFAULT_PROMPT
78
+
79
+
80
def _load_mcp_tools(agent_def: Dict[str, Any], mcp_config_path: str) -> List[Dict[str, Any]]:
    """Delegate MCP tool loading to :mod:`mcp_loader` (imported lazily).

    Args:
        agent_def: Agent definition dictionary containing mcps list
        mcp_config_path: Path to mcp.json configuration file (workspace-level)

    Returns:
        List of toolkit configurations for MCP servers
    """
    # Deferred import keeps module import time low and avoids cycles.
    from .mcp_loader import load_mcp_tools

    return load_mcp_tools(agent_def, mcp_config_path)
92
+
93
+
94
def _setup_local_agent_executor(client, agent_def: Dict[str, Any], toolkit_config: tuple,
                                config, model: Optional[str], temperature: Optional[float],
                                max_tokens: Optional[int], memory, work_dir: Optional[str],
                                plan_state: Optional[Dict] = None):
    """Setup local agent executor with all configurations.

    Assembles toolkit configs (files + MCP servers), creates the LLM,
    optionally wires filesystem/terminal tools (when ``work_dir`` is set)
    and planning tools (when ``plan_state`` is a dict), then builds the
    executor — via the MCP-aware async path when any MCP toolkit is present.

    Args:
        client: Platform client used to build the LLM and executors.
        agent_def: Agent definition dict (tools, model, filesystem presets...).
        toolkit_config: Tuple of toolkit config file paths from the CLI.
        config: CLI config object; ``config.mcp_config_path`` is read here.
        model / temperature / max_tokens: Optional overrides for the LLM.
        memory: Conversation memory passed through to the executor factories.
        work_dir: If set, grants filesystem + terminal tool access to it.
        plan_state: Mutable dict kept in sync with the plan via callback;
            ``None`` disables planning tools entirely.

    Returns:
        Tuple of (agent_executor, mcp_session_manager, llm, llm_model, filesystem_tools, terminal_tools, planning_tools)
    """
    # Load toolkit configs
    toolkit_configs = load_toolkit_configs(agent_def, toolkit_config)

    # Load MCP tools
    mcp_toolkit_configs = _load_mcp_tools(agent_def, config.mcp_config_path)
    toolkit_configs.extend(mcp_toolkit_configs)

    # Create LLM instance
    llm, llm_model, llm_temperature, llm_max_tokens = create_llm_instance(
        client, model, agent_def, temperature, max_tokens
    )

    # Add filesystem tools if --dir is provided
    filesystem_tools = None
    terminal_tools = None
    if work_dir:
        from .tools import get_filesystem_tools, get_terminal_tools
        # Optional filtering knobs declared in the agent definition.
        preset = agent_def.get('filesystem_tools_preset')
        include_tools = agent_def.get('filesystem_tools_include')
        exclude_tools = agent_def.get('filesystem_tools_exclude')
        filesystem_tools = get_filesystem_tools(work_dir, include_tools, exclude_tools, preset)

        # Also add terminal tools when work_dir is set
        terminal_tools = get_terminal_tools(work_dir)

        tool_count = len(filesystem_tools) + len(terminal_tools)
        access_msg = f"✓ Granted filesystem & terminal access to: {work_dir} ({tool_count} tools)"
        if preset:
            access_msg += f" [preset: {preset}]"
        if include_tools:
            access_msg += f" [include: {', '.join(include_tools)}]"
        if exclude_tools:
            access_msg += f" [exclude: {', '.join(exclude_tools)}]"
        console.print(f"[dim]{access_msg}[/dim]")

    # Add planning tools (always available)
    planning_tools = None
    plan_state_obj = None
    if plan_state is not None:
        from .tools import get_planning_tools, PlanState
        # Create a plan callback to update the dict when plan changes.
        # The closure mutates the caller's dict in place so the caller
        # observes plan updates without polling.
        def plan_callback(state: PlanState):
            plan_state['title'] = state.title
            plan_state['steps'] = state.to_dict()['steps']
            plan_state['session_id'] = state.session_id

        # Get session_id from plan_state dict if provided (resumes a session)
        session_id = plan_state.get('session_id')
        planning_tools, plan_state_obj = get_planning_tools(
            plan_state=None,
            plan_callback=plan_callback,
            session_id=session_id
        )
        console.print(f"[dim]✓ Planning tools enabled ({len(planning_tools)} tools) [session: {plan_state_obj.session_id}][/dim]")

    # Check if we have tools
    has_tools = bool(agent_def.get('tools') or toolkit_configs or filesystem_tools or terminal_tools or planning_tools)
    has_mcp = any(tc.get('toolkit_type') == 'mcp' for tc in toolkit_configs)

    if not has_tools:
        # No tools at all: caller falls back to plain LLM chat (no executor).
        return None, None, llm, llm_model, filesystem_tools, terminal_tools, planning_tools

    # Create agent executor with or without MCP
    mcp_session_manager = None
    if has_mcp:
        # Create persistent event loop for MCP tools.
        # NOTE(review): the loop is stashed on the LLMNode class so MCP
        # sessions survive across turns; presumably LLMNode reuses it for
        # subsequent async tool calls — confirm against runtime.tools.llm.
        from alita_sdk.runtime.tools.llm import LLMNode
        if not hasattr(LLMNode, '_persistent_loop') or \
                LLMNode._persistent_loop is None or \
                LLMNode._persistent_loop.is_closed():
            LLMNode._persistent_loop = asyncio.new_event_loop()
            console.print("[dim]Created persistent event loop for MCP tools[/dim]")

        # Load MCP tools using persistent loop
        loop = LLMNode._persistent_loop
        asyncio.set_event_loop(loop)
        agent_executor, mcp_session_manager = loop.run_until_complete(
            create_agent_executor_with_mcp(
                client, agent_def, toolkit_configs,
                llm, llm_model, llm_temperature, llm_max_tokens, memory,
                filesystem_tools=filesystem_tools,
                terminal_tools=terminal_tools,
                planning_tools=planning_tools
            )
        )
    else:
        agent_executor = create_agent_executor(
            client, agent_def, toolkit_configs,
            llm, llm_model, llm_temperature, llm_max_tokens, memory,
            filesystem_tools=filesystem_tools,
            terminal_tools=terminal_tools,
            planning_tools=planning_tools
        )

    return agent_executor, mcp_session_manager, llm, llm_model, filesystem_tools, terminal_tools, planning_tools
198
+
199
+
200
def _select_model_interactive(client) -> Optional[Dict[str, Any]]:
    """
    Prompt the user to pick one of the platform's available models.

    Returns:
        Selected model info dict or None if cancelled
    """
    console.print("\n🔧 [bold cyan]Select a model:[/bold cyan]\n")

    try:
        # Fetch the catalogue via the get_available_models API
        available = client.get_available_models()
        if not available:
            console.print("[yellow]No models available from the platform.[/yellow]")
            return None

        # API returns items[].name; keep only entries that carry a name
        choices = [
            {'name': item.get('name'), 'id': item.get('id'), 'model_data': item}
            for item in available
            if item.get('name')
        ]

        if not choices:
            console.print("[yellow]No models found.[/yellow]")
            return None

        # Render a numbered table of candidates
        table = Table(show_header=True, header_style="bold cyan", box=box.SIMPLE)
        table.add_column("#", style="dim", width=4)
        table.add_column("Model", style="cyan")
        for number, choice in enumerate(choices, 1):
            table.add_row(str(number), choice['name'])

        console.print(table)
        console.print(f"\n[dim]0. Cancel[/dim]")

        # Keep asking until a valid number is entered; 0 cancels
        while True:
            try:
                answer = styled_selection_input("Select model number")

                if answer == '0':
                    return None

                index = int(answer) - 1
                if not (0 <= index < len(choices)):
                    console.print(f"[yellow]Invalid selection. Please enter a number between 0 and {len(choices)}[/yellow]")
                    continue

                picked = choices[index]
                console.print(f"✓ [green]Selected:[/green] [bold]{picked['name']}[/bold]")
                return picked
            except ValueError:
                console.print("[yellow]Please enter a valid number[/yellow]")
            except (KeyboardInterrupt, EOFError):
                return None

    except Exception as e:
        console.print(f"[red]Error fetching models: {e}[/red]")
        return None
265
+
266
+
267
def _select_mcp_interactive(config) -> Optional[Dict[str, Any]]:
    """
    Let the user pick one MCP server defined in the workspace mcp.json.

    Returns:
        Selected MCP server config dict or None if cancelled
    """
    from .mcp_loader import load_mcp_config

    console.print("\n🔌 [bold cyan]Select an MCP server to add:[/bold cyan]\n")

    servers = load_mcp_config(config.mcp_config_path).get('mcpServers', {})

    if not servers:
        console.print(f"[yellow]No MCP servers found in {config.mcp_config_path}[/yellow]")
        return None

    entries = list(servers.items())

    # Numbered table: server name, transport type, and a command/URL preview
    table = Table(show_header=True, header_style="bold cyan", box=box.SIMPLE)
    table.add_column("#", style="dim", width=4)
    table.add_column("Server", style="cyan")
    table.add_column("Type", style="dim")
    table.add_column("Command/URL", style="dim")

    for number, (server_name, server_cfg) in enumerate(entries, 1):
        transport = server_cfg.get('type', 'stdio')
        target = server_cfg.get('url') or server_cfg.get('command', '')
        table.add_row(str(number), server_name, transport, target[:40])

    console.print(table)
    console.print(f"\n[dim]0. Cancel[/dim]")

    # Keep asking until a valid number is entered; 0 cancels
    while True:
        try:
            answer = styled_selection_input("Select MCP server number")

            if answer == '0':
                return None

            index = int(answer) - 1
            if 0 <= index < len(entries):
                server_name, server_cfg = entries[index]
                console.print(f"✓ [green]Selected:[/green] [bold]{server_name}[/bold]")
                return {'name': server_name, 'config': server_cfg}
            console.print(f"[yellow]Invalid selection. Please enter a number between 0 and {len(entries)}[/yellow]")
        except ValueError:
            console.print("[yellow]Please enter a valid number[/yellow]")
        except (KeyboardInterrupt, EOFError):
            return None
321
+
322
+
323
def _select_toolkit_interactive(config) -> Optional[Dict[str, Any]]:
    """
    Prompt the user to pick a toolkit definition from $ALITA_DIR/tools.

    Returns:
        Selected toolkit config dict or None if cancelled
    """
    console.print("\n🧰 [bold cyan]Select a toolkit to add:[/bold cyan]\n")

    tools_dir = Path(config.tools_dir)

    if not tools_dir.exists():
        console.print(f"[yellow]Tools directory not found: {tools_dir}[/yellow]")
        return None

    # Gather every JSON/YAML file directly under the tools directory
    candidate_files: List[Path] = []
    for glob_pattern in ('*.json', '*.yaml', '*.yml'):
        candidate_files.extend(tools_dir.glob(glob_pattern))

    if not candidate_files:
        console.print(f"[yellow]No toolkit configurations found in {tools_dir}[/yellow]")
        return None

    # Parse each candidate; unreadable files are logged and skipped
    entries = []
    for candidate in candidate_files:
        try:
            parsed = load_toolkit_config(str(candidate))
            entries.append({
                'file': str(candidate),
                'name': parsed.get('toolkit_name') or parsed.get('name') or candidate.stem,
                'type': parsed.get('toolkit_type') or parsed.get('type', 'unknown'),
                'config': parsed
            })
        except Exception as e:
            logger.debug(f"Failed to load toolkit config {candidate}: {e}")

    if not entries:
        console.print(f"[yellow]No valid toolkit configurations found in {tools_dir}[/yellow]")
        return None

    # Numbered table: toolkit name, type, and the source file name
    table = Table(show_header=True, header_style="bold cyan", box=box.SIMPLE)
    table.add_column("#", style="dim", width=4)
    table.add_column("Toolkit", style="cyan")
    table.add_column("Type", style="dim")
    table.add_column("File", style="dim")

    for number, entry in enumerate(entries, 1):
        table.add_row(str(number), entry['name'], entry['type'], Path(entry['file']).name)

    console.print(table)
    console.print(f"\n[dim]0. Cancel[/dim]")

    # Keep asking until a valid number is entered; 0 cancels
    while True:
        try:
            answer = styled_selection_input("Select toolkit number")

            if answer == '0':
                return None

            index = int(answer) - 1
            if 0 <= index < len(entries):
                picked = entries[index]
                console.print(f"✓ [green]Selected:[/green] [bold]{picked['name']}[/bold]")
                return picked
            console.print(f"[yellow]Invalid selection. Please enter a number between 0 and {len(entries)}[/yellow]")
        except ValueError:
            console.print("[yellow]Please enter a valid number[/yellow]")
        except (KeyboardInterrupt, EOFError):
            return None
397
+
398
+
399
def _select_agent_interactive(client, config) -> Optional[str]:
    """
    Interactive picker covering direct chat, platform agents, and local files.

    Returns:
        Agent source (name/id for platform, file path for local, '__direct__' for direct chat) or None if cancelled
    """
    from .config import CLIConfig

    console.print("\n🤖 [bold cyan]Select an agent to chat with:[/bold cyan]\n")

    # Option 1 is always the agent-less direct LLM chat ("Alita")
    console.print(f"1. [[bold]💬 Alita[/bold]] [cyan]Chat directly with LLM (no agent)[/cyan]")
    console.print(f" [dim]Direct conversation with the model without agent configuration[/dim]")

    agents_list = []

    # Platform agents — best effort; failures are logged, not fatal
    try:
        for platform_agent in client.get_list_of_apps():
            agents_list.append({
                'type': 'platform',
                'name': platform_agent['name'],
                'source': platform_agent['name'],
                'description': platform_agent.get('description', '')[:60]
            })
    except Exception as e:
        logger.debug(f"Failed to load platform agents: {e}")

    # Local agent definition files under the configured agents directory
    search_dir = Path(config.agents_dir)

    if search_dir.exists():
        for pattern in ['*.agent.md', '*.agent.yaml', '*.agent.yml', '*.agent.json']:
            for file_path in search_dir.rglob(pattern):
                try:
                    agent_def = load_agent_definition(str(file_path))
                    agents_list.append({
                        'type': 'local',
                        'name': agent_def.get('name', file_path.stem),
                        'source': str(file_path),
                        'description': agent_def.get('description', '')[:60]
                    })
                except Exception as e:
                    logger.debug(f"Failed to load {file_path}: {e}")

    # Numbering starts at 2 because option 1 is reserved for direct chat
    for i, agent in enumerate(agents_list, 2):
        agent_type = "📦 Platform" if agent['type'] == 'platform' else "📁 Local"
        console.print(f"{i}. [[bold]{agent_type}[/bold]] [cyan]{agent['name']}[/cyan]")
        if agent['description']:
            console.print(f" [dim]{agent['description']}[/dim]")

    console.print(f"\n[dim]0. Cancel[/dim]")

    # Keep asking until a valid number is entered; 0 cancels, 1 = direct chat
    while True:
        try:
            answer = styled_selection_input("Select agent number")

            if answer == '0':
                return None

            if answer == '1':
                console.print(f"✓ [green]Selected:[/green] [bold]Alita[/bold]")
                return '__direct__'

            index = int(answer) - 2  # options 2.. map onto agents_list[0..]
            if 0 <= index < len(agents_list):
                picked = agents_list[index]
                console.print(f"✓ [green]Selected:[/green] [bold]{picked['name']}[/bold]")
                return picked['source']
            console.print(f"[yellow]Invalid selection. Please enter a number between 0 and {len(agents_list) + 1}[/yellow]")
        except ValueError:
            console.print("[yellow]Please enter a valid number[/yellow]")
        except (KeyboardInterrupt, EOFError):
            console.print("\n[dim]Cancelled.[/dim]")
            return None
480
+
481
+
482
@click.group()
def agent():
    """Agent testing and interaction commands."""
    # Click group: subcommands (list/show/chat) register via @agent.command.
    # The docstring above is the CLI help text; keep it user-facing.
    pass
486
+
487
+
488
@agent.command('list')
@click.option('--local', is_flag=True, help='List local agent definition files')
@click.option('--directory', default=None, help='Directory to search for local agents (defaults to AGENTS_DIR from .env)')
@click.pass_context
def agent_list(ctx, local: bool, directory: Optional[str]):
    """
    List available agents.

    By default, lists agents from the platform.
    Use --local to list agent definition files in the local directory.
    """
    formatter = ctx.obj['formatter']
    config = ctx.obj['config']

    try:
        if local:
            # List local agent definition files
            if directory is None:
                # Fall back to the configured agents directory
                directory = config.agents_dir
            search_dir = Path(directory)

            if not search_dir.exists():
                console.print(f"[red]Directory not found: {directory}[/red]")
                return

            agents = []

            # Find agent definition files (recursive search, four extensions)
            for pattern in ['*.agent.md', '*.agent.yaml', '*.agent.yml', '*.agent.json']:
                for file_path in search_dir.rglob(pattern):
                    try:
                        agent_def = load_agent_definition(str(file_path))
                        # Use relative path if already relative, otherwise make it relative to cwd
                        try:
                            display_path = str(file_path.relative_to(Path.cwd()))
                        except ValueError:
                            # Path is outside cwd; show it absolute
                            display_path = str(file_path)

                        agents.append({
                            'name': agent_def.get('name', file_path.stem),
                            'file': display_path,
                            # Truncate long descriptions for table display
                            'description': agent_def.get('description', '')[:80]
                        })
                    except Exception as e:
                        # Unparseable definitions are skipped, not fatal
                        logger.debug(f"Failed to load {file_path}: {e}")

            if not agents:
                console.print(f"\n[yellow]No agent definition files found in {directory}[/yellow]")
                return

            # Display local agents in a table
            table = Table(
                title=f"Local Agent Definitions in {directory}",
                show_header=True,
                header_style="bold cyan",
                border_style="cyan",
                box=box.ROUNDED
            )
            table.add_column("Name", style="bold cyan", no_wrap=True)
            table.add_column("File", style="dim")
            table.add_column("Description", style="white")

            # Sort alphabetically by agent name for stable output
            for agent_info in sorted(agents, key=lambda x: x['name']):
                table.add_row(
                    agent_info['name'],
                    agent_info['file'],
                    agent_info['description'] or "-"
                )

            console.print("\n")
            console.print(table)
            console.print(f"\n[green]Total: {len(agents)} local agents[/green]")

        else:
            # List platform agents
            client = get_client(ctx)

            agents = client.get_list_of_apps()

            # NOTE(review): JSON mode detected by class name rather than
            # isinstance — confirm this matches the formatter hierarchy.
            if formatter.__class__.__name__ == 'JSONFormatter':
                click.echo(formatter._dump({'agents': agents, 'total': len(agents)}))
            else:
                table = Table(
                    title="Available Platform Agents",
                    show_header=True,
                    header_style="bold cyan",
                    border_style="cyan",
                    box=box.ROUNDED
                )
                table.add_column("ID", style="yellow", no_wrap=True)
                table.add_column("Name", style="bold cyan")
                table.add_column("Description", style="white")

                for agent_info in agents:
                    table.add_row(
                        str(agent_info['id']),
                        agent_info['name'],
                        agent_info.get('description', '')[:80] or "-"
                    )

                console.print("\n")
                console.print(table)
                console.print(f"\n[green]Total: {len(agents)} agents[/green]")

    except Exception as e:
        logger.exception("Failed to list agents")
        error_panel = Panel(
            str(e),
            title="Error",
            border_style="red",
            box=box.ROUNDED
        )
        console.print(error_panel, style="red")
        raise click.Abort()
602
+
603
+
604
@agent.command('show')
@click.argument('agent_source')
@click.option('--version', help='Agent version (for platform agents)')
@click.pass_context
def agent_show(ctx, agent_source: str, version: Optional[str]):
    """
    Show agent details.

    AGENT_SOURCE can be:
    - Platform agent ID or name (e.g., "123" or "my-agent")
    - Path to local agent file (e.g., ".github/agents/sdk-dev.agent.md")
    """
    # NOTE(review): the --version option is accepted but never used in this
    # command body — confirm whether version-specific display was intended.
    formatter = ctx.obj['formatter']

    try:
        # Check if it's a file path; an existing path means a local agent
        if Path(agent_source).exists():
            # Local agent file
            agent_def = load_agent_definition(agent_source)

            if formatter.__class__.__name__ == 'JSONFormatter':
                click.echo(formatter._dump(agent_def))
            else:
                # Create details panel (only fields present in the definition)
                details = Text()
                details.append("File: ", style="bold")
                details.append(f"{agent_source}\n", style="cyan")

                if agent_def.get('description'):
                    details.append("\nDescription: ", style="bold")
                    details.append(f"{agent_def['description']}\n", style="white")

                if agent_def.get('model'):
                    details.append("Model: ", style="bold")
                    details.append(f"{agent_def['model']}\n", style="cyan")

                if agent_def.get('tools'):
                    details.append("Tools: ", style="bold")
                    details.append(f"{', '.join(agent_def['tools'])}\n", style="cyan")

                if agent_def.get('temperature') is not None:
                    details.append("Temperature: ", style="bold")
                    details.append(f"{agent_def['temperature']}\n", style="cyan")

                panel = Panel(
                    details,
                    title=f"Local Agent: {agent_def.get('name', 'Unknown')}",
                    title_align="left",
                    border_style="cyan",
                    box=box.ROUNDED
                )
                console.print("\n")
                console.print(panel)

                if agent_def.get('system_prompt'):
                    # Truncate the prompt preview to 500 characters
                    console.print("\n[bold]System Prompt:[/bold]")
                    console.print(Panel(agent_def['system_prompt'][:500] + "...", border_style="dim", box=box.ROUNDED))

        else:
            # Platform agent
            client = get_client(ctx)

            # Try to find agent by ID or name
            agents = client.get_list_of_apps()

            agent = None
            try:
                # Numeric source: match on ID
                agent_id = int(agent_source)
                agent = next((a for a in agents if a['id'] == agent_id), None)
            except ValueError:
                # Non-numeric source: match on exact name
                agent = next((a for a in agents if a['name'] == agent_source), None)

            if not agent:
                raise click.ClickException(f"Agent '{agent_source}' not found")

            # Get details
            details = client.get_app_details(agent['id'])

            if formatter.__class__.__name__ == 'JSONFormatter':
                click.echo(formatter._dump(details))
            else:
                # Create platform agent details panel
                content = Text()
                content.append("ID: ", style="bold")
                content.append(f"{details['id']}\n", style="yellow")

                if details.get('description'):
                    content.append("\nDescription: ", style="bold")
                    content.append(f"{details['description']}\n", style="white")

                panel = Panel(
                    content,
                    title=f"Agent: {details['name']}",
                    title_align="left",
                    border_style="cyan",
                    box=box.ROUNDED
                )
                console.print("\n")
                console.print(panel)

                # Display versions in a table
                if details.get('versions'):
                    console.print("\n[bold]Versions:[/bold]")
                    versions_table = Table(box=box.ROUNDED, border_style="dim")
                    versions_table.add_column("Name", style="cyan")
                    versions_table.add_column("ID", style="yellow")
                    for ver in details.get('versions', []):
                        versions_table.add_row(ver['name'], str(ver['id']))
                    console.print(versions_table)

    except click.ClickException:
        # Let click render its own error message/exit code
        raise
    except Exception as e:
        logger.exception("Failed to show agent details")
        error_panel = Panel(
            str(e),
            title="Error",
            border_style="red",
            box=box.ROUNDED
        )
        console.print(error_panel, style="red")
        raise click.Abort()
726
+
727
+
728
+ @agent.command('chat')
729
+ @click.argument('agent_source', required=False)
730
@click.option('--version', help='Agent version (for platform agents)')
@click.option('--toolkit-config', multiple=True, type=click.Path(exists=True),
              help='Toolkit configuration files (can specify multiple)')
@click.option('--thread-id', help='Continue existing conversation thread')
@click.option('--model', help='Override LLM model')
@click.option('--temperature', type=float, help='Override temperature')
@click.option('--max-tokens', type=int, help='Override max tokens')
@click.option('--dir', 'work_dir', type=click.Path(exists=True, file_okay=False, dir_okay=True),
              help='Grant agent filesystem access to this directory')
@click.option('--verbose', '-v', type=click.Choice(['quiet', 'default', 'debug']), default='default',
              help='Output verbosity level: quiet (final output only), default (tool calls + outputs), debug (all including LLM calls)')
@click.pass_context
def agent_chat(ctx, agent_source: Optional[str], version: Optional[str],
               toolkit_config: tuple, thread_id: Optional[str],
               model: Optional[str], temperature: Optional[float],
               max_tokens: Optional[int], work_dir: Optional[str],
               verbose: str):
    """
    Start interactive chat with an agent.

    If AGENT_SOURCE is not provided, shows an interactive menu to select from
    available agents (both platform and local).

    AGENT_SOURCE can be:
    - Platform agent ID or name
    - Path to local agent file

    Examples:

        # Interactive selection
        alita-cli agent chat

        # Chat with platform agent
        alita-cli agent chat my-agent

        # Chat with local agent
        alita-cli agent chat .github/agents/sdk-dev.agent.md

        # With toolkit configurations
        alita-cli agent chat my-agent \\
            --toolkit-config jira-config.json \\
            --toolkit-config github-config.json

        # With filesystem access
        alita-cli agent chat my-agent --dir ./workspace

        # Continue previous conversation
        alita-cli agent chat my-agent --thread-id abc123

        # Quiet mode (hide tool calls and thinking)
        alita-cli agent chat my-agent --verbose quiet

        # Debug mode (show all including LLM calls)
        alita-cli agent chat my-agent --verbose debug
    """
    formatter = ctx.obj['formatter']
    config = ctx.obj['config']
    client = get_client(ctx)

    # Setup verbose level
    show_verbose = verbose != 'quiet'
    debug_mode = verbose == 'debug'

    try:
        # If no agent specified, start with direct chat by default
        if not agent_source:
            agent_source = '__direct__'

        # Check for direct chat mode
        is_direct = agent_source == '__direct__'
        is_local = not is_direct and Path(agent_source).exists()

        # Initialize variables for dynamic updates (the slash-commands below
        # may replace the executor/LLM/tools mid-session)
        current_model = model
        current_temperature = temperature
        current_max_tokens = max_tokens
        added_mcp_configs = []
        added_toolkit_configs = list(toolkit_config) if toolkit_config else []
        mcp_session_manager = None
        llm = None
        agent_executor = None
        agent_def = {}
        filesystem_tools = None
        terminal_tools = None
        planning_tools = None
        plan_state = None

        # Approval mode: 'always' (confirm each tool), 'auto' (no confirmation), 'yolo' (no safety checks)
        approval_mode = 'always'
        current_work_dir = work_dir  # Track work_dir for /dir command
        current_agent_file = agent_source if is_local else None  # Track agent file for /reload command

        if is_direct:
            # Direct chat mode - no agent, just LLM with Alita instructions
            agent_name = "Alita"
            agent_type = "Direct LLM"
            alita_prompt = _get_alita_system_prompt(config)
            agent_def = {
                'model': model or 'gpt-5',
                'temperature': temperature if temperature is not None else 0.1,
                'max_tokens': max_tokens or 4096,
                'system_prompt': alita_prompt
            }
        elif is_local:
            agent_def = load_agent_definition(agent_source)
            agent_name = agent_def.get('name', Path(agent_source).stem)
            agent_type = "Local Agent"
        else:
            # Platform agent - find it by numeric id first, then by name
            agents = client.get_list_of_apps()
            agent = None

            try:
                agent_id = int(agent_source)
                agent = next((a for a in agents if a['id'] == agent_id), None)
            except ValueError:
                agent = next((a for a in agents if a['name'] == agent_source), None)

            if not agent:
                raise click.ClickException(f"Agent '{agent_source}' not found")

            agent_name = agent['name']
            agent_type = "Platform Agent"

        # Get model and temperature for welcome banner
        llm_model_display = current_model or agent_def.get('model', 'gpt-4o')
        llm_temperature_display = current_temperature if current_temperature is not None else agent_def.get('temperature', 0.1)

        # Print nice welcome banner
        print_welcome(agent_name, llm_model_display, llm_temperature_display, approval_mode)

        # Initialize conversation
        chat_history = []

        # Initialize session for persistence (memory + plan)
        from .tools import generate_session_id, create_session_memory, save_session_metadata
        current_session_id = generate_session_id()
        plan_state = {'session_id': current_session_id}

        # Create persistent memory for agent (stored in session directory)
        memory = create_session_memory(current_session_id)

        # Save session metadata.
        # BUGFIX: `agent_type` is assigned in all three branches above, so the
        # old `agent_type if 'agent_type' in dir() else 'Direct LLM'` guard was
        # dead code built on a fragile dir() check — use the value directly.
        save_session_metadata(current_session_id, {
            'agent_name': agent_name,
            'agent_type': agent_type,
            'model': llm_model_display,
            'temperature': llm_temperature_display,
            'work_dir': work_dir,
            'is_direct': is_direct,
            'is_local': is_local,
        })
        console.print(f"[dim]Session: {current_session_id}[/dim]")

        # Create agent executor
        if is_direct or is_local:
            # Setup local agent executor (handles all config, tools, MCP, etc.)
            try:
                agent_executor, mcp_session_manager, llm, llm_model, filesystem_tools, terminal_tools, planning_tools = _setup_local_agent_executor(
                    client, agent_def, tuple(added_toolkit_configs), config, current_model, current_temperature, current_max_tokens, memory, work_dir, plan_state
                )
            except Exception:
                # Setup helper already reported the failure; abort quietly
                return
        else:
            # Platform agent
            details = client.get_app_details(agent['id'])

            if version:
                version_obj = next((v for v in details['versions'] if v['name'] == version), None)
                if not version_obj:
                    raise click.ClickException(f"Version '{version}' not found")
                version_id = version_obj['id']
            else:
                # Use first version
                version_id = details['versions'][0]['id']

            # Display configuration
            console.print()
            console.print("✓ [green]Connected to platform agent[/green]")
            console.print()

            agent_executor = client.application(
                application_id=agent['id'],
                application_version_id=version_id,
                memory=memory,
                chat_history=chat_history
            )
            llm = None  # Platform agents don't use direct LLM

        # Initialize input handler for readline support
        input_handler = get_input_handler()

        # Interactive chat loop
        while True:
            try:
                # Get input with styled prompt (prompt is part of input() for proper readline handling)
                user_input = styled_input().strip()

                if not user_input:
                    continue

                # Handle commands
                if user_input.lower() in ['exit', 'quit']:
                    console.print("\n[bold cyan]👋 Goodbye![/bold cyan]\n")
                    break

                if user_input == '/clear':
                    # NOTE(review): only the in-process history is cleared; the
                    # session SQLite memory is untouched — confirm intended.
                    chat_history = []
                    console.print("[green]✓ Conversation history cleared.[/green]")
                    continue

                if user_input == '/history':
                    if not chat_history:
                        console.print("[yellow]No conversation history yet.[/yellow]")
                    else:
                        console.print("\n[bold cyan]── Conversation History ──[/bold cyan]")
                        for i, msg in enumerate(chat_history, 1):
                            role = msg.get('role', 'unknown')
                            content = msg.get('content', '')
                            role_color = 'blue' if role == 'user' else 'green'
                            console.print(f"\n[bold {role_color}]{i}. {role.upper()}:[/bold {role_color}] {content[:100]}...")
                    continue

                if user_input == '/save':
                    console.print("[yellow]Save to file (default: conversation.json):[/yellow] ", end="")
                    filename = input().strip()
                    filename = filename or "conversation.json"
                    with open(filename, 'w') as f:
                        json.dump({'history': chat_history}, f, indent=2)
                    # BUGFIX: confirmation message had a broken placeholder and
                    # never showed the actual target file
                    console.print(f"[green]✓ Conversation saved to {filename}[/green]")
                    continue

                if user_input == '/help':
                    print_help()
                    continue

                # /model command - switch model
                if user_input == '/model':
                    if not (is_direct or is_local):
                        console.print("[yellow]Model switching is only available for local agents and direct chat.[/yellow]")
                        continue

                    selected_model = _select_model_interactive(client)
                    if selected_model:
                        current_model = selected_model['name']
                        agent_def['model'] = current_model

                        # Recreate LLM and agent executor - use session memory to preserve history
                        memory = create_session_memory(current_session_id)
                        try:
                            agent_executor, mcp_session_manager, llm, llm_model, filesystem_tools, terminal_tools, planning_tools = _setup_local_agent_executor(
                                client, agent_def, tuple(added_toolkit_configs), config, current_model, current_temperature, current_max_tokens, memory, current_work_dir, plan_state
                            )
                            console.print(Panel(
                                f"[cyan]ℹ Model switched to [bold]{current_model}[/bold]. Agent state reset, chat history preserved.[/cyan]",
                                border_style="cyan",
                                box=box.ROUNDED
                            ))
                        except Exception as e:
                            console.print(f"[red]Error switching model: {e}[/red]")
                    continue

                # /reload command - reload agent definition from file
                if user_input == '/reload':
                    if not is_local:
                        if is_direct:
                            console.print("[yellow]Cannot reload direct chat mode - no agent file to reload.[/yellow]")
                        else:
                            console.print("[yellow]Reload is only available for local agents (file-based).[/yellow]")
                        continue

                    if not current_agent_file or not Path(current_agent_file).exists():
                        console.print("[red]Agent file not found. Cannot reload.[/red]")
                        continue

                    try:
                        # Reload agent definition from file
                        new_agent_def = load_agent_definition(current_agent_file)

                        # Preserve runtime additions (MCPs, tools added via commands)
                        if 'mcps' in agent_def and agent_def['mcps']:
                            # Merge MCPs: file MCPs + runtime added MCPs
                            file_mcps = new_agent_def.get('mcps', [])
                            for mcp in agent_def['mcps']:
                                mcp_name = mcp if isinstance(mcp, str) else mcp.get('name')
                                file_mcp_names = [m if isinstance(m, str) else m.get('name') for m in file_mcps]
                                if mcp_name not in file_mcp_names:
                                    file_mcps.append(mcp)
                            new_agent_def['mcps'] = file_mcps

                        # Update agent_def with new values (preserving model/temp overrides)
                        old_system_prompt = agent_def.get('system_prompt', '')
                        new_system_prompt = new_agent_def.get('system_prompt', '')

                        agent_def.update(new_agent_def)

                        # Restore runtime overrides
                        if current_model:
                            agent_def['model'] = current_model
                        if current_temperature is not None:
                            agent_def['temperature'] = current_temperature
                        if current_max_tokens:
                            agent_def['max_tokens'] = current_max_tokens

                        # Recreate agent executor with reloaded definition
                        memory = create_session_memory(current_session_id)
                        agent_executor, mcp_session_manager, llm, llm_model, filesystem_tools, terminal_tools, planning_tools = _setup_local_agent_executor(
                            client, agent_def, tuple(added_toolkit_configs), config, current_model, current_temperature, current_max_tokens, memory, current_work_dir, plan_state
                        )

                        # Show what changed
                        prompt_changed = old_system_prompt != new_system_prompt
                        agent_name = agent_def.get('name', Path(current_agent_file).stem)

                        if prompt_changed:
                            console.print(Panel(
                                f"[green]✓ Reloaded agent: [bold]{agent_name}[/bold][/green]\n"
                                f"[dim]System prompt updated ({len(new_system_prompt)} chars)[/dim]",
                                border_style="green",
                                box=box.ROUNDED
                            ))
                        else:
                            console.print(Panel(
                                f"[cyan]ℹ Reloaded agent: [bold]{agent_name}[/bold][/cyan]\n"
                                f"[dim]No changes detected in system prompt[/dim]",
                                border_style="cyan",
                                box=box.ROUNDED
                            ))
                    except Exception as e:
                        console.print(f"[red]Error reloading agent: {e}[/red]")
                    continue

                # /add_mcp command - add MCP server
                if user_input == '/add_mcp':
                    if not (is_direct or is_local):
                        console.print("[yellow]Adding MCP is only available for local agents and direct chat.[/yellow]")
                        continue

                    selected_mcp = _select_mcp_interactive(config)
                    if selected_mcp:
                        mcp_name = selected_mcp['name']
                        # Add MCP to agent definition
                        if 'mcps' not in agent_def:
                            agent_def['mcps'] = []
                        if mcp_name not in [m if isinstance(m, str) else m.get('name') for m in agent_def.get('mcps', [])]:
                            agent_def['mcps'].append(mcp_name)

                        # Recreate agent executor with new MCP - use session memory to preserve history
                        memory = create_session_memory(current_session_id)
                        try:
                            agent_executor, mcp_session_manager, llm, llm_model, filesystem_tools, terminal_tools, planning_tools = _setup_local_agent_executor(
                                client, agent_def, tuple(added_toolkit_configs), config, current_model, current_temperature, current_max_tokens, memory, current_work_dir, plan_state
                            )
                            console.print(Panel(
                                f"[cyan]ℹ Added MCP: [bold]{mcp_name}[/bold]. Agent state reset, chat history preserved.[/cyan]",
                                border_style="cyan",
                                box=box.ROUNDED
                            ))
                        except Exception as e:
                            console.print(f"[red]Error adding MCP: {e}[/red]")
                    continue

                # /add_toolkit command - add toolkit
                if user_input == '/add_toolkit':
                    if not (is_direct or is_local):
                        console.print("[yellow]Adding toolkit is only available for local agents and direct chat.[/yellow]")
                        continue

                    selected_toolkit = _select_toolkit_interactive(config)
                    if selected_toolkit:
                        toolkit_name = selected_toolkit['name']
                        toolkit_file = selected_toolkit['file']

                        # Add toolkit config path
                        if toolkit_file not in added_toolkit_configs:
                            added_toolkit_configs.append(toolkit_file)

                        # Recreate agent executor with new toolkit - use session memory to preserve history
                        memory = create_session_memory(current_session_id)
                        try:
                            agent_executor, mcp_session_manager, llm, llm_model, filesystem_tools, terminal_tools, planning_tools = _setup_local_agent_executor(
                                client, agent_def, tuple(added_toolkit_configs), config, current_model, current_temperature, current_max_tokens, memory, current_work_dir, plan_state
                            )
                            console.print(Panel(
                                f"[cyan]ℹ Added toolkit: [bold]{toolkit_name}[/bold]. Agent state reset, chat history preserved.[/cyan]",
                                border_style="cyan",
                                box=box.ROUNDED
                            ))
                        except Exception as e:
                            console.print(f"[red]Error adding toolkit: {e}[/red]")
                    continue

                # /mode command - set approval mode
                if user_input == '/mode' or user_input.startswith('/mode '):
                    parts = user_input.split(maxsplit=1)
                    if len(parts) == 1:
                        # Show current mode and options
                        mode_info = {
                            'always': ('yellow', 'Confirm before each tool execution'),
                            'auto': ('green', 'Execute tools without confirmation'),
                            'yolo': ('red', 'No confirmations, skip safety warnings')
                        }
                        console.print("\n🔧 [bold cyan]Approval Mode:[/bold cyan]\n")
                        for mode_name, (color, desc) in mode_info.items():
                            marker = "●" if mode_name == approval_mode else "○"
                            console.print(f"  [{color}]{marker}[/{color}] [bold]{mode_name}[/bold] - {desc}")
                        console.print(f"\n[dim]Usage: /mode <always|auto|yolo>[/dim]")
                    else:
                        new_mode = parts[1].lower().strip()
                        if new_mode in ['always', 'auto', 'yolo']:
                            approval_mode = new_mode
                            mode_colors = {'always': 'yellow', 'auto': 'green', 'yolo': 'red'}
                            console.print(f"✓ [green]Mode set to[/green] [{mode_colors[new_mode]}][bold]{new_mode}[/bold][/{mode_colors[new_mode]}]")
                        else:
                            console.print(f"[yellow]Unknown mode: {new_mode}. Use: always, auto, or yolo[/yellow]")
                    continue

                # /dir command - mount workspace directory
                if user_input == '/dir' or user_input.startswith('/dir '):
                    parts = user_input.split(maxsplit=1)
                    if len(parts) == 1:
                        if current_work_dir:
                            console.print(f"📁 [bold cyan]Current workspace:[/bold cyan] {current_work_dir}")
                        else:
                            console.print("[yellow]No workspace mounted. Usage: /dir /path/to/workspace[/yellow]")
                    else:
                        new_dir = parts[1].strip()
                        new_dir_path = Path(new_dir).expanduser().resolve()

                        if not new_dir_path.exists():
                            console.print(f"[red]Directory not found: {new_dir}[/red]")
                            continue
                        if not new_dir_path.is_dir():
                            console.print(f"[red]Not a directory: {new_dir}[/red]")
                            continue

                        current_work_dir = str(new_dir_path)

                        # Recreate agent executor with new work_dir - use session memory
                        if is_direct or is_local:
                            memory = create_session_memory(current_session_id)
                            try:
                                agent_executor, mcp_session_manager, llm, llm_model, filesystem_tools, terminal_tools, planning_tools = _setup_local_agent_executor(
                                    client, agent_def, tuple(added_toolkit_configs), config, current_model, current_temperature, current_max_tokens, memory, current_work_dir, plan_state
                                )
                                console.print(Panel(
                                    f"[cyan]✓ Mounted: [bold]{current_work_dir}[/bold]\n  Terminal + filesystem tools enabled.[/cyan]",
                                    border_style="cyan",
                                    box=box.ROUNDED
                                ))
                            except Exception as e:
                                console.print(f"[red]Error mounting directory: {e}[/red]")
                        else:
                            console.print("[yellow]Directory mounting is only available for local agents and direct chat.[/yellow]")
                    continue

                # /session command - list or resume sessions
                if user_input == '/session' or user_input.startswith('/session '):
                    from .tools import list_sessions, PlanState
                    parts = user_input.split(maxsplit=2)

                    if len(parts) == 1 or parts[1] == 'list':
                        # List all sessions with plans
                        sessions = list_sessions()
                        if not sessions:
                            console.print("[dim]No saved sessions found.[/dim]")
                            console.print("[dim]Sessions are created when you start chatting.[/dim]")
                        else:
                            console.print("\n📋 [bold cyan]Saved Sessions:[/bold cyan]\n")
                            from datetime import datetime
                            for i, sess in enumerate(sessions[:10], 1):  # Show last 10
                                modified = datetime.fromtimestamp(sess['modified']).strftime('%Y-%m-%d %H:%M')

                                # Build session info line
                                agent_info = sess.get('agent_name', 'unknown')
                                model_info = sess.get('model', '')
                                if model_info:
                                    agent_info = f"{agent_info} ({model_info})"

                                # Check if this is current session
                                is_current = sess['session_id'] == current_session_id
                                current_marker = " [green]◀ current[/green]" if is_current else ""

                                # Plan progress if available
                                if sess.get('steps_total', 0) > 0:
                                    progress = f"[{sess['steps_completed']}/{sess['steps_total']}]"
                                    status = "✓" if sess['steps_completed'] == sess['steps_total'] else "○"
                                    plan_info = f" - {sess.get('title', 'Untitled')} {progress}"
                                else:
                                    status = "●"
                                    plan_info = ""

                                console.print(f"  {status} [cyan]{sess['session_id']}[/cyan]{plan_info}")
                                console.print(f"    [dim]{agent_info} • {modified}[/dim]{current_marker}")
                            console.print(f"\n[dim]Usage: /session resume <session_id>[/dim]")

                    elif parts[1] == 'resume' and len(parts) > 2:
                        session_id = parts[2].strip()
                        from .tools import load_session_metadata

                        # Check if session exists (either plan or metadata)
                        loaded_state = PlanState.load(session_id)
                        session_metadata = load_session_metadata(session_id)

                        if loaded_state or session_metadata:
                            # Update current session to use this session_id
                            current_session_id = session_id

                            # Restore memory from session SQLite (reuses existing memory.db file)
                            memory = create_session_memory(session_id)

                            # Update plan state if available
                            if loaded_state:
                                plan_state.update(loaded_state.to_dict())
                                resume_info = f"\n\n{loaded_state.render()}"
                            else:
                                plan_state['session_id'] = session_id
                                resume_info = ""

                            # Show session info
                            agent_info = session_metadata.get('agent_name', 'unknown') if session_metadata else 'unknown'
                            model_info = session_metadata.get('model', '') if session_metadata else ''

                            console.print(Panel(
                                f"[green]✓ Resumed session:[/green] [bold]{session_id}[/bold]\n"
                                f"[dim]Agent: {agent_info}" + (f" • Model: {model_info}" if model_info else "") + f"[/dim]"
                                f"{resume_info}",
                                border_style="green",
                                box=box.ROUNDED
                            ))

                            # Recreate planning tools with loaded state
                            if is_direct or is_local:
                                try:
                                    from .tools import get_planning_tools
                                    if loaded_state:
                                        planning_tools, _ = get_planning_tools(loaded_state)
                                        # Note: We'd need to rebuild the agent to inject new tools
                                        # For now, the plan state dict is updated so new tool calls will see it
                                except Exception as e:
                                    console.print(f"[yellow]Warning: Could not reload planning tools: {e}[/yellow]")
                        else:
                            console.print(f"[red]Session not found: {session_id}[/red]")
                    else:
                        console.print("[dim]Usage: /session [list] or /session resume <session_id>[/dim]")
                    continue

                # /agent command - switch to a different agent
                if user_input == '/agent':
                    selected_agent = _select_agent_interactive(client, config)
                    if selected_agent and selected_agent != '__direct__':
                        # Load the new agent
                        new_is_local = Path(selected_agent).exists()

                        if new_is_local:
                            agent_def = load_agent_definition(selected_agent)
                            agent_name = agent_def.get('name', Path(selected_agent).stem)
                            agent_type = "Local Agent"
                            is_local = True
                            is_direct = False
                            current_agent_file = selected_agent  # Track for /reload
                        else:
                            # Platform agent
                            agents = client.get_list_of_apps()
                            new_agent = None
                            try:
                                agent_id = int(selected_agent)
                                new_agent = next((a for a in agents if a['id'] == agent_id), None)
                            except ValueError:
                                new_agent = next((a for a in agents if a['name'] == selected_agent), None)

                            # NOTE(review): if the platform agent is not found we
                            # silently fall through with no message — confirm intended
                            if new_agent:
                                agent_name = new_agent['name']
                                agent_type = "Platform Agent"
                                is_local = False
                                is_direct = False
                                current_agent_file = None  # No file for platform agents

                                # Setup platform agent
                                details = client.get_app_details(new_agent['id'])
                                version_id = details['versions'][0]['id']
                                agent_executor = client.application(
                                    application_id=new_agent['id'],
                                    application_version_id=version_id,
                                    memory=memory,
                                    chat_history=chat_history
                                )
                                console.print(Panel(
                                    f"[cyan]ℹ Switched to agent: [bold]{agent_name}[/bold] ({agent_type}). Chat history preserved.[/cyan]",
                                    border_style="cyan",
                                    box=box.ROUNDED
                                ))
                                continue

                        # For local agents, recreate executor
                        if new_is_local:
                            memory = create_session_memory(current_session_id)
                            added_toolkit_configs = []
                            try:
                                agent_executor, mcp_session_manager, llm, llm_model, filesystem_tools, terminal_tools, planning_tools = _setup_local_agent_executor(
                                    client, agent_def, tuple(added_toolkit_configs), config, current_model, current_temperature, current_max_tokens, memory, current_work_dir, plan_state
                                )
                                console.print(Panel(
                                    f"[cyan]ℹ Switched to agent: [bold]{agent_name}[/bold] ({agent_type}). Agent state reset, chat history preserved.[/cyan]",
                                    border_style="cyan",
                                    box=box.ROUNDED
                                ))
                            except Exception as e:
                                console.print(f"[red]Error switching agent: {e}[/red]")
                    elif selected_agent == '__direct__':
                        # Switch back to direct mode
                        is_direct = True
                        is_local = False
                        current_agent_file = None  # No file for direct mode
                        agent_name = "Alita"
                        agent_type = "Direct LLM"
                        alita_prompt = _get_alita_system_prompt(config)
                        # NOTE(review): fallback model here is 'gpt-4o' while the
                        # startup direct-mode default is 'gpt-5' — confirm which
                        # default is intended and unify.
                        agent_def = {
                            'model': current_model or 'gpt-4o',
                            'temperature': current_temperature if current_temperature is not None else 0.1,
                            'max_tokens': current_max_tokens or 4096,
                            'system_prompt': alita_prompt
                        }
                        memory = create_session_memory(current_session_id)
                        try:
                            agent_executor, mcp_session_manager, llm, llm_model, filesystem_tools, terminal_tools, planning_tools = _setup_local_agent_executor(
                                client, agent_def, tuple(added_toolkit_configs), config, current_model, current_temperature, current_max_tokens, memory, current_work_dir, plan_state
                            )
                            console.print(Panel(
                                f"[cyan]ℹ Switched to [bold]Alita[/bold]. Agent state reset, chat history preserved.[/cyan]",
                                border_style="cyan",
                                box=box.ROUNDED
                            ))
                        except Exception as e:
                            console.print(f"[red]Error switching to direct mode: {e}[/red]")
                    continue

                # Execute agent
                if (is_direct or is_local) and agent_executor is None:
                    # Local agent without tools: use direct LLM call with streaming
                    system_prompt = agent_def.get('system_prompt', '')
                    messages = []
                    if system_prompt:
                        messages.append({"role": "system", "content": system_prompt})

                    # Add chat history
                    for msg in chat_history:
                        messages.append(msg)

                    # Add user message
                    messages.append({"role": "user", "content": user_input})

                    try:
                        # Try streaming if available
                        if hasattr(llm, 'stream'):
                            output_chunks = []
                            first_chunk = True

                            # Show spinner until first token arrives
                            status = console.status("[yellow]Thinking...[/yellow]", spinner="dots")
                            status.start()

                            # Stream the response token by token
                            for chunk in llm.stream(messages):
                                if hasattr(chunk, 'content'):
                                    token = chunk.content
                                else:
                                    token = str(chunk)

                                if token:
                                    # Stop spinner and show agent name on first token
                                    if first_chunk:
                                        status.stop()
                                        console.print(f"\n[bold bright_cyan]{agent_name}:[/bold bright_cyan]\n", end="")
                                        first_chunk = False

                                    console.print(token, end="", markup=False)
                                    output_chunks.append(token)

                            # Stop status if still running (no tokens received)
                            if first_chunk:
                                status.stop()
                                console.print(f"\n[bold bright_cyan]{agent_name}:[/bold bright_cyan]\n", end="")

                            output = ''.join(output_chunks)
                            console.print()  # New line after streaming
                        else:
                            # Fallback to non-streaming with spinner
                            with console.status("[yellow]Thinking...[/yellow]", spinner="dots"):
                                response = llm.invoke(messages)
                                if hasattr(response, 'content'):
                                    output = response.content
                                else:
                                    output = str(response)

                            # Display response after spinner stops
                            console.print(f"\n[bold bright_cyan]{agent_name}:[/bold bright_cyan]")
                            if any(marker in output for marker in ['```', '**', '##', '- ', '* ']):
                                console.print(Markdown(output))
                            else:
                                console.print(output)

                        # BUGFIX: persist the exchange — the original branch
                        # continued without updating chat_history, so direct-LLM
                        # chats had no conversation memory even though messages
                        # are rebuilt from chat_history each turn.
                        chat_history.append({"role": "user", "content": user_input})
                        chat_history.append({"role": "assistant", "content": output})
                    except Exception as e:
                        console.print(f"\n[red]✗ Error: {e}[/red]\n")
                    continue
                else:
                    # Agent with tools or platform agent: use agent executor
                    # Setup callback for verbose output
                    from langchain_core.runnables import RunnableConfig
                    from langgraph.errors import GraphRecursionError

                    invoke_config = None
                    if show_verbose:
                        cli_callback = create_cli_callback(verbose=True, debug=debug_mode)
                        invoke_config = RunnableConfig(callbacks=[cli_callback])

                    # Track recursion continuation state
                    continue_from_recursion = False
                    recursion_attempts = 0
                    max_recursion_continues = 5  # Prevent infinite continuation loops

                    # BUGFIX: pre-bind result/output so the post-loop code below
                    # can never raise UnboundLocalError (previously `output` was
                    # unbound on the first successful invoke).
                    result = None
                    output = None

                    while True:
                        try:
                            # Show status only when not verbose (verbose shows its own progress)
                            if not show_verbose:
                                with console.status("[yellow]Thinking...[/yellow]", spinner="dots"):
                                    result = agent_executor.invoke(
                                        {
                                            "input": [user_input] if not is_local else user_input,
                                            "chat_history": chat_history
                                        },
                                        config=invoke_config
                                    )
                            else:
                                if not continue_from_recursion:
                                    console.print()  # Add spacing before tool calls
                                result = agent_executor.invoke(
                                    {
                                        "input": [user_input] if not is_local else user_input,
                                        "chat_history": chat_history
                                    },
                                    config=invoke_config
                                )

                            # Success - exit the retry loop
                            break

                        except GraphRecursionError as e:
                            recursion_attempts += 1
                            step_limit = getattr(e, 'recursion_limit', 25)

                            console.print()
                            console.print(Panel(
                                f"[yellow]⚠ Step limit reached ({step_limit} steps)[/yellow]\n\n"
                                f"The agent has executed the maximum number of steps allowed.\n"
                                f"This usually happens with complex tasks that require many tool calls.\n\n"
                                f"[dim]Attempt {recursion_attempts}/{max_recursion_continues}[/dim]",
                                title="Step Limit Reached",
                                border_style="yellow",
                                box=box.ROUNDED
                            ))

                            if recursion_attempts >= max_recursion_continues:
                                console.print("[red]Maximum continuation attempts reached. Please break down your request into smaller tasks.[/red]")
                                output = f"[Step limit reached after {recursion_attempts} continuation attempts. The task may be too complex - please break it into smaller steps.]"
                                break

                            # Prompt user for action
                            console.print("\nWhat would you like to do?")
                            console.print("  [bold cyan]c[/bold cyan] - Continue execution (agent will resume from checkpoint)")
                            console.print("  [bold cyan]s[/bold cyan] - Stop and get partial results")
                            console.print("  [bold cyan]n[/bold cyan] - Start a new request")
                            console.print()

                            try:
                                choice = input_handler.get_input("Choice [c/s/n]: ").strip().lower()
                            except (KeyboardInterrupt, EOFError):
                                choice = 's'

                            if choice == 'c':
                                # Continue - the checkpoint should preserve state
                                # We'll re-invoke with a continuation message
                                continue_from_recursion = True
                                console.print("\n[cyan]Continuing from last checkpoint...[/cyan]\n")

                                # Modify the input to signal continuation
                                user_input = "Continue from where you left off. Complete the remaining steps of the task."
                                continue  # Retry the invoke

                            elif choice == 's':
                                # Stop and try to extract partial results
                                console.print("\n[yellow]Stopped. Attempting to extract partial results...[/yellow]")
                                output = "[Task stopped due to step limit. Partial work may have been completed - check any files or state that were modified.]"
                                break

                            else:  # 'n' or anything else
                                console.print("\n[dim]Skipped. Enter a new request.[/dim]")
                                output = None
                                break

                    # BUGFIX (ordering): extract the final output from the result
                    # BEFORE the bail-out check. The original tested
                    # `output is None` first (and used a fragile `'result' in
                    # dir()` guard), so the success path either crashed or
                    # skipped display/history.
                    if result is not None:
                        output = extract_output_from_result(result)

                    # Skip chat history update if we bailed out
                    if output is None:
                        continue

                    # Display response
                    console.print(f"\n[bold bright_cyan]{agent_name}:[/bold bright_cyan]")
                    if any(marker in output for marker in ['```', '**', '##', '- ', '* ']):
                        console.print(Markdown(output))
                    else:
                        console.print(output)

                    # Update chat history
                    chat_history.append({"role": "user", "content": user_input})
                    chat_history.append({"role": "assistant", "content": output})

            except KeyboardInterrupt:
                console.print("\n\n[yellow]Interrupted. Type 'exit' to quit or continue chatting.[/yellow]")
                continue
            except EOFError:
                console.print("\n\n[bold cyan]Goodbye! 👋[/bold cyan]")
                break

    except click.ClickException:
        raise
    except Exception as e:
        logger.exception("Failed to start chat")
        error_panel = Panel(
            str(e),
            title="Error",
            border_style="red",
            box=box.ROUNDED
        )
        console.print(error_panel, style="red")
        raise click.Abort()
1574
+
1575
+
1576
def _print_step_limit_notice(step_limit: int) -> None:
    """Render the 'step limit reached' advisory panel on the console.

    Shared by the local-agent and platform-agent execution paths so the
    wording stays identical in both.
    """
    console.print()
    console.print(Panel(
        f"[yellow]⚠ Step limit reached ({step_limit} steps)[/yellow]\n\n"
        f"The agent exceeded the maximum number of steps.\n"
        f"This task may be too complex for a single run.\n\n"
        f"[bold]Suggestions:[/bold]\n"
        f"• Use [cyan]alita agent chat[/cyan] for interactive continuation\n"
        f"• Break the task into smaller, focused requests\n"
        f"• Check if partial work was completed (files created, etc.)",
        title="Step Limit Reached",
        border_style="yellow",
        box=box.ROUNDED
    ))


def _print_error_panel(message: str) -> None:
    """Render *message* in a red error panel on the console."""
    console.print(
        Panel(message, title="Error", border_style="red", box=box.ROUNDED),
        style="red",
    )


@agent.command('run')
@click.argument('agent_source')
@click.argument('message')
@click.option('--version', help='Agent version (for platform agents)')
@click.option('--toolkit-config', multiple=True, type=click.Path(exists=True),
              help='Toolkit configuration files')
@click.option('--model', help='Override LLM model')
@click.option('--temperature', type=float, help='Override temperature')
@click.option('--max-tokens', type=int, help='Override max tokens')
@click.option('--save-thread', help='Save thread ID to file for continuation')
@click.option('--dir', 'work_dir', type=click.Path(exists=True, file_okay=False, dir_okay=True),
              help='Grant agent filesystem access to this directory')
@click.option('--verbose', '-v', type=click.Choice(['quiet', 'default', 'debug']), default='default',
              help='Output verbosity level: quiet (final output only), default (tool calls + outputs), debug (all including LLM calls)')
@click.pass_context
def agent_run(ctx, agent_source: str, message: str, version: Optional[str],
              toolkit_config: tuple, model: Optional[str],
              temperature: Optional[float], max_tokens: Optional[int],
              save_thread: Optional[str], work_dir: Optional[str],
              verbose: str):
    """
    Run agent with a single message (handoff mode).

    AGENT_SOURCE can be:
    - Platform agent ID or name
    - Path to local agent file

    MESSAGE is the input message to send to the agent.

    Examples:

        # Simple query
        alita-cli agent run my-agent "What is the status of JIRA-123?"

        # With local agent
        alita-cli agent run .github/agents/sdk-dev.agent.md \\
            "Create a new toolkit for Stripe API"

        # With toolkit configs and JSON output
        alita-cli --output json agent run my-agent "Search for bugs" \\
            --toolkit-config jira-config.json

        # With filesystem access
        alita-cli agent run my-agent "Analyze the code in src/" --dir ./myproject

        # Save thread for continuation
        alita-cli agent run my-agent "Start task" \\
            --save-thread thread.txt

        # Quiet mode (hide tool calls and thinking)
        alita-cli agent run my-agent "Query" --verbose quiet

        # Debug mode (show all including LLM calls)
        alita-cli agent run my-agent "Query" --verbose debug
    """
    formatter = ctx.obj['formatter']
    client = get_client(ctx)

    # Derived verbosity flags.
    show_verbose = verbose != 'quiet'
    debug_mode = verbose == 'debug'
    # JSON output must stay machine-readable: no spinners/panels on stdout.
    json_output = formatter.__class__.__name__ == 'JSONFormatter'

    try:
        # A path that exists on disk selects the local-agent code path;
        # anything else is resolved against the platform agent registry.
        is_local = Path(agent_source).exists()

        if is_local:
            agent_def = load_agent_definition(agent_source)
            agent_name = agent_def.get('name', Path(agent_source).stem)

            # In-memory checkpointing: nothing persists past this invocation.
            from langgraph.checkpoint.sqlite import SqliteSaver
            memory = SqliteSaver(sqlite3.connect(":memory:", check_same_thread=False))

            # Setup local agent executor (reuses same logic as agent_chat).
            try:
                agent_executor, mcp_session_manager, llm, llm_model, filesystem_tools, terminal_tools, planning_tools = _setup_local_agent_executor(
                    client, agent_def, toolkit_config, ctx.obj['config'], model, temperature, max_tokens, memory, work_dir, {}
                )
            except Exception as e:
                _print_error_panel(f"Failed to setup agent: {e}")
                raise click.Abort()

            if agent_executor:
                from langchain_core.runnables import RunnableConfig
                from langgraph.errors import GraphRecursionError

                # Callbacks stream tool calls / LLM events to the console.
                invoke_config = None
                if show_verbose:
                    cli_callback = create_cli_callback(verbose=True, debug=debug_mode)
                    invoke_config = RunnableConfig(callbacks=[cli_callback])

                try:
                    if json_output:
                        # JSON output: always quiet, no callbacks.
                        with console.status("[yellow]Processing...[/yellow]", spinner="dots"):
                            result = agent_executor.invoke({
                                "input": message,
                                "chat_history": []
                            })

                        click.echo(formatter._dump({
                            'agent': agent_name,
                            'message': message,
                            'response': extract_output_from_result(result),
                            'full_result': result
                        }))
                    else:
                        # Spinner only when not verbose (verbose mode streams
                        # its own progress via the callback).
                        if not show_verbose:
                            with console.status("[yellow]Processing...[/yellow]", spinner="dots"):
                                result = agent_executor.invoke(
                                    {"input": message, "chat_history": []},
                                    config=invoke_config
                                )
                        else:
                            console.print()  # spacing before tool-call stream
                            result = agent_executor.invoke(
                                {"input": message, "chat_history": []},
                                config=invoke_config
                            )

                        output = extract_output_from_result(result)
                        display_output(agent_name, message, output)

                except GraphRecursionError as e:
                    # NOTE(review): assumes the exception exposes
                    # `recursion_limit`; 25 matches langgraph's default —
                    # confirm against the installed langgraph version.
                    step_limit = getattr(e, 'recursion_limit', 25)
                    if json_output:
                        # Keep stdout pure JSON — no rich panel here.
                        click.echo(formatter._dump({
                            'agent': agent_name,
                            'message': message,
                            'error': 'step_limit_reached',
                            'step_limit': step_limit,
                            'response': f'Step limit of {step_limit} reached. Task may be too complex.'
                        }))
                    else:
                        _print_step_limit_notice(step_limit)
            else:
                # Simple LLM mode without tools: plain chat completion using
                # the agent's system prompt.
                system_prompt = agent_def.get('system_prompt', '')
                messages = []
                if system_prompt:
                    messages.append({"role": "system", "content": system_prompt})
                messages.append({"role": "user", "content": message})

                if json_output:
                    response = llm.invoke(messages)
                    output = response.content if hasattr(response, 'content') else str(response)

                    click.echo(formatter._dump({
                        'agent': agent_name,
                        'message': message,
                        'response': output
                    }))
                else:
                    with console.status("[yellow]Processing...[/yellow]", spinner="dots"):
                        response = llm.invoke(messages)
                        output = response.content if hasattr(response, 'content') else str(response)

                    display_output(agent_name, message, output)

        else:
            # Platform agent: resolve by numeric ID first, then by name.
            agents = client.get_list_of_apps()
            agent = None

            try:
                agent_id = int(agent_source)
                agent = next((a for a in agents if a['id'] == agent_id), None)
            except ValueError:
                agent = next((a for a in agents if a['name'] == agent_source), None)

            if not agent:
                raise click.ClickException(f"Agent '{agent_source}' not found")

            # Resolve version: explicit --version by name, else latest.
            details = client.get_app_details(agent['id'])

            if version:
                version_obj = next((v for v in details['versions'] if v['name'] == version), None)
                if not version_obj:
                    raise click.ClickException(f"Version '{version}' not found")
                version_id = version_obj['id']
            else:
                version_id = details['versions'][0]['id']

            # Load toolkit configs from CLI options.
            toolkit_configs = []
            if toolkit_config:
                for config_path in toolkit_config:
                    toolkit_configs.append(load_toolkit_config(config_path))

            # In-memory checkpointing for this single invocation.
            from langgraph.checkpoint.sqlite import SqliteSaver
            memory = SqliteSaver(sqlite3.connect(":memory:", check_same_thread=False))

            agent_executor = client.application(
                application_id=agent['id'],
                application_version_id=version_id,
                memory=memory
            )

            from langchain_core.runnables import RunnableConfig
            from langgraph.errors import GraphRecursionError

            invoke_config = None
            if show_verbose:
                cli_callback = create_cli_callback(verbose=True, debug=debug_mode)
                invoke_config = RunnableConfig(callbacks=[cli_callback])

            try:
                # NOTE(review): platform agents wrap the input in a list,
                # unlike the local path — presumably required by
                # client.application's graph; verify against the runtime.
                if json_output:
                    result = agent_executor.invoke({
                        "input": [message],
                        "chat_history": []
                    })

                    click.echo(formatter._dump({
                        'agent': agent['name'],
                        'message': message,
                        'response': result.get('output', ''),
                        'full_result': result
                    }))
                else:
                    if not show_verbose:
                        with console.status("[yellow]Processing...[/yellow]", spinner="dots"):
                            result = agent_executor.invoke(
                                {"input": [message], "chat_history": []},
                                config=invoke_config
                            )
                    else:
                        console.print()  # spacing before tool-call stream
                        result = agent_executor.invoke(
                            {"input": [message], "chat_history": []},
                            config=invoke_config
                        )

                    response = result.get('output', 'No response')
                    display_output(agent['name'], message, response)

                # Persist thread metadata so the conversation can be resumed.
                if save_thread:
                    thread_data = {
                        'agent_id': agent['id'],
                        'agent_name': agent['name'],
                        'version_id': version_id,
                        'thread_id': result.get('thread_id'),
                        'last_message': message
                    }
                    with open(save_thread, 'w') as f:
                        json.dump(thread_data, f, indent=2)
                    logger.info(f"Thread saved to {save_thread}")

            except GraphRecursionError as e:
                step_limit = getattr(e, 'recursion_limit', 25)
                if json_output:
                    # Keep stdout pure JSON — no rich panel here.
                    click.echo(formatter._dump({
                        'agent': agent['name'],
                        'message': message,
                        'error': 'step_limit_reached',
                        'step_limit': step_limit,
                        'response': f'Step limit of {step_limit} reached. Task may be too complex.'
                    }))
                else:
                    _print_step_limit_notice(step_limit)

    except click.ClickException:
        raise
    except Exception as e:
        logger.exception("Failed to run agent")
        _print_error_panel(str(e))
        raise click.Abort()