alita-sdk 0.3.457__py3-none-any.whl → 0.3.465__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only.

Potentially problematic release: this version of alita-sdk has been flagged as possibly problematic.

Files changed (37)
  1. alita_sdk/cli/__init__.py +10 -0
  2. alita_sdk/cli/__main__.py +17 -0
  3. alita_sdk/cli/agent/__init__.py +0 -0
  4. alita_sdk/cli/agent/default.py +176 -0
  5. alita_sdk/cli/agent_executor.py +155 -0
  6. alita_sdk/cli/agent_loader.py +197 -0
  7. alita_sdk/cli/agent_ui.py +218 -0
  8. alita_sdk/cli/agents.py +1911 -0
  9. alita_sdk/cli/callbacks.py +576 -0
  10. alita_sdk/cli/cli.py +159 -0
  11. alita_sdk/cli/config.py +164 -0
  12. alita_sdk/cli/formatting.py +182 -0
  13. alita_sdk/cli/input_handler.py +256 -0
  14. alita_sdk/cli/mcp_loader.py +315 -0
  15. alita_sdk/cli/toolkit.py +330 -0
  16. alita_sdk/cli/toolkit_loader.py +55 -0
  17. alita_sdk/cli/tools/__init__.py +36 -0
  18. alita_sdk/cli/tools/approval.py +224 -0
  19. alita_sdk/cli/tools/filesystem.py +905 -0
  20. alita_sdk/cli/tools/planning.py +403 -0
  21. alita_sdk/cli/tools/terminal.py +280 -0
  22. alita_sdk/runtime/clients/client.py +16 -1
  23. alita_sdk/runtime/langchain/constants.py +2 -1
  24. alita_sdk/runtime/langchain/langraph_agent.py +17 -5
  25. alita_sdk/runtime/langchain/utils.py +1 -1
  26. alita_sdk/runtime/tools/function.py +17 -5
  27. alita_sdk/runtime/tools/llm.py +65 -7
  28. alita_sdk/tools/base_indexer_toolkit.py +54 -2
  29. alita_sdk/tools/qtest/api_wrapper.py +871 -32
  30. alita_sdk/tools/sharepoint/api_wrapper.py +22 -2
  31. alita_sdk/tools/sharepoint/authorization_helper.py +17 -1
  32. {alita_sdk-0.3.457.dist-info → alita_sdk-0.3.465.dist-info}/METADATA +145 -2
  33. {alita_sdk-0.3.457.dist-info → alita_sdk-0.3.465.dist-info}/RECORD +37 -15
  34. alita_sdk-0.3.465.dist-info/entry_points.txt +2 -0
  35. {alita_sdk-0.3.457.dist-info → alita_sdk-0.3.465.dist-info}/WHEEL +0 -0
  36. {alita_sdk-0.3.457.dist-info → alita_sdk-0.3.465.dist-info}/licenses/LICENSE +0 -0
  37. {alita_sdk-0.3.457.dist-info → alita_sdk-0.3.465.dist-info}/top_level.txt +0 -0
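
Most of this release is a new alita_sdk/cli package (agent loading, toolkits, MCP support, rich console output). The console-script name added in entry_points.txt is not visible in this summary, but the presence of alita_sdk/cli/__main__.py means the package should also be runnable as a module. A minimal smoke-test sketch, under that assumption (not taken from the package itself):

    # Equivalent to running `python -m alita_sdk.cli` from a shell;
    # runpy executes the package's __main__ submodule in a fresh namespace.
    import runpy

    runpy.run_module("alita_sdk.cli", run_name="__main__")

The largest file shown in full below is the new alita_sdk/cli/callbacks.py.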
alita_sdk/cli/callbacks.py (new file)
@@ -0,0 +1,576 @@
+ """
+ CLI Callback Handler for Alita CLI.
+
+ Provides rich console output for tool calls, LLM thinking, and agent steps
+ during agent execution in the CLI with beautifully styled blocks.
+ """
+
+ import logging
+ import json
+ import traceback
+ from datetime import datetime, timezone
+ from uuid import UUID
+ from typing import Any, Dict, List, Optional, Sequence
+ from collections import defaultdict
+
+ from langchain_core.callbacks import BaseCallbackHandler
+ from langchain_core.outputs import ChatGenerationChunk, LLMResult
+ from langchain_core.messages import BaseMessage, AIMessage, ToolMessage
+
+ from rich.console import Console, Group
+ from rich.panel import Panel
+ from rich.syntax import Syntax
+ from rich.text import Text
+ from rich.table import Table
+ from rich.tree import Tree
+ from rich import box
+ from rich.markdown import Markdown
+ from rich.rule import Rule
+ from rich.padding import Padding
+
+ logger = logging.getLogger(__name__)
+
+ # Create a rich console for beautiful output
+ console = Console()
+
+ # Custom box styles for different block types
+ TOOL_BOX = box.ROUNDED
+ OUTPUT_BOX = box.ROUNDED
+ ERROR_BOX = box.HEAVY
+
+
+ class CLICallbackHandler(BaseCallbackHandler):
+     """
+     CLI Callback handler that displays tool calls, LLM responses, and agent steps
+     with rich formatting using beautifully styled blocks.
+     """
+
+     def __init__(self, verbose: bool = True, show_tool_outputs: bool = True,
+                  show_thinking: bool = True, show_llm_calls: bool = False):
+         """
+         Initialize the CLI callback handler.
+
+         Args:
+             verbose: Show detailed output for all operations
+             show_tool_outputs: Show tool call inputs and outputs
+             show_thinking: Show LLM thinking/reasoning process
+             show_llm_calls: Show LLM call start/end (can be noisy)
+         """
+         super().__init__()
+         self.verbose = verbose
+         self.show_tool_outputs = show_tool_outputs
+         self.show_thinking = show_thinking
+         self.show_llm_calls = show_llm_calls
+
+         # Track state
+         self.tool_runs: Dict[str, Dict[str, Any]] = {}
+         self.llm_runs: Dict[str, Dict[str, Any]] = {}
+         self.pending_tokens: Dict[str, List[str]] = defaultdict(list)
+         self.current_model: str = ""
+         self.step_counter: int = 0
+
+     def _format_json_content(self, data: Any, max_length: int = 1500) -> str:
+         """Format data as pretty JSON string."""
+         try:
+             if isinstance(data, str):
+                 # Try to parse if it looks like JSON
+                 if data.strip().startswith(('{', '[')):
+                     try:
+                         data = json.loads(data)
+                     except json.JSONDecodeError:
+                         return data[:max_length] + ('...' if len(data) > max_length else '')
+
+             formatted = json.dumps(data, indent=2, ensure_ascii=False, default=str)
+             if len(formatted) > max_length:
+                 formatted = formatted[:max_length] + f"\n... (truncated)"
+             return formatted
+         except Exception:
+             return str(data)[:max_length]
+
+     def _format_tool_output_content(self, output: Any) -> Any:
+         """Format tool output for display in panel."""
+         if output is None:
+             return Text("(no output)", style="dim italic")
+
+         try:
+             output_str = str(output)
+             max_length = 2000
+
+             # Check if it's JSON-like
+             if output_str.strip().startswith(('{', '[')):
+                 try:
+                     parsed = json.loads(output_str)
+                     formatted = json.dumps(parsed, indent=2, ensure_ascii=False)
+                     if len(formatted) > max_length:
+                         formatted = formatted[:max_length] + f"\n... (truncated, {len(output_str)} chars total)"
+                     return Syntax(formatted, "json", theme="monokai", word_wrap=True, line_numbers=False)
+                 except json.JSONDecodeError:
+                     pass
+
+             # Truncate if needed
+             if len(output_str) > max_length:
+                 output_str = output_str[:max_length] + f"\n... (truncated, {len(str(output))} chars total)"
+
+             # Check for markdown-like content
+             if any(marker in output_str for marker in ['```', '**', '##', '- ', '* ', '\n\n']):
+                 return Markdown(output_str)
+
+             return Text(output_str, style="white")
+
+         except Exception:
+             return Text(str(output)[:500], style="white")
+
+     #
+     # Tool Callbacks
+     #
+
+     def on_tool_start(
+         self,
+         serialized: Dict[str, Any],
+         input_str: str,
+         *,
+         run_id: UUID,
+         parent_run_id: Optional[UUID] = None,
+         tags: Optional[List[str]] = None,
+         metadata: Optional[Dict[str, Any]] = None,
+         inputs: Optional[Dict[str, Any]] = None,
+         **kwargs: Any,
+     ) -> None:
+         """Called when a tool starts running."""
+         if not self.show_tool_outputs:
+             return
+
+         tool_name = serialized.get("name", "Unknown Tool")
+         tool_run_id = str(run_id)
+         self.step_counter += 1
+
+         # Store tool run info
+         self.tool_runs[tool_run_id] = {
+             "name": tool_name,
+             "start_time": datetime.now(tz=timezone.utc),
+             "inputs": inputs or input_str,
+             "step": self.step_counter,
+         }
+
+         # Format inputs
+         tool_inputs = inputs if inputs else input_str
+
+         # Create the tool call panel
+         console.print()
+
+         # Build content for the panel
+         content_parts = []
+
+         if tool_inputs:
+             if isinstance(tool_inputs, dict):
+                 formatted_input = self._format_json_content(tool_inputs, max_length=1200)
+                 input_syntax = Syntax(formatted_input, "json", theme="monokai",
+                                       word_wrap=True, line_numbers=False)
+                 content_parts.append(input_syntax)
+             elif isinstance(tool_inputs, str) and len(tool_inputs) > 0:
+                 display_input = tool_inputs[:800] + "..." if len(tool_inputs) > 800 else tool_inputs
+                 content_parts.append(Text(display_input, style="white"))
+
+         if content_parts:
+             panel_content = Group(*content_parts)
+         else:
+             panel_content = Text("(no input)", style="dim italic")
+
+         # Create styled panel
+         panel = Panel(
+             panel_content,
+             title=f"[bold yellow]🔧 Tool Call[/bold yellow] [dim]│[/dim] [bold cyan]{tool_name}[/bold cyan]",
+             title_align="left",
+             subtitle=f"[dim]Step {self.step_counter}[/dim]",
+             subtitle_align="right",
+             border_style="yellow",
+             box=TOOL_BOX,
+             padding=(0, 1),
+         )
+         console.print(panel)
+
+     def on_tool_end(
+         self,
+         output: Any,
+         *,
+         run_id: UUID,
+         parent_run_id: Optional[UUID] = None,
+         tags: Optional[List[str]] = None,
+         **kwargs: Any,
+     ) -> None:
+         """Called when a tool finishes running."""
+         if not self.show_tool_outputs:
+             return
+
+         tool_run_id = str(run_id)
+         tool_info = self.tool_runs.pop(tool_run_id, {})
+         tool_name = tool_info.get("name", kwargs.get("name", "Unknown"))
+         step_num = tool_info.get("step", "?")
+
+         # Calculate duration
+         start_time = tool_info.get("start_time")
+         duration_str = ""
+         if start_time:
+             elapsed = (datetime.now(tz=timezone.utc) - start_time).total_seconds()
+             duration_str = f" │ {elapsed:.2f}s"
+
+         # Format output
+         output_content = self._format_tool_output_content(output)
+
+         # Create result panel
+         panel = Panel(
+             output_content,
+             title=f"[bold green]✓ Result[/bold green] [dim]│[/dim] [dim]{tool_name}[/dim]",
+             title_align="left",
+             subtitle=f"[dim]Step {step_num}{duration_str}[/dim]",
+             subtitle_align="right",
+             border_style="green",
+             box=OUTPUT_BOX,
+             padding=(0, 1),
+         )
+         console.print(panel)
+         console.print()
+
+     def on_tool_error(
+         self,
+         error: BaseException,
+         *,
+         run_id: UUID,
+         parent_run_id: Optional[UUID] = None,
+         tags: Optional[List[str]] = None,
+         **kwargs: Any,
+     ) -> None:
+         """Called when a tool errors."""
+         tool_run_id = str(run_id)
+         tool_info = self.tool_runs.pop(tool_run_id, {})
+         tool_name = tool_info.get("name", kwargs.get("name", "Unknown"))
+         step_num = tool_info.get("step", "?")
+
+         # Calculate duration
+         start_time = tool_info.get("start_time")
+         duration_str = ""
+         if start_time:
+             elapsed = (datetime.now(tz=timezone.utc) - start_time).total_seconds()
+             duration_str = f" │ {elapsed:.2f}s"
+
+         # Build error content with exception details
+         content_parts = []
+
+         # Error message
+         error_msg = str(error)
+         content_parts.append(Text(error_msg, style="red bold"))
+
+         # Add traceback if available
+         tb_str = "".join(traceback.format_exception(type(error), error, error.__traceback__))
+         if tb_str and tb_str.strip():
+             content_parts.append(Text("")) # blank line
+             content_parts.append(Text("Exception Traceback:", style="dim bold"))
+             # Truncate if too long
+             max_tb_len = 1500
+             if len(tb_str) > max_tb_len:
+                 tb_str = tb_str[:max_tb_len] + f"\n... (truncated, {len(tb_str)} chars total)"
+             content_parts.append(Syntax(tb_str, "python", theme="monokai",
+                                         word_wrap=True, line_numbers=False))
+
+         panel_content = Group(*content_parts) if len(content_parts) > 1 else content_parts[0]
+
+         panel = Panel(
+             panel_content,
+             title=f"[bold red]✗ Error[/bold red] [dim]│[/dim] [bold]{tool_name}[/bold]",
+             title_align="left",
+             subtitle=f"[dim]Step {step_num}{duration_str}[/dim]",
+             subtitle_align="right",
+             border_style="red",
+             box=ERROR_BOX,
+             padding=(0, 1),
+         )
+         console.print()
+         console.print(panel)
+         console.print()
+
+     #
+     # LLM Callbacks
+     #
+
+     def on_llm_start(
+         self,
+         serialized: Dict[str, Any],
+         prompts: List[str],
+         *,
+         run_id: UUID,
+         parent_run_id: Optional[UUID] = None,
+         tags: Optional[List[str]] = None,
+         metadata: Optional[Dict[str, Any]] = None,
+         **kwargs: Any,
+     ) -> None:
+         """Called when LLM starts generating."""
+         if not self.show_llm_calls:
+             return
+
+         llm_run_id = str(run_id)
+         model_name = metadata.get("ls_model_name", "") if metadata else ""
+         self.current_model = model_name
+
+         self.llm_runs[llm_run_id] = {
+             "model": model_name,
+             "start_time": datetime.now(tz=timezone.utc),
+         }
+
+         # Display thinking indicator
+         console.print()
+         console.print(Panel(
+             Text("Processing...", style="italic"),
+             title=f"[bold blue]🤔 LLM[/bold blue] [dim]│[/dim] [dim]{model_name or 'model'}[/dim]",
+             title_align="left",
+             border_style="blue",
+             box=box.SIMPLE,
+             padding=(0, 1),
+         ))
+
+     def on_chat_model_start(
+         self,
+         serialized: Dict[str, Any],
+         messages: List[List[BaseMessage]],
+         *,
+         run_id: UUID,
+         parent_run_id: Optional[UUID] = None,
+         tags: Optional[List[str]] = None,
+         metadata: Optional[Dict[str, Any]] = None,
+         **kwargs: Any,
+     ) -> None:
+         """Called when chat model starts."""
+         if not self.show_llm_calls:
+             return
+
+         llm_run_id = str(run_id)
+         model_name = metadata.get("ls_model_name", "") if metadata else ""
+         self.current_model = model_name
+
+         self.llm_runs[llm_run_id] = {
+             "model": model_name,
+             "start_time": datetime.now(tz=timezone.utc),
+         }
+
+         # Display thinking indicator
+         console.print()
+         console.print(Panel(
+             Text("Processing...", style="italic"),
+             title=f"[bold blue]🤔 LLM[/bold blue] [dim]│[/dim] [dim]{model_name or 'model'}[/dim]",
+             title_align="left",
+             border_style="blue",
+             box=box.SIMPLE,
+             padding=(0, 1),
+         ))
+
+     def on_llm_new_token(
+         self,
+         token: str,
+         *,
+         chunk: Optional[ChatGenerationChunk] = None,
+         run_id: UUID,
+         parent_run_id: Optional[UUID] = None,
+         tags: Optional[List[str]] = None,
+         **kwargs: Any,
+     ) -> None:
+         """Called on each new LLM token."""
+         # Stream tokens if showing thinking process
+         if self.show_thinking and token:
+             self.pending_tokens[str(run_id)].append(token)
+
+     def on_llm_end(
+         self,
+         response: LLMResult,
+         *,
+         run_id: UUID,
+         parent_run_id: Optional[UUID] = None,
+         tags: Optional[List[str]] = None,
+         **kwargs: Any,
+     ) -> None:
+         """Called when LLM finishes."""
+         llm_run_id = str(run_id)
+         llm_info = self.llm_runs.pop(llm_run_id, {})
+
+         # Show any pending tokens (streaming output)
+         if self.show_thinking:
+             tokens = self.pending_tokens.pop(llm_run_id, [])
+             if tokens:
+                 thinking_text = "".join(tokens)
+                 if thinking_text.strip():
+                     # Show thinking in a subtle panel
+                     max_len = 600
+                     display_text = thinking_text[:max_len] + ('...' if len(thinking_text) > max_len else '')
+                     console.print(Panel(
+                         Text(display_text, style="dim italic"),
+                         title="[dim]💭 Thinking[/dim]",
+                         title_align="left",
+                         border_style="dim",
+                         box=box.SIMPLE,
+                         padding=(0, 1),
+                     ))
+
+         if self.show_llm_calls and llm_info:
+             start_time = llm_info.get("start_time")
+             model = llm_info.get("model", "model")
+             if start_time:
+                 elapsed = (datetime.now(tz=timezone.utc) - start_time).total_seconds()
+                 console.print(f"[dim]✓ LLM complete ({model}, {elapsed:.2f}s)[/dim]")
+
+     def on_llm_error(
+         self,
+         error: BaseException,
+         *,
+         run_id: UUID,
+         parent_run_id: Optional[UUID] = None,
+         tags: Optional[List[str]] = None,
+         **kwargs: Any,
+     ) -> None:
+         """Called when LLM errors."""
+         console.print()
+         console.print(Panel(
+             Text(str(error), style="red"),
+             title="[bold red]✗ LLM Error[/bold red]",
+             title_align="left",
+             border_style="red",
+             box=ERROR_BOX,
+             padding=(0, 1),
+         ))
+
+     #
+     # Chain Callbacks
+     #
+
+     def on_chain_start(
+         self,
+         serialized: Dict[str, Any],
+         inputs: Dict[str, Any],
+         *,
+         run_id: UUID,
+         parent_run_id: Optional[UUID] = None,
+         tags: Optional[List[str]] = None,
+         metadata: Optional[Dict[str, Any]] = None,
+         **kwargs: Any,
+     ) -> None:
+         """Called when chain starts."""
+         pass # Can be noisy, skip by default
+
+     def on_chain_end(
+         self,
+         outputs: Dict[str, Any],
+         *,
+         run_id: UUID,
+         parent_run_id: Optional[UUID] = None,
+         tags: Optional[List[str]] = None,
+         **kwargs: Any,
+     ) -> None:
+         """Called when chain ends."""
+         pass # Can be noisy, skip by default
+
+     def on_chain_error(
+         self,
+         error: BaseException,
+         *,
+         run_id: UUID,
+         parent_run_id: Optional[UUID] = None,
+         tags: Optional[List[str]] = None,
+         **kwargs: Any,
+     ) -> None:
+         """Called when chain errors."""
+         if self.verbose:
+             console.print()
+             console.print(Panel(
+                 Text(str(error), style="red"),
+                 title="[bold red]✗ Chain Error[/bold red]",
+                 title_align="left",
+                 border_style="red",
+                 box=ERROR_BOX,
+                 padding=(0, 1),
+             ))
+
+     #
+     # Agent Callbacks
+     #
+
+     def on_agent_action(
+         self,
+         action: Any,
+         *,
+         run_id: UUID,
+         parent_run_id: Optional[UUID] = None,
+         tags: Optional[List[str]] = None,
+         **kwargs: Any,
+     ) -> None:
+         """Called when agent takes an action."""
+         # This is handled by on_tool_start, so we skip to avoid duplicates
+         pass
+
+     def on_agent_finish(
+         self,
+         finish: Any,
+         *,
+         run_id: UUID,
+         parent_run_id: Optional[UUID] = None,
+         tags: Optional[List[str]] = None,
+         **kwargs: Any,
+     ) -> None:
+         """Called when agent finishes."""
+         if self.verbose and self.show_llm_calls:
+             console.print(Rule("Agent Complete", style="dim"))
+
+     #
+     # Custom Events (LangGraph)
+     #
+
+     def on_custom_event(
+         self,
+         name: str,
+         data: Any,
+         *,
+         run_id: UUID,
+         tags: Optional[List[str]] = None,
+         metadata: Optional[Dict[str, Any]] = None,
+         **kwargs: Any,
+     ) -> None:
+         """Handle custom events from LangGraph."""
+         if not self.verbose:
+             return
+
+         if name == "on_conditional_edge":
+             # Show decision making in debug mode
+             if self.show_llm_calls:
+                 condition = data.get('condition', '')
+                 if condition:
+                     console.print(f"[dim]📍 Conditional: {condition[:100]}[/dim]")
+
+         elif name == "on_transitional_edge":
+             # Show transitions in debug mode
+             if self.show_llm_calls:
+                 next_step = data.get("next_step", "")
+                 if next_step and next_step != "__end__":
+                     console.print(f"[dim]→ Transition: {next_step}[/dim]")
+
+     #
+     # Utility Methods
+     #
+
+     def reset_step_counter(self) -> None:
+         """Reset the step counter for a new conversation."""
+         self.step_counter = 0
+
+
+ def create_cli_callback(verbose: bool = True, debug: bool = False) -> CLICallbackHandler:
+     """
+     Create a CLI callback handler with appropriate settings.
+
+     Args:
+         verbose: Enable verbose output (tool calls and outputs)
+         debug: Enable debug output (includes LLM calls and detailed info)
+
+     Returns:
+         CLICallbackHandler instance configured for the verbosity level
+     """
+     return CLICallbackHandler(
+         verbose=verbose,
+         show_tool_outputs=verbose,
+         show_thinking=verbose,
+         show_llm_calls=debug # Only show LLM calls in debug mode
+     )
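
For context on how this new module is consumed: CLICallbackHandler subclasses LangChain's BaseCallbackHandler, so it can be attached to any runnable through the standard callbacks config. A minimal usage sketch, with the caveat that the chat model and prompt below are illustrative stand-ins and not part of alita-sdk:

    from langchain_openai import ChatOpenAI
    from alita_sdk.cli.callbacks import create_cli_callback

    # verbose=True enables the tool and thinking panels; debug=True would
    # additionally surface LLM start/end events (mapped to show_llm_calls).
    handler = create_cli_callback(verbose=True, debug=False)

    llm = ChatOpenAI(model="gpt-4o-mini", streaming=True)

    # Callbacks passed via the run config propagate to nested runs, so the
    # handler's panels render on the console while the run executes.
    response = llm.invoke("Summarize this release.", config={"callbacks": [handler]})
    print(response.content)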