alita-sdk 0.3.257__py3-none-any.whl → 0.3.584__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of alita-sdk might be problematic; see the registry's security advisory for more details.
- alita_sdk/cli/__init__.py +10 -0
- alita_sdk/cli/__main__.py +17 -0
- alita_sdk/cli/agent/__init__.py +5 -0
- alita_sdk/cli/agent/default.py +258 -0
- alita_sdk/cli/agent_executor.py +155 -0
- alita_sdk/cli/agent_loader.py +215 -0
- alita_sdk/cli/agent_ui.py +228 -0
- alita_sdk/cli/agents.py +3794 -0
- alita_sdk/cli/callbacks.py +647 -0
- alita_sdk/cli/cli.py +168 -0
- alita_sdk/cli/config.py +306 -0
- alita_sdk/cli/context/__init__.py +30 -0
- alita_sdk/cli/context/cleanup.py +198 -0
- alita_sdk/cli/context/manager.py +731 -0
- alita_sdk/cli/context/message.py +285 -0
- alita_sdk/cli/context/strategies.py +289 -0
- alita_sdk/cli/context/token_estimation.py +127 -0
- alita_sdk/cli/formatting.py +182 -0
- alita_sdk/cli/input_handler.py +419 -0
- alita_sdk/cli/inventory.py +1073 -0
- alita_sdk/cli/mcp_loader.py +315 -0
- alita_sdk/cli/toolkit.py +327 -0
- alita_sdk/cli/toolkit_loader.py +85 -0
- alita_sdk/cli/tools/__init__.py +43 -0
- alita_sdk/cli/tools/approval.py +224 -0
- alita_sdk/cli/tools/filesystem.py +1751 -0
- alita_sdk/cli/tools/planning.py +389 -0
- alita_sdk/cli/tools/terminal.py +414 -0
- alita_sdk/community/__init__.py +72 -12
- alita_sdk/community/inventory/__init__.py +236 -0
- alita_sdk/community/inventory/config.py +257 -0
- alita_sdk/community/inventory/enrichment.py +2137 -0
- alita_sdk/community/inventory/extractors.py +1469 -0
- alita_sdk/community/inventory/ingestion.py +3172 -0
- alita_sdk/community/inventory/knowledge_graph.py +1457 -0
- alita_sdk/community/inventory/parsers/__init__.py +218 -0
- alita_sdk/community/inventory/parsers/base.py +295 -0
- alita_sdk/community/inventory/parsers/csharp_parser.py +907 -0
- alita_sdk/community/inventory/parsers/go_parser.py +851 -0
- alita_sdk/community/inventory/parsers/html_parser.py +389 -0
- alita_sdk/community/inventory/parsers/java_parser.py +593 -0
- alita_sdk/community/inventory/parsers/javascript_parser.py +629 -0
- alita_sdk/community/inventory/parsers/kotlin_parser.py +768 -0
- alita_sdk/community/inventory/parsers/markdown_parser.py +362 -0
- alita_sdk/community/inventory/parsers/python_parser.py +604 -0
- alita_sdk/community/inventory/parsers/rust_parser.py +858 -0
- alita_sdk/community/inventory/parsers/swift_parser.py +832 -0
- alita_sdk/community/inventory/parsers/text_parser.py +322 -0
- alita_sdk/community/inventory/parsers/yaml_parser.py +370 -0
- alita_sdk/community/inventory/patterns/__init__.py +61 -0
- alita_sdk/community/inventory/patterns/ast_adapter.py +380 -0
- alita_sdk/community/inventory/patterns/loader.py +348 -0
- alita_sdk/community/inventory/patterns/registry.py +198 -0
- alita_sdk/community/inventory/presets.py +535 -0
- alita_sdk/community/inventory/retrieval.py +1403 -0
- alita_sdk/community/inventory/toolkit.py +173 -0
- alita_sdk/community/inventory/toolkit_utils.py +176 -0
- alita_sdk/community/inventory/visualize.py +1370 -0
- alita_sdk/configurations/__init__.py +11 -0
- alita_sdk/configurations/ado.py +148 -2
- alita_sdk/configurations/azure_search.py +1 -1
- alita_sdk/configurations/bigquery.py +1 -1
- alita_sdk/configurations/bitbucket.py +94 -2
- alita_sdk/configurations/browser.py +18 -0
- alita_sdk/configurations/carrier.py +19 -0
- alita_sdk/configurations/confluence.py +130 -1
- alita_sdk/configurations/delta_lake.py +1 -1
- alita_sdk/configurations/figma.py +76 -5
- alita_sdk/configurations/github.py +65 -1
- alita_sdk/configurations/gitlab.py +81 -0
- alita_sdk/configurations/google_places.py +17 -0
- alita_sdk/configurations/jira.py +103 -0
- alita_sdk/configurations/openapi.py +323 -0
- alita_sdk/configurations/postman.py +1 -1
- alita_sdk/configurations/qtest.py +72 -3
- alita_sdk/configurations/report_portal.py +115 -0
- alita_sdk/configurations/salesforce.py +19 -0
- alita_sdk/configurations/service_now.py +1 -12
- alita_sdk/configurations/sharepoint.py +167 -0
- alita_sdk/configurations/sonar.py +18 -0
- alita_sdk/configurations/sql.py +20 -0
- alita_sdk/configurations/testio.py +101 -0
- alita_sdk/configurations/testrail.py +88 -0
- alita_sdk/configurations/xray.py +94 -1
- alita_sdk/configurations/zephyr_enterprise.py +94 -1
- alita_sdk/configurations/zephyr_essential.py +95 -0
- alita_sdk/runtime/clients/artifact.py +21 -4
- alita_sdk/runtime/clients/client.py +458 -67
- alita_sdk/runtime/clients/mcp_discovery.py +342 -0
- alita_sdk/runtime/clients/mcp_manager.py +262 -0
- alita_sdk/runtime/clients/sandbox_client.py +352 -0
- alita_sdk/runtime/langchain/_constants_bkup.py +1318 -0
- alita_sdk/runtime/langchain/assistant.py +183 -43
- alita_sdk/runtime/langchain/constants.py +647 -1
- alita_sdk/runtime/langchain/document_loaders/AlitaDocxMammothLoader.py +315 -3
- alita_sdk/runtime/langchain/document_loaders/AlitaExcelLoader.py +209 -31
- alita_sdk/runtime/langchain/document_loaders/AlitaImageLoader.py +1 -1
- alita_sdk/runtime/langchain/document_loaders/AlitaJSONLinesLoader.py +77 -0
- alita_sdk/runtime/langchain/document_loaders/AlitaJSONLoader.py +10 -3
- alita_sdk/runtime/langchain/document_loaders/AlitaMarkdownLoader.py +66 -0
- alita_sdk/runtime/langchain/document_loaders/AlitaPDFLoader.py +79 -10
- alita_sdk/runtime/langchain/document_loaders/AlitaPowerPointLoader.py +52 -15
- alita_sdk/runtime/langchain/document_loaders/AlitaPythonLoader.py +9 -0
- alita_sdk/runtime/langchain/document_loaders/AlitaTableLoader.py +1 -4
- alita_sdk/runtime/langchain/document_loaders/AlitaTextLoader.py +15 -2
- alita_sdk/runtime/langchain/document_loaders/ImageParser.py +30 -0
- alita_sdk/runtime/langchain/document_loaders/constants.py +189 -41
- alita_sdk/runtime/langchain/interfaces/llm_processor.py +4 -2
- alita_sdk/runtime/langchain/langraph_agent.py +493 -105
- alita_sdk/runtime/langchain/utils.py +118 -8
- alita_sdk/runtime/llms/preloaded.py +2 -6
- alita_sdk/runtime/models/mcp_models.py +61 -0
- alita_sdk/runtime/skills/__init__.py +91 -0
- alita_sdk/runtime/skills/callbacks.py +498 -0
- alita_sdk/runtime/skills/discovery.py +540 -0
- alita_sdk/runtime/skills/executor.py +610 -0
- alita_sdk/runtime/skills/input_builder.py +371 -0
- alita_sdk/runtime/skills/models.py +330 -0
- alita_sdk/runtime/skills/registry.py +355 -0
- alita_sdk/runtime/skills/skill_runner.py +330 -0
- alita_sdk/runtime/toolkits/__init__.py +28 -0
- alita_sdk/runtime/toolkits/application.py +14 -4
- alita_sdk/runtime/toolkits/artifact.py +25 -9
- alita_sdk/runtime/toolkits/datasource.py +13 -6
- alita_sdk/runtime/toolkits/mcp.py +782 -0
- alita_sdk/runtime/toolkits/planning.py +178 -0
- alita_sdk/runtime/toolkits/skill_router.py +238 -0
- alita_sdk/runtime/toolkits/subgraph.py +11 -6
- alita_sdk/runtime/toolkits/tools.py +314 -70
- alita_sdk/runtime/toolkits/vectorstore.py +11 -5
- alita_sdk/runtime/tools/__init__.py +24 -0
- alita_sdk/runtime/tools/application.py +16 -4
- alita_sdk/runtime/tools/artifact.py +367 -33
- alita_sdk/runtime/tools/data_analysis.py +183 -0
- alita_sdk/runtime/tools/function.py +100 -4
- alita_sdk/runtime/tools/graph.py +81 -0
- alita_sdk/runtime/tools/image_generation.py +218 -0
- alita_sdk/runtime/tools/llm.py +1032 -177
- alita_sdk/runtime/tools/loop.py +3 -1
- alita_sdk/runtime/tools/loop_output.py +3 -1
- alita_sdk/runtime/tools/mcp_inspect_tool.py +284 -0
- alita_sdk/runtime/tools/mcp_remote_tool.py +181 -0
- alita_sdk/runtime/tools/mcp_server_tool.py +3 -1
- alita_sdk/runtime/tools/planning/__init__.py +36 -0
- alita_sdk/runtime/tools/planning/models.py +246 -0
- alita_sdk/runtime/tools/planning/wrapper.py +607 -0
- alita_sdk/runtime/tools/router.py +2 -1
- alita_sdk/runtime/tools/sandbox.py +375 -0
- alita_sdk/runtime/tools/skill_router.py +776 -0
- alita_sdk/runtime/tools/tool.py +3 -1
- alita_sdk/runtime/tools/vectorstore.py +69 -65
- alita_sdk/runtime/tools/vectorstore_base.py +163 -90
- alita_sdk/runtime/utils/AlitaCallback.py +137 -21
- alita_sdk/runtime/utils/constants.py +5 -1
- alita_sdk/runtime/utils/mcp_client.py +492 -0
- alita_sdk/runtime/utils/mcp_oauth.py +361 -0
- alita_sdk/runtime/utils/mcp_sse_client.py +434 -0
- alita_sdk/runtime/utils/mcp_tools_discovery.py +124 -0
- alita_sdk/runtime/utils/streamlit.py +41 -14
- alita_sdk/runtime/utils/toolkit_utils.py +28 -9
- alita_sdk/runtime/utils/utils.py +48 -0
- alita_sdk/tools/__init__.py +135 -37
- alita_sdk/tools/ado/__init__.py +2 -2
- alita_sdk/tools/ado/repos/__init__.py +16 -19
- alita_sdk/tools/ado/repos/repos_wrapper.py +12 -20
- alita_sdk/tools/ado/test_plan/__init__.py +27 -8
- alita_sdk/tools/ado/test_plan/test_plan_wrapper.py +56 -28
- alita_sdk/tools/ado/wiki/__init__.py +28 -12
- alita_sdk/tools/ado/wiki/ado_wrapper.py +114 -40
- alita_sdk/tools/ado/work_item/__init__.py +28 -12
- alita_sdk/tools/ado/work_item/ado_wrapper.py +95 -11
- alita_sdk/tools/advanced_jira_mining/__init__.py +13 -8
- alita_sdk/tools/aws/delta_lake/__init__.py +15 -11
- alita_sdk/tools/aws/delta_lake/tool.py +5 -1
- alita_sdk/tools/azure_ai/search/__init__.py +14 -8
- alita_sdk/tools/base/tool.py +5 -1
- alita_sdk/tools/base_indexer_toolkit.py +454 -110
- alita_sdk/tools/bitbucket/__init__.py +28 -19
- alita_sdk/tools/bitbucket/api_wrapper.py +285 -27
- alita_sdk/tools/bitbucket/cloud_api_wrapper.py +5 -5
- alita_sdk/tools/browser/__init__.py +41 -16
- alita_sdk/tools/browser/crawler.py +3 -1
- alita_sdk/tools/browser/utils.py +15 -6
- alita_sdk/tools/carrier/__init__.py +18 -17
- alita_sdk/tools/carrier/backend_reports_tool.py +8 -4
- alita_sdk/tools/carrier/excel_reporter.py +8 -4
- alita_sdk/tools/chunkers/__init__.py +3 -1
- alita_sdk/tools/chunkers/code/codeparser.py +1 -1
- alita_sdk/tools/chunkers/sematic/json_chunker.py +2 -1
- alita_sdk/tools/chunkers/sematic/markdown_chunker.py +97 -6
- alita_sdk/tools/chunkers/sematic/proposal_chunker.py +1 -1
- alita_sdk/tools/chunkers/universal_chunker.py +270 -0
- alita_sdk/tools/cloud/aws/__init__.py +12 -7
- alita_sdk/tools/cloud/azure/__init__.py +12 -7
- alita_sdk/tools/cloud/gcp/__init__.py +12 -7
- alita_sdk/tools/cloud/k8s/__init__.py +12 -7
- alita_sdk/tools/code/linter/__init__.py +10 -8
- alita_sdk/tools/code/loaders/codesearcher.py +3 -2
- alita_sdk/tools/code/sonar/__init__.py +21 -13
- alita_sdk/tools/code_indexer_toolkit.py +199 -0
- alita_sdk/tools/confluence/__init__.py +22 -14
- alita_sdk/tools/confluence/api_wrapper.py +197 -58
- alita_sdk/tools/confluence/loader.py +14 -2
- alita_sdk/tools/custom_open_api/__init__.py +12 -5
- alita_sdk/tools/elastic/__init__.py +11 -8
- alita_sdk/tools/elitea_base.py +546 -64
- alita_sdk/tools/figma/__init__.py +60 -11
- alita_sdk/tools/figma/api_wrapper.py +1400 -167
- alita_sdk/tools/figma/figma_client.py +73 -0
- alita_sdk/tools/figma/toon_tools.py +2748 -0
- alita_sdk/tools/github/__init__.py +18 -17
- alita_sdk/tools/github/api_wrapper.py +9 -26
- alita_sdk/tools/github/github_client.py +81 -12
- alita_sdk/tools/github/schemas.py +2 -1
- alita_sdk/tools/github/tool.py +5 -1
- alita_sdk/tools/gitlab/__init__.py +19 -13
- alita_sdk/tools/gitlab/api_wrapper.py +256 -80
- alita_sdk/tools/gitlab_org/__init__.py +14 -10
- alita_sdk/tools/google/bigquery/__init__.py +14 -13
- alita_sdk/tools/google/bigquery/tool.py +5 -1
- alita_sdk/tools/google_places/__init__.py +21 -11
- alita_sdk/tools/jira/__init__.py +22 -11
- alita_sdk/tools/jira/api_wrapper.py +315 -168
- alita_sdk/tools/keycloak/__init__.py +11 -8
- alita_sdk/tools/localgit/__init__.py +9 -3
- alita_sdk/tools/localgit/local_git.py +62 -54
- alita_sdk/tools/localgit/tool.py +5 -1
- alita_sdk/tools/memory/__init__.py +38 -14
- alita_sdk/tools/non_code_indexer_toolkit.py +7 -2
- alita_sdk/tools/ocr/__init__.py +11 -8
- alita_sdk/tools/openapi/__init__.py +491 -106
- alita_sdk/tools/openapi/api_wrapper.py +1357 -0
- alita_sdk/tools/openapi/tool.py +20 -0
- alita_sdk/tools/pandas/__init__.py +20 -12
- alita_sdk/tools/pandas/api_wrapper.py +40 -45
- alita_sdk/tools/pandas/dataframe/generator/base.py +3 -1
- alita_sdk/tools/postman/__init__.py +11 -11
- alita_sdk/tools/postman/api_wrapper.py +19 -8
- alita_sdk/tools/postman/postman_analysis.py +8 -1
- alita_sdk/tools/pptx/__init__.py +11 -10
- alita_sdk/tools/qtest/__init__.py +22 -14
- alita_sdk/tools/qtest/api_wrapper.py +1784 -88
- alita_sdk/tools/rally/__init__.py +13 -10
- alita_sdk/tools/report_portal/__init__.py +23 -16
- alita_sdk/tools/salesforce/__init__.py +22 -16
- alita_sdk/tools/servicenow/__init__.py +21 -16
- alita_sdk/tools/servicenow/api_wrapper.py +1 -1
- alita_sdk/tools/sharepoint/__init__.py +17 -14
- alita_sdk/tools/sharepoint/api_wrapper.py +179 -39
- alita_sdk/tools/sharepoint/authorization_helper.py +191 -1
- alita_sdk/tools/sharepoint/utils.py +8 -2
- alita_sdk/tools/slack/__init__.py +13 -8
- alita_sdk/tools/sql/__init__.py +22 -19
- alita_sdk/tools/sql/api_wrapper.py +71 -23
- alita_sdk/tools/testio/__init__.py +21 -13
- alita_sdk/tools/testrail/__init__.py +13 -11
- alita_sdk/tools/testrail/api_wrapper.py +214 -46
- alita_sdk/tools/utils/__init__.py +28 -4
- alita_sdk/tools/utils/content_parser.py +241 -55
- alita_sdk/tools/utils/text_operations.py +254 -0
- alita_sdk/tools/vector_adapters/VectorStoreAdapter.py +83 -27
- alita_sdk/tools/xray/__init__.py +18 -14
- alita_sdk/tools/xray/api_wrapper.py +58 -113
- alita_sdk/tools/yagmail/__init__.py +9 -3
- alita_sdk/tools/zephyr/__init__.py +12 -7
- alita_sdk/tools/zephyr_enterprise/__init__.py +16 -9
- alita_sdk/tools/zephyr_enterprise/api_wrapper.py +30 -15
- alita_sdk/tools/zephyr_essential/__init__.py +16 -10
- alita_sdk/tools/zephyr_essential/api_wrapper.py +297 -54
- alita_sdk/tools/zephyr_essential/client.py +6 -4
- alita_sdk/tools/zephyr_scale/__init__.py +13 -8
- alita_sdk/tools/zephyr_scale/api_wrapper.py +39 -31
- alita_sdk/tools/zephyr_squad/__init__.py +12 -7
- {alita_sdk-0.3.257.dist-info → alita_sdk-0.3.584.dist-info}/METADATA +184 -37
- alita_sdk-0.3.584.dist-info/RECORD +452 -0
- alita_sdk-0.3.584.dist-info/entry_points.txt +2 -0
- alita_sdk/tools/bitbucket/tools.py +0 -304
- alita_sdk-0.3.257.dist-info/RECORD +0 -343
- {alita_sdk-0.3.257.dist-info → alita_sdk-0.3.584.dist-info}/WHEEL +0 -0
- {alita_sdk-0.3.257.dist-info → alita_sdk-0.3.584.dist-info}/licenses/LICENSE +0 -0
- {alita_sdk-0.3.257.dist-info → alita_sdk-0.3.584.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,647 @@
|
|
|
1
|
+
"""
|
|
2
|
+
CLI Callback Handler for Alita CLI.
|
|
3
|
+
|
|
4
|
+
Provides rich console output for tool calls, LLM thinking, and agent steps
|
|
5
|
+
during agent execution in the CLI with beautifully styled blocks.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
import logging
|
|
9
|
+
import json
|
|
10
|
+
import traceback
|
|
11
|
+
from datetime import datetime, timezone
|
|
12
|
+
from uuid import UUID
|
|
13
|
+
from typing import Any, Dict, List, Optional, Sequence
|
|
14
|
+
from collections import defaultdict
|
|
15
|
+
|
|
16
|
+
from langchain_core.callbacks import BaseCallbackHandler
|
|
17
|
+
from langchain_core.outputs import ChatGenerationChunk, LLMResult
|
|
18
|
+
from langchain_core.messages import BaseMessage, AIMessage, ToolMessage
|
|
19
|
+
|
|
20
|
+
from rich.console import Console, Group
|
|
21
|
+
from rich.panel import Panel
|
|
22
|
+
from rich.syntax import Syntax
|
|
23
|
+
from rich.text import Text
|
|
24
|
+
from rich.table import Table
|
|
25
|
+
from rich.tree import Tree
|
|
26
|
+
from rich import box
|
|
27
|
+
from rich.markdown import Markdown
|
|
28
|
+
from rich.rule import Rule
|
|
29
|
+
from rich.padding import Padding
|
|
30
|
+
|
|
31
|
+
logger = logging.getLogger(__name__)

# Shared Rich console used by every panel this handler renders.
console = Console()

# Border styles for the different panel kinds: rounded for tool calls and
# results, heavy for errors so failures stand out visually.
TOOL_BOX = box.ROUNDED
OUTPUT_BOX = box.ROUNDED
ERROR_BOX = box.HEAVY
|
40
|
+
|
|
41
|
+
|
|
42
|
+
class CLICallbackHandler(BaseCallbackHandler):
|
|
43
|
+
"""
|
|
44
|
+
CLI Callback handler that displays tool calls, LLM responses, and agent steps
|
|
45
|
+
with rich formatting using beautifully styled blocks.
|
|
46
|
+
"""
|
|
47
|
+
|
|
48
|
+
def __init__(self, verbose: bool = True, show_tool_outputs: bool = True,
|
|
49
|
+
show_thinking: bool = True, show_llm_calls: bool = False):
|
|
50
|
+
"""
|
|
51
|
+
Initialize the CLI callback handler.
|
|
52
|
+
|
|
53
|
+
Args:
|
|
54
|
+
verbose: Show detailed output for all operations
|
|
55
|
+
show_tool_outputs: Show tool call inputs and outputs
|
|
56
|
+
show_thinking: Show LLM thinking/reasoning process
|
|
57
|
+
show_llm_calls: Show LLM call start/end (can be noisy)
|
|
58
|
+
"""
|
|
59
|
+
super().__init__()
|
|
60
|
+
self.verbose = verbose
|
|
61
|
+
self.show_tool_outputs = show_tool_outputs
|
|
62
|
+
self.show_thinking = show_thinking
|
|
63
|
+
self.show_llm_calls = show_llm_calls
|
|
64
|
+
|
|
65
|
+
# Track state
|
|
66
|
+
self.tool_runs: Dict[str, Dict[str, Any]] = {}
|
|
67
|
+
self.llm_runs: Dict[str, Dict[str, Any]] = {}
|
|
68
|
+
self.pending_tokens: Dict[str, List[str]] = defaultdict(list)
|
|
69
|
+
self.current_model: str = ""
|
|
70
|
+
self.step_counter: int = 0
|
|
71
|
+
|
|
72
|
+
# External status spinner that can be stopped
|
|
73
|
+
self.status = None
|
|
74
|
+
|
|
75
|
+
def _stop_status(self):
|
|
76
|
+
"""Stop the external status spinner if set."""
|
|
77
|
+
if self.status is not None:
|
|
78
|
+
try:
|
|
79
|
+
self.status.stop()
|
|
80
|
+
self.status = None
|
|
81
|
+
except Exception:
|
|
82
|
+
pass
|
|
83
|
+
|
|
84
|
+
def _format_json_content(self, data: Any, max_length: int = 1500) -> str:
|
|
85
|
+
"""Format data as pretty JSON string."""
|
|
86
|
+
try:
|
|
87
|
+
if isinstance(data, str):
|
|
88
|
+
# Try to parse if it looks like JSON
|
|
89
|
+
if data.strip().startswith(('{', '[')):
|
|
90
|
+
try:
|
|
91
|
+
data = json.loads(data)
|
|
92
|
+
except json.JSONDecodeError:
|
|
93
|
+
return data[:max_length] + ('...' if len(data) > max_length else '')
|
|
94
|
+
|
|
95
|
+
formatted = json.dumps(data, indent=2, ensure_ascii=False, default=str)
|
|
96
|
+
if len(formatted) > max_length:
|
|
97
|
+
formatted = formatted[:max_length] + f"\n... (truncated)"
|
|
98
|
+
return formatted
|
|
99
|
+
except Exception:
|
|
100
|
+
return str(data)[:max_length]
|
|
101
|
+
|
|
102
|
+
def _format_tool_output_content(self, output: Any) -> Any:
|
|
103
|
+
"""Format tool output for display in panel."""
|
|
104
|
+
if output is None:
|
|
105
|
+
return Text("(no output)", style="dim italic")
|
|
106
|
+
|
|
107
|
+
try:
|
|
108
|
+
output_str = str(output)
|
|
109
|
+
max_length = 2000
|
|
110
|
+
|
|
111
|
+
# Check if it's JSON-like
|
|
112
|
+
if output_str.strip().startswith(('{', '[')):
|
|
113
|
+
try:
|
|
114
|
+
parsed = json.loads(output_str)
|
|
115
|
+
formatted = json.dumps(parsed, indent=2, ensure_ascii=False)
|
|
116
|
+
if len(formatted) > max_length:
|
|
117
|
+
formatted = formatted[:max_length] + f"\n... (truncated, {len(output_str)} chars total)"
|
|
118
|
+
return Syntax(formatted, "json", theme="monokai", word_wrap=True, line_numbers=False)
|
|
119
|
+
except json.JSONDecodeError:
|
|
120
|
+
pass
|
|
121
|
+
|
|
122
|
+
# Truncate if needed
|
|
123
|
+
if len(output_str) > max_length:
|
|
124
|
+
output_str = output_str[:max_length] + f"\n... (truncated, {len(str(output))} chars total)"
|
|
125
|
+
|
|
126
|
+
# Check for markdown-like content
|
|
127
|
+
if any(marker in output_str for marker in ['```', '**', '##', '- ', '* ', '\n\n']):
|
|
128
|
+
return Markdown(output_str)
|
|
129
|
+
|
|
130
|
+
return Text(output_str, style="white")
|
|
131
|
+
|
|
132
|
+
except Exception:
|
|
133
|
+
return Text(str(output)[:500], style="white")
|
|
134
|
+
|
|
135
|
+
#
|
|
136
|
+
# Tool Callbacks
|
|
137
|
+
#
|
|
138
|
+
|
|
139
|
+
def on_tool_start(
|
|
140
|
+
self,
|
|
141
|
+
serialized: Dict[str, Any],
|
|
142
|
+
input_str: str,
|
|
143
|
+
*,
|
|
144
|
+
run_id: UUID,
|
|
145
|
+
parent_run_id: Optional[UUID] = None,
|
|
146
|
+
tags: Optional[List[str]] = None,
|
|
147
|
+
metadata: Optional[Dict[str, Any]] = None,
|
|
148
|
+
inputs: Optional[Dict[str, Any]] = None,
|
|
149
|
+
**kwargs: Any,
|
|
150
|
+
) -> None:
|
|
151
|
+
"""Called when a tool starts running."""
|
|
152
|
+
# Stop the thinking spinner when a tool starts
|
|
153
|
+
self._stop_status()
|
|
154
|
+
|
|
155
|
+
if not self.show_tool_outputs:
|
|
156
|
+
return
|
|
157
|
+
|
|
158
|
+
tool_name = serialized.get("name", "Unknown Tool")
|
|
159
|
+
tool_run_id = str(run_id)
|
|
160
|
+
self.step_counter += 1
|
|
161
|
+
|
|
162
|
+
# Store tool run info
|
|
163
|
+
self.tool_runs[tool_run_id] = {
|
|
164
|
+
"name": tool_name,
|
|
165
|
+
"start_time": datetime.now(tz=timezone.utc),
|
|
166
|
+
"inputs": inputs or input_str,
|
|
167
|
+
"step": self.step_counter,
|
|
168
|
+
}
|
|
169
|
+
|
|
170
|
+
# Format inputs
|
|
171
|
+
tool_inputs = inputs if inputs else input_str
|
|
172
|
+
|
|
173
|
+
# Create the tool call panel
|
|
174
|
+
console.print()
|
|
175
|
+
|
|
176
|
+
# Build content for the panel
|
|
177
|
+
content_parts = []
|
|
178
|
+
|
|
179
|
+
if tool_inputs:
|
|
180
|
+
if isinstance(tool_inputs, dict):
|
|
181
|
+
formatted_input = self._format_json_content(tool_inputs, max_length=1200)
|
|
182
|
+
input_syntax = Syntax(formatted_input, "json", theme="monokai",
|
|
183
|
+
word_wrap=True, line_numbers=False)
|
|
184
|
+
content_parts.append(input_syntax)
|
|
185
|
+
elif isinstance(tool_inputs, str) and len(tool_inputs) > 0:
|
|
186
|
+
display_input = tool_inputs[:800] + "..." if len(tool_inputs) > 800 else tool_inputs
|
|
187
|
+
content_parts.append(Text(display_input, style="white"))
|
|
188
|
+
|
|
189
|
+
if content_parts:
|
|
190
|
+
panel_content = Group(*content_parts)
|
|
191
|
+
else:
|
|
192
|
+
panel_content = Text("(no input)", style="dim italic")
|
|
193
|
+
|
|
194
|
+
# Create styled panel
|
|
195
|
+
panel = Panel(
|
|
196
|
+
panel_content,
|
|
197
|
+
title=f"[bold yellow]🔧 Tool Call[/bold yellow] [dim]│[/dim] [bold cyan]{tool_name}[/bold cyan]",
|
|
198
|
+
title_align="left",
|
|
199
|
+
subtitle=f"[dim]Step {self.step_counter}[/dim]",
|
|
200
|
+
subtitle_align="right",
|
|
201
|
+
border_style="yellow",
|
|
202
|
+
box=TOOL_BOX,
|
|
203
|
+
padding=(0, 1),
|
|
204
|
+
)
|
|
205
|
+
console.print(panel)
|
|
206
|
+
|
|
207
|
+
def on_tool_end(
|
|
208
|
+
self,
|
|
209
|
+
output: Any,
|
|
210
|
+
*,
|
|
211
|
+
run_id: UUID,
|
|
212
|
+
parent_run_id: Optional[UUID] = None,
|
|
213
|
+
tags: Optional[List[str]] = None,
|
|
214
|
+
**kwargs: Any,
|
|
215
|
+
) -> None:
|
|
216
|
+
"""Called when a tool finishes running."""
|
|
217
|
+
if not self.show_tool_outputs:
|
|
218
|
+
return
|
|
219
|
+
|
|
220
|
+
tool_run_id = str(run_id)
|
|
221
|
+
tool_info = self.tool_runs.pop(tool_run_id, {})
|
|
222
|
+
tool_name = tool_info.get("name", kwargs.get("name", "Unknown"))
|
|
223
|
+
step_num = tool_info.get("step", "?")
|
|
224
|
+
|
|
225
|
+
# Calculate duration
|
|
226
|
+
start_time = tool_info.get("start_time")
|
|
227
|
+
duration_str = ""
|
|
228
|
+
if start_time:
|
|
229
|
+
elapsed = (datetime.now(tz=timezone.utc) - start_time).total_seconds()
|
|
230
|
+
duration_str = f" │ {elapsed:.2f}s"
|
|
231
|
+
|
|
232
|
+
# Format output
|
|
233
|
+
output_content = self._format_tool_output_content(output)
|
|
234
|
+
|
|
235
|
+
# Create result panel
|
|
236
|
+
panel = Panel(
|
|
237
|
+
output_content,
|
|
238
|
+
title=f"[bold green]✓ Result[/bold green] [dim]│[/dim] [dim]{tool_name}[/dim]",
|
|
239
|
+
title_align="left",
|
|
240
|
+
subtitle=f"[dim]Step {step_num}{duration_str}[/dim]",
|
|
241
|
+
subtitle_align="right",
|
|
242
|
+
border_style="green",
|
|
243
|
+
box=OUTPUT_BOX,
|
|
244
|
+
padding=(0, 1),
|
|
245
|
+
)
|
|
246
|
+
console.print(panel)
|
|
247
|
+
console.print()
|
|
248
|
+
|
|
249
|
+
def on_tool_error(
|
|
250
|
+
self,
|
|
251
|
+
error: BaseException,
|
|
252
|
+
*,
|
|
253
|
+
run_id: UUID,
|
|
254
|
+
parent_run_id: Optional[UUID] = None,
|
|
255
|
+
tags: Optional[List[str]] = None,
|
|
256
|
+
**kwargs: Any,
|
|
257
|
+
) -> None:
|
|
258
|
+
"""Called when a tool errors."""
|
|
259
|
+
tool_run_id = str(run_id)
|
|
260
|
+
tool_info = self.tool_runs.pop(tool_run_id, {})
|
|
261
|
+
tool_name = tool_info.get("name", kwargs.get("name", "Unknown"))
|
|
262
|
+
step_num = tool_info.get("step", "?")
|
|
263
|
+
|
|
264
|
+
# Calculate duration
|
|
265
|
+
start_time = tool_info.get("start_time")
|
|
266
|
+
duration_str = ""
|
|
267
|
+
if start_time:
|
|
268
|
+
elapsed = (datetime.now(tz=timezone.utc) - start_time).total_seconds()
|
|
269
|
+
duration_str = f" │ {elapsed:.2f}s"
|
|
270
|
+
|
|
271
|
+
# Build error content with exception details
|
|
272
|
+
content_parts = []
|
|
273
|
+
|
|
274
|
+
# Error message
|
|
275
|
+
error_msg = str(error)
|
|
276
|
+
content_parts.append(Text(error_msg, style="red bold"))
|
|
277
|
+
|
|
278
|
+
# Add traceback if available
|
|
279
|
+
tb_str = "".join(traceback.format_exception(type(error), error, error.__traceback__))
|
|
280
|
+
if tb_str and tb_str.strip():
|
|
281
|
+
content_parts.append(Text("")) # blank line
|
|
282
|
+
content_parts.append(Text("Exception Traceback:", style="dim bold"))
|
|
283
|
+
# Truncate if too long
|
|
284
|
+
max_tb_len = 1500
|
|
285
|
+
if len(tb_str) > max_tb_len:
|
|
286
|
+
tb_str = tb_str[:max_tb_len] + f"\n... (truncated, {len(tb_str)} chars total)"
|
|
287
|
+
content_parts.append(Syntax(tb_str, "python", theme="monokai",
|
|
288
|
+
word_wrap=True, line_numbers=False))
|
|
289
|
+
|
|
290
|
+
panel_content = Group(*content_parts) if len(content_parts) > 1 else content_parts[0]
|
|
291
|
+
|
|
292
|
+
panel = Panel(
|
|
293
|
+
panel_content,
|
|
294
|
+
title=f"[bold red]✗ Error[/bold red] [dim]│[/dim] [bold]{tool_name}[/bold]",
|
|
295
|
+
title_align="left",
|
|
296
|
+
subtitle=f"[dim]Step {step_num}{duration_str}[/dim]",
|
|
297
|
+
subtitle_align="right",
|
|
298
|
+
border_style="red",
|
|
299
|
+
box=ERROR_BOX,
|
|
300
|
+
padding=(0, 1),
|
|
301
|
+
)
|
|
302
|
+
console.print()
|
|
303
|
+
console.print(panel)
|
|
304
|
+
console.print()
|
|
305
|
+
|
|
306
|
+
#
|
|
307
|
+
# LLM Callbacks
|
|
308
|
+
#
|
|
309
|
+
|
|
310
|
+
def on_llm_start(
|
|
311
|
+
self,
|
|
312
|
+
serialized: Dict[str, Any],
|
|
313
|
+
prompts: List[str],
|
|
314
|
+
*,
|
|
315
|
+
run_id: UUID,
|
|
316
|
+
parent_run_id: Optional[UUID] = None,
|
|
317
|
+
tags: Optional[List[str]] = None,
|
|
318
|
+
metadata: Optional[Dict[str, Any]] = None,
|
|
319
|
+
**kwargs: Any,
|
|
320
|
+
) -> None:
|
|
321
|
+
"""Called when LLM starts generating."""
|
|
322
|
+
if not self.show_llm_calls:
|
|
323
|
+
return
|
|
324
|
+
|
|
325
|
+
llm_run_id = str(run_id)
|
|
326
|
+
model_name = metadata.get("ls_model_name", "") if metadata else ""
|
|
327
|
+
self.current_model = model_name
|
|
328
|
+
|
|
329
|
+
self.llm_runs[llm_run_id] = {
|
|
330
|
+
"model": model_name,
|
|
331
|
+
"start_time": datetime.now(tz=timezone.utc),
|
|
332
|
+
}
|
|
333
|
+
|
|
334
|
+
# Display thinking indicator
|
|
335
|
+
console.print()
|
|
336
|
+
console.print(Panel(
|
|
337
|
+
Text("Processing...", style="italic"),
|
|
338
|
+
title=f"[bold blue]🤔 LLM[/bold blue] [dim]│[/dim] [dim]{model_name or 'model'}[/dim]",
|
|
339
|
+
title_align="left",
|
|
340
|
+
border_style="blue",
|
|
341
|
+
box=box.SIMPLE,
|
|
342
|
+
padding=(0, 1),
|
|
343
|
+
))
|
|
344
|
+
|
|
345
|
+
def on_chat_model_start(
|
|
346
|
+
self,
|
|
347
|
+
serialized: Dict[str, Any],
|
|
348
|
+
messages: List[List[BaseMessage]],
|
|
349
|
+
*,
|
|
350
|
+
run_id: UUID,
|
|
351
|
+
parent_run_id: Optional[UUID] = None,
|
|
352
|
+
tags: Optional[List[str]] = None,
|
|
353
|
+
metadata: Optional[Dict[str, Any]] = None,
|
|
354
|
+
**kwargs: Any,
|
|
355
|
+
) -> None:
|
|
356
|
+
"""Called when chat model starts."""
|
|
357
|
+
if not self.show_llm_calls:
|
|
358
|
+
return
|
|
359
|
+
|
|
360
|
+
llm_run_id = str(run_id)
|
|
361
|
+
model_name = metadata.get("ls_model_name", "") if metadata else ""
|
|
362
|
+
self.current_model = model_name
|
|
363
|
+
|
|
364
|
+
self.llm_runs[llm_run_id] = {
|
|
365
|
+
"model": model_name,
|
|
366
|
+
"start_time": datetime.now(tz=timezone.utc),
|
|
367
|
+
}
|
|
368
|
+
|
|
369
|
+
# Display thinking indicator
|
|
370
|
+
console.print()
|
|
371
|
+
console.print(Panel(
|
|
372
|
+
Text("Processing...", style="italic"),
|
|
373
|
+
title=f"[bold blue]🤔 LLM[/bold blue] [dim]│[/dim] [dim]{model_name or 'model'}[/dim]",
|
|
374
|
+
title_align="left",
|
|
375
|
+
border_style="blue",
|
|
376
|
+
box=box.SIMPLE,
|
|
377
|
+
padding=(0, 1),
|
|
378
|
+
))
|
|
379
|
+
|
|
380
|
+
def on_llm_new_token(
|
|
381
|
+
self,
|
|
382
|
+
token: str,
|
|
383
|
+
*,
|
|
384
|
+
chunk: Optional[ChatGenerationChunk] = None,
|
|
385
|
+
run_id: UUID,
|
|
386
|
+
parent_run_id: Optional[UUID] = None,
|
|
387
|
+
tags: Optional[List[str]] = None,
|
|
388
|
+
**kwargs: Any,
|
|
389
|
+
) -> None:
|
|
390
|
+
"""Called on each new LLM token."""
|
|
391
|
+
# Stream tokens if showing thinking process
|
|
392
|
+
if self.show_thinking and token:
|
|
393
|
+
self.pending_tokens[str(run_id)].append(token)
|
|
394
|
+
|
|
395
|
+
def on_llm_end(
|
|
396
|
+
self,
|
|
397
|
+
response: LLMResult,
|
|
398
|
+
*,
|
|
399
|
+
run_id: UUID,
|
|
400
|
+
parent_run_id: Optional[UUID] = None,
|
|
401
|
+
tags: Optional[List[str]] = None,
|
|
402
|
+
**kwargs: Any,
|
|
403
|
+
) -> None:
|
|
404
|
+
"""Called when LLM finishes."""
|
|
405
|
+
llm_run_id = str(run_id)
|
|
406
|
+
llm_info = self.llm_runs.pop(llm_run_id, {})
|
|
407
|
+
|
|
408
|
+
# Clear pending tokens - we don't show them as "Thinking" anymore
|
|
409
|
+
# The final response will be displayed by the main chat loop
|
|
410
|
+
# Only show thinking if there were tool calls (indicated by having active tool runs)
|
|
411
|
+
tokens = self.pending_tokens.pop(llm_run_id, [])
|
|
412
|
+
|
|
413
|
+
# Only show thinking panel if we have active tool context (intermediate reasoning)
|
|
414
|
+
if self.show_thinking and tokens and len(self.tool_runs) > 0:
|
|
415
|
+
thinking_text = "".join(tokens)
|
|
416
|
+
if thinking_text.strip():
|
|
417
|
+
# Show thinking in a subtle panel
|
|
418
|
+
max_len = 600
|
|
419
|
+
display_text = thinking_text[:max_len] + ('...' if len(thinking_text) > max_len else '')
|
|
420
|
+
console.print(Panel(
|
|
421
|
+
Text(display_text, style="dim italic"),
|
|
422
|
+
title="[dim]💭 Thinking[/dim]",
|
|
423
|
+
title_align="left",
|
|
424
|
+
border_style="dim",
|
|
425
|
+
box=box.SIMPLE,
|
|
426
|
+
padding=(0, 1),
|
|
427
|
+
))
|
|
428
|
+
|
|
429
|
+
if self.show_llm_calls and llm_info:
|
|
430
|
+
start_time = llm_info.get("start_time")
|
|
431
|
+
model = llm_info.get("model", "model")
|
|
432
|
+
if start_time:
|
|
433
|
+
elapsed = (datetime.now(tz=timezone.utc) - start_time).total_seconds()
|
|
434
|
+
console.print(f"[dim]✓ LLM complete ({model}, {elapsed:.2f}s)[/dim]")
|
|
435
|
+
|
|
436
|
+
def on_llm_error(
    self,
    error: BaseException,
    *,
    run_id: UUID,
    parent_run_id: Optional[UUID] = None,
    tags: Optional[List[str]] = None,
    **kwargs: Any,
) -> None:
    """Called when LLM errors.

    Maps well-known provider failures (invalid model id, rate limits,
    context-length overflow, auth failures, missing models) to a
    user-friendly message plus an actionable hint, then renders them in
    a red panel with truncated technical details. Unrecognized errors
    fall back to a plain panel with the raw error text.
    """
    import re  # local import: only needed on this (rare) error path

    error_str = str(error)
    lowered = error_str.lower()  # hoisted: several patterns check it below

    # Parse common error patterns for user-friendly messages
    user_message = None
    hint = None

    # Invalid model identifier (Bedrock/Claude)
    if "model identifier is invalid" in lowered or "BedrockException" in error_str:
        user_message = "Invalid model identifier"
        hint = "The model may not be available in your region or the model ID is incorrect.\nUse /model to switch to a different model."

    # Rate limiting
    elif "rate limit" in lowered or "too many requests" in lowered:
        user_message = "Rate limit exceeded"
        hint = "Wait a moment and try again, or switch to a different model with /model."

    # Token/context length exceeded.
    # BUG FIX: the original tested the literal substring "maximum.*tokens",
    # which contains regex metacharacters and could never match; it is now
    # applied as an actual regular expression.
    elif ("context length" in lowered
          or re.search(r"maximum.*tokens", lowered)
          or "too long" in lowered):
        user_message = "Context length exceeded"
        hint = "The conversation is too long. Start a new session or use /clear to reset."

    # Authentication errors
    elif "authentication" in lowered or "unauthorized" in lowered or "api key" in lowered:
        user_message = "Authentication failed"
        hint = "Check your API credentials in the configuration."

    # Model not found/available
    elif "model not found" in lowered or "does not exist" in lowered:
        user_message = "Model not found"
        hint = "The requested model is not available. Use /model to select a different one."

    # Build the display message
    console.print()
    if user_message:
        content = Text()
        content.append(f"❌ {user_message}\n\n", style="bold red")
        if hint:
            content.append(f"💡 {hint}\n\n", style="yellow")
        content.append("Technical details:\n", style="dim")
        # Truncate long error messages so the panel stays readable
        if len(error_str) > 300:
            content.append(error_str[:300] + "...", style="dim")
        else:
            content.append(error_str, style="dim")
        body = content
    else:
        # Fallback for unrecognized errors: show the raw error text
        body = Text(str(error), style="red")

    # Both branches share the same panel chrome, so build it once.
    console.print(Panel(
        body,
        title="[bold red]✗ LLM Error[/bold red]",
        title_align="left",
        border_style="red",
        box=ERROR_BOX,
        padding=(0, 1),
    ))
|
|
508
|
+
|
|
509
|
+
#
|
|
510
|
+
# Chain Callbacks
|
|
511
|
+
#
|
|
512
|
+
|
|
513
|
+
def on_chain_start(
    self,
    serialized: Dict[str, Any],
    inputs: Dict[str, Any],
    *,
    run_id: UUID,
    parent_run_id: Optional[UUID] = None,
    tags: Optional[List[str]] = None,
    metadata: Optional[Dict[str, Any]] = None,
    **kwargs: Any,
) -> None:
    """Called when chain starts.

    Deliberately a no-op: chain-start events fire for every internal
    step and would make the console output noisy.
    """
|
|
526
|
+
|
|
527
|
+
def on_chain_end(
    self,
    outputs: Dict[str, Any],
    *,
    run_id: UUID,
    parent_run_id: Optional[UUID] = None,
    tags: Optional[List[str]] = None,
    **kwargs: Any,
) -> None:
    """Called when chain ends.

    Deliberately a no-op: chain-end events are too frequent to be worth
    printing by default.
    """
|
|
538
|
+
|
|
539
|
+
def on_chain_error(
    self,
    error: BaseException,
    *,
    run_id: UUID,
    parent_run_id: Optional[UUID] = None,
    tags: Optional[List[str]] = None,
    **kwargs: Any,
) -> None:
    """Called when chain errors.

    In verbose mode, renders the error text in a red panel; otherwise
    stays silent.
    """
    if not self.verbose:
        return
    console.print()
    console.print(Panel(
        Text(str(error), style="red"),
        title="[bold red]✗ Chain Error[/bold red]",
        title_align="left",
        border_style="red",
        box=ERROR_BOX,
        padding=(0, 1),
    ))
|
|
559
|
+
|
|
560
|
+
#
|
|
561
|
+
# Agent Callbacks
|
|
562
|
+
#
|
|
563
|
+
|
|
564
|
+
def on_agent_action(
    self,
    action: Any,
    *,
    run_id: UUID,
    parent_run_id: Optional[UUID] = None,
    tags: Optional[List[str]] = None,
    **kwargs: Any,
) -> None:
    """Called when agent takes an action.

    Intentionally empty — on_tool_start already reports the same event,
    and handling it here would print duplicates.
    """
|
|
576
|
+
|
|
577
|
+
def on_agent_finish(
    self,
    finish: Any,
    *,
    run_id: UUID,
    parent_run_id: Optional[UUID] = None,
    tags: Optional[List[str]] = None,
    **kwargs: Any,
) -> None:
    """Called when agent finishes.

    Prints a dim "Agent Complete" rule, but only when both verbose and
    LLM-call display are enabled (i.e. debug mode).
    """
    if not (self.verbose and self.show_llm_calls):
        return
    console.print(Rule("Agent Complete", style="dim"))
|
|
589
|
+
|
|
590
|
+
#
|
|
591
|
+
# Custom Events (LangGraph)
|
|
592
|
+
#
|
|
593
|
+
|
|
594
|
+
def on_custom_event(
    self,
    name: str,
    data: Any,
    *,
    run_id: UUID,
    tags: Optional[List[str]] = None,
    metadata: Optional[Dict[str, Any]] = None,
    **kwargs: Any,
) -> None:
    """Handle custom events from LangGraph.

    Renders conditional-edge decisions and node transitions as dim
    one-liners. Both event kinds are debug-level detail, so output
    requires verbose AND show_llm_calls to be enabled.
    """
    if not self.verbose:
        return
    # Both handled events only print in debug mode; bail out early.
    if not self.show_llm_calls:
        return

    if name == "on_conditional_edge":
        cond = data.get('condition', '')
        if cond:
            console.print(f"[dim]📍 Conditional: {cond[:100]}[/dim]")
    elif name == "on_transitional_edge":
        target = data.get("next_step", "")
        # The sentinel "__end__" marks graph termination — not worth showing.
        if target and target != "__end__":
            console.print(f"[dim]→ Transition: {target}[/dim]")
|
|
621
|
+
|
|
622
|
+
#
|
|
623
|
+
# Utility Methods
|
|
624
|
+
#
|
|
625
|
+
|
|
626
|
+
def reset_step_counter(self) -> None:
    """Zero the step counter so a new conversation numbers steps from scratch."""
    self.step_counter = 0
|
|
629
|
+
|
|
630
|
+
|
|
631
|
+
def create_cli_callback(verbose: bool = True, debug: bool = False) -> CLICallbackHandler:
    """
    Build a CLI callback handler matching the requested verbosity.

    Args:
        verbose: Enable verbose output (tool calls and outputs)
        debug: Enable debug output (includes LLM calls and detailed info)

    Returns:
        CLICallbackHandler instance configured for the verbosity level
    """
    options = {
        "verbose": verbose,
        # Tool outputs and intermediate "thinking" follow the verbose flag.
        "show_tool_outputs": verbose,
        "show_thinking": verbose,
        # LLM call tracing is too chatty for normal runs — debug only.
        "show_llm_calls": debug,
    }
    return CLICallbackHandler(**options)
|