alita-sdk 0.3.462__py3-none-any.whl → 0.3.627__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- alita_sdk/cli/agent/__init__.py +5 -0
- alita_sdk/cli/agent/default.py +258 -0
- alita_sdk/cli/agent_executor.py +15 -3
- alita_sdk/cli/agent_loader.py +56 -8
- alita_sdk/cli/agent_ui.py +93 -31
- alita_sdk/cli/agents.py +2274 -230
- alita_sdk/cli/callbacks.py +96 -25
- alita_sdk/cli/cli.py +10 -1
- alita_sdk/cli/config.py +162 -9
- alita_sdk/cli/context/__init__.py +30 -0
- alita_sdk/cli/context/cleanup.py +198 -0
- alita_sdk/cli/context/manager.py +731 -0
- alita_sdk/cli/context/message.py +285 -0
- alita_sdk/cli/context/strategies.py +289 -0
- alita_sdk/cli/context/token_estimation.py +127 -0
- alita_sdk/cli/input_handler.py +419 -0
- alita_sdk/cli/inventory.py +1073 -0
- alita_sdk/cli/testcases/__init__.py +94 -0
- alita_sdk/cli/testcases/data_generation.py +119 -0
- alita_sdk/cli/testcases/discovery.py +96 -0
- alita_sdk/cli/testcases/executor.py +84 -0
- alita_sdk/cli/testcases/logger.py +85 -0
- alita_sdk/cli/testcases/parser.py +172 -0
- alita_sdk/cli/testcases/prompts.py +91 -0
- alita_sdk/cli/testcases/reporting.py +125 -0
- alita_sdk/cli/testcases/setup.py +108 -0
- alita_sdk/cli/testcases/test_runner.py +282 -0
- alita_sdk/cli/testcases/utils.py +39 -0
- alita_sdk/cli/testcases/validation.py +90 -0
- alita_sdk/cli/testcases/workflow.py +196 -0
- alita_sdk/cli/toolkit.py +14 -17
- alita_sdk/cli/toolkit_loader.py +35 -5
- alita_sdk/cli/tools/__init__.py +36 -2
- alita_sdk/cli/tools/approval.py +224 -0
- alita_sdk/cli/tools/filesystem.py +910 -64
- alita_sdk/cli/tools/planning.py +389 -0
- alita_sdk/cli/tools/terminal.py +414 -0
- alita_sdk/community/__init__.py +72 -12
- alita_sdk/community/inventory/__init__.py +236 -0
- alita_sdk/community/inventory/config.py +257 -0
- alita_sdk/community/inventory/enrichment.py +2137 -0
- alita_sdk/community/inventory/extractors.py +1469 -0
- alita_sdk/community/inventory/ingestion.py +3172 -0
- alita_sdk/community/inventory/knowledge_graph.py +1457 -0
- alita_sdk/community/inventory/parsers/__init__.py +218 -0
- alita_sdk/community/inventory/parsers/base.py +295 -0
- alita_sdk/community/inventory/parsers/csharp_parser.py +907 -0
- alita_sdk/community/inventory/parsers/go_parser.py +851 -0
- alita_sdk/community/inventory/parsers/html_parser.py +389 -0
- alita_sdk/community/inventory/parsers/java_parser.py +593 -0
- alita_sdk/community/inventory/parsers/javascript_parser.py +629 -0
- alita_sdk/community/inventory/parsers/kotlin_parser.py +768 -0
- alita_sdk/community/inventory/parsers/markdown_parser.py +362 -0
- alita_sdk/community/inventory/parsers/python_parser.py +604 -0
- alita_sdk/community/inventory/parsers/rust_parser.py +858 -0
- alita_sdk/community/inventory/parsers/swift_parser.py +832 -0
- alita_sdk/community/inventory/parsers/text_parser.py +322 -0
- alita_sdk/community/inventory/parsers/yaml_parser.py +370 -0
- alita_sdk/community/inventory/patterns/__init__.py +61 -0
- alita_sdk/community/inventory/patterns/ast_adapter.py +380 -0
- alita_sdk/community/inventory/patterns/loader.py +348 -0
- alita_sdk/community/inventory/patterns/registry.py +198 -0
- alita_sdk/community/inventory/presets.py +535 -0
- alita_sdk/community/inventory/retrieval.py +1403 -0
- alita_sdk/community/inventory/toolkit.py +173 -0
- alita_sdk/community/inventory/toolkit_utils.py +176 -0
- alita_sdk/community/inventory/visualize.py +1370 -0
- alita_sdk/configurations/__init__.py +1 -1
- alita_sdk/configurations/ado.py +141 -20
- alita_sdk/configurations/bitbucket.py +0 -3
- alita_sdk/configurations/confluence.py +76 -42
- alita_sdk/configurations/figma.py +76 -0
- alita_sdk/configurations/gitlab.py +17 -5
- alita_sdk/configurations/openapi.py +329 -0
- alita_sdk/configurations/qtest.py +72 -1
- alita_sdk/configurations/report_portal.py +96 -0
- alita_sdk/configurations/sharepoint.py +148 -0
- alita_sdk/configurations/testio.py +83 -0
- alita_sdk/runtime/clients/artifact.py +3 -3
- alita_sdk/runtime/clients/client.py +353 -48
- alita_sdk/runtime/clients/sandbox_client.py +0 -21
- alita_sdk/runtime/langchain/_constants_bkup.py +1318 -0
- alita_sdk/runtime/langchain/assistant.py +123 -26
- alita_sdk/runtime/langchain/constants.py +642 -1
- alita_sdk/runtime/langchain/document_loaders/AlitaExcelLoader.py +103 -60
- alita_sdk/runtime/langchain/document_loaders/AlitaJSONLinesLoader.py +77 -0
- alita_sdk/runtime/langchain/document_loaders/AlitaJSONLoader.py +6 -3
- alita_sdk/runtime/langchain/document_loaders/AlitaPowerPointLoader.py +226 -7
- alita_sdk/runtime/langchain/document_loaders/AlitaTextLoader.py +5 -2
- alita_sdk/runtime/langchain/document_loaders/constants.py +12 -7
- alita_sdk/runtime/langchain/langraph_agent.py +279 -73
- alita_sdk/runtime/langchain/utils.py +82 -15
- alita_sdk/runtime/llms/preloaded.py +2 -6
- alita_sdk/runtime/skills/__init__.py +91 -0
- alita_sdk/runtime/skills/callbacks.py +498 -0
- alita_sdk/runtime/skills/discovery.py +540 -0
- alita_sdk/runtime/skills/executor.py +610 -0
- alita_sdk/runtime/skills/input_builder.py +371 -0
- alita_sdk/runtime/skills/models.py +330 -0
- alita_sdk/runtime/skills/registry.py +355 -0
- alita_sdk/runtime/skills/skill_runner.py +330 -0
- alita_sdk/runtime/toolkits/__init__.py +7 -0
- alita_sdk/runtime/toolkits/application.py +21 -9
- alita_sdk/runtime/toolkits/artifact.py +15 -5
- alita_sdk/runtime/toolkits/datasource.py +13 -6
- alita_sdk/runtime/toolkits/mcp.py +139 -251
- alita_sdk/runtime/toolkits/mcp_config.py +1048 -0
- alita_sdk/runtime/toolkits/planning.py +178 -0
- alita_sdk/runtime/toolkits/skill_router.py +238 -0
- alita_sdk/runtime/toolkits/subgraph.py +251 -6
- alita_sdk/runtime/toolkits/tools.py +238 -32
- alita_sdk/runtime/toolkits/vectorstore.py +11 -5
- alita_sdk/runtime/tools/__init__.py +3 -1
- alita_sdk/runtime/tools/application.py +20 -6
- alita_sdk/runtime/tools/artifact.py +511 -28
- alita_sdk/runtime/tools/data_analysis.py +183 -0
- alita_sdk/runtime/tools/function.py +43 -15
- alita_sdk/runtime/tools/image_generation.py +50 -44
- alita_sdk/runtime/tools/llm.py +852 -67
- alita_sdk/runtime/tools/loop.py +3 -1
- alita_sdk/runtime/tools/loop_output.py +3 -1
- alita_sdk/runtime/tools/mcp_remote_tool.py +25 -10
- alita_sdk/runtime/tools/mcp_server_tool.py +7 -6
- alita_sdk/runtime/tools/planning/__init__.py +36 -0
- alita_sdk/runtime/tools/planning/models.py +246 -0
- alita_sdk/runtime/tools/planning/wrapper.py +607 -0
- alita_sdk/runtime/tools/router.py +2 -4
- alita_sdk/runtime/tools/sandbox.py +9 -6
- alita_sdk/runtime/tools/skill_router.py +776 -0
- alita_sdk/runtime/tools/tool.py +3 -1
- alita_sdk/runtime/tools/vectorstore.py +7 -2
- alita_sdk/runtime/tools/vectorstore_base.py +51 -11
- alita_sdk/runtime/utils/AlitaCallback.py +137 -21
- alita_sdk/runtime/utils/constants.py +5 -1
- alita_sdk/runtime/utils/mcp_client.py +492 -0
- alita_sdk/runtime/utils/mcp_oauth.py +202 -5
- alita_sdk/runtime/utils/mcp_sse_client.py +36 -7
- alita_sdk/runtime/utils/mcp_tools_discovery.py +124 -0
- alita_sdk/runtime/utils/serialization.py +155 -0
- alita_sdk/runtime/utils/streamlit.py +6 -10
- alita_sdk/runtime/utils/toolkit_utils.py +16 -5
- alita_sdk/runtime/utils/utils.py +36 -0
- alita_sdk/tools/__init__.py +113 -29
- alita_sdk/tools/ado/repos/__init__.py +51 -33
- alita_sdk/tools/ado/repos/repos_wrapper.py +148 -89
- alita_sdk/tools/ado/test_plan/__init__.py +25 -9
- alita_sdk/tools/ado/test_plan/test_plan_wrapper.py +23 -1
- alita_sdk/tools/ado/utils.py +1 -18
- alita_sdk/tools/ado/wiki/__init__.py +25 -8
- alita_sdk/tools/ado/wiki/ado_wrapper.py +291 -22
- alita_sdk/tools/ado/work_item/__init__.py +26 -9
- alita_sdk/tools/ado/work_item/ado_wrapper.py +56 -3
- alita_sdk/tools/advanced_jira_mining/__init__.py +11 -8
- alita_sdk/tools/aws/delta_lake/__init__.py +13 -9
- alita_sdk/tools/aws/delta_lake/tool.py +5 -1
- alita_sdk/tools/azure_ai/search/__init__.py +11 -8
- alita_sdk/tools/azure_ai/search/api_wrapper.py +1 -1
- alita_sdk/tools/base/tool.py +5 -1
- alita_sdk/tools/base_indexer_toolkit.py +170 -45
- alita_sdk/tools/bitbucket/__init__.py +17 -12
- alita_sdk/tools/bitbucket/api_wrapper.py +59 -11
- alita_sdk/tools/bitbucket/cloud_api_wrapper.py +49 -35
- alita_sdk/tools/browser/__init__.py +5 -4
- alita_sdk/tools/carrier/__init__.py +5 -6
- alita_sdk/tools/carrier/backend_reports_tool.py +6 -6
- alita_sdk/tools/carrier/run_ui_test_tool.py +6 -6
- alita_sdk/tools/carrier/ui_reports_tool.py +5 -5
- alita_sdk/tools/chunkers/__init__.py +3 -1
- alita_sdk/tools/chunkers/code/treesitter/treesitter.py +37 -13
- alita_sdk/tools/chunkers/sematic/json_chunker.py +1 -0
- alita_sdk/tools/chunkers/sematic/markdown_chunker.py +97 -6
- alita_sdk/tools/chunkers/universal_chunker.py +270 -0
- alita_sdk/tools/cloud/aws/__init__.py +10 -7
- alita_sdk/tools/cloud/azure/__init__.py +10 -7
- alita_sdk/tools/cloud/gcp/__init__.py +10 -7
- alita_sdk/tools/cloud/k8s/__init__.py +10 -7
- alita_sdk/tools/code/linter/__init__.py +10 -8
- alita_sdk/tools/code/loaders/codesearcher.py +3 -2
- alita_sdk/tools/code/sonar/__init__.py +10 -7
- alita_sdk/tools/code_indexer_toolkit.py +73 -23
- alita_sdk/tools/confluence/__init__.py +21 -15
- alita_sdk/tools/confluence/api_wrapper.py +78 -23
- alita_sdk/tools/confluence/loader.py +4 -2
- alita_sdk/tools/custom_open_api/__init__.py +12 -5
- alita_sdk/tools/elastic/__init__.py +11 -8
- alita_sdk/tools/elitea_base.py +493 -30
- alita_sdk/tools/figma/__init__.py +58 -11
- alita_sdk/tools/figma/api_wrapper.py +1235 -143
- alita_sdk/tools/figma/figma_client.py +73 -0
- alita_sdk/tools/figma/toon_tools.py +2748 -0
- alita_sdk/tools/github/__init__.py +13 -14
- alita_sdk/tools/github/github_client.py +224 -100
- alita_sdk/tools/github/graphql_client_wrapper.py +119 -33
- alita_sdk/tools/github/schemas.py +14 -5
- alita_sdk/tools/github/tool.py +5 -1
- alita_sdk/tools/github/tool_prompts.py +9 -22
- alita_sdk/tools/gitlab/__init__.py +15 -11
- alita_sdk/tools/gitlab/api_wrapper.py +207 -41
- alita_sdk/tools/gitlab_org/__init__.py +10 -8
- alita_sdk/tools/gitlab_org/api_wrapper.py +63 -64
- alita_sdk/tools/google/bigquery/__init__.py +13 -12
- alita_sdk/tools/google/bigquery/tool.py +5 -1
- alita_sdk/tools/google_places/__init__.py +10 -8
- alita_sdk/tools/google_places/api_wrapper.py +1 -1
- alita_sdk/tools/jira/__init__.py +17 -11
- alita_sdk/tools/jira/api_wrapper.py +91 -40
- alita_sdk/tools/keycloak/__init__.py +11 -8
- alita_sdk/tools/localgit/__init__.py +9 -3
- alita_sdk/tools/localgit/local_git.py +62 -54
- alita_sdk/tools/localgit/tool.py +5 -1
- alita_sdk/tools/memory/__init__.py +11 -3
- alita_sdk/tools/non_code_indexer_toolkit.py +1 -0
- alita_sdk/tools/ocr/__init__.py +11 -8
- alita_sdk/tools/openapi/__init__.py +490 -114
- alita_sdk/tools/openapi/api_wrapper.py +1368 -0
- alita_sdk/tools/openapi/tool.py +20 -0
- alita_sdk/tools/pandas/__init__.py +20 -12
- alita_sdk/tools/pandas/api_wrapper.py +38 -25
- alita_sdk/tools/pandas/dataframe/generator/base.py +3 -1
- alita_sdk/tools/postman/__init__.py +11 -11
- alita_sdk/tools/pptx/__init__.py +10 -9
- alita_sdk/tools/pptx/pptx_wrapper.py +1 -1
- alita_sdk/tools/qtest/__init__.py +30 -10
- alita_sdk/tools/qtest/api_wrapper.py +430 -13
- alita_sdk/tools/rally/__init__.py +10 -8
- alita_sdk/tools/rally/api_wrapper.py +1 -1
- alita_sdk/tools/report_portal/__init__.py +12 -9
- alita_sdk/tools/salesforce/__init__.py +10 -9
- alita_sdk/tools/servicenow/__init__.py +17 -14
- alita_sdk/tools/servicenow/api_wrapper.py +1 -1
- alita_sdk/tools/sharepoint/__init__.py +10 -8
- alita_sdk/tools/sharepoint/api_wrapper.py +4 -4
- alita_sdk/tools/slack/__init__.py +10 -8
- alita_sdk/tools/slack/api_wrapper.py +2 -2
- alita_sdk/tools/sql/__init__.py +11 -9
- alita_sdk/tools/testio/__init__.py +10 -8
- alita_sdk/tools/testrail/__init__.py +11 -8
- alita_sdk/tools/testrail/api_wrapper.py +1 -1
- alita_sdk/tools/utils/__init__.py +9 -4
- alita_sdk/tools/utils/content_parser.py +77 -3
- alita_sdk/tools/utils/text_operations.py +410 -0
- alita_sdk/tools/utils/tool_prompts.py +79 -0
- alita_sdk/tools/vector_adapters/VectorStoreAdapter.py +17 -13
- alita_sdk/tools/xray/__init__.py +12 -9
- alita_sdk/tools/yagmail/__init__.py +9 -3
- alita_sdk/tools/zephyr/__init__.py +9 -7
- alita_sdk/tools/zephyr_enterprise/__init__.py +11 -8
- alita_sdk/tools/zephyr_essential/__init__.py +10 -8
- alita_sdk/tools/zephyr_essential/api_wrapper.py +30 -13
- alita_sdk/tools/zephyr_essential/client.py +2 -2
- alita_sdk/tools/zephyr_scale/__init__.py +11 -9
- alita_sdk/tools/zephyr_scale/api_wrapper.py +2 -2
- alita_sdk/tools/zephyr_squad/__init__.py +10 -8
- {alita_sdk-0.3.462.dist-info → alita_sdk-0.3.627.dist-info}/METADATA +147 -7
- alita_sdk-0.3.627.dist-info/RECORD +468 -0
- alita_sdk-0.3.627.dist-info/entry_points.txt +2 -0
- alita_sdk-0.3.462.dist-info/RECORD +0 -384
- alita_sdk-0.3.462.dist-info/entry_points.txt +0 -2
- {alita_sdk-0.3.462.dist-info → alita_sdk-0.3.627.dist-info}/WHEEL +0 -0
- {alita_sdk-0.3.462.dist-info → alita_sdk-0.3.627.dist-info}/licenses/LICENSE +0 -0
- {alita_sdk-0.3.462.dist-info → alita_sdk-0.3.627.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,410 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Shared text operations utilities for file manipulation across toolkits.
|
|
3
|
+
|
|
4
|
+
Provides common functionality for:
|
|
5
|
+
- Parsing OLD/NEW marker-based edits
|
|
6
|
+
- Text file validation
|
|
7
|
+
- Line-based slicing and partial reads
|
|
8
|
+
- Content searching with context
|
|
9
|
+
"""
|
|
10
|
+
import re
import logging
from typing import Any, Dict, List, Optional, Tuple
|
|
13
|
+
|
|
14
|
+
logger = logging.getLogger(__name__)
|
|
15
|
+
|
|
16
|
+
# Text file extensions that support editing
|
|
17
|
+
TEXT_EDITABLE_EXTENSIONS = {
|
|
18
|
+
'.md', '.txt', '.csv', '.json', '.xml', '.html',
|
|
19
|
+
'.yaml', '.yml', '.ini', '.conf', '.log', '.sh',
|
|
20
|
+
'.py', '.js', '.ts', '.jsx', '.tsx', '.java', '.go',
|
|
21
|
+
'.rb', '.php', '.c', '.cpp', '.h', '.hpp', '.cs',
|
|
22
|
+
'.sql', '.r', '.m', '.swift', '.kt', '.rs', '.scala'
|
|
23
|
+
}
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
def parse_old_new_markers(file_query: str) -> List[Tuple[str, str]]:
|
|
27
|
+
"""
|
|
28
|
+
Parse OLD/NEW marker-based edit instructions.
|
|
29
|
+
|
|
30
|
+
Extracts the first pair of old and new content from a file query using markers:
|
|
31
|
+
- OLD <<<< ... >>>> OLD
|
|
32
|
+
- NEW <<<< ... >>>> NEW
|
|
33
|
+
|
|
34
|
+
Args:
|
|
35
|
+
file_query: String containing marked old and new content sections
|
|
36
|
+
|
|
37
|
+
Returns:
|
|
38
|
+
List with at most one tuple (old_content, new_content) for the first edit pair.
|
|
39
|
+
Returns empty list if no valid OLD/NEW pair is found.
|
|
40
|
+
|
|
41
|
+
Example:
|
|
42
|
+
>>> query = '''
|
|
43
|
+
... OLD <<<<
|
|
44
|
+
... Hello World
|
|
45
|
+
... >>>> OLD
|
|
46
|
+
... NEW <<<<
|
|
47
|
+
... Hello Mars
|
|
48
|
+
... >>>> NEW
|
|
49
|
+
... '''
|
|
50
|
+
>>> parse_old_new_markers(query)
|
|
51
|
+
[('Hello World', 'Hello Mars')]
|
|
52
|
+
"""
|
|
53
|
+
# Split the file content by lines
|
|
54
|
+
code_lines = file_query.split("\n")
|
|
55
|
+
|
|
56
|
+
# Initialize variables to track section state
|
|
57
|
+
in_old_section = False
|
|
58
|
+
in_new_section = False
|
|
59
|
+
old_content = None
|
|
60
|
+
new_content = None
|
|
61
|
+
|
|
62
|
+
# Temporary storage for the current section's content
|
|
63
|
+
current_section_content = []
|
|
64
|
+
|
|
65
|
+
# Iterate through each line in the file content
|
|
66
|
+
for line in code_lines:
|
|
67
|
+
# Check for OLD section start
|
|
68
|
+
if "OLD <<<" in line:
|
|
69
|
+
in_old_section = True
|
|
70
|
+
current_section_content = [] # Reset current section content
|
|
71
|
+
continue # Skip the line with the marker
|
|
72
|
+
|
|
73
|
+
# Check for OLD section end
|
|
74
|
+
if ">>>> OLD" in line:
|
|
75
|
+
in_old_section = False
|
|
76
|
+
old_content = "\n".join(current_section_content).strip()
|
|
77
|
+
current_section_content = [] # Reset current section content
|
|
78
|
+
continue # Skip the line with the marker
|
|
79
|
+
|
|
80
|
+
# Check for NEW section start
|
|
81
|
+
if "NEW <<<" in line:
|
|
82
|
+
in_new_section = True
|
|
83
|
+
current_section_content = [] # Reset current section content
|
|
84
|
+
continue # Skip the line with the marker
|
|
85
|
+
|
|
86
|
+
# Check for NEW section end
|
|
87
|
+
if ">>>> NEW" in line:
|
|
88
|
+
in_new_section = False
|
|
89
|
+
new_content = "\n".join(current_section_content).strip()
|
|
90
|
+
# Return immediately after finding the first complete pair
|
|
91
|
+
if old_content is not None and new_content is not None:
|
|
92
|
+
return [(old_content, new_content)]
|
|
93
|
+
current_section_content = [] # Reset current section content
|
|
94
|
+
continue # Skip the line with the marker
|
|
95
|
+
|
|
96
|
+
# If currently in an OLD or NEW section, add the line to the current section content
|
|
97
|
+
if in_old_section or in_new_section:
|
|
98
|
+
current_section_content.append(line)
|
|
99
|
+
|
|
100
|
+
# Return empty list if no complete pair found
|
|
101
|
+
return []
|
|
102
|
+
|
|
103
|
+
|
|
104
|
+
def is_text_editable(filename: str) -> bool:
|
|
105
|
+
"""
|
|
106
|
+
Check if a file is editable as text based on its extension.
|
|
107
|
+
|
|
108
|
+
Args:
|
|
109
|
+
filename: Name or path of the file to check
|
|
110
|
+
|
|
111
|
+
Returns:
|
|
112
|
+
True if file extension is in the text-editable whitelist
|
|
113
|
+
|
|
114
|
+
Example:
|
|
115
|
+
>>> is_text_editable("config.json")
|
|
116
|
+
True
|
|
117
|
+
>>> is_text_editable("image.png")
|
|
118
|
+
False
|
|
119
|
+
"""
|
|
120
|
+
from pathlib import Path
|
|
121
|
+
ext = Path(filename).suffix.lower()
|
|
122
|
+
return ext in TEXT_EDITABLE_EXTENSIONS
|
|
123
|
+
|
|
124
|
+
|
|
125
|
+
def apply_line_slice(
|
|
126
|
+
content: str,
|
|
127
|
+
offset: Optional[int] = None,
|
|
128
|
+
limit: Optional[int] = None,
|
|
129
|
+
head: Optional[int] = None,
|
|
130
|
+
tail: Optional[int] = None
|
|
131
|
+
) -> str:
|
|
132
|
+
"""
|
|
133
|
+
Apply line-based slicing to text content.
|
|
134
|
+
|
|
135
|
+
Supports multiple modes:
|
|
136
|
+
- offset + limit: Read from line `offset` for `limit` lines (1-indexed)
|
|
137
|
+
- head: Read only first N lines
|
|
138
|
+
- tail: Read only last N lines
|
|
139
|
+
- No params: Return full content
|
|
140
|
+
|
|
141
|
+
Args:
|
|
142
|
+
content: Text content to slice
|
|
143
|
+
offset: Starting line number (1-indexed, inclusive)
|
|
144
|
+
limit: Number of lines to read from offset
|
|
145
|
+
head: Return only first N lines
|
|
146
|
+
tail: Return only last N lines
|
|
147
|
+
|
|
148
|
+
Returns:
|
|
149
|
+
Sliced content as string
|
|
150
|
+
|
|
151
|
+
Example:
|
|
152
|
+
>>> text = "line1\\nline2\\nline3\\nline4\\nline5"
|
|
153
|
+
>>> apply_line_slice(text, offset=2, limit=2)
|
|
154
|
+
'line2\\nline3'
|
|
155
|
+
>>> apply_line_slice(text, head=2)
|
|
156
|
+
'line1\\nline2'
|
|
157
|
+
>>> apply_line_slice(text, tail=2)
|
|
158
|
+
'line4\\nline5'
|
|
159
|
+
"""
|
|
160
|
+
if not content:
|
|
161
|
+
return content
|
|
162
|
+
|
|
163
|
+
lines = content.splitlines(keepends=True)
|
|
164
|
+
|
|
165
|
+
# Head mode: first N lines
|
|
166
|
+
if head is not None:
|
|
167
|
+
return ''.join(lines[:head])
|
|
168
|
+
|
|
169
|
+
# Tail mode: last N lines
|
|
170
|
+
if tail is not None:
|
|
171
|
+
return ''.join(lines[-tail:] if tail > 0 else lines)
|
|
172
|
+
|
|
173
|
+
# Offset + limit mode: slice from offset for limit lines
|
|
174
|
+
if offset is not None:
|
|
175
|
+
start_idx = max(0, offset - 1) # Convert 1-indexed to 0-indexed
|
|
176
|
+
if limit is not None:
|
|
177
|
+
end_idx = start_idx + limit
|
|
178
|
+
return ''.join(lines[start_idx:end_idx])
|
|
179
|
+
else:
|
|
180
|
+
return ''.join(lines[start_idx:])
|
|
181
|
+
|
|
182
|
+
# No slicing parameters: return full content
|
|
183
|
+
return content
|
|
184
|
+
|
|
185
|
+
|
|
186
|
+
def search_in_content(
|
|
187
|
+
content: str,
|
|
188
|
+
pattern: str,
|
|
189
|
+
is_regex: bool = True,
|
|
190
|
+
context_lines: int = 2
|
|
191
|
+
) -> List[Dict[str, any]]:
|
|
192
|
+
"""
|
|
193
|
+
Search for pattern in content with context lines.
|
|
194
|
+
|
|
195
|
+
Args:
|
|
196
|
+
content: Text content to search
|
|
197
|
+
pattern: Search pattern (regex if is_regex=True, else literal string)
|
|
198
|
+
is_regex: Whether to treat pattern as regex (default True)
|
|
199
|
+
context_lines: Number of lines before/after match to include (default 2)
|
|
200
|
+
|
|
201
|
+
Returns:
|
|
202
|
+
List of match dictionaries with keys:
|
|
203
|
+
- line_number: 1-indexed line number of match
|
|
204
|
+
- line_content: The matching line
|
|
205
|
+
- match_text: The actual matched text
|
|
206
|
+
- context_before: List of lines before match
|
|
207
|
+
- context_after: List of lines after match
|
|
208
|
+
|
|
209
|
+
Example:
|
|
210
|
+
>>> text = "line1\\nHello World\\nline3"
|
|
211
|
+
>>> matches = search_in_content(text, "Hello", is_regex=False)
|
|
212
|
+
>>> matches[0]['line_number']
|
|
213
|
+
2
|
|
214
|
+
>>> matches[0]['match_text']
|
|
215
|
+
'Hello'
|
|
216
|
+
"""
|
|
217
|
+
if not content:
|
|
218
|
+
return []
|
|
219
|
+
|
|
220
|
+
lines = content.splitlines()
|
|
221
|
+
matches = []
|
|
222
|
+
|
|
223
|
+
# Compile regex pattern or escape for literal search
|
|
224
|
+
if is_regex:
|
|
225
|
+
try:
|
|
226
|
+
regex = re.compile(pattern, re.IGNORECASE)
|
|
227
|
+
except re.error as e:
|
|
228
|
+
logger.warning(f"Invalid regex pattern '{pattern}': {e}")
|
|
229
|
+
return []
|
|
230
|
+
else:
|
|
231
|
+
regex = re.compile(re.escape(pattern), re.IGNORECASE)
|
|
232
|
+
|
|
233
|
+
# Search each line
|
|
234
|
+
for line_idx, line in enumerate(lines):
|
|
235
|
+
match = regex.search(line)
|
|
236
|
+
if match:
|
|
237
|
+
line_number = line_idx + 1 # Convert to 1-indexed
|
|
238
|
+
|
|
239
|
+
# Get context lines
|
|
240
|
+
context_start = max(0, line_idx - context_lines)
|
|
241
|
+
context_end = min(len(lines), line_idx + context_lines + 1)
|
|
242
|
+
|
|
243
|
+
context_before = lines[context_start:line_idx]
|
|
244
|
+
context_after = lines[line_idx + 1:context_end]
|
|
245
|
+
|
|
246
|
+
matches.append({
|
|
247
|
+
'line_number': line_number,
|
|
248
|
+
'line_content': line,
|
|
249
|
+
'match_text': match.group(0),
|
|
250
|
+
'context_before': context_before,
|
|
251
|
+
'context_after': context_after,
|
|
252
|
+
})
|
|
253
|
+
|
|
254
|
+
return matches
|
|
255
|
+
|
|
256
|
+
|
|
257
|
+
def _normalize_for_match(text: str) -> str:
|
|
258
|
+
"""Normalize text for tolerant OLD/NEW matching.
|
|
259
|
+
|
|
260
|
+
- Split into lines
|
|
261
|
+
- Replace common Unicode spaces with regular spaces
|
|
262
|
+
- Strip leading/trailing whitespace per line
|
|
263
|
+
- Collapse internal whitespace runs to a single space
|
|
264
|
+
- Join with '\n'
|
|
265
|
+
"""
|
|
266
|
+
lines = text.splitlines()
|
|
267
|
+
norm_lines = []
|
|
268
|
+
for line in lines:
|
|
269
|
+
# Normalize common Unicode spaces to regular space
|
|
270
|
+
line = line.replace("\u00A0", " ").replace("\u2009", " ")
|
|
271
|
+
# Strip outer whitespace
|
|
272
|
+
line = line.strip()
|
|
273
|
+
# Collapse internal whitespace
|
|
274
|
+
line = re.sub(r"\s+", " ", line)
|
|
275
|
+
norm_lines.append(line)
|
|
276
|
+
return "\n".join(norm_lines)
|
|
277
|
+
|
|
278
|
+
|
|
279
|
+
def try_apply_edit(
|
|
280
|
+
content: str,
|
|
281
|
+
old_text: str,
|
|
282
|
+
new_text: str,
|
|
283
|
+
file_path: Optional[str] = None,
|
|
284
|
+
) -> Tuple[str, Optional[str]]:
|
|
285
|
+
"""Apply a single OLD/NEW edit with a tolerant fallback.
|
|
286
|
+
|
|
287
|
+
This helper is used by edit_file to apply one (old_text, new_text) pair:
|
|
288
|
+
|
|
289
|
+
1. First tries exact substring replacement (old_text in content).
|
|
290
|
+
2. If that fails, performs a tolerant, line-based match:
|
|
291
|
+
- Builds a logical OLD sequence without empty/whitespace-only lines
|
|
292
|
+
- Scans content while skipping empty/whitespace-only lines
|
|
293
|
+
- Compares using `_normalize_for_match` so minor spacing differences
|
|
294
|
+
don't break the match
|
|
295
|
+
- If exactly one such region is found, replaces that region with new_text
|
|
296
|
+
- If zero or multiple regions are found, no change is applied
|
|
297
|
+
|
|
298
|
+
Args:
|
|
299
|
+
content: Current file content
|
|
300
|
+
old_text: OLD block extracted from markers
|
|
301
|
+
new_text: NEW block extracted from markers
|
|
302
|
+
file_path: Optional path for logging context
|
|
303
|
+
|
|
304
|
+
Returns:
|
|
305
|
+
(updated_content, warning_message)
|
|
306
|
+
- updated_content: resulting content (may be unchanged)
|
|
307
|
+
- warning_message: human-readable warning if no edit was applied
|
|
308
|
+
or if the operation was ambiguous; None if an edit was
|
|
309
|
+
successfully and unambiguously applied.
|
|
310
|
+
"""
|
|
311
|
+
# Stage 1: exact match
|
|
312
|
+
if old_text:
|
|
313
|
+
occurrences = content.count(old_text)
|
|
314
|
+
if occurrences == 1:
|
|
315
|
+
return content.replace(old_text, new_text, 1), None
|
|
316
|
+
if occurrences > 1:
|
|
317
|
+
msg = (
|
|
318
|
+
"Exact OLD block appears %d times in %s; no replacement applied to avoid ambiguity. "
|
|
319
|
+
"OLD value: %r" % (
|
|
320
|
+
occurrences,
|
|
321
|
+
file_path or "<unknown>",
|
|
322
|
+
old_text,
|
|
323
|
+
)
|
|
324
|
+
)
|
|
325
|
+
logger.warning(msg)
|
|
326
|
+
return content, msg
|
|
327
|
+
|
|
328
|
+
# Stage 2: tolerant match
|
|
329
|
+
if not old_text or not old_text.strip() or not content:
|
|
330
|
+
msg = None
|
|
331
|
+
if not old_text or not old_text.strip():
|
|
332
|
+
msg = (
|
|
333
|
+
"OLD block is empty or whitespace-only; no replacement applied. "
|
|
334
|
+
"OLD value: %r" % (old_text,)
|
|
335
|
+
)
|
|
336
|
+
elif not content:
|
|
337
|
+
msg = "Content is empty; no replacement applied."
|
|
338
|
+
if msg:
|
|
339
|
+
logger.warning(msg)
|
|
340
|
+
return content, msg
|
|
341
|
+
|
|
342
|
+
# Logical OLD: drop empty/whitespace-only lines
|
|
343
|
+
old_lines_raw = old_text.splitlines()
|
|
344
|
+
old_lines = [l for l in old_lines_raw if l.strip()]
|
|
345
|
+
if not old_lines:
|
|
346
|
+
msg = (
|
|
347
|
+
"OLD block contains only empty/whitespace lines; no replacement applied. "
|
|
348
|
+
"OLD value: %r" % (old_text,)
|
|
349
|
+
)
|
|
350
|
+
logger.warning(msg)
|
|
351
|
+
return content, msg
|
|
352
|
+
|
|
353
|
+
# Precompute normalized OLD (joined by '\n')
|
|
354
|
+
norm_old = _normalize_for_match("\n".join(old_lines))
|
|
355
|
+
|
|
356
|
+
content_lines = content.splitlines(keepends=True)
|
|
357
|
+
total = len(content_lines)
|
|
358
|
+
candidates: list[tuple[int, int, str]] = [] # (start_idx, end_idx, block)
|
|
359
|
+
|
|
360
|
+
# Scan content for regions whose non-empty, normalized lines match norm_old
|
|
361
|
+
for start in range(total):
|
|
362
|
+
idx = start
|
|
363
|
+
collected_non_empty: list[str] = []
|
|
364
|
+
window_lines: list[str] = []
|
|
365
|
+
|
|
366
|
+
while idx < total and len(collected_non_empty) < len(old_lines):
|
|
367
|
+
line = content_lines[idx]
|
|
368
|
+
window_lines.append(line)
|
|
369
|
+
if line.strip():
|
|
370
|
+
collected_non_empty.append(line)
|
|
371
|
+
idx += 1
|
|
372
|
+
|
|
373
|
+
if len(collected_non_empty) < len(old_lines):
|
|
374
|
+
# Not enough non-empty lines from this start; no more windows possible
|
|
375
|
+
break
|
|
376
|
+
|
|
377
|
+
# Compare normalized non-empty content lines to normalized OLD
|
|
378
|
+
candidate_norm = _normalize_for_match("".join(collected_non_empty))
|
|
379
|
+
if candidate_norm == norm_old:
|
|
380
|
+
block = "".join(window_lines)
|
|
381
|
+
candidates.append((start, idx, block))
|
|
382
|
+
|
|
383
|
+
if not candidates:
|
|
384
|
+
msg = (
|
|
385
|
+
"Normalized OLD block not found in %s. OLD value: %r"
|
|
386
|
+
% (file_path or "<unknown>", old_text)
|
|
387
|
+
)
|
|
388
|
+
logger.warning(msg)
|
|
389
|
+
return content, msg
|
|
390
|
+
|
|
391
|
+
if len(candidates) > 1:
|
|
392
|
+
msg = (
|
|
393
|
+
"Multiple candidate regions for OLD block in %s; "
|
|
394
|
+
"no change applied to avoid ambiguity. OLD value: %r"
|
|
395
|
+
% (file_path or "<unknown>", old_text)
|
|
396
|
+
)
|
|
397
|
+
logger.warning(msg)
|
|
398
|
+
return content, msg
|
|
399
|
+
|
|
400
|
+
start_idx, end_idx, candidate_block = candidates[0]
|
|
401
|
+
updated = content.replace(candidate_block, new_text, 1)
|
|
402
|
+
|
|
403
|
+
logger.info(
|
|
404
|
+
"Applied tolerant OLD/NEW replacement in %s around lines %d-%d",
|
|
405
|
+
file_path or "<unknown>",
|
|
406
|
+
start_idx + 1,
|
|
407
|
+
start_idx + len(old_lines),
|
|
408
|
+
)
|
|
409
|
+
|
|
410
|
+
return updated, None
|
|
@@ -0,0 +1,79 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Shared tool prompt constants for file operations across toolkits.
|
|
3
|
+
|
|
4
|
+
These constants provide consistent descriptions for update_file and edit_file tools.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
# Base description for OLD/NEW marker format - used by all file editing operations
|
|
8
|
+
UPDATE_FILE_MARKERS_DESCRIPTION = """
|
|
9
|
+
**Marker format:**
|
|
10
|
+
- OLD block: starts with `OLD <<<<` and ends with `>>>> OLD`
|
|
11
|
+
- NEW block: starts with `NEW <<<<` and ends with `>>>> NEW`
|
|
12
|
+
- Content must be on separate lines between opening and closing markers
|
|
13
|
+
- Leading/trailing whitespace in content is stripped
|
|
14
|
+
- Only the first OLD/NEW pair is processed
|
|
15
|
+
|
|
16
|
+
**Examples:**
|
|
17
|
+
|
|
18
|
+
Example 1 - Replace single line:
|
|
19
|
+
```
|
|
20
|
+
OLD <<<<
|
|
21
|
+
old contents
|
|
22
|
+
>>>> OLD
|
|
23
|
+
NEW <<<<
|
|
24
|
+
new contents
|
|
25
|
+
>>>> NEW
|
|
26
|
+
```
|
|
27
|
+
|
|
28
|
+
Example 2 - Add new lines:
|
|
29
|
+
```
|
|
30
|
+
OLD <<<<
|
|
31
|
+
existing line
|
|
32
|
+
>>>> OLD
|
|
33
|
+
NEW <<<<
|
|
34
|
+
existing line
|
|
35
|
+
added line
|
|
36
|
+
>>>> NEW
|
|
37
|
+
```
|
|
38
|
+
|
|
39
|
+
Example 3 - Replace multiple lines:
|
|
40
|
+
```
|
|
41
|
+
OLD <<<<
|
|
42
|
+
old line 1
|
|
43
|
+
old line 2
|
|
44
|
+
>>>> OLD
|
|
45
|
+
NEW <<<<
|
|
46
|
+
new line 1
|
|
47
|
+
new line 2
|
|
48
|
+
new line 3
|
|
49
|
+
>>>> NEW
|
|
50
|
+
```"""
|
|
51
|
+
|
|
52
|
+
# Description for update_file when file_path is a separate parameter
|
|
53
|
+
UPDATE_FILE_PROMPT_NO_PATH = f"""Updates a file using OLD/NEW markers.
|
|
54
|
+
{UPDATE_FILE_MARKERS_DESCRIPTION}"""
|
|
55
|
+
|
|
56
|
+
# Description for update_file when file_path is in the first line of file_query
|
|
57
|
+
UPDATE_FILE_PROMPT_WITH_PATH = f"""Updates a file in repository.
|
|
58
|
+
|
|
59
|
+
**Input format:**
|
|
60
|
+
First non-empty line must be the file path (must not start with a slash), followed by OLD/NEW markers.
|
|
61
|
+
|
|
62
|
+
Example:
|
|
63
|
+
```
|
|
64
|
+
path/to/file.txt
|
|
65
|
+
OLD <<<<
|
|
66
|
+
old content
|
|
67
|
+
>>>> OLD
|
|
68
|
+
NEW <<<<
|
|
69
|
+
new content
|
|
70
|
+
>>>> NEW
|
|
71
|
+
```
|
|
72
|
+
{UPDATE_FILE_MARKERS_DESCRIPTION}"""
|
|
73
|
+
|
|
74
|
+
# Common description for edit_file/update_file operations
|
|
75
|
+
EDIT_FILE_DESCRIPTION = """Edit file by path using OLD/NEW markers for precise replacements.
|
|
76
|
+
|
|
77
|
+
Only works with text files (markdown, txt, csv, json, xml, html, yaml, code files).
|
|
78
|
+
"""
|
|
79
|
+
|
|
@@ -31,8 +31,8 @@ class VectorStoreAdapter(ABC):
|
|
|
31
31
|
pass
|
|
32
32
|
|
|
33
33
|
@abstractmethod
|
|
34
|
-
def clean_collection(self, vectorstore_wrapper, index_name: str = ''):
|
|
35
|
-
"""Clean the vectorstore collection by deleting all indexed data."""
|
|
34
|
+
def clean_collection(self, vectorstore_wrapper, index_name: str = '', including_index_meta: bool = False):
|
|
35
|
+
"""Clean the vectorstore collection by deleting all indexed data. If including_index_meta is True, skip the index_meta records."""
|
|
36
36
|
pass
|
|
37
37
|
|
|
38
38
|
@abstractmethod
|
|
@@ -132,18 +132,22 @@ class PGVectorAdapter(VectorStoreAdapter):
|
|
|
132
132
|
logger.error(f"Failed to get indexed IDs from PGVector: {str(e)}")
|
|
133
133
|
return []
|
|
134
134
|
|
|
135
|
-
def clean_collection(self, vectorstore_wrapper, index_name: str = ''):
|
|
136
|
-
"""Clean the vectorstore collection by deleting all indexed data."""
|
|
137
|
-
# This logic deletes all data from the vectorstore collection without removal of collection.
|
|
138
|
-
# Collection itself remains available for future indexing.
|
|
135
|
+
def clean_collection(self, vectorstore_wrapper, index_name: str = '', including_index_meta: bool = False):
|
|
136
|
+
"""Clean the vectorstore collection by deleting all indexed data. If including_index_meta is True, skip the index_meta records."""
|
|
139
137
|
from sqlalchemy.orm import Session
|
|
140
|
-
from sqlalchemy import func
|
|
141
|
-
|
|
138
|
+
from sqlalchemy import func, or_
|
|
142
139
|
store = vectorstore_wrapper.vectorstore
|
|
143
140
|
with Session(store.session_maker.bind) as session:
|
|
144
|
-
|
|
145
|
-
|
|
146
|
-
|
|
141
|
+
if including_index_meta:
|
|
142
|
+
session.query(store.EmbeddingStore).filter(
|
|
143
|
+
func.jsonb_extract_path_text(store.EmbeddingStore.cmetadata, 'collection') == index_name
|
|
144
|
+
).delete(synchronize_session=False)
|
|
145
|
+
else:
|
|
146
|
+
session.query(store.EmbeddingStore).filter(
|
|
147
|
+
func.jsonb_extract_path_text(store.EmbeddingStore.cmetadata, 'collection') == index_name,
|
|
148
|
+
or_(func.jsonb_extract_path_text(store.EmbeddingStore.cmetadata, 'type').is_(None),
|
|
149
|
+
func.jsonb_extract_path_text(store.EmbeddingStore.cmetadata, 'type') != IndexerKeywords.INDEX_META_TYPE.value)
|
|
150
|
+
).delete(synchronize_session=False)
|
|
147
151
|
session.commit()
|
|
148
152
|
|
|
149
153
|
def is_vectorstore_type(self, vectorstore) -> bool:
|
|
@@ -334,8 +338,8 @@ class ChromaAdapter(VectorStoreAdapter):
|
|
|
334
338
|
logger.error(f"Failed to get indexed IDs from Chroma: {str(e)}")
|
|
335
339
|
return []
|
|
336
340
|
|
|
337
|
-
def clean_collection(self, vectorstore_wrapper, index_name: str = ''):
|
|
338
|
-
"""Clean the vectorstore collection by deleting all indexed data."""
|
|
341
|
+
def clean_collection(self, vectorstore_wrapper, index_name: str = '', including_index_meta: bool = False):
|
|
342
|
+
"""Clean the vectorstore collection by deleting all indexed data. including_index_meta is ignored."""
|
|
339
343
|
vectorstore_wrapper.vectorstore.delete(ids=self.get_indexed_ids(vectorstore_wrapper, index_name))
|
|
340
344
|
|
|
341
345
|
def get_indexed_data(self, vectorstore_wrapper):
|
alita_sdk/tools/xray/__init__.py
CHANGED
|
@@ -8,9 +8,10 @@ from pydantic import create_model, BaseModel, Field
|
|
|
8
8
|
from .api_wrapper import XrayApiWrapper
|
|
9
9
|
from ..base.tool import BaseAction
|
|
10
10
|
from ..elitea_base import filter_missconfigured_index_tools
|
|
11
|
-
from ..utils import clean_string, get_max_toolkit_length
|
|
11
|
+
from ..utils import clean_string, get_max_toolkit_length
|
|
12
12
|
from ...configurations.pgvector import PgVectorConfiguration
|
|
13
13
|
from ...configurations.xray import XrayConfiguration
|
|
14
|
+
from ...runtime.utils.constants import TOOLKIT_NAME_META, TOOL_NAME_META, TOOLKIT_TYPE_META
|
|
14
15
|
|
|
15
16
|
name = "xray_cloud"
|
|
16
17
|
|
|
@@ -34,15 +35,13 @@ def get_tools(tool):
|
|
|
34
35
|
|
|
35
36
|
class XrayToolkit(BaseToolkit):
|
|
36
37
|
tools: List[BaseTool] = []
|
|
37
|
-
toolkit_max_length: int = 0
|
|
38
38
|
|
|
39
39
|
@staticmethod
|
|
40
40
|
def toolkit_config_schema() -> BaseModel:
|
|
41
41
|
selected_tools = {x['name']: x['args_schema'].schema() for x in XrayApiWrapper.model_construct().get_available_tools()}
|
|
42
|
-
XrayToolkit.toolkit_max_length = get_max_toolkit_length(selected_tools)
|
|
43
42
|
return create_model(
|
|
44
43
|
name,
|
|
45
|
-
limit=(Optional[int], Field(description="Limit", default=100)),
|
|
44
|
+
limit=(Optional[int], Field(description="Limit", default=100, gt=0)),
|
|
46
45
|
xray_configuration=(XrayConfiguration, Field(description="Xray Configuration", json_schema_extra={'configuration_types': ['xray']})),
|
|
47
46
|
pgvector_configuration=(Optional[PgVectorConfiguration], Field(default=None,
|
|
48
47
|
description="PgVector Configuration",
|
|
@@ -56,7 +55,6 @@ class XrayToolkit(BaseToolkit):
|
|
|
56
55
|
{
|
|
57
56
|
'metadata': {
|
|
58
57
|
"label": "XRAY cloud", "icon_url": "xray.svg",
|
|
59
|
-
"max_length": XrayToolkit.toolkit_max_length,
|
|
60
58
|
"categories": ["test management"],
|
|
61
59
|
"extra_categories": ["test automation", "test case management", "test planning"]
|
|
62
60
|
}
|
|
@@ -76,18 +74,23 @@ class XrayToolkit(BaseToolkit):
|
|
|
76
74
|
**(kwargs.get('pgvector_configuration') or {}),
|
|
77
75
|
}
|
|
78
76
|
xray_api_wrapper = XrayApiWrapper(**wrapper_payload)
|
|
79
|
-
prefix = clean_string(toolkit_name, cls.toolkit_max_length) + TOOLKIT_SPLITTER if toolkit_name else ''
|
|
80
77
|
available_tools = xray_api_wrapper.get_available_tools()
|
|
81
78
|
tools = []
|
|
82
79
|
for tool in available_tools:
|
|
83
80
|
if selected_tools:
|
|
84
81
|
if tool["name"] not in selected_tools:
|
|
85
82
|
continue
|
|
83
|
+
description = tool["description"]
|
|
84
|
+
if toolkit_name:
|
|
85
|
+
description = f"Toolkit: {toolkit_name}\n{description}"
|
|
86
|
+
description = description + "\nXray instance: " + xray_api_wrapper.base_url
|
|
87
|
+
description = description[:1000]
|
|
86
88
|
tools.append(BaseAction(
|
|
87
89
|
api_wrapper=xray_api_wrapper,
|
|
88
|
-
name=
|
|
89
|
-
description=
|
|
90
|
-
args_schema=tool["args_schema"]
|
|
90
|
+
name=tool["name"],
|
|
91
|
+
description=description,
|
|
92
|
+
args_schema=tool["args_schema"],
|
|
93
|
+
metadata={TOOLKIT_NAME_META: toolkit_name, TOOLKIT_TYPE_META: name, TOOL_NAME_META: tool["name"]} if toolkit_name else {TOOL_NAME_META: tool["name"]}
|
|
91
94
|
))
|
|
92
95
|
return cls(tools=tools)
|
|
93
96
|
|
|
@@ -6,6 +6,7 @@ from pydantic import create_model, BaseModel, Field, SecretStr
|
|
|
6
6
|
|
|
7
7
|
from .yagmail_wrapper import YagmailWrapper, SMTP_SERVER
|
|
8
8
|
from ..base.tool import BaseAction
|
|
9
|
+
from ...runtime.utils.constants import TOOLKIT_NAME_META, TOOL_NAME_META, TOOLKIT_TYPE_META
|
|
9
10
|
|
|
10
11
|
name = "yagmail"
|
|
11
12
|
|
|
@@ -34,7 +35,7 @@ class AlitaYagmailToolkit(BaseToolkit):
|
|
|
34
35
|
)
|
|
35
36
|
|
|
36
37
|
@classmethod
|
|
37
|
-
def get_toolkit(cls, selected_tools: list[str] | None = None, **kwargs):
|
|
38
|
+
def get_toolkit(cls, selected_tools: list[str] | None = None, toolkit_name: Optional[str] = None, **kwargs):
|
|
38
39
|
if selected_tools is None:
|
|
39
40
|
selected_tools = []
|
|
40
41
|
yagmail_wrapper = YagmailWrapper(**kwargs)
|
|
@@ -44,11 +45,16 @@ class AlitaYagmailToolkit(BaseToolkit):
|
|
|
44
45
|
if selected_tools:
|
|
45
46
|
if tool["name"] not in selected_tools:
|
|
46
47
|
continue
|
|
48
|
+
description = tool["description"]
|
|
49
|
+
if toolkit_name:
|
|
50
|
+
description = f"Toolkit: {toolkit_name}\n{description}"
|
|
51
|
+
description = description[:1000]
|
|
47
52
|
tools.append(BaseAction(
|
|
48
53
|
api_wrapper=yagmail_wrapper,
|
|
49
54
|
name=tool["name"],
|
|
50
|
-
description=
|
|
51
|
-
args_schema=tool["args_schema"]
|
|
55
|
+
description=description,
|
|
56
|
+
args_schema=tool["args_schema"],
|
|
57
|
+
metadata={TOOLKIT_NAME_META: toolkit_name, TOOLKIT_TYPE_META: name, TOOL_NAME_META: tool["name"]} if toolkit_name else {TOOL_NAME_META: tool["name"]}
|
|
52
58
|
))
|
|
53
59
|
return cls(tools=tools)
|
|
54
60
|
|