alita-sdk 0.3.497__py3-none-any.whl → 0.3.516__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of alita-sdk might be problematic.
- alita_sdk/cli/inventory.py +12 -195
- alita_sdk/community/inventory/__init__.py +12 -0
- alita_sdk/community/inventory/toolkit.py +9 -5
- alita_sdk/community/inventory/toolkit_utils.py +176 -0
- alita_sdk/configurations/ado.py +144 -0
- alita_sdk/configurations/confluence.py +76 -42
- alita_sdk/configurations/figma.py +76 -0
- alita_sdk/configurations/gitlab.py +2 -0
- alita_sdk/configurations/qtest.py +72 -1
- alita_sdk/configurations/report_portal.py +96 -0
- alita_sdk/configurations/sharepoint.py +148 -0
- alita_sdk/configurations/testio.py +83 -0
- alita_sdk/runtime/clients/artifact.py +2 -2
- alita_sdk/runtime/clients/client.py +24 -19
- alita_sdk/runtime/clients/sandbox_client.py +14 -0
- alita_sdk/runtime/langchain/assistant.py +64 -23
- alita_sdk/runtime/langchain/constants.py +270 -1
- alita_sdk/runtime/langchain/document_loaders/AlitaJSONLinesLoader.py +77 -0
- alita_sdk/runtime/langchain/document_loaders/AlitaJSONLoader.py +2 -1
- alita_sdk/runtime/langchain/document_loaders/constants.py +2 -1
- alita_sdk/runtime/langchain/langraph_agent.py +8 -9
- alita_sdk/runtime/langchain/utils.py +6 -1
- alita_sdk/runtime/toolkits/artifact.py +14 -5
- alita_sdk/runtime/toolkits/datasource.py +13 -6
- alita_sdk/runtime/toolkits/mcp.py +26 -157
- alita_sdk/runtime/toolkits/planning.py +10 -5
- alita_sdk/runtime/toolkits/tools.py +23 -7
- alita_sdk/runtime/toolkits/vectorstore.py +11 -5
- alita_sdk/runtime/tools/artifact.py +139 -6
- alita_sdk/runtime/tools/llm.py +20 -10
- alita_sdk/runtime/tools/mcp_remote_tool.py +2 -3
- alita_sdk/runtime/tools/mcp_server_tool.py +2 -4
- alita_sdk/runtime/utils/AlitaCallback.py +30 -1
- alita_sdk/runtime/utils/mcp_client.py +33 -6
- alita_sdk/runtime/utils/mcp_oauth.py +125 -8
- alita_sdk/runtime/utils/mcp_sse_client.py +35 -6
- alita_sdk/runtime/utils/utils.py +2 -0
- alita_sdk/tools/__init__.py +15 -0
- alita_sdk/tools/ado/repos/__init__.py +10 -12
- alita_sdk/tools/ado/test_plan/__init__.py +23 -8
- alita_sdk/tools/ado/wiki/__init__.py +24 -8
- alita_sdk/tools/ado/wiki/ado_wrapper.py +21 -7
- alita_sdk/tools/ado/work_item/__init__.py +24 -8
- alita_sdk/tools/advanced_jira_mining/__init__.py +10 -8
- alita_sdk/tools/aws/delta_lake/__init__.py +12 -9
- alita_sdk/tools/aws/delta_lake/tool.py +5 -1
- alita_sdk/tools/azure_ai/search/__init__.py +9 -7
- alita_sdk/tools/base/tool.py +5 -1
- alita_sdk/tools/base_indexer_toolkit.py +25 -0
- alita_sdk/tools/bitbucket/__init__.py +14 -10
- alita_sdk/tools/bitbucket/api_wrapper.py +50 -2
- alita_sdk/tools/browser/__init__.py +5 -4
- alita_sdk/tools/carrier/__init__.py +5 -6
- alita_sdk/tools/cloud/aws/__init__.py +9 -7
- alita_sdk/tools/cloud/azure/__init__.py +9 -7
- alita_sdk/tools/cloud/gcp/__init__.py +9 -7
- alita_sdk/tools/cloud/k8s/__init__.py +9 -7
- alita_sdk/tools/code/linter/__init__.py +9 -8
- alita_sdk/tools/code/sonar/__init__.py +9 -7
- alita_sdk/tools/confluence/__init__.py +15 -10
- alita_sdk/tools/custom_open_api/__init__.py +11 -5
- alita_sdk/tools/elastic/__init__.py +10 -8
- alita_sdk/tools/elitea_base.py +387 -9
- alita_sdk/tools/figma/__init__.py +8 -7
- alita_sdk/tools/github/__init__.py +12 -14
- alita_sdk/tools/github/github_client.py +68 -2
- alita_sdk/tools/github/tool.py +5 -1
- alita_sdk/tools/gitlab/__init__.py +14 -11
- alita_sdk/tools/gitlab/api_wrapper.py +81 -1
- alita_sdk/tools/gitlab_org/__init__.py +9 -8
- alita_sdk/tools/google/bigquery/__init__.py +12 -12
- alita_sdk/tools/google/bigquery/tool.py +5 -1
- alita_sdk/tools/google_places/__init__.py +9 -8
- alita_sdk/tools/jira/__init__.py +15 -10
- alita_sdk/tools/keycloak/__init__.py +10 -8
- alita_sdk/tools/localgit/__init__.py +8 -3
- alita_sdk/tools/localgit/local_git.py +62 -54
- alita_sdk/tools/localgit/tool.py +5 -1
- alita_sdk/tools/memory/__init__.py +11 -3
- alita_sdk/tools/ocr/__init__.py +10 -8
- alita_sdk/tools/openapi/__init__.py +6 -2
- alita_sdk/tools/pandas/__init__.py +9 -7
- alita_sdk/tools/postman/__init__.py +10 -11
- alita_sdk/tools/pptx/__init__.py +9 -9
- alita_sdk/tools/qtest/__init__.py +9 -8
- alita_sdk/tools/rally/__init__.py +9 -8
- alita_sdk/tools/report_portal/__init__.py +11 -9
- alita_sdk/tools/salesforce/__init__.py +9 -9
- alita_sdk/tools/servicenow/__init__.py +10 -8
- alita_sdk/tools/sharepoint/__init__.py +9 -8
- alita_sdk/tools/slack/__init__.py +8 -7
- alita_sdk/tools/sql/__init__.py +9 -8
- alita_sdk/tools/testio/__init__.py +9 -8
- alita_sdk/tools/testrail/__init__.py +10 -8
- alita_sdk/tools/utils/__init__.py +9 -4
- alita_sdk/tools/utils/text_operations.py +254 -0
- alita_sdk/tools/xray/__init__.py +10 -8
- alita_sdk/tools/yagmail/__init__.py +8 -3
- alita_sdk/tools/zephyr/__init__.py +8 -7
- alita_sdk/tools/zephyr_enterprise/__init__.py +10 -8
- alita_sdk/tools/zephyr_essential/__init__.py +9 -8
- alita_sdk/tools/zephyr_scale/__init__.py +9 -8
- alita_sdk/tools/zephyr_squad/__init__.py +9 -8
- {alita_sdk-0.3.497.dist-info → alita_sdk-0.3.516.dist-info}/METADATA +1 -1
- {alita_sdk-0.3.497.dist-info → alita_sdk-0.3.516.dist-info}/RECORD +109 -106
- {alita_sdk-0.3.497.dist-info → alita_sdk-0.3.516.dist-info}/WHEEL +0 -0
- {alita_sdk-0.3.497.dist-info → alita_sdk-0.3.516.dist-info}/entry_points.txt +0 -0
- {alita_sdk-0.3.497.dist-info → alita_sdk-0.3.516.dist-info}/licenses/LICENSE +0 -0
- {alita_sdk-0.3.497.dist-info → alita_sdk-0.3.516.dist-info}/top_level.txt +0 -0
alita_sdk/tools/salesforce/__init__.py
CHANGED

@@ -5,7 +5,7 @@ from ..base.tool import BaseAction
 from pydantic import create_model, BaseModel, ConfigDict, Field
 
 from ..elitea_base import filter_missconfigured_index_tools
-from ..utils import clean_string,
+from ..utils import clean_string, get_max_toolkit_length
 from ...configurations.salesforce import SalesforceConfiguration
 
 name = "salesforce"
@@ -19,11 +19,9 @@ def get_tools(tool):
 
 class SalesforceToolkit(BaseToolkit):
     tools: List[BaseTool] = []
-    toolkit_max_length: int = 0
     @staticmethod
     def toolkit_config_schema() -> BaseModel:
         available_tools = {x['name']: x['args_schema'].schema() for x in SalesforceApiWrapper.model_construct().get_available_tools()}
-        SalesforceToolkit.toolkit_max_length = get_max_toolkit_length(available_tools)
         return create_model(
             name,
             api_version=(str, Field(description="Salesforce API Version", default='v59.0')),
@@ -31,7 +29,6 @@ class SalesforceToolkit(BaseToolkit):
             selected_tools=(List[Literal[tuple(available_tools)]], Field(default=[], json_schema_extra={'args_schemas': available_tools})),
             __config__=ConfigDict(json_schema_extra={'metadata': {
                 "label": "Salesforce", "icon_url": "salesforce-icon.svg",
-                "max_length": SalesforceToolkit.toolkit_max_length,
                 "categories": ["other"],
                 "extra_categories": ["customer relationship management", "cloud computing", "marketing automation", "salesforce"]
             }})
@@ -48,18 +45,21 @@ class SalesforceToolkit(BaseToolkit):
             **kwargs.get('salesforce_configuration', {}),
         }
         api_wrapper = SalesforceApiWrapper(**wrapper_payload)
-        prefix = clean_string(toolkit_name, cls.toolkit_max_length) + TOOLKIT_SPLITTER if toolkit_name else ''
         tools = []
 
         for tool in api_wrapper.get_available_tools():
             if selected_tools and tool["name"] not in selected_tools:
                 continue
-
+            description = f"Salesforce Tool: {tool['description']}"
+            if toolkit_name:
+                description = f"{description}\nToolkit: {toolkit_name}"
+            description = description[:1000]
             tools.append(BaseAction(
                 api_wrapper=api_wrapper,
-                name=
-                description=
-                args_schema=tool["args_schema"]
+                name=tool["name"],
+                description=description,
+                args_schema=tool["args_schema"],
+                metadata={"toolkit_name": toolkit_name} if toolkit_name else {}
             ))
 
         return cls(tools=tools)
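The Salesforce hunks above, and the matching ServiceNow, SharePoint, Slack, SQL, TestIO, and TestRail hunks below, apply one refactor: tool names lose the `clean_string(toolkit_name, ...) + TOOLKIT_SPLITTER` prefix, and toolkit identity moves into a description capped at 1000 characters plus a `metadata` entry. A minimal sketch of the before/after behaviour follows; `old_style_name` and `new_style_tool` are hypothetical helpers, and the prefix cleanup is a simplified stand-in for the SDK's `clean_string`:

# Sketch only: illustrates the naming change in this release, not code from the package.
# old_style_name / new_style_tool are hypothetical helpers; the prefix cleanup below is a
# simplified stand-in for the SDK's clean_string().
TOOLKIT_SPLITTER = "___"

def old_style_name(toolkit_name: str, tool_name: str, max_length: int = 25) -> str:
    # Pre-0.3.516: tool names carried a cleaned, length-limited toolkit prefix.
    prefix = toolkit_name.replace(" ", "")[:max_length] + TOOLKIT_SPLITTER if toolkit_name else ""
    return prefix + tool_name

def new_style_tool(toolkit_name: str, tool_name: str, tool_description: str) -> dict:
    # 0.3.516: plain tool name; toolkit identity lives in the description and metadata.
    description = f"Salesforce Tool: {tool_description}"
    if toolkit_name:
        description = f"{description}\nToolkit: {toolkit_name}"
    return {
        "name": tool_name,
        "description": description[:1000],
        "metadata": {"toolkit_name": toolkit_name} if toolkit_name else {},
    }

print(old_style_name("My CRM Kit", "search_salesforce"))
# MyCRMKit___search_salesforce
print(new_style_tool("My CRM Kit", "search_salesforce", "Run a SOQL query")["name"])
# search_salesforce

The practical effect is that registered tool names stay short and stable regardless of how a toolkit instance is named, while an agent can still recover the owning toolkit from the description or the metadata field.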
alita_sdk/tools/servicenow/__init__.py
CHANGED

@@ -7,7 +7,7 @@ from ..base.tool import BaseAction
 from pydantic import create_model, BaseModel, ConfigDict, Field
 
 from ..elitea_base import filter_missconfigured_index_tools
-from ..utils import clean_string,
+from ..utils import clean_string, get_max_toolkit_length
 from ...configurations.service_now import ServiceNowConfiguration
 
 
@@ -26,13 +26,11 @@ def get_tools(tool):
 
 class ServiceNowToolkit(BaseToolkit):
     tools: List[BaseTool] = []
-    toolkit_max_length: int = 0
 
     @staticmethod
     def toolkit_config_schema() -> BaseModel:
         selected_tools = {x['name']: x['args_schema'].schema() for x in
                           ServiceNowAPIWrapper.model_construct().get_available_tools()}
-        ServiceNowToolkit.toolkit_max_length = get_max_toolkit_length(selected_tools)
         return create_model(
             name,
             name=(str, Field(description="Toolkit name")),
@@ -47,7 +45,6 @@ class ServiceNowToolkit(BaseToolkit):
             'metadata': {
                 "label": "ServiceNow",
                 "icon_url": "service-now.svg",
-                "max_length": ServiceNowToolkit.toolkit_max_length,
                 "hidden": False,
                 "sections": {
                     "auth": {
@@ -79,18 +76,23 @@ class ServiceNowToolkit(BaseToolkit):
             **kwargs['servicenow_configuration'],
         }
         servicenow_api_wrapper = ServiceNowAPIWrapper(**wrapper_payload)
-        prefix = clean_string(toolkit_name, cls.toolkit_max_length) + TOOLKIT_SPLITTER if toolkit_name else ''
         available_tools = servicenow_api_wrapper.get_available_tools()
         tools = []
         for tool in available_tools:
             if selected_tools:
                 if tool["name"] not in selected_tools:
                     continue
+            description = tool["description"]
+            if toolkit_name:
+                description = f"Toolkit: {toolkit_name}\n{description}"
+            description = f"ServiceNow: {servicenow_api_wrapper.base_url}\n{description}"
+            description = description[:1000]
             tools.append(BaseAction(
                 api_wrapper=servicenow_api_wrapper,
-                name=
-                description=
-                args_schema=tool["args_schema"]
+                name=tool["name"],
+                description=description,
+                args_schema=tool["args_schema"],
+                metadata={"toolkit_name": toolkit_name} if toolkit_name else {}
             ))
         return cls(tools=tools)
 
alita_sdk/tools/sharepoint/__init__.py
CHANGED

@@ -5,7 +5,7 @@ from pydantic import create_model, BaseModel, ConfigDict, Field
 from .api_wrapper import SharepointApiWrapper
 from ..base.tool import BaseAction
 from ..elitea_base import filter_missconfigured_index_tools
-from ..utils import clean_string,
+from ..utils import clean_string, get_max_toolkit_length
 from ...configurations.pgvector import PgVectorConfiguration
 from ...configurations.sharepoint import SharepointConfiguration
 
@@ -29,12 +29,10 @@ def get_tools(tool):
 
 class SharepointToolkit(BaseToolkit):
     tools: List[BaseTool] = []
-    toolkit_max_length: int = 0
 
     @staticmethod
     def toolkit_config_schema() -> BaseModel:
         selected_tools = {x['name']: x['args_schema'].schema() for x in SharepointApiWrapper.model_construct().get_available_tools()}
-        SharepointToolkit.toolkit_max_length = get_max_toolkit_length(selected_tools)
         return create_model(
             name,
             sharepoint_configuration=(SharepointConfiguration, Field(description="SharePoint Configuration", json_schema_extra={'configuration_types': ['sharepoint']})),
@@ -48,7 +46,6 @@ class SharepointToolkit(BaseToolkit):
             __config__=ConfigDict(json_schema_extra={
                 'metadata': {
                     "label": "Sharepoint", "icon_url": "sharepoint.svg",
-                    "max_length": SharepointToolkit.toolkit_max_length,
                     "categories": ["office"],
                     "extra_categories": ["microsoft", "cloud storage", "team collaboration", "content management"]
                 }})
@@ -65,18 +62,22 @@ class SharepointToolkit(BaseToolkit):
             **(kwargs.get('pgvector_configuration') or {}),
         }
         sharepoint_api_wrapper = SharepointApiWrapper(**wrapper_payload)
-        prefix = clean_string(toolkit_name, cls.toolkit_max_length) + TOOLKIT_SPLITTER if toolkit_name else ''
         available_tools = sharepoint_api_wrapper.get_available_tools()
         tools = []
         for tool in available_tools:
             if selected_tools:
                 if tool["name"] not in selected_tools:
                     continue
+            description = f"Sharepoint {sharepoint_api_wrapper.site_url}\n{tool['description']}"
+            if toolkit_name:
+                description = f"{description}\nToolkit: {toolkit_name}"
+            description = description[:1000]
             tools.append(BaseAction(
                 api_wrapper=sharepoint_api_wrapper,
-                name=
-                description=
-                args_schema=tool["args_schema"]
+                name=tool["name"],
+                description=description,
+                args_schema=tool["args_schema"],
+                metadata={"toolkit_name": toolkit_name} if toolkit_name else {}
             ))
         return cls(tools=tools)
 
alita_sdk/tools/slack/__init__.py
CHANGED

@@ -12,7 +12,7 @@ from pydantic import create_model, BaseModel, Field
 from ..base.tool import BaseAction
 
 from .api_wrapper import SlackApiWrapper
-from ..utils import
+from ..utils import clean_string, get_max_toolkit_length, check_connection_response
 from slack_sdk.errors import SlackApiError
 from slack_sdk import WebClient
 
@@ -28,12 +28,10 @@ def get_tools(tool):
 
 class SlackToolkit(BaseToolkit):
     tools: List[BaseTool] = []
-    toolkit_max_length: int = 0
 
     @staticmethod
     def toolkit_config_schema() -> BaseModel:
         selected_tools = {x['name']: x['args_schema'].schema() for x in SlackApiWrapper.model_construct().get_available_tools()}
-        SlackToolkit.toolkit_max_length = get_max_toolkit_length(selected_tools)
 
         @check_connection_response
         def check_connection(self):
@@ -59,7 +57,6 @@ class SlackToolkit(BaseToolkit):
             'metadata': {
                 "label": "Slack",
                 "icon_url": "slack-icon.svg",
-                "max_length": SlackToolkit.toolkit_max_length,
                 "categories": ["communication"],
                 "extra_categories": ["slack", "chat", "messaging", "collaboration"],
             }
@@ -79,17 +76,21 @@ class SlackToolkit(BaseToolkit):
             **kwargs['slack_configuration'],
         }
         slack_api_wrapper = SlackApiWrapper(**wrapper_payload)
-        prefix = clean_string(toolkit_name, cls.toolkit_max_length) + TOOLKIT_SPLITTER if toolkit_name else ''
         available_tools = slack_api_wrapper.get_available_tools()
         tools = []
         for tool in available_tools:
             if selected_tools and tool["name"] not in selected_tools:
                 continue
+            description = f"Slack Tool: {tool['description']}"
+            if toolkit_name:
+                description = f"{description}\nToolkit: {toolkit_name}"
+            description = description[:1000]
             tools.append(BaseAction(
                 api_wrapper=slack_api_wrapper,
-                name=
-                description=
+                name=tool["name"],
+                description=description,
                 args_schema=tool["args_schema"],
+                metadata={"toolkit_name": toolkit_name} if toolkit_name else {}
             ))
         return cls(tools=tools)
 
alita_sdk/tools/sql/__init__.py
CHANGED

@@ -7,7 +7,7 @@ from .api_wrapper import SQLApiWrapper
 from ..base.tool import BaseAction
 from .models import SQLDialect
 from ..elitea_base import filter_missconfigured_index_tools
-from ..utils import
+from ..utils import clean_string, get_max_toolkit_length
 from ...configurations.sql import SqlConfiguration
 
 name = "sql"
@@ -24,12 +24,10 @@ def get_tools(tool):
 
 class SQLToolkit(BaseToolkit):
     tools: list[BaseTool] = []
-    toolkit_max_length: int = 0
 
     @staticmethod
    def toolkit_config_schema() -> BaseModel:
         selected_tools = {x['name']: x['args_schema'].schema() for x in SQLApiWrapper.model_construct().get_available_tools()}
-        SQLToolkit.toolkit_max_length = get_max_toolkit_length(selected_tools)
         supported_dialects = (d.value for d in SQLDialect)
         return create_model(
             name,
@@ -42,7 +40,6 @@ class SQLToolkit(BaseToolkit):
             'metadata':
                 {
                     "label": "SQL", "icon_url": "sql-icon.svg",
-                    "max_length": SQLToolkit.toolkit_max_length,
                     "categories": ["development"],
                     "extra_categories": ["sql", "data management", "data analysis"]}})
         )
@@ -57,17 +54,21 @@ class SQLToolkit(BaseToolkit):
             **kwargs.get('sql_configuration', {}),
         }
         sql_api_wrapper = SQLApiWrapper(**wrapper_payload)
-        prefix = clean_string(toolkit_name, cls.toolkit_max_length) + TOOLKIT_SPLITTER if toolkit_name else ''
         available_tools = sql_api_wrapper.get_available_tools()
         tools = []
         for tool in available_tools:
             if selected_tools and tool["name"] not in selected_tools:
                 continue
+            description = f"{tool['description']}\nDatabase: {sql_api_wrapper.database_name}. Host: {sql_api_wrapper.host}"
+            if toolkit_name:
+                description = f"{description}\nToolkit: {toolkit_name}"
+            description = description[:1000]
             tools.append(BaseAction(
                 api_wrapper=sql_api_wrapper,
-                name=
-                description=
-                args_schema=tool["args_schema"]
+                name=tool["name"],
+                description=description,
+                args_schema=tool["args_schema"],
+                metadata={"toolkit_name": toolkit_name} if toolkit_name else {}
             ))
         return cls(tools=tools)
 
alita_sdk/tools/testio/__init__.py
CHANGED

@@ -6,7 +6,7 @@ from pydantic import create_model, BaseModel, ConfigDict, Field
 from .api_wrapper import TestIOApiWrapper
 from ..base.tool import BaseAction
 from ..elitea_base import filter_missconfigured_index_tools
-from ..utils import clean_string,
+from ..utils import clean_string, get_max_toolkit_length
 from ...configurations.testio import TestIOConfiguration
 
 name = "testio"
@@ -19,8 +19,6 @@ def get_tools(tool):
     ).get_tools()
 
 
-TOOLKIT_MAX_LENGTH = 25
-
 class TestIOToolkit(BaseToolkit):
     tools: list[BaseTool] = []
 
@@ -33,7 +31,6 @@ class TestIOToolkit(BaseToolkit):
             testio_configuration=(TestIOConfiguration, Field(description="TestIO Configuration", json_schema_extra={'configuration_types': ['testio']})),
             selected_tools=(List[Literal[tuple(selected_tools)]], Field(default=[], json_schema_extra={'args_schemas': selected_tools})),
             __config__=ConfigDict(json_schema_extra={'metadata': {"label": "TestIO", "icon_url": "testio-icon.svg",
-                "max_length": TOOLKIT_MAX_LENGTH,
                 "categories": ["testing"],
                 "extra_categories": ["test automation", "test case management", "test planning"]}})
         )
@@ -48,17 +45,21 @@ class TestIOToolkit(BaseToolkit):
             **kwargs.get('testio_configuration', {}),
         }
         testio_api_wrapper = TestIOApiWrapper(**wrapper_payload)
-        prefix = clean_string(toolkit_name, TOOLKIT_MAX_LENGTH) + TOOLKIT_SPLITTER if toolkit_name else ''
         available_tools = testio_api_wrapper.get_available_tools()
         tools = []
         for tool in available_tools:
             if selected_tools and tool["name"] not in selected_tools:
                 continue
+            description = tool["description"]
+            if toolkit_name:
+                description = f"Toolkit: {toolkit_name}\n{description}"
+            description = description[:1000]
             tools.append(BaseAction(
                 api_wrapper=testio_api_wrapper,
-                name=
-                description=
-                args_schema=tool["args_schema"]
+                name=tool["name"],
+                description=description,
+                args_schema=tool["args_schema"],
+                metadata={"toolkit_name": toolkit_name} if toolkit_name else {}
             ))
         return cls(tools=tools)
 
alita_sdk/tools/testrail/__init__.py
CHANGED

@@ -7,7 +7,7 @@ import requests
 from .api_wrapper import TestrailAPIWrapper
 from ..base.tool import BaseAction
 from ..elitea_base import filter_missconfigured_index_tools
-from ..utils import clean_string,
+from ..utils import clean_string, get_max_toolkit_length, check_connection_response
 from ...configurations.testrail import TestRailConfiguration
 from ...configurations.pgvector import PgVectorConfiguration
 
@@ -31,12 +31,10 @@ def get_tools(tool):
 
 class TestrailToolkit(BaseToolkit):
     tools: List[BaseTool] = []
-    toolkit_max_length: int = 0
 
     @staticmethod
     def toolkit_config_schema() -> BaseModel:
         selected_tools = {x['name']: x['args_schema'].schema() for x in TestrailAPIWrapper.model_construct().get_available_tools()}
-        TestrailToolkit.toolkit_max_length = get_max_toolkit_length(selected_tools)
         m = create_model(
             name,
             testrail_configuration=(Optional[TestRailConfiguration], Field(description="TestRail Configuration", json_schema_extra={'configuration_types': ['testrail']})),
@@ -47,7 +45,6 @@ class TestrailToolkit(BaseToolkit):
             selected_tools=(List[Literal[tuple(selected_tools)]], Field(default=[], json_schema_extra={'args_schemas': selected_tools})),
             __config__=ConfigDict(json_schema_extra={'metadata':
                 {"label": "Testrail", "icon_url": "testrail-icon.svg",
-                 "max_length": TestrailToolkit.toolkit_max_length,
                  "categories": ["test management"],
                  "extra_categories": ["quality assurance", "test case management", "test planning"]
                  }})
@@ -77,18 +74,23 @@ class TestrailToolkit(BaseToolkit):
             **(kwargs.get('pgvector_configuration') or {}),
         }
         testrail_api_wrapper = TestrailAPIWrapper(**wrapper_payload)
-        prefix = clean_string(toolkit_name, cls.toolkit_max_length) + TOOLKIT_SPLITTER if toolkit_name else ''
         available_tools = testrail_api_wrapper.get_available_tools()
         tools = []
         for tool in available_tools:
             if selected_tools:
                 if tool["name"] not in selected_tools:
                     continue
+            description = tool["description"]
+            if toolkit_name:
+                description = f"Toolkit: {toolkit_name}\n{description}"
+            description = description + "\nTestrail instance: " + testrail_api_wrapper.url
+            description = description[:1000]
             tools.append(BaseAction(
                 api_wrapper=testrail_api_wrapper,
-                name=
-                description=
-                args_schema=tool["args_schema"]
+                name=tool["name"],
+                description=description,
+                args_schema=tool["args_schema"],
+                metadata={"toolkit_name": toolkit_name} if toolkit_name else {}
             ))
         return cls(tools=tools)
 
alita_sdk/tools/utils/__init__.py
CHANGED

@@ -7,6 +7,8 @@ import requests
 from pydantic import create_model, Field
 
 
+# DEPRECATED: Tool names no longer use prefixes
+# Kept for backward compatibility only
 TOOLKIT_SPLITTER = "___"
 TOOL_NAME_LIMIT = 64
 
@@ -22,10 +24,13 @@ def clean_string(s: str, max_length: int = 0):
 
 
 def get_max_toolkit_length(selected_tools: Any):
-    """Calculates the maximum length of the toolkit name
-
-
-
+    """DEPRECATED: Calculates the maximum length of the toolkit name.
+
+    This function is deprecated as tool names no longer use prefixes.
+    Returns a fixed value for backward compatibility.
+    """
+    # Return a reasonable default since we no longer use prefixes
+    return 50
 
 
 def parse_list(list_str: str = None) -> List[str]:
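With the hunks above, `get_max_toolkit_length` becomes a deprecation shim that ignores its argument and returns a fixed 50, while `TOOLKIT_SPLITTER` and `TOOL_NAME_LIMIT` stay exported only so older callers keep importing cleanly. A small sketch of the resulting behaviour, assuming these names are importable from `alita_sdk.tools.utils` as the relative imports in the toolkit diffs suggest:

# Sketch only: demonstrates the deprecation shim shown above.
# Assumes alita_sdk.tools.utils exposes these names, as the toolkit imports suggest.
from alita_sdk.tools.utils import get_max_toolkit_length, TOOLKIT_SPLITTER

schemas = {"create_record": {}, "update_record": {}, "run_report": {}}

# The helper no longer inspects the schemas; every toolkit gets the same fixed budget.
print(get_max_toolkit_length(schemas))  # 50
print(get_max_toolkit_length({}))       # 50

# The splitter constant is still exported, but nothing in this release prefixes names with it.
print(TOOLKIT_SPLITTER)                 # ___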
alita_sdk/tools/utils/text_operations.py
ADDED

@@ -0,0 +1,254 @@
+"""
+Shared text operations utilities for file manipulation across toolkits.
+
+Provides common functionality for:
+- Parsing OLD/NEW marker-based edits
+- Text file validation
+- Line-based slicing and partial reads
+- Content searching with context
+"""
+import re
+import logging
+from typing import List, Tuple, Dict, Optional
+
+logger = logging.getLogger(__name__)
+
+# Text file extensions that support editing
+TEXT_EDITABLE_EXTENSIONS = {
+    '.md', '.txt', '.csv', '.json', '.xml', '.html',
+    '.yaml', '.yml', '.ini', '.conf', '.log', '.sh',
+    '.py', '.js', '.ts', '.jsx', '.tsx', '.java', '.go',
+    '.rb', '.php', '.c', '.cpp', '.h', '.hpp', '.cs',
+    '.sql', '.r', '.m', '.swift', '.kt', '.rs', '.scala'
+}
+
+
+def parse_old_new_markers(file_query: str) -> List[Tuple[str, str]]:
+    """
+    Parse OLD/NEW marker-based edit instructions.
+
+    Extracts pairs of old and new content from a file query using markers:
+    - OLD <<<< ... >>>> OLD
+    - NEW <<<< ... >>>> NEW
+
+    Args:
+        file_query: String containing marked old and new content sections
+
+    Returns:
+        List of tuples (old_content, new_content) for each edit pair
+
+    Example:
+        >>> query = '''
+        ... OLD <<<<
+        ... Hello World
+        ... >>>> OLD
+        ... NEW <<<<
+        ... Hello Mars
+        ... >>>> NEW
+        ... '''
+        >>> parse_old_new_markers(query)
+        [('Hello World', 'Hello Mars')]
+    """
+    # Split the file content by lines
+    code_lines = file_query.split("\n")
+
+    # Initialize lists to hold the contents of OLD and NEW sections
+    old_contents = []
+    new_contents = []
+
+    # Initialize variables to track whether the current line is within an OLD or NEW section
+    in_old_section = False
+    in_new_section = False
+
+    # Temporary storage for the current section's content
+    current_section_content = []
+
+    # Iterate through each line in the file content
+    for line in code_lines:
+        # Check for OLD section start
+        if "OLD <<<" in line:
+            in_old_section = True
+            current_section_content = []  # Reset current section content
+            continue  # Skip the line with the marker
+
+        # Check for OLD section end
+        if ">>>> OLD" in line:
+            in_old_section = False
+            old_contents.append("\n".join(current_section_content).strip())  # Add the captured content
+            current_section_content = []  # Reset current section content
+            continue  # Skip the line with the marker
+
+        # Check for NEW section start
+        if "NEW <<<" in line:
+            in_new_section = True
+            current_section_content = []  # Reset current section content
+            continue  # Skip the line with the marker
+
+        # Check for NEW section end
+        if ">>>> NEW" in line:
+            in_new_section = False
+            new_contents.append("\n".join(current_section_content).strip())  # Add the captured content
+            current_section_content = []  # Reset current section content
+            continue  # Skip the line with the marker
+
+        # If currently in an OLD or NEW section, add the line to the current section content
+        if in_old_section or in_new_section:
+            current_section_content.append(line)
+
+    # Pair the OLD and NEW contents
+    paired_contents = list(zip(old_contents, new_contents))
+
+    return paired_contents
+
+
+def is_text_editable(filename: str) -> bool:
+    """
+    Check if a file is editable as text based on its extension.
+
+    Args:
+        filename: Name or path of the file to check
+
+    Returns:
+        True if file extension is in the text-editable whitelist
+
+    Example:
+        >>> is_text_editable("config.json")
+        True
+        >>> is_text_editable("image.png")
+        False
+    """
+    from pathlib import Path
+    ext = Path(filename).suffix.lower()
+    return ext in TEXT_EDITABLE_EXTENSIONS
+
+
+def apply_line_slice(
+    content: str,
+    offset: Optional[int] = None,
+    limit: Optional[int] = None,
+    head: Optional[int] = None,
+    tail: Optional[int] = None
+) -> str:
+    """
+    Apply line-based slicing to text content.
+
+    Supports multiple modes:
+    - offset + limit: Read from line `offset` for `limit` lines (1-indexed)
+    - head: Read only first N lines
+    - tail: Read only last N lines
+    - No params: Return full content
+
+    Args:
+        content: Text content to slice
+        offset: Starting line number (1-indexed, inclusive)
+        limit: Number of lines to read from offset
+        head: Return only first N lines
+        tail: Return only last N lines
+
+    Returns:
+        Sliced content as string
+
+    Example:
+        >>> text = "line1\\nline2\\nline3\\nline4\\nline5"
+        >>> apply_line_slice(text, offset=2, limit=2)
+        'line2\\nline3'
+        >>> apply_line_slice(text, head=2)
+        'line1\\nline2'
+        >>> apply_line_slice(text, tail=2)
+        'line4\\nline5'
+    """
+    if not content:
+        return content
+
+    lines = content.splitlines(keepends=True)
+
+    # Head mode: first N lines
+    if head is not None:
+        return ''.join(lines[:head])
+
+    # Tail mode: last N lines
+    if tail is not None:
+        return ''.join(lines[-tail:] if tail > 0 else lines)
+
+    # Offset + limit mode: slice from offset for limit lines
+    if offset is not None:
+        start_idx = max(0, offset - 1)  # Convert 1-indexed to 0-indexed
+        if limit is not None:
+            end_idx = start_idx + limit
+            return ''.join(lines[start_idx:end_idx])
+        else:
+            return ''.join(lines[start_idx:])
+
+    # No slicing parameters: return full content
+    return content
+
+
+def search_in_content(
+    content: str,
+    pattern: str,
+    is_regex: bool = True,
+    context_lines: int = 2
+) -> List[Dict[str, any]]:
+    """
+    Search for pattern in content with context lines.
+
+    Args:
+        content: Text content to search
+        pattern: Search pattern (regex if is_regex=True, else literal string)
+        is_regex: Whether to treat pattern as regex (default True)
+        context_lines: Number of lines before/after match to include (default 2)
+
+    Returns:
+        List of match dictionaries with keys:
+        - line_number: 1-indexed line number of match
+        - line_content: The matching line
+        - match_text: The actual matched text
+        - context_before: List of lines before match
+        - context_after: List of lines after match
+
+    Example:
+        >>> text = "line1\\nHello World\\nline3"
+        >>> matches = search_in_content(text, "Hello", is_regex=False)
+        >>> matches[0]['line_number']
+        2
+        >>> matches[0]['match_text']
+        'Hello'
+    """
+    if not content:
+        return []
+
+    lines = content.splitlines()
+    matches = []
+
+    # Compile regex pattern or escape for literal search
+    if is_regex:
+        try:
+            regex = re.compile(pattern, re.IGNORECASE)
+        except re.error as e:
+            logger.warning(f"Invalid regex pattern '{pattern}': {e}")
+            return []
+    else:
+        regex = re.compile(re.escape(pattern), re.IGNORECASE)
+
+    # Search each line
+    for line_idx, line in enumerate(lines):
+        match = regex.search(line)
+        if match:
+            line_number = line_idx + 1  # Convert to 1-indexed
+
+            # Get context lines
+            context_start = max(0, line_idx - context_lines)
+            context_end = min(len(lines), line_idx + context_lines + 1)
+
+            context_before = lines[context_start:line_idx]
+            context_after = lines[line_idx + 1:context_end]
+
+            matches.append({
+                'line_number': line_number,
+                'line_content': line,
+                'match_text': match.group(0),
+                'context_before': context_before,
+                'context_after': context_after,
+            })
+
+    return matches