alita-sdk 0.3.257__py3-none-any.whl → 0.3.584__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of alita-sdk might be problematic.
- alita_sdk/cli/__init__.py +10 -0
- alita_sdk/cli/__main__.py +17 -0
- alita_sdk/cli/agent/__init__.py +5 -0
- alita_sdk/cli/agent/default.py +258 -0
- alita_sdk/cli/agent_executor.py +155 -0
- alita_sdk/cli/agent_loader.py +215 -0
- alita_sdk/cli/agent_ui.py +228 -0
- alita_sdk/cli/agents.py +3794 -0
- alita_sdk/cli/callbacks.py +647 -0
- alita_sdk/cli/cli.py +168 -0
- alita_sdk/cli/config.py +306 -0
- alita_sdk/cli/context/__init__.py +30 -0
- alita_sdk/cli/context/cleanup.py +198 -0
- alita_sdk/cli/context/manager.py +731 -0
- alita_sdk/cli/context/message.py +285 -0
- alita_sdk/cli/context/strategies.py +289 -0
- alita_sdk/cli/context/token_estimation.py +127 -0
- alita_sdk/cli/formatting.py +182 -0
- alita_sdk/cli/input_handler.py +419 -0
- alita_sdk/cli/inventory.py +1073 -0
- alita_sdk/cli/mcp_loader.py +315 -0
- alita_sdk/cli/toolkit.py +327 -0
- alita_sdk/cli/toolkit_loader.py +85 -0
- alita_sdk/cli/tools/__init__.py +43 -0
- alita_sdk/cli/tools/approval.py +224 -0
- alita_sdk/cli/tools/filesystem.py +1751 -0
- alita_sdk/cli/tools/planning.py +389 -0
- alita_sdk/cli/tools/terminal.py +414 -0
- alita_sdk/community/__init__.py +72 -12
- alita_sdk/community/inventory/__init__.py +236 -0
- alita_sdk/community/inventory/config.py +257 -0
- alita_sdk/community/inventory/enrichment.py +2137 -0
- alita_sdk/community/inventory/extractors.py +1469 -0
- alita_sdk/community/inventory/ingestion.py +3172 -0
- alita_sdk/community/inventory/knowledge_graph.py +1457 -0
- alita_sdk/community/inventory/parsers/__init__.py +218 -0
- alita_sdk/community/inventory/parsers/base.py +295 -0
- alita_sdk/community/inventory/parsers/csharp_parser.py +907 -0
- alita_sdk/community/inventory/parsers/go_parser.py +851 -0
- alita_sdk/community/inventory/parsers/html_parser.py +389 -0
- alita_sdk/community/inventory/parsers/java_parser.py +593 -0
- alita_sdk/community/inventory/parsers/javascript_parser.py +629 -0
- alita_sdk/community/inventory/parsers/kotlin_parser.py +768 -0
- alita_sdk/community/inventory/parsers/markdown_parser.py +362 -0
- alita_sdk/community/inventory/parsers/python_parser.py +604 -0
- alita_sdk/community/inventory/parsers/rust_parser.py +858 -0
- alita_sdk/community/inventory/parsers/swift_parser.py +832 -0
- alita_sdk/community/inventory/parsers/text_parser.py +322 -0
- alita_sdk/community/inventory/parsers/yaml_parser.py +370 -0
- alita_sdk/community/inventory/patterns/__init__.py +61 -0
- alita_sdk/community/inventory/patterns/ast_adapter.py +380 -0
- alita_sdk/community/inventory/patterns/loader.py +348 -0
- alita_sdk/community/inventory/patterns/registry.py +198 -0
- alita_sdk/community/inventory/presets.py +535 -0
- alita_sdk/community/inventory/retrieval.py +1403 -0
- alita_sdk/community/inventory/toolkit.py +173 -0
- alita_sdk/community/inventory/toolkit_utils.py +176 -0
- alita_sdk/community/inventory/visualize.py +1370 -0
- alita_sdk/configurations/__init__.py +11 -0
- alita_sdk/configurations/ado.py +148 -2
- alita_sdk/configurations/azure_search.py +1 -1
- alita_sdk/configurations/bigquery.py +1 -1
- alita_sdk/configurations/bitbucket.py +94 -2
- alita_sdk/configurations/browser.py +18 -0
- alita_sdk/configurations/carrier.py +19 -0
- alita_sdk/configurations/confluence.py +130 -1
- alita_sdk/configurations/delta_lake.py +1 -1
- alita_sdk/configurations/figma.py +76 -5
- alita_sdk/configurations/github.py +65 -1
- alita_sdk/configurations/gitlab.py +81 -0
- alita_sdk/configurations/google_places.py +17 -0
- alita_sdk/configurations/jira.py +103 -0
- alita_sdk/configurations/openapi.py +323 -0
- alita_sdk/configurations/postman.py +1 -1
- alita_sdk/configurations/qtest.py +72 -3
- alita_sdk/configurations/report_portal.py +115 -0
- alita_sdk/configurations/salesforce.py +19 -0
- alita_sdk/configurations/service_now.py +1 -12
- alita_sdk/configurations/sharepoint.py +167 -0
- alita_sdk/configurations/sonar.py +18 -0
- alita_sdk/configurations/sql.py +20 -0
- alita_sdk/configurations/testio.py +101 -0
- alita_sdk/configurations/testrail.py +88 -0
- alita_sdk/configurations/xray.py +94 -1
- alita_sdk/configurations/zephyr_enterprise.py +94 -1
- alita_sdk/configurations/zephyr_essential.py +95 -0
- alita_sdk/runtime/clients/artifact.py +21 -4
- alita_sdk/runtime/clients/client.py +458 -67
- alita_sdk/runtime/clients/mcp_discovery.py +342 -0
- alita_sdk/runtime/clients/mcp_manager.py +262 -0
- alita_sdk/runtime/clients/sandbox_client.py +352 -0
- alita_sdk/runtime/langchain/_constants_bkup.py +1318 -0
- alita_sdk/runtime/langchain/assistant.py +183 -43
- alita_sdk/runtime/langchain/constants.py +647 -1
- alita_sdk/runtime/langchain/document_loaders/AlitaDocxMammothLoader.py +315 -3
- alita_sdk/runtime/langchain/document_loaders/AlitaExcelLoader.py +209 -31
- alita_sdk/runtime/langchain/document_loaders/AlitaImageLoader.py +1 -1
- alita_sdk/runtime/langchain/document_loaders/AlitaJSONLinesLoader.py +77 -0
- alita_sdk/runtime/langchain/document_loaders/AlitaJSONLoader.py +10 -3
- alita_sdk/runtime/langchain/document_loaders/AlitaMarkdownLoader.py +66 -0
- alita_sdk/runtime/langchain/document_loaders/AlitaPDFLoader.py +79 -10
- alita_sdk/runtime/langchain/document_loaders/AlitaPowerPointLoader.py +52 -15
- alita_sdk/runtime/langchain/document_loaders/AlitaPythonLoader.py +9 -0
- alita_sdk/runtime/langchain/document_loaders/AlitaTableLoader.py +1 -4
- alita_sdk/runtime/langchain/document_loaders/AlitaTextLoader.py +15 -2
- alita_sdk/runtime/langchain/document_loaders/ImageParser.py +30 -0
- alita_sdk/runtime/langchain/document_loaders/constants.py +189 -41
- alita_sdk/runtime/langchain/interfaces/llm_processor.py +4 -2
- alita_sdk/runtime/langchain/langraph_agent.py +493 -105
- alita_sdk/runtime/langchain/utils.py +118 -8
- alita_sdk/runtime/llms/preloaded.py +2 -6
- alita_sdk/runtime/models/mcp_models.py +61 -0
- alita_sdk/runtime/skills/__init__.py +91 -0
- alita_sdk/runtime/skills/callbacks.py +498 -0
- alita_sdk/runtime/skills/discovery.py +540 -0
- alita_sdk/runtime/skills/executor.py +610 -0
- alita_sdk/runtime/skills/input_builder.py +371 -0
- alita_sdk/runtime/skills/models.py +330 -0
- alita_sdk/runtime/skills/registry.py +355 -0
- alita_sdk/runtime/skills/skill_runner.py +330 -0
- alita_sdk/runtime/toolkits/__init__.py +28 -0
- alita_sdk/runtime/toolkits/application.py +14 -4
- alita_sdk/runtime/toolkits/artifact.py +25 -9
- alita_sdk/runtime/toolkits/datasource.py +13 -6
- alita_sdk/runtime/toolkits/mcp.py +782 -0
- alita_sdk/runtime/toolkits/planning.py +178 -0
- alita_sdk/runtime/toolkits/skill_router.py +238 -0
- alita_sdk/runtime/toolkits/subgraph.py +11 -6
- alita_sdk/runtime/toolkits/tools.py +314 -70
- alita_sdk/runtime/toolkits/vectorstore.py +11 -5
- alita_sdk/runtime/tools/__init__.py +24 -0
- alita_sdk/runtime/tools/application.py +16 -4
- alita_sdk/runtime/tools/artifact.py +367 -33
- alita_sdk/runtime/tools/data_analysis.py +183 -0
- alita_sdk/runtime/tools/function.py +100 -4
- alita_sdk/runtime/tools/graph.py +81 -0
- alita_sdk/runtime/tools/image_generation.py +218 -0
- alita_sdk/runtime/tools/llm.py +1032 -177
- alita_sdk/runtime/tools/loop.py +3 -1
- alita_sdk/runtime/tools/loop_output.py +3 -1
- alita_sdk/runtime/tools/mcp_inspect_tool.py +284 -0
- alita_sdk/runtime/tools/mcp_remote_tool.py +181 -0
- alita_sdk/runtime/tools/mcp_server_tool.py +3 -1
- alita_sdk/runtime/tools/planning/__init__.py +36 -0
- alita_sdk/runtime/tools/planning/models.py +246 -0
- alita_sdk/runtime/tools/planning/wrapper.py +607 -0
- alita_sdk/runtime/tools/router.py +2 -1
- alita_sdk/runtime/tools/sandbox.py +375 -0
- alita_sdk/runtime/tools/skill_router.py +776 -0
- alita_sdk/runtime/tools/tool.py +3 -1
- alita_sdk/runtime/tools/vectorstore.py +69 -65
- alita_sdk/runtime/tools/vectorstore_base.py +163 -90
- alita_sdk/runtime/utils/AlitaCallback.py +137 -21
- alita_sdk/runtime/utils/constants.py +5 -1
- alita_sdk/runtime/utils/mcp_client.py +492 -0
- alita_sdk/runtime/utils/mcp_oauth.py +361 -0
- alita_sdk/runtime/utils/mcp_sse_client.py +434 -0
- alita_sdk/runtime/utils/mcp_tools_discovery.py +124 -0
- alita_sdk/runtime/utils/streamlit.py +41 -14
- alita_sdk/runtime/utils/toolkit_utils.py +28 -9
- alita_sdk/runtime/utils/utils.py +48 -0
- alita_sdk/tools/__init__.py +135 -37
- alita_sdk/tools/ado/__init__.py +2 -2
- alita_sdk/tools/ado/repos/__init__.py +16 -19
- alita_sdk/tools/ado/repos/repos_wrapper.py +12 -20
- alita_sdk/tools/ado/test_plan/__init__.py +27 -8
- alita_sdk/tools/ado/test_plan/test_plan_wrapper.py +56 -28
- alita_sdk/tools/ado/wiki/__init__.py +28 -12
- alita_sdk/tools/ado/wiki/ado_wrapper.py +114 -40
- alita_sdk/tools/ado/work_item/__init__.py +28 -12
- alita_sdk/tools/ado/work_item/ado_wrapper.py +95 -11
- alita_sdk/tools/advanced_jira_mining/__init__.py +13 -8
- alita_sdk/tools/aws/delta_lake/__init__.py +15 -11
- alita_sdk/tools/aws/delta_lake/tool.py +5 -1
- alita_sdk/tools/azure_ai/search/__init__.py +14 -8
- alita_sdk/tools/base/tool.py +5 -1
- alita_sdk/tools/base_indexer_toolkit.py +454 -110
- alita_sdk/tools/bitbucket/__init__.py +28 -19
- alita_sdk/tools/bitbucket/api_wrapper.py +285 -27
- alita_sdk/tools/bitbucket/cloud_api_wrapper.py +5 -5
- alita_sdk/tools/browser/__init__.py +41 -16
- alita_sdk/tools/browser/crawler.py +3 -1
- alita_sdk/tools/browser/utils.py +15 -6
- alita_sdk/tools/carrier/__init__.py +18 -17
- alita_sdk/tools/carrier/backend_reports_tool.py +8 -4
- alita_sdk/tools/carrier/excel_reporter.py +8 -4
- alita_sdk/tools/chunkers/__init__.py +3 -1
- alita_sdk/tools/chunkers/code/codeparser.py +1 -1
- alita_sdk/tools/chunkers/sematic/json_chunker.py +2 -1
- alita_sdk/tools/chunkers/sematic/markdown_chunker.py +97 -6
- alita_sdk/tools/chunkers/sematic/proposal_chunker.py +1 -1
- alita_sdk/tools/chunkers/universal_chunker.py +270 -0
- alita_sdk/tools/cloud/aws/__init__.py +12 -7
- alita_sdk/tools/cloud/azure/__init__.py +12 -7
- alita_sdk/tools/cloud/gcp/__init__.py +12 -7
- alita_sdk/tools/cloud/k8s/__init__.py +12 -7
- alita_sdk/tools/code/linter/__init__.py +10 -8
- alita_sdk/tools/code/loaders/codesearcher.py +3 -2
- alita_sdk/tools/code/sonar/__init__.py +21 -13
- alita_sdk/tools/code_indexer_toolkit.py +199 -0
- alita_sdk/tools/confluence/__init__.py +22 -14
- alita_sdk/tools/confluence/api_wrapper.py +197 -58
- alita_sdk/tools/confluence/loader.py +14 -2
- alita_sdk/tools/custom_open_api/__init__.py +12 -5
- alita_sdk/tools/elastic/__init__.py +11 -8
- alita_sdk/tools/elitea_base.py +546 -64
- alita_sdk/tools/figma/__init__.py +60 -11
- alita_sdk/tools/figma/api_wrapper.py +1400 -167
- alita_sdk/tools/figma/figma_client.py +73 -0
- alita_sdk/tools/figma/toon_tools.py +2748 -0
- alita_sdk/tools/github/__init__.py +18 -17
- alita_sdk/tools/github/api_wrapper.py +9 -26
- alita_sdk/tools/github/github_client.py +81 -12
- alita_sdk/tools/github/schemas.py +2 -1
- alita_sdk/tools/github/tool.py +5 -1
- alita_sdk/tools/gitlab/__init__.py +19 -13
- alita_sdk/tools/gitlab/api_wrapper.py +256 -80
- alita_sdk/tools/gitlab_org/__init__.py +14 -10
- alita_sdk/tools/google/bigquery/__init__.py +14 -13
- alita_sdk/tools/google/bigquery/tool.py +5 -1
- alita_sdk/tools/google_places/__init__.py +21 -11
- alita_sdk/tools/jira/__init__.py +22 -11
- alita_sdk/tools/jira/api_wrapper.py +315 -168
- alita_sdk/tools/keycloak/__init__.py +11 -8
- alita_sdk/tools/localgit/__init__.py +9 -3
- alita_sdk/tools/localgit/local_git.py +62 -54
- alita_sdk/tools/localgit/tool.py +5 -1
- alita_sdk/tools/memory/__init__.py +38 -14
- alita_sdk/tools/non_code_indexer_toolkit.py +7 -2
- alita_sdk/tools/ocr/__init__.py +11 -8
- alita_sdk/tools/openapi/__init__.py +491 -106
- alita_sdk/tools/openapi/api_wrapper.py +1357 -0
- alita_sdk/tools/openapi/tool.py +20 -0
- alita_sdk/tools/pandas/__init__.py +20 -12
- alita_sdk/tools/pandas/api_wrapper.py +40 -45
- alita_sdk/tools/pandas/dataframe/generator/base.py +3 -1
- alita_sdk/tools/postman/__init__.py +11 -11
- alita_sdk/tools/postman/api_wrapper.py +19 -8
- alita_sdk/tools/postman/postman_analysis.py +8 -1
- alita_sdk/tools/pptx/__init__.py +11 -10
- alita_sdk/tools/qtest/__init__.py +22 -14
- alita_sdk/tools/qtest/api_wrapper.py +1784 -88
- alita_sdk/tools/rally/__init__.py +13 -10
- alita_sdk/tools/report_portal/__init__.py +23 -16
- alita_sdk/tools/salesforce/__init__.py +22 -16
- alita_sdk/tools/servicenow/__init__.py +21 -16
- alita_sdk/tools/servicenow/api_wrapper.py +1 -1
- alita_sdk/tools/sharepoint/__init__.py +17 -14
- alita_sdk/tools/sharepoint/api_wrapper.py +179 -39
- alita_sdk/tools/sharepoint/authorization_helper.py +191 -1
- alita_sdk/tools/sharepoint/utils.py +8 -2
- alita_sdk/tools/slack/__init__.py +13 -8
- alita_sdk/tools/sql/__init__.py +22 -19
- alita_sdk/tools/sql/api_wrapper.py +71 -23
- alita_sdk/tools/testio/__init__.py +21 -13
- alita_sdk/tools/testrail/__init__.py +13 -11
- alita_sdk/tools/testrail/api_wrapper.py +214 -46
- alita_sdk/tools/utils/__init__.py +28 -4
- alita_sdk/tools/utils/content_parser.py +241 -55
- alita_sdk/tools/utils/text_operations.py +254 -0
- alita_sdk/tools/vector_adapters/VectorStoreAdapter.py +83 -27
- alita_sdk/tools/xray/__init__.py +18 -14
- alita_sdk/tools/xray/api_wrapper.py +58 -113
- alita_sdk/tools/yagmail/__init__.py +9 -3
- alita_sdk/tools/zephyr/__init__.py +12 -7
- alita_sdk/tools/zephyr_enterprise/__init__.py +16 -9
- alita_sdk/tools/zephyr_enterprise/api_wrapper.py +30 -15
- alita_sdk/tools/zephyr_essential/__init__.py +16 -10
- alita_sdk/tools/zephyr_essential/api_wrapper.py +297 -54
- alita_sdk/tools/zephyr_essential/client.py +6 -4
- alita_sdk/tools/zephyr_scale/__init__.py +13 -8
- alita_sdk/tools/zephyr_scale/api_wrapper.py +39 -31
- alita_sdk/tools/zephyr_squad/__init__.py +12 -7
- {alita_sdk-0.3.257.dist-info → alita_sdk-0.3.584.dist-info}/METADATA +184 -37
- alita_sdk-0.3.584.dist-info/RECORD +452 -0
- alita_sdk-0.3.584.dist-info/entry_points.txt +2 -0
- alita_sdk/tools/bitbucket/tools.py +0 -304
- alita_sdk-0.3.257.dist-info/RECORD +0 -343
- {alita_sdk-0.3.257.dist-info → alita_sdk-0.3.584.dist-info}/WHEEL +0 -0
- {alita_sdk-0.3.257.dist-info → alita_sdk-0.3.584.dist-info}/licenses/LICENSE +0 -0
- {alita_sdk-0.3.257.dist-info → alita_sdk-0.3.584.dist-info}/top_level.txt +0 -0

alita_sdk/tools/openapi/tool.py
ADDED

@@ -0,0 +1,20 @@
+from typing import Optional, Type
+
+from pydantic import BaseModel, Field, field_validator
+
+from ..base.tool import BaseAction
+from .api_wrapper import OpenApiApiWrapper
+
+
+class OpenApiAction(BaseAction):
+    """Tool for executing a single OpenAPI operation."""
+
+    api_wrapper: OpenApiApiWrapper = Field(default_factory=OpenApiApiWrapper)
+    name: str
+    description: str = ""
+    args_schema: Optional[Type[BaseModel]] = None
+
+    @field_validator('name', mode='before')
+    @classmethod
+    def remove_spaces(cls, v: str) -> str:
+        return v.replace(' ', '')
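
The new OpenApiAction strips spaces out of tool names with a pydantic v2 field_validator that runs before regular field validation. A minimal standalone sketch of the same pattern (the ToolName model below is illustrative, not SDK code):

# Illustrative sketch, not part of the package: a pydantic v2
# field_validator with mode='before' normalizes the raw value
# before standard field validation runs.
from pydantic import BaseModel, field_validator


class ToolName(BaseModel):
    name: str

    @field_validator('name', mode='before')
    @classmethod
    def remove_spaces(cls, v: str) -> str:
        # "get user list" -> "getuserlist"
        return v.replace(' ', '')


print(ToolName(name="get user list").name)  # getuserlist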

alita_sdk/tools/pandas/__init__.py
CHANGED

@@ -5,7 +5,8 @@ from pydantic import BaseModel, ConfigDict, create_model, Field

 from .api_wrapper import PandasWrapper
 from ..base.tool import BaseAction
-from ..utils import clean_string,
+from ..utils import clean_string, get_max_toolkit_length
+from ...runtime.utils.constants import TOOLKIT_NAME_META, TOOL_NAME_META, TOOLKIT_TYPE_META

 name = "pandas"

@@ -21,19 +22,22 @@ def get_tools(tool):

 class PandasToolkit(BaseToolkit):
     tools: List[BaseTool] = []
-    toolkit_max_length: int = 0

     @staticmethod
     def toolkit_config_schema() -> BaseModel:
         selected_tools = {x['name']: x['args_schema'].schema() for x in PandasWrapper.model_construct().get_available_tools()}
-        PandasToolkit.toolkit_max_length = get_max_toolkit_length(selected_tools)
         return create_model(
             name,
-            bucket_name=(str, Field(default=None, title="Bucket name", description="Bucket where the content file is stored"
+            bucket_name=(str, Field(default=None, title="Bucket name", description="Bucket where the content file is stored")),
             selected_tools=(List[Literal[tuple(selected_tools)]], Field(default=[], json_schema_extra={'args_schemas': selected_tools})),
-            __config__=ConfigDict(json_schema_extra={'metadata': {
-
-
+            __config__=ConfigDict(json_schema_extra={'metadata': {
+                "label": "Pandas (Deprecated)",
+                "icon_url": "pandas-icon.svg",
+                "categories": ["analysis"],
+                "deprecated": True,
+                "deprecation_message": "This toolkit is deprecated. Use the 'Data Analysis' internal tool instead. Enable it via the 'Internal Tools' menu in chat.",
+                "extra_categories": ["data science", "data manipulation", "dataframes"]
+            }})
         )

     @classmethod
@@ -41,17 +45,21 @@ class PandasToolkit(BaseToolkit):
         if selected_tools is None:
             selected_tools = []
         csv_tool_api_wrapper = PandasWrapper(**kwargs)
-        prefix = clean_string(toolkit_name, cls.toolkit_max_length) + TOOLKIT_SPLITTER if toolkit_name else ''
         available_tools = csv_tool_api_wrapper.get_available_tools()
         tools = []
         for tool in available_tools:
             if selected_tools and tool["name"] not in selected_tools:
                 continue
+            description = tool["description"]
+            if toolkit_name:
+                description = f"Toolkit: {toolkit_name}\n{description}"
+            description = description[:1000]
             tools.append(BaseAction(
-                api_wrapper=
-                name=
-                description=
-                args_schema=tool["args_schema"]
+                api_wrapper=pandas_api_wrapper,
+                name=tool["name"],
+                description=description,
+                args_schema=tool["args_schema"],
+                metadata={TOOLKIT_NAME_META: toolkit_name, TOOLKIT_TYPE_META: name, TOOL_NAME_META: tool["name"]} if toolkit_name else {TOOL_NAME_META: tool["name"]}
             ))
         return cls(tools=tools)

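
A pattern repeated across the toolkits in this release: instead of prefixing every tool name with a cleaned toolkit name (the removed clean_string(...) + TOOLKIT_SPLITTER lines), the toolkit identity now travels in the tool's metadata and in its description. A minimal sketch of that metadata shape; the string values assigned to the constants below are assumptions, since the diff only shows the constant names:

# Illustrative sketch, not SDK code. The *_META values are assumptions.
TOOLKIT_NAME_META = "toolkit_name"
TOOLKIT_TYPE_META = "toolkit_type"
TOOL_NAME_META = "tool_name"


def build_tool_metadata(toolkit_name, toolkit_type, tool_name):
    # Mirrors the conditional used in get_toolkit() above: toolkit keys are
    # only attached when a toolkit name was actually supplied.
    if toolkit_name:
        return {TOOLKIT_NAME_META: toolkit_name,
                TOOLKIT_TYPE_META: toolkit_type,
                TOOL_NAME_META: tool_name}
    return {TOOL_NAME_META: tool_name}


print(build_tool_metadata("my_pandas", "pandas", "pandas_analyze_data"))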

alita_sdk/tools/pandas/api_wrapper.py
CHANGED

@@ -93,7 +93,7 @@ class PandasWrapper(BaseToolApiWrapper):
         if file_extension in ['csv', 'txt']:
             df = pd.read_csv(file_obj)
         elif file_extension in ['xlsx', 'xls']:
-            df = pd.read_excel(file_obj)
+            df = pd.read_excel(file_obj, engine='calamine')
         elif file_extension == 'parquet':
             df = pd.read_parquet(file_obj)
         elif file_extension == 'json':
@@ -158,57 +158,62 @@ class PandasWrapper(BaseToolApiWrapper):
                 f"Retrying Code Generation ({attempts}/{max_retries})..."
             )

-    def
-        """Analyze
+    def pandas_analyze_data(self, query: str, filename: str) -> str:
+        """Analyze data from a file using natural language query.
+
+        This tool allows you to perform data analysis operations on files using natural language.
+        It automatically generates and executes Python pandas code based on your query.
+
+        Supported file formats: CSV, Excel (.xlsx, .xls), Parquet, JSON, XML, HDF5, Feather, Pickle
+
+        Parameters:
+            query: Natural language description of the analysis to perform. Examples:
+                - "Calculate the average sales by region"
+                - "Show me a bar chart of products by revenue"
+                - "Filter rows where price > 100 and status is 'active'"
+                - "What is the correlation between age and income?"
+            filename: Name of the file in the artifact bucket (e.g., 'sales_data.csv', 'report.xlsx')
+
+        Returns:
+            Analysis results as text, or confirmation message if a chart was generated and saved.
+            Charts are automatically saved to the artifact bucket as PNG files.
+
+        Examples:
+            - pandas_analyze_data(query="Show summary statistics", filename="data.csv")
+            - pandas_analyze_data(query="Create a histogram of ages", filename="customers.xlsx")
+            - pandas_analyze_data(query="What's the total revenue by month?", filename="sales.parquet")
+        """
         df = self._get_dataframe(filename)
         code = self.generate_code_with_retries(df, query)
-
-
-            data={
-                "message": f"Executing generated code... \n\n```python\n{code}\n```",
-                "tool_name": "process_query",
-                "toolkit": "pandas"
-            }
-        )
+        self._log_tool_event(tool_name="pandas_analyze_data",
+                             message=f"Executing generated code... \n\n```python\n{code}\n```")
         try:
             result = self.execute_code(df, code)
         except Exception as e:
             logger.error(f"Code execution failed: {format_exc()}")
-            dispatch_custom_event(
-                name="thinking_step",
-                data={
-                    "message": f"Code execution failed: {format_exc()}",
-                    "tool_name": "process_query",
-                    "toolkit": "pandas"
-                }
-            )
             raise
-        dispatch_custom_event(
-            name="thinking_step",
-            data={
-                "message": f"Result of code execution... \n\n```\n{result['result']}\n```",
-                "tool_name": "process_query",
-                "toolkit": "pandas"
-            }
-        )
         if result.get("df") is not None:
             df = result.pop("df")
             # Not saving dataframe to artifact repo for now
             # self._save_dataframe(df, filename)
         if result.get('chart'):
+            chart_results = []
             if isinstance(result['chart'], list):
                 for ind, chart in enumerate(result['chart']):
                     chart_filename = f"chart_{uuid4()}.png"
                     chart_data = base64.b64decode(chart)
                     self.alita.create_artifact(self.bucket_name, chart_filename, chart_data)
-
+                    chart_url = f"{self.alita.base_url}/api/v1/artifacts/artifact/default/{self.alita.project_id}/{self.bucket_name}/{chart_filename}"
+                    chart_results.append(f"Chart #{ind+1} saved and available at: {chart_url}")
+                result['result'] = "\n".join(chart_results)
             else:
                 # Handle single chart case (not in a list)
                 chart = result['chart']
                 chart_filename = f"chart_{uuid4()}.png"
                 chart_data = base64.b64decode(chart)
                 self.alita.create_artifact(self.bucket_name, chart_filename, chart_data)
-
+                chart_url = f"{self.alita.base_url}/api/v1/artifacts/artifact/default/{self.alita.project_id}/{self.bucket_name}/{chart_filename}"
+                result['result'] = f"Chart saved and available at: {chart_url}\n\nYou can embed this image in your response using markdown: "
         return result.get("result", None)

     def save_dataframe(self, source_df: str, target_file: str) -> str:
@@ -271,23 +276,13 @@ class PandasWrapper(BaseToolApiWrapper):
     def get_available_tools(self):
         return [
             {
-                "name": "
-                "ref": self.
-                "description": self.
-                "args_schema": create_model(
-                    "ProcessQueryModel",
-                    query=(str, Field(description="Task to solve")),
-                    filename=(str, Field(description="File to be processed"))
-                )
-            },
-            {
-                "name": "save_dataframe",
-                "ref": self.save_dataframe,
-                "description": self.save_dataframe.__doc__,
+                "name": "pandas_analyze_data",
+                "ref": self.pandas_analyze_data,
+                "description": self.pandas_analyze_data.__doc__,
                 "args_schema": create_model(
-                    "
-
-
+                    "AnalyseDataModel",
+                    query=(str, Field(description="Natural language query describing what analysis to perform on the data")),
+                    filename=(str, Field(description="Name of the file to analyze (e.g., 'data.csv', 'report.xlsx')"))
                 )
             }
         ]
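
One detail worth noting in the Excel path above: engine='calamine' is only available in pandas 2.2 and later and requires the python-calamine package. A small illustrative check (not SDK code) before relying on it:

# Illustrative helper, not SDK code: pick the calamine engine when the
# python-calamine backend is installed, otherwise fall back to openpyxl.
import importlib.util


def excel_engine() -> str:
    if importlib.util.find_spec("python_calamine") is not None:
        return "calamine"
    return "openpyxl"


# df = pd.read_excel("report.xlsx", engine=excel_engine())
print(excel_engine())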

alita_sdk/tools/pandas/dataframe/generator/base.py
CHANGED

@@ -39,7 +39,9 @@ class CodeGenerator:
                 {"role": "user", "content": [{"type": "text", "text": prompt}]}
             ]
             # Generate the code
-
+            from alita_sdk.runtime.langchain.utils import extract_text_from_completion
+            completion = self.llm.invoke(messages)
+            code = extract_text_from_completion(completion)
             return self.validate_and_clean_code(code)

         except Exception as e:
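
The helper imported here, extract_text_from_completion, lives in alita_sdk.runtime.langchain.utils and its body is not part of this diff. As a rough, hedged sketch of what such a helper generally has to do with LangChain chat models, whose AIMessage content may be a plain string or a list of content blocks (this is not the SDK's actual implementation):

# Hedged sketch, not the SDK implementation: flatten an AIMessage whose
# content may be a plain string or a list of content blocks.
from langchain_core.messages import AIMessage


def completion_to_text(completion: AIMessage) -> str:
    content = completion.content
    if isinstance(content, str):
        return content
    parts = []
    for block in content:
        if isinstance(block, dict) and block.get("type") == "text":
            parts.append(block.get("text", ""))
        elif isinstance(block, str):
            parts.append(block)
    return "".join(parts)


print(completion_to_text(AIMessage(content="df.describe()")))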

alita_sdk/tools/postman/__init__.py
CHANGED

@@ -6,8 +6,9 @@ from pydantic import create_model, BaseModel, ConfigDict, Field, field_validator
 from ..base.tool import BaseAction

 from .api_wrapper import PostmanApiWrapper
-from ..utils import clean_string, get_max_toolkit_length,
+from ..utils import clean_string, get_max_toolkit_length, check_connection_response
 from ...configurations.postman import PostmanConfiguration
+from ...runtime.utils.constants import TOOLKIT_NAME_META, TOOL_NAME_META, TOOLKIT_TYPE_META

 name = "postman"

@@ -43,20 +44,16 @@ def get_tools(tool):

 class PostmanToolkit(BaseToolkit):
     tools: List[BaseTool] = []
-    toolkit_max_length: int = 0

     @staticmethod
     def toolkit_config_schema() -> BaseModel:
         selected_tools = {x['name']: x['args_schema'].schema(
         ) for x in PostmanApiWrapper.model_construct().get_available_tools()}
-        PostmanToolkit.toolkit_max_length = get_max_toolkit_length(
-            selected_tools)
         m = create_model(
             name,
             postman_configuration=(Optional[PostmanConfiguration], Field(description="Postman Configuration",
                                                                          json_schema_extra={'configuration_types': ['postman']})),
-            collection_id=(str, Field(description="Default collection ID",
-                'toolkit_name': True, 'max_toolkit_length': PostmanToolkit.toolkit_max_length})),
+            collection_id=(str, Field(description="Default collection ID")),
             environment_config=(dict, Field(
                 description="JSON configuration for request execution (auth headers, project IDs, base URLs, etc.)",
                 default={})),
@@ -90,20 +87,23 @@ class PostmanToolkit(BaseToolkit):
             **kwargs['postman_configuration'],
         }
         postman_api_wrapper = PostmanApiWrapper(**wrapper_payload)
-        prefix = clean_string(str(toolkit_name), cls.toolkit_max_length) + \
-            TOOLKIT_SPLITTER if toolkit_name else ''
         available_tools = postman_api_wrapper.get_available_tools()
         tools = []
         for tool in available_tools:
             if selected_tools:
                 if tool["name"] not in selected_tools:
                     continue
+            description = f"{tool['description']}\nAPI URL: {postman_api_wrapper.base_url}"
+            if toolkit_name:
+                description = f"{description}\nToolkit: {toolkit_name}"
+            description = description[:1000]
             tools.append(PostmanAction(
                 api_wrapper=postman_api_wrapper,
-                name=
+                name=tool["name"],
                 mode=tool["mode"],
-                description=
-                args_schema=tool["args_schema"]
+                description=description,
+                args_schema=tool["args_schema"],
+                metadata={TOOLKIT_NAME_META: toolkit_name, TOOLKIT_TYPE_META: name, TOOL_NAME_META: tool["name"]} if toolkit_name else {TOOL_NAME_META: tool["name"]}
             ))
         return cls(tools=tools)


alita_sdk/tools/postman/api_wrapper.py
CHANGED

@@ -340,7 +340,7 @@ class PostmanApiWrapper(BaseToolApiWrapper):
             raise ToolException(
                 f"Invalid JSON response from Postman API: {str(e)}")

-    def _apply_authentication(self, headers, params, all_variables, resolve_variables):
+    def _apply_authentication(self, headers, params, all_variables, native_auth, resolve_variables):
         """Apply authentication based on environment_config auth settings.

         Supports multiple authentication types:
@@ -363,14 +363,15 @@ class PostmanApiWrapper(BaseToolApiWrapper):
         import base64

         # Handle structured auth configuration only - no backward compatibility
-        auth_config = self.environment_config.get('auth')
+        auth_config = self.environment_config.get('auth', native_auth)
         if auth_config and isinstance(auth_config, dict):
             auth_type = auth_config.get('type', '').lower()
             auth_params = auth_config.get('params', {})

             if auth_type == 'bearer':
                 # Bearer token authentication
-
+                tokent_raw = auth_config.get('bearer', [{}])[0].get('value', '')
+                token = resolve_variables(str(tokent_raw))
                 if token:
                     headers['Authorization'] = f'Bearer {token}'

@@ -739,7 +740,7 @@ class PostmanApiWrapper(BaseToolApiWrapper):
         all_variables = {}

         # 1. Start with environment_config variables (lowest priority)
-        all_variables.update(self.
+        all_variables.update(self._get_variables_from_env_config())

         # 2. Add collection variables
         collection_variables = collection_data.get('variable', [])
@@ -760,8 +761,9 @@ class PostmanApiWrapper(BaseToolApiWrapper):
         import re
         def replace_var(match):
             var_name = match.group(1)
-
-
+            value = all_variables.get(var_name, None)
+            return resolve_variables(str(value)) if value else match.group(0)
+
         return re.sub(r'\{\{([^}]+)\}\}', replace_var, text)

         # Prepare the request
@@ -791,7 +793,7 @@ class PostmanApiWrapper(BaseToolApiWrapper):
         headers = {}

         # Handle authentication from environment_config
-        self._apply_authentication(headers, params, all_variables, resolve_variables)
+        self._apply_authentication(headers, params, all_variables, request_data.get('auth', None), resolve_variables)

         # Add headers from request
         request_headers = request_data.get('header', [])
@@ -1640,7 +1642,7 @@ class PostmanApiWrapper(BaseToolApiWrapper):

         # Find the request
         request_item = self.analyzer.find_request_by_path(
-            collection_data["item"], request_path)
+            collection_data["item"], request_path, collection_data.get("auth", None))
         if not request_item:
             raise ToolException(f"Request '{request_path}' not found")

@@ -2161,3 +2163,12 @@ class PostmanApiWrapper(BaseToolApiWrapper):
         parse_items(items)

         return result
+
+    def _get_variables_from_env_config(self):
+        """Extracts all enabled variables from the 'values' field in environment_config."""
+        result = {}
+        values = self.environment_config.get("values", [])
+        for var in values:
+            if var.get("enabled", True) and "key" in var and "value" in var:
+                result[var["key"]] = var["value"]
+        return result
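
Taken together, the wrapper now seeds {{variable}} resolution from the enabled entries in environment_config['values'] and substitutes placeholders recursively, so one variable's value may reference another. A self-contained sketch of that substitution logic (function name and sample data are illustrative, not the SDK's):

# Illustrative sketch, not SDK code: Postman-style {{variable}} substitution
# with one level of indirection, mirroring the replace_var logic above.
import re


def resolve_variables(text: str, variables: dict) -> str:
    def replace_var(match):
        value = variables.get(match.group(1))
        # Unknown variables are left untouched, as in the wrapper above.
        return resolve_variables(str(value), variables) if value else match.group(0)
    return re.sub(r'\{\{([^}]+)\}\}', replace_var, text)


variables = {"base_url": "https://api.example.com", "users": "{{base_url}}/users"}
print(resolve_variables("GET {{users}}/42", variables))
# GET https://api.example.com/users/42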

alita_sdk/tools/postman/postman_analysis.py
CHANGED

@@ -1049,13 +1049,14 @@ class PostmanAnalyzer:
         find_in_items(items, path_parts)
         return results

-    def find_request_by_path(self, items: List[Dict], request_path: str) -> Optional[Dict]:
+    def find_request_by_path(self, items: List[Dict], request_path: str, auth = None) -> Optional[Dict]:
         """Find a request by its path."""
         path_parts = [part.strip() for part in request_path.split('/') if part.strip()]
         if not path_parts:
             return None

         current_items = items
+        current_auth = auth

         # Navigate through folders to the request
         for i, part in enumerate(path_parts):
@@ -1065,6 +1066,9 @@ class PostmanAnalyzer:
                 if i == len(path_parts) - 1:
                     # This should be the request
                     if item.get('request'):
+                        # if request has no auth, inherit from parent
+                        if not item['request'].get('auth') and current_auth:
+                            item['request']['auth'] = current_auth
                         return item
                     else:
                         return None
@@ -1072,6 +1076,9 @@ class PostmanAnalyzer:
                     # This should be a folder
                     if item.get('item'):
                         current_items = item['item']
+                        # Update current_auth if folder has auth
+                        if item.get('auth'):
+                            current_auth = item['auth']
                         found = True
                         break
                     else:
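
The net effect of the find_request_by_path changes is that a request with no auth of its own inherits the nearest enclosing folder's auth, falling back to whatever auth was passed in for the collection. A compact illustration of that rule (the helper and sample data are hypothetical, not SDK code):

# Hypothetical helper, not SDK code: resolve the effective auth for a request
# given the chain of folders leading to it, mirroring the inheritance rule above.
def effective_auth(path_items, collection_auth=None):
    current_auth = collection_auth
    for item in path_items[:-1]:          # folders on the way down to the request
        if item.get('auth'):
            current_auth = item['auth']
    request_item = path_items[-1]
    return request_item.get('request', {}).get('auth') or current_auth


folder = {'name': 'admin', 'auth': {'type': 'bearer'}, 'item': []}
request = {'name': 'list users', 'request': {}}
print(effective_auth([folder, request], collection_auth={'type': 'apikey'}))
# {'type': 'bearer'}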

alita_sdk/tools/pptx/__init__.py
CHANGED

@@ -7,7 +7,8 @@ from pydantic import create_model, BaseModel, ConfigDict, Field
 from .pptx_wrapper import PPTXWrapper

 from ..base.tool import BaseAction
-from ..utils import clean_string,
+from ..utils import clean_string, get_max_toolkit_length
+from ...runtime.utils.constants import TOOLKIT_NAME_META, TOOL_NAME_META, TOOLKIT_TYPE_META

 logger = logging.getLogger(__name__)

@@ -27,8 +28,6 @@ def get_tools(tool):
     ).get_tools()


-TOOLKIT_MAX_LENGTH = 25
-
 class PPTXToolkit(BaseToolkit):
     """
     PowerPoint (PPTX) manipulation toolkit for Alita.
@@ -45,8 +44,7 @@ class PPTXToolkit(BaseToolkit):

         return create_model(
             name,
-            bucket_name=(str, Field(description="Bucket name where PPTX files are stored",
-                                    json_schema_extra={'toolkit_name': True, 'max_toolkit_length': TOOLKIT_MAX_LENGTH})),
+            bucket_name=(str, Field(description="Bucket name where PPTX files are stored")),
             selected_tools=(List[Literal[tuple(selected_tools)]], Field(default=[], json_schema_extra={'args_schemas': selected_tools})),
             __config__=ConfigDict(json_schema_extra={
                 'metadata': {
@@ -75,19 +73,22 @@ class PPTXToolkit(BaseToolkit):
             selected_tools = []

         pptx_api_wrapper = PPTXWrapper(**kwargs)
-        prefix = clean_string(toolkit_name, TOOLKIT_MAX_LENGTH) + TOOLKIT_SPLITTER if toolkit_name else ''
         available_tools = pptx_api_wrapper.get_available_tools()
         tools = []

         for tool in available_tools:
             if selected_tools and tool["name"] not in selected_tools:
                 continue
-
+            description = tool["description"]
+            if toolkit_name:
+                description = f"Toolkit: {toolkit_name}\n{description}"
+            description = description[:1000]
             tools.append(BaseAction(
                 api_wrapper=pptx_api_wrapper,
-                name=
-                description=
-                args_schema=tool["args_schema"]
+                name=tool["name"],
+                description=description,
+                args_schema=tool["args_schema"],
+                metadata={TOOLKIT_NAME_META: toolkit_name, TOOLKIT_TYPE_META: name, TOOL_NAME_META: tool["name"]} if toolkit_name else {TOOL_NAME_META: tool["name"]}
             ))

         return cls(tools=tools)

alita_sdk/tools/qtest/__init__.py
CHANGED

@@ -6,8 +6,10 @@ from pydantic import create_model, BaseModel, ConfigDict, Field, SecretStr

 from .api_wrapper import QtestApiWrapper
 from .tool import QtestAction
-from ..
+from ..elitea_base import filter_missconfigured_index_tools
+from ..utils import clean_string, get_max_toolkit_length, check_connection_response
 from ...configurations.qtest import QtestConfiguration
+from ...runtime.utils.constants import TOOLKIT_NAME_META, TOOL_NAME_META, TOOLKIT_TYPE_META

 name = "qtest"

@@ -15,29 +17,30 @@ name = "qtest"
 def get_tools(tool):
     toolkit = QtestToolkit.get_toolkit(
         selected_tools=tool['settings'].get('selected_tools', []),
-        base_url=tool['settings'].get('base_url', None),
         qtest_project_id=tool['settings'].get('qtest_project_id', tool['settings'].get('project_id', None)),
+        no_of_tests_shown_in_dql_search=tool['settings'].get('no_of_tests_shown_in_dql_search'),
         qtest_configuration=tool['settings']['qtest_configuration'],
-        toolkit_name=tool.get('toolkit_name')
+        toolkit_name=tool.get('toolkit_name'),
+        llm=tool['settings'].get('llm', None)
     )
     return toolkit.tools


 class QtestToolkit(BaseToolkit):
     tools: List[BaseTool] = []
-    toolkit_max_length: int = 0

     @staticmethod
     def toolkit_config_schema() -> BaseModel:
         selected_tools = {x['name']: x['args_schema'].schema() for x in QtestApiWrapper.model_construct().get_available_tools()}
-        QtestToolkit.toolkit_max_length = get_max_toolkit_length(selected_tools)
         m = create_model(
             name,
-            qtest_configuration=(
+            qtest_configuration=(QtestConfiguration, Field(description="QTest API token", json_schema_extra={
                 'configuration_types': ['qtest']})),
-            qtest_project_id=(int, Field(
-
-
+            qtest_project_id=(int, Field(description="QTest project id")),
+            no_of_tests_shown_in_dql_search=(Optional[int], Field(description="Max number of items returned by dql search",
+                                                                  default=10)),
+
+            selected_tools=(List[Literal[tuple(selected_tools)]],
                             Field(default=[], json_schema_extra={'args_schemas': selected_tools})),
             __config__=ConfigDict(json_schema_extra={'metadata': {"label": "QTest", "icon_url": "qtest.svg",
                                                                   "categories": ["test management"],
@@ -61,6 +64,7 @@ class QtestToolkit(BaseToolkit):
         return m

     @classmethod
+    @filter_missconfigured_index_tools
     def get_toolkit(cls, selected_tools: list[str] | None = None, toolkit_name: Optional[str] = None, **kwargs):
         if selected_tools is None:
             selected_tools = []
@@ -70,21 +74,25 @@ class QtestToolkit(BaseToolkit):
             **kwargs['qtest_configuration'],
         }
         qtest_api_wrapper = QtestApiWrapper(**wrapper_payload)
-        prefix = clean_string(str(toolkit_name), cls.toolkit_max_length) + TOOLKIT_SPLITTER if toolkit_name else ''
         available_tools = qtest_api_wrapper.get_available_tools()
         tools = []
         for tool in available_tools:
             if selected_tools:
                 if tool["name"] not in selected_tools:
                     continue
+            description = f"{tool['description']}\nUrl: {qtest_api_wrapper.base_url}. Project id: {qtest_api_wrapper.qtest_project_id}"
+            if toolkit_name:
+                description = f"{description}\nToolkit: {toolkit_name}"
+            description = description[:1000]
             tools.append(QtestAction(
                 api_wrapper=qtest_api_wrapper,
-                name=
+                name=tool["name"],
                 mode=tool["mode"],
-                description=
-                args_schema=tool["args_schema"]
+                description=description,
+                args_schema=tool["args_schema"],
+                metadata={TOOLKIT_NAME_META: toolkit_name, TOOLKIT_TYPE_META: name, TOOL_NAME_META: tool["name"]} if toolkit_name else {TOOL_NAME_META: tool["name"]}
             ))
         return cls(tools=tools)

     def get_tools(self):
-        return self.tools
+        return self.tools