alita-sdk 0.3.263__py3-none-any.whl → 0.3.499__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- alita_sdk/cli/__init__.py +10 -0
- alita_sdk/cli/__main__.py +17 -0
- alita_sdk/cli/agent/__init__.py +5 -0
- alita_sdk/cli/agent/default.py +258 -0
- alita_sdk/cli/agent_executor.py +155 -0
- alita_sdk/cli/agent_loader.py +215 -0
- alita_sdk/cli/agent_ui.py +228 -0
- alita_sdk/cli/agents.py +3601 -0
- alita_sdk/cli/callbacks.py +647 -0
- alita_sdk/cli/cli.py +168 -0
- alita_sdk/cli/config.py +306 -0
- alita_sdk/cli/context/__init__.py +30 -0
- alita_sdk/cli/context/cleanup.py +198 -0
- alita_sdk/cli/context/manager.py +731 -0
- alita_sdk/cli/context/message.py +285 -0
- alita_sdk/cli/context/strategies.py +289 -0
- alita_sdk/cli/context/token_estimation.py +127 -0
- alita_sdk/cli/formatting.py +182 -0
- alita_sdk/cli/input_handler.py +419 -0
- alita_sdk/cli/inventory.py +1256 -0
- alita_sdk/cli/mcp_loader.py +315 -0
- alita_sdk/cli/toolkit.py +327 -0
- alita_sdk/cli/toolkit_loader.py +85 -0
- alita_sdk/cli/tools/__init__.py +43 -0
- alita_sdk/cli/tools/approval.py +224 -0
- alita_sdk/cli/tools/filesystem.py +1751 -0
- alita_sdk/cli/tools/planning.py +389 -0
- alita_sdk/cli/tools/terminal.py +414 -0
- alita_sdk/community/__init__.py +64 -8
- alita_sdk/community/inventory/__init__.py +224 -0
- alita_sdk/community/inventory/config.py +257 -0
- alita_sdk/community/inventory/enrichment.py +2137 -0
- alita_sdk/community/inventory/extractors.py +1469 -0
- alita_sdk/community/inventory/ingestion.py +3172 -0
- alita_sdk/community/inventory/knowledge_graph.py +1457 -0
- alita_sdk/community/inventory/parsers/__init__.py +218 -0
- alita_sdk/community/inventory/parsers/base.py +295 -0
- alita_sdk/community/inventory/parsers/csharp_parser.py +907 -0
- alita_sdk/community/inventory/parsers/go_parser.py +851 -0
- alita_sdk/community/inventory/parsers/html_parser.py +389 -0
- alita_sdk/community/inventory/parsers/java_parser.py +593 -0
- alita_sdk/community/inventory/parsers/javascript_parser.py +629 -0
- alita_sdk/community/inventory/parsers/kotlin_parser.py +768 -0
- alita_sdk/community/inventory/parsers/markdown_parser.py +362 -0
- alita_sdk/community/inventory/parsers/python_parser.py +604 -0
- alita_sdk/community/inventory/parsers/rust_parser.py +858 -0
- alita_sdk/community/inventory/parsers/swift_parser.py +832 -0
- alita_sdk/community/inventory/parsers/text_parser.py +322 -0
- alita_sdk/community/inventory/parsers/yaml_parser.py +370 -0
- alita_sdk/community/inventory/patterns/__init__.py +61 -0
- alita_sdk/community/inventory/patterns/ast_adapter.py +380 -0
- alita_sdk/community/inventory/patterns/loader.py +348 -0
- alita_sdk/community/inventory/patterns/registry.py +198 -0
- alita_sdk/community/inventory/presets.py +535 -0
- alita_sdk/community/inventory/retrieval.py +1403 -0
- alita_sdk/community/inventory/toolkit.py +173 -0
- alita_sdk/community/inventory/visualize.py +1370 -0
- alita_sdk/configurations/__init__.py +10 -0
- alita_sdk/configurations/ado.py +4 -2
- alita_sdk/configurations/azure_search.py +1 -1
- alita_sdk/configurations/bigquery.py +1 -1
- alita_sdk/configurations/bitbucket.py +94 -2
- alita_sdk/configurations/browser.py +18 -0
- alita_sdk/configurations/carrier.py +19 -0
- alita_sdk/configurations/confluence.py +96 -1
- alita_sdk/configurations/delta_lake.py +1 -1
- alita_sdk/configurations/figma.py +0 -5
- alita_sdk/configurations/github.py +65 -1
- alita_sdk/configurations/gitlab.py +79 -0
- alita_sdk/configurations/google_places.py +17 -0
- alita_sdk/configurations/jira.py +103 -0
- alita_sdk/configurations/postman.py +1 -1
- alita_sdk/configurations/qtest.py +1 -3
- alita_sdk/configurations/report_portal.py +19 -0
- alita_sdk/configurations/salesforce.py +19 -0
- alita_sdk/configurations/service_now.py +1 -12
- alita_sdk/configurations/sharepoint.py +19 -0
- alita_sdk/configurations/sonar.py +18 -0
- alita_sdk/configurations/sql.py +20 -0
- alita_sdk/configurations/testio.py +18 -0
- alita_sdk/configurations/testrail.py +88 -0
- alita_sdk/configurations/xray.py +94 -1
- alita_sdk/configurations/zephyr_enterprise.py +94 -1
- alita_sdk/configurations/zephyr_essential.py +95 -0
- alita_sdk/runtime/clients/artifact.py +12 -2
- alita_sdk/runtime/clients/client.py +235 -66
- alita_sdk/runtime/clients/mcp_discovery.py +342 -0
- alita_sdk/runtime/clients/mcp_manager.py +262 -0
- alita_sdk/runtime/clients/sandbox_client.py +373 -0
- alita_sdk/runtime/langchain/assistant.py +123 -17
- alita_sdk/runtime/langchain/constants.py +8 -1
- alita_sdk/runtime/langchain/document_loaders/AlitaDocxMammothLoader.py +315 -3
- alita_sdk/runtime/langchain/document_loaders/AlitaExcelLoader.py +209 -31
- alita_sdk/runtime/langchain/document_loaders/AlitaImageLoader.py +1 -1
- alita_sdk/runtime/langchain/document_loaders/AlitaJSONLoader.py +8 -2
- alita_sdk/runtime/langchain/document_loaders/AlitaMarkdownLoader.py +66 -0
- alita_sdk/runtime/langchain/document_loaders/AlitaPDFLoader.py +79 -10
- alita_sdk/runtime/langchain/document_loaders/AlitaPowerPointLoader.py +52 -15
- alita_sdk/runtime/langchain/document_loaders/AlitaPythonLoader.py +9 -0
- alita_sdk/runtime/langchain/document_loaders/AlitaTableLoader.py +1 -4
- alita_sdk/runtime/langchain/document_loaders/AlitaTextLoader.py +15 -2
- alita_sdk/runtime/langchain/document_loaders/ImageParser.py +30 -0
- alita_sdk/runtime/langchain/document_loaders/constants.py +187 -40
- alita_sdk/runtime/langchain/interfaces/llm_processor.py +4 -2
- alita_sdk/runtime/langchain/langraph_agent.py +406 -91
- alita_sdk/runtime/langchain/utils.py +51 -8
- alita_sdk/runtime/llms/preloaded.py +2 -6
- alita_sdk/runtime/models/mcp_models.py +61 -0
- alita_sdk/runtime/toolkits/__init__.py +26 -0
- alita_sdk/runtime/toolkits/application.py +9 -2
- alita_sdk/runtime/toolkits/artifact.py +19 -7
- alita_sdk/runtime/toolkits/datasource.py +13 -6
- alita_sdk/runtime/toolkits/mcp.py +780 -0
- alita_sdk/runtime/toolkits/planning.py +178 -0
- alita_sdk/runtime/toolkits/subgraph.py +11 -6
- alita_sdk/runtime/toolkits/tools.py +214 -60
- alita_sdk/runtime/toolkits/vectorstore.py +9 -4
- alita_sdk/runtime/tools/__init__.py +22 -0
- alita_sdk/runtime/tools/application.py +16 -4
- alita_sdk/runtime/tools/artifact.py +312 -19
- alita_sdk/runtime/tools/function.py +100 -4
- alita_sdk/runtime/tools/graph.py +81 -0
- alita_sdk/runtime/tools/image_generation.py +212 -0
- alita_sdk/runtime/tools/llm.py +539 -180
- alita_sdk/runtime/tools/mcp_inspect_tool.py +284 -0
- alita_sdk/runtime/tools/mcp_remote_tool.py +181 -0
- alita_sdk/runtime/tools/mcp_server_tool.py +3 -1
- alita_sdk/runtime/tools/planning/__init__.py +36 -0
- alita_sdk/runtime/tools/planning/models.py +246 -0
- alita_sdk/runtime/tools/planning/wrapper.py +607 -0
- alita_sdk/runtime/tools/router.py +2 -1
- alita_sdk/runtime/tools/sandbox.py +375 -0
- alita_sdk/runtime/tools/vectorstore.py +62 -63
- alita_sdk/runtime/tools/vectorstore_base.py +156 -85
- alita_sdk/runtime/utils/AlitaCallback.py +106 -20
- alita_sdk/runtime/utils/mcp_client.py +465 -0
- alita_sdk/runtime/utils/mcp_oauth.py +244 -0
- alita_sdk/runtime/utils/mcp_sse_client.py +405 -0
- alita_sdk/runtime/utils/mcp_tools_discovery.py +124 -0
- alita_sdk/runtime/utils/streamlit.py +41 -14
- alita_sdk/runtime/utils/toolkit_utils.py +28 -9
- alita_sdk/runtime/utils/utils.py +14 -0
- alita_sdk/tools/__init__.py +78 -35
- alita_sdk/tools/ado/__init__.py +0 -1
- alita_sdk/tools/ado/repos/__init__.py +10 -6
- alita_sdk/tools/ado/repos/repos_wrapper.py +12 -11
- alita_sdk/tools/ado/test_plan/__init__.py +10 -7
- alita_sdk/tools/ado/test_plan/test_plan_wrapper.py +56 -23
- alita_sdk/tools/ado/wiki/__init__.py +10 -11
- alita_sdk/tools/ado/wiki/ado_wrapper.py +114 -28
- alita_sdk/tools/ado/work_item/__init__.py +10 -11
- alita_sdk/tools/ado/work_item/ado_wrapper.py +63 -10
- alita_sdk/tools/advanced_jira_mining/__init__.py +10 -7
- alita_sdk/tools/aws/delta_lake/__init__.py +13 -11
- alita_sdk/tools/azure_ai/search/__init__.py +11 -7
- alita_sdk/tools/base_indexer_toolkit.py +392 -86
- alita_sdk/tools/bitbucket/__init__.py +18 -11
- alita_sdk/tools/bitbucket/api_wrapper.py +52 -9
- alita_sdk/tools/bitbucket/cloud_api_wrapper.py +5 -5
- alita_sdk/tools/browser/__init__.py +40 -16
- alita_sdk/tools/browser/crawler.py +3 -1
- alita_sdk/tools/browser/utils.py +15 -6
- alita_sdk/tools/carrier/__init__.py +17 -17
- alita_sdk/tools/carrier/backend_reports_tool.py +8 -4
- alita_sdk/tools/carrier/excel_reporter.py +8 -4
- alita_sdk/tools/chunkers/__init__.py +3 -1
- alita_sdk/tools/chunkers/code/codeparser.py +1 -1
- alita_sdk/tools/chunkers/sematic/json_chunker.py +1 -0
- alita_sdk/tools/chunkers/sematic/markdown_chunker.py +97 -6
- alita_sdk/tools/chunkers/sematic/proposal_chunker.py +1 -1
- alita_sdk/tools/chunkers/universal_chunker.py +270 -0
- alita_sdk/tools/cloud/aws/__init__.py +9 -6
- alita_sdk/tools/cloud/azure/__init__.py +9 -6
- alita_sdk/tools/cloud/gcp/__init__.py +9 -6
- alita_sdk/tools/cloud/k8s/__init__.py +9 -6
- alita_sdk/tools/code/linter/__init__.py +7 -7
- alita_sdk/tools/code/loaders/codesearcher.py +3 -2
- alita_sdk/tools/code/sonar/__init__.py +18 -12
- alita_sdk/tools/code_indexer_toolkit.py +199 -0
- alita_sdk/tools/confluence/__init__.py +14 -11
- alita_sdk/tools/confluence/api_wrapper.py +198 -58
- alita_sdk/tools/confluence/loader.py +10 -0
- alita_sdk/tools/custom_open_api/__init__.py +9 -4
- alita_sdk/tools/elastic/__init__.py +8 -7
- alita_sdk/tools/elitea_base.py +543 -64
- alita_sdk/tools/figma/__init__.py +10 -8
- alita_sdk/tools/figma/api_wrapper.py +352 -153
- alita_sdk/tools/github/__init__.py +13 -11
- alita_sdk/tools/github/api_wrapper.py +9 -26
- alita_sdk/tools/github/github_client.py +75 -12
- alita_sdk/tools/github/schemas.py +2 -1
- alita_sdk/tools/gitlab/__init__.py +11 -10
- alita_sdk/tools/gitlab/api_wrapper.py +135 -45
- alita_sdk/tools/gitlab_org/__init__.py +11 -9
- alita_sdk/tools/google/bigquery/__init__.py +12 -13
- alita_sdk/tools/google_places/__init__.py +18 -10
- alita_sdk/tools/jira/__init__.py +14 -8
- alita_sdk/tools/jira/api_wrapper.py +315 -168
- alita_sdk/tools/keycloak/__init__.py +8 -7
- alita_sdk/tools/localgit/local_git.py +56 -54
- alita_sdk/tools/memory/__init__.py +27 -11
- alita_sdk/tools/non_code_indexer_toolkit.py +7 -2
- alita_sdk/tools/ocr/__init__.py +8 -7
- alita_sdk/tools/openapi/__init__.py +10 -1
- alita_sdk/tools/pandas/__init__.py +8 -7
- alita_sdk/tools/pandas/api_wrapper.py +7 -25
- alita_sdk/tools/postman/__init__.py +8 -10
- alita_sdk/tools/postman/api_wrapper.py +19 -8
- alita_sdk/tools/postman/postman_analysis.py +8 -1
- alita_sdk/tools/pptx/__init__.py +8 -9
- alita_sdk/tools/qtest/__init__.py +19 -13
- alita_sdk/tools/qtest/api_wrapper.py +1784 -88
- alita_sdk/tools/rally/__init__.py +10 -9
- alita_sdk/tools/report_portal/__init__.py +20 -15
- alita_sdk/tools/salesforce/__init__.py +19 -15
- alita_sdk/tools/servicenow/__init__.py +14 -11
- alita_sdk/tools/sharepoint/__init__.py +14 -13
- alita_sdk/tools/sharepoint/api_wrapper.py +179 -39
- alita_sdk/tools/sharepoint/authorization_helper.py +191 -1
- alita_sdk/tools/sharepoint/utils.py +8 -2
- alita_sdk/tools/slack/__init__.py +10 -7
- alita_sdk/tools/sql/__init__.py +19 -18
- alita_sdk/tools/sql/api_wrapper.py +71 -23
- alita_sdk/tools/testio/__init__.py +18 -12
- alita_sdk/tools/testrail/__init__.py +10 -10
- alita_sdk/tools/testrail/api_wrapper.py +213 -45
- alita_sdk/tools/utils/__init__.py +28 -4
- alita_sdk/tools/utils/content_parser.py +181 -61
- alita_sdk/tools/utils/text_operations.py +254 -0
- alita_sdk/tools/vector_adapters/VectorStoreAdapter.py +83 -27
- alita_sdk/tools/xray/__init__.py +12 -7
- alita_sdk/tools/xray/api_wrapper.py +58 -113
- alita_sdk/tools/zephyr/__init__.py +9 -6
- alita_sdk/tools/zephyr_enterprise/__init__.py +13 -8
- alita_sdk/tools/zephyr_enterprise/api_wrapper.py +17 -7
- alita_sdk/tools/zephyr_essential/__init__.py +13 -9
- alita_sdk/tools/zephyr_essential/api_wrapper.py +289 -47
- alita_sdk/tools/zephyr_essential/client.py +6 -4
- alita_sdk/tools/zephyr_scale/__init__.py +10 -7
- alita_sdk/tools/zephyr_scale/api_wrapper.py +6 -2
- alita_sdk/tools/zephyr_squad/__init__.py +9 -6
- {alita_sdk-0.3.263.dist-info → alita_sdk-0.3.499.dist-info}/METADATA +180 -33
- alita_sdk-0.3.499.dist-info/RECORD +433 -0
- alita_sdk-0.3.499.dist-info/entry_points.txt +2 -0
- alita_sdk-0.3.263.dist-info/RECORD +0 -342
- {alita_sdk-0.3.263.dist-info → alita_sdk-0.3.499.dist-info}/WHEEL +0 -0
- {alita_sdk-0.3.263.dist-info → alita_sdk-0.3.499.dist-info}/licenses/LICENSE +0 -0
- {alita_sdk-0.3.263.dist-info → alita_sdk-0.3.499.dist-info}/top_level.txt +0 -0
alita_sdk/tools/bitbucket/__init__.py CHANGED
@@ -6,7 +6,10 @@ from .api_wrapper import BitbucketAPIWrapper
 from langchain_core.tools import BaseToolkit
 from langchain_core.tools import BaseTool
 from pydantic import BaseModel, Field, ConfigDict, create_model
-
+
+from ..base.tool import BaseAction
+from ..elitea_base import filter_missconfigured_index_tools
+from ..utils import clean_string, get_max_toolkit_length, check_connection_response
 from ...configurations.bitbucket import BitbucketConfiguration
 from ...configurations.pgvector import PgVectorConfiguration
 import requests
@@ -18,7 +21,6 @@ name = "bitbucket"
 def get_tools(tool):
     return AlitaBitbucketToolkit.get_toolkit(
         selected_tools=tool['settings'].get('selected_tools', []),
-        url=tool['settings']['url'],
         project=tool['settings']['project'],
         repository=tool['settings']['repository'],
         bitbucket_configuration=tool['settings']['bitbucket_configuration'],
@@ -36,20 +38,18 @@ def get_tools(tool):
 
 class AlitaBitbucketToolkit(BaseToolkit):
     tools: List[BaseTool] = []
-    toolkit_max_length: int = 0
 
     @staticmethod
     def toolkit_config_schema() -> BaseModel:
         selected_tools = {x['name']: x['args_schema'].schema() for x in
                           BitbucketAPIWrapper.model_construct().get_available_tools()}
-        AlitaBitbucketToolkit.toolkit_max_length = get_max_toolkit_length(selected_tools)
         m = create_model(
             name,
-            project=(str, Field(description="Project/Workspace"
-            repository=(str, Field(description="Repository"
+            project=(str, Field(description="Project/Workspace")),
+            repository=(str, Field(description="Repository")),
             branch=(str, Field(description="Main branch", default="main")),
             cloud=(Optional[bool], Field(description="Hosting Option", default=None)),
-            bitbucket_configuration=(
+            bitbucket_configuration=(BitbucketConfiguration, Field(description="Bitbucket Configuration", json_schema_extra={'configuration_types': ['bitbucket']})),
             pgvector_configuration=(Optional[PgVectorConfiguration], Field(default=None, description="PgVector Configuration", json_schema_extra={'configuration_types': ['pgvector']})),
             # embedder settings
             embedding_model=(Optional[str], Field(default=None, description="Embedding configuration.", json_schema_extra={'configuration_model': 'embedding'})),
@@ -83,6 +83,7 @@ class AlitaBitbucketToolkit(BaseToolkit):
         return m
 
     @classmethod
+    @filter_missconfigured_index_tools
     def get_toolkit(cls, selected_tools: list[str] | None = None, toolkit_name: Optional[str] = None, **kwargs):
         if selected_tools is None:
             selected_tools = []
@@ -96,15 +97,21 @@ class AlitaBitbucketToolkit(BaseToolkit):
         }
         bitbucket_api_wrapper = BitbucketAPIWrapper(**wrapper_payload)
         available_tools: List[Dict] = bitbucket_api_wrapper.get_available_tools()
-        prefix = clean_string(toolkit_name, cls.toolkit_max_length) + TOOLKIT_SPLITTER if toolkit_name else ''
         tools = []
         for tool in available_tools:
             if selected_tools:
                 if tool['name'] not in selected_tools:
                     continue
-
-
-
+            description = tool["description"] + f"\nrepo: {bitbucket_api_wrapper.repository}"
+            if toolkit_name:
+                description = f"{description}\nToolkit: {toolkit_name}"
+            description = description[:1000]
+            tools.append(BaseAction(
+                api_wrapper=bitbucket_api_wrapper,
+                name=tool["name"],
+                description=description,
+                args_schema=tool["args_schema"]
+            ))
         return cls(tools=tools)
 
     def get_tools(self):
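Note: this file shows a migration repeated across the release, where flat connection fields such as `url` move into typed configuration objects. A minimal sketch of a settings payload in the new shape; the exact keys inside the `bitbucket_configuration` object are an assumption, not taken from the diff:

```python
# Hypothetical settings payload for the new configuration-object style.
tool = {
    "toolkit_name": "my_bitbucket",
    "settings": {
        "selected_tools": ["list_branches_in_repo"],
        "project": "PROJ",
        "repository": "my-repo",
        "bitbucket_configuration": {"url": "https://bitbucket.example.com"},  # assumed key
    },
}

tools = get_tools(tool)  # each BaseAction description now carries repo and toolkit context
```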
alita_sdk/tools/bitbucket/api_wrapper.py CHANGED
@@ -11,7 +11,9 @@ from .bitbucket_constants import create_pr_data
 from .cloud_api_wrapper import BitbucketCloudApi, BitbucketServerApi
 from pydantic.fields import PrivateAttr
 
-from ..
+from ..code_indexer_toolkit import CodeIndexerToolkit
+from ..utils.available_tools_decorator import extend_with_parent_available_tools
+from ..elitea_base import extend_with_file_operations
 
 logger = logging.getLogger(__name__)
 
@@ -61,7 +63,7 @@ ListBranchesInRepoModel = create_model(
 
 ListFilesModel = create_model(
     "ListFilesModel",
-    path=(Optional[str], Field(description="The path to list files from")),
+    path=(Optional[str], Field(description="The path to list files from", default=None)),
     recursive=(bool, Field(description="Whether to list files recursively", default=True)),
     branch=(Optional[str], Field(description="The branch to list files from")),
 )
@@ -117,7 +119,7 @@ CommentOnIssueModel = create_model(
 )
 
 
-class BitbucketAPIWrapper(
+class BitbucketAPIWrapper(CodeIndexerToolkit):
     """Wrapper for Bitbucket API."""
 
     _bitbucket: Any = PrivateAttr()
@@ -167,7 +169,7 @@ class BitbucketAPIWrapper(BaseCodeToolApiWrapper):
             repository=values['repository']
         )
         cls._active_branch = values.get('branch')
-        return values
+        return super().validate_toolkit(values)
 
     def set_active_branch(self, branch_name: str) -> str:
         """Set the active branch for the bot."""
@@ -194,7 +196,7 @@ class BitbucketAPIWrapper(BaseCodeToolApiWrapper):
             if limit is not None:
                 branches = branches[:limit]
 
-            return branches
+            return "Found branches: " + ", ".join(branches)
         except Exception as e:
             return f"Failed to list branches: {str(e)}"
 
@@ -359,12 +361,15 @@ class BitbucketAPIWrapper(BaseCodeToolApiWrapper):
         # except Exception as e:
         #     raise ToolException(f"Can't extract file commit hash (`{file_path}`) due to error:\n{str(e)}")
 
-    def _read_file(self, file_path: str, branch: str) -> str:
+    def _read_file(self, file_path: str, branch: str, **kwargs) -> str:
         """
-        Reads a file from the
+        Reads a file from the bitbucket repo with optional partial read support.
+
         Parameters:
             file_path(str): the file path
             branch(str): branch name (by default: active_branch)
+            **kwargs: Additional parameters (offset, limit, head, tail) - currently ignored,
+                partial read handled client-side by base class methods
         Returns:
             str: The file decoded as a string
         """
@@ -398,8 +403,46 @@ class BitbucketAPIWrapper(BaseCodeToolApiWrapper):
             return self._read_file(file_path, branch)
         except Exception as e:
             return f"Failed to read file {file_path}: {str(e)}"
+
+    def _write_file(
+            self,
+            file_path: str,
+            content: str,
+            branch: str = None,
+            commit_message: str = None
+    ) -> str:
+        """
+        Write content to a file (create or update).
+
+        Parameters:
+            file_path: Path to the file
+            content: New file content
+            branch: Branch name (uses active branch if None)
+            commit_message: Commit message (not used by Bitbucket API)
+
+        Returns:
+            Success message
+        """
+        try:
+            branch = branch or self._active_branch
+
+            # Check if file exists by attempting to read it
+            try:
+                self._read_file(file_path, branch)
+                # File exists, update it using OLD/NEW format
+                old_content = self._read_file(file_path, branch)
+                update_query = f"OLD <<<<\n{old_content}\n>>>> OLD\nNEW <<<<\n{content}\n>>>> NEW"
+                self._bitbucket.update_file(file_path=file_path, update_query=update_query, branch=branch)
+                return f"Updated file {file_path}"
+            except:
+                # File doesn't exist, create it
+                self._bitbucket.create_file(file_path=file_path, file_contents=content, branch=branch)
+                return f"Created file {file_path}"
+        except Exception as e:
+            raise ToolException(f"Unable to write file {file_path}: {str(e)}")
 
-    @
+    @extend_with_parent_available_tools
+    @extend_with_file_operations
     def get_available_tools(self):
         return [
             {
@@ -473,5 +516,5 @@ class BitbucketAPIWrapper(BaseCodeToolApiWrapper):
             "ref": self.add_pull_request_comment,
             "description": self.add_pull_request_comment.__doc__ or "Add a comment to a pull request in the repository.",
             "args_schema": AddPullRequestCommentModel,
-        }
+        }
     ]
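The new `_write_file` drives updates through an OLD/NEW block format rather than posting raw replacement content. A minimal sketch of the string it assembles (content values are illustrative):

```python
# Illustrative reconstruction of the update_query built by _write_file.
old_content = "version: 1"
new_content = "version: 2"
update_query = f"OLD <<<<\n{old_content}\n>>>> OLD\nNEW <<<<\n{new_content}\n>>>> NEW"
print(update_query)
# OLD <<<<
# version: 1
# >>>> OLD
# NEW <<<<
# version: 2
# >>>> NEW
```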
alita_sdk/tools/bitbucket/cloud_api_wrapper.py CHANGED
@@ -35,7 +35,7 @@ def normalize_response(response) -> Dict[str, Any]:
 class BitbucketApiAbstract(ABC):
 
     @abstractmethod
-    def list_branches(self) -> str:
+    def list_branches(self) -> List[str]:
         pass
 
     @abstractmethod
@@ -86,9 +86,9 @@ class BitbucketServerApi(BitbucketApiAbstract):
         self.password = password
         self.api_client = Bitbucket(url=url, username=username, password=password)
 
-    def list_branches(self) -> str:
+    def list_branches(self) -> List[str]:
         branches = self.api_client.get_branches(project_key=self.project, repository_slug=self.repository)
-        return
+        return [branch['displayId'] for branch in branches]
 
     def create_branch(self, branch_name: str, branch_from: str) -> Response:
         return self.api_client.create_branch(
@@ -257,10 +257,10 @@ class BitbucketCloudApi(BitbucketApiAbstract):
         except Exception as e:
             raise ToolException(f"Unable to connect to the repository '{self.repository_name}' due to error:\n{str(e)}")
 
-    def list_branches(self) -> str:
+    def list_branches(self) -> List[str]:
         branches = self.repository.branches.each()
         branch_names = [branch.name for branch in branches]
-        return
+        return branch_names
 
     def _get_branch(self, branch_name: str) -> Response:
         return self.repository.branches.get(branch_name)
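With the API layer now returning a typed `List[str]`, the human-readable formatting moves up into the wrapper's `list_branches` (shown in the api_wrapper.py hunk above). Illustrative values:

```python
# The abstract API returns branch names; the wrapper formats them for the agent.
branches = ["main", "develop", "feature/login"]
print("Found branches: " + ", ".join(branches))
# Found branches: main, develop, feature/login
```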
alita_sdk/tools/browser/__init__.py CHANGED
@@ -1,16 +1,19 @@
 from typing import List, Optional, Literal
 from langchain_core.tools import BaseTool, BaseToolkit
 
-from pydantic import create_model, BaseModel, ConfigDict, Field,
+from pydantic import create_model, BaseModel, ConfigDict, Field, model_validator
 
 from langchain_community.utilities.google_search import GoogleSearchAPIWrapper
 from langchain_community.utilities.wikipedia import WikipediaAPIWrapper
 from .google_search_rag import GoogleSearchResults
 from .crawler import SingleURLCrawler, MultiURLCrawler, GetHTMLContent, GetPDFContent
 from .wiki import WikipediaQueryRun
-from ..utils import get_max_toolkit_length, clean_string
+from ..utils import get_max_toolkit_length, clean_string
+from ...configurations.browser import BrowserConfiguration
 from logging import getLogger
 
+from ...configurations.pgvector import PgVectorConfiguration
+
 logger = getLogger(__name__)
 
 name = "browser"
@@ -19,8 +22,9 @@ name = "browser"
 def get_tools(tool):
     return BrowserToolkit().get_toolkit(
         selected_tools=tool['settings'].get('selected_tools', []),
-
-
+        browser_configuration=tool['settings']['browser_configuration'],
+        pgvector_configuration=tool['settings'].get('pgvector_configuration', {}),
+        embedding_model=tool['settings'].get('embedding_model'),
         toolkit_name=tool.get('toolkit_name', '')
     ).get_tools()
 
@@ -38,22 +42,29 @@ class BrowserToolkit(BaseToolkit):
             'google': GoogleSearchResults.__pydantic_fields__['args_schema'].default.schema(),
             'wiki': WikipediaQueryRun.__pydantic_fields__['args_schema'].default.schema()
         }
-        BrowserToolkit.toolkit_max_length = get_max_toolkit_length(selected_tools)
 
         def validate_google_fields(cls, values):
             if 'google' in values.get('selected_tools', []):
-
-
+                browser_config = values.get('browser_configuration', {})
+                google_cse_id = browser_config.get('google_cse_id') is not None if browser_config else False
+                google_api_key = browser_config.get('google_api_key') is not None if browser_config else False
                 if not (google_cse_id and google_api_key):
                     raise ValueError("google_cse_id and google_api_key are required when 'google' is in selected_tools")
             return values
 
         return create_model(
             name,
-            __config__=ConfigDict(json_schema_extra={'metadata': {"label": "Browser", "icon_url": None,
-
-
-
+            __config__=ConfigDict(json_schema_extra={'metadata': {"label": "Browser", "icon_url": None,
+                                                                  "categories": ["testing"],
+                                                                  "extra_categories": [
+                                                                      "web scraping", "search", "crawler"
+                                                                  ]}}),
+            browser_configuration=(Optional[BrowserConfiguration],
+                                   Field(description="Browser Configuration (required for tools and `google`)",
+                                         default=None, json_schema_extra={'configuration_types': ['browser']})),
+            pgvector_configuration=(Optional[PgVectorConfiguration],
+                                    Field(description="PgVector configuration (required for tools `multi_url_crawler`)",
+                                          default=None, json_schema_extra={'configuration_types': ['pgvector']})),
             selected_tools=(List[Literal[tuple(selected_tools)]],
                             Field(default=[], json_schema_extra={'args_schemas': selected_tools})),
             __validators__={
@@ -65,8 +76,19 @@ class BrowserToolkit(BaseToolkit):
     def get_toolkit(cls, selected_tools: list[str] | None = None, toolkit_name: Optional[str] = None, **kwargs):
         if selected_tools is None:
             selected_tools = []
+
+        wrapper_payload_google = {
+            **kwargs,
+            **kwargs.get('browser_configuration', {}),
+            **kwargs.get('pgvector_configuration', {}),
+        }
+
+        wrapper_payload_rag_based = {
+            **kwargs,
+            **kwargs.get('pgvector_configuration', {}),
+        }
+
         tools = []
-        prefix = clean_string(toolkit_name, cls.toolkit_max_length) + TOOLKIT_SPLITTER if toolkit_name else ''
         if not selected_tools:
             selected_tools = [
                 'single_url_crawler',
@@ -80,7 +102,7 @@ class BrowserToolkit(BaseToolkit):
             if tool == 'single_url_crawler':
                 tool_entry = SingleURLCrawler()
             elif tool == 'multi_url_crawler':
-                tool_entry = MultiURLCrawler()
+                tool_entry = MultiURLCrawler(**wrapper_payload_rag_based)
             elif tool == 'get_html_content':
                 tool_entry = GetHTMLContent()
             elif tool == 'get_pdf_content':
@@ -88,8 +110,8 @@ class BrowserToolkit(BaseToolkit):
             elif tool == 'google':
                 try:
                     google_api_wrapper = GoogleSearchAPIWrapper(
-                        google_api_key=
-                        google_cse_id=
+                        google_api_key=wrapper_payload_google.get('google_api_key'),
+                        google_cse_id=wrapper_payload_google.get('google_cse_id')
                     )
                     tool_entry = GoogleSearchResults(api_wrapper=google_api_wrapper)
                     # rename the tool to avoid conflicts
@@ -103,7 +125,9 @@ class BrowserToolkit(BaseToolkit):
 
             # Only add the tool if it was successfully created
             if tool_entry is not None:
-
+                if toolkit_name:
+                    tool_entry.description = f"{tool_entry.description}\nToolkit: {toolkit_name}"
+                tool_entry.description = tool_entry.description[:1000]
                 tools.append(tool_entry)
         return cls(tools=tools)
 
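`get_toolkit` now flattens nested configuration objects into a single keyword payload via dict unpacking, with later sources overriding earlier keys. A sketch under assumed key names (a `connection_string` key inside `pgvector_configuration` is an assumption):

```python
# Hypothetical kwargs as get_toolkit might receive them; values are placeholders.
kwargs = {
    "embedding_model": "text-embedding-3-small",
    "browser_configuration": {"google_api_key": "KEY", "google_cse_id": "CSE"},
    "pgvector_configuration": {"connection_string": "postgresql+psycopg://localhost/vectors"},
}
wrapper_payload_google = {
    **kwargs,
    **kwargs.get("browser_configuration", {}),
    **kwargs.get("pgvector_configuration", {}),
}
# Nested values become reachable as flat keys:
assert wrapper_payload_google["google_api_key"] == "KEY"
assert wrapper_payload_google["connection_string"].startswith("postgresql")
```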
alita_sdk/tools/browser/crawler.py CHANGED
@@ -27,13 +27,15 @@ class MultiURLCrawler(BaseTool):
     max_response_size: int = 3000
     name: str = "multi_url_crawler"
     description: str = "Crawls multiple URLs and returns the content related to query"
+    connection_string: str = None
     args_schema: Type[BaseModel] = create_model("MultiURLCrawlerModel",
                                                 query=(str, Field(description="Query text to search pages")),
                                                 urls=(list[str], Field(description="list of URLs to search like ['url1', 'url2']")))
 
     def _run(self, query: str, urls: list[str], run_manager=None):
         urls = [url.strip() for url in urls]
-        return webRag(urls, self.max_response_size, query
+        return webRag(urls=urls, max_response_size=self.max_response_size, query=query,
+                      connection_string=self.connection_string)
 
 
 class GetHTMLContent(BaseTool):
alita_sdk/tools/browser/utils.py CHANGED
@@ -6,9 +6,9 @@ from langchain.text_splitter import CharacterTextSplitter
 import fitz
 
 try:
-    from
+    from langchain_postgres import PGVector
 except ImportError:
-
+    PGVector = None
 
 from langchain_community.embeddings.sentence_transformer import (
     SentenceTransformerEmbeddings,
@@ -32,13 +32,22 @@ def get_page(urls, html_only=False):
     return docs_transformed
 
 
-def webRag(urls, max_response_size, query):
-    if
-        return "
+def webRag(urls, max_response_size, query, connection_string=None):
+    if PGVector is None:
+        return "PGVector is not initialized. Web rag is not available."
+
+    if not connection_string:
+        return "Connection string or embedding model is missing. Web rag is not available."
     text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
     docs = text_splitter.split_documents(get_page(urls))
     embedding_function = SentenceTransformerEmbeddings(model_name="all-MiniLM-L6-v2")
-    db =
+    db = PGVector.from_documents(
+        documents=docs,
+        embedding=embedding_function,
+        collection_name="web_rag",
+        pre_delete_collection=True,
+        connection=connection_string
+    )
     docs = db.search(query, "mmr", k=10)
     text = ""
     for doc in docs:
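A hedged usage sketch for the reworked `webRag`: it now requires a PGVector connection string and degrades to an explanatory message instead of raising when the dependency or configuration is missing. The URL and DSN below are placeholders:

```python
# Placeholder URL and DSN; webRag returns a string either way.
result = webRag(
    urls=["https://example.com/docs"],
    max_response_size=3000,
    query="how do I configure the client?",
    connection_string="postgresql+psycopg://user:pass@localhost:5432/vectors",
)
print(result)  # matched chunks, or a "not available" message if PGVector/DSN is absent
```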
alita_sdk/tools/carrier/__init__.py CHANGED
@@ -1,12 +1,14 @@
 import logging
 from typing import Dict, List, Optional, Literal
 from langchain_core.tools import BaseToolkit, BaseTool
-from pydantic import create_model, BaseModel, ConfigDict, Field
+from pydantic import create_model, BaseModel, ConfigDict, Field
 from functools import lru_cache
 
 from .api_wrapper import CarrierAPIWrapper
 from .tools import __all__
-from ..
+from ..elitea_base import filter_missconfigured_index_tools
+from ..utils import clean_string, get_max_toolkit_length
+from ...configurations.carrier import CarrierConfiguration
 
 logger = logging.getLogger(__name__)
 
@@ -15,7 +17,6 @@ name = 'carrier'
 
 class AlitaCarrierToolkit(BaseToolkit):
     tools: List[BaseTool] = []
-    toolkit_max_length: int = 100
 
     @classmethod
     @lru_cache(maxsize=32)
@@ -24,15 +25,10 @@ class AlitaCarrierToolkit(BaseToolkit):
         for t in __all__:
             default = t['tool'].__pydantic_fields__['args_schema'].default
             selected_tools[t['name']] = default.schema() if default else default
-        cls.toolkit_max_length = get_max_toolkit_length(selected_tools)
         return create_model(
             name,
-            url=(str, Field(description="Carrier Platform Base URL")),
-            organization=(str, Field(description="Carrier Organization Name", json_schema_extra={'toolkit_name': True,
-                                                                                                  'max_toolkit_length': cls.toolkit_max_length})),
-            private_token=(
-                SecretStr, Field(description="Carrier Platform Authentication Token", json_schema_extra={'secret': True})),
             project_id=(Optional[str], Field(None, description="Optional project ID for scoped operations")),
+            carrier_configuration=(CarrierConfiguration, Field(description="Carrier Configuration", json_schema_extra={'configuration_types': ['carrier']})),
             selected_tools=(
                 List[Literal[tuple(selected_tools)]],
                 Field(default=[], json_schema_extra={"args_schemas": selected_tools}),
@@ -49,6 +45,7 @@ class AlitaCarrierToolkit(BaseToolkit):
         )
 
     @classmethod
+    @filter_missconfigured_index_tools
     def get_toolkit(
         cls,
         selected_tools: Optional[List[str]] = None,
@@ -58,23 +55,28 @@ class AlitaCarrierToolkit(BaseToolkit):
         selected_tools = selected_tools or []
         logger.info(f"[AlitaCarrierToolkit] Initializing toolkit with selected tools: {selected_tools}")
 
+        wrapper_payload = {
+            **kwargs,
+            **kwargs.get('carrier_configuration', {}),
+        }
+
         try:
-            carrier_api_wrapper = CarrierAPIWrapper(**
+            carrier_api_wrapper = CarrierAPIWrapper(**wrapper_payload)
             logger.info(
-                f"[AlitaCarrierToolkit] CarrierAPIWrapper initialized successfully with URL: {
+                f"[AlitaCarrierToolkit] CarrierAPIWrapper initialized successfully with URL: {wrapper_payload.get('url')}")
         except Exception as e:
             logger.exception(f"[AlitaCarrierToolkit] Error initializing CarrierAPIWrapper: {e}")
             raise ValueError(f"CarrierAPIWrapper initialization error: {e}")
 
-        prefix = clean_string(toolkit_name, cls.toolkit_max_length) + TOOLKIT_SPLITTER if toolkit_name else ''
-
         tools = []
         for tool_def in __all__:
             if selected_tools and tool_def['name'] not in selected_tools:
                 continue
             try:
                 tool_instance = tool_def['tool'](api_wrapper=carrier_api_wrapper)
-
+                if toolkit_name:
+                    tool_instance.description = f"{tool_instance.description}\nToolkit: {toolkit_name}"
+                tool_instance.description = tool_instance.description[:1000]
                 tools.append(tool_instance)
                 logger.info(f"[AlitaCarrierToolkit] Successfully initialized tool '{tool_instance.name}'")
             except Exception as e:
@@ -92,9 +94,7 @@ class AlitaCarrierToolkit(BaseToolkit):
 def get_tools(tool_config: Dict) -> List[BaseTool]:
     return AlitaCarrierToolkit.get_toolkit(
         selected_tools=tool_config.get('selected_tools', []),
-        url=tool_config['settings']['url'],
         project_id=tool_config['settings'].get('project_id'),
-
-        private_token=tool_config['settings']['private_token'],
+        carrier_configuration=tool_config['settings']['carrier_configuration'],
        toolkit_name=tool_config.get('toolkit_name')
     ).get_tools()
alita_sdk/tools/carrier/backend_reports_tool.py CHANGED
@@ -154,6 +154,7 @@ class CreateExcelReportTool(BaseTool):
             "tp_threshold": (int, Field(default=None, description="Throughput threshold")),
             "rt_threshold": (int, Field(default=None, description="Response time threshold")),
             "er_threshold": (int, Field(default=None, description="Error rate threshold")),
+            "include_group_pauses": (bool, Field(default=False, description="Include group pauses in Gatling Excel report")),
         }
     )
 
@@ -200,6 +201,7 @@ class CreateExcelReportTool(BaseTool):
             "tp_threshold": 10,
             "rt_threshold": 500,
             "er_threshold": 5,
+            "include_group_pauses": False,
         }
 
     def _request_parameter_confirmation(self, default_parameters):
@@ -217,7 +219,8 @@ class CreateExcelReportTool(BaseTool):
         excel_report_file_name = f'/tmp/reports_test_results_{report["build_id"]}_excel_report.xlsx'
         bucket_name = report["name"].replace("_", "").replace(" ", "").lower()
 
-        result_stats_j = self._parse_report(test_log_file_path, lg_type, parameters["think_time"],
+        result_stats_j = self._parse_report(test_log_file_path, lg_type, parameters["think_time"],
+                                            parameters["include_group_pauses"], is_absolute_file_path=True)
         calc_thr_j = self._calculate_thresholds(result_stats_j, parameters)
 
         return self._generate_and_upload_report(
@@ -233,21 +236,22 @@ class CreateExcelReportTool(BaseTool):
         excel_report_file_name = f'{file_path}_{current_date}.xlsx'
         bucket_name = bucket
 
-        result_stats_j = self._parse_report(file_path, lg_type, parameters["think_time"],
+        result_stats_j = self._parse_report(file_path, lg_type, parameters["think_time"],
+                                            parameters["include_group_pauses"], is_absolute_file_path=True)
         calc_thr_j = self._calculate_thresholds(result_stats_j, parameters)
 
         return self._generate_and_upload_report(
             result_stats_j, carrier_report, calc_thr_j, parameters, excel_report_file_name, bucket_name, file_path
         )
 
-    def _parse_report(self, file_path, lg_type, think_time, is_absolute_file_path=False):
+    def _parse_report(self, file_path, lg_type, think_time, include_group_pauses, is_absolute_file_path=False):
         """Parse the report based on its type."""
         if lg_type == "gatling":
             if is_absolute_file_path:
                 report_file = file_path
             else:
                 report_file = get_latest_log_file(file_path, "simulation.log")
-            parser = GatlingReportParser(report_file, think_time)
+            parser = GatlingReportParser(report_file, include_group_pauses, think_time)
             result_stats_j = parser.parse()
             result_stats_j["requests"].update(result_stats_j["groups"])
         elif lg_type == "jmeter":
alita_sdk/tools/carrier/excel_reporter.py CHANGED
@@ -118,9 +118,10 @@ class JMeterReportParser(PerformanceReportParser):
 
 class GatlingReportParser(PerformanceReportParser):
 
-    def __init__(self, log_file: str, think_times="5,0-10,0"):
+    def __init__(self, log_file: str, include_group_pauses, think_times="5,0-10,0"):
         self.calculated_think_time = think_times
         self.log_file = log_file
+        self.include_group_pauses = include_group_pauses
 
     @staticmethod
     def convert_timestamp_to_datetime(timestamp: int) -> datetime:
@@ -210,7 +211,7 @@ class GatlingReportParser(PerformanceReportParser):
                     ramp_end = self.convert_timestamp_to_datetime(int(line.split('\t')[3]))
 
                 elif line.startswith('GROUP'):
-                    self.parse_group_line(groups, line)
+                    self.parse_group_line(groups, line, self.include_group_pauses)
             except FileNotFoundError as e:
                 print(f"File not found: {e}")
                 raise
@@ -242,11 +243,14 @@ class GatlingReportParser(PerformanceReportParser):
             requests[request_name].append((response_time, status))
 
     @staticmethod
-    def parse_group_line(groups, line):
+    def parse_group_line(groups, line, include_group_pauses):
         parts = line.split('\t')
         if len(parts) >= 6:
             group_name = parts[1]
-
+            if include_group_pauses:
+                response_time = int(parts[3]) - int(parts[2])
+            else:
+                response_time = int(parts[4])
             status = parts[5].strip()
             groups[group_name].append((response_time, status))
 
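Worked example of the new `parse_group_line` branch on a tab-separated Gatling GROUP record. The field semantics (start/end timestamps in parts[2]/parts[3], cumulated response time in parts[4]) are inferred from the arithmetic in the diff:

```python
# Illustrative GROUP record: name, start_ts, end_ts, cumulated_rt, status.
parts = "GROUP\tcheckout\t1700000000000\t1700000004500\t1200\tOK".split("\t")

include_group_pauses = True
rt_with_pauses = int(parts[3]) - int(parts[2])  # wall-clock span: 4500 ms, pauses included
rt_without_pauses = int(parts[4])               # cumulated response time: 1200 ms
response_time = rt_with_pauses if include_group_pauses else rt_without_pauses
```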
alita_sdk/tools/chunkers/__init__.py CHANGED
@@ -3,6 +3,7 @@ from .sematic.statistical_chunker import statistical_chunker
 from .sematic.markdown_chunker import markdown_chunker
 from .sematic.proposal_chunker import proposal_chunker
 from .sematic.json_chunker import json_chunker
+from .universal_chunker import universal_chunker, chunk_single_document, get_file_type
 from .models import StatisticalChunkerConfig, MarkdownChunkerConfig, ProposalChunkerConfig
 
 __all__ = {
@@ -10,7 +11,8 @@ __all__ = {
     'statistical': statistical_chunker,
     'markdown': markdown_chunker,
     'proposal': proposal_chunker,
-    'json': json_chunker
+    'json': json_chunker,
+    'universal': universal_chunker,
 }
 
 __confluence_chunkers__ = {
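Sketch of selecting the newly registered chunker from the `__all__` registry; the `(generator, config)` call shape mirrors `json_chunker`'s signature elsewhere in this diff, and the empty config is an assumption:

```python
# `__all__` here is a dict mapping chunker names to callables.
from alita_sdk.tools.chunkers import __all__ as chunkers

universal = chunkers["universal"]  # alongside 'statistical', 'markdown', 'proposal', 'json'
for doc in universal(document_generator, config={}):  # document_generator: your Document stream
    print(doc.metadata.get("chunk_id"))
```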
alita_sdk/tools/chunkers/code/codeparser.py CHANGED
@@ -79,7 +79,7 @@ def parse_code_files_for_db(file_content_generator: Generator[str, None, None],
         for splitted_document in splitted_documents:
             metadata = {
                 "filename": file_name,
-                "method_name": node.name,
+                "method_name": node.name if node.name else 'unknown',
                 "language": programming_language.value,
             }
             commit_hash = data.get("commit_hash")
alita_sdk/tools/chunkers/sematic/json_chunker.py CHANGED
@@ -17,6 +17,7 @@ def json_chunker(file_content_generator: Generator[Document, None, None], config
             for chunk in chunks:
                 metadata = doc.metadata.copy()
                 metadata['chunk_id'] = chunk_id
+                metadata['method_name'] = 'json'
                 chunk_id += 1
                 yield Document(page_content=json.dumps(chunk), metadata=metadata)
         except Exception as e: