alita-sdk 0.3.379__py3-none-any.whl → 0.3.627__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- alita_sdk/cli/__init__.py +10 -0
- alita_sdk/cli/__main__.py +17 -0
- alita_sdk/cli/agent/__init__.py +5 -0
- alita_sdk/cli/agent/default.py +258 -0
- alita_sdk/cli/agent_executor.py +156 -0
- alita_sdk/cli/agent_loader.py +245 -0
- alita_sdk/cli/agent_ui.py +228 -0
- alita_sdk/cli/agents.py +3113 -0
- alita_sdk/cli/callbacks.py +647 -0
- alita_sdk/cli/cli.py +168 -0
- alita_sdk/cli/config.py +306 -0
- alita_sdk/cli/context/__init__.py +30 -0
- alita_sdk/cli/context/cleanup.py +198 -0
- alita_sdk/cli/context/manager.py +731 -0
- alita_sdk/cli/context/message.py +285 -0
- alita_sdk/cli/context/strategies.py +289 -0
- alita_sdk/cli/context/token_estimation.py +127 -0
- alita_sdk/cli/formatting.py +182 -0
- alita_sdk/cli/input_handler.py +419 -0
- alita_sdk/cli/inventory.py +1073 -0
- alita_sdk/cli/mcp_loader.py +315 -0
- alita_sdk/cli/testcases/__init__.py +94 -0
- alita_sdk/cli/testcases/data_generation.py +119 -0
- alita_sdk/cli/testcases/discovery.py +96 -0
- alita_sdk/cli/testcases/executor.py +84 -0
- alita_sdk/cli/testcases/logger.py +85 -0
- alita_sdk/cli/testcases/parser.py +172 -0
- alita_sdk/cli/testcases/prompts.py +91 -0
- alita_sdk/cli/testcases/reporting.py +125 -0
- alita_sdk/cli/testcases/setup.py +108 -0
- alita_sdk/cli/testcases/test_runner.py +282 -0
- alita_sdk/cli/testcases/utils.py +39 -0
- alita_sdk/cli/testcases/validation.py +90 -0
- alita_sdk/cli/testcases/workflow.py +196 -0
- alita_sdk/cli/toolkit.py +327 -0
- alita_sdk/cli/toolkit_loader.py +85 -0
- alita_sdk/cli/tools/__init__.py +43 -0
- alita_sdk/cli/tools/approval.py +224 -0
- alita_sdk/cli/tools/filesystem.py +1751 -0
- alita_sdk/cli/tools/planning.py +389 -0
- alita_sdk/cli/tools/terminal.py +414 -0
- alita_sdk/community/__init__.py +72 -12
- alita_sdk/community/inventory/__init__.py +236 -0
- alita_sdk/community/inventory/config.py +257 -0
- alita_sdk/community/inventory/enrichment.py +2137 -0
- alita_sdk/community/inventory/extractors.py +1469 -0
- alita_sdk/community/inventory/ingestion.py +3172 -0
- alita_sdk/community/inventory/knowledge_graph.py +1457 -0
- alita_sdk/community/inventory/parsers/__init__.py +218 -0
- alita_sdk/community/inventory/parsers/base.py +295 -0
- alita_sdk/community/inventory/parsers/csharp_parser.py +907 -0
- alita_sdk/community/inventory/parsers/go_parser.py +851 -0
- alita_sdk/community/inventory/parsers/html_parser.py +389 -0
- alita_sdk/community/inventory/parsers/java_parser.py +593 -0
- alita_sdk/community/inventory/parsers/javascript_parser.py +629 -0
- alita_sdk/community/inventory/parsers/kotlin_parser.py +768 -0
- alita_sdk/community/inventory/parsers/markdown_parser.py +362 -0
- alita_sdk/community/inventory/parsers/python_parser.py +604 -0
- alita_sdk/community/inventory/parsers/rust_parser.py +858 -0
- alita_sdk/community/inventory/parsers/swift_parser.py +832 -0
- alita_sdk/community/inventory/parsers/text_parser.py +322 -0
- alita_sdk/community/inventory/parsers/yaml_parser.py +370 -0
- alita_sdk/community/inventory/patterns/__init__.py +61 -0
- alita_sdk/community/inventory/patterns/ast_adapter.py +380 -0
- alita_sdk/community/inventory/patterns/loader.py +348 -0
- alita_sdk/community/inventory/patterns/registry.py +198 -0
- alita_sdk/community/inventory/presets.py +535 -0
- alita_sdk/community/inventory/retrieval.py +1403 -0
- alita_sdk/community/inventory/toolkit.py +173 -0
- alita_sdk/community/inventory/toolkit_utils.py +176 -0
- alita_sdk/community/inventory/visualize.py +1370 -0
- alita_sdk/configurations/__init__.py +1 -1
- alita_sdk/configurations/ado.py +141 -20
- alita_sdk/configurations/bitbucket.py +94 -2
- alita_sdk/configurations/confluence.py +130 -1
- alita_sdk/configurations/figma.py +76 -0
- alita_sdk/configurations/gitlab.py +91 -0
- alita_sdk/configurations/jira.py +103 -0
- alita_sdk/configurations/openapi.py +329 -0
- alita_sdk/configurations/qtest.py +72 -1
- alita_sdk/configurations/report_portal.py +96 -0
- alita_sdk/configurations/sharepoint.py +148 -0
- alita_sdk/configurations/testio.py +83 -0
- alita_sdk/configurations/testrail.py +88 -0
- alita_sdk/configurations/xray.py +93 -0
- alita_sdk/configurations/zephyr_enterprise.py +93 -0
- alita_sdk/configurations/zephyr_essential.py +75 -0
- alita_sdk/runtime/clients/artifact.py +3 -3
- alita_sdk/runtime/clients/client.py +388 -46
- alita_sdk/runtime/clients/mcp_discovery.py +342 -0
- alita_sdk/runtime/clients/mcp_manager.py +262 -0
- alita_sdk/runtime/clients/sandbox_client.py +8 -21
- alita_sdk/runtime/langchain/_constants_bkup.py +1318 -0
- alita_sdk/runtime/langchain/assistant.py +157 -39
- alita_sdk/runtime/langchain/constants.py +647 -1
- alita_sdk/runtime/langchain/document_loaders/AlitaDocxMammothLoader.py +315 -3
- alita_sdk/runtime/langchain/document_loaders/AlitaExcelLoader.py +103 -60
- alita_sdk/runtime/langchain/document_loaders/AlitaJSONLinesLoader.py +77 -0
- alita_sdk/runtime/langchain/document_loaders/AlitaJSONLoader.py +10 -4
- alita_sdk/runtime/langchain/document_loaders/AlitaPowerPointLoader.py +226 -7
- alita_sdk/runtime/langchain/document_loaders/AlitaTextLoader.py +5 -2
- alita_sdk/runtime/langchain/document_loaders/constants.py +40 -19
- alita_sdk/runtime/langchain/langraph_agent.py +405 -84
- alita_sdk/runtime/langchain/utils.py +106 -7
- alita_sdk/runtime/llms/preloaded.py +2 -6
- alita_sdk/runtime/models/mcp_models.py +61 -0
- alita_sdk/runtime/skills/__init__.py +91 -0
- alita_sdk/runtime/skills/callbacks.py +498 -0
- alita_sdk/runtime/skills/discovery.py +540 -0
- alita_sdk/runtime/skills/executor.py +610 -0
- alita_sdk/runtime/skills/input_builder.py +371 -0
- alita_sdk/runtime/skills/models.py +330 -0
- alita_sdk/runtime/skills/registry.py +355 -0
- alita_sdk/runtime/skills/skill_runner.py +330 -0
- alita_sdk/runtime/toolkits/__init__.py +31 -0
- alita_sdk/runtime/toolkits/application.py +29 -10
- alita_sdk/runtime/toolkits/artifact.py +20 -11
- alita_sdk/runtime/toolkits/datasource.py +13 -6
- alita_sdk/runtime/toolkits/mcp.py +783 -0
- alita_sdk/runtime/toolkits/mcp_config.py +1048 -0
- alita_sdk/runtime/toolkits/planning.py +178 -0
- alita_sdk/runtime/toolkits/skill_router.py +238 -0
- alita_sdk/runtime/toolkits/subgraph.py +251 -6
- alita_sdk/runtime/toolkits/tools.py +356 -69
- alita_sdk/runtime/toolkits/vectorstore.py +11 -5
- alita_sdk/runtime/tools/__init__.py +10 -3
- alita_sdk/runtime/tools/application.py +27 -6
- alita_sdk/runtime/tools/artifact.py +511 -28
- alita_sdk/runtime/tools/data_analysis.py +183 -0
- alita_sdk/runtime/tools/function.py +67 -35
- alita_sdk/runtime/tools/graph.py +10 -4
- alita_sdk/runtime/tools/image_generation.py +148 -46
- alita_sdk/runtime/tools/llm.py +1003 -128
- alita_sdk/runtime/tools/loop.py +3 -1
- alita_sdk/runtime/tools/loop_output.py +3 -1
- alita_sdk/runtime/tools/mcp_inspect_tool.py +284 -0
- alita_sdk/runtime/tools/mcp_remote_tool.py +181 -0
- alita_sdk/runtime/tools/mcp_server_tool.py +8 -5
- alita_sdk/runtime/tools/planning/__init__.py +36 -0
- alita_sdk/runtime/tools/planning/models.py +246 -0
- alita_sdk/runtime/tools/planning/wrapper.py +607 -0
- alita_sdk/runtime/tools/router.py +2 -4
- alita_sdk/runtime/tools/sandbox.py +65 -48
- alita_sdk/runtime/tools/skill_router.py +776 -0
- alita_sdk/runtime/tools/tool.py +3 -1
- alita_sdk/runtime/tools/vectorstore.py +9 -3
- alita_sdk/runtime/tools/vectorstore_base.py +70 -14
- alita_sdk/runtime/utils/AlitaCallback.py +137 -21
- alita_sdk/runtime/utils/constants.py +5 -1
- alita_sdk/runtime/utils/mcp_client.py +492 -0
- alita_sdk/runtime/utils/mcp_oauth.py +361 -0
- alita_sdk/runtime/utils/mcp_sse_client.py +434 -0
- alita_sdk/runtime/utils/mcp_tools_discovery.py +124 -0
- alita_sdk/runtime/utils/serialization.py +155 -0
- alita_sdk/runtime/utils/streamlit.py +40 -13
- alita_sdk/runtime/utils/toolkit_utils.py +30 -9
- alita_sdk/runtime/utils/utils.py +36 -0
- alita_sdk/tools/__init__.py +134 -35
- alita_sdk/tools/ado/repos/__init__.py +51 -32
- alita_sdk/tools/ado/repos/repos_wrapper.py +148 -89
- alita_sdk/tools/ado/test_plan/__init__.py +25 -9
- alita_sdk/tools/ado/test_plan/test_plan_wrapper.py +23 -1
- alita_sdk/tools/ado/utils.py +1 -18
- alita_sdk/tools/ado/wiki/__init__.py +25 -12
- alita_sdk/tools/ado/wiki/ado_wrapper.py +291 -22
- alita_sdk/tools/ado/work_item/__init__.py +26 -13
- alita_sdk/tools/ado/work_item/ado_wrapper.py +73 -11
- alita_sdk/tools/advanced_jira_mining/__init__.py +11 -8
- alita_sdk/tools/aws/delta_lake/__init__.py +13 -9
- alita_sdk/tools/aws/delta_lake/tool.py +5 -1
- alita_sdk/tools/azure_ai/search/__init__.py +11 -8
- alita_sdk/tools/azure_ai/search/api_wrapper.py +1 -1
- alita_sdk/tools/base/tool.py +5 -1
- alita_sdk/tools/base_indexer_toolkit.py +271 -84
- alita_sdk/tools/bitbucket/__init__.py +17 -11
- alita_sdk/tools/bitbucket/api_wrapper.py +59 -11
- alita_sdk/tools/bitbucket/cloud_api_wrapper.py +49 -35
- alita_sdk/tools/browser/__init__.py +5 -4
- alita_sdk/tools/carrier/__init__.py +5 -6
- alita_sdk/tools/carrier/backend_reports_tool.py +6 -6
- alita_sdk/tools/carrier/run_ui_test_tool.py +6 -6
- alita_sdk/tools/carrier/ui_reports_tool.py +5 -5
- alita_sdk/tools/chunkers/__init__.py +3 -1
- alita_sdk/tools/chunkers/code/treesitter/treesitter.py +37 -13
- alita_sdk/tools/chunkers/sematic/json_chunker.py +1 -0
- alita_sdk/tools/chunkers/sematic/markdown_chunker.py +97 -6
- alita_sdk/tools/chunkers/sematic/proposal_chunker.py +1 -1
- alita_sdk/tools/chunkers/universal_chunker.py +270 -0
- alita_sdk/tools/cloud/aws/__init__.py +10 -7
- alita_sdk/tools/cloud/azure/__init__.py +10 -7
- alita_sdk/tools/cloud/gcp/__init__.py +10 -7
- alita_sdk/tools/cloud/k8s/__init__.py +10 -7
- alita_sdk/tools/code/linter/__init__.py +10 -8
- alita_sdk/tools/code/loaders/codesearcher.py +3 -2
- alita_sdk/tools/code/sonar/__init__.py +11 -8
- alita_sdk/tools/code_indexer_toolkit.py +82 -22
- alita_sdk/tools/confluence/__init__.py +22 -16
- alita_sdk/tools/confluence/api_wrapper.py +107 -30
- alita_sdk/tools/confluence/loader.py +14 -2
- alita_sdk/tools/custom_open_api/__init__.py +12 -5
- alita_sdk/tools/elastic/__init__.py +11 -8
- alita_sdk/tools/elitea_base.py +493 -30
- alita_sdk/tools/figma/__init__.py +58 -11
- alita_sdk/tools/figma/api_wrapper.py +1235 -143
- alita_sdk/tools/figma/figma_client.py +73 -0
- alita_sdk/tools/figma/toon_tools.py +2748 -0
- alita_sdk/tools/github/__init__.py +14 -15
- alita_sdk/tools/github/github_client.py +224 -100
- alita_sdk/tools/github/graphql_client_wrapper.py +119 -33
- alita_sdk/tools/github/schemas.py +14 -5
- alita_sdk/tools/github/tool.py +5 -1
- alita_sdk/tools/github/tool_prompts.py +9 -22
- alita_sdk/tools/gitlab/__init__.py +16 -11
- alita_sdk/tools/gitlab/api_wrapper.py +218 -48
- alita_sdk/tools/gitlab_org/__init__.py +10 -9
- alita_sdk/tools/gitlab_org/api_wrapper.py +63 -64
- alita_sdk/tools/google/bigquery/__init__.py +13 -12
- alita_sdk/tools/google/bigquery/tool.py +5 -1
- alita_sdk/tools/google_places/__init__.py +11 -8
- alita_sdk/tools/google_places/api_wrapper.py +1 -1
- alita_sdk/tools/jira/__init__.py +17 -10
- alita_sdk/tools/jira/api_wrapper.py +92 -41
- alita_sdk/tools/keycloak/__init__.py +11 -8
- alita_sdk/tools/localgit/__init__.py +9 -3
- alita_sdk/tools/localgit/local_git.py +62 -54
- alita_sdk/tools/localgit/tool.py +5 -1
- alita_sdk/tools/memory/__init__.py +12 -4
- alita_sdk/tools/non_code_indexer_toolkit.py +1 -0
- alita_sdk/tools/ocr/__init__.py +11 -8
- alita_sdk/tools/openapi/__init__.py +491 -106
- alita_sdk/tools/openapi/api_wrapper.py +1368 -0
- alita_sdk/tools/openapi/tool.py +20 -0
- alita_sdk/tools/pandas/__init__.py +20 -12
- alita_sdk/tools/pandas/api_wrapper.py +38 -25
- alita_sdk/tools/pandas/dataframe/generator/base.py +3 -1
- alita_sdk/tools/postman/__init__.py +10 -9
- alita_sdk/tools/pptx/__init__.py +11 -10
- alita_sdk/tools/pptx/pptx_wrapper.py +1 -1
- alita_sdk/tools/qtest/__init__.py +31 -11
- alita_sdk/tools/qtest/api_wrapper.py +2135 -86
- alita_sdk/tools/rally/__init__.py +10 -9
- alita_sdk/tools/rally/api_wrapper.py +1 -1
- alita_sdk/tools/report_portal/__init__.py +12 -8
- alita_sdk/tools/salesforce/__init__.py +10 -8
- alita_sdk/tools/servicenow/__init__.py +17 -15
- alita_sdk/tools/servicenow/api_wrapper.py +1 -1
- alita_sdk/tools/sharepoint/__init__.py +10 -7
- alita_sdk/tools/sharepoint/api_wrapper.py +129 -38
- alita_sdk/tools/sharepoint/authorization_helper.py +191 -1
- alita_sdk/tools/sharepoint/utils.py +8 -2
- alita_sdk/tools/slack/__init__.py +10 -7
- alita_sdk/tools/slack/api_wrapper.py +2 -2
- alita_sdk/tools/sql/__init__.py +12 -9
- alita_sdk/tools/testio/__init__.py +10 -7
- alita_sdk/tools/testrail/__init__.py +11 -10
- alita_sdk/tools/testrail/api_wrapper.py +1 -1
- alita_sdk/tools/utils/__init__.py +9 -4
- alita_sdk/tools/utils/content_parser.py +103 -18
- alita_sdk/tools/utils/text_operations.py +410 -0
- alita_sdk/tools/utils/tool_prompts.py +79 -0
- alita_sdk/tools/vector_adapters/VectorStoreAdapter.py +30 -13
- alita_sdk/tools/xray/__init__.py +13 -9
- alita_sdk/tools/yagmail/__init__.py +9 -3
- alita_sdk/tools/zephyr/__init__.py +10 -7
- alita_sdk/tools/zephyr_enterprise/__init__.py +11 -7
- alita_sdk/tools/zephyr_essential/__init__.py +10 -7
- alita_sdk/tools/zephyr_essential/api_wrapper.py +30 -13
- alita_sdk/tools/zephyr_essential/client.py +2 -2
- alita_sdk/tools/zephyr_scale/__init__.py +11 -8
- alita_sdk/tools/zephyr_scale/api_wrapper.py +2 -2
- alita_sdk/tools/zephyr_squad/__init__.py +10 -7
- {alita_sdk-0.3.379.dist-info → alita_sdk-0.3.627.dist-info}/METADATA +154 -8
- alita_sdk-0.3.627.dist-info/RECORD +468 -0
- alita_sdk-0.3.627.dist-info/entry_points.txt +2 -0
- alita_sdk-0.3.379.dist-info/RECORD +0 -360
- {alita_sdk-0.3.379.dist-info → alita_sdk-0.3.627.dist-info}/WHEEL +0 -0
- {alita_sdk-0.3.379.dist-info → alita_sdk-0.3.627.dist-info}/licenses/LICENSE +0 -0
- {alita_sdk-0.3.379.dist-info → alita_sdk-0.3.627.dist-info}/top_level.txt +0 -0
Representative hunks from the diff, grouped by file:

```diff
--- a/alita_sdk/tools/chunkers/sematic/markdown_chunker.py
+++ b/alita_sdk/tools/chunkers/sematic/markdown_chunker.py
@@ -1,4 +1,4 @@
-from typing import Generator
+from typing import Generator, List
 from langchain_core.documents import Document
 from langchain_text_splitters import MarkdownHeaderTextSplitter, ExperimentalMarkdownSyntaxTextSplitter
 from langchain.text_splitter import TokenTextSplitter
@@ -7,34 +7,60 @@ from copy import deepcopy as copy
 
 
 def markdown_chunker(file_content_generator: Generator[Document, None, None], config: dict, *args, **kwargs) -> Generator[Document, None, None]:
+    """
+    Chunks markdown documents by headers, with support for:
+    - Minimum chunk size to avoid tiny fragments
+    - Maximum token limit with overflow splitting
+    - Header metadata preservation
+
+    Config options:
+        strip_header (bool): Remove headers from content. Default: False
+        return_each_line (bool): Split on every line. Default: False
+        headers_to_split_on (list): Headers to split on, e.g. [('#', 'H1'), ('##', 'H2')]
+        max_tokens (int): Maximum tokens per chunk. Default: 512
+        token_overlap (int): Token overlap for large chunk splitting. Default: 10
+        min_chunk_chars (int): Minimum characters per chunk. Default: 100
+            Chunks smaller than this will be merged with the next chunk.
+    """
     strip_header = config.get("strip_header", False)
     return_each_line = config.get("return_each_line", False)
     headers_to_split_on = config.get("headers_to_split_on", [])
     max_tokens = config.get("max_tokens", 512)
     tokens_overlapping = config.get("token_overlap", 10)
+    min_chunk_chars = config.get("min_chunk_chars", 100)  # Minimum characters per chunk
+
     headers_to_split_on = [tuple(header) for header in headers_to_split_on]
+
     for doc in file_content_generator:
         doc_metadata = doc.metadata
         doc_content = doc.page_content
         chunk_id = 0
+
         markdown_splitter = MarkdownHeaderTextSplitter(
             headers_to_split_on=headers_to_split_on,
             strip_headers=strip_header,
             return_each_line=return_each_line
         )
         md_header_splits = markdown_splitter.split_text(doc_content)
-        for chunk in md_header_splits:
+
+        # Merge small chunks with the next one
+        merged_chunks = _merge_small_chunks(md_header_splits, min_chunk_chars)
+
+        for chunk in merged_chunks:
             if tiktoken_length(chunk.page_content) > max_tokens:
-                for subchunk in TokenTextSplitter(encoding_name="cl100k_base",
-                                                  chunk_size=max_tokens,
-                                                  chunk_overlap=tokens_overlapping
-                                                  ).split_text(chunk.page_content):
+                # Split large chunks into smaller ones
+                for subchunk in TokenTextSplitter(
+                    encoding_name="cl100k_base",
+                    chunk_size=max_tokens,
+                    chunk_overlap=tokens_overlapping
+                ).split_text(chunk.page_content):
                     chunk_id += 1
                     headers_meta = list(chunk.metadata.values())
                     docmeta = copy(doc_metadata)
                     docmeta.update({"headers": "; ".join(headers_meta)})
                     docmeta['chunk_id'] = chunk_id
                     docmeta['chunk_type'] = "document"
+                    docmeta['method_name'] = 'markdown'
                     yield Document(
                         page_content=subchunk,
                         metadata=docmeta
@@ -46,12 +72,77 @@ def markdown_chunker(file_content_generator: Generator[Document, None, None], co
             docmeta.update({"headers": "; ".join(headers_meta)})
             docmeta['chunk_id'] = chunk_id
             docmeta['chunk_type'] = "document"
+            docmeta['method_name'] = 'text'
             yield Document(
                 page_content=chunk.page_content,
                 metadata=docmeta
             )
 
 
+def _merge_small_chunks(chunks: List[Document], min_chars: int) -> List[Document]:
+    """
+    Merge chunks that are smaller than min_chars with the next chunk.
+
+    This prevents tiny fragments (like standalone headers or short notes)
+    from becoming separate chunks.
+
+    Args:
+        chunks: List of Document chunks from markdown splitter
+        min_chars: Minimum character count for a chunk
+
+    Returns:
+        List of merged Document chunks
+    """
+    if not chunks:
+        return chunks
+
+    merged = []
+    pending_content = ""
+    pending_metadata = {}
+
+    for i, chunk in enumerate(chunks):
+        content = chunk.page_content.strip()
+
+        if pending_content:
+            # Merge pending content with current chunk
+            combined_content = pending_content + "\n\n" + content
+            # Use the pending metadata (from the header) but can be extended
+            combined_metadata = {**pending_metadata}
+            # Add any new header info from current chunk
+            for key, value in chunk.metadata.items():
+                if key not in combined_metadata or not combined_metadata[key]:
+                    combined_metadata[key] = value
+
+            if len(combined_content) >= min_chars:
+                # Combined is big enough, emit it
+                merged.append(Document(
+                    page_content=combined_content,
+                    metadata=combined_metadata
+                ))
+                pending_content = ""
+                pending_metadata = {}
+            else:
+                # Still too small, keep accumulating
+                pending_content = combined_content
+                pending_metadata = combined_metadata
+        elif len(content) < min_chars:
+            # Current chunk is too small, start pending
+            pending_content = content
+            pending_metadata = dict(chunk.metadata)
+        else:
+            # Current chunk is big enough
+            merged.append(chunk)
+
+    # Don't forget any remaining pending content
+    if pending_content:
+        merged.append(Document(
+            page_content=pending_content,
+            metadata=pending_metadata
+        ))
+
+    return merged
+
+
 def markdown_by_headers_chunker(file_content_generator: Generator[Document, None, None], config: dict, *args, **kwargs) -> Generator[Document, None, None]:
     strip_header = config.get("strip_header", False)
     return_each_line = config.get("return_each_line", False)
```
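
To see the new merge-then-split behavior end to end, here is a minimal usage sketch (document content and config values are invented; the import path is the file patched above, assuming the 0.3.627 wheel is installed):

```python
from langchain_core.documents import Document
from alita_sdk.tools.chunkers.sematic.markdown_chunker import markdown_chunker

def docs():
    # One markdown file: a tiny intro section followed by a long one
    yield Document(
        page_content="# Intro\n\nOne short line.\n\n## Details\n\n" + "word " * 600,
        metadata={"filename": "README.md"},
    )

config = {
    "headers_to_split_on": [["#", "H1"], ["##", "H2"]],  # converted to tuples internally
    "max_tokens": 256,       # "Details" exceeds this, so it is re-split by TokenTextSplitter
    "token_overlap": 10,
    "min_chunk_chars": 100,  # "Intro" is under 100 chars, so it merges into the next chunk
}

for chunk in markdown_chunker(docs(), config):
    print(chunk.metadata["chunk_id"], chunk.metadata["headers"], len(chunk.page_content))
```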
```diff
--- a/alita_sdk/tools/chunkers/sematic/proposal_chunker.py
+++ b/alita_sdk/tools/chunkers/sematic/proposal_chunker.py
@@ -6,7 +6,7 @@ from langchain_core.prompts import ChatPromptTemplate
 from langchain.text_splitter import TokenTextSplitter
 
 from typing import Optional, List
-from
+from pydantic import BaseModel
 from ..utils import tiktoken_length
 
 logger = getLogger(__name__)
```
```diff
--- /dev/null
+++ b/alita_sdk/tools/chunkers/universal_chunker.py
@@ -0,0 +1,270 @@
+"""
+Universal Chunker - Routes documents to appropriate chunkers based on file type.
+
+This module provides a universal chunking interface that automatically selects
+the appropriate chunking strategy based on the file extension:
+
+- .md, .markdown → Markdown chunker (header-based splitting)
+- .py, .js, .ts, .java, etc. → TreeSitter code chunker
+- .json → JSON chunker
+- other → Default text chunker
+
+Usage:
+    from alita_sdk.tools.chunkers.universal_chunker import universal_chunker
+
+    # Chunk documents from a loader
+    for chunk in universal_chunker(document_generator, config):
+        print(chunk.page_content)
+"""
+
+import logging
+import os
+from typing import Generator, Dict, Any, Optional
+from langchain_core.documents import Document
+from langchain.text_splitter import RecursiveCharacterTextSplitter
+
+from .code.codeparser import parse_code_files_for_db
+from .sematic.markdown_chunker import markdown_chunker
+from .sematic.json_chunker import json_chunker
+
+logger = logging.getLogger(__name__)
+
+
+# File extension mappings
+MARKDOWN_EXTENSIONS = {'.md', '.markdown', '.mdown', '.mkd', '.mdx'}
+JSON_EXTENSIONS = {'.json', '.jsonl', '.jsonc'}
+CODE_EXTENSIONS = {
+    '.py', '.js', '.jsx', '.mjs', '.cjs', '.ts', '.tsx',
+    '.java', '.kt', '.rs', '.go', '.cpp', '.c', '.cs',
+    '.hs', '.rb', '.scala', '.lua'
+}
+
+
+def get_file_extension(file_path: str) -> str:
+    """Extract file extension from path."""
+    return os.path.splitext(file_path)[-1].lower()
+
+
+def get_file_type(file_path: str) -> str:
+    """
+    Determine the file type category for chunking.
+
+    Returns:
+        'markdown', 'json', 'code', or 'text'
+    """
+    ext = get_file_extension(file_path)
+
+    if ext in MARKDOWN_EXTENSIONS:
+        return 'markdown'
+    elif ext in JSON_EXTENSIONS:
+        return 'json'
+    elif ext in CODE_EXTENSIONS:
+        return 'code'
+    else:
+        return 'text'
+
+
+def _default_text_chunker(
+    documents: Generator[Document, None, None],
+    config: Dict[str, Any]
+) -> Generator[Document, None, None]:
+    """
+    Default text chunker for unknown file types.
+    Uses recursive character splitting.
+    """
+    chunk_size = config.get('chunk_size', 1000)
+    chunk_overlap = config.get('chunk_overlap', 100)
+
+    splitter = RecursiveCharacterTextSplitter(
+        chunk_size=chunk_size,
+        chunk_overlap=chunk_overlap,
+        length_function=len,
+    )
+
+    for doc in documents:
+        chunks = splitter.split_documents([doc])
+        for idx, chunk in enumerate(chunks, 1):
+            chunk.metadata['chunk_id'] = idx
+            chunk.metadata['chunk_type'] = 'text'
+            chunk.metadata['method_name'] = 'text'
+            yield chunk
+
+
+def _code_chunker_from_documents(
+    documents: Generator[Document, None, None],
+    config: Dict[str, Any]
+) -> Generator[Document, None, None]:
+    """
+    Adapter to convert Document generator to code parser format.
+    """
+    def file_content_generator():
+        for doc in documents:
+            yield {
+                'file_name': doc.metadata.get('file_path', doc.metadata.get('filename', 'unknown')),
+                'file_content': doc.page_content,
+                'commit_hash': doc.metadata.get('commit_hash', ''),
+            }
+
+    # parse_code_files_for_db returns chunks with proper metadata
+    for chunk in parse_code_files_for_db(file_content_generator()):
+        # Ensure file_path is preserved
+        if 'file_path' not in chunk.metadata and 'filename' in chunk.metadata:
+            chunk.metadata['file_path'] = chunk.metadata['filename']
+        yield chunk
+
+
+def universal_chunker(
+    documents: Generator[Document, None, None],
+    config: Optional[Dict[str, Any]] = None
+) -> Generator[Document, None, None]:
+    """
+    Universal chunker that routes documents to appropriate chunkers based on file type.
+
+    Each document is inspected for its file extension (from metadata.file_path or
+    metadata.file_name) and routed to the appropriate chunker:
+
+    - Markdown files → markdown_chunker (header-based splitting)
+    - JSON files → json_chunker (recursive JSON splitting)
+    - Code files → code parser (TreeSitter-based parsing)
+    - Other files → default text chunker (recursive character splitting)
+
+    Args:
+        documents: Generator yielding Document objects with file content
+        config: Optional configuration dict with:
+            - markdown_config: Config for markdown chunker
+            - json_config: Config for JSON chunker
+            - code_config: Config for code chunker
+            - text_config: Config for default text chunker
+
+    Yields:
+        Document objects with chunked content and preserved metadata
+    """
+    if config is None:
+        config = {}
+
+    # Default configs for each chunker type
+    markdown_config = config.get('markdown_config', {
+        'strip_header': False,
+        'return_each_line': False,
+        'headers_to_split_on': [
+            ('#', 'Header 1'),
+            ('##', 'Header 2'),
+            ('###', 'Header 3'),
+            ('####', 'Header 4'),
+        ],
+        'max_tokens': 1024,
+        'token_overlap': 50,
+        'min_chunk_chars': 100,  # Merge chunks smaller than this
+    })
+
+    json_config = config.get('json_config', {
+        'max_tokens': 512,
+    })
+
+    code_config = config.get('code_config', {})
+
+    text_config = config.get('text_config', {
+        'chunk_size': 1000,
+        'chunk_overlap': 100,
+    })
+
+    # Buffer documents by type for batch processing
+    # This is more efficient than processing one at a time
+    markdown_docs = []
+    json_docs = []
+    code_docs = []
+    text_docs = []
+
+    # Buffer size before flushing
+    BUFFER_SIZE = 10
+
+    def flush_markdown():
+        if markdown_docs:
+            def gen():
+                for d in markdown_docs:
+                    yield d
+            for chunk in markdown_chunker(gen(), markdown_config):
+                yield chunk
+            markdown_docs.clear()
+
+    def flush_json():
+        if json_docs:
+            def gen():
+                for d in json_docs:
+                    yield d
+            for chunk in json_chunker(gen(), json_config):
+                yield chunk
+            json_docs.clear()
+
+    def flush_code():
+        if code_docs:
+            def gen():
+                for d in code_docs:
+                    yield d
+            for chunk in _code_chunker_from_documents(gen(), code_config):
+                yield chunk
+            code_docs.clear()
+
+    def flush_text():
+        if text_docs:
+            def gen():
+                for d in text_docs:
+                    yield d
+            for chunk in _default_text_chunker(gen(), text_config):
+                yield chunk
+            text_docs.clear()
+
+    for doc in documents:
+        # Get file path from metadata
+        file_path = (doc.metadata.get('file_path') or
+                     doc.metadata.get('file_name') or
+                     doc.metadata.get('source') or
+                     'unknown')
+
+        # Ensure file_path is in metadata for downstream use
+        doc.metadata['file_path'] = file_path
+
+        file_type = get_file_type(file_path)
+
+        if file_type == 'markdown':
+            markdown_docs.append(doc)
+            if len(markdown_docs) >= BUFFER_SIZE:
+                yield from flush_markdown()
+        elif file_type == 'json':
+            json_docs.append(doc)
+            if len(json_docs) >= BUFFER_SIZE:
+                yield from flush_json()
+        elif file_type == 'code':
+            code_docs.append(doc)
+            if len(code_docs) >= BUFFER_SIZE:
+                yield from flush_code()
+        else:
+            text_docs.append(doc)
+            if len(text_docs) >= BUFFER_SIZE:
+                yield from flush_text()
+
+    # Flush remaining documents
+    yield from flush_markdown()
+    yield from flush_json()
+    yield from flush_code()
+    yield from flush_text()
+
+
+def chunk_single_document(
+    doc: Document,
+    config: Optional[Dict[str, Any]] = None
+) -> Generator[Document, None, None]:
+    """
+    Convenience function to chunk a single document.
+
+    Args:
+        doc: Single Document to chunk
+        config: Optional chunker configuration
+
+    Yields:
+        Chunked Document objects
+    """
+    def single_doc_gen():
+        yield doc
+
+    yield from universal_chunker(single_doc_gen(), config)
```
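
A short routing sketch for the new module (file names and contents are invented; the import path is taken from the module's own docstring, and the `code` route additionally needs the TreeSitter parser dependencies behind `parse_code_files_for_db`):

```python
from langchain_core.documents import Document
from alita_sdk.tools.chunkers.universal_chunker import universal_chunker, get_file_type

# Extension routing, per the mapping tables above
assert get_file_type("docs/guide.md") == "markdown"
assert get_file_type("src/main.py") == "code"
assert get_file_type("data/cfg.yaml") == "text"  # no mapping, falls back to text

def repo_docs():
    yield Document(page_content="# Guide\n\nSome prose.", metadata={"file_path": "docs/guide.md"})
    yield Document(page_content="notes without an extension", metadata={"source": "NOTES"})

# With config omitted, each route uses the defaults defined in universal_chunker
for chunk in universal_chunker(repo_docs()):
    print(chunk.metadata["file_path"], chunk.metadata.get("method_name"))
```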
```diff
--- a/alita_sdk/tools/cloud/aws/__init__.py
+++ b/alita_sdk/tools/cloud/aws/__init__.py
@@ -6,7 +6,8 @@ from langchain_core.tools import BaseToolkit, BaseTool
 from .api_wrapper import AWSToolConfig
 from ...base.tool import BaseAction
 from ...elitea_base import filter_missconfigured_index_tools
-from ...utils import clean_string, get_max_toolkit_length, TOOLKIT_SPLITTER
+from ...utils import clean_string, get_max_toolkit_length
+from ....runtime.utils.constants import TOOLKIT_NAME_META, TOOL_NAME_META, TOOLKIT_TYPE_META
 
 name = "aws"
 
@@ -22,12 +23,10 @@ def get_tools(tool):
 
 class AWSToolkit(BaseToolkit):
     tools: list[BaseTool] = []
-    toolkit_max_length: int = 0
 
     @staticmethod
     def toolkit_config_schema() -> BaseModel:
         selected_tools = {x['name']: x['args_schema'].schema() for x in AWSToolConfig.model_construct().get_available_tools()}
-        AWSToolkit.toolkit_max_length = get_max_toolkit_length(selected_tools)
         return create_model(
             name,
             region=(str, Field(default="", title="Region", description="AWS region")),
@@ -54,15 +53,19 @@ class AWSToolkit(BaseToolkit):
         aws_tool_config = AWSToolConfig(**kwargs)
         available_tools = aws_tool_config.get_available_tools()
         tools = []
-        prefix = clean_string(toolkit_name, cls.toolkit_max_length) + TOOLKIT_SPLITTER if toolkit_name else ''
         for tool in available_tools:
             if selected_tools and tool["name"] not in selected_tools:
                 continue
+            description = tool["description"]
+            if toolkit_name:
+                description = f"Toolkit: {toolkit_name}\n{description}"
+            description = description[:1000]
             tools.append(BaseAction(
                 api_wrapper=aws_tool_config,
-                name=prefix + tool["name"],
-                description=tool["description"],
-                args_schema=tool["args_schema"]
+                name=tool["name"],
+                description=description,
+                args_schema=tool["args_schema"],
+                metadata={TOOLKIT_NAME_META: toolkit_name, TOOLKIT_TYPE_META: name, TOOL_NAME_META: tool["name"]} if toolkit_name else {TOOL_NAME_META: tool["name"]}
             ))
         return cls(tools=tools)
 
```
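
The same refactor repeats verbatim in the azure, gcp, kubernetes, and python_linter hunks below: the truncated-prefix naming scheme (`clean_string(toolkit_name, ...) + TOOLKIT_SPLITTER + tool name`) is dropped, the toolkit association moves into tool metadata, and descriptions gain a `Toolkit:` header plus a 1000-character cap. A before/after sketch with assumed metadata key strings (the real keys are the `*_META` constants imported from `runtime.utils.constants`):

```python
# 0.3.379 (old): the toolkit name was baked into the tool name itself, e.g.
#   name = clean_string("my_aws", toolkit_max_length) + TOOLKIT_SPLITTER + "execute"
# 0.3.627 (new): the name stays plain and identification lives in metadata.
toolkit_name = "my_aws"   # hypothetical toolkit instance name
tool_name = "execute"     # hypothetical entry from get_available_tools()

description = "Run an AWS operation."           # tool["description"]
if toolkit_name:
    description = f"Toolkit: {toolkit_name}\n{description}"
description = description[:1000]                # hard cap, as in the hunk above

metadata = {
    "toolkit_name": toolkit_name,  # assumed value of TOOLKIT_NAME_META
    "toolkit_type": "aws",         # assumed value of TOOLKIT_TYPE_META
    "tool_name": tool_name,        # assumed value of TOOL_NAME_META
} if toolkit_name else {"tool_name": tool_name}
```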
```diff
--- a/alita_sdk/tools/cloud/azure/__init__.py
+++ b/alita_sdk/tools/cloud/azure/__init__.py
@@ -6,7 +6,8 @@ from pydantic import create_model, BaseModel, ConfigDict, Field, SecretStr
 from .api_wrapper import AzureApiWrapper
 from ...base.tool import BaseAction
 from ...elitea_base import filter_missconfigured_index_tools
-from ...utils import clean_string, get_max_toolkit_length, TOOLKIT_SPLITTER
+from ...utils import clean_string, get_max_toolkit_length
+from ....runtime.utils.constants import TOOLKIT_NAME_META, TOOL_NAME_META, TOOLKIT_TYPE_META
 
 name = "azure"
 
@@ -23,12 +24,10 @@ def get_tools(tool):
 
 class AzureToolkit(BaseToolkit):
     tools: list[BaseTool] = []
-    toolkit_max_length: int = 0
 
     @staticmethod
     def toolkit_config_schema() -> BaseModel:
         selected_tools = {x['name']: x['args_schema'].schema() for x in AzureApiWrapper.model_construct().get_available_tools()}
-        AzureToolkit.toolkit_max_length = get_max_toolkit_length(selected_tools)
         return create_model(
             name,
             subscription_id=(str, Field(default="", title="Subscription ID", description="Azure subscription ID")),
@@ -47,15 +46,19 @@ class AzureToolkit(BaseToolkit):
         azure_api_wrapper = AzureApiWrapper(**kwargs)
         available_tools = azure_api_wrapper.get_available_tools()
         tools = []
-        prefix = clean_string(toolkit_name, cls.toolkit_max_length) + TOOLKIT_SPLITTER if toolkit_name else ''
         for tool in available_tools:
             if selected_tools and tool["name"] not in selected_tools:
                 continue
+            description = tool["description"]
+            if toolkit_name:
+                description = f"Toolkit: {toolkit_name}\n{description}"
+            description = description[:1000]
             tools.append(BaseAction(
                 api_wrapper=azure_api_wrapper,
-                name=prefix + tool["name"],
-                description=tool["description"],
-                args_schema=tool["args_schema"]
+                name=tool["name"],
+                description=description,
+                args_schema=tool["args_schema"],
+                metadata={TOOLKIT_NAME_META: toolkit_name, TOOLKIT_TYPE_META: name, TOOL_NAME_META: tool["name"]} if toolkit_name else {TOOL_NAME_META: tool["name"]}
             ))
         return cls(tools=tools)
 
```
```diff
--- a/alita_sdk/tools/cloud/gcp/__init__.py
+++ b/alita_sdk/tools/cloud/gcp/__init__.py
@@ -6,7 +6,8 @@ from pydantic import create_model, BaseModel, ConfigDict, Field, SecretStr
 from .api_wrapper import GCPApiWrapper
 from ...base.tool import BaseAction
 from ...elitea_base import filter_missconfigured_index_tools
-from ...utils import clean_string, get_max_toolkit_length, TOOLKIT_SPLITTER
+from ...utils import clean_string, get_max_toolkit_length
+from ....runtime.utils.constants import TOOLKIT_NAME_META, TOOL_NAME_META, TOOLKIT_TYPE_META
 
 name = "gcp"
 
@@ -20,12 +21,10 @@ def get_tools(tool):
 
 class GCPToolkit(BaseToolkit):
     tools: list[BaseTool] = []
-    toolkit_max_length: int = 0
 
     @staticmethod
     def toolkit_config_schema() -> BaseModel:
         selected_tools = {x['name']: x['args_schema'].schema() for x in GCPApiWrapper.model_construct().get_available_tools()}
-        GCPToolkit.toolkit_max_length = get_max_toolkit_length(selected_tools)
         return create_model(
             name,
             api_key=(SecretStr, Field(default="", title="API key", description="GCP API key", json_schema_extra={'secret': True})),
@@ -41,15 +40,19 @@ class GCPToolkit(BaseToolkit):
         gcp_api_wrapper = GCPApiWrapper(**kwargs)
         available_tools = gcp_api_wrapper.get_available_tools()
         tools = []
-        prefix = clean_string(toolkit_name, cls.toolkit_max_length) + TOOLKIT_SPLITTER if toolkit_name else ''
         for tool in available_tools:
             if selected_tools and tool["name"] not in selected_tools:
                 continue
+            description = tool["description"]
+            if toolkit_name:
+                description = f"Toolkit: {toolkit_name}\n{description}"
+            description = description[:1000]
             tools.append(BaseAction(
                 api_wrapper=gcp_api_wrapper,
-                name=prefix + tool["name"],
-                description=tool["description"],
-                args_schema=tool["args_schema"]
+                name=tool["name"],
+                description=description,
+                args_schema=tool["args_schema"],
+                metadata={TOOLKIT_NAME_META: toolkit_name, TOOLKIT_TYPE_META: name, TOOL_NAME_META: tool["name"]} if toolkit_name else {TOOL_NAME_META: tool["name"]}
             ))
         return cls(tools=tools)
 
```
```diff
--- a/alita_sdk/tools/cloud/k8s/__init__.py
+++ b/alita_sdk/tools/cloud/k8s/__init__.py
@@ -6,7 +6,8 @@ from pydantic import create_model, BaseModel, ConfigDict, Field, SecretStr
 from .api_wrapper import KubernetesApiWrapper
 from ...base.tool import BaseAction
 from ...elitea_base import filter_missconfigured_index_tools
-from ...utils import clean_string, get_max_toolkit_length, TOOLKIT_SPLITTER
+from ...utils import clean_string, get_max_toolkit_length
+from ....runtime.utils.constants import TOOLKIT_NAME_META, TOOL_NAME_META, TOOLKIT_TYPE_META
 
 name = "kubernetes"
 
@@ -21,12 +22,10 @@ def get_tools(tool):
 
 class KubernetesToolkit(BaseToolkit):
     tools: list[BaseTool] = []
-    toolkit_max_length: int = 0
 
     @staticmethod
     def toolkit_config_schema() -> BaseModel:
         selected_tools = {x['name']: x['args_schema'].schema() for x in KubernetesApiWrapper.model_construct().get_available_tools()}
-        KubernetesToolkit.toolkit_max_length = get_max_toolkit_length(selected_tools)
         return create_model(
             name,
             url=(str, Field(default="", title="Cluster URL", description="The URL of the Kubernetes cluster")),
@@ -51,15 +50,19 @@ class KubernetesToolkit(BaseToolkit):
         kubernetes_api_wrapper = KubernetesApiWrapper(**kwargs)
         available_tools = kubernetes_api_wrapper.get_available_tools()
         tools = []
-        prefix = clean_string(toolkit_name, cls.toolkit_max_length) + TOOLKIT_SPLITTER if toolkit_name else ''
         for tool in available_tools:
             if selected_tools and tool["name"] not in selected_tools:
                 continue
+            description = tool["description"]
+            if toolkit_name:
+                description = f"Toolkit: {toolkit_name}\n{description}"
+            description = description[:1000]
             tools.append(BaseAction(
                 api_wrapper=kubernetes_api_wrapper,
-                name=prefix + tool["name"],
-                description=tool["description"],
-                args_schema=tool["args_schema"]
+                name=tool["name"],
+                description=description,
+                args_schema=tool["args_schema"],
+                metadata={TOOLKIT_NAME_META: toolkit_name, TOOLKIT_TYPE_META: name, TOOL_NAME_META: tool["name"]} if toolkit_name else {TOOL_NAME_META: tool["name"]}
             ))
         return cls(tools=tools)
 
```
```diff
--- a/alita_sdk/tools/code/linter/__init__.py
+++ b/alita_sdk/tools/code/linter/__init__.py
@@ -5,7 +5,8 @@ from pydantic import BaseModel, create_model, Field
 
 from .api_wrapper import PythonLinter
 from ...base.tool import BaseAction
-from ...utils import clean_string, get_max_toolkit_length, TOOLKIT_SPLITTER
+from ...utils import clean_string, get_max_toolkit_length
+from ....runtime.utils.constants import TOOLKIT_NAME_META, TOOL_NAME_META, TOOLKIT_TYPE_META
 
 name = "python_linter"
 
@@ -19,11 +20,9 @@ def get_tools(tool):
 
 class PythonLinterToolkit(BaseToolkit):
     tools: list[BaseTool] = []
-    toolkit_max_length: int = 0
 
     @staticmethod
     def toolkit_config_schema() -> BaseModel:
-        PythonLinterToolkit.toolkit_max_length = get_max_toolkit_length([])
         return create_model(
             name,
             error_codes=(str, Field(description="Error codes to be used by the linter")),
@@ -39,16 +38,19 @@ class PythonLinterToolkit(BaseToolkit):
         python_linter = PythonLinter(**kwargs)
         available_tools = python_linter.get_available_tools()
         tools = []
-        toolkit_max_length = get_max_toolkit_length(selected_tools)
-        prefix = clean_string(toolkit_name, PythonLinterToolkit.toolkit_max_length) + TOOLKIT_SPLITTER if toolkit_name else ''
         for tool in available_tools:
             if selected_tools and tool["name"] not in selected_tools:
                 continue
+            description = tool["description"]
+            if toolkit_name:
+                description = f"Toolkit: {toolkit_name}\n{description}"
+            description = description[:1000]
             tools.append(BaseAction(
                 api_wrapper=python_linter,
-                name=prefix + tool["name"],
-                description=tool["description"],
-                args_schema=tool["args_schema"]
+                name=tool["name"],
+                description=description,
+                args_schema=tool["args_schema"],
+                metadata={TOOLKIT_NAME_META: toolkit_name, TOOLKIT_TYPE_META: name, TOOL_NAME_META: tool["name"]} if toolkit_name else {TOOL_NAME_META: tool["name"]}
             ))
         return cls(tools=tools)
 
```
````diff
--- a/alita_sdk/tools/code/loaders/codesearcher.py
+++ b/alita_sdk/tools/code/loaders/codesearcher.py
@@ -4,8 +4,9 @@ def search_format(items):
     results = []
     for (doc, score) in items:
        res_chunk = ''
-        language = get_programming_language(get_file_extension(doc.metadata["filename"]))
-
+        language = get_programming_language(get_file_extension(doc.metadata.get("filename", "unknown")))
+        method_name = doc.metadata.get("method_name", "text")
+        res_chunk += doc.metadata.get("filename", "unknown") + " -> " + method_name + " (score: " + str(score) + ")"
         res_chunk += "\n\n```" + language.value + "\n"+ doc.page_content + "\n```\n\n"
         results.append(res_chunk)
     return results
````