alita-sdk 0.3.256__py3-none-any.whl → 0.3.258__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
--- a/alita_sdk/configurations/github.py
+++ b/alita_sdk/configurations/github.py
@@ -1,6 +1,6 @@
 from typing import Optional
 
-from pydantic import BaseModel, ConfigDict, Field, SecretStr
+from pydantic import BaseModel, ConfigDict, Field, SecretStr, model_validator
 
 
 class GithubConfiguration(BaseModel):
@@ -35,6 +35,7 @@ class GithubConfiguration(BaseModel):
             }
         }
     )
+
     base_url: Optional[str] = Field(description="Base API URL", default="https://api.github.com")
     app_id: Optional[str] = Field(description="Github APP ID", default=None)
    app_private_key: Optional[SecretStr] = Field(description="Github APP private key", default=None)
@@ -43,3 +44,44 @@ class GithubConfiguration(BaseModel):
 
     username: Optional[str] = Field(description="Github Username", default=None)
     password: Optional[SecretStr] = Field(description="Github Password", default=None)
+
+    @model_validator(mode='before')
+    @classmethod
+    def validate_auth_sections(cls, data):
+        if not isinstance(data, dict):
+            return data
+
+        has_token = bool(data.get('access_token') and str(data.get('access_token')).strip())
+        has_password = bool(
+            data.get('username') and str(data.get('username')).strip() and
+            data.get('password') and str(data.get('password')).strip()
+        )
+        has_app_key = bool(
+            data.get('app_id') and str(data.get('app_id')).strip() and
+            data.get('app_private_key') and str(data.get('app_private_key')).strip()
+        )
+
+        # If any method is partially configured, raise exception
+        if (
+            (data.get('username') and not data.get('password')) or
+            (data.get('password') and not data.get('username')) or
+            (data.get('app_id') and not data.get('app_private_key')) or
+            (data.get('app_private_key') and not data.get('app_id'))
+        ):
+            raise ValueError(
+                "Authentication is misconfigured: both username and password, or both app_id and app_private_key, must be provided together."
+            )
+
+        # If all are missing, allow anonymous
+        if not (has_token or has_password or has_app_key):
+            return data
+
+        # If any method is fully configured
+        if has_token or has_password or has_app_key:
+            return data
+
+        raise ValueError(
+            "Authentication is misconfigured: provide either Token (access_token), "
+            "Password (username + password), App private key (app_id + app_private_key), "
+            "or leave all blank for anonymous access."
+        )
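The new validate_auth_sections validator runs in mode='before', i.e. on the raw input dict, and accepts either one fully configured auth method or none at all. A minimal sketch of the resulting behavior (not part of the diff; the access_token field referenced by the validator is defined in an unchanged part of the model):

    from alita_sdk.configurations.github import GithubConfiguration

    GithubConfiguration()                        # OK: everything blank -> anonymous access
    GithubConfiguration(access_token="ghp_xxx")  # OK: token auth fully configured
    GithubConfiguration(username="octocat")      # ValueError: password missing for username/password auth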
--- a/alita_sdk/runtime/clients/client.py
+++ b/alita_sdk/runtime/clients/client.py
@@ -364,8 +364,8 @@ class AlitaClient:
         return data.content
 
     def delete_artifact(self, bucket_name, artifact_name):
-        url = f'{self.artifact_url}/{bucket_name}/{quote(artifact_name)}'
-        data = requests.delete(url, headers=self.headers, verify=False)
+        url = f'{self.artifact_url}/{bucket_name}'
+        data = requests.delete(url, headers=self.headers, verify=False, params={'filename': quote(artifact_name)})
         return self._process_requst(data)
 
     def _prepare_messages(self, messages: list[BaseMessage]):
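delete_artifact now keeps only the bucket in the URL path and passes the artifact name as a filename query parameter, so names containing path separators can no longer break the route. Roughly, with hypothetical values:

    # before: DELETE {artifact_url}/{bucket_name}/<quoted artifact_name>
    # after:  DELETE {artifact_url}/{bucket_name}?filename=<quoted artifact_name>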
--- a/alita_sdk/tools/ado/__init__.py
+++ b/alita_sdk/tools/ado/__init__.py
@@ -14,11 +14,12 @@ def get_tools(tool_type, tool):
         "limit": tool['settings'].get('limit', 5),
         "toolkit_name": tool.get('toolkit_name', ''),
         # indexer settings
+        "alita": tool['settings'].get('alita', None),
         "llm": tool['settings'].get('llm', None),
         "pgvector_configuration": tool['settings'].get('pgvector_configuration', {}),
         "collection_name": tool['toolkit_name'],
         "doctype": 'doc',
-        "embedding_model": tool['settings'].get('embedding_configuration', {}).get('name', None),
+        "embedding_model": tool['settings'].get('embedding_model'),
         "vectorstore_type": "PGVector"
     }
     if tool_type == 'ado_plans':
--- a/alita_sdk/tools/ado/repos/__init__.py
+++ b/alita_sdk/tools/ado/repos/__init__.py
@@ -23,10 +23,9 @@ def _get_toolkit(tool) -> BaseToolkit:
         active_branch=tool['settings'].get('active_branch', ""),
         toolkit_name=tool['settings'].get('toolkit_name', ""),
         pgvector_configuration=tool['settings'].get('pgvector_configuration', {}),
-        collection_name=tool['toolkit_name'],
-        doctype='code',
         embedding_model=tool['settings'].get('embedding_model'),
-        vectorstore_type="PGVector",
+        collection_name=tool['toolkit_name'],
+        alita=tool['settings'].get('alita', None),
     )
 
 def get_toolkit():
@@ -62,11 +61,6 @@ class AzureDevOpsReposToolkit(BaseToolkit):
                     "icon_url": "ado-repos-icon.svg",
                     "categories": ["code repositories"],
                     "extra_categories": ["code", "repository", "version control"],
-                    # "configuration_group": {
-                    #     "name": "ado_repos",
-                    #     "label": "Azure DevOps Repositories",
-                    #     "icon_url": "ado-repos-icon.svg",
-                    # }
                 }}}
         )
 
@@ -95,6 +89,7 @@ class AzureDevOpsReposToolkit(BaseToolkit):
             **kwargs,
             # TODO use ado_repos_configuration fields
             **kwargs['ado_repos_configuration'],
+            **kwargs['ado_repos_configuration']['ado_configuration'],
             **(kwargs.get('pgvector_configuration') or {}),
         }
         azure_devops_repos_wrapper = ReposApiWrapper(**wrapper_payload)
--- a/alita_sdk/tools/ado/repos/repos_wrapper.py
+++ b/alita_sdk/tools/ado/repos/repos_wrapper.py
@@ -251,15 +251,6 @@ class ReposApiWrapper(BaseCodeToolApiWrapper):
     token: Optional[SecretStr]
     _client: Optional[GitClient] = PrivateAttr()
 
-    llm: Optional[Any] = None
-    # Vector store configuration
-    connection_string: Optional[SecretStr] = None
-    collection_name: Optional[str] = None
-    doctype: Optional[str] = 'code'
-    embedding_model: Optional[str] = "HuggingFaceEmbeddings"
-    embedding_model_params: Optional[dict] = {"model_name": "sentence-transformers/all-MiniLM-L6-v2"}
-    vectorstore_type: Optional[str] = "PGVector"
-
     class Config:
         arbitrary_types_allowed = True
 
--- a/alita_sdk/tools/ado/test_plan/test_plan_wrapper.py
+++ b/alita_sdk/tools/ado/test_plan/test_plan_wrapper.py
@@ -16,11 +16,6 @@ from pydantic.fields import FieldInfo as Field
 from ..work_item import AzureDevOpsApiWrapper
 from ...elitea_base import BaseVectorStoreToolApiWrapper, extend_with_vector_tools
 
-try:
-    from alita_sdk.runtime.langchain.interfaces.llm_processor import get_embeddings
-except ImportError:
-    from alita_sdk.langchain.interfaces.llm_processor import get_embeddings
-
 logger = logging.getLogger(__name__)
 
 # Input models for Test Plan operations
--- a/alita_sdk/tools/ado/wiki/ado_wrapper.py
+++ b/alita_sdk/tools/ado/wiki/ado_wrapper.py
@@ -17,11 +17,6 @@ from pydantic.fields import Field
 
 from ...elitea_base import BaseVectorStoreToolApiWrapper, extend_with_vector_tools
 
-try:
-    from alita_sdk.runtime.langchain.interfaces.llm_processor import get_embeddings
-except ImportError:
-    from alita_sdk.langchain.interfaces.llm_processor import get_embeddings
-
 logger = logging.getLogger(__name__)
 
 GetWikiInput = create_model(
@@ -68,13 +63,6 @@ class AzureDevOpsApiWrapper(BaseVectorStoreToolApiWrapper):
     _client: Optional[WikiClient] = PrivateAttr()  # Private attribute for the wiki client
     _core_client: Optional[CoreClient] = PrivateAttr()  # Private attribute for the CoreClient client
 
-    llm: Any = None
-    connection_string: Optional[SecretStr] = None
-    collection_name: Optional[str] = None
-    embedding_model: Optional[str] = "HuggingFaceEmbeddings"
-    embedding_model_params: Optional[Dict[str, Any]] = {"model_name": "sentence-transformers/all-MiniLM-L6-v2"}
-    vectorstore_type: Optional[str] = "PGVector"
-
     class Config:
         arbitrary_types_allowed = True  # Allow arbitrary types (e.g., WorkItemTrackingClient)
 
--- a/alita_sdk/tools/ado/work_item/ado_wrapper.py
+++ b/alita_sdk/tools/ado/work_item/ado_wrapper.py
@@ -16,11 +16,6 @@ from pydantic.fields import Field
 
 from alita_sdk.tools.non_code_indexer_toolkit import NonCodeIndexerToolkit
 
-try:
-    from alita_sdk.runtime.langchain.interfaces.llm_processor import get_embeddings
-except ImportError:
-    from alita_sdk.langchain.interfaces.llm_processor import get_embeddings
-
 logger = logging.getLogger(__name__)
 
 create_wi_field = """JSON of the work item fields to create in Azure DevOps, i.e.
--- a/alita_sdk/tools/base_indexer_toolkit.py
+++ b/alita_sdk/tools/base_indexer_toolkit.py
@@ -5,7 +5,6 @@ from typing import Any, Optional, List, Literal, Dict, Generator
 from langchain_core.documents import Document
 from pydantic import create_model, Field, SecretStr
 
-# from alita_sdk.runtime.langchain.interfaces.llm_processor import get_embeddings
 from .utils.content_parser import process_content_by_type
 from .vector_adapters.VectorStoreAdapter import VectorStoreAdapterFactory
 from ..runtime.tools.vectorstore_base import VectorStoreWrapperBase
@@ -95,7 +94,6 @@ BaseIndexDataParams = create_model(
                                               description="Optional step size for progress reporting during indexing")),
     clean_index=(Optional[bool], Field(default=False,
                                        description="Optional flag to enforce clean existing index before indexing new data")),
-    chunking_tool=(Literal[None,'markdown', 'statistical', 'proposal'], Field(description="Name of chunking tool", default=None)),
     chunking_config=(Optional[dict], Field(description="Chunking tool configuration", default_factory=dict)),
 )
 
@@ -162,7 +160,7 @@ class BaseIndexerToolkit(VectorStoreWrapperBase):
         chunking_config = kwargs.get("chunking_config")
         #
         if clean_index:
-            self._clean_index()
+            self._clean_index(collection_suffix)
         #
         documents = self._base_loader(**kwargs)
         documents = self._reduce_duplicates(documents, collection_suffix)
@@ -173,7 +171,7 @@ class BaseIndexerToolkit(VectorStoreWrapperBase):
         return self._save_index(list(documents), collection_suffix=collection_suffix, progress_step=progress_step)
 
     def _apply_loaders_chunkers(self, documents: Generator[Document, None, None], chunking_tool: str=None, chunking_config=None) -> Generator[Document, None, None]:
-        from alita_sdk.tools.chunkers import __confluence_chunkers__ as chunkers
+        from alita_sdk.tools.chunkers import __all__ as chunkers
 
         if chunking_config is None:
             chunking_config = {}
--- a/alita_sdk/tools/bitbucket/tools.py
+++ b/alita_sdk/tools/bitbucket/tools.py
@@ -134,7 +134,7 @@ class ListBranchesTool(BaseTool):
     name: str = "list_branches_in_repo"
     description: str = """This tool is a wrapper for the Bitbucket API to fetch a list of all branches in the repository.
     It will return the name of each branch. No input parameters are required."""
-    args_schema: Type[BaseModel] = None
+    args_schema: Type[BaseModel] = create_model("NoInput")
 
     def _run(self):
         try:
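Returning a real (empty) pydantic model from create_model("NoInput") instead of None gives LangChain a valid args_schema to introspect. A quick sketch of what that object looks like (pydantic v2 assumed):

    from pydantic import create_model

    NoInput = create_model("NoInput")   # BaseModel subclass with no fields
    NoInput()                           # validates with no arguments
    NoInput.model_json_schema()         # {'properties': {}, 'title': 'NoInput', 'type': 'object'}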
--- a/alita_sdk/tools/chunkers/__init__.py
+++ b/alita_sdk/tools/chunkers/__init__.py
@@ -2,13 +2,15 @@ from .code.codeparser import parse_code_files_for_db
 from .sematic.statistical_chunker import statistical_chunker
 from .sematic.markdown_chunker import markdown_chunker
 from .sematic.proposal_chunker import proposal_chunker
+from .sematic.json_chunker import json_chunker
 from .models import StatisticalChunkerConfig, MarkdownChunkerConfig, ProposalChunkerConfig
 
 __all__ = {
     'code_parser': parse_code_files_for_db,
     'statistical': statistical_chunker,
     'markdown': markdown_chunker,
-    'proposal': proposal_chunker
+    'proposal': proposal_chunker,
+    'json': json_chunker
 }
 
 __confluence_chunkers__ = {
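Note that __all__ in this package is used as a name-to-callable registry (a dict) rather than the conventional list of export names, and _apply_loaders_chunkers in base_indexer_toolkit.py (see above) now imports this dict instead of __confluence_chunkers__, which is what makes the new 'json' entry selectable as a chunking_tool. A hedged usage sketch, where documents is assumed to be a generator of Document objects:

    from alita_sdk.tools.chunkers import __all__ as chunkers

    chunker = chunkers['json']                                # resolves to json_chunker
    chunked = chunker(documents, config={"max_tokens": 512})  # lazily yields chunked Documents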
--- /dev/null
+++ b/alita_sdk/tools/chunkers/sematic/json_chunker.py
@@ -0,0 +1,24 @@
+import json
+import logging
+from typing import Generator
+from langchain_text_splitters import RecursiveJsonSplitter
+from langchain_core.documents import Document
+
+def json_chunker(file_content_generator: Generator[Document, None, None], config: dict, *args, **kwargs) -> Generator[Document, None, None]:
+    max_tokens = config.get("max_tokens", 512)
+    for doc in file_content_generator:
+        try:
+            data_dict = json.loads(doc.page_content)
+            chunks = RecursiveJsonSplitter(max_chunk_size=max_tokens).split_json(json_data=data_dict, convert_lists=True)
+            if len(chunks) == 1:
+                yield doc
+                continue
+            chunk_id = 1
+            for chunk in chunks:
+                metadata = doc.metadata.copy()
+                metadata['chunk_id'] = chunk_id
+                chunk_id += 1
+                yield Document(page_content=json.dumps(chunk), metadata=metadata)
+        except Exception as e:
+            logging.error(f"Failed to chunk document: {e}")
+            yield doc
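A rough illustration of the new chunker's behavior (assumed usage, not from the package): a document whose page_content parses as JSON is split into one Document per chunk, while a document that fails to parse is logged and passed through unchanged.

    import json
    from langchain_core.documents import Document
    from alita_sdk.tools.chunkers.sematic.json_chunker import json_chunker

    doc = Document(
        page_content=json.dumps({"steps": [{"id": i, "text": "step " * 40} for i in range(50)]}),
        metadata={"id": "tc-1"},
    )
    for chunk in json_chunker(iter([doc]), config={"max_tokens": 256}):
        # each chunk keeps the original metadata plus a 1-based 'chunk_id'
        print(chunk.metadata.get("chunk_id"), len(chunk.page_content))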
--- a/alita_sdk/tools/jira/api_wrapper.py
+++ b/alita_sdk/tools/jira/api_wrapper.py
@@ -1237,11 +1237,11 @@ class JiraApiWrapper(BaseVectorStoreToolApiWrapper):
         jql = kwargs.get('jql')
         fields_to_extract = kwargs.get('fields_to_extract')
         fields_to_index = kwargs.get('fields_to_index')
-        include_attachments = kwargs.get('include_attachments', False)
         max_total_issues = kwargs.get('max_total_issues', 1000)
 
-        # set values for skipped attachment extensions
+        # set values for skipped attachment extension
         self._skipped_attachment_extensions = kwargs.get('skip_attachment_extensions', [])
+        self._include_attachments = kwargs.get('include_attachments', False)
         self._included_fields = fields_to_extract.copy() if fields_to_extract else []
 
         try:
@@ -1252,7 +1252,7 @@ class JiraApiWrapper(BaseVectorStoreToolApiWrapper):
         if fields_to_extract:
             fields.extend(fields_to_extract)
 
-        if include_attachments:
+        if self._include_attachments:
             fields.append('attachment')
 
         # Use provided JQL query or default to all issues
@@ -1292,36 +1292,36 @@ class JiraApiWrapper(BaseVectorStoreToolApiWrapper):
 
         issue_key = base_document.metadata.get('issue_key')
         # get attachments content
-
-        issue = self._client.issue(issue_key, fields="attachment")
-        attachments = issue.get('fields', {}).get('attachment', [])
-        for attachment in attachments:
-            # get extension
-            ext = f".{attachment['filename'].split('.')[-1].lower()}"
-            if ext not in self._skipped_attachment_extensions:
-                attachment_id = f"attach_{attachment['id']}"
-                base_document.metadata.setdefault(IndexerKeywords.DEPENDENT_DOCS.value, []).append(attachment_id)
-                try:
-                    attachment_content = self._client.get_attachment_content(attachment['id'])
-                except Exception as e:
-                    logger.error(f"Failed to download attachment {attachment['filename']} for issue {issue_key}: {str(e)}")
-                    attachment_content = self._client.get(path=f"secure/attachment/{attachment['id']}/{attachment['filename']}", not_json_response=True)
-                content = load_content_from_bytes(attachment_content, ext, llm=self.llm) if ext not in '.pdf' \
-                    else parse_file_content(file_content=attachment_content, file_name=attachment['filename'], llm=self.llm, is_capture_image=True)
-                if not content:
-                    continue
-                yield Document(page_content=content,
-                               metadata={
-                                   'id': attachment_id,
-                                   'issue_key': issue_key,
-                                   'source': f"{self.base_url}/browse/{issue_key}",
-                                   'filename': attachment['filename'],
-                                   'created': attachment['created'],
-                                   'mimeType': attachment['mimeType'],
-                                   'author': attachment.get('author', {}).get('name'),
-                                   IndexerKeywords.PARENT.value: base_document.metadata.get('id', None),
-                                   'type': 'attachment',
-                               })
+        if self._include_attachments:
+            issue = self._client.issue(issue_key, fields="attachment")
+            attachments = issue.get('fields', {}).get('attachment', [])
+            for attachment in attachments:
+                # get extension
+                ext = f".{attachment['filename'].split('.')[-1].lower()}"
+                if ext not in self._skipped_attachment_extensions:
+                    attachment_id = f"attach_{attachment['id']}"
+                    base_document.metadata.setdefault(IndexerKeywords.DEPENDENT_DOCS.value, []).append(attachment_id)
+                    try:
+                        attachment_content = self._client.get_attachment_content(attachment['id'])
+                    except Exception as e:
+                        logger.error(f"Failed to download attachment {attachment['filename']} for issue {issue_key}: {str(e)}")
+                        attachment_content = self._client.get(path=f"secure/attachment/{attachment['id']}/{attachment['filename']}", not_json_response=True)
+                    content = load_content_from_bytes(attachment_content, ext, llm=self.llm) if ext not in '.pdf' \
+                        else parse_file_content(file_content=attachment_content, file_name=attachment['filename'], llm=self.llm, is_capture_image=True)
+                    if not content:
+                        continue
+                    yield Document(page_content=content,
+                                   metadata={
+                                       'id': attachment_id,
+                                       'issue_key': issue_key,
+                                       'source': f"{self.base_url}/browse/{issue_key}",
+                                       'filename': attachment['filename'],
+                                       'created': attachment['created'],
+                                       'mimeType': attachment['mimeType'],
+                                       'author': attachment.get('author', {}).get('name'),
+                                       IndexerKeywords.PARENT.value: base_document.metadata.get('id', None),
+                                       'type': 'attachment',
+                                   })
 
     def _jql_get_tickets(self, jql, fields="*all", start=0, limit=None, expand=None, validate_query=None):
         """
@@ -1430,9 +1430,9 @@ class JiraApiWrapper(BaseVectorStoreToolApiWrapper):
                                              Field(description="Whether to include attachment content in indexing",
                                                    default=False)),
         'max_total_issues': (Optional[int], Field(description="Maximum number of issues to index", default=1000)),
-        'skip_attachment_extensions': (Optional[str], Field(
-            description="Comma-separated list of file extensions to skip when processing attachments",
-            default=None)),
+        'skip_attachment_extensions': (Optional[List[str]], Field(
+            description="List of file extensions to skip when processing attachments: i.e. ['.png', '.jpg']",
+            default=[])),
     }
 
     # def index_data(self,
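skip_attachment_extensions is now typed as a list of extensions rather than a comma-separated string, and attachment loading is gated on the stored self._include_attachments flag. A hypothetical call shape after this change (parameter names taken from the schema above, the method itself is assumed):

    jira_api_wrapper.index_data(
        jql='project = DEMO AND updated >= -7d',
        include_attachments=True,
        skip_attachment_extensions=['.png', '.jpg'],   # list, not "png,jpg"
        max_total_issues=500,
    )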
--- a/alita_sdk/tools/qtest/__init__.py
+++ b/alita_sdk/tools/qtest/__init__.py
@@ -35,7 +35,7 @@ class QtestToolkit(BaseToolkit):
             name,
             qtest_configuration=(Optional[QtestConfiguration], Field(description="QTest API token", json_schema_extra={
                 'configuration_types': ['qtest']})),
-            qtest_project_id=(int, Field(description="QTest project id", json_schema_extra={'toolkit_name': True,
+            qtest_project_id=(int, Field(default=None, description="QTest project id", json_schema_extra={'toolkit_name': True,
                                          'max_toolkit_length': QtestToolkit.toolkit_max_length})),
             selected_tools=(List[Literal[tuple(selected_tools)]],
                             Field(default=[], json_schema_extra={'args_schemas': selected_tools})),
--- /dev/null
+++ b/alita_sdk/tools/utils/available_tools_decorator.py
@@ -0,0 +1,6 @@
+def extend_with_parent_available_tools(method):
+    def wrapper(self, *args, **kwargs):
+        child_tools = method(self, *args, **kwargs)
+        parent_tools = super(self.__class__, self).get_available_tools()
+        return parent_tools + child_tools
+    return wrapper
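The new decorator lets a child wrapper declare only its own tools and inherit the rest: it calls the parent's get_available_tools and prepends the result to the decorated method's return value. A minimal sketch (toy classes, not from the SDK):

    from alita_sdk.tools.utils.available_tools_decorator import extend_with_parent_available_tools

    class Parent:
        def get_available_tools(self):
            return [{"name": "index_data"}]

    class Child(Parent):
        @extend_with_parent_available_tools
        def get_available_tools(self):
            return [{"name": "get_test_case"}]

    Child().get_available_tools()
    # -> [{'name': 'index_data'}, {'name': 'get_test_case'}]

Because it resolves the parent via super(self.__class__, self), the decorator assumes it is applied at exactly one level of the class hierarchy.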
@@ -17,9 +17,7 @@ name = "xray_cloud"
17
17
  def get_tools(tool):
18
18
  return XrayToolkit().get_toolkit(
19
19
  selected_tools=tool['settings'].get('selected_tools', []),
20
- base_url=tool['settings'].get('base_url', None),
21
- client_id=tool['settings'].get('client_id', None),
22
- client_secret=tool['settings'].get('client_secret', None),
20
+ xray_configuration=tool['settings'].get('xray_configuration', {}),
23
21
  limit=tool['settings'].get('limit', 20),
24
22
  verify_ssl=tool['settings'].get('verify_ssl', True),
25
23
  toolkit_name=tool.get('toolkit_name'),
@@ -29,7 +27,6 @@ def get_tools(tool):
29
27
  pgvector_configuration=tool['settings'].get('pgvector_configuration', {}),
30
28
  embedding_model=tool['settings'].get('embedding_model'),
31
29
  collection_name=str(tool['toolkit_name']),
32
- vectorstore_type="PGVector"
33
30
  ).get_tools()
34
31
 
35
32
 
@@ -44,7 +41,7 @@ class XrayToolkit(BaseToolkit):
44
41
  return create_model(
45
42
  name,
46
43
  limit=(Optional[int], Field(description="Limit", default=100)),
47
- xray_configuration=(Optional[XrayConfiguration], Field(description="Xray Configuration", json_schema_extra={'configuration_types': ['xray']})),
44
+ xray_configuration=(XrayConfiguration, Field(description="Xray Configuration", json_schema_extra={'configuration_types': ['xray']})),
48
45
  pgvector_configuration=(Optional[PgVectorConfiguration], Field(default=None,
49
46
  description="PgVector Configuration",
50
47
  json_schema_extra={
@@ -71,7 +68,7 @@ class XrayToolkit(BaseToolkit):
71
68
  wrapper_payload = {
72
69
  **kwargs,
73
70
  # Use xray_configuration fields
74
- **kwargs.get('xray_configuration', {}),
71
+ **(kwargs.get('xray_configuration') or {}),
75
72
  **(kwargs.get('pgvector_configuration') or {}),
76
73
  }
77
74
  xray_api_wrapper = XrayApiWrapper(**wrapper_payload)
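The or {} guard matters when the key is present but set to None: kwargs.get('xray_configuration', {}) returns None in that case, and unpacking None raises a TypeError. For example:

    kwargs = {'xray_configuration': None}
    {**(kwargs.get('xray_configuration') or {})}   # {} -- the old spelling would raise TypeError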
--- a/alita_sdk/tools/zephyr_enterprise/api_wrapper.py
+++ b/alita_sdk/tools/zephyr_enterprise/api_wrapper.py
@@ -1,14 +1,13 @@
 import logging
-from importlib.metadata import metadata
-from operator import ifloordiv
-from typing import Optional, List, Generator
+from typing import Optional, List, Generator, Literal
 
 from langchain_core.tools import ToolException
 from pydantic import create_model, model_validator, PrivateAttr, Field, SecretStr
 
 from langchain_core.documents import Document
 from .zephyr_enterprise import ZephyrClient
-from ..elitea_base import BaseToolApiWrapper, BaseVectorStoreToolApiWrapper, extend_with_vector_tools
+from ..non_code_indexer_toolkit import NonCodeIndexerToolkit
+from ..utils.available_tools_decorator import extend_with_parent_available_tools
 
 logger = logging.getLogger(__name__)
 
@@ -23,7 +22,7 @@ zql_description = """
 "folder=\"TestToolkit\"", "name~\"TestToolkit5\"
 """
 
-class ZephyrApiWrapper(BaseVectorStoreToolApiWrapper):
+class ZephyrApiWrapper(NonCodeIndexerToolkit):
     base_url: str
     token: SecretStr
     _client: Optional[ZephyrClient] = PrivateAttr()
@@ -34,7 +33,7 @@ class ZephyrApiWrapper(BaseVectorStoreToolApiWrapper):
         base_url = values.get('base_url')
         token = values.get('token')
         cls._client = ZephyrClient(base_url=base_url, token=token)
-        return values
+        return super().validate_toolkit(values)
 
     def get_test_case(self, testcase_id: str):
 
@@ -153,7 +152,8 @@ class ZephyrApiWrapper(BaseVectorStoreToolApiWrapper):
         Returns a list of fields for index_data args schema.
         """
         return {
-            "zql": (str, Field(description=zql_description, examples=["folder=\"TestToolkit\"", "name~\"TestToolkit5\""]))
+            "zql": (str, Field(description=zql_description, examples=["folder=\"TestToolkit\"", "name~\"TestToolkit5\""])),
+            'chunking_tool': (Literal['json'], Field(description="Name of chunking tool", default='json'))
         }
 
     def _base_loader(self, zql: str, **kwargs) -> Generator[Document, None, None]:
@@ -166,12 +166,17 @@ class ZephyrApiWrapper(BaseVectorStoreToolApiWrapper):
             }
             yield Document(page_content='', metadata=metadata)
 
-    def _process_document(self, document: Document) -> Generator[Document, None, None]:
-        id = document.metadata['id']
-        test_case_content = self.get_test_case_steps(id)
-        document.page_content = test_case_content
-
-    @extend_with_vector_tools
+    def _extend_data(self, documents: Generator[Document, None, None]) -> Generator[Document, None, None]:
+        for document in documents:
+            try:
+                id = document.metadata['id']
+                test_case_content = self.get_test_case_steps(id)
+                document.page_content = test_case_content
+            except Exception as e:
+                logging.error(f"Failed to process document: {e}")
+            yield document
+
+    @extend_with_parent_available_tools
     def get_available_tools(self):
         return [
             {
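_process_document (one document, no error handling) is replaced by _extend_data, a generator stage that re-yields every document and logs per-document failures, so one broken test case no longer aborts the whole indexing batch. Roughly, with hypothetical driver code over the wrapper's internal pipeline:

    docs = wrapper._extend_data(wrapper._base_loader(zql='folder="TestToolkit"'))
    for doc in docs:
        # documents whose steps could not be fetched come through with empty page_content
        print(doc.metadata['id'], bool(doc.page_content))

The same _process_document -> _extend_data refactor is applied to the Zephyr Essential and Zephyr Scale wrappers below.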
--- a/alita_sdk/tools/zephyr_essential/api_wrapper.py
+++ b/alita_sdk/tools/zephyr_essential/api_wrapper.py
@@ -1,13 +1,17 @@
 import json
-from typing import Optional, Generator
+import logging
+from typing import Optional, Generator, Literal
 
 from pydantic import model_validator, create_model, Field, SecretStr, PrivateAttr
 
 from .client import ZephyrEssentialAPI
-from ..elitea_base import extend_with_vector_tools, BaseVectorStoreToolApiWrapper
 from langchain_core.documents import Document
 from langchain_core.tools import ToolException
 
-class ZephyrEssentialApiWrapper(BaseVectorStoreToolApiWrapper):
+from ..non_code_indexer_toolkit import NonCodeIndexerToolkit
+from ..utils.available_tools_decorator import extend_with_parent_available_tools
+
+
+class ZephyrEssentialApiWrapper(NonCodeIndexerToolkit):
     token: SecretStr
     _client: ZephyrEssentialAPI = PrivateAttr()
 
@@ -22,7 +26,7 @@ class ZephyrEssentialApiWrapper(BaseVectorStoreToolApiWrapper):
             base_url=base_url,
             token=token
         )
-        return values
+        return super().validate_toolkit(values)
 
     def list_test_cases(self, project_key: Optional[str] = None, folder_id: Optional[str] = None, max_results: int = None, start_at: int = None):
         """List test cases with optional filters."""
@@ -229,6 +233,11 @@ class ZephyrEssentialApiWrapper(BaseVectorStoreToolApiWrapper):
         except json.JSONDecodeError as e:
             raise ValueError(f"Invalid JSON string: {str(e)}")
 
+    def _index_tool_params(self):
+        return {
+            'chunking_tool':(Literal['json'], Field(description="Name of chunking tool", default='json'))
+        }
+
     def _base_loader(self, **kwargs) -> Generator[Document, None, None]:
         try:
             test_cases = self.list_test_cases()
@@ -236,36 +245,37 @@ class ZephyrEssentialApiWrapper(BaseVectorStoreToolApiWrapper):
             raise ToolException(f"Unable to extract test cases: {e}")
 
         for case in test_cases:
-            case['type'] = "TEST_CASE"
             metadata = {
                 k: v for k, v in case.items()
                 if isinstance(v, (str, int, float, bool, list, dict))
             }
-
-            yield Document(page_content=json.dumps(case), metadata=metadata)
-
-    def _process_document(self, document: Document) -> Generator[Document, None, None]:
-        try:
-            base_data = json.loads(document.page_content)
-
-            if base_data['type'] and base_data['type'] == "TEST_CASE":
-                additional_content = self._process_test_case(base_data)
-                base_data['test_case_content'] = additional_content
-
-            document.page_content = json.dumps(base_data)
-        except json.JSONDecodeError as e:
-            raise ToolException(f"Failed to decode JSON from document: {e}")
-
-    def _process_test_case(self, case):
-        steps = self.get_test_case_test_steps(case['key'])
-        script = self.get_test_case_test_script(case['key'])
-        additional_content = {
-            "steps": "" if isinstance(steps, ToolException) else steps,
-            "script": "" if isinstance(script, ToolException) else script,
-        }
-        return additional_content
-
-    @extend_with_vector_tools
+            metadata['type'] = "TEST_CASE"
+
+            yield Document(page_content="", metadata=metadata)
+
+    def _extend_data(self, documents: Generator[Document, None, None]) -> Generator[Document, None, None]:
+        for document in documents:
+            try:
+                if 'type' in document.metadata and document.metadata['type'] == "TEST_CASE":
+                    additional_content = self._process_test_case(document.metadata['key'])
+                    for steps_type, content in additional_content.items():
+                        if content:
+                            document.page_content = json.dumps(content)
+                            document.metadata["steps_type"] = steps_type
+            except Exception as e:
+                logging.error(f"Failed to process document: {e}")
+            yield document
+
+    def _process_test_case(self, key) -> dict:
+        steps = self.get_test_case_test_steps(key)
+        if steps and not isinstance(steps, ToolException):
+            return {"steps": steps}
+        script = self.get_test_case_test_script(key)
+        if script and not isinstance(script, ToolException):
+            return {"script": script}
+        return {"empty": ""}
+
+    @extend_with_parent_available_tools
     def get_available_tools(self):
         return [
             {
--- a/alita_sdk/tools/zephyr_scale/api_wrapper.py
+++ b/alita_sdk/tools/zephyr_scale/api_wrapper.py
@@ -1,15 +1,18 @@
 import json
 import logging
 import re
-from typing import Any, Optional, List, Dict, Tuple, Union, Generator
+from typing import Any, Optional, List, Dict, Tuple, Union, Generator, Literal
 
 from pydantic import model_validator, BaseModel, SecretStr
 from langchain_core.tools import ToolException
 from pydantic import create_model, PrivateAttr
 from pydantic.fields import Field
 
-from ..elitea_base import BaseVectorStoreToolApiWrapper, BaseIndexParams, extend_with_vector_tools
 from langchain_core.documents import Document
+
+from ..non_code_indexer_toolkit import NonCodeIndexerToolkit
+from ..utils.available_tools_decorator import extend_with_parent_available_tools
+
 try:
     from alita_sdk.runtime.langchain.interfaces.llm_processor import get_embeddings
 except ImportError:
@@ -249,7 +252,7 @@ ZephyrUpdateTestSteps = create_model(
 )
 
 
-class ZephyrScaleApiWrapper(BaseVectorStoreToolApiWrapper):
+class ZephyrScaleApiWrapper(NonCodeIndexerToolkit):
     # url for a Zephyr server
     base_url: Optional[str] = ""
     # auth with Jira token (cloud & server)
@@ -296,7 +299,7 @@ class ZephyrScaleApiWrapper(BaseVectorStoreToolApiWrapper):
         # else:
         # Cloud version is enabled for now
         cls._api = ZephyrScale(token=values['token']).api
-        return values
+        return super().validate_toolkit(values)
 
     def get_tests(self, project_key: str = None, folder_id: str = None, maxResults: Optional[int] = 10, startAt: Optional[int] = 0):
         """Retrieves all test cases. Query parameters can be used to filter the results.
@@ -1210,7 +1213,8 @@ class ZephyrScaleApiWrapper(BaseVectorStoreToolApiWrapper):
 
         Example:
         'folder = "Authentication" AND label in ("Smoke", "Critical") AND text ~ "login" AND orderBy = "name" AND orderDirection = "ASC"'
-        """))
+        """)),
+        'chunking_tool': (Literal['json'], Field(description="Name of chunking tool", default='json'))
     }
 
     def _base_loader(self, project_key: str, jql: str, **kwargs) -> Generator[Document, None, None]:
@@ -1250,8 +1254,8 @@ class ZephyrScaleApiWrapper(BaseVectorStoreToolApiWrapper):
             for key, value in folder.items():
                 if value is not None:
                     metadata[key] = value
-            page_content['type'] = "FOLDER"
-            yield Document(page_content=json.dumps(page_content), metadata=metadata)
+            metadata['type'] = "FOLDER"
+            yield Document(page_content="", metadata=metadata)
 
     def _get_test_cases_docs(self, project_key: str, jql: str) -> Generator[Document, None, None]:
         try:
@@ -1269,31 +1273,31 @@ class ZephyrScaleApiWrapper(BaseVectorStoreToolApiWrapper):
                 metadata['updated_on'] = last_version['createdOn']
             else:
                 metadata['updated_on'] = case['createdOn']
+            metadata['type'] = "TEST_CASE"
 
-            case['type'] = "TEST_CASE"
-
-            yield Document(page_content=json.dumps(case), metadata=metadata)
-
-    def _process_document(self, document: Document) -> Generator[Document, None, None]:
-        try:
-            base_data = json.loads(document.page_content)
-
-            if base_data['type'] and base_data['type'] == "TEST_CASE":
-                additional_content = self._process_test_case(base_data)
-                base_data['test_case_content'] = additional_content
+            yield Document(page_content="", metadata=metadata)
 
-            document.page_content = json.dumps(base_data)
-        except json.JSONDecodeError as e:
-            raise ToolException(f"Failed to decode JSON from document: {e}")
-
-    def _process_test_case(self, case):
-        steps = self.get_test_steps(case['key'], return_list=True)
-        script = self.get_test_script(case['key'], return_only_script=True)
-        additional_content = {
-            "steps": "" if isinstance(steps, ToolException) else steps,
-            "script": "" if isinstance(script, ToolException) else script,
-        }
-        return additional_content
+    def _extend_data(self, documents: Generator[Document, None, None]) -> Generator[Document, None, None]:
+        for document in documents:
+            try:
+                if 'type' in document.metadata and document.metadata['type'] == "TEST_CASE":
+                    additional_content = self._process_test_case(document.metadata['key'])
+                    for steps_type, content in additional_content.items():
+                        if content:
+                            document.page_content = json.dumps(content)
+                            document.metadata["steps_type"] = steps_type
+            except Exception as e:
+                logging.error(f"Failed to process document: {e}")
+            yield document
+
+    def _process_test_case(self, key):
+        steps = self.get_test_steps(key, return_list=True)
+        if steps and not isinstance(steps, ToolException):
+            return {"steps": steps}
+        script = self.get_test_script(key, return_only_script=True)
+        if script and not isinstance(script, ToolException):
+            return {"script": script}
+        return {"empty": ""}
 
     def get_tests_recursive(self, project_key: str = None, folder_id: str = None, maxResults: Optional[int] = 100, startAt: Optional[int] = 0):
         """Retrieves all test cases recursively from a folder and all its subfolders.
@@ -1552,7 +1556,7 @@ class ZephyrScaleApiWrapper(BaseVectorStoreToolApiWrapper):
         except Exception as e:
             return ToolException(f"Error updating test steps for test case {test_case_key}: {str(e)}")
 
-    @extend_with_vector_tools
+    @extend_with_parent_available_tools
     def get_available_tools(self):
         return [
             {
--- a/alita_sdk-0.3.256.dist-info/METADATA
+++ b/alita_sdk-0.3.258.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: alita_sdk
-Version: 0.3.256
+Version: 0.3.258
 Summary: SDK for building langchain agents using resources from Alita
 Author-email: Artem Rozumenko <artyom.rozumenko@gmail.com>, Mikalai Biazruchka <mikalai_biazruchka@epam.com>, Roman Mitusov <roman_mitusov@epam.com>, Ivan Krakhmaliuk <lifedjik@gmail.com>, Artem Dubrovskiy <ad13box@gmail.com>
 License-Expression: Apache-2.0
--- a/alita_sdk-0.3.256.dist-info/RECORD
+++ b/alita_sdk-0.3.258.dist-info/RECORD
@@ -10,7 +10,7 @@ alita_sdk/configurations/confluence.py,sha256=mAW2fgSEOg-BAV768Sc6b_EuRA3H5UL9xf
 alita_sdk/configurations/delta_lake.py,sha256=ADWcjabi7Krq2yxIpoc_tmhdncdgot2GBphE7ziDeTY,1133
 alita_sdk/configurations/embedding.py,sha256=8GSC8Feh8CH7bT_6cQhNqlS6raE91S2YRAtb2N9bUA8,552
 alita_sdk/configurations/figma.py,sha256=vecZ20IyZgnFO2GdphkovYHMISRPcUYh7fxkUQsPwX8,1306
-alita_sdk/configurations/github.py,sha256=GSj6sA4f6SfW0ZpoHXKi5FzbPDC6wE1AlscwWqIPj14,1832
+alita_sdk/configurations/github.py,sha256=NAiXotsl766IwjOOsVrs70PuoP4gvXla2Yycsv1qBpI,3569
 alita_sdk/configurations/gitlab.py,sha256=0W35igIlue6QxOnPgw65ToLf4HSdPVuRyObdwQuEld8,1053
 alita_sdk/configurations/jira.py,sha256=ASh8I2iVXzOOtwjRX7kYNllXpCXyAIxFMP_YD4Q0PTI,1379
 alita_sdk/configurations/pgvector.py,sha256=P-Q07ocIg4CXN_7hUBDM6r9gN62XS1N2jyP79tM9Tig,500
@@ -26,7 +26,7 @@ alita_sdk/configurations/zephyr_enterprise.py,sha256=5W1QEcv62Y5Rk_kApI2QmOwvWZe
 alita_sdk/runtime/__init__.py,sha256=4W0UF-nl3QF2bvET5lnah4o24CoTwSoKXhuN0YnwvEE,828
 alita_sdk/runtime/clients/__init__.py,sha256=BdehU5GBztN1Qi1Wul0cqlU46FxUfMnI6Vq2Zd_oq1M,296
 alita_sdk/runtime/clients/artifact.py,sha256=H3pJAh5G-zWVyJ6YbqHGk4jA8U6HfacQduiTivpJZ3Y,3210
-alita_sdk/runtime/clients/client.py,sha256=irj2uTGdIQj8Wd1ZGdi5yDCFm_n9TiRhEhODJz4yI84,43493
+alita_sdk/runtime/clients/client.py,sha256=ZkXP3-e785EyYoJLNowAGV3bRqv3fs1XqD_iuqqkg9Y,43513
 alita_sdk/runtime/clients/datasource.py,sha256=HAZovoQN9jBg0_-lIlGBQzb4FJdczPhkHehAiVG3Wx0,1020
 alita_sdk/runtime/clients/prompt.py,sha256=li1RG9eBwgNK_Qf0qUaZ8QNTmsncFrAL2pv3kbxZRZg,1447
 alita_sdk/runtime/langchain/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -119,19 +119,19 @@ alita_sdk/runtime/utils/toolkit_runtime.py,sha256=MU63Fpxj0b5_r1IUUc0Q3-PN9VwL7r
 alita_sdk/runtime/utils/toolkit_utils.py,sha256=I9QFqnaqfVgN26LUr6s3XlBlG6y0CoHURnCzG7XcwVs,5311
 alita_sdk/runtime/utils/utils.py,sha256=CpEl3LCeLbhzQySz08lkKPm7Auac6IiLF7WB8wmArMI,589
 alita_sdk/tools/__init__.py,sha256=ko5TToGYZFmBrho26DRAVvrkHWxQ2sfs8gVAASinYp8,10611
-alita_sdk/tools/base_indexer_toolkit.py,sha256=gOjE1igKyjG1LohMj0XMlj1IGaFp7eEEDqyEG6-xLmc,18405
+alita_sdk/tools/base_indexer_toolkit.py,sha256=mfPo2iWhfHg-ihMC59nCf9XTomCPqPmKAudGkbpgBSE,18193
 alita_sdk/tools/elitea_base.py,sha256=PfelIUb5YFTjDN_1jNYT9tJbjfYr11PAUrPQHyW2d5I,32830
 alita_sdk/tools/non_code_indexer_toolkit.py,sha256=v9uq1POE1fQKCd152mbqDtF-HSe0qoDj83k4E5LAkMI,1080
-alita_sdk/tools/ado/__init__.py,sha256=bArTObt5cqG1SkijKevWGbsIILHBA3aCStg8Q1jd69k,1243
+alita_sdk/tools/ado/__init__.py,sha256=u2tdDgufGuDb-7lIgKKQlqgStL9Wd1gzNmRNYems2c0,1267
 alita_sdk/tools/ado/utils.py,sha256=PTCludvaQmPLakF2EbCGy66Mro4-rjDtavVP-xcB2Wc,1252
-alita_sdk/tools/ado/repos/__init__.py,sha256=zPLrWuAZamPrcUStOYHwWb-_Cvq6qm2JOwbn4Nnog2w,5374
-alita_sdk/tools/ado/repos/repos_wrapper.py,sha256=nPVsS10Se52yHmZ_YXVGywCSaYLlBEYBTBlhBcDJr80,50143
+alita_sdk/tools/ado/repos/__init__.py,sha256=n-IhKED05RwQGWT4LfCaxJ85uDyG4S9zTjSjK6A8N4o,5192
+alita_sdk/tools/ado/repos/repos_wrapper.py,sha256=e3bGsM03m0UggSQfoVh5Gg_M1MYt_BTKS-s9G2Unc1k,49739
 alita_sdk/tools/ado/test_plan/__init__.py,sha256=4fEw_3cm4shuZ868HhAU-uMH3xNXPyb3uRjyNWoBKls,5243
-alita_sdk/tools/ado/test_plan/test_plan_wrapper.py,sha256=jQt8kFmdAzsopjByLTMiSnWtoqz_IUOmYkhPTVGeMnU,20265
+alita_sdk/tools/ado/test_plan/test_plan_wrapper.py,sha256=57nYl-F4OPDwZu-amg9ptG0Z4iAYMBobcKlrbGgSAg8,20079
 alita_sdk/tools/ado/wiki/__init__.py,sha256=uBKo_Meu2ZxMxcxGsMmvCXyplRE2um1_PIRvdYd37rM,5171
-alita_sdk/tools/ado/wiki/ado_wrapper.py,sha256=zg6wMRar1DTp-ZRlYaQifBEnpYmTrHXskTNPdrLdy8s,14759
+alita_sdk/tools/ado/wiki/ado_wrapper.py,sha256=YfumbP0lI04hXWY01P9f-q9JBWuQRaeIJEvYDpcZnu0,14238
 alita_sdk/tools/ado/work_item/__init__.py,sha256=HNcdIMwTSNe-25_Pg-KmVVXTFci3vNa84tkTFkls36c,5373
-alita_sdk/tools/ado/work_item/ado_wrapper.py,sha256=gEywCL_kS0k1jWcDhsmYUybpIP08tH8go6CixLJGwT4,28409
+alita_sdk/tools/ado/work_item/ado_wrapper.py,sha256=uPhDp2zC8t42FQk7xc7gNyYs2o-hfOxsrw_rw31f7Sw,28223
 alita_sdk/tools/advanced_jira_mining/__init__.py,sha256=pUTzECqGvYaR5qWY3JPUhrImrZgc7pCXuqSe5eWIE80,4604
 alita_sdk/tools/advanced_jira_mining/data_mining_wrapper.py,sha256=nZPtuwVWp8VeHw1B8q9kdwf-6ZvHnlXTOGdcIMDkKpw,44211
 alita_sdk/tools/aws/__init__.py,sha256=tB6GCOg4XGSpR6qgbgAF4MUQ5-YmQCbWurWgrVKEKQ8,181
@@ -148,7 +148,7 @@ alita_sdk/tools/bitbucket/__init__.py,sha256=_ywYlgYoE6gtJlLR94MHcS4EPWIaCFU_Mxy
 alita_sdk/tools/bitbucket/api_wrapper.py,sha256=xKa2dQ-gw2YbLJx7P1xrc3JUfgBkXkMsEG-s0mzh3KI,11023
 alita_sdk/tools/bitbucket/bitbucket_constants.py,sha256=UsbhQ1iEvrKoxceTFPWTYhaXS1zSxbmjs1TwY0-P4gw,462
 alita_sdk/tools/bitbucket/cloud_api_wrapper.py,sha256=VELi65tLXvszwCGQSqVfyVal0ylx9DgAmAGpRQL_Zkg,15522
-alita_sdk/tools/bitbucket/tools.py,sha256=zKBUq7t9zLa1EvhlVZzyVcZSvwvdcbtz0oslgPFZeeo,15307
+alita_sdk/tools/bitbucket/tools.py,sha256=o1hwSEbSmjd5YgF-AsWls2ZyrewUuegBn9S6xTwsT74,15326
 alita_sdk/tools/browser/__init__.py,sha256=iByi9uMGjd6v44SagIPTm5fu1vWnxIkjn3xsx86uRwI,5249
 alita_sdk/tools/browser/crawler.py,sha256=jhE35dU94eQLURSM-D50tspOqEMsiGzMDbYNqNSR2mU,2279
 alita_sdk/tools/browser/duck_duck_go_search.py,sha256=iKws923v34o-ySXohJw-8xTDBWlj3fMsnzC_ZRuPugE,2002
@@ -171,7 +171,7 @@ alita_sdk/tools/carrier/tools.py,sha256=xBKXKNEdPQ_kWysoV7w6y4cDjtAMno8Qj2ubI4zr
 alita_sdk/tools/carrier/ui_reports_tool.py,sha256=Y6EstTRCa9d11ipFUFGOYlpiEhFx7aOQcgZ_M5Gd1lQ,13708
 alita_sdk/tools/carrier/update_ui_test_schedule_tool.py,sha256=jh9Q86cMCEqpsFopJPNIP0wlr7sYVa_3lhlq6lRmkGg,11850
 alita_sdk/tools/carrier/utils.py,sha256=rl7aq-F6ed_PapDM15w8EtS0BkgsjpDrNdKYuDCMOaI,4376
-alita_sdk/tools/chunkers/__init__.py,sha256=myaBVvPbUsz6PXtBDpA4EiPQgLvIv3q_WPh86kxlccI,774
+alita_sdk/tools/chunkers/__init__.py,sha256=5Nq--vAyECTh85HhDr0qEB-2UPXHXZWefP8hzX5zVO8,847
 alita_sdk/tools/chunkers/models.py,sha256=NNkLSljZboYDj6vbqeHmcjj9JrTHbkVWmoHGsL98q3k,3032
 alita_sdk/tools/chunkers/utils.py,sha256=gOyDHhXSH6Wlmxj_OsMOa2vydZuHD6HZql4PH-SYcTw,192
 alita_sdk/tools/chunkers/code/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -194,6 +194,7 @@ alita_sdk/tools/chunkers/code/treesitter/treesitter_rs.py,sha256=LgKyNffBy30gIr8
 alita_sdk/tools/chunkers/code/treesitter/treesitter_ts.py,sha256=Qs1a_BBN296iZc5hh8UNF9sc0G0-A_XZVhP3Na1ZNDg,387
 alita_sdk/tools/chunkers/sematic/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 alita_sdk/tools/chunkers/sematic/base.py,sha256=bRHpCFbOy-KPe4HBGpegrvIhvOsd7sDRfmb06T8tSuU,349
+alita_sdk/tools/chunkers/sematic/json_chunker.py,sha256=wzlGeoS4qZOQA6Alcc0sn6zAxZSGVdwCLm0wHqmMODA,1038
 alita_sdk/tools/chunkers/sematic/markdown_chunker.py,sha256=HmAGKuIodnMcHl-kBwAb1NY0GKKwAskRFvGaW3m4HAM,3859
 alita_sdk/tools/chunkers/sematic/proposal_chunker.py,sha256=t8JjX9TH6yHXXaemiDK1E6000tlES2Kl8XfyezmlIoo,5116
 alita_sdk/tools/chunkers/sematic/statistical_chunker.py,sha256=VDQcMC-ky72GqdWJiHMmcRmfJTTU5XglBF1IWg2Qews,13403
@@ -246,7 +247,7 @@ alita_sdk/tools/google/bigquery/tool.py,sha256=Esf9Hsp8I0e7-5EdkFqQ-bid0cfrg-bfS
 alita_sdk/tools/google_places/__init__.py,sha256=mHKc7u9P2gqGDzqqJNQC9qiZYEm5gncnM_1XjtrM17o,3152
 alita_sdk/tools/google_places/api_wrapper.py,sha256=7nZly6nk4f4Tm7s2MVdnnwlb-1_WHRrDhyjDiqoyPjA,4674
 alita_sdk/tools/jira/__init__.py,sha256=k9Alxe1tEHYYzkCZv9hd89JMzBgv7cZiVT6k_tsO7hg,6073
-alita_sdk/tools/jira/api_wrapper.py,sha256=i8x8CttVEW_FFEl6hBNdzCqc-aMyy1FhqkiEHGaDmpo,76178
+alita_sdk/tools/jira/api_wrapper.py,sha256=MRDBL_VES6Qs1mHXetwqfk-2BibsNsXQIiaz0bAocFY,76353
 alita_sdk/tools/keycloak/__init__.py,sha256=0WB9yXMUUAHQRni1ghDEmd7GYa7aJPsTVlZgMCM9cQ0,3050
 alita_sdk/tools/keycloak/api_wrapper.py,sha256=cOGr0f3S3-c6tRDBWI8wMnetjoNSxiV5rvC_0VHb8uw,3100
 alita_sdk/tools/llm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -284,7 +285,7 @@ alita_sdk/tools/postman/api_wrapper.py,sha256=bKgnEQVGv3QhqTevzBOwiXxAd0-Y5vUI1-
 alita_sdk/tools/postman/postman_analysis.py,sha256=2d-Oi2UORosIePIUyncSONw9hY7dw8Zc7BQvCd4aqpg,45115
 alita_sdk/tools/pptx/__init__.py,sha256=vVUrWnj7KWJgEk9oxGSsCAQ2SMSXrp_SFOdUHYQKcAo,3444
 alita_sdk/tools/pptx/pptx_wrapper.py,sha256=yyCYcTlIY976kJ4VfPo4dyxj4yeii9j9TWP6W8ZIpN8,29195
-alita_sdk/tools/qtest/__init__.py,sha256=4vXCB9GSKNFeRTimSB7wklAnO-4reZgrw0Nw1_QuRKE,4070
+alita_sdk/tools/qtest/__init__.py,sha256=3NUBDnwIZFFmdNNzCo4u7hBfhgU3AT5NkyMBtdxS9yw,4084
 alita_sdk/tools/qtest/api_wrapper.py,sha256=cWXpmjjel9CYIXXjetJkARLYZXqvHufSghctTHN0ggc,22296
 alita_sdk/tools/qtest/tool.py,sha256=kKzNPS4fUC76WQQttQ6kdVANViHEvKE8Kf174MQiNYU,562
 alita_sdk/tools/rally/__init__.py,sha256=JvLt_hW_hC1WiCcwBwi1TlOH7QudJpM2z7XXGWYVaqI,3423
@@ -312,10 +313,11 @@ alita_sdk/tools/testio/api_wrapper.py,sha256=BvmL5h634BzG6p7ajnQLmj-uoAw1gjWnd4F
 alita_sdk/tools/testrail/__init__.py,sha256=0kETjWKLU7R6mugBWsjwEUsh10pipbAeNSGJAO0FBh0,4634
 alita_sdk/tools/testrail/api_wrapper.py,sha256=5T-QyTzt-J0rI32xc_E684lCdgyWeHSyeTYiwQwtGyg,32275
 alita_sdk/tools/utils/__init__.py,sha256=155xepXPr4OEzs2Mz5YnjXcBpxSv1X2eznRUVoPtyK0,3268
+alita_sdk/tools/utils/available_tools_decorator.py,sha256=IbrdfeQkswxUFgvvN7-dyLMZMyXLiwvX7kgi3phciCk,273
 alita_sdk/tools/utils/content_parser.py,sha256=zqeyuxZqZqVFq5M5sZM-falMdlOw48FyZnp3Z0XUpCw,9868
 alita_sdk/tools/vector_adapters/VectorStoreAdapter.py,sha256=a6FAsiix_EvATIKUf5YT6vHh5LDyJ5uSP3LJqoxFo04,17367
 alita_sdk/tools/vector_adapters/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-alita_sdk/tools/xray/__init__.py,sha256=GGpbiBdDQ9kMFqJEHYi7XwKpkuMMHi-ZF-IM8yFIgUM,4380
+alita_sdk/tools/xray/__init__.py,sha256=BnG2StSfX44CUMtrjHTcSCDWxxse5tCZqwyaZSkBKIc,4230
 alita_sdk/tools/xray/api_wrapper.py,sha256=A8PJmY2k7TowaD_vk6ZxkMnSUoZUt9A6g4TJrZfNTAw,32225
 alita_sdk/tools/yagmail/__init__.py,sha256=c4Qn3em0tLxzRmFKpzbBgY9W2EnOoKf0azoDJHng5CY,2208
 alita_sdk/tools/yagmail/yagmail_wrapper.py,sha256=SKoGVd1X4Ew3ad5tOdtPoY00M6jStNdT3q7GXEjQc5g,1952
@@ -324,18 +326,18 @@ alita_sdk/tools/zephyr/__init__.py,sha256=8B2Ibz5QTmB5WkV0q8Sq4kuj92FFaFWZLrT877
 alita_sdk/tools/zephyr/api_wrapper.py,sha256=lJCYPG03ej0qgdpLflnS7LFB4HSAfGzIvTjAJt07CQs,6244
 alita_sdk/tools/zephyr/rest_client.py,sha256=7vSD3oYIX-3KbAFed-mphSQif_VRuXrq5O07ryNQ7Pk,6208
 alita_sdk/tools/zephyr_enterprise/__init__.py,sha256=1E0xuyYx7QSuqIRKclEapI7MvxXjJ3Lwf4YpDXPzehw,4087
-alita_sdk/tools/zephyr_enterprise/api_wrapper.py,sha256=p9EpkO5tif3JJzprz2_VuLsQ1yET7TwwBfPOKJGwt9c,11215
+alita_sdk/tools/zephyr_enterprise/api_wrapper.py,sha256=km2TYNu5ppRkspN1PyYetu6iBGj-xKVIwGHty1r_wAw,11552
 alita_sdk/tools/zephyr_enterprise/zephyr_enterprise.py,sha256=hV9LIrYfJT6oYp-ZfQR0YHflqBFPsUw2Oc55HwK0H48,6809
 alita_sdk/tools/zephyr_essential/__init__.py,sha256=BpRicA38JI9YEDuim1acZRGcDw-ZYTdP4Ewbiju37h8,3761
-alita_sdk/tools/zephyr_essential/api_wrapper.py,sha256=TpNov35XPgjM9eymCEFqv22mbpdVvLMBTb9WVqUcvNA,36795
+alita_sdk/tools/zephyr_essential/api_wrapper.py,sha256=WZvtC_CLVZxwsLWITOGZpFqPWp3JgzCVtYmmq__vU5c,37237
 alita_sdk/tools/zephyr_essential/client.py,sha256=bfNcUKNqj9MFWTludGbbqD4qZlxrBaC2JtWsCfZMqSY,9722
 alita_sdk/tools/zephyr_scale/__init__.py,sha256=imuHOqdyOqtxQObeBZfyFvKPXfKVNuYfwKn2c9jJyeo,4299
-alita_sdk/tools/zephyr_scale/api_wrapper.py,sha256=JAeWf-RXohsxheUpT0iMDClc_izj-zxMwafXCW4jtC0,78015
+alita_sdk/tools/zephyr_scale/api_wrapper.py,sha256=HOt9ShtJI_1tVPcwd3Rwk-VS0SMLqcPNYbN1wqfeuhc,78330
 alita_sdk/tools/zephyr_squad/__init__.py,sha256=0AI_j27xVO5Gk5HQMFrqPTd4uvuVTpiZUicBrdfEpKg,2796
 alita_sdk/tools/zephyr_squad/api_wrapper.py,sha256=kmw_xol8YIYFplBLWTqP_VKPRhL_1ItDD0_vXTe_UuI,14906
 alita_sdk/tools/zephyr_squad/zephyr_squad_cloud_client.py,sha256=R371waHsms4sllHCbijKYs90C-9Yu0sSR3N4SUfQOgU,5066
-alita_sdk-0.3.256.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
-alita_sdk-0.3.256.dist-info/METADATA,sha256=Pz8NwBvPngJixME90E16UprodmhH7mPX8aR1oTzAHno,18897
-alita_sdk-0.3.256.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-alita_sdk-0.3.256.dist-info/top_level.txt,sha256=0vJYy5p_jK6AwVb1aqXr7Kgqgk3WDtQ6t5C-XI9zkmg,10
-alita_sdk-0.3.256.dist-info/RECORD,,
+alita_sdk-0.3.258.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+alita_sdk-0.3.258.dist-info/METADATA,sha256=2j5Uqki3lIbS41bXwlA4RiIVvtsBROzSHIoisJhN1gc,18897
+alita_sdk-0.3.258.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+alita_sdk-0.3.258.dist-info/top_level.txt,sha256=0vJYy5p_jK6AwVb1aqXr7Kgqgk3WDtQ6t5C-XI9zkmg,10
+alita_sdk-0.3.258.dist-info/RECORD,,