alita-sdk 0.3.317__py3-none-any.whl → 0.3.319__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
--- a/alita_sdk/runtime/langchain/document_loaders/AlitaExcelLoader.py
+++ b/alita_sdk/runtime/langchain/document_loaders/AlitaExcelLoader.py
@@ -17,6 +17,7 @@ import pandas as pd
 from json import loads
 
 from langchain_core.tools import ToolException
+from langchain_core.documents import Document
 from .AlitaTableLoader import AlitaTableLoader
 
 
@@ -65,6 +66,20 @@ class AlitaExcelLoader(AlitaTableLoader):
         else:
             return df.to_string(index=False)
 
+    def load(self) -> list:
+        docs = []
+        content_per_sheet = self.get_content()
+        for sheet_name, content in content_per_sheet.items():
+            metadata = {
+                "source": f'{self.file_path}:{sheet_name}',
+                "sheet_name": sheet_name,
+                "file_type": "excel",
+                "excel_by_sheets": self.excel_by_sheets,
+                "return_type": self.return_type,
+            }
+            docs.append(Document(page_content=f"Sheet: {sheet_name}\n {str(content)}", metadata=metadata))
+        return docs
+
     def read(self, lazy: bool = False):
         df = pd.read_excel(self.file_path, sheet_name=None)
         docs = []
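
The new load() override emits one Document per worksheet instead of delegating to the row-based reader. A minimal usage sketch (the workbook path is hypothetical, and the constructor keywords mirror the kwargs wired into loaders_map below):

    from alita_sdk.runtime.langchain.document_loaders.AlitaExcelLoader import AlitaExcelLoader

    # Hypothetical workbook; excel_by_sheets/raw_content/cleanse follow the loaders_map kwargs.
    loader = AlitaExcelLoader(file_path="metrics.xlsx", excel_by_sheets=True,
                              raw_content=True, cleanse=False)
    for doc in loader.load():
        print(doc.metadata["sheet_name"], doc.metadata["source"])  # e.g. Sheet1 metrics.xlsx:Sheet1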
--- a/alita_sdk/runtime/langchain/document_loaders/AlitaTableLoader.py
+++ b/alita_sdk/runtime/langchain/document_loaders/AlitaTableLoader.py
@@ -17,8 +17,6 @@ from langchain_core.documents import Document
 from typing import List, Optional, Iterator
 from json import dumps
 from .utils import cleanse_data
-from ..tools.log import print_log
-
 
 class AlitaTableLoader(BaseLoader):
     def __init__(self,
@@ -74,7 +72,6 @@ class AlitaTableLoader(BaseLoader):
                 docs.append(Document(page_content=row, metadata=metadata))
                 continue
             if self.json_documents:
-                # print_log(row)
                 metadata['columns'] = list(row.keys())
                 metadata['og_data'] = dumps(row)
                 docs.append(Document(page_content=self.row_processor(row), metadata=metadata))
--- a/alita_sdk/runtime/langchain/document_loaders/constants.py
+++ b/alita_sdk/runtime/langchain/document_loaders/constants.py
@@ -122,6 +122,7 @@ loaders_map = {
         'class': AlitaExcelLoader,
         'is_multimodal_processing': False,
         'kwargs': {
+            'excel_by_sheets': True,
             'raw_content': True,
             'cleanse': False
         },
@@ -131,6 +132,7 @@ loaders_map = {
         'class': AlitaExcelLoader,
         'is_multimodal_processing': False,
         'kwargs': {
+            'excel_by_sheets': True,
             'raw_content': True,
             'cleanse': False
         },
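
For context, a loaders_map entry is consumed roughly like this (a sketch; the '.xlsx' key and file path are assumptions, not taken from this diff):

    entry = loaders_map['.xlsx']                                   # assumed extension key
    loader = entry['class'](file_path='book.xlsx', **entry['kwargs'])
    docs = loader.load()                                           # one Document per sheet with excel_by_sheets=True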
--- a/alita_sdk/runtime/tools/vectorstore_base.py
+++ b/alita_sdk/runtime/tools/vectorstore_base.py
@@ -10,7 +10,6 @@ from pydantic import BaseModel, model_validator, Field
 
 from alita_sdk.tools.elitea_base import BaseToolApiWrapper
 from alita_sdk.tools.vector_adapters.VectorStoreAdapter import VectorStoreAdapterFactory
-from ..langchain.tools.vector import VectorAdapter
 from ..utils.logging import dispatch_custom_event
 
 logger = getLogger(__name__)
@@ -138,10 +137,7 @@ class VectorStoreWrapperBase(BaseToolApiWrapper):
     vectorstore_params: Optional[dict] = None
     max_docs_per_add: int = 100
     dataset: str = None
-    embedding: Any = None
     vectorstore: Any = None
-    # Review usage of old adapter
-    vectoradapter: Any = None
     pg_helper: Any = None
     embeddings: Any = None
     # New adapter for vector database operations
@@ -152,17 +148,13 @@ class VectorStoreWrapperBase(BaseToolApiWrapper):
     def validate_toolkit(cls, values):
         from ..langchain.interfaces.llm_processor import get_vectorstore
         logger.debug(f"Validating toolkit: {values}")
-        if 'vectorstore_params' in values:
-            values["dataset"] = values.get('vectorstore_params').get('collection_name')
-        if values.get('embedding_model'):
-            values['embeddings'] = values['alita'].get_embeddings(values['embedding_model'])
+        values["dataset"] = values.get('collection_name')
+
+        if values.get('alita') and values.get('embedding_model'):
+            values['embeddings'] = values.get('alita').get_embeddings(values.get('embedding_model'))
+
         if values.get('vectorstore_type') and values.get('vectorstore_params') and values.get('embedding_model'):
             values['vectorstore'] = get_vectorstore(values['vectorstore_type'], values['vectorstore_params'], embedding_func=values['embeddings'])
-            values['vectoradapter'] = VectorAdapter(
-                vectorstore=values['vectorstore'],
-                embeddings=values['embeddings'],
-                quota_params=None,
-            )
         # Initialize the new vector adapter
         values['vector_adapter'] = VectorStoreAdapterFactory.create_adapter(values['vectorstore_type'])
         logger.debug(f"Vectorstore wrapper initialized: {values}")
@@ -223,8 +215,6 @@ class VectorStoreWrapperBase(BaseToolApiWrapper):
         self._log_data("Cleaning index before re-indexing all documents. Previous index will be removed", tool_name="index_documents")
         try:
             self._clean_collection(collection_suffix)
-            self.vectoradapter.persist()
-            self.vectoradapter.vacuum()
             self._log_data("Previous index has been removed",
                            tool_name="index_documents")
         except Exception as e:
@@ -238,7 +228,6 @@ class VectorStoreWrapperBase(BaseToolApiWrapper):
             logger.warning(f"Document is missing required metadata field 'id' or 'updated_on': {doc.metadata}")
 
         logger.debug(f"Indexing documents: {documents}")
-        logger.debug(self.vectoradapter)
 
         # if collection_suffix is provided, add it to metadata of each document
         if collection_suffix:
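
The net effect of the validator change, distilled into a standalone sketch (the helper name is hypothetical; values is the pre-init dict pydantic passes to the validator):

    def resolve_dataset_and_embeddings(values: dict) -> dict:
        # dataset now comes from the top-level collection_name, not vectorstore_params
        values["dataset"] = values.get("collection_name")
        # embeddings are resolved only when both the client and the model name are present
        if values.get("alita") and values.get("embedding_model"):
            values["embeddings"] = values["alita"].get_embeddings(values["embedding_model"])
        return values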
--- a/alita_sdk/tools/ado/test_plan/test_plan_wrapper.py
+++ b/alita_sdk/tools/ado/test_plan/test_plan_wrapper.py
@@ -411,11 +411,11 @@ class TestPlanApiWrapper(NonCodeIndexerToolkit):
     def _index_tool_params(self):
         """Return the parameters for indexing data."""
         return {
+            'chunking_tool': (Literal['html', ''], Field(description="Name of chunking tool", default='html')),
             "plan_id": (int, Field(description="ID of the test plan for which test cases are requested")),
             "suite_ids": (Optional[List[int]], Field(description='List of test suite IDs for which test cases are requested '
                                                                  '(can be empty for all suites indexing from the plan). '
                                                                  'Example: [2, 23]', default=[])),
-            'chunking_tool':(Literal['html', ''], Field(description="Name of chunking tool", default='html'))
         }
 
     @extend_with_parent_available_tools
--- a/alita_sdk/tools/ado/wiki/ado_wrapper.py
+++ b/alita_sdk/tools/ado/wiki/ado_wrapper.py
@@ -248,9 +248,9 @@ class AzureDevOpsApiWrapper(NonCodeIndexerToolkit):
     def _index_tool_params(self):
         """Return the parameters for indexing data."""
         return {
+            'chunking_tool': (Literal['markdown', ''], Field(description="Name of chunking tool", default='markdown')),
             "wiki_identifier": (str, Field(description="Wiki identifier to index, e.g., 'ABCProject.wiki'")),
             'title_contains': (Optional[str], Field(default=None, description="Optional filter to include only pages with titles containing exact this string")),
-            'chunking_tool':(Literal['markdown', ''], Field(description="Name of chunking tool", default='markdown'))
         }
 
     @extend_with_parent_available_tools
--- a/alita_sdk/tools/base_indexer_toolkit.py
+++ b/alita_sdk/tools/base_indexer_toolkit.py
@@ -5,7 +5,7 @@ from typing import Any, Optional, List, Literal, Dict, Generator
 from langchain_core.documents import Document
 from pydantic import create_model, Field, SecretStr
 
-from .utils.content_parser import file_extension_by_chunker, process_content_by_type
+from .utils.content_parser import file_extension_by_chunker, process_document_by_type
 from .vector_adapters.VectorStoreAdapter import VectorStoreAdapterFactory
 from ..runtime.tools.vectorstore_base import VectorStoreWrapperBase
 from ..runtime.utils.utils import IndexerKeywords
@@ -87,10 +87,10 @@ BaseStepbackSearchParams = create_model(
 BaseIndexDataParams = create_model(
     "indexData",
     __base__=BaseIndexParams,
-    progress_step=(Optional[int], Field(default=10, ge=0, le=100,
-                                        description="Optional step size for progress reporting during indexing")),
     clean_index=(Optional[bool], Field(default=False,
                                        description="Optional flag to enforce clean existing index before indexing new data")),
+    progress_step=(Optional[int], Field(default=10, ge=0, le=100,
+                                        description="Optional step size for progress reporting during indexing")),
     chunking_config=(Optional[dict], Field(description="Chunking tool configuration", default_factory=dict)),
 )
@@ -102,7 +102,6 @@ class BaseIndexerToolkit(VectorStoreWrapperBase):
 
     connection_string: Optional[SecretStr] = None
     collection_name: Optional[str] = None
-    _embedding: Optional[Any] = None
     alita: Any = None  # Elitea client, if available
 
     def __init__(self, **kwargs):
@@ -116,7 +115,6 @@ class BaseIndexerToolkit(VectorStoreWrapperBase):
         if connection_string:
             # Initialize vectorstore params only if connection string is provided
             kwargs['vectorstore_params'] = VectorStoreAdapterFactory.create_adapter(vectorstore_type).get_vectorstore_params(collection_name, connection_string)
-            kwargs['_embedding'] = kwargs.get('alita').get_embeddings(kwargs.get('embedding_model'))
         super().__init__(**kwargs)
 
     def _index_tool_params(self, **kwargs) -> dict[str, tuple[type, Field]]:
@@ -181,14 +179,14 @@ class BaseIndexerToolkit(VectorStoreWrapperBase):
 
         if chunking_config is None:
             chunking_config = {}
-        chunking_config['embedding'] = self._embedding
+        chunking_config['embedding'] = self.embeddings
         chunking_config['llm'] = self.llm
 
         for document in documents:
             if content_type := document.metadata.get(IndexerKeywords.CONTENT_FILE_NAME.value, None):
                 # apply parsing based on content type and chunk if chunker was applied to parent doc
                 content = document.metadata.pop(IndexerKeywords.CONTENT_IN_BYTES.value, None)
-                yield from process_content_by_type(
+                yield from process_document_by_type(
                     document=document,
                     content=content,
                     extension_source=content_type, llm=self.llm, chunking_config=chunking_config)
@@ -199,7 +197,7 @@ class BaseIndexerToolkit(VectorStoreWrapperBase):
                 continue
             # apply parsing based on content type resolved from chunking_tool
             content_type = file_extension_by_chunker(chunking_tool)
-            yield from process_content_by_type(
+            yield from process_document_by_type(
                 document=document,
                 content=content_in_bytes,
                 extension_source=content_type, llm=self.llm, chunking_config=chunking_config)
--- a/alita_sdk/tools/carrier/backend_reports_tool.py
+++ b/alita_sdk/tools/carrier/backend_reports_tool.py
@@ -154,6 +154,7 @@ class CreateExcelReportTool(BaseTool):
             "tp_threshold": (int, Field(default=None, description="Throughput threshold")),
             "rt_threshold": (int, Field(default=None, description="Response time threshold")),
             "er_threshold": (int, Field(default=None, description="Error rate threshold")),
+            "include_group_pauses": (bool, Field(default=False, description="Include group pauses in Gatling Excel report")),
         }
     )
 
@@ -200,6 +201,7 @@ class CreateExcelReportTool(BaseTool):
             "tp_threshold": 10,
             "rt_threshold": 500,
             "er_threshold": 5,
+            "include_group_pauses": False,
         }
 
     def _request_parameter_confirmation(self, default_parameters):
@@ -217,7 +219,8 @@ class CreateExcelReportTool(BaseTool):
         excel_report_file_name = f'/tmp/reports_test_results_{report["build_id"]}_excel_report.xlsx'
         bucket_name = report["name"].replace("_", "").replace(" ", "").lower()
 
-        result_stats_j = self._parse_report(test_log_file_path, lg_type, parameters["think_time"], is_absolute_file_path=True)
+        result_stats_j = self._parse_report(test_log_file_path, lg_type, parameters["think_time"],
+                                            parameters["include_group_pauses"], is_absolute_file_path=True)
         calc_thr_j = self._calculate_thresholds(result_stats_j, parameters)
 
         return self._generate_and_upload_report(
@@ -233,21 +236,22 @@ class CreateExcelReportTool(BaseTool):
         excel_report_file_name = f'{file_path}_{current_date}.xlsx'
         bucket_name = bucket
 
-        result_stats_j = self._parse_report(file_path, lg_type, parameters["think_time"], is_absolute_file_path=True)
+        result_stats_j = self._parse_report(file_path, lg_type, parameters["think_time"],
+                                            parameters["include_group_pauses"], is_absolute_file_path=True)
         calc_thr_j = self._calculate_thresholds(result_stats_j, parameters)
 
         return self._generate_and_upload_report(
             result_stats_j, carrier_report, calc_thr_j, parameters, excel_report_file_name, bucket_name, file_path
         )
 
-    def _parse_report(self, file_path, lg_type, think_time, is_absolute_file_path=False):
+    def _parse_report(self, file_path, lg_type, think_time, include_group_pauses, is_absolute_file_path=False):
         """Parse the report based on its type."""
         if lg_type == "gatling":
             if is_absolute_file_path:
                 report_file = file_path
             else:
                 report_file = get_latest_log_file(file_path, "simulation.log")
-            parser = GatlingReportParser(report_file, think_time)
+            parser = GatlingReportParser(report_file, include_group_pauses, think_time)
             result_stats_j = parser.parse()
             result_stats_j["requests"].update(result_stats_j["groups"])
         elif lg_type == "jmeter":
--- a/alita_sdk/tools/carrier/excel_reporter.py
+++ b/alita_sdk/tools/carrier/excel_reporter.py
@@ -118,9 +118,10 @@ class JMeterReportParser(PerformanceReportParser):
 
 class GatlingReportParser(PerformanceReportParser):
 
-    def __init__(self, log_file: str, think_times="5,0-10,0"):
+    def __init__(self, log_file: str, include_group_pauses, think_times="5,0-10,0"):
         self.calculated_think_time = think_times
         self.log_file = log_file
+        self.include_group_pauses = include_group_pauses
 
     @staticmethod
     def convert_timestamp_to_datetime(timestamp: int) -> datetime:
@@ -210,7 +211,7 @@ class GatlingReportParser(PerformanceReportParser):
                     ramp_end = self.convert_timestamp_to_datetime(int(line.split('\t')[3]))
 
                 elif line.startswith('GROUP'):
-                    self.parse_group_line(groups, line)
+                    self.parse_group_line(groups, line, self.include_group_pauses)
             except FileNotFoundError as e:
                 print(f"File not found: {e}")
                 raise
@@ -242,11 +243,14 @@ class GatlingReportParser(PerformanceReportParser):
             requests[request_name].append((response_time, status))
 
     @staticmethod
-    def parse_group_line(groups, line):
+    def parse_group_line(groups, line, include_group_pauses):
         parts = line.split('\t')
         if len(parts) >= 6:
             group_name = parts[1]
-            response_time = int(parts[4])
+            if include_group_pauses:
+                response_time = int(parts[3]) - int(parts[2])
+            else:
+                response_time = int(parts[4])
             status = parts[5].strip()
             groups[group_name].append((response_time, status))
--- a/alita_sdk/tools/confluence/api_wrapper.py
+++ b/alita_sdk/tools/confluence/api_wrapper.py
@@ -1656,8 +1656,7 @@ class ConfluenceAPIWrapper(NonCodeIndexerToolkit):
 
     @extend_with_parent_available_tools
     def get_available_tools(self):
-        # Confluence-specific tools
-        confluence_tools = [
+        return [
             {
                 "name": "create_page",
                 "ref": self.create_page,
@@ -1772,9 +1771,4 @@ class ConfluenceAPIWrapper(NonCodeIndexerToolkit):
                 "description": self.get_page_attachments.__doc__,
                 "args_schema": GetPageAttachmentsInput,
             }
-        ]
-
-        # Add standardized vector search tools from base class
-        vector_search_tools = self._get_vector_search_tools()
-
-        return confluence_tools + vector_search_tools
+        ]
--- a/alita_sdk/tools/elitea_base.py
+++ b/alita_sdk/tools/elitea_base.py
@@ -39,10 +39,13 @@ BaseIndexParams = create_model(
 BaseCodeIndexParams = create_model(
     "BaseCodeIndexParams",
     collection_suffix=(str, Field(description="Suffix for collection name (max 7 characters) used to separate datasets", min_length=1, max_length=7)),
+    clean_index=(Optional[bool], Field(default=False, description="Optional flag to enforce clean existing index before indexing new data")),
+    progress_step=(Optional[int], Field(default=5, ge=0, le=100,
+                                        description="Optional step size for progress reporting during indexing")),
     branch=(Optional[str], Field(description="Branch to index files from. Defaults to active branch if None.", default=None)),
     whitelist=(Optional[List[str]], Field(description='File extensions or paths to include. Defaults to all files if None. Example: ["*.md", "*.java"]', default=None)),
     blacklist=(Optional[List[str]], Field(description='File extensions or paths to exclude. Defaults to no exclusions if None. Example: ["*.md", "*.java"]', default=None)),
-    clean_index=(Optional[bool], Field(default=False, description="Optional flag to enforce clean existing index before indexing new data")),
+
 )
 
 RemoveIndexParams = create_model(
@@ -115,10 +118,10 @@ BaseStepbackSearchParams = create_model(
 BaseIndexDataParams = create_model(
     "indexData",
     __base__=BaseIndexParams,
-    progress_step=(Optional[int], Field(default=5, ge=0, le=100,
-                                        description="Optional step size for progress reporting during indexing")),
     clean_index=(Optional[bool], Field(default=False,
                                        description="Optional flag to enforce clean existing index before indexing new data")),
+    progress_step=(Optional[int], Field(default=5, ge=0, le=100,
+                                        description="Optional step size for progress reporting during indexing")),
     chunking_tool=(Literal[None, 'markdown', 'statistical', 'proposal'], Field(description="Name of chunking tool", default=None)),
     chunking_config=(Optional[dict], Field(description="Chunking tool configuration", default_factory=dict)),
 )
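
These schema hunks all use pydantic's create_model with (type, Field(...)) tuples, and field order is the order the generated schema presents its fields, which is presumably why chunking_tool moves to the top here and in the wrappers below. A minimal standalone sketch of the pattern:

    from typing import Optional
    from pydantic import create_model, Field

    Params = create_model(
        "Params",
        chunking_tool=(str, Field(description="Name of chunking tool", default="json")),
        query=(Optional[str], Field(description="Search query", default=None)),
    )
    print(Params().chunking_tool)  # "json"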
--- a/alita_sdk/tools/jira/api_wrapper.py
+++ b/alita_sdk/tools/jira/api_wrapper.py
@@ -17,7 +17,7 @@ from ..llm.img_utils import ImageDescriptionCache
 from ..non_code_indexer_toolkit import NonCodeIndexerToolkit
 from ..utils import is_cookie_token, parse_cookie_string
 from ..utils.available_tools_decorator import extend_with_parent_available_tools
-from ..utils.content_parser import file_extension_by_chunker
+from ..utils.content_parser import file_extension_by_chunker, process_content_by_type
 from ...runtime.utils.utils import IndexerKeywords
 
 logger = logging.getLogger(__name__)
@@ -132,6 +132,13 @@ GetRemoteLinks = create_model(
     jira_issue_key=(str, Field(description="Jira issue key from which remote links will be extracted, e.g. TEST-1234"))
 )
 
+GetIssueAttachments = create_model(
+    "GetIssueAttachments",
+    jira_issue_key=(str, Field(description="Jira issue key from which attachments will be extracted, e.g. TEST-1234")),
+    attachment_pattern=(Optional[str], Field(description="Regex pattern to filter attachment filenames. If not provided,"
+                                                         " all attachments will be processed", default=None))
+)
+
 ListCommentsInput = create_model(
     "ListCommentsInputModel",
     issue_key=(str, Field(description="The issue key of the Jira issue from which comments will be extracted, e.g. 'TEST-123'."))
@@ -725,19 +732,37 @@ class JiraApiWrapper(NonCodeIndexerToolkit):
             logger.error(f"Error listing Jira projects: {stacktrace}")
             return ToolException(f"Error listing Jira projects: {stacktrace}")
 
-    def get_attachments_content(self, jira_issue_key: str):
-        """ Extract content of all attachments related to specified Jira issue key.
-        NOTE: only parsable attachments will be considered """
+    def get_attachments_content(self, jira_issue_key: str, attachment_pattern: Optional[str] = None):
+        """ Extract the content of all attachments related to a specified Jira issue key.
+        NOTE: only parsable attachments will be considered
+        Args:
+            jira_issue_key: The key of the Jira issue, e.g. "TEST-123"
+            attachment_pattern: Optional regex pattern to filter attachments by filename.
+                If provided, only attachments with filenames matching this pattern will be processed.
+        Returns:
+            A string containing the content of all relevant attachments, separated by double newlines.
+        """
 
         attachment_data = []
         attachments = self._client.get_attachments_ids_from_issue(issue=jira_issue_key)
         for attachment in attachments:
+            if attachment_pattern and not re.search(attachment_pattern, attachment['filename']):
+                logger.info(f"Skipping attachment {attachment['filename']} as it does not match pattern {attachment_pattern}")
+                continue
+            logger.info(f"Processing attachment {attachment['filename']} with ID {attachment['attachment_id']}")
             if self.api_version == "3":
                 attachment_data.append(self._client.get_attachment_content(attachment['attachment_id']))
             else:
-                extracted_attachment = self._client.get_attachment(attachment_id=attachment['attachment_id'])
-                if extracted_attachment['mimeType'] in SUPPORTED_ATTACHMENT_MIME_TYPES:
-                    attachment_data.append(self._extract_attachment_content(extracted_attachment))
+                try:
+                    attachment_content = self._client.get_attachment_content(attachment['attachment_id'])
+                except Exception as e:
+                    logger.error(
+                        f"Failed to download attachment {attachment['filename']} for issue {jira_issue_key}: {str(e)}")
+                    attachment_content = self._client.get(
+                        path=f"secure/attachment/{attachment['attachment_id']}/{attachment['filename']}", not_json_response=True)
+                content_docs = process_content_by_type(attachment_content, attachment['filename'], llm=self.llm)
+                attachment_data.append("filename: " + attachment['filename'] + "\ncontent: " + str([doc.page_content for doc in content_docs]))
+
         return "\n\n".join(attachment_data)
 
     def execute_generic_rq(self, method: str, relative_url: str, params: Optional[str] = "", *args):
@@ -1632,7 +1657,7 @@ class JiraApiWrapper(NonCodeIndexerToolkit):
             {
                 "name": "get_attachments_content",
                 "description": self.get_attachments_content.__doc__,
-                "args_schema": GetRemoteLinks,
+                "args_schema": GetIssueAttachments,
                 "ref": self.get_attachments_content,
             },
             {
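
Usage sketch for the extended tool (the wrapper instance, issue key, and pattern are illustrative):

    # Only *.pdf attachments of TEST-123 are parsed; non-matching files are skipped with a log line.
    text = jira_wrapper.get_attachments_content(
        jira_issue_key="TEST-123",
        attachment_pattern=r"\.pdf$",
    )
    # For api_version != "3", each entry looks like "filename: spec.pdf\ncontent: [...]"
    print(text.split("\n\n")[0])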
--- a/alita_sdk/tools/utils/content_parser.py
+++ b/alita_sdk/tools/utils/content_parser.py
@@ -201,17 +201,40 @@ def load_content_from_bytes(file_content: bytes, extension: str = None, loader_e
         if temp_file_path and os.path.exists(temp_file_path):
             os.remove(temp_file_path)
 
-def process_content_by_type(document: Document, content, extension_source: str, llm = None, chunking_config=None) -> Generator[Document, None, None]:
+def process_document_by_type(content, extension_source: str, document: Document = None, llm = None, chunking_config=None) \
+        -> Generator[Document, None, None]:
+    """Process the content of a file based on its type using a configured loader, considering the originating document."""
+    try:
+        chunks = process_content_by_type(content, extension_source, llm, chunking_config)
+    except Exception as e:
+        msg = f"Error during content processing for file {extension_source}:\n{e}"
+        logger.warning(msg)
+        yield Document(
+            page_content=msg,
+            metadata={**document.metadata, 'chunk_id': 1}
+        )
+        return
+    for chunk in chunks:
+        yield Document(
+            page_content=sanitize_for_postgres(chunk.page_content),
+            metadata={**document.metadata, **chunk.metadata}
+        )
+
+
+def process_content_by_type(content, filename: str, llm=None, chunking_config=None) -> \
+        Generator[Document, None, None]:
+    """Process the content of a file based on its type using a configured loader."""
     temp_file_path = None
     try:
-        extension = "." + extension_source.split('.')[-1].lower()
+        extension = "." + filename.split('.')[-1].lower()
 
         with tempfile.NamedTemporaryFile(mode='w+b', suffix=extension, delete=False) as temp_file:
             temp_file_path = temp_file.name
             if content is None:
-                logger.warning(f"'{IndexerKeywords.CONTENT_IN_BYTES.value}' ie expected but not found in document metadata.")
+                logger.warning(
+                    f"'{IndexerKeywords.CONTENT_IN_BYTES.value}' is expected but not found in document metadata.")
                 return
-
+
             temp_file.write(content)
             temp_file.flush()
@@ -238,21 +261,7 @@ def process_content_by_type(document: Document, content, extension_source: str,
                 loader_kwargs.pop(LoaderProperties.PROMPT_DEFAULT.value)
                 loader_kwargs[LoaderProperties.PROMPT.value] = image_processing_prompt
             loader = loader_cls(file_path=temp_file_path, **loader_kwargs)
-            try:
-                chunks = loader.load()
-            except Exception as e:
-                msg = f"Error during content for file {temp_file_path}:\n{e}"
-                logger.warning(msg)
-                yield Document(
-                    page_content=msg,
-                    metadata={**document.metadata, 'chunk_id': 1}
-                )
-                return
-            for chunk in chunks:
-                yield Document(
-                    page_content=sanitize_for_postgres(chunk.page_content),
-                    metadata={**document.metadata, **chunk.metadata}
-                )
+            return loader.load()
     finally:
         if temp_file_path and os.path.exists(temp_file_path):
             os.remove(temp_file_path)
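
After the split, process_content_by_type() takes raw bytes plus a filename and returns the loader's chunks, while process_document_by_type() keeps the old generator contract (parent-document metadata merge, postgres sanitisation, error Documents). A sketch of the byte-level entry point (bytes and filename are illustrative, and assume a loader is configured for the extension):

    raw = b"# Release notes\n\n* per-sheet Excel loading"
    docs = process_content_by_type(raw, "notes.md", llm=None)
    for doc in docs:
        print(doc.page_content[:40])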
--- a/alita_sdk/tools/xray/api_wrapper.py
+++ b/alita_sdk/tools/xray/api_wrapper.py
@@ -561,6 +561,8 @@ class XrayApiWrapper(NonCodeIndexerToolkit):
 
     def _index_tool_params(self, **kwargs) -> dict[str, tuple[type, Field]]:
         return {
+            'chunking_tool': (Literal['json', ''],
+                              Field(description="Name of chunking tool for base document", default='json')),
             'jql': (Optional[str], Field(description="""JQL query for searching test cases in Xray.
 
             Standard JQL query syntax for filtering Xray test cases. Examples:
@@ -595,8 +597,6 @@ class XrayApiWrapper(NonCodeIndexerToolkit):
             'skip_attachment_extensions': (Optional[List[str]], Field(
                 description="List of file extensions to skip when processing attachments (e.g., ['.exe', '.zip', '.bin'])",
                 default=None)),
-            'chunking_tool': (Literal['json', ''],
-                              Field(description="Name of chunking tool for base document", default='json')),
         }
 
     def _get_tests_direct(self, jql: str) -> List[Dict]:
--- a/alita_sdk/tools/zephyr_enterprise/api_wrapper.py
+++ b/alita_sdk/tools/zephyr_enterprise/api_wrapper.py
@@ -155,8 +155,9 @@ class ZephyrApiWrapper(NonCodeIndexerToolkit):
         Returns a list of fields for index_data args schema.
         """
         return {
-            "zql": (str, Field(description=zql_description, examples=["folder=\"TestToolkit\"", "name~\"TestToolkit5\""])),
-            'chunking_tool': (Literal['json', ''], Field(description="Name of chunking tool", default='json'))
+            'chunking_tool': (Literal['json', ''], Field(description="Name of chunking tool", default='json')),
+            "zql": (str, Field(description=zql_description, examples=["folder=\"TestToolkit\"", "name~\"TestToolkit5\""]))
+
         }
 
     def _base_loader(self, zql: str, **kwargs) -> Generator[Document, None, None]:
--- a/alita_sdk/tools/zephyr_scale/api_wrapper.py
+++ b/alita_sdk/tools/zephyr_scale/api_wrapper.py
@@ -1198,6 +1198,7 @@ class ZephyrScaleApiWrapper(NonCodeIndexerToolkit):
         Returns a list of fields for index_data args schema.
        """
         return {
+            "chunking_tool": (Literal['json', ''], Field(description="Name of chunking tool", default='json')),
             "project_key": (str, Field(description="Jira project key filter")),
             "jql": (str, Field(description="""JQL-like query for searching test cases.
             Supported fields:
@@ -1216,7 +1217,6 @@ class ZephyrScaleApiWrapper(NonCodeIndexerToolkit):
             Example:
             'folder = "Authentication" AND label in ("Smoke", "Critical") AND text ~ "login" AND orderBy = "name" AND orderDirection = "ASC"'
             """)),
-            'chunking_tool': (Literal['json', ''], Field(description="Name of chunking tool", default='json'))
         }
 
     def _base_loader(self, project_key: str, jql: str, **kwargs) -> Generator[Document, None, None]:
--- a/alita_sdk-0.3.317.dist-info/METADATA
+++ b/alita_sdk-0.3.319.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: alita_sdk
-Version: 0.3.317
+Version: 0.3.319
 Summary: SDK for building langchain agents using resources from Alita
 Author-email: Artem Rozumenko <artyom.rozumenko@gmail.com>, Mikalai Biazruchka <mikalai_biazruchka@epam.com>, Roman Mitusov <roman_mitusov@epam.com>, Ivan Krakhmaliuk <lifedj27@gmail.com>, Artem Dubrovskiy <ad13box@gmail.com>
 License-Expression: Apache-2.0
--- a/alita_sdk-0.3.317.dist-info/RECORD
+++ b/alita_sdk-0.3.319.dist-info/RECORD
@@ -56,7 +56,7 @@ alita_sdk/runtime/langchain/document_loaders/AlitaCSVLoader.py,sha256=3ne-a5qIkB
 alita_sdk/runtime/langchain/document_loaders/AlitaConfluenceLoader.py,sha256=NzpoL4C7UzyzLouTSL_xTQw70MitNt-WZz3Eyl7QkTA,8294
 alita_sdk/runtime/langchain/document_loaders/AlitaDirectoryLoader.py,sha256=fKezkgvIcLG7S2PVJp1a8sZd6C4XQKNZKAFC87DbQts,7003
 alita_sdk/runtime/langchain/document_loaders/AlitaDocxMammothLoader.py,sha256=9hi5eHgDIfa9wBWqTuwMM6D6W64czrDTfZl_htooe8Y,5943
-alita_sdk/runtime/langchain/document_loaders/AlitaExcelLoader.py,sha256=-c6pTWM_UvogBqrx8ndtB2uFQAk2O4-NU1frmsKLdro,3098
+alita_sdk/runtime/langchain/document_loaders/AlitaExcelLoader.py,sha256=CKFL13TXCyqQa_fl6EmR6q9O9cT_w0tQzoQQFmfCpi8,3712
 alita_sdk/runtime/langchain/document_loaders/AlitaGitRepoLoader.py,sha256=5WXGcyHraSVj3ANHj_U6X4EDikoekrIYtS0Q_QqNIng,2608
 alita_sdk/runtime/langchain/document_loaders/AlitaImageLoader.py,sha256=QwgBJE-BvOasjgT1hYHZc0MP0F_elirUjSzKixoM6fY,6610
 alita_sdk/runtime/langchain/document_loaders/AlitaJSONLoader.py,sha256=Nav2cgCQKOHQi_ZgYYn_iFdP_Os56KVlVR5nHGXecBc,3445
@@ -66,11 +66,11 @@ alita_sdk/runtime/langchain/document_loaders/AlitaPDFLoader.py,sha256=toXdQbT9Tu
 alita_sdk/runtime/langchain/document_loaders/AlitaPowerPointLoader.py,sha256=SKAAPo3DfMtRPxICKrPzlXXkC5RfaeiRj7lejLXTi7o,2337
 alita_sdk/runtime/langchain/document_loaders/AlitaPythonLoader.py,sha256=m_7aq-aCFVb4vXZsJNinfN1hAuyy_S0ylRknv_ahxDc,340
 alita_sdk/runtime/langchain/document_loaders/AlitaQtestLoader.py,sha256=CUVVnisxm7b5yZWV6rn0Q3MEEaO1GWNcfnz5yWz8T0k,13283
-alita_sdk/runtime/langchain/document_loaders/AlitaTableLoader.py,sha256=o0SRFPZ-VskltgThVRX80rT19qtB4gPzxED9SENTNWo,4145
+alita_sdk/runtime/langchain/document_loaders/AlitaTableLoader.py,sha256=nI8lyndVZxVAxbjX3yiqyuFQKFE8MjLPyYSyqRWxHqQ,4077
 alita_sdk/runtime/langchain/document_loaders/AlitaTextLoader.py,sha256=uNcV0En49_0u0RYB1sP1XfNspT2Xc5CacuJr9Jqv79Q,2972
 alita_sdk/runtime/langchain/document_loaders/ImageParser.py,sha256=gao5yCCKdDai_Gx7YdEx5U6oMyJYzn69eYmEvWLh-fc,656
 alita_sdk/runtime/langchain/document_loaders/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-alita_sdk/runtime/langchain/document_loaders/constants.py,sha256=d7Yu39NFW1tnPzgQ-YoXrXO7R5o0ZBqSQe3ScwegAsw,7161
+alita_sdk/runtime/langchain/document_loaders/constants.py,sha256=H5oKHDHZw1L0x9sGiYGLmkGM6dH460bvuj-EycmpR6E,7235
 alita_sdk/runtime/langchain/document_loaders/utils.py,sha256=9xghESf3axBbwxATyVuS0Yu-TWe8zWZnXgCD1ZVyNW0,2414
 alita_sdk/runtime/langchain/interfaces/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 alita_sdk/runtime/langchain/interfaces/kwextractor.py,sha256=kSJA9L8g8UArmHu7Bd9dIO0Rrq86JPUb8RYNlnN68FQ,3072
@@ -121,7 +121,7 @@ alita_sdk/runtime/tools/prompt.py,sha256=nJafb_e5aOM1Rr3qGFCR-SKziU9uCsiP2okIMs9
 alita_sdk/runtime/tools/router.py,sha256=wCvZjVkdXK9dMMeEerrgKf5M790RudH68pDortnHSz0,1517
 alita_sdk/runtime/tools/tool.py,sha256=lE1hGi6qOAXG7qxtqxarD_XMQqTghdywf261DZawwno,5631
 alita_sdk/runtime/tools/vectorstore.py,sha256=UFBAJ_N2F6uB0xxIy1VMx581tHco-xDl7v2Hl6u0Xzw,34468
-alita_sdk/runtime/tools/vectorstore_base.py,sha256=FXgPONBfUTKo1bV6P4uZ1JqYHkC3Ch8toR38eavEYPQ,27787
+alita_sdk/runtime/tools/vectorstore_base.py,sha256=qmidtzD2SFZdUDrmKNkUWnuBHqJDjPbQQJ_z1TqIl0g,27283
 alita_sdk/runtime/utils/AlitaCallback.py,sha256=E4LlSBuCHWiUq6W7IZExERHZY0qcmdjzc_rJlF2iQIw,7356
 alita_sdk/runtime/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 alita_sdk/runtime/utils/constants.py,sha256=Xntx1b_uxUzT4clwqHA_U6K8y5bBqf_4lSQwXdcWrp4,13586
@@ -133,17 +133,17 @@ alita_sdk/runtime/utils/toolkit_runtime.py,sha256=MU63Fpxj0b5_r1IUUc0Q3-PN9VwL7r
 alita_sdk/runtime/utils/toolkit_utils.py,sha256=I9QFqnaqfVgN26LUr6s3XlBlG6y0CoHURnCzG7XcwVs,5311
 alita_sdk/runtime/utils/utils.py,sha256=VXNLsdeTmf6snn9EtUyobv4yL-xzLhUcH8P_ORMifYc,675
 alita_sdk/tools/__init__.py,sha256=jUj1ztC2FbkIUB-YYmiqaz_rqW7Il5kWzDPn1mJmj5w,10545
-alita_sdk/tools/base_indexer_toolkit.py,sha256=3mQcSkc1w07PpRBdBW6oEmEGC_qFhBHHbWQCOVxS2-E,20363
-alita_sdk/tools/elitea_base.py,sha256=3o59N8qcyguqakVIuPVgqIizvoURLQ7HOMPH8du2RZo,34558
+alita_sdk/tools/base_indexer_toolkit.py,sha256=IKtnJVX27yPu8bBWgbl-5YfUQy4pJPnBoRBFLkqagoc,20228
+alita_sdk/tools/elitea_base.py,sha256=up3HshASSDfjlHV_HPrs1aD4JIwwX0Ug26WGTzgIYvY,34724
 alita_sdk/tools/non_code_indexer_toolkit.py,sha256=v9uq1POE1fQKCd152mbqDtF-HSe0qoDj83k4E5LAkMI,1080
 alita_sdk/tools/ado/__init__.py,sha256=NnNYpNFW0_N_v1td_iekYOoQRRB7PIunbpT2f9ZFJM4,1201
 alita_sdk/tools/ado/utils.py,sha256=PTCludvaQmPLakF2EbCGy66Mro4-rjDtavVP-xcB2Wc,1252
 alita_sdk/tools/ado/repos/__init__.py,sha256=rR-c40Pw_WpQeOXtEuS-COvgRUs1_cTkcJfHlK09N88,5339
 alita_sdk/tools/ado/repos/repos_wrapper.py,sha256=y1wJZNhuoWaSkFsGU5Pct3LClc1xfgmgBy2u_dgBF-4,49769
 alita_sdk/tools/ado/test_plan/__init__.py,sha256=qANjEjxwEEs0aTarH9LaQ745Dv_6iRdXxMKP8RDoeGs,5344
-alita_sdk/tools/ado/test_plan/test_plan_wrapper.py,sha256=68m5lmThE8Hhi-5PpCf96lVQyLHN-_xc7pJrnBQbaqQ,21948
+alita_sdk/tools/ado/test_plan/test_plan_wrapper.py,sha256=tdacv-myDDiMiEaWTpBuWde9-Zkphtva0IKHIbVf3FI,21950
 alita_sdk/tools/ado/wiki/__init__.py,sha256=ela6FOuT1fqN3FvHGBflzAh16HS1SSPsJYS2SldRX7A,5272
-alita_sdk/tools/ado/wiki/ado_wrapper.py,sha256=o5Zp0z9NvEZ5Ua1BgDZbFJocmZfoYNDiRRXaMrffGP4,14956
+alita_sdk/tools/ado/wiki/ado_wrapper.py,sha256=dGzhJO2PlrIL74sMRQg9iNNnNAopngCT_rsgLxvKYUY,14958
 alita_sdk/tools/ado/work_item/__init__.py,sha256=jml_zSkdC7gdGIoX2ZqRgDb45nhT3ZWzNsZ0II0iVJI,5474
 alita_sdk/tools/ado/work_item/ado_wrapper.py,sha256=TXl3V46SgGafQaxQKSTD3AN4MoQ3yNuQBwgVZ6-JhSk,28315
 alita_sdk/tools/advanced_jira_mining/__init__.py,sha256=GdrFVsyG8h43BnQwBKUtZ_ca_0atP1rQ_0adkd9mssc,4703
@@ -170,13 +170,13 @@ alita_sdk/tools/browser/utils.py,sha256=zFbpsTw593TRqxZ8bu5RQ7PHzZTfZjxvH5IGgRRj
 alita_sdk/tools/browser/wiki.py,sha256=Qh3HBFd4dkS2VavXbFJOm4b8SjVSIe5xSD7CY1vEkKE,1126
 alita_sdk/tools/carrier/__init__.py,sha256=Ove5wAXBxyLS5F5ZxgydV2xKZJIR3OoMB5fMkn8jNUc,4296
 alita_sdk/tools/carrier/api_wrapper.py,sha256=tP7oR_U0HX1rxqat0Jkz6oh3RB9BEr1ESKQ9J8OWDcE,9093
-alita_sdk/tools/carrier/backend_reports_tool.py,sha256=8qnHQVCuUErW6eCH2nLF4bUl1AFuMWm2GQnKvOhfUCs,13452
+alita_sdk/tools/carrier/backend_reports_tool.py,sha256=_lKCuXEzr6xVi83t37ZU8V-Xg-AojTXtPBBrPqtZAKg,13825
 alita_sdk/tools/carrier/backend_tests_tool.py,sha256=a6EivWZee8HVU2eXUM5NWS6oB3pt1-orxLz1YARrqHA,26572
 alita_sdk/tools/carrier/cancel_ui_test_tool.py,sha256=pD1sKEcZGBWJqFpgjeohMk93uuUPWruVJRPVVg90rpo,6438
 alita_sdk/tools/carrier/carrier_sdk.py,sha256=IJUbnxoJWzqgUpKobJFdNS-85KT35IYRSAz6I92kWJk,15161
 alita_sdk/tools/carrier/create_ui_excel_report_tool.py,sha256=8aSpkyIGXsOBTo8Ye_6p19v8OOl1y7QW47IJxZ6QDgM,20163
 alita_sdk/tools/carrier/create_ui_test_tool.py,sha256=knKvPOo9usI2XHqZtcbBEBzKwB9tS7GEl9KIX78vJiA,8184
-alita_sdk/tools/carrier/excel_reporter.py,sha256=fXptz7iaBDBcFSc8Ah8nZ9CSgugTONc5JMC1XcQEnfM,21487
+alita_sdk/tools/carrier/excel_reporter.py,sha256=ID9_rX6mJhAwSi-me0iQOWfkhvTyUXM61fiM1kP_b0Q,21736
 alita_sdk/tools/carrier/lighthouse_excel_reporter.py,sha256=mVuU63tl2n-Gntx9RuedjEU0U5AP1APKsSx1DvJs7wk,6684
 alita_sdk/tools/carrier/run_ui_test_tool.py,sha256=Wqfxi_jyOU6XxYGsTje2ftgm8O7PJRXRDHUwWcw8opM,26277
 alita_sdk/tools/carrier/tickets_tool.py,sha256=d-wFyFWWTvV01o-hyssb2S-oLnr51b6tlNTUqA_CohY,8099
@@ -227,7 +227,7 @@ alita_sdk/tools/code/loaders/codesearcher.py,sha256=XoXXZtIQZhvjIwZlnl_4wVGHC-3s
 alita_sdk/tools/code/sonar/__init__.py,sha256=iPqj2PnUY4-btJjaDeWIPdn-c9L_uCr_qOoP_uwRoXw,3360
 alita_sdk/tools/code/sonar/api_wrapper.py,sha256=nNqxcWN_6W8c0ckj-Er9HkNuAdgQLoWBXh5UyzNutis,2653
 alita_sdk/tools/confluence/__init__.py,sha256=zRnPBM1c7VTRTS955HNc7AEGV5t8ACc2f9wBXmmeXao,6845
-alita_sdk/tools/confluence/api_wrapper.py,sha256=HIP9zZMne9BDSnwpHKwpl-rW0MWv9jeA6APll0DNS6w,87863
+alita_sdk/tools/confluence/api_wrapper.py,sha256=4lXffj9i-rTDoa4CGPkA3yuc5vgztypnTVOWCNIBPoY,87634
 alita_sdk/tools/confluence/loader.py,sha256=4bf5qrJMEiJzuZp2NlxO2XObLD1w7fxss_WyMUpe8sg,9290
 alita_sdk/tools/confluence/utils.py,sha256=Lxo6dBD0OlvM4o0JuK6qeB_4LV9BptiwJA9e1vqNcDw,435
 alita_sdk/tools/custom_open_api/__init__.py,sha256=9aT5SPNPWcJC6jMZEM-3rUCXVULj_3-qJLQKmnreKNo,2537
@@ -260,7 +260,7 @@ alita_sdk/tools/google/bigquery/tool.py,sha256=Esf9Hsp8I0e7-5EdkFqQ-bid0cfrg-bfS
 alita_sdk/tools/google_places/__init__.py,sha256=QtmBCI0bHDK79u4hsCSWFcUihu-h4EmPSh9Yll7zz3w,3590
 alita_sdk/tools/google_places/api_wrapper.py,sha256=7nZly6nk4f4Tm7s2MVdnnwlb-1_WHRrDhyjDiqoyPjA,4674
 alita_sdk/tools/jira/__init__.py,sha256=G-9qnOYKFWM_adG0QFexh5-2pj_WaxIxxZanB3ARFqI,6339
-alita_sdk/tools/jira/api_wrapper.py,sha256=T_YW328n59IS1S4-_5seTizlQkCGi9fTGhW34AqM-sA,79184
+alita_sdk/tools/jira/api_wrapper.py,sha256=kYd6edldpjaEQNKvJjVaPQt8A22K6f3DMZVJk6Tylsc,80916
 alita_sdk/tools/keycloak/__init__.py,sha256=0WB9yXMUUAHQRni1ghDEmd7GYa7aJPsTVlZgMCM9cQ0,3050
 alita_sdk/tools/keycloak/api_wrapper.py,sha256=cOGr0f3S3-c6tRDBWI8wMnetjoNSxiV5rvC_0VHb8uw,3100
 alita_sdk/tools/llm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -327,11 +327,11 @@ alita_sdk/tools/testrail/__init__.py,sha256=Xg4nVjULL_D8JpIXLYXppnwUfGF4-lguFwKH
 alita_sdk/tools/testrail/api_wrapper.py,sha256=PKhtf04C6PFDexGCAJm-hjA9Gpu4crx6EXKT5K-b_Pk,32985
 alita_sdk/tools/utils/__init__.py,sha256=155xepXPr4OEzs2Mz5YnjXcBpxSv1X2eznRUVoPtyK0,3268
 alita_sdk/tools/utils/available_tools_decorator.py,sha256=IbrdfeQkswxUFgvvN7-dyLMZMyXLiwvX7kgi3phciCk,273
-alita_sdk/tools/utils/content_parser.py,sha256=8cq-6dHYp-jEYU1Yt3P6rVedVgVaOfgnNIyQ2qU4Yrk,13722
+alita_sdk/tools/utils/content_parser.py,sha256=Z6YQ__GONqRE0Ifhhhjc3A2SgYsxbTejBUNHw6MCdJA,14057
 alita_sdk/tools/vector_adapters/VectorStoreAdapter.py,sha256=ypBEAkFRGHv5edW0N9rdo1yKurNGQ4pRVEWtrN_7SeA,17656
 alita_sdk/tools/vector_adapters/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 alita_sdk/tools/xray/__init__.py,sha256=eOMWP8VamFbbJgt1xrGpGPqB9ByOTA0Cd3LCaETzGk4,4376
-alita_sdk/tools/xray/api_wrapper.py,sha256=6eHYsIgbuplO7hFOrcsHE6rdE8Mpk4smIF_irglvcKw,30157
+alita_sdk/tools/xray/api_wrapper.py,sha256=uj5kzUgPdo_Oct9WCNMOpkb6o_3L7J4LZrEGtrwYMmc,30157
 alita_sdk/tools/yagmail/__init__.py,sha256=c4Qn3em0tLxzRmFKpzbBgY9W2EnOoKf0azoDJHng5CY,2208
 alita_sdk/tools/yagmail/yagmail_wrapper.py,sha256=SKoGVd1X4Ew3ad5tOdtPoY00M6jStNdT3q7GXEjQc5g,1952
 alita_sdk/tools/zephyr/Zephyr.py,sha256=ODZbg9Aw0H0Rbv-HcDXLI4KHbPiLDHoteDofshw9A_k,1508
@@ -339,18 +339,18 @@ alita_sdk/tools/zephyr/__init__.py,sha256=i5j-nHEZ5bRSTCM844rM8QAU2jLwhSYVQlJB3s
 alita_sdk/tools/zephyr/api_wrapper.py,sha256=lJCYPG03ej0qgdpLflnS7LFB4HSAfGzIvTjAJt07CQs,6244
 alita_sdk/tools/zephyr/rest_client.py,sha256=7vSD3oYIX-3KbAFed-mphSQif_VRuXrq5O07ryNQ7Pk,6208
 alita_sdk/tools/zephyr_enterprise/__init__.py,sha256=zmUZ7q3ACkHsOrfDO5UdbBPQ-0rWBK_Z9g8cmU1tpsk,4271
-alita_sdk/tools/zephyr_enterprise/api_wrapper.py,sha256=I0Ih3HBHV-C7lsI9YBTYWiLJ4r9A0bYBitFdX8Rdf0k,12116
+alita_sdk/tools/zephyr_enterprise/api_wrapper.py,sha256=FSqW3FKqoOkp3wsCz9_4wLem0KAqbbmqIyIA8vh0Cp4,12117
 alita_sdk/tools/zephyr_enterprise/zephyr_enterprise.py,sha256=hV9LIrYfJT6oYp-ZfQR0YHflqBFPsUw2Oc55HwK0H48,6809
 alita_sdk/tools/zephyr_essential/__init__.py,sha256=osbquER9AHy4QBuFplyHfcIMoyjphyOdkhGy243oVso,4079
 alita_sdk/tools/zephyr_essential/api_wrapper.py,sha256=SMru8XGTiHFYRmeRC4A3v9YC4n8Ggb7PkhO3N_bOCHs,46876
 alita_sdk/tools/zephyr_essential/client.py,sha256=fX_n2pACNzDiHxry3F4Xc6baUdw7d9U2m4srbfBv5Eg,9882
 alita_sdk/tools/zephyr_scale/__init__.py,sha256=pV8Uo3HT3DsZXqAHZ_bVGTwmryekcRnJYxqBs2DaTUU,4388
-alita_sdk/tools/zephyr_scale/api_wrapper.py,sha256=A6CUEKjENt3mZlPU9lai88WV9esCDHuaoR_CtBupkDw,78618
+alita_sdk/tools/zephyr_scale/api_wrapper.py,sha256=kT0TbmMvuKhDUZc0i7KO18O38JM9SDU7-rUzYEPDuso,78619
 alita_sdk/tools/zephyr_squad/__init__.py,sha256=0ne8XLJEQSLOWfzd2HdnqOYmQlUliKHbBED5kW_Vias,2895
 alita_sdk/tools/zephyr_squad/api_wrapper.py,sha256=kmw_xol8YIYFplBLWTqP_VKPRhL_1ItDD0_vXTe_UuI,14906
 alita_sdk/tools/zephyr_squad/zephyr_squad_cloud_client.py,sha256=R371waHsms4sllHCbijKYs90C-9Yu0sSR3N4SUfQOgU,5066
-alita_sdk-0.3.317.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
-alita_sdk-0.3.317.dist-info/METADATA,sha256=LoQ5c7CeCcoUxpYNpw90MobeOL_ppKCaxYzMaZeh44s,18897
-alita_sdk-0.3.317.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-alita_sdk-0.3.317.dist-info/top_level.txt,sha256=0vJYy5p_jK6AwVb1aqXr7Kgqgk3WDtQ6t5C-XI9zkmg,10
-alita_sdk-0.3.317.dist-info/RECORD,,
+alita_sdk-0.3.319.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+alita_sdk-0.3.319.dist-info/METADATA,sha256=UnHRV2v-GuuFB5dxevPtsCNlvqenMZXte0wsRZakGjc,18897
+alita_sdk-0.3.319.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+alita_sdk-0.3.319.dist-info/top_level.txt,sha256=0vJYy5p_jK6AwVb1aqXr7Kgqgk3WDtQ6t5C-XI9zkmg,10
+alita_sdk-0.3.319.dist-info/RECORD,,