alita-sdk 0.3.317__py3-none-any.whl → 0.3.318__py3-none-any.whl

This diff compares the published contents of two package versions as released to a supported public registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in that registry.

alita_sdk/runtime/tools/vectorstore_base.py

@@ -10,7 +10,6 @@ from pydantic import BaseModel, model_validator, Field
 
 from alita_sdk.tools.elitea_base import BaseToolApiWrapper
 from alita_sdk.tools.vector_adapters.VectorStoreAdapter import VectorStoreAdapterFactory
-from ..langchain.tools.vector import VectorAdapter
 from ..utils.logging import dispatch_custom_event
 
 logger = getLogger(__name__)
@@ -138,10 +137,7 @@ class VectorStoreWrapperBase(BaseToolApiWrapper):
     vectorstore_params: Optional[dict] = None
     max_docs_per_add: int = 100
     dataset: str = None
-    embedding: Any = None
     vectorstore: Any = None
-    # Review usage of old adapter
-    vectoradapter: Any = None
     pg_helper: Any = None
     embeddings: Any = None
     # New adapter for vector database operations
@@ -152,17 +148,13 @@ class VectorStoreWrapperBase(BaseToolApiWrapper):
     def validate_toolkit(cls, values):
         from ..langchain.interfaces.llm_processor import get_vectorstore
         logger.debug(f"Validating toolkit: {values}")
-        if 'vectorstore_params' in values:
-            values["dataset"] = values.get('vectorstore_params').get('collection_name')
-        if values.get('embedding_model'):
-            values['embeddings'] = values['alita'].get_embeddings(values['embedding_model'])
+        values["dataset"] = values.get('collection_name')
+
+        if values.get('alita') and values.get('embedding_model'):
+            values['embeddings'] = values.get('alita').get_embeddings(values.get('embedding_model'))
+
         if values.get('vectorstore_type') and values.get('vectorstore_params') and values.get('embedding_model'):
             values['vectorstore'] = get_vectorstore(values['vectorstore_type'], values['vectorstore_params'], embedding_func=values['embeddings'])
-            values['vectoradapter'] = VectorAdapter(
-                vectorstore=values['vectorstore'],
-                embeddings=values['embeddings'],
-                quota_params=None,
-            )
             # Initialize the new vector adapter
             values['vector_adapter'] = VectorStoreAdapterFactory.create_adapter(values['vectorstore_type'])
         logger.debug(f"Vectorstore wrapper initialized: {values}")
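
For context, the rewritten validator drops the legacy VectorAdapter entirely: dataset is now read from a top-level collection_name value instead of vectorstore_params, and embeddings are resolved only when both an alita client and an embedding_model are supplied. A minimal runnable sketch of that resolution order (AlitaStub is a stand-in for the real client, not part of the SDK):

    # Sketch of the 0.3.318 validation flow; AlitaStub is a stand-in client.
    class AlitaStub:
        def get_embeddings(self, model_name):
            return f"<embeddings for {model_name}>"

    def validate_toolkit(values: dict) -> dict:
        # dataset now comes from the top-level collection_name
        values["dataset"] = values.get("collection_name")
        # embeddings are resolved only when a client and a model are both present
        if values.get("alita") and values.get("embedding_model"):
            values["embeddings"] = values["alita"].get_embeddings(values["embedding_model"])
        return values

    values = validate_toolkit({"collection_name": "docs", "alita": AlitaStub(),
                               "embedding_model": "text-embedding-3-small"})
    print(values["dataset"], values["embeddings"])
    # docs <embeddings for text-embedding-3-small>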
@@ -223,8 +215,6 @@ class VectorStoreWrapperBase(BaseToolApiWrapper):
             self._log_data("Cleaning index before re-indexing all documents. Previous index will be removed", tool_name="index_documents")
             try:
                 self._clean_collection(collection_suffix)
-                self.vectoradapter.persist()
-                self.vectoradapter.vacuum()
                 self._log_data("Previous index has been removed",
                                tool_name="index_documents")
             except Exception as e:
@@ -238,7 +228,6 @@ class VectorStoreWrapperBase(BaseToolApiWrapper):
                 logger.warning(f"Document is missing required metadata field 'id' or 'updated_on': {doc.metadata}")
 
         logger.debug(f"Indexing documents: {documents}")
-        logger.debug(self.vectoradapter)
 
         # if collection_suffix is provided, add it to metadata of each document
         if collection_suffix:

alita_sdk/tools/base_indexer_toolkit.py

@@ -5,7 +5,7 @@ from typing import Any, Optional, List, Literal, Dict, Generator
 from langchain_core.documents import Document
 from pydantic import create_model, Field, SecretStr
 
-from .utils.content_parser import file_extension_by_chunker, process_content_by_type
+from .utils.content_parser import file_extension_by_chunker, process_document_by_type
 from .vector_adapters.VectorStoreAdapter import VectorStoreAdapterFactory
 from ..runtime.tools.vectorstore_base import VectorStoreWrapperBase
 from ..runtime.utils.utils import IndexerKeywords
@@ -102,7 +102,6 @@ class BaseIndexerToolkit(VectorStoreWrapperBase):
 
     connection_string: Optional[SecretStr] = None
     collection_name: Optional[str] = None
-    _embedding: Optional[Any] = None
     alita: Any = None # Elitea client, if available
 
     def __init__(self, **kwargs):
@@ -116,7 +115,6 @@ class BaseIndexerToolkit(VectorStoreWrapperBase):
         if connection_string:
             # Initialize vectorstore params only if connection string is provided
             kwargs['vectorstore_params'] = VectorStoreAdapterFactory.create_adapter(vectorstore_type).get_vectorstore_params(collection_name, connection_string)
-            kwargs['_embedding'] = kwargs.get('alita').get_embeddings(kwargs.get('embedding_model'))
         super().__init__(**kwargs)
 
     def _index_tool_params(self, **kwargs) -> dict[str, tuple[type, Field]]:
@@ -181,14 +179,14 @@ class BaseIndexerToolkit(VectorStoreWrapperBase):
 
         if chunking_config is None:
             chunking_config = {}
-        chunking_config['embedding'] = self._embedding
+        chunking_config['embedding'] = self.embeddings
        chunking_config['llm'] = self.llm
 
         for document in documents:
             if content_type := document.metadata.get(IndexerKeywords.CONTENT_FILE_NAME.value, None):
                 # apply parsing based on content type and chunk if chunker was applied to parent doc
                 content = document.metadata.pop(IndexerKeywords.CONTENT_IN_BYTES.value, None)
-                yield from process_content_by_type(
+                yield from process_document_by_type(
                     document=document,
                     content=content,
                     extension_source=content_type, llm=self.llm, chunking_config=chunking_config)
@@ -199,7 +197,7 @@ class BaseIndexerToolkit(VectorStoreWrapperBase):
                 continue
             # apply parsing based on content type resolved from chunking_tool
             content_type = file_extension_by_chunker(chunking_tool)
-            yield from process_content_by_type(
+            yield from process_document_by_type(
                 document=document,
                 content=content_in_bytes,
                 extension_source=content_type, llm=self.llm, chunking_config=chunking_config)
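
Both dispatch sites above now call the renamed process_document_by_type and, per the earlier hunk, seed chunking_config['embedding'] from the shared self.embeddings field rather than the removed private _embedding. A rough sketch of the per-document loop under those assumptions; the helper body and the metadata keys "content_file_name" and "content_in_bytes" are stand-ins for the real IndexerKeywords values:

    from typing import Generator
    from langchain_core.documents import Document

    # Stand-in for the renamed content_parser helper.
    def process_document_by_type(document, content, extension_source,
                                 llm=None, chunking_config=None) -> Generator[Document, None, None]:
        yield Document(page_content=str(content), metadata=document.metadata)

    def chunk_documents(documents, embeddings, llm, chunking_config=None):
        if chunking_config is None:
            chunking_config = {}
        chunking_config["embedding"] = embeddings  # was self._embedding in 0.3.317
        chunking_config["llm"] = llm
        for document in documents:
            if content_type := document.metadata.get("content_file_name"):
                content = document.metadata.pop("content_in_bytes", None)
                yield from process_document_by_type(
                    document=document, content=content, extension_source=content_type,
                    llm=llm, chunking_config=chunking_config)

    doc = Document(page_content="", metadata={"content_file_name": "spec.pdf",
                                              "content_in_bytes": b"raw"})
    print(list(chunk_documents([doc], embeddings="<emb>", llm=None)))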

alita_sdk/tools/carrier/backend_reports_tool.py

@@ -154,6 +154,7 @@ class CreateExcelReportTool(BaseTool):
             "tp_threshold": (int, Field(default=None, description="Throughput threshold")),
             "rt_threshold": (int, Field(default=None, description="Response time threshold")),
             "er_threshold": (int, Field(default=None, description="Error rate threshold")),
+            "include_group_pauses": (bool, Field(default=False, description="Include group pauses in Gatling Excel report")),
         }
     )
 
@@ -200,6 +201,7 @@ class CreateExcelReportTool(BaseTool):
             "tp_threshold": 10,
             "rt_threshold": 500,
             "er_threshold": 5,
+            "include_group_pauses": False,
         }
 
     def _request_parameter_confirmation(self, default_parameters):
@@ -217,7 +219,8 @@ class CreateExcelReportTool(BaseTool):
         excel_report_file_name = f'/tmp/reports_test_results_{report["build_id"]}_excel_report.xlsx'
         bucket_name = report["name"].replace("_", "").replace(" ", "").lower()
 
-        result_stats_j = self._parse_report(test_log_file_path, lg_type, parameters["think_time"], is_absolute_file_path=True)
+        result_stats_j = self._parse_report(test_log_file_path, lg_type, parameters["think_time"],
+                                            parameters["include_group_pauses"], is_absolute_file_path=True)
         calc_thr_j = self._calculate_thresholds(result_stats_j, parameters)
 
         return self._generate_and_upload_report(
@@ -233,21 +236,22 @@ class CreateExcelReportTool(BaseTool):
         excel_report_file_name = f'{file_path}_{current_date}.xlsx'
         bucket_name = bucket
 
-        result_stats_j = self._parse_report(file_path, lg_type, parameters["think_time"], is_absolute_file_path=True)
+        result_stats_j = self._parse_report(file_path, lg_type, parameters["think_time"],
+                                            parameters["include_group_pauses"], is_absolute_file_path=True)
         calc_thr_j = self._calculate_thresholds(result_stats_j, parameters)
 
         return self._generate_and_upload_report(
             result_stats_j, carrier_report, calc_thr_j, parameters, excel_report_file_name, bucket_name, file_path
         )
 
-    def _parse_report(self, file_path, lg_type, think_time, is_absolute_file_path=False):
+    def _parse_report(self, file_path, lg_type, think_time, include_group_pauses, is_absolute_file_path=False):
         """Parse the report based on its type."""
         if lg_type == "gatling":
             if is_absolute_file_path:
                 report_file = file_path
             else:
                 report_file = get_latest_log_file(file_path, "simulation.log")
-            parser = GatlingReportParser(report_file, think_time)
+            parser = GatlingReportParser(report_file, include_group_pauses, think_time)
             result_stats_j = parser.parse()
             result_stats_j["requests"].update(result_stats_j["groups"])
         elif lg_type == "jmeter":
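
Note the changed positional order: GatlingReportParser now takes include_group_pauses second, ahead of the defaulted think_times, so any positional caller outside this file would need the same update as _parse_report. A hypothetical illustration with a stub that mirrors only the 0.3.318 signature, not the real parser:

    # 0.3.317 positional call was GatlingReportParser(report_file, think_time);
    # in 0.3.318 the boolean slots in before the defaulted think_times.
    class GatlingReportParser:
        def __init__(self, log_file: str, include_group_pauses, think_times="5,0-10,0"):
            self.log_file = log_file
            self.include_group_pauses = include_group_pauses
            self.calculated_think_time = think_times

    parser = GatlingReportParser("/tmp/simulation.log", False, "5,0-10,0")
    print(parser.include_group_pauses, parser.calculated_think_time)  # False 5,0-10,0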

alita_sdk/tools/carrier/excel_reporter.py

@@ -118,9 +118,10 @@ class JMeterReportParser(PerformanceReportParser):
 
 class GatlingReportParser(PerformanceReportParser):
 
-    def __init__(self, log_file: str, think_times="5,0-10,0"):
+    def __init__(self, log_file: str, include_group_pauses, think_times="5,0-10,0"):
         self.calculated_think_time = think_times
         self.log_file = log_file
+        self.include_group_pauses = include_group_pauses
 
     @staticmethod
     def convert_timestamp_to_datetime(timestamp: int) -> datetime:
@@ -210,7 +211,7 @@ class GatlingReportParser(PerformanceReportParser):
                         ramp_end = self.convert_timestamp_to_datetime(int(line.split('\t')[3]))
 
                     elif line.startswith('GROUP'):
-                        self.parse_group_line(groups, line)
+                        self.parse_group_line(groups, line, self.include_group_pauses)
         except FileNotFoundError as e:
             print(f"File not found: {e}")
             raise
@@ -242,11 +243,14 @@ class GatlingReportParser(PerformanceReportParser):
             requests[request_name].append((response_time, status))
 
     @staticmethod
-    def parse_group_line(groups, line):
+    def parse_group_line(groups, line, include_group_pauses):
         parts = line.split('\t')
         if len(parts) >= 6:
             group_name = parts[1]
-            response_time = int(parts[4])
+            if include_group_pauses:
+                response_time = int(parts[3]) - int(parts[2])
+            else:
+                response_time = int(parts[4])
             status = parts[5].strip()
             groups[group_name].append((response_time, status))
 
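The two response-time definitions differ as follows: parts[4] is the group's cumulated response time as Gatling logs it, with pauses excluded, while parts[3] - parts[2] is the end timestamp minus the start timestamp, i.e. the wall-clock duration including pauses between the group's requests. The field layout below is assumed from Gatling's tab-separated simulation.log GROUP records; a self-contained sketch:

    from collections import defaultdict

    def parse_group_line(groups, line, include_group_pauses):
        # GROUP <name> <startTimestamp> <endTimestamp> <cumulatedResponseTime> <status>
        # (field layout assumed from Gatling's simulation.log format)
        parts = line.split('\t')
        if len(parts) >= 6:
            group_name = parts[1]
            if include_group_pauses:
                # wall-clock duration: end - start, pauses included
                response_time = int(parts[3]) - int(parts[2])
            else:
                # Gatling's cumulated response time, pauses excluded
                response_time = int(parts[4])
            groups[group_name].append((response_time, parts[5].strip()))

    groups = defaultdict(list)
    sample = "GROUP\tcheckout\t1700000000000\t1700000012000\t3500\tOK\n"
    parse_group_line(groups, sample, include_group_pauses=True)   # appends (12000, 'OK')
    parse_group_line(groups, sample, include_group_pauses=False)  # appends (3500, 'OK')
    print(groups["checkout"])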

alita_sdk/tools/jira/api_wrapper.py

@@ -17,7 +17,7 @@ from ..llm.img_utils import ImageDescriptionCache
 from ..non_code_indexer_toolkit import NonCodeIndexerToolkit
 from ..utils import is_cookie_token, parse_cookie_string
 from ..utils.available_tools_decorator import extend_with_parent_available_tools
-from ..utils.content_parser import file_extension_by_chunker
+from ..utils.content_parser import file_extension_by_chunker, process_content_by_type
 from ...runtime.utils.utils import IndexerKeywords
 
 logger = logging.getLogger(__name__)
@@ -132,6 +132,13 @@ GetRemoteLinks = create_model(
     jira_issue_key=(str, Field(description="Jira issue key from which remote links will be extracted, e.g. TEST-1234"))
 )
 
+GetIssueAttachments = create_model(
+    "GetIssueAttachments",
+    jira_issue_key=(str, Field(description="Jira issue key from which remote links will be extracted, e.g. TEST-1234")),
+    attachment_pattern=(Optional[str], Field(description="Regex pattern to filter attachment filenames. If not provided,"
+                                                         " all attachments will be processed", default=None))
+)
+
 ListCommentsInput = create_model(
     "ListCommentsInputModel",
     issue_key=(str, Field(description="The issue key of the Jira issue from which comments will be extracted, e.g. 'TEST-123'."))
@@ -725,19 +732,37 @@ class JiraApiWrapper(NonCodeIndexerToolkit):
             logger.error(f"Error listing Jira projects: {stacktrace}")
             return ToolException(f"Error listing Jira projects: {stacktrace}")
 
-    def get_attachments_content(self, jira_issue_key: str):
-        """ Extract content of all attachments related to specified Jira issue key.
-        NOTE: only parsable attachments will be considered """
+    def get_attachments_content(self, jira_issue_key: str, attachment_pattern: Optional[str] = None):
+        """ Extract the content of all attachments related to a specified Jira issue key.
+        NOTE: only parsable attachments will be considered
+        Args:
+            jira_issue_key: The key of the Jira issue, e.g. "TEST-123
+            attachment_pattern: Optional regex pattern to filter attachments by filename.
+                If provided, only attachments with filenames matching this pattern will be processed.
+        Returns:
+            A string containing the content of all relevant attachments, separated by double newlines.
+        """
 
         attachment_data = []
         attachments = self._client.get_attachments_ids_from_issue(issue=jira_issue_key)
         for attachment in attachments:
+            if attachment_pattern and not re.search(attachment_pattern, attachment['filename']):
+                logger.info(f"Skipping attachment {attachment['filename']} as it does not match pattern {attachment_pattern}")
+                continue
+            logger.info(f"Processing attachment {attachment['filename']} with ID {attachment['attachment_id']}")
             if self.api_version == "3":
                 attachment_data.append(self._client.get_attachment_content(attachment['attachment_id']))
             else:
-                extracted_attachment = self._client.get_attachment(attachment_id=attachment['attachment_id'])
-                if extracted_attachment['mimeType'] in SUPPORTED_ATTACHMENT_MIME_TYPES:
-                    attachment_data.append(self._extract_attachment_content(extracted_attachment))
+                try:
+                    attachment_content = self._client.get_attachment_content(attachment['attachment_id'])
+                except Exception as e:
+                    logger.error(
+                        f"Failed to download attachment {attachment['filename']} for issue {jira_issue_key}: {str(e)}")
+                    attachment_content = self._client.get(
+                        path=f"secure/attachment/{attachment['attachment_id']}/{attachment['filename']}", not_json_response=True)
+                content_docs = process_content_by_type(attachment_content, attachment['filename'], llm=self.llm)
+                attachment_data.append("filename: " + attachment['filename'] + "\ncontent: " + str([doc.page_content for doc in content_docs]))
+
         return "\n\n".join(attachment_data)
 
     def execute_generic_rq(self, method: str, relative_url: str, params: Optional[str] = "", *args):
@@ -1632,7 +1657,7 @@ class JiraApiWrapper(NonCodeIndexerToolkit):
             {
                 "name": "get_attachments_content",
                 "description": self.get_attachments_content.__doc__,
-                "args_schema": GetRemoteLinks,
+                "args_schema": GetIssueAttachments,
                 "ref": self.get_attachments_content,
             },
             {
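
The tool keeps its registered name and docstring-derived description but now advertises the GetIssueAttachments schema, so agents can pass the optional regex. Since the filter uses re.search, the pattern may match anywhere in the filename. A small runnable demo of that matching behavior, plus a hypothetical tool call:

    import re

    # The new filter mirrors this check: re.search matches anywhere in the
    # filename, so anchor with ^ or $ to be strict.
    attachments = ["report.pdf", "notes.txt", "spec.docx"]
    pattern = r"\.(pdf|docx)$"
    kept = [name for name in attachments if re.search(pattern, name)]
    print(kept)  # ['report.pdf', 'spec.docx']

    # Hypothetical invocation, assuming a configured JiraApiWrapper named `jira`:
    # jira.get_attachments_content("TEST-1234", attachment_pattern=r"\.(pdf|docx)$")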

alita_sdk/tools/utils/content_parser.py

@@ -201,17 +201,40 @@ def load_content_from_bytes(file_content: bytes, extension: str = None, loader_e
         if temp_file_path and os.path.exists(temp_file_path):
             os.remove(temp_file_path)
 
-def process_content_by_type(document: Document, content, extension_source: str, llm = None, chunking_config=None) -> Generator[Document, None, None]:
+def process_document_by_type(content, extension_source: str, document: Document = None, llm = None, chunking_config=None) \
+        -> Generator[Document, None, None]:
+    """Process the content of a file based on its type using a configured loader cosidering the origin document."""
+    try:
+        chunks = process_content_by_type(content, extension_source, llm, chunking_config)
+    except Exception as e:
+        msg = f"Error during content for file {extension_source}:\n{e}"
+        logger.warning(msg)
+        yield Document(
+            page_content=msg,
+            metadata={**document.metadata, 'chunk_id': 1}
+        )
+        return
+    for chunk in chunks:
+        yield Document(
+            page_content=sanitize_for_postgres(chunk.page_content),
+            metadata={**document.metadata, **chunk.metadata}
+        )
+
+
+def process_content_by_type(content, filename: str, llm=None, chunking_config=None) -> \
+        Generator[Document, None, None]:
+    """Process the content of a file based on its type using a configured loader."""
     temp_file_path = None
     try:
-        extension = "." + extension_source.split('.')[-1].lower()
+        extension = "." + filename.split('.')[-1].lower()
 
         with tempfile.NamedTemporaryFile(mode='w+b', suffix=extension, delete=False) as temp_file:
             temp_file_path = temp_file.name
             if content is None:
-                logger.warning(f"'{IndexerKeywords.CONTENT_IN_BYTES.value}' ie expected but not found in document metadata.")
+                logger.warning(
+                    f"'{IndexerKeywords.CONTENT_IN_BYTES.value}' ie expected but not found in document metadata.")
                 return
-
+
             temp_file.write(content)
             temp_file.flush()
 
@@ -238,21 +261,7 @@ def process_content_by_type(document: Document, content, extension_source: str,
                 loader_kwargs.pop(LoaderProperties.PROMPT_DEFAULT.value)
                 loader_kwargs[LoaderProperties.PROMPT.value] = image_processing_prompt
             loader = loader_cls(file_path=temp_file_path, **loader_kwargs)
-        try:
-            chunks = loader.load()
-        except Exception as e:
-            msg = f"Error during content for file {temp_file_path}:\n{e}"
-            logger.warning(msg)
-            yield Document(
-                page_content=msg,
-                metadata={**document.metadata, 'chunk_id':1}
-            )
-            return
-        for chunk in chunks:
-            yield Document(
-                page_content=sanitize_for_postgres(chunk.page_content),
-                metadata={**document.metadata, **chunk.metadata}
-            )
+        return loader.load()
     finally:
         if temp_file_path and os.path.exists(temp_file_path):
             os.remove(temp_file_path)
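
The refactor splits the old generator in two: process_content_by_type now only resolves a loader from the file extension and returns loader.load() (a list, despite keeping the Generator annotation), while process_document_by_type wraps it to merge the parent document's metadata, sanitize text for Postgres, and convert loader failures into an error Document instead of raising. A self-contained sketch of that wrapper pattern, with stand-in loader and sanitizer functions that are not the real SDK helpers:

    from typing import Generator
    from langchain_core.documents import Document

    def sanitize_for_postgres(text: str) -> str:
        # stand-in: the real helper cleans characters Postgres rejects, e.g. NULs
        return text.replace("\x00", "")

    def load_chunks(content: bytes, filename: str):
        # stand-in for process_content_by_type's loader dispatch
        return [Document(page_content=content.decode(), metadata={"chunk_id": 1})]

    def process_document_by_type(content, extension_source, document: Document,
                                 **_) -> Generator[Document, None, None]:
        try:
            chunks = load_chunks(content, extension_source)
        except Exception as e:  # failures become an error chunk, not an exception
            yield Document(page_content=f"Error during content for file {extension_source}:\n{e}",
                           metadata={**document.metadata, "chunk_id": 1})
            return
        for chunk in chunks:
            # parent metadata first, chunk metadata wins on key conflicts
            yield Document(page_content=sanitize_for_postgres(chunk.page_content),
                           metadata={**document.metadata, **chunk.metadata})

    parent = Document(page_content="", metadata={"id": "DOC-1"})
    print(list(process_document_by_type(b"hello", "notes.txt", parent)))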

--- alita_sdk-0.3.317.dist-info/METADATA
+++ alita_sdk-0.3.318.dist-info/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: alita_sdk
-Version: 0.3.317
+Version: 0.3.318
 Summary: SDK for building langchain agents using resources from Alita
 Author-email: Artem Rozumenko <artyom.rozumenko@gmail.com>, Mikalai Biazruchka <mikalai_biazruchka@epam.com>, Roman Mitusov <roman_mitusov@epam.com>, Ivan Krakhmaliuk <lifedj27@gmail.com>, Artem Dubrovskiy <ad13box@gmail.com>
 License-Expression: Apache-2.0

--- alita_sdk-0.3.317.dist-info/RECORD
+++ alita_sdk-0.3.318.dist-info/RECORD

@@ -121,7 +121,7 @@ alita_sdk/runtime/tools/prompt.py,sha256=nJafb_e5aOM1Rr3qGFCR-SKziU9uCsiP2okIMs9
 alita_sdk/runtime/tools/router.py,sha256=wCvZjVkdXK9dMMeEerrgKf5M790RudH68pDortnHSz0,1517
 alita_sdk/runtime/tools/tool.py,sha256=lE1hGi6qOAXG7qxtqxarD_XMQqTghdywf261DZawwno,5631
 alita_sdk/runtime/tools/vectorstore.py,sha256=UFBAJ_N2F6uB0xxIy1VMx581tHco-xDl7v2Hl6u0Xzw,34468
-alita_sdk/runtime/tools/vectorstore_base.py,sha256=FXgPONBfUTKo1bV6P4uZ1JqYHkC3Ch8toR38eavEYPQ,27787
+alita_sdk/runtime/tools/vectorstore_base.py,sha256=qmidtzD2SFZdUDrmKNkUWnuBHqJDjPbQQJ_z1TqIl0g,27283
 alita_sdk/runtime/utils/AlitaCallback.py,sha256=E4LlSBuCHWiUq6W7IZExERHZY0qcmdjzc_rJlF2iQIw,7356
 alita_sdk/runtime/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 alita_sdk/runtime/utils/constants.py,sha256=Xntx1b_uxUzT4clwqHA_U6K8y5bBqf_4lSQwXdcWrp4,13586
@@ -133,7 +133,7 @@ alita_sdk/runtime/utils/toolkit_runtime.py,sha256=MU63Fpxj0b5_r1IUUc0Q3-PN9VwL7r
 alita_sdk/runtime/utils/toolkit_utils.py,sha256=I9QFqnaqfVgN26LUr6s3XlBlG6y0CoHURnCzG7XcwVs,5311
 alita_sdk/runtime/utils/utils.py,sha256=VXNLsdeTmf6snn9EtUyobv4yL-xzLhUcH8P_ORMifYc,675
 alita_sdk/tools/__init__.py,sha256=jUj1ztC2FbkIUB-YYmiqaz_rqW7Il5kWzDPn1mJmj5w,10545
-alita_sdk/tools/base_indexer_toolkit.py,sha256=3mQcSkc1w07PpRBdBW6oEmEGC_qFhBHHbWQCOVxS2-E,20363
+alita_sdk/tools/base_indexer_toolkit.py,sha256=zEOLb9vYmqyhj_A1sAaoQidlBS01MEA5gKUVcinhHtc,20228
 alita_sdk/tools/elitea_base.py,sha256=3o59N8qcyguqakVIuPVgqIizvoURLQ7HOMPH8du2RZo,34558
 alita_sdk/tools/non_code_indexer_toolkit.py,sha256=v9uq1POE1fQKCd152mbqDtF-HSe0qoDj83k4E5LAkMI,1080
 alita_sdk/tools/ado/__init__.py,sha256=NnNYpNFW0_N_v1td_iekYOoQRRB7PIunbpT2f9ZFJM4,1201
@@ -170,13 +170,13 @@ alita_sdk/tools/browser/utils.py,sha256=zFbpsTw593TRqxZ8bu5RQ7PHzZTfZjxvH5IGgRRj
 alita_sdk/tools/browser/wiki.py,sha256=Qh3HBFd4dkS2VavXbFJOm4b8SjVSIe5xSD7CY1vEkKE,1126
 alita_sdk/tools/carrier/__init__.py,sha256=Ove5wAXBxyLS5F5ZxgydV2xKZJIR3OoMB5fMkn8jNUc,4296
 alita_sdk/tools/carrier/api_wrapper.py,sha256=tP7oR_U0HX1rxqat0Jkz6oh3RB9BEr1ESKQ9J8OWDcE,9093
-alita_sdk/tools/carrier/backend_reports_tool.py,sha256=8qnHQVCuUErW6eCH2nLF4bUl1AFuMWm2GQnKvOhfUCs,13452
+alita_sdk/tools/carrier/backend_reports_tool.py,sha256=_lKCuXEzr6xVi83t37ZU8V-Xg-AojTXtPBBrPqtZAKg,13825
 alita_sdk/tools/carrier/backend_tests_tool.py,sha256=a6EivWZee8HVU2eXUM5NWS6oB3pt1-orxLz1YARrqHA,26572
 alita_sdk/tools/carrier/cancel_ui_test_tool.py,sha256=pD1sKEcZGBWJqFpgjeohMk93uuUPWruVJRPVVg90rpo,6438
 alita_sdk/tools/carrier/carrier_sdk.py,sha256=IJUbnxoJWzqgUpKobJFdNS-85KT35IYRSAz6I92kWJk,15161
 alita_sdk/tools/carrier/create_ui_excel_report_tool.py,sha256=8aSpkyIGXsOBTo8Ye_6p19v8OOl1y7QW47IJxZ6QDgM,20163
 alita_sdk/tools/carrier/create_ui_test_tool.py,sha256=knKvPOo9usI2XHqZtcbBEBzKwB9tS7GEl9KIX78vJiA,8184
-alita_sdk/tools/carrier/excel_reporter.py,sha256=fXptz7iaBDBcFSc8Ah8nZ9CSgugTONc5JMC1XcQEnfM,21487
+alita_sdk/tools/carrier/excel_reporter.py,sha256=ID9_rX6mJhAwSi-me0iQOWfkhvTyUXM61fiM1kP_b0Q,21736
 alita_sdk/tools/carrier/lighthouse_excel_reporter.py,sha256=mVuU63tl2n-Gntx9RuedjEU0U5AP1APKsSx1DvJs7wk,6684
 alita_sdk/tools/carrier/run_ui_test_tool.py,sha256=Wqfxi_jyOU6XxYGsTje2ftgm8O7PJRXRDHUwWcw8opM,26277
 alita_sdk/tools/carrier/tickets_tool.py,sha256=d-wFyFWWTvV01o-hyssb2S-oLnr51b6tlNTUqA_CohY,8099
@@ -260,7 +260,7 @@ alita_sdk/tools/google/bigquery/tool.py,sha256=Esf9Hsp8I0e7-5EdkFqQ-bid0cfrg-bfS
 alita_sdk/tools/google_places/__init__.py,sha256=QtmBCI0bHDK79u4hsCSWFcUihu-h4EmPSh9Yll7zz3w,3590
 alita_sdk/tools/google_places/api_wrapper.py,sha256=7nZly6nk4f4Tm7s2MVdnnwlb-1_WHRrDhyjDiqoyPjA,4674
 alita_sdk/tools/jira/__init__.py,sha256=G-9qnOYKFWM_adG0QFexh5-2pj_WaxIxxZanB3ARFqI,6339
-alita_sdk/tools/jira/api_wrapper.py,sha256=T_YW328n59IS1S4-_5seTizlQkCGi9fTGhW34AqM-sA,79184
+alita_sdk/tools/jira/api_wrapper.py,sha256=kYd6edldpjaEQNKvJjVaPQt8A22K6f3DMZVJk6Tylsc,80916
 alita_sdk/tools/keycloak/__init__.py,sha256=0WB9yXMUUAHQRni1ghDEmd7GYa7aJPsTVlZgMCM9cQ0,3050
 alita_sdk/tools/keycloak/api_wrapper.py,sha256=cOGr0f3S3-c6tRDBWI8wMnetjoNSxiV5rvC_0VHb8uw,3100
 alita_sdk/tools/llm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -327,7 +327,7 @@ alita_sdk/tools/testrail/__init__.py,sha256=Xg4nVjULL_D8JpIXLYXppnwUfGF4-lguFwKH
 alita_sdk/tools/testrail/api_wrapper.py,sha256=PKhtf04C6PFDexGCAJm-hjA9Gpu4crx6EXKT5K-b_Pk,32985
 alita_sdk/tools/utils/__init__.py,sha256=155xepXPr4OEzs2Mz5YnjXcBpxSv1X2eznRUVoPtyK0,3268
 alita_sdk/tools/utils/available_tools_decorator.py,sha256=IbrdfeQkswxUFgvvN7-dyLMZMyXLiwvX7kgi3phciCk,273
-alita_sdk/tools/utils/content_parser.py,sha256=8cq-6dHYp-jEYU1Yt3P6rVedVgVaOfgnNIyQ2qU4Yrk,13722
+alita_sdk/tools/utils/content_parser.py,sha256=Z6YQ__GONqRE0Ifhhhjc3A2SgYsxbTejBUNHw6MCdJA,14057
 alita_sdk/tools/vector_adapters/VectorStoreAdapter.py,sha256=ypBEAkFRGHv5edW0N9rdo1yKurNGQ4pRVEWtrN_7SeA,17656
 alita_sdk/tools/vector_adapters/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 alita_sdk/tools/xray/__init__.py,sha256=eOMWP8VamFbbJgt1xrGpGPqB9ByOTA0Cd3LCaETzGk4,4376
@@ -349,8 +349,8 @@ alita_sdk/tools/zephyr_scale/api_wrapper.py,sha256=A6CUEKjENt3mZlPU9lai88WV9esCD
 alita_sdk/tools/zephyr_squad/__init__.py,sha256=0ne8XLJEQSLOWfzd2HdnqOYmQlUliKHbBED5kW_Vias,2895
 alita_sdk/tools/zephyr_squad/api_wrapper.py,sha256=kmw_xol8YIYFplBLWTqP_VKPRhL_1ItDD0_vXTe_UuI,14906
 alita_sdk/tools/zephyr_squad/zephyr_squad_cloud_client.py,sha256=R371waHsms4sllHCbijKYs90C-9Yu0sSR3N4SUfQOgU,5066
-alita_sdk-0.3.317.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
-alita_sdk-0.3.317.dist-info/METADATA,sha256=LoQ5c7CeCcoUxpYNpw90MobeOL_ppKCaxYzMaZeh44s,18897
-alita_sdk-0.3.317.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-alita_sdk-0.3.317.dist-info/top_level.txt,sha256=0vJYy5p_jK6AwVb1aqXr7Kgqgk3WDtQ6t5C-XI9zkmg,10
-alita_sdk-0.3.317.dist-info/RECORD,,
+alita_sdk-0.3.318.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+alita_sdk-0.3.318.dist-info/METADATA,sha256=l6LkxFBLT4xkWB6VFzgmCUVIDdHYa5WdAZG0seKOFsM,18897
+alita_sdk-0.3.318.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+alita_sdk-0.3.318.dist-info/top_level.txt,sha256=0vJYy5p_jK6AwVb1aqXr7Kgqgk3WDtQ6t5C-XI9zkmg,10
+alita_sdk-0.3.318.dist-info/RECORD,,