alita-sdk 0.3.254__py3-none-any.whl → 0.3.255__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
@@ -27,6 +27,6 @@ class SlackConfiguration(BaseModel):
             }
         }
     )
-    name: Optional[SecretStr] = Field(description="Slack Bot Token")
+    name: Optional[str] = Field(description="Slack name")
     slack_token: Optional[SecretStr] = Field(description="Slack Token like XOXB-*****-*****-*****-*****")
     channel_id:Optional[str] = Field(description="Channel ID")
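
Note on the change above: the bot's display name is now stored as plain text, while the token keeps SecretStr masking. A minimal sketch of the distinction, assuming pydantic v2 (defaults are added here so the model is instantiable, which the original fields do not declare; all values below are invented):

    from typing import Optional
    from pydantic import BaseModel, Field, SecretStr

    class SlackConfiguration(BaseModel):
        name: Optional[str] = Field(default=None, description="Slack name")
        slack_token: Optional[SecretStr] = Field(default=None, description="Slack Token like XOXB-*****-*****-*****-*****")
        channel_id: Optional[str] = Field(default=None, description="Channel ID")

    cfg = SlackConfiguration(name="release-bot", slack_token="xoxb-secret", channel_id="C0123456789")
    print(cfg.name)         # plain str, printed as-is
    print(cfg.slack_token)  # SecretStr, printed masked: **********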
@@ -9,6 +9,7 @@ from langchain_core.documents import Document
 from mammoth import convert_to_html
 from markdownify import markdownify
 
+from alita_sdk.tools.chunkers.sematic.markdown_chunker import markdown_by_headers_chunker
 from .utils import perform_llm_prediction_for_image_bytes
 
 
@@ -35,6 +36,7 @@ class AlitaDocxMammothLoader(BaseLoader):
         self.extract_images = kwargs.get('extract_images')
         self.llm = kwargs.get("llm")
         self.prompt = kwargs.get("prompt")
+        self.max_tokens = kwargs.get('max_tokens', 512)
 
     def __handle_image(self, image) -> dict:
         """
@@ -100,11 +102,11 @@ class AlitaDocxMammothLoader(BaseLoader):
         Loads and converts the Docx file to markdown format.
 
         Returns:
-            List[Document]: A list containing a single Document with the markdown content
+            List[Document]: A list containing a Documents with the markdown content
             and metadata including the source file path.
         """
         result_content = self.get_content()
-        return [Document(page_content=result_content, metadata={'source': str(self.path)})]
+        return list(markdown_by_headers_chunker(iter([Document(page_content=result_content, metadata={'source': str(self.path)})]), config={'max_tokens':self.max_tokens}))
 
     def get_content(self):
         """
@@ -123,7 +123,9 @@ loaders_map = {
     '.docx': {
         'class': AlitaDocxMammothLoader,
         'is_multimodal_processing': True,
-        'kwargs': {}
+        'kwargs': {
+            'extract_images': True
+        }
     },
     '.doc': {
         'class': AlitaTextLoader,
@@ -336,6 +336,7 @@ class VectorStoreWrapper(BaseToolApiWrapper):
 
         from ..langchain.interfaces.llm_processor import add_documents
 
+        self._log_tool_event(message=f"Starting the indexing... Parameters: {collection_suffix=}, {clean_index=}, {is_code}", tool_name="index_documents")
         # pre-process documents if needed (find duplicates, etc.)
         if clean_index:
             logger.info("Cleaning index before re-indexing all documents.")
@@ -351,9 +352,15 @@ class VectorStoreWrapper(BaseToolApiWrapper):
             if isinstance(documents, types.GeneratorType):
                 documents = list(documents)
         else:
+            self._log_tool_event(
+                message="Filter for duplicates",
+                tool_name="index_documents")
             # remove duplicates based on metadata 'id' and 'updated_on' or 'commit_hash' fields
             documents = self._reduce_code_duplicates(documents, collection_suffix) if is_code \
                 else self._reduce_non_code_duplicates(documents, collection_suffix)
+            self._log_tool_event(
+                message="All the duplicates were filtered out. Proceeding with indexing.",
+                tool_name="index_documents")
 
         if not documents or len(documents) == 0:
             logger.info("No new documents to index after duplicate check.")
@@ -362,8 +369,8 @@ class VectorStoreWrapper(BaseToolApiWrapper):
         # if func is provided, apply it to documents
         # used for processing of documents before indexing,
         # e.g. to avoid time-consuming operations for documents that are already indexed
+        self._log_tool_event(message=f"Processing the dependent documents (attachments, etc.)", tool_name="index_documents")
         dependent_docs_generator = self.process_document_func(documents) if self.process_document_func else []
-
         # notify user about missed required metadata fields: id, updated_on
         # it is not required to have them, but it is recommended to have them for proper re-indexing and duplicate detection
         for doc in documents:
@@ -375,6 +382,9 @@ class VectorStoreWrapper(BaseToolApiWrapper):
 
         documents = documents + list(dependent_docs_generator)
 
+        self._log_tool_event(message=f"Documents for indexing were processed. Total documents: {len(documents)}",
+                             tool_name="index_documents")
+
         # if collection_suffix is provided, add it to metadata of each document
         if collection_suffix:
             for doc in documents:
@@ -386,7 +396,8 @@ class VectorStoreWrapper(BaseToolApiWrapper):
         total_docs = len(documents)
         documents_count = 0
         _documents = []
-
+        self._log_tool_event(message=f"Starting the indexing of processed documents. Total documents: {len(documents)}",
+                             tool_name="index_documents")
         # set default progress step to 20 if out of 0...100 or None
         progress_step = 20 if progress_step not in range(0, 100) else progress_step
         next_progress_point = progress_step
@@ -26,7 +26,7 @@ class AzureDevOpsWorkItemsToolkit(BaseToolkit):
                                'toolkit_name': True,
                                'max_toolkit_length': AzureDevOpsWorkItemsToolkit.toolkit_max_length})
         ),
-        ado_configuration=(AdoConfiguration, Field(description="Ado Work Item configuration", json_schema_extra={'configuration_types': ['ado_work_item']})),
+        ado_configuration=(AdoConfiguration, Field(description="Ado Work Item configuration", json_schema_extra={'configuration_types': ['ado']})),
         limit=(Optional[int], Field(description="ADO plans limit used for limitation of the list with results", default=5)),
         selected_tools=(List[Literal[tuple(selected_tools)]], Field(default=[], json_schema_extra={'args_schemas': selected_tools})),
         # indexer settings
@@ -1,7 +1,6 @@
 from typing import Generator
-from langchain.schema import Document
 from langchain_core.documents import Document
-from langchain_text_splitters import MarkdownHeaderTextSplitter
+from langchain_text_splitters import MarkdownHeaderTextSplitter, ExperimentalMarkdownSyntaxTextSplitter
 from langchain.text_splitter import TokenTextSplitter
 from ..utils import tiktoken_length
 from copy import deepcopy as copy
@@ -50,4 +49,32 @@ def markdown_chunker(file_content_generator: Generator[Document, None, None], co
             yield Document(
                 page_content=chunk.page_content,
                 metadata=docmeta
-            )
+            )
+
+
+def markdown_by_headers_chunker(file_content_generator: Generator[Document, None, None], config: dict, *args, **kwargs) -> Generator[Document, None, None]:
+    strip_header = config.get("strip_header", False)
+    return_each_line = config.get("return_each_line", False)
+    headers_to_split_on = config.get("headers_to_split_on", [])
+    headers_to_split_on = [header.split(' ', 1) for header in headers_to_split_on]
+    for doc in file_content_generator:
+        doc_metadata = doc.metadata
+        doc_content = doc.page_content
+        chunk_id = 0
+        markdown_splitter = ExperimentalMarkdownSyntaxTextSplitter(
+            headers_to_split_on=headers_to_split_on,
+            strip_headers=strip_header,
+            return_each_line=return_each_line
+        )
+        md_header_splits = markdown_splitter.split_text(doc_content)
+        for chunk in md_header_splits:
+            chunk_id += 1
+            headers_meta = list(chunk.metadata.values())
+            docmeta = copy(doc_metadata)
+            docmeta.update({"headers": "; ".join(headers_meta)})
+            docmeta['chunk_id'] = chunk_id
+            docmeta['chunk_type'] = "document"
+            yield Document(
+                page_content=chunk.page_content,
+                metadata=docmeta
+            )
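
A hedged usage sketch for the new chunker; the config keys mirror the function body above, and the sample markdown is invented. Each "headers_to_split_on" entry is split on its first space into the (marker, name) pair the splitter expects:

    from langchain_core.documents import Document

    docs = iter([Document(page_content="# Guide\n\nIntro.\n\n## Setup\n\nSteps.", metadata={"source": "guide.md"})])
    config = {"headers_to_split_on": ["# Header1", "## Header2"], "strip_header": False}
    for chunk in markdown_by_headers_chunker(docs, config=config):
        print(chunk.metadata["chunk_id"], chunk.metadata["headers"])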
@@ -110,7 +110,7 @@ BaseStepbackSearchParams = create_model(
 BaseIndexDataParams = create_model(
     "indexData",
     __base__=BaseIndexParams,
-    progress_step=(Optional[int], Field(default=10, ge=0, le=100,
+    progress_step=(Optional[int], Field(default=5, ge=0, le=100,
                                         description="Optional step size for progress reporting during indexing")),
     clean_index=(Optional[bool], Field(default=False,
                                        description="Optional flag to enforce clean existing index before indexing new data")),
@@ -132,6 +132,8 @@ class BaseToolApiWrapper(BaseModel):
 
         if tool_name is None:
             tool_name = 'tool_progress'
+
+        logger.info(message)
         dispatch_custom_event(
             name="tool_execution_step",
             data={
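
The added logger.info(message) mirrors every progress event into standard logging before the custom event is dispatched, so progress stays visible even when no event consumer is attached. A minimal sketch of the resulting pattern (stdlib logging only; the dispatch call is elided as a comment):

    import logging

    logger = logging.getLogger(__name__)

    def _log_tool_event(message: str, tool_name: str | None = None) -> None:
        if tool_name is None:
            tool_name = "tool_progress"
        logger.info(message)  # new: mirror to standard logging
        # dispatch_custom_event("tool_execution_step", ...)  # existing behavior, unchanged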
@@ -334,7 +336,13 @@ class BaseVectorStoreToolApiWrapper(BaseToolApiWrapper):
         Returns:
             Generator[Document, None, None]: A generator yielding processed documents with metadata.
         """
-        for doc in documents:
+        total_docs = len(documents)
+        self._log_tool_event(
+            message=f"Preparing a base documents for indexing. Total documents: {total_docs}",
+            tool_name="_process_documents"
+        )
+        processed_count = 0
+        for idx, doc in enumerate(documents, 1):
             # Filter documents to process only those that either:
             # - do not have a 'chunk_id' in their metadata, or
             # - have 'chunk_id' explicitly set to 1.
@@ -346,10 +354,19 @@ class BaseVectorStoreToolApiWrapper(BaseToolApiWrapper):
             for processed_doc in processed_docs:
                 # map processed document (child) to the original document (parent)
                 processed_doc.metadata[IndexerKeywords.PARENT.value] = doc.metadata.get('id', None)
-                if chunker:=self._get_dependencies_chunker(processed_doc):
-                    yield from chunker(file_content_generator=iter([processed_doc]), config=self._get_dependencies_chunker_config())
+                if chunker := self._get_dependencies_chunker(processed_doc):
+                    yield from chunker(
+                        file_content_generator=iter([processed_doc]),
+                        config=self._get_dependencies_chunker_config()
+                    )
                 else:
                     yield processed_doc
+            processed_count += 1
+            if processed_count % 5 == 0 or processed_count == total_docs:
+                self._log_tool_event(
+                    message=f"Prepared {processed_count} out of {total_docs} documents for indexing.",
+                    tool_name="_process_documents"
+                )
 
 
 # TODO: init store once and re-use the instance
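
The progress messages above are throttled to every fifth document plus the final one; a self-contained illustration of the same modulo pattern:

    total = 12
    for idx in range(1, total + 1):
        if idx % 5 == 0 or idx == total:
            print(f"Prepared {idx} out of {total} documents for indexing.")
    # prints at 5, 10, and 12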
@@ -583,7 +600,7 @@ class BaseCodeToolApiWrapper(BaseVectorStoreToolApiWrapper):
         from .chunkers.code.codeparser import parse_code_files_for_db
 
         _files = self.__handle_get_files("", branch or self.active_branch or self._active_branch)
-
+        self._log_tool_event(message="Listing files in branch", tool_name="loader")
         logger.info(f"Files in branch: {_files}")
 
         def is_whitelisted(file_path: str) -> bool:
@@ -599,11 +616,22 @@ class BaseCodeToolApiWrapper(BaseVectorStoreToolApiWrapper):
             return False
 
         def file_content_generator():
-            for file in _files:
+            self._log_tool_event(message="Reading the files", tool_name="loader")
+            # log the progress of file reading
+            total_files = len(_files)
+            for idx, file in enumerate(_files, 1):
                 if is_whitelisted(file) and not is_blacklisted(file):
+                    # read file ONLY if it matches whitelist and does not match blacklist
+                    file_content = self._read_file(file, branch=branch or self.active_branch or self._active_branch)
+                    # hash the file content to ensure uniqueness
+                    import hashlib
+                    file_hash = hashlib.sha256(file_content.encode("utf-8")).hexdigest()
                     yield {"file_name": file,
-                           "file_content": self._read_file(file, branch=branch or self.active_branch or self._active_branch),
-                           "commit_hash": self._file_commit_hash(file, branch=branch or self.active_branch or self._active_branch)}
+                           "file_content": file_content,
+                           "commit_hash": file_hash}
+                if idx % 10 == 0 or idx == total_files:
+                    self._log_tool_event(message=f"{idx} out of {total_files} files have been read", tool_name="loader")
+            self._log_tool_event(message=f"{len(_files)} have been read", tool_name="loader")
 
         return parse_code_files_for_db(file_content_generator())
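Note the semantic shift above: "commit_hash" now carries a SHA-256 digest of the file body instead of a per-file VCS commit hash, so re-indexing keys off content changes alone. A self-contained illustration of that fingerprinting idea (hashlib is stdlib; the sample strings are invented):

    import hashlib

    def fingerprint(text: str) -> str:
        # same digest the generator stores under "commit_hash"
        return hashlib.sha256(text.encode("utf-8")).hexdigest()

    print(fingerprint("def main(): ...") == fingerprint("def main(): ..."))  # True: unchanged content
    print(fingerprint("a") == fingerprint("b"))                              # False: content changed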
 
@@ -621,7 +649,9 @@ class BaseCodeToolApiWrapper(BaseVectorStoreToolApiWrapper):
             blacklist=blacklist
         )
         vectorstore = self._init_vector_store()
-        return vectorstore.index_documents(documents, collection_suffix=collection_suffix, clean_index=False, is_code=True)
+        clean_index = kwargs.get('clean_index', False)
+        return vectorstore.index_documents(documents, collection_suffix=collection_suffix,
+                                           clean_index=clean_index, is_code=True)
 
     def _get_vector_search_tools(self):
         """
@@ -1,16 +1,13 @@
 import os
 import tempfile
-from copy import deepcopy as copy
 from logging import getLogger
 from pathlib import Path
 from typing import Generator
 
 from langchain_core.documents import Document
 from langchain_core.tools import ToolException
-from langchain_text_splitters import TokenTextSplitter
 
 from alita_sdk.runtime.langchain.document_loaders.constants import loaders_map
-from alita_sdk.tools.chunkers.utils import tiktoken_length
 
 logger = getLogger(__name__)
 
@@ -193,37 +190,11 @@ def process_content_by_type(document: Document, content, extension_source: str,
         loader_kwargs = loader_config['kwargs']
 
         loader = loader_cls(file_path=temp_file_path, **loader_kwargs)
-        docs_iterator = loader.load()
-        max_tokens = chunking_config.get('max_tokens', 512)
-        tokens_overlapping = chunking_config.get('tokens_overlapping', 10)
-        chunk_id = 0
-        for chunk in docs_iterator:
-            if tiktoken_length(chunk.page_content) > max_tokens:
-                for subchunk in TokenTextSplitter(encoding_name="cl100k_base",
-                                                  chunk_size=max_tokens,
-                                                  chunk_overlap=tokens_overlapping
-                                                  ).split_text(chunk.page_content):
-                    chunk_id += 1
-                    headers_meta = list(chunk.metadata.values())
-                    docmeta = copy(document.metadata)
-                    docmeta.update({"headers": "; ".join(str(headers_meta))})
-                    docmeta['chunk_id'] = chunk_id
-                    docmeta['chunk_type'] = "document"
-                    yield Document(
-                        page_content=sanitize_for_postgres(subchunk),
-                        metadata=docmeta
-                    )
-            else:
-                chunk_id += 1
-                headers_meta = list(chunk.metadata.values())
-                docmeta = copy(document.metadata)
-                docmeta.update({"headers": "; ".join(str(headers_meta))})
-                docmeta['chunk_id'] = chunk_id
-                docmeta['chunk_type'] = "document"
-                yield Document(
-                    page_content=sanitize_for_postgres(chunk.page_content),
-                    metadata=docmeta
-                )
+        for chunk in loader.load():
+            yield Document(
+                page_content=sanitize_for_postgres(chunk.page_content),
+                metadata={**document.metadata, **chunk.metadata}
+            )
     finally:
         if temp_file_path and os.path.exists(temp_file_path):
             os.remove(temp_file_path)
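
process_content_by_type no longer token-splits loader output itself; chunking now lives in the loaders (see the AlitaDocxMammothLoader change above), leaving the parser to sanitize text and merge metadata. A minimal illustration of the merge semantics, where chunk-level keys win on collision (values are invented):

    parent = {"source": "report.docx", "id": 42, "headers": "old"}
    chunk = {"headers": "Intro; Scope", "chunk_id": 1}
    print({**parent, **chunk})
    # {'source': 'report.docx', 'id': 42, 'headers': 'Intro; Scope', 'chunk_id': 1}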
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: alita_sdk
-Version: 0.3.254
+Version: 0.3.255
 Summary: SDK for building langchain agents using resources from Alita
 Author-email: Artem Rozumenko <artyom.rozumenko@gmail.com>, Mikalai Biazruchka <mikalai_biazruchka@epam.com>, Roman Mitusov <roman_mitusov@epam.com>, Ivan Krakhmaliuk <lifedjik@gmail.com>, Artem Dubrovskiy <ad13box@gmail.com>
 License-Expression: Apache-2.0
@@ -18,7 +18,7 @@ alita_sdk/configurations/postman.py,sha256=wEmbZxwJGKSmeOzNVgk4vWkme275m3PFfYu06
 alita_sdk/configurations/qtest.py,sha256=LHM6RXxs_iSwSUdBjNXXVvqiiehT9fkBESE-ECDukt0,695
 alita_sdk/configurations/rally.py,sha256=1rwYh7bVV3XXufWRuPbr3Gz6zVPnfbA42bJYvJYsY-o,1515
 alita_sdk/configurations/service_now.py,sha256=Y3EQx0DQmLDm0P7V997FV5DoPQprgJ3Mk-yJmE5rE3M,1196
-alita_sdk/configurations/slack.py,sha256=fiKs04brkESygJg2EB1p6Dj1mkvKIKyuEozaueL_KMM,1150
+alita_sdk/configurations/slack.py,sha256=ppwfV7YMpkq-qU6YREK7EH8VmYBZ0EN_9WIwz3EZI-Q,1139
 alita_sdk/configurations/testrail.py,sha256=k0fPmHBIrWAfEKhrDdB9Rdirw-UFHFoXkRePyrsqcWI,725
 alita_sdk/configurations/xray.py,sha256=xbydsVMqGJYVrNmg6bCr3uMxXVEPFtEhPovgWX6-6_Y,1141
 alita_sdk/configurations/zephyr.py,sha256=ndqGYFy5OFxjoXB7DzC71rd5W6qGBGAlKMWoqT8TuNk,1653
@@ -45,7 +45,7 @@ alita_sdk/runtime/langchain/document_loaders/AlitaBDDScenariosLoader.py,sha256=4
 alita_sdk/runtime/langchain/document_loaders/AlitaCSVLoader.py,sha256=3ne-a5qIkBuGL2pzIePxDr79n3RJhASbOdS5izYWDMg,2321
 alita_sdk/runtime/langchain/document_loaders/AlitaConfluenceLoader.py,sha256=NzpoL4C7UzyzLouTSL_xTQw70MitNt-WZz3Eyl7QkTA,8294
 alita_sdk/runtime/langchain/document_loaders/AlitaDirectoryLoader.py,sha256=fKezkgvIcLG7S2PVJp1a8sZd6C4XQKNZKAFC87DbQts,7003
-alita_sdk/runtime/langchain/document_loaders/AlitaDocxMammothLoader.py,sha256=nHvXm5U5qa26FGRwl6YKCG7HGBV5erjqqyWowNWs7iI,5723
+alita_sdk/runtime/langchain/document_loaders/AlitaDocxMammothLoader.py,sha256=9hi5eHgDIfa9wBWqTuwMM6D6W64czrDTfZl_htooe8Y,5943
 alita_sdk/runtime/langchain/document_loaders/AlitaExcelLoader.py,sha256=YBFYikrOEITfIavU0Xu7BQSNvPCFKzcmbJ_VDeQ6KdI,3078
 alita_sdk/runtime/langchain/document_loaders/AlitaGitRepoLoader.py,sha256=5WXGcyHraSVj3ANHj_U6X4EDikoekrIYtS0Q_QqNIng,2608
 alita_sdk/runtime/langchain/document_loaders/AlitaImageLoader.py,sha256=ogvCmpnS54-D7fP_sSkL1dnhHTmRSD-HA2FFrTNhDEo,6560
@@ -57,7 +57,7 @@ alita_sdk/runtime/langchain/document_loaders/AlitaQtestLoader.py,sha256=CUVVnisx
 alita_sdk/runtime/langchain/document_loaders/AlitaTableLoader.py,sha256=o0SRFPZ-VskltgThVRX80rT19qtB4gPzxED9SENTNWo,4145
 alita_sdk/runtime/langchain/document_loaders/AlitaTextLoader.py,sha256=uNcV0En49_0u0RYB1sP1XfNspT2Xc5CacuJr9Jqv79Q,2972
 alita_sdk/runtime/langchain/document_loaders/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-alita_sdk/runtime/langchain/document_loaders/constants.py,sha256=gTTHIbJQVpSGaOwQjJwAltZryoDDX7GaqbODI30MwQM,4563
+alita_sdk/runtime/langchain/document_loaders/constants.py,sha256=EL20rusYbnPk2zwOh8-gxSdaEuqThZJcqiyINXphxFw,4607
 alita_sdk/runtime/langchain/document_loaders/utils.py,sha256=9xghESf3axBbwxATyVuS0Yu-TWe8zWZnXgCD1ZVyNW0,2414
 alita_sdk/runtime/langchain/interfaces/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 alita_sdk/runtime/langchain/interfaces/kwextractor.py,sha256=kSJA9L8g8UArmHu7Bd9dIO0Rrq86JPUb8RYNlnN68FQ,3072
@@ -106,7 +106,7 @@ alita_sdk/runtime/tools/pgvector_search.py,sha256=NN2BGAnq4SsDHIhUcFZ8d_dbEOM8Qw
 alita_sdk/runtime/tools/prompt.py,sha256=nJafb_e5aOM1Rr3qGFCR-SKziU9uCsiP2okIMs9PppM,741
 alita_sdk/runtime/tools/router.py,sha256=wCvZjVkdXK9dMMeEerrgKf5M790RudH68pDortnHSz0,1517
 alita_sdk/runtime/tools/tool.py,sha256=lE1hGi6qOAXG7qxtqxarD_XMQqTghdywf261DZawwno,5631
-alita_sdk/runtime/tools/vectorstore.py,sha256=l5wfovwMNvS_RgW-ZHXCh8Cm8gauunRzP0NPkzmshcQ,33852
+alita_sdk/runtime/tools/vectorstore.py,sha256=yl6FKJGVQDevftSkxWTkMbqjIskIFz69vXELdEGp9u4,34780
 alita_sdk/runtime/tools/vectorstore_base.py,sha256=HFaNk_oBoeZWrQWBrvEsozajHqwjWxsV6RigkQyq-eQ,27586
 alita_sdk/runtime/utils/AlitaCallback.py,sha256=E4LlSBuCHWiUq6W7IZExERHZY0qcmdjzc_rJlF2iQIw,7356
 alita_sdk/runtime/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -120,7 +120,7 @@ alita_sdk/runtime/utils/toolkit_utils.py,sha256=I9QFqnaqfVgN26LUr6s3XlBlG6y0CoHU
 alita_sdk/runtime/utils/utils.py,sha256=CpEl3LCeLbhzQySz08lkKPm7Auac6IiLF7WB8wmArMI,589
 alita_sdk/tools/__init__.py,sha256=ko5TToGYZFmBrho26DRAVvrkHWxQ2sfs8gVAASinYp8,10611
 alita_sdk/tools/base_indexer_toolkit.py,sha256=gOjE1igKyjG1LohMj0XMlj1IGaFp7eEEDqyEG6-xLmc,18405
-alita_sdk/tools/elitea_base.py,sha256=yfDSNKdLtsedp8546KHfDPOPYgpD4ZZ-dvAxs3zoF3o,31071
+alita_sdk/tools/elitea_base.py,sha256=Qrj8r71yffF6qmbwLtu5dz72LjitEjK8Me-8-TSfLVY,32694
 alita_sdk/tools/non_code_indexer_toolkit.py,sha256=v9uq1POE1fQKCd152mbqDtF-HSe0qoDj83k4E5LAkMI,1080
 alita_sdk/tools/ado/__init__.py,sha256=bArTObt5cqG1SkijKevWGbsIILHBA3aCStg8Q1jd69k,1243
 alita_sdk/tools/ado/utils.py,sha256=PTCludvaQmPLakF2EbCGy66Mro4-rjDtavVP-xcB2Wc,1252
@@ -130,7 +130,7 @@ alita_sdk/tools/ado/test_plan/__init__.py,sha256=4fEw_3cm4shuZ868HhAU-uMH3xNXPyb
 alita_sdk/tools/ado/test_plan/test_plan_wrapper.py,sha256=jQt8kFmdAzsopjByLTMiSnWtoqz_IUOmYkhPTVGeMnU,20265
 alita_sdk/tools/ado/wiki/__init__.py,sha256=uBKo_Meu2ZxMxcxGsMmvCXyplRE2um1_PIRvdYd37rM,5171
 alita_sdk/tools/ado/wiki/ado_wrapper.py,sha256=zg6wMRar1DTp-ZRlYaQifBEnpYmTrHXskTNPdrLdy8s,14759
-alita_sdk/tools/ado/work_item/__init__.py,sha256=coDedNL0pSPLjZ6VVK1UcqWo00zxe2T4XfVXt8bMho8,5383
+alita_sdk/tools/ado/work_item/__init__.py,sha256=HNcdIMwTSNe-25_Pg-KmVVXTFci3vNa84tkTFkls36c,5373
 alita_sdk/tools/ado/work_item/ado_wrapper.py,sha256=gEywCL_kS0k1jWcDhsmYUybpIP08tH8go6CixLJGwT4,28409
 alita_sdk/tools/advanced_jira_mining/__init__.py,sha256=pUTzECqGvYaR5qWY3JPUhrImrZgc7pCXuqSe5eWIE80,4604
 alita_sdk/tools/advanced_jira_mining/data_mining_wrapper.py,sha256=nZPtuwVWp8VeHw1B8q9kdwf-6ZvHnlXTOGdcIMDkKpw,44211
@@ -194,7 +194,7 @@ alita_sdk/tools/chunkers/code/treesitter/treesitter_rs.py,sha256=LgKyNffBy30gIr8
 alita_sdk/tools/chunkers/code/treesitter/treesitter_ts.py,sha256=Qs1a_BBN296iZc5hh8UNF9sc0G0-A_XZVhP3Na1ZNDg,387
 alita_sdk/tools/chunkers/sematic/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 alita_sdk/tools/chunkers/sematic/base.py,sha256=bRHpCFbOy-KPe4HBGpegrvIhvOsd7sDRfmb06T8tSuU,349
-alita_sdk/tools/chunkers/sematic/markdown_chunker.py,sha256=NZCZi0Xzi58Bm7-9LzwGoAhdNZhvUERb_sK1bNQpqCQ,2574
+alita_sdk/tools/chunkers/sematic/markdown_chunker.py,sha256=HmAGKuIodnMcHl-kBwAb1NY0GKKwAskRFvGaW3m4HAM,3859
 alita_sdk/tools/chunkers/sematic/proposal_chunker.py,sha256=t8JjX9TH6yHXXaemiDK1E6000tlES2Kl8XfyezmlIoo,5116
 alita_sdk/tools/chunkers/sematic/statistical_chunker.py,sha256=VDQcMC-ky72GqdWJiHMmcRmfJTTU5XglBF1IWg2Qews,13403
 alita_sdk/tools/cloud/__init__.py,sha256=ekqANTJAyuURqpjNTn6MmSn2q6qEKwENxEXBUFGkkck,512
@@ -312,7 +312,7 @@ alita_sdk/tools/testio/api_wrapper.py,sha256=BvmL5h634BzG6p7ajnQLmj-uoAw1gjWnd4F
 alita_sdk/tools/testrail/__init__.py,sha256=0kETjWKLU7R6mugBWsjwEUsh10pipbAeNSGJAO0FBh0,4634
 alita_sdk/tools/testrail/api_wrapper.py,sha256=5T-QyTzt-J0rI32xc_E684lCdgyWeHSyeTYiwQwtGyg,32275
 alita_sdk/tools/utils/__init__.py,sha256=155xepXPr4OEzs2Mz5YnjXcBpxSv1X2eznRUVoPtyK0,3268
-alita_sdk/tools/utils/content_parser.py,sha256=MsBlh97v5aVTuB2bw43J4K2-IJumKOoRbz0zkpjkbhI,11521
+alita_sdk/tools/utils/content_parser.py,sha256=zqeyuxZqZqVFq5M5sZM-falMdlOw48FyZnp3Z0XUpCw,9868
 alita_sdk/tools/vector_adapters/VectorStoreAdapter.py,sha256=a6FAsiix_EvATIKUf5YT6vHh5LDyJ5uSP3LJqoxFo04,17367
 alita_sdk/tools/vector_adapters/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 alita_sdk/tools/xray/__init__.py,sha256=GGpbiBdDQ9kMFqJEHYi7XwKpkuMMHi-ZF-IM8yFIgUM,4380
@@ -334,8 +334,8 @@ alita_sdk/tools/zephyr_scale/api_wrapper.py,sha256=JAeWf-RXohsxheUpT0iMDClc_izj-
 alita_sdk/tools/zephyr_squad/__init__.py,sha256=0AI_j27xVO5Gk5HQMFrqPTd4uvuVTpiZUicBrdfEpKg,2796
 alita_sdk/tools/zephyr_squad/api_wrapper.py,sha256=kmw_xol8YIYFplBLWTqP_VKPRhL_1ItDD0_vXTe_UuI,14906
 alita_sdk/tools/zephyr_squad/zephyr_squad_cloud_client.py,sha256=R371waHsms4sllHCbijKYs90C-9Yu0sSR3N4SUfQOgU,5066
-alita_sdk-0.3.254.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
-alita_sdk-0.3.254.dist-info/METADATA,sha256=XLAiAMkY0U1LualoozT6xq8wieW8dvFe0xwIZeM7Dmw,18897
-alita_sdk-0.3.254.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-alita_sdk-0.3.254.dist-info/top_level.txt,sha256=0vJYy5p_jK6AwVb1aqXr7Kgqgk3WDtQ6t5C-XI9zkmg,10
-alita_sdk-0.3.254.dist-info/RECORD,,
+alita_sdk-0.3.255.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+alita_sdk-0.3.255.dist-info/METADATA,sha256=U2ck9IqpmmWxni_szIR0vV7aZZpPr9HUKUexI2HQb44,18897
+alita_sdk-0.3.255.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+alita_sdk-0.3.255.dist-info/top_level.txt,sha256=0vJYy5p_jK6AwVb1aqXr7Kgqgk3WDtQ6t5C-XI9zkmg,10
+alita_sdk-0.3.255.dist-info/RECORD,,