alita-sdk 0.3.374__py3-none-any.whl → 0.3.375__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their public registry; it is provided for informational purposes only.
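
In substance this release is a rename: the `collection_suffix` parameter becomes `index_name` throughout the vector-store wrappers (`VectorStoreWrapper`, `VectorStoreWrapperBase`), the indexer toolkits (`BaseIndexerToolkit`, `CodeIndexerToolkit`, `NonCodeIndexerToolkit`, `BaseVectorStoreToolApiWrapper`, `BaseCodeToolApiWrapper`) and the vector-store adapters (`VectorStoreAdapter`, `PGVectorAdapter`, `ChromaAdapter`). Two smaller changes ride along: the `list_collections` tool is exposed as `list_indexes`, and `ConfluenceAPIWrapper` stops including page labels by default (`include_labels` flips from True to False).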


--- a/alita_sdk/runtime/tools/vectorstore.py
+++ b/alita_sdk/runtime/tools/vectorstore.py
@@ -207,9 +207,9 @@ class VectorStoreWrapper(BaseToolApiWrapper):
             tool_name="_remove_collection"
         )

-    def _get_indexed_ids(self, collection_suffix: Optional[str] = '') -> List[str]:
+    def _get_indexed_ids(self, index_name: Optional[str] = '') -> List[str]:
         """Get all indexed document IDs from vectorstore"""
-        return self.vector_adapter.get_indexed_ids(self, collection_suffix)
+        return self.vector_adapter.get_indexed_ids(self, index_name)

     def list_collections(self) -> Any:
         """List all collections in the vectorstore.
@@ -233,7 +233,7 @@ class VectorStoreWrapper(BaseToolApiWrapper):
             return {"collections": [], "message": "No indexed collections"}
         return cols

-    def _clean_collection(self, collection_suffix: str = ''):
+    def _clean_collection(self, index_name: str = ''):
         """
         Clean the vectorstore collection by deleting all indexed data.
         """
@@ -241,15 +241,15 @@ class VectorStoreWrapper(BaseToolApiWrapper):
             f"Cleaning collection '{self.dataset}'",
             tool_name="_clean_collection"
         )
-        self.vector_adapter.clean_collection(self, collection_suffix)
+        self.vector_adapter.clean_collection(self, index_name)
         self._log_data(
             f"Collection '{self.dataset}' has been cleaned. ",
             tool_name="_clean_collection"
         )

-    def _get_code_indexed_data(self, collection_suffix: str) -> Dict[str, Dict[str, Any]]:
+    def _get_code_indexed_data(self, index_name: str) -> Dict[str, Dict[str, Any]]:
         """ Get all indexed data from vectorstore for code content """
-        return self.vector_adapter.get_code_indexed_data(self, collection_suffix)
+        return self.vector_adapter.get_code_indexed_data(self, index_name)

     def _add_to_collection(self, entry_id, new_collection_value):
         """Add a new collection name to the `collection` key in the `metadata` column."""
@@ -258,7 +258,7 @@ class VectorStoreWrapper(BaseToolApiWrapper):
     def _reduce_duplicates(
             self,
             documents: Generator[Any, None, None],
-            collection_suffix: str,
+            index_name: str,
             get_indexed_data: Callable,
             key_fn: Callable,
             compare_fn: Callable,
@@ -267,7 +267,7 @@ class VectorStoreWrapper(BaseToolApiWrapper):
     ) -> List[Any]:
         """Generic duplicate reduction logic for documents."""
         self._log_data(log_msg, tool_name="index_documents")
-        indexed_data = get_indexed_data(collection_suffix)
+        indexed_data = get_indexed_data(index_name)
         indexed_keys = set(indexed_data.keys())
         if not indexed_keys:
             self._log_data("Vectorstore is empty, indexing all incoming documents", tool_name="index_documents")
@@ -279,14 +279,14 @@ class VectorStoreWrapper(BaseToolApiWrapper):
         for document in documents:
             key = key_fn(document)
             key = key if isinstance(key, str) else str(key)
-            if key in indexed_keys and collection_suffix == indexed_data[key]['metadata'].get('collection'):
+            if key in indexed_keys and index_name == indexed_data[key]['metadata'].get('collection'):
                 if compare_fn(document, indexed_data[key]):
                     # Disabled addition of new collection to already indexed documents
                     # # check metadata.collection and update if needed
                     # for update_collection_id in remove_ids_fn(indexed_data, key):
                     #     self._add_to_collection(
                     #         update_collection_id,
-                    #         collection_suffix
+                    #         index_name
                     #     )
                     continue
             final_docs.append(document)
@@ -303,10 +303,10 @@ class VectorStoreWrapper(BaseToolApiWrapper):

         return final_docs

-    def _reduce_code_duplicates(self, documents: Generator[Any, None, None], collection_suffix: str) -> List[Any]:
+    def _reduce_code_duplicates(self, documents: Generator[Any, None, None], index_name: str) -> List[Any]:
         return self._reduce_duplicates(
             documents,
-            collection_suffix,
+            index_name,
             self._get_code_indexed_data,
             lambda doc: doc.metadata.get('filename'),
             lambda doc, idx: (
@@ -318,7 +318,7 @@ class VectorStoreWrapper(BaseToolApiWrapper):
             log_msg="Verification of code documents to index started"
         )

-    def index_documents(self, documents: Generator[Document, None, None], collection_suffix: str, progress_step: int = 20, clean_index: bool = True, is_code: bool = True):
+    def index_documents(self, documents: Generator[Document, None, None], index_name: str, progress_step: int = 20, clean_index: bool = True, is_code: bool = True):
         """ Index documents in the vectorstore.

         Args:
@@ -329,13 +329,13 @@ class VectorStoreWrapper(BaseToolApiWrapper):

         from ..langchain.interfaces.llm_processor import add_documents

-        self._log_tool_event(message=f"Starting the indexing... Parameters: {collection_suffix=}, {clean_index=}, {is_code}", tool_name="index_documents")
+        self._log_tool_event(message=f"Starting the indexing... Parameters: {index_name=}, {clean_index=}, {is_code}", tool_name="index_documents")
         # pre-process documents if needed (find duplicates, etc.)
         if clean_index:
             logger.info("Cleaning index before re-indexing all documents.")
             self._log_data("Cleaning index before re-indexing all documents. Previous index will be removed", tool_name="index_documents")
             try:
-                self._clean_collection(collection_suffix)
+                self._clean_collection(index_name)
                 self.vectoradapter.persist()
                 self.vectoradapter.vacuum()
                 self._log_data("Previous index has been removed",
@@ -349,7 +349,7 @@ class VectorStoreWrapper(BaseToolApiWrapper):
                 message="Filter for duplicates",
                 tool_name="index_documents")
             # remove duplicates based on metadata 'id' and 'updated_on' or 'commit_hash' fields
-            documents = self._reduce_code_duplicates(documents, collection_suffix)
+            documents = self._reduce_code_duplicates(documents, index_name)
             self._log_tool_event(
                 message="All the duplicates were filtered out. Proceeding with indexing.",
                 tool_name="index_documents")
@@ -377,13 +377,13 @@ class VectorStoreWrapper(BaseToolApiWrapper):
         self._log_tool_event(message=f"Documents for indexing were processed. Total documents: {len(documents)}",
                              tool_name="index_documents")

-        # if collection_suffix is provided, add it to metadata of each document
-        if collection_suffix:
+        # if index_name is provided, add it to metadata of each document
+        if index_name:
             for doc in documents:
                 if not doc.metadata.get('collection'):
-                    doc.metadata['collection'] = collection_suffix
+                    doc.metadata['collection'] = index_name
                 else:
-                    doc.metadata['collection'] += f";{collection_suffix}"
+                    doc.metadata['collection'] += f";{index_name}"

         total_docs = len(documents)
         documents_count = 0
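
The hunk above also shows the multi-index tagging rule that survives the rename: a document's `collection` metadata holds a `;`-separated list of every index it belongs to. A standalone sketch of that rule, assuming langchain-core's `Document` (`tag_with_index` is a hypothetical helper, not SDK API):

    from langchain_core.documents import Document

    def tag_with_index(doc: Document, index_name: str) -> Document:
        # First index claims the slot; later indexes are appended,
        # ';'-separated, exactly as in the index_documents hunk above.
        if not doc.metadata.get('collection'):
            doc.metadata['collection'] = index_name
        else:
            doc.metadata['collection'] += f";{index_name}"
        return doc

    doc = Document(page_content="...", metadata={})
    tag_with_index(doc, "docs1")
    tag_with_index(doc, "docs2")
    assert doc.metadata['collection'] == "docs1;docs2"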
--- a/alita_sdk/runtime/tools/vectorstore_base.py
+++ b/alita_sdk/runtime/tools/vectorstore_base.py
@@ -216,13 +216,13 @@ class VectorStoreWrapperBase(BaseToolApiWrapper):
             return "No indexed collections"
         return collections

-    def get_index_meta(self, collection_suffix: str):
-        index_metas = self.vector_adapter.get_index_meta(self, collection_suffix)
+    def get_index_meta(self, index_name: str):
+        index_metas = self.vector_adapter.get_index_meta(self, index_name)
         if len(index_metas) > 1:
             raise RuntimeError(f"Multiple index_meta documents found: {index_metas}")
         return index_metas[0] if index_metas else None

-    def _clean_collection(self, collection_suffix: str = ''):
+    def _clean_collection(self, index_name: str = ''):
         """
         Clean the vectorstore collection by deleting all indexed data.
         """
@@ -230,13 +230,13 @@ class VectorStoreWrapperBase(BaseToolApiWrapper):
             f"Cleaning collection '{self.dataset}'",
             tool_name="_clean_collection"
         )
-        self.vector_adapter.clean_collection(self, collection_suffix)
+        self.vector_adapter.clean_collection(self, index_name)
         self._log_tool_event(
             f"Collection '{self.dataset}' has been cleaned. ",
             tool_name="_clean_collection"
         )

-    def index_documents(self, documents: Generator[Document, None, None], collection_suffix: str, progress_step: int = 20, clean_index: bool = True):
+    def index_documents(self, documents: Generator[Document, None, None], index_name: str, progress_step: int = 20, clean_index: bool = True):
         """ Index documents in the vectorstore.

         Args:
@@ -245,21 +245,21 @@ class VectorStoreWrapperBase(BaseToolApiWrapper):
             clean_index (bool): If True, clean the index before re-indexing all documents.
         """
         if clean_index:
-            self._clean_index(collection_suffix)
+            self._clean_index(index_name)

-        return self._save_index(list(documents), collection_suffix, progress_step)
+        return self._save_index(list(documents), index_name, progress_step)

-    def _clean_index(self, collection_suffix: str):
+    def _clean_index(self, index_name: str):
         logger.info("Cleaning index before re-indexing all documents.")
         self._log_tool_event("Cleaning index before re-indexing all documents. Previous index will be removed", tool_name="index_documents")
         try:
-            self._clean_collection(collection_suffix)
+            self._clean_collection(index_name)
             self._log_tool_event("Previous index has been removed",
                                  tool_name="index_documents")
         except Exception as e:
             logger.warning(f"Failed to clean index: {str(e)}. Continuing with re-indexing.")

-    def _save_index(self, documents: list[Document], collection_suffix: Optional[str] = None, progress_step: int = 20):
+    def _save_index(self, documents: list[Document], index_name: Optional[str] = None, progress_step: int = 20):
         from ..langchain.interfaces.llm_processor import add_documents
         #
         for doc in documents:
@@ -268,13 +268,13 @@ class VectorStoreWrapperBase(BaseToolApiWrapper):

         logger.debug(f"Indexing documents: {documents}")

-        # if collection_suffix is provided, add it to metadata of each document
-        if collection_suffix:
+        # if index_name is provided, add it to metadata of each document
+        if index_name:
             for doc in documents:
                 if not doc.metadata.get('collection'):
-                    doc.metadata['collection'] = collection_suffix
+                    doc.metadata['collection'] = index_name
                 else:
-                    doc.metadata['collection'] += f";{collection_suffix}"
+                    doc.metadata['collection'] += f";{index_name}"

         total_docs = len(documents)
         documents_count = 0
--- a/alita_sdk/tools/base_indexer_toolkit.py
+++ b/alita_sdk/tools/base_indexer_toolkit.py
@@ -19,19 +19,19 @@ logger = logging.getLogger(__name__)
 # Base Vector Store Schema Models
 BaseIndexParams = create_model(
     "BaseIndexParams",
-    collection_suffix=(str, Field(description="Suffix for collection name (max 7 characters) used to separate datasets", min_length=1, max_length=7)),
+    index_name=(str, Field(description="Index name (max 7 characters)", min_length=1, max_length=7)),
 )

 RemoveIndexParams = create_model(
     "RemoveIndexParams",
-    collection_suffix=(Optional[str], Field(description="Optional suffix for collection name (max 7 characters)", default="", max_length=7)),
+    index_name=(Optional[str], Field(description="Optional index name (max 7 characters)", default="", max_length=7)),
 )

 BaseSearchParams = create_model(
     "BaseSearchParams",
     query=(str, Field(description="Query text to search in the index")),
-    collection_suffix=(Optional[str], Field(
-        description="Optional suffix for collection name (max 7 characters). Leave empty to search across all datasets",
+    index_name=(Optional[str], Field(
+        description="Optional index name (max 7 characters). Leave empty to search across all datasets",
         default="", max_length=7)),
     filter=(Optional[dict | str], Field(
         description="Filter to apply to the search results. Can be a dictionary or a JSON string.",
@@ -61,7 +61,7 @@ BaseSearchParams = create_model(
 BaseStepbackSearchParams = create_model(
     "BaseStepbackSearchParams",
     query=(str, Field(description="Query text to search in the index")),
-    collection_suffix=(Optional[str], Field(description="Optional suffix for collection name (max 7 characters)", default="", max_length=7)),
+    index_name=(Optional[str], Field(description="Optional index name (max 7 characters)", default="", max_length=7)),
     messages=(Optional[List], Field(description="Chat messages for stepback search context", default=[])),
     filter=(Optional[dict | str], Field(
         description="Filter to apply to the search results. Can be a dictionary or a JSON string.",
@@ -151,18 +151,18 @@ class BaseIndexerToolkit(VectorStoreWrapperBase):
         yield from ()

     def index_data(self, **kwargs):
-        collection_suffix = kwargs.get("collection_suffix")
+        index_name = kwargs.get("index_name")
         progress_step = kwargs.get("progress_step")
         clean_index = kwargs.get("clean_index")
         chunking_tool = kwargs.get("chunking_tool")
         chunking_config = kwargs.get("chunking_config")
         #
         if clean_index:
-            self._clean_index(collection_suffix)
+            self._clean_index(index_name)
         #
-        self.index_meta_init(collection_suffix, kwargs)
+        self.index_meta_init(index_name, kwargs)
         #
-        self._log_tool_event(f"Indexing data into collection with suffix '{collection_suffix}'. It can take some time...")
+        self._log_tool_event(f"Indexing data into collection with suffix '{index_name}'. It can take some time...")
         self._log_tool_event(f"Loading the documents to index...{kwargs}")
         documents = self._base_loader(**kwargs)
         documents = list(documents) # consume/exhaust generator to count items
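
Because `index_data` pulls its parameters out of `**kwargs` by key, a call still using the old keyword fails silently: `kwargs.get("index_name")` simply returns None. A hypothetical caller migration (`toolkit` stands in for a configured indexer toolkit instance):

    # from 0.3.375
    toolkit.index_data(index_name="docs1", clean_index=True, progress_step=10)
    # through 0.3.374 the same call was:
    # toolkit.index_data(collection_suffix="docs1", clean_index=True, progress_step=10)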
@@ -170,16 +170,16 @@ class BaseIndexerToolkit(VectorStoreWrapperBase):
         documents = (doc for doc in documents)
         self._log_tool_event(f"Base documents were pre-loaded. "
                              f"Search for possible document duplicates and remove them from the indexing list...")
-        documents = self._reduce_duplicates(documents, collection_suffix)
+        documents = self._reduce_duplicates(documents, index_name)
         self._log_tool_event(f"Duplicates were removed. "
                              f"Processing documents to collect dependencies and prepare them for indexing...")
-        result = self._save_index_generator(documents, documents_count, chunking_tool, chunking_config, collection_suffix=collection_suffix, progress_step=progress_step)
+        result = self._save_index_generator(documents, documents_count, chunking_tool, chunking_config, index_name=index_name, progress_step=progress_step)
         #
-        self.index_meta_update(collection_suffix, IndexerKeywords.INDEX_META_COMPLETED.value, result)
+        self.index_meta_update(index_name, IndexerKeywords.INDEX_META_COMPLETED.value, result)
         #
         return {"status": "ok", "message": f"successfully indexed {result} documents"}

-    def _save_index_generator(self, base_documents: Generator[Document, None, None], base_total: int, chunking_tool, chunking_config, collection_suffix: Optional[str] = None, progress_step: int = 20):
+    def _save_index_generator(self, base_documents: Generator[Document, None, None], base_total: int, chunking_tool, chunking_config, index_name: Optional[str] = None, progress_step: int = 20):
         self._log_tool_event(f"Base documents are ready for indexing. {base_total} base documents in total to index.")
         from ..runtime.langchain.interfaces.llm_processor import add_documents
         #
@@ -211,12 +211,12 @@ class BaseIndexerToolkit(VectorStoreWrapperBase):
             if 'id' not in doc.metadata or 'updated_on' not in doc.metadata:
                 logger.warning(f"Document is missing required metadata field 'id' or 'updated_on': {doc.metadata}")
             #
-            # if collection_suffix is provided, add it to metadata of each document
-            if collection_suffix:
+            # if index_name is provided, add it to metadata of each document
+            if index_name:
                 if not doc.metadata.get('collection'):
-                    doc.metadata['collection'] = collection_suffix
+                    doc.metadata['collection'] = index_name
                 else:
-                    doc.metadata['collection'] += f";{collection_suffix}"
+                    doc.metadata['collection'] += f";{index_name}"
             #
             try:
                 pg_vector_add_docs_chunk.append(doc)
@@ -295,12 +295,12 @@ class BaseIndexerToolkit(VectorStoreWrapperBase):
     def _reduce_duplicates(
             self,
             documents: Generator[Any, None, None],
-            collection_suffix: str,
+            index_name: str,
             log_msg: str = "Verification of documents to index started"
     ) -> Generator[Document, None, None]:
         """Generic duplicate reduction logic for documents."""
         self._log_tool_event(log_msg, tool_name="index_documents")
-        indexed_data = self._get_indexed_data(collection_suffix)
+        indexed_data = self._get_indexed_data(index_name)
         indexed_keys = set(indexed_data.keys())
         if not indexed_keys:
             self._log_tool_event("Vectorstore is empty, indexing all incoming documents", tool_name="index_documents")
@@ -312,7 +312,7 @@ class BaseIndexerToolkit(VectorStoreWrapperBase):
         for document in documents:
             key = self.key_fn(document)
             key = key if isinstance(key, str) else str(key)
-            if key in indexed_keys and collection_suffix == indexed_data[key]['metadata'].get('collection'):
+            if key in indexed_keys and index_name == indexed_data[key]['metadata'].get('collection'):
                 if self.compare_fn(document, indexed_data[key]):
                     continue
             yield document
@@ -327,7 +327,7 @@ class BaseIndexerToolkit(VectorStoreWrapperBase):
         )
         self.vectorstore.delete(ids=list(docs_to_remove))

-    def _get_indexed_data(self, collection_suffix: str):
+    def _get_indexed_data(self, index_name: str):
         raise NotImplementedError("Subclasses must implement this method")

     def key_fn(self, document: Document):
@@ -339,20 +339,20 @@ class BaseIndexerToolkit(VectorStoreWrapperBase):
     def remove_ids_fn(self, idx_data, key: str):
         raise NotImplementedError("Subclasses must implement this method")

-    def remove_index(self, collection_suffix: str = ""):
+    def remove_index(self, index_name: str = ""):
         """Cleans the indexed data in the collection."""
-        super()._clean_collection(collection_suffix=collection_suffix)
-        return (f"Collection '{collection_suffix}' has been removed from the vector store.\n"
-                f"Available collections: {self.list_collections()}") if collection_suffix \
+        super()._clean_collection(index_name=index_name)
+        return (f"Collection '{index_name}' has been removed from the vector store.\n"
+                f"Available collections: {self.list_collections()}") if index_name \
             else "All collections have been removed from the vector store."

-    def _build_collection_filter(self, filter: dict | str, collection_suffix: str = "") -> dict:
+    def _build_collection_filter(self, filter: dict | str, index_name: str = "") -> dict:
         """Builds a filter for the collection based on the provided suffix."""

         filter = filter if isinstance(filter, dict) else json.loads(filter)
-        if collection_suffix:
+        if index_name:
             filter.update({"collection": {
-                "$eq": collection_suffix.strip()
+                "$eq": index_name.strip()
             }})

         if filter:
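
`_build_collection_filter` is unchanged apart from the parameter name; restated standalone, it merges a collection equality clause into a dict or JSON-string filter:

    import json

    def build_collection_filter(filter: dict | str, index_name: str = "") -> dict:
        # Accept either a dict or a JSON string, as in the hunk above.
        filter = filter if isinstance(filter, dict) else json.loads(filter)
        if index_name:
            filter.update({"collection": {"$eq": index_name.strip()}})
        return filter

    build_collection_filter('{"author": {"$eq": "jsmith"}}', "docs1")
    # -> {'author': {'$eq': 'jsmith'}, 'collection': {'$eq': 'docs1'}}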
@@ -375,7 +375,7 @@ class BaseIndexerToolkit(VectorStoreWrapperBase):

     def search_index(self,
                      query: str,
-                     collection_suffix: str = "",
+                     index_name: str = "",
                      filter: dict | str = {}, cut_off: float = 0.5,
                      search_top: int = 10, reranker: dict = {},
                      full_text_search: Optional[Dict[str, Any]] = None,
@@ -383,13 +383,13 @@ class BaseIndexerToolkit(VectorStoreWrapperBase):
                      extended_search: Optional[List[str]] = None,
                      **kwargs):
         """ Searches indexed documents in the vector store."""
-        # build filter on top of collection_suffix
+        # build filter on top of index_name

         available_collections = super().list_collections()
-        if collection_suffix and collection_suffix not in available_collections:
-            return f"Collection '{collection_suffix}' not found. Available collections: {available_collections}"
+        if index_name and index_name not in available_collections:
+            return f"Collection '{index_name}' not found. Available collections: {available_collections}"

-        filter = self._build_collection_filter(filter, collection_suffix)
+        filter = self._build_collection_filter(filter, index_name)
         found_docs = super().search_documents(
             query,
             doctype=self.doctype,
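
On the search side the rename is the only signature change; a hypothetical call (an unknown `index_name` still short-circuits with the "not found" message listing the available collections):

    hits = toolkit.search_index(
        query="connection pooling",
        index_name="docs1",   # was collection_suffix="docs1"
        search_top=5,
        cut_off=0.5,
    )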
@@ -406,7 +406,7 @@ class BaseIndexerToolkit(VectorStoreWrapperBase):
     def stepback_search_index(self,
                               query: str,
                               messages: List[Dict[str, Any]] = [],
-                              collection_suffix: str = "",
+                              index_name: str = "",
                               filter: dict | str = {}, cut_off: float = 0.5,
                               search_top: int = 10, reranker: dict = {},
                               full_text_search: Optional[Dict[str, Any]] = None,
@@ -414,7 +414,7 @@ class BaseIndexerToolkit(VectorStoreWrapperBase):
                               extended_search: Optional[List[str]] = None,
                               **kwargs):
         """ Searches indexed documents in the vector store."""
-        filter = self._build_collection_filter(filter, collection_suffix)
+        filter = self._build_collection_filter(filter, index_name)
         found_docs = super().stepback_search(
             query,
             messages,
@@ -431,7 +431,7 @@ class BaseIndexerToolkit(VectorStoreWrapperBase):
     def stepback_summary_index(self,
                                query: str,
                                messages: List[Dict[str, Any]] = [],
-                               collection_suffix: str = "",
+                               index_name: str = "",
                                filter: dict | str = {}, cut_off: float = 0.5,
                                search_top: int = 10, reranker: dict = {},
                                full_text_search: Optional[Dict[str, Any]] = None,
@@ -440,7 +440,7 @@ class BaseIndexerToolkit(VectorStoreWrapperBase):
                                **kwargs):
         """ Generates a summary of indexed documents using stepback technique."""

-        filter = self._build_collection_filter(filter, collection_suffix)
+        filter = self._build_collection_filter(filter, index_name)
         return super().stepback_summary(
             query,
             messages,
@@ -453,12 +453,12 @@ class BaseIndexerToolkit(VectorStoreWrapperBase):
             extended_search=extended_search
         )

-    def index_meta_init(self, collection_suffix: str, index_configuration: dict[str, Any]):
-        index_meta_raw = super().get_index_meta(collection_suffix)
+    def index_meta_init(self, index_name: str, index_configuration: dict[str, Any]):
+        index_meta_raw = super().get_index_meta(index_name)
         from ..runtime.langchain.interfaces.llm_processor import add_documents
         created_on = time.time()
         metadata = {
-            "collection": collection_suffix,
+            "collection": index_name,
             "type": IndexerKeywords.INDEX_META_TYPE.value,
             "indexed": 0,
             "state": IndexerKeywords.INDEX_META_IN_PROGRESS.value,
@@ -483,11 +483,11 @@ class BaseIndexerToolkit(VectorStoreWrapperBase):
             metadata["history"] = json.dumps(history)
             index_meta_ids = [index_meta_raw.get("id")]
         #
-        index_meta_doc = Document(page_content=f"{IndexerKeywords.INDEX_META_TYPE.value}_{collection_suffix}", metadata=metadata)
+        index_meta_doc = Document(page_content=f"{IndexerKeywords.INDEX_META_TYPE.value}_{index_name}", metadata=metadata)
         add_documents(vectorstore=self.vectorstore, documents=[index_meta_doc], ids=index_meta_ids)

-    def index_meta_update(self, collection_suffix: str, state: str, result: int):
-        index_meta_raw = super().get_index_meta(collection_suffix)
+    def index_meta_update(self, index_name: str, state: str, result: int):
+        index_meta_raw = super().get_index_meta(index_name)
         from ..runtime.langchain.interfaces.llm_processor import add_documents
         #
         if index_meta_raw:
--- a/alita_sdk/tools/code_indexer_toolkit.py
+++ b/alita_sdk/tools/code_indexer_toolkit.py
@@ -14,11 +14,11 @@ logger = logging.getLogger(__name__)


 class CodeIndexerToolkit(BaseIndexerToolkit):
-    def _get_indexed_data(self, collection_suffix: str):
+    def _get_indexed_data(self, index_name: str):
         if not self.vector_adapter:
             raise ToolException("Vector adapter is not initialized. "
                                 "Check your configuration: embedding_model and vectorstore_type.")
-        return self.vector_adapter.get_code_indexed_data(self, collection_suffix)
+        return self.vector_adapter.get_code_indexed_data(self, index_name)

     def key_fn(self, document: Document):
         return document.metadata.get('id')
--- a/alita_sdk/tools/confluence/api_wrapper.py
+++ b/alita_sdk/tools/confluence/api_wrapper.py
@@ -1674,7 +1674,7 @@ class ConfluenceAPIWrapper(NonCodeIndexerToolkit):
                 description="List of file extensions to skip when processing attachments: i.e. ['*.png', '*.jpg']",
                 default=[])),
             "include_comments": (Optional[bool], Field(description="Include comments.", default=False)),
-            "include_labels": (Optional[bool], Field(description="Include labels.", default=True)),
+            "include_labels": (Optional[bool], Field(description="Include labels.", default=False)),
             "ocr_languages": (Optional[str], Field(description="OCR languages for processing attachments.", default='eng')),
             "keep_markdown_format": (Optional[bool], Field(description="Keep the markdown format.", default=True)),
             "keep_newlines": (Optional[bool], Field(description="Keep newlines in the content.", default=True)),
--- a/alita_sdk/tools/elitea_base.py
+++ b/alita_sdk/tools/elitea_base.py
@@ -33,12 +33,12 @@ LoaderSchema = create_model(
 # Base Vector Store Schema Models
 BaseIndexParams = create_model(
     "BaseIndexParams",
-    collection_suffix=(str, Field(description="Suffix for collection name (max 7 characters) used to separate datasets", min_length=1, max_length=7)),
+    index_name=(str, Field(description="Index name (max 7 characters)", min_length=1, max_length=7)),
 )

 BaseCodeIndexParams = create_model(
     "BaseCodeIndexParams",
-    collection_suffix=(str, Field(description="Suffix for collection name (max 7 characters) used to separate datasets", min_length=1, max_length=7)),
+    index_name=(str, Field(description="Index name (max 7 characters)", min_length=1, max_length=7)),
     clean_index=(Optional[bool], Field(default=False, description="Optional flag to enforce clean existing index before indexing new data")),
     progress_step=(Optional[int], Field(default=5, ge=0, le=100,
                                         description="Optional step size for progress reporting during indexing")),
@@ -50,14 +50,14 @@ BaseCodeIndexParams = create_model(

 RemoveIndexParams = create_model(
     "RemoveIndexParams",
-    collection_suffix=(Optional[str], Field(description="Optional suffix for collection name (max 7 characters)", default="", max_length=7)),
+    index_name=(Optional[str], Field(description="Optional index name (max 7 characters)", default="", max_length=7)),
 )

 BaseSearchParams = create_model(
     "BaseSearchParams",
     query=(str, Field(description="Query text to search in the index")),
-    collection_suffix=(Optional[str], Field(
-        description="Optional suffix for collection name (max 7 characters). Leave empty to search across all datasets",
+    index_name=(Optional[str], Field(
+        description="Optional index name (max 7 characters). Leave empty to search across all datasets",
         default="", max_length=7)),
     filter=(Optional[dict], Field(
         description="Filter to apply to the search results. Can be a dictionary or a JSON string.",
@@ -87,7 +87,7 @@ BaseSearchParams = create_model(
 BaseStepbackSearchParams = create_model(
     "BaseStepbackSearchParams",
     query=(str, Field(description="Query text to search in the index")),
-    collection_suffix=(Optional[str], Field(description="Optional suffix for collection name (max 7 characters)", default="", max_length=7)),
+    index_name=(Optional[str], Field(description="Optional index name (max 7 characters)", default="", max_length=7)),
     messages=(Optional[List], Field(description="Chat messages for stepback search context", default=[])),
     filter=(Optional[dict], Field(
         description="Filter to apply to the search results. Can be a dictionary or a JSON string.",
@@ -324,12 +324,12 @@ class BaseVectorStoreToolApiWrapper(BaseToolApiWrapper):
         #
         docs = base_chunker(file_content_generator=docs, config=base_chunking_config)
         #
-        collection_suffix = kwargs.get("collection_suffix")
+        index_name = kwargs.get("index_name")
         progress_step = kwargs.get("progress_step")
         clean_index = kwargs.get("clean_index")
         vs = self._init_vector_store()
         #
-        return vs.index_documents(docs, collection_suffix=collection_suffix, progress_step=progress_step, clean_index=clean_index)
+        return vs.index_documents(docs, index_name=index_name, progress_step=progress_step, clean_index=clean_index)

     def _process_documents(self, documents: List[Document]) -> Generator[Document, None, None]:
         """
@@ -399,10 +399,10 @@ class BaseVectorStoreToolApiWrapper(BaseToolApiWrapper):
         )
         return self._vector_store

-    def remove_index(self, collection_suffix: str = ""):
+    def remove_index(self, index_name: str = ""):
         """Cleans the indexed data in the collection."""
-        self._init_vector_store()._clean_collection(collection_suffix=collection_suffix)
-        return (f"Collection '{collection_suffix}' has been removed from the vector store.\n"
+        self._init_vector_store()._clean_collection(index_name=index_name)
+        return (f"Collection '{index_name}' has been removed from the vector store.\n"
                 f"Available collections: {self.list_collections()}")

     def list_collections(self):
@@ -410,19 +410,19 @@ class BaseVectorStoreToolApiWrapper(BaseToolApiWrapper):
         vectorstore_wrapper = self._init_vector_store()
         return vectorstore_wrapper.list_collections()

-    def _build_collection_filter(self, filter: dict | str, collection_suffix: str = "") -> dict:
+    def _build_collection_filter(self, filter: dict | str, index_name: str = "") -> dict:
         """Builds a filter for the collection based on the provided suffix."""

         filter = filter if isinstance(filter, dict) else json.loads(filter)
-        if collection_suffix:
+        if index_name:
             filter.update({"collection": {
-                "$eq": collection_suffix.strip()
+                "$eq": index_name.strip()
             }})
         return filter

     def search_index(self,
                      query: str,
-                     collection_suffix: str = "",
+                     index_name: str = "",
                      filter: dict | str = {}, cut_off: float = 0.5,
                      search_top: int = 10, reranker: dict = {},
                      full_text_search: Optional[Dict[str, Any]] = None,
@@ -431,7 +431,7 @@ class BaseVectorStoreToolApiWrapper(BaseToolApiWrapper):
                      **kwargs):
         """ Searches indexed documents in the vector store."""
         vectorstore = self._init_vector_store()
-        filter = self._build_collection_filter(filter, collection_suffix)
+        filter = self._build_collection_filter(filter, index_name)
         found_docs = vectorstore.search_documents(
             query,
             doctype=self.doctype,
@@ -448,7 +448,7 @@ class BaseVectorStoreToolApiWrapper(BaseToolApiWrapper):
     def stepback_search_index(self,
                               query: str,
                               messages: List[Dict[str, Any]] = [],
-                              collection_suffix: str = "",
+                              index_name: str = "",
                               filter: dict | str = {}, cut_off: float = 0.5,
                               search_top: int = 10, reranker: dict = {},
                               full_text_search: Optional[Dict[str, Any]] = None,
@@ -457,7 +457,7 @@ class BaseVectorStoreToolApiWrapper(BaseToolApiWrapper):
                               **kwargs):
         """ Searches indexed documents in the vector store."""

-        filter = self._build_collection_filter(filter, collection_suffix)
+        filter = self._build_collection_filter(filter, index_name)
         vectorstore = self._init_vector_store()
         found_docs = vectorstore.stepback_search(
             query,
@@ -475,7 +475,7 @@ class BaseVectorStoreToolApiWrapper(BaseToolApiWrapper):
     def stepback_summary_index(self,
                                query: str,
                                messages: List[Dict[str, Any]] = [],
-                               collection_suffix: str = "",
+                               index_name: str = "",
                                filter: dict | str = {}, cut_off: float = 0.5,
                                search_top: int = 10, reranker: dict = {},
                                full_text_search: Optional[Dict[str, Any]] = None,
@@ -484,7 +484,7 @@ class BaseVectorStoreToolApiWrapper(BaseToolApiWrapper):
                                **kwargs):
         """ Generates a summary of indexed documents using stepback technique."""
         vectorstore = self._init_vector_store()
-        filter = self._build_collection_filter(filter, collection_suffix)
+        filter = self._build_collection_filter(filter, index_name)

         found_docs = vectorstore.stepback_summary(
             query,
@@ -537,7 +537,7 @@ class BaseVectorStoreToolApiWrapper(BaseToolApiWrapper):
                 "args_schema": RemoveIndexParams
             },
             {
-                "name": "list_collections",
+                "name": "list_indexes",
                 "mode": "list_collections",
                 "ref": self.list_collections,
                 "description": self.list_collections.__doc__,
@@ -655,7 +655,7 @@ class BaseCodeToolApiWrapper(BaseVectorStoreToolApiWrapper):
         return parse_code_files_for_db(file_content_generator())

     def index_data(self,
-                   collection_suffix: str,
+                   index_name: str,
                    branch: Optional[str] = None,
                    whitelist: Optional[List[str]] = None,
                    blacklist: Optional[List[str]] = None,
@@ -669,7 +669,7 @@ class BaseCodeToolApiWrapper(BaseVectorStoreToolApiWrapper):
         )
         vectorstore = self._init_vector_store()
         clean_index = kwargs.get('clean_index', False)
-        return vectorstore.index_documents(documents, collection_suffix=collection_suffix,
+        return vectorstore.index_documents(documents, index_name=index_name,
                                            clean_index=clean_index, is_code=True,
                                            progress_step=kwargs.get('progress_step', 5))

--- a/alita_sdk/tools/non_code_indexer_toolkit.py
+++ b/alita_sdk/tools/non_code_indexer_toolkit.py
@@ -6,11 +6,11 @@ from alita_sdk.tools.base_indexer_toolkit import BaseIndexerToolkit


 class NonCodeIndexerToolkit(BaseIndexerToolkit):
-    def _get_indexed_data(self, collection_suffix: str):
+    def _get_indexed_data(self, index_name: str):
         if not self.vector_adapter:
             raise ToolException("Vector adapter is not initialized. "
                                 "Check your configuration: embedding_model and vectorstore_type.")
-        return self.vector_adapter.get_indexed_data(self, collection_suffix)
+        return self.vector_adapter.get_indexed_data(self, index_name)

     def key_fn(self, document: Document):
         return document.metadata.get('id')
--- a/alita_sdk/tools/vector_adapters/VectorStoreAdapter.py
+++ b/alita_sdk/tools/vector_adapters/VectorStoreAdapter.py
@@ -26,12 +26,12 @@ class VectorStoreAdapter(ABC):
         pass

     @abstractmethod
-    def get_indexed_ids(self, vectorstore_wrapper, collection_suffix: Optional[str] = '') -> List[str]:
+    def get_indexed_ids(self, vectorstore_wrapper, index_name: Optional[str] = '') -> List[str]:
         """Get all indexed document IDs from vectorstore"""
         pass

     @abstractmethod
-    def clean_collection(self, vectorstore_wrapper, collection_suffix: str = ''):
+    def clean_collection(self, vectorstore_wrapper, index_name: str = ''):
         """Clean the vectorstore collection by deleting all indexed data."""
         pass

@@ -41,7 +41,7 @@
         pass

     @abstractmethod
-    def get_code_indexed_data(self, vectorstore_wrapper, collection_suffix) -> Dict[str, Dict[str, Any]]:
+    def get_code_indexed_data(self, vectorstore_wrapper, index_name) -> Dict[str, Dict[str, Any]]:
         """Get all indexed data from vectorstore for code content"""
         pass

@@ -51,7 +51,7 @@
         pass

     @abstractmethod
-    def get_index_meta(self, vectorstore_wrapper, collection_suffix: str) -> List[Dict[str, Any]]:
+    def get_index_meta(self, vectorstore_wrapper, index_name: str) -> List[Dict[str, Any]]:
         """Get all index_meta entries from the vector store."""
         pass

@@ -106,7 +106,7 @@ class PGVectorAdapter(VectorStoreAdapter):
             session.commit()
             logger.info(f"Schema '{schema_name}' has been dropped.")

-    def get_indexed_ids(self, vectorstore_wrapper, collection_suffix: Optional[str] = '') -> List[str]:
+    def get_indexed_ids(self, vectorstore_wrapper, index_name: Optional[str] = '') -> List[str]:
         """Get all indexed document IDs from PGVector"""
         from sqlalchemy.orm import Session
         from sqlalchemy import func
@@ -116,10 +116,10 @@ class PGVectorAdapter(VectorStoreAdapter):
             with Session(store.session_maker.bind) as session:
                 # Start building the query
                 query = session.query(store.EmbeddingStore.id)
-                # Apply filter only if collection_suffix is provided
-                if collection_suffix:
+                # Apply filter only if index_name is provided
+                if index_name:
                     query = query.filter(
-                        func.jsonb_extract_path_text(store.EmbeddingStore.cmetadata, 'collection') == collection_suffix
+                        func.jsonb_extract_path_text(store.EmbeddingStore.cmetadata, 'collection') == index_name
                     )
                 ids = query.all()
                 return [str(id_tuple[0]) for id_tuple in ids]
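
The PGVector scoping that backs all of the renamed parameters is visible in this hunk: documents are selected by comparing the JSONB `collection` key of `cmetadata` as text. A condensed, standalone restatement in SQLAlchemy (`store` stands in for the PGVector store object with its `session_maker` and `EmbeddingStore`):

    from sqlalchemy import func
    from sqlalchemy.orm import Session

    def indexed_ids(store, index_name: str = "") -> list[str]:
        with Session(store.session_maker.bind) as session:
            query = session.query(store.EmbeddingStore.id)
            if index_name:  # no name means: IDs across all indexes
                query = query.filter(
                    func.jsonb_extract_path_text(
                        store.EmbeddingStore.cmetadata, 'collection'
                    ) == index_name
                )
            return [str(row[0]) for row in query.all()]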
@@ -127,18 +127,18 @@ class PGVectorAdapter(VectorStoreAdapter):
             logger.error(f"Failed to get indexed IDs from PGVector: {str(e)}")
             return []

-    def clean_collection(self, vectorstore_wrapper, collection_suffix: str = ''):
+    def clean_collection(self, vectorstore_wrapper, index_name: str = ''):
         """Clean the vectorstore collection by deleting all indexed data."""
         # This logic deletes all data from the vectorstore collection without removal of collection.
         # Collection itself remains available for future indexing.
-        vectorstore_wrapper.vectorstore.delete(ids=self.get_indexed_ids(vectorstore_wrapper, collection_suffix))
+        vectorstore_wrapper.vectorstore.delete(ids=self.get_indexed_ids(vectorstore_wrapper, index_name))

     def is_vectorstore_type(self, vectorstore) -> bool:
         """Check if the vectorstore is a PGVector store."""
         return hasattr(vectorstore, 'session_maker') and hasattr(vectorstore, 'EmbeddingStore')

-    def get_indexed_data(self, vectorstore_wrapper, collection_suffix: str)-> Dict[str, Dict[str, Any]]:
-        """Get all indexed data from PGVector for non-code content per collection_suffix."""
+    def get_indexed_data(self, vectorstore_wrapper, index_name: str)-> Dict[str, Dict[str, Any]]:
+        """Get all indexed data from PGVector for non-code content per index_name."""
         from sqlalchemy.orm import Session
         from sqlalchemy import func
         from ...runtime.utils.utils import IndexerKeywords
@@ -154,7 +154,7 @@ class PGVectorAdapter(VectorStoreAdapter):
                 store.EmbeddingStore.document,
                 store.EmbeddingStore.cmetadata
             ).filter(
-                func.jsonb_extract_path_text(store.EmbeddingStore.cmetadata, 'collection') == collection_suffix
+                func.jsonb_extract_path_text(store.EmbeddingStore.cmetadata, 'collection') == index_name
             ).all()

             # Process the retrieved data
@@ -187,7 +187,7 @@ class PGVectorAdapter(VectorStoreAdapter):

         return result

-    def get_code_indexed_data(self, vectorstore_wrapper, collection_suffix: str) -> Dict[str, Dict[str, Any]]:
+    def get_code_indexed_data(self, vectorstore_wrapper, index_name: str) -> Dict[str, Dict[str, Any]]:
         """Get all indexed code data from PGVector per collection suffix."""
         from sqlalchemy.orm import Session
         from sqlalchemy import func
@@ -202,7 +202,7 @@ class PGVectorAdapter(VectorStoreAdapter):
                 store.EmbeddingStore.id,
                 store.EmbeddingStore.cmetadata
             ).filter(
-                func.jsonb_extract_path_text(store.EmbeddingStore.cmetadata, 'collection') == collection_suffix
+                func.jsonb_extract_path_text(store.EmbeddingStore.cmetadata, 'collection') == index_name
             ).all()

             for db_id, meta in docs:
@@ -272,7 +272,7 @@ class PGVectorAdapter(VectorStoreAdapter):
         except Exception as e:
             logger.error(f"Failed to update collection for entry ID {entry_id}: {str(e)}")

-    def get_index_meta(self, vectorstore_wrapper, collection_suffix: str) -> List[Dict[str, Any]]:
+    def get_index_meta(self, vectorstore_wrapper, index_name: str) -> List[Dict[str, Any]]:
         from sqlalchemy.orm import Session
         from sqlalchemy import func

@@ -285,7 +285,7 @@ class PGVectorAdapter(VectorStoreAdapter):
                 store.EmbeddingStore.cmetadata
             ).filter(
                 store.EmbeddingStore.cmetadata['type'].astext == IndexerKeywords.INDEX_META_TYPE.value,
-                func.jsonb_extract_path_text(store.EmbeddingStore.cmetadata, 'collection') == collection_suffix
+                func.jsonb_extract_path_text(store.EmbeddingStore.cmetadata, 'collection') == index_name
             ).all()
             result = []
             for id, document, cmetadata in meta:
@@ -312,7 +312,7 @@ class ChromaAdapter(VectorStoreAdapter):
     def remove_collection(self, vectorstore_wrapper, collection_name: str):
         vectorstore_wrapper.vectorstore.delete_collection()

-    def get_indexed_ids(self, vectorstore_wrapper, collection_suffix: Optional[str] = '') -> List[str]:
+    def get_indexed_ids(self, vectorstore_wrapper, index_name: Optional[str] = '') -> List[str]:
         """Get all indexed document IDs from Chroma"""
         try:
             data = vectorstore_wrapper.vectorstore.get(include=[]) # Only get IDs, no metadata
@@ -321,9 +321,9 @@ class ChromaAdapter(VectorStoreAdapter):
             logger.error(f"Failed to get indexed IDs from Chroma: {str(e)}")
             return []

-    def clean_collection(self, vectorstore_wrapper, collection_suffix: str = ''):
+    def clean_collection(self, vectorstore_wrapper, index_name: str = ''):
         """Clean the vectorstore collection by deleting all indexed data."""
-        vectorstore_wrapper.vectorstore.delete(ids=self.get_indexed_ids(vectorstore_wrapper, collection_suffix))
+        vectorstore_wrapper.vectorstore.delete(ids=self.get_indexed_ids(vectorstore_wrapper, index_name))

     def get_indexed_data(self, vectorstore_wrapper):
         """Get all indexed data from Chroma for non-code content"""
@@ -361,7 +361,7 @@ class ChromaAdapter(VectorStoreAdapter):

         return result

-    def get_code_indexed_data(self, vectorstore_wrapper, collection_suffix) -> Dict[str, Dict[str, Any]]:
+    def get_code_indexed_data(self, vectorstore_wrapper, index_name) -> Dict[str, Dict[str, Any]]:
         """Get all indexed code data from Chroma."""
         result = {}
         try:
@@ -391,7 +391,7 @@ class ChromaAdapter(VectorStoreAdapter):
         # This is a simplified implementation - in practice, you might need more complex logic
         logger.warning("add_to_collection for Chroma is not fully implemented yet")

-    def get_index_meta(self, vectorstore_wrapper, collection_suffix: str) -> List[Dict[str, Any]]:
+    def get_index_meta(self, vectorstore_wrapper, index_name: str) -> List[Dict[str, Any]]:
         logger.warning("get_index_meta for Chroma is not implemented yet")


--- a/alita_sdk-0.3.374.dist-info/METADATA
+++ b/alita_sdk-0.3.375.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: alita_sdk
-Version: 0.3.374
+Version: 0.3.375
 Summary: SDK for building langchain agents using resources from Alita
 Author-email: Artem Rozumenko <artyom.rozumenko@gmail.com>, Mikalai Biazruchka <mikalai_biazruchka@epam.com>, Roman Mitusov <roman_mitusov@epam.com>, Ivan Krakhmaliuk <lifedj27@gmail.com>, Artem Dubrovskiy <ad13box@gmail.com>
 License-Expression: Apache-2.0
--- a/alita_sdk-0.3.374.dist-info/RECORD
+++ b/alita_sdk-0.3.375.dist-info/RECORD
@@ -123,8 +123,8 @@ alita_sdk/runtime/tools/prompt.py,sha256=nJafb_e5aOM1Rr3qGFCR-SKziU9uCsiP2okIMs9
 alita_sdk/runtime/tools/router.py,sha256=p7e0tX6YAWw2M2Nq0A_xqw1E2P-Xz1DaJvhUstfoZn4,1584
 alita_sdk/runtime/tools/sandbox.py,sha256=0OjCNsDVO1N0cFNEFVr6GVICSaqGWesUzF6LcYg-Hn0,11349
 alita_sdk/runtime/tools/tool.py,sha256=lE1hGi6qOAXG7qxtqxarD_XMQqTghdywf261DZawwno,5631
-alita_sdk/runtime/tools/vectorstore.py,sha256=8vRhi1lGFEs3unvnflEi2p59U2MfV32lStpEizpDms0,34467
-alita_sdk/runtime/tools/vectorstore_base.py,sha256=wixvgLrC2tQOeIjFMCD-7869K7YfERzk2Tzmo-fgsTE,28350
+alita_sdk/runtime/tools/vectorstore.py,sha256=FsnxdnvMK5bUEFxz0eeSHeNpVOk2gxOeXjoSlvCo8rs,34327
+alita_sdk/runtime/tools/vectorstore_base.py,sha256=lNz6bOMpHOY8JiHT7BkoDbyj3kLykcKlCx4zOu_IgPE,28252
 alita_sdk/runtime/utils/AlitaCallback.py,sha256=E4LlSBuCHWiUq6W7IZExERHZY0qcmdjzc_rJlF2iQIw,7356
 alita_sdk/runtime/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 alita_sdk/runtime/utils/constants.py,sha256=Xntx1b_uxUzT4clwqHA_U6K8y5bBqf_4lSQwXdcWrp4,13586
@@ -136,10 +136,10 @@ alita_sdk/runtime/utils/toolkit_runtime.py,sha256=MU63Fpxj0b5_r1IUUc0Q3-PN9VwL7r
 alita_sdk/runtime/utils/toolkit_utils.py,sha256=I9QFqnaqfVgN26LUr6s3XlBlG6y0CoHURnCzG7XcwVs,5311
 alita_sdk/runtime/utils/utils.py,sha256=BVEVLkYiiotcUD0XsHyx-wACpHfALsQg7PLZpObqvK8,1008
 alita_sdk/tools/__init__.py,sha256=jUj1ztC2FbkIUB-YYmiqaz_rqW7Il5kWzDPn1mJmj5w,10545
-alita_sdk/tools/base_indexer_toolkit.py,sha256=R8PQ1FZijtCT6LPuma68B1X6x0umH7gyROKwtp0xabw,27044
-alita_sdk/tools/code_indexer_toolkit.py,sha256=6QvI1by0OFdnKTx5TfNoDJjnMrvnTi9T56xaDxzeleU,7306
-alita_sdk/tools/elitea_base.py,sha256=up3HshASSDfjlHV_HPrs1aD4JIwwX0Ug26WGTzgIYvY,34724
-alita_sdk/tools/non_code_indexer_toolkit.py,sha256=B3QvhpT1F9QidkCcsOi3J_QrTOaNlTxqWFwe90VivQQ,1329
+alita_sdk/tools/base_indexer_toolkit.py,sha256=_Q8K5fVg5gBFlZ94wYvBrPWJ56VobnNrfr15Knyobu4,26632
+alita_sdk/tools/code_indexer_toolkit.py,sha256=p3zVnCnQTUf7JUGra9Rl6GEK2W1-hvvz0Xsgz0v0muM,7292
+alita_sdk/tools/elitea_base.py,sha256=nV4sNVctJGgLqWTrqkI2iMc07k69GQ6uF5hXrLmsshg,34413
+alita_sdk/tools/non_code_indexer_toolkit.py,sha256=6Lrqor1VeSLbPLDHAfg_7UAUqKFy1r_n6bdsc4-ak98,1315
 alita_sdk/tools/ado/__init__.py,sha256=NnNYpNFW0_N_v1td_iekYOoQRRB7PIunbpT2f9ZFJM4,1201
 alita_sdk/tools/ado/utils.py,sha256=PTCludvaQmPLakF2EbCGy66Mro4-rjDtavVP-xcB2Wc,1252
 alita_sdk/tools/ado/repos/__init__.py,sha256=rR-c40Pw_WpQeOXtEuS-COvgRUs1_cTkcJfHlK09N88,5339
@@ -231,7 +231,7 @@ alita_sdk/tools/code/loaders/codesearcher.py,sha256=XoXXZtIQZhvjIwZlnl_4wVGHC-3s
 alita_sdk/tools/code/sonar/__init__.py,sha256=iPqj2PnUY4-btJjaDeWIPdn-c9L_uCr_qOoP_uwRoXw,3360
 alita_sdk/tools/code/sonar/api_wrapper.py,sha256=nNqxcWN_6W8c0ckj-Er9HkNuAdgQLoWBXh5UyzNutis,2653
 alita_sdk/tools/confluence/__init__.py,sha256=zRnPBM1c7VTRTS955HNc7AEGV5t8ACc2f9wBXmmeXao,6845
-alita_sdk/tools/confluence/api_wrapper.py,sha256=cHIr0EnXZVGQMepcaIcFgMfyTKjlkKGbAd0z79pf-bo,89544
+alita_sdk/tools/confluence/api_wrapper.py,sha256=1HZftLQFzpNwGSN-9LPp8RQr1X-0fsKzmFlc-WEadZU,89545
 alita_sdk/tools/confluence/loader.py,sha256=4bf5qrJMEiJzuZp2NlxO2XObLD1w7fxss_WyMUpe8sg,9290
 alita_sdk/tools/confluence/utils.py,sha256=Lxo6dBD0OlvM4o0JuK6qeB_4LV9BptiwJA9e1vqNcDw,435
 alita_sdk/tools/custom_open_api/__init__.py,sha256=9aT5SPNPWcJC6jMZEM-3rUCXVULj_3-qJLQKmnreKNo,2537
@@ -332,7 +332,7 @@ alita_sdk/tools/testrail/api_wrapper.py,sha256=tQcGlFJmftvs5ZiO4tsP19fCo4CrJeq_U
 alita_sdk/tools/utils/__init__.py,sha256=xB9OQgW65DftadrSpoAAitnEIbIXZKBOCji0NDe7FRM,3923
 alita_sdk/tools/utils/available_tools_decorator.py,sha256=IbrdfeQkswxUFgvvN7-dyLMZMyXLiwvX7kgi3phciCk,273
 alita_sdk/tools/utils/content_parser.py,sha256=7ohj8HeL_-rmc-Fv0TS8IpxIQC8tOpfuhyT3XlWx-gQ,15368
-alita_sdk/tools/vector_adapters/VectorStoreAdapter.py,sha256=p_9Cu5eausnfiKNsitbVxwu5eimZHRv3R-OMw7lBrts,19173
+alita_sdk/tools/vector_adapters/VectorStoreAdapter.py,sha256=eDezsk41b5ql3CISQ6Xk-qE3foO-PjY0VSWeZnVxHPE,19019
 alita_sdk/tools/vector_adapters/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 alita_sdk/tools/xray/__init__.py,sha256=eOMWP8VamFbbJgt1xrGpGPqB9ByOTA0Cd3LCaETzGk4,4376
 alita_sdk/tools/xray/api_wrapper.py,sha256=uj5kzUgPdo_Oct9WCNMOpkb6o_3L7J4LZrEGtrwYMmc,30157
@@ -353,8 +353,8 @@ alita_sdk/tools/zephyr_scale/api_wrapper.py,sha256=kT0TbmMvuKhDUZc0i7KO18O38JM9S
 alita_sdk/tools/zephyr_squad/__init__.py,sha256=0ne8XLJEQSLOWfzd2HdnqOYmQlUliKHbBED5kW_Vias,2895
 alita_sdk/tools/zephyr_squad/api_wrapper.py,sha256=kmw_xol8YIYFplBLWTqP_VKPRhL_1ItDD0_vXTe_UuI,14906
 alita_sdk/tools/zephyr_squad/zephyr_squad_cloud_client.py,sha256=R371waHsms4sllHCbijKYs90C-9Yu0sSR3N4SUfQOgU,5066
-alita_sdk-0.3.374.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
-alita_sdk-0.3.374.dist-info/METADATA,sha256=b-L7XNDZ_LNpW-hoB_pDqOchYCdw9fOUStiXnQfSxUM,19071
-alita_sdk-0.3.374.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-alita_sdk-0.3.374.dist-info/top_level.txt,sha256=0vJYy5p_jK6AwVb1aqXr7Kgqgk3WDtQ6t5C-XI9zkmg,10
-alita_sdk-0.3.374.dist-info/RECORD,,
+alita_sdk-0.3.375.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+alita_sdk-0.3.375.dist-info/METADATA,sha256=h4sLfds9uE3O5UTOZL6pzbssZ3EDLx9szzmV_y60L-Q,19071
+alita_sdk-0.3.375.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+alita_sdk-0.3.375.dist-info/top_level.txt,sha256=0vJYy5p_jK6AwVb1aqXr7Kgqgk3WDtQ6t5C-XI9zkmg,10
+alita_sdk-0.3.375.dist-info/RECORD,,