alita-sdk 0.3.354__py3-none-any.whl → 0.3.356__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of alita-sdk might be problematic.

@@ -63,7 +63,7 @@ class AlitaTableLoader(BaseLoader):
     "source": f'{self.file_path}:{idx+1}',
     "table_source": self.file_path,
 }
-if len(docs) == 0:
+if len(docs) == 0 and not self.raw_content:
     header_metadata = metadata.copy()
     header_metadata["header"] = "true"
     header = "\t".join([str(value) for value in row.keys()])
@@ -20,7 +20,6 @@ class ArtifactToolkit(BaseToolkit):
         ArtifactToolkit.toolkit_max_length = get_max_toolkit_length(selected_tools)
         return create_model(
             "artifact",
-            # client = (Any, FieldInfo(description="Client object", required=True, autopopulate=True)),
             bucket = (str, FieldInfo(description="Bucket name", json_schema_extra={'toolkit_name': True, 'max_toolkit_length': ArtifactToolkit.toolkit_max_length})),
             selected_tools=(List[Literal[tuple(selected_tools)]], Field(default=[], json_schema_extra={'args_schemas': selected_tools})),
             # indexer settings
@@ -187,12 +187,12 @@ class VectorStoreWrapperBase(BaseToolApiWrapper):
         """
         Clean the vectorstore collection by deleting all indexed data.
         """
-        self._log_data(
+        self._log_tool_event(
             f"Cleaning collection '{self.dataset}'",
             tool_name="_clean_collection"
         )
         self.vector_adapter.clean_collection(self, collection_suffix)
-        self._log_data(
+        self._log_tool_event(
             f"Collection '{self.dataset}' has been cleaned. ",
             tool_name="_clean_collection"
         )
@@ -212,10 +212,10 @@ class VectorStoreWrapperBase(BaseToolApiWrapper):

     def _clean_index(self, collection_suffix: str):
         logger.info("Cleaning index before re-indexing all documents.")
-        self._log_data("Cleaning index before re-indexing all documents. Previous index will be removed", tool_name="index_documents")
+        self._log_tool_event("Cleaning index before re-indexing all documents. Previous index will be removed", tool_name="index_documents")
         try:
             self._clean_collection(collection_suffix)
-            self._log_data("Previous index has been removed",
+            self._log_tool_event("Previous index has been removed",
                 tool_name="index_documents")
         except Exception as e:
             logger.warning(f"Failed to clean index: {str(e)}. Continuing with re-indexing.")
@@ -261,7 +261,7 @@ class VectorStoreWrapperBase(BaseToolApiWrapper):
                 if percent >= next_progress_point:
                     msg = f"Indexing progress: {percent}%. Processed {documents_count} of {total_docs} documents."
                     logger.debug(msg)
-                    self._log_data(msg)
+                    self._log_tool_event(msg)
                     next_progress_point += progress_step
         except Exception:
             from traceback import format_exc
@@ -569,21 +569,6 @@ class VectorStoreWrapperBase(BaseToolApiWrapper):
         ])
         return result.content

-    def _log_data(self, message: str, tool_name: str = "index_data"):
-        """Log data and dispatch custom event for indexing progress"""
-
-        try:
-            dispatch_custom_event(
-                name="thinking_step",
-                data={
-                    "message": message,
-                    "tool_name": tool_name,
-                    "toolkit": "vectorstore",
-                },
-            )
-        except Exception as e:
-            logger.warning(f"Failed to dispatch progress event: {str(e)}")
-
     def get_available_tools(self):
         return [
             {
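Every `_log_data` call site in this release now routes through `_log_tool_event`, which the toolkit base classes already provide, so the private helper removed above became dead code. A minimal sketch of what such a method plausibly looks like, assuming it keeps the deleted helper's dispatch_custom_event contract; the "thinking_step" event name and payload shape come from the removed code, while the method name and default tool_name come from call sites elsewhere in this diff:

    import logging

    from langchain_core.callbacks import dispatch_custom_event

    logger = logging.getLogger(__name__)

    class ToolEventMixin:
        def _log_tool_event(self, message: str, tool_name: str = "index_data"):
            """Dispatch a progress event; never let logging break indexing."""
            try:
                dispatch_custom_event(
                    name="thinking_step",
                    data={"message": message, "tool_name": tool_name, "toolkit": "vectorstore"},
                )
            except Exception as e:
                logger.warning(f"Failed to dispatch progress event: {str(e)}")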
@@ -24,7 +24,8 @@ from msrest.authentication import BasicAuthentication
 from pydantic import Field, PrivateAttr, create_model, model_validator, SecretStr

 from ..utils import extract_old_new_pairs, generate_diff, get_content_from_generator
-from ...elitea_base import BaseCodeToolApiWrapper
+from ...code_indexer_toolkit import CodeIndexerToolkit
+from ...utils.available_tools_decorator import extend_with_parent_available_tools

 logger = logging.getLogger(__name__)

@@ -242,7 +243,7 @@ class ArgsSchema(Enum):
     )


-class ReposApiWrapper(BaseCodeToolApiWrapper):
+class ReposApiWrapper(CodeIndexerToolkit):
     # TODO use ado_repos_configuration fields
     organization_url: Optional[str]
     project: Optional[str]
@@ -293,7 +294,7 @@ class ReposApiWrapper(BaseCodeToolApiWrapper):
         if not branch_exists(active_branch):
             raise ToolException(f"The active branch '{active_branch}' does not exist.")

-        return values
+        return super().validate_toolkit(values)

     def _get_commits(self, file_path: str, branch: str, top: int = None) -> List[GitCommitRef]:
         """
@@ -1174,9 +1175,10 @@ class ReposApiWrapper(BaseCodeToolApiWrapper):
         except Exception as e:
             return ToolException(f"Unable to retrieve commits due to error:\n{str(e)}")

+    @extend_with_parent_available_tools
     def get_available_tools(self):
         """Return a list of available tools."""
-        tools = [
+        return [
             {
                 "ref": self.list_branches_in_repo,
                 "name": "list_branches_in_repo",
@@ -1267,8 +1269,4 @@ class ReposApiWrapper(BaseCodeToolApiWrapper):
                 "description": self.get_commits.__doc__,
                 "args_schema": ArgsSchema.GetCommits.value,
             },
-        ]  # Add vector search tools from base class (includes index_data + search tools)
-        vector_search_tools = self._get_vector_search_tools()
-        tools.extend(vector_search_tools)
-
-        return tools
+        ]
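`get_available_tools` now returns only the wrapper's own tools; the `extend_with_parent_available_tools` decorator (from `alita_sdk/tools/utils/available_tools_decorator.py`, 273 bytes per the RECORD) appends the parent class's tools, replacing the hand-rolled `tools.extend(...)` blocks. The decorator's actual body is not shown in this diff; a minimal sketch under the assumption that it simply concatenates the wrapped result with the parent implementation's:

    from functools import wraps

    def extend_with_parent_available_tools(func):
        """Append the parent's get_available_tools() to the wrapped list (sketch)."""
        @wraps(func)
        def wrapper(self):
            own_tools = func(self)
            # Assumption: the direct base class (e.g. CodeIndexerToolkit) provides
            # get_available_tools() contributing the index/search tools.
            parent_tools = super(self.__class__, self).get_available_tools()
            return own_tools + parent_tools
        return wrapper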
@@ -159,20 +159,73 @@ class BaseIndexerToolkit(VectorStoreWrapperBase):
         self._log_tool_event(f"Indexing data into collection with suffix '{collection_suffix}'. It can take some time...")
         self._log_tool_event(f"Loading the documents to index...{kwargs}")
         documents = self._base_loader(**kwargs)
+        documents = list(documents)  # consume/exhaust generator to count items
+        documents_count = len(documents)
+        documents = (doc for doc in documents)
         self._log_tool_event(f"Base documents were pre-loaded. "
                              f"Search for possible document duplicates and remove them from the indexing list...")
-        documents = self._reduce_duplicates(documents, collection_suffix)
+        # documents = self._reduce_duplicates(documents, collection_suffix)
         self._log_tool_event(f"Duplicates were removed. "
                              f"Processing documents to collect dependencies and prepare them for indexing...")
-        documents = self._extend_data(documents)  # update content of not-reduced base document if needed (for sharepoint and similar)
-        documents = self._collect_dependencies(documents)  # collect dependencies for base documents
-        self._log_tool_event(f"Documents were processed. "
-                             f"Applying chunking tool '{chunking_tool}' if specified and preparing documents for indexing...")
-        documents = self._apply_loaders_chunkers(documents, chunking_tool, chunking_config)
-        list_documents = list(documents)
-        self._clean_metadata(list_documents)
-        self._log_tool_event(f"Documents are ready for indexing. Total documents to index: {len(list_documents)}")
-        return self._save_index(list_documents, collection_suffix=collection_suffix, progress_step=progress_step)
+        return self._save_index_generator(documents, documents_count, chunking_tool, chunking_config, collection_suffix=collection_suffix, progress_step=progress_step)
+
+    def _save_index_generator(self, base_documents: Generator[Document, None, None], base_total: int, chunking_tool, chunking_config, collection_suffix: Optional[str] = None, progress_step: int = 20):
+        self._log_tool_event(f"Base documents are ready for indexing. {base_total} base documents in total to index.")
+        from ..runtime.langchain.interfaces.llm_processor import add_documents
+        #
+        base_doc_counter = 0
+        total_counter = 0
+        pg_vector_add_docs_chunk = []
+        for base_doc in base_documents:
+            base_doc_counter += 1
+            self._log_tool_event(f"Processing dependent documents for base documents #{base_doc_counter}.")
+
+            # (base_doc for _ in range(1)) - wrap single base_doc to Generator in order to reuse existing code
+            documents = self._extend_data((base_doc for _ in range(1)))  # update content of not-reduced base document if needed (for sharepoint and similar)
+            documents = self._collect_dependencies(documents)  # collect dependencies for base documents
+            self._log_tool_event(f"Dependent documents were processed. "
+                                 f"Applying chunking tool '{chunking_tool}' if specified and preparing documents for indexing...")
+            documents = self._apply_loaders_chunkers(documents, chunking_tool, chunking_config)
+            self._clean_metadata(documents)

+            logger.debug(f"Indexing base document #{base_doc_counter}: {base_doc} and all dependent documents: {documents}")
+
+            dependent_docs_counter = 0
+            #
+            for doc in documents:
+                if not doc.page_content:
+                    # To avoid case when all documents have empty content
+                    # See llm_processor.add_documents which exclude metadata of docs with empty content
+                    continue
+                #
+                if 'id' not in doc.metadata or 'updated_on' not in doc.metadata:
+                    logger.warning(f"Document is missing required metadata field 'id' or 'updated_on': {doc.metadata}")
+                #
+                # if collection_suffix is provided, add it to metadata of each document
+                if collection_suffix:
+                    if not doc.metadata.get('collection'):
+                        doc.metadata['collection'] = collection_suffix
+                    else:
+                        doc.metadata['collection'] += f";{collection_suffix}"
+                #
+                try:
+                    pg_vector_add_docs_chunk.append(doc)
+                    dependent_docs_counter += 1
+                    if len(pg_vector_add_docs_chunk) >= self.max_docs_per_add:
+                        add_documents(vectorstore=self.vectorstore, documents=pg_vector_add_docs_chunk)
+                        self._log_tool_event(f"{len(pg_vector_add_docs_chunk)} documents have been indexed. Continuing...")
+                        pg_vector_add_docs_chunk = []
+                except Exception:
+                    from traceback import format_exc
+                    logger.error(f"Error: {format_exc()}")
+                    return {"status": "error", "message": f"Error: {format_exc()}"}
+            msg = f"Indexed base document #{base_doc_counter} out of {base_total} (with {dependent_docs_counter} dependencies)."
+            logger.debug(msg)
+            self._log_tool_event(msg)
+            total_counter += dependent_docs_counter
+        if pg_vector_add_docs_chunk:
+            add_documents(vectorstore=self.vectorstore, documents=pg_vector_add_docs_chunk)
+        return {"status": "ok", "message": f"successfully indexed {total_counter} documents"}

     def _apply_loaders_chunkers(self, documents: Generator[Document, None, None], chunking_tool: str=None, chunking_config=None) -> Generator[Document, None, None]:
         from ..tools.chunkers import __all__ as chunkers
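The count-then-restream step at the top of `index_data` materializes the loader output once so a reliable total is known before streaming resumes; this trades memory (all base documents held at once) for an accurate progress denominator. A self-contained sketch of the pattern, with illustrative names not taken from the SDK:

    from typing import Generator, Iterable, Tuple

    def count_and_restream(items: Iterable[str]) -> Tuple[int, Generator[str, None, None]]:
        """Materialize a lazy iterable to learn its length, then hand back
        a generator so downstream code can keep a streaming interface."""
        materialized = list(items)  # exhausts the source generator
        return len(materialized), (item for item in materialized)

    total, stream = count_and_restream(s for s in ("a", "b", "c"))
    print(total)         # 3
    print(list(stream))  # ['a', 'b', 'c']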
@@ -222,11 +275,12 @@ class BaseIndexerToolkit(VectorStoreWrapperBase):
             dep.metadata[IndexerKeywords.PARENT.value] = document.metadata.get('id', None)
             yield dep

-    def _clean_metadata(self, documents: list[Document]):
+    def _clean_metadata(self, documents: Generator[Document, None, None]):
         for document in documents:
             remove_keys = self._remove_metadata_keys()
             for key in remove_keys:
                 document.metadata.pop(key, None)
+            yield document

     def _reduce_duplicates(
         self,
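With this change `_clean_metadata` goes from an eager in-place pass over a list to a lazy generator: nothing runs until it is iterated, so its return value must be consumed for any keys to actually be removed. A minimal sketch of that behavioural difference, using plain dicts as hypothetical stand-ins for Document metadata:

    def clean_eager(docs: list) -> None:
        for doc in docs:
            doc.pop("tmp", None)   # runs immediately, mutates in place

    def clean_lazy(docs):
        for doc in docs:
            doc.pop("tmp", None)   # runs only when the generator is consumed
            yield doc

    docs = [{"tmp": 1, "id": 1}]
    clean_lazy(docs)               # no effect: the generator is never iterated
    docs = list(clean_lazy(docs))  # keys are removed here
    print(docs)                    # [{'id': 1}]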
@@ -235,11 +289,11 @@ class BaseIndexerToolkit(VectorStoreWrapperBase):
         log_msg: str = "Verification of documents to index started"
     ) -> Generator[Document, None, None]:
         """Generic duplicate reduction logic for documents."""
-        self._log_data(log_msg, tool_name="index_documents")
+        self._log_tool_event(log_msg, tool_name="index_documents")
         indexed_data = self._get_indexed_data(collection_suffix)
         indexed_keys = set(indexed_data.keys())
         if not indexed_keys:
-            self._log_data("Vectorstore is empty, indexing all incoming documents", tool_name="index_documents")
+            self._log_tool_event("Vectorstore is empty, indexing all incoming documents", tool_name="index_documents")
             yield from documents
             return

@@ -257,7 +311,7 @@ class BaseIndexerToolkit(VectorStoreWrapperBase):
             yield document

         if docs_to_remove:
-            self._log_data(
+            self._log_tool_event(
                 f"Removing {len(docs_to_remove)} documents from vectorstore that are already indexed with different updated_on.",
                 tool_name="index_documents"
             )
@@ -303,6 +357,11 @@ class BaseIndexerToolkit(VectorStoreWrapperBase):
             **kwargs):
         """ Searches indexed documents in the vector store."""
         # build filter on top of collection_suffix
+
+        available_collections = super().list_collections()
+        if collection_suffix and collection_suffix not in available_collections:
+            return f"Collection '{collection_suffix}' not found. Available collections: {available_collections}"
+
         filter = self._build_collection_filter(filter, collection_suffix)
         found_docs = super().search_documents(
             query,
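The search path now fails fast with a readable message when the requested collection suffix does not exist, instead of running a filter that silently matches nothing. A standalone sketch of the same guard, with illustrative names:

    from typing import List, Optional

    def search(collection: Optional[str], available: List[str]) -> str:
        # Reject unknown collections up front, echoing what is available.
        if collection and collection not in available:
            return f"Collection '{collection}' not found. Available collections: {available}"
        return f"searching in {collection or 'all collections'}"

    print(search("docs", ["docs", "code"]))  # searching in docs
    print(search("wiki", ["docs", "code"]))  # Collection 'wiki' not found. ...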
@@ -11,7 +11,8 @@ from .bitbucket_constants import create_pr_data
 from .cloud_api_wrapper import BitbucketCloudApi, BitbucketServerApi
 from pydantic.fields import PrivateAttr

-from ..elitea_base import BaseCodeToolApiWrapper
+from ..code_indexer_toolkit import CodeIndexerToolkit
+from ..utils.available_tools_decorator import extend_with_parent_available_tools

 logger = logging.getLogger(__name__)

@@ -117,7 +118,7 @@ CommentOnIssueModel = create_model(
 )


-class BitbucketAPIWrapper(BaseCodeToolApiWrapper):
+class BitbucketAPIWrapper(CodeIndexerToolkit):
     """Wrapper for Bitbucket API."""

     _bitbucket: Any = PrivateAttr()
@@ -167,7 +168,7 @@ class BitbucketAPIWrapper(BaseCodeToolApiWrapper):
             repository=values['repository']
         )
         cls._active_branch = values.get('branch')
-        return values
+        return super().validate_toolkit(values)

     def set_active_branch(self, branch_name: str) -> str:
         """Set the active branch for the bot."""
@@ -399,6 +400,7 @@ class BitbucketAPIWrapper(BaseCodeToolApiWrapper):
         except Exception as e:
             return f"Failed to read file {file_path}: {str(e)}"

+    @extend_with_parent_available_tools
     def get_available_tools(self):
         return [
             {
@@ -473,4 +475,4 @@ class BitbucketAPIWrapper(BaseCodeToolApiWrapper):
                 "description": self.add_pull_request_comment.__doc__ or "Add a comment to a pull request in the repository.",
                 "args_schema": AddPullRequestCommentModel,
             }
-        ] + self._get_vector_search_tools()
+        ]
@@ -0,0 +1,156 @@
+import ast
+import fnmatch
+import logging
+from typing import Optional, List, Generator
+
+from langchain_core.documents import Document
+from langchain_core.tools import ToolException
+from pydantic import Field
+
+from alita_sdk.tools.base_indexer_toolkit import BaseIndexerToolkit
+from .chunkers.code.codeparser import parse_code_files_for_db
+
+logger = logging.getLogger(__name__)
+
+
+class CodeIndexerToolkit(BaseIndexerToolkit):
+    def _get_indexed_data(self, collection_suffix: str):
+        if not self.vector_adapter:
+            raise ToolException("Vector adapter is not initialized. "
+                                "Check your configuration: embedding_model and vectorstore_type.")
+        return self.vector_adapter.get_code_indexed_data(self, collection_suffix)
+
+    def key_fn(self, document: Document):
+        return document.metadata.get('id')
+
+    def compare_fn(self, document: Document, idx_data):
+        return (document.metadata.get('commit_hash') and
+                idx_data.get('commit_hashes') and
+                document.metadata.get('commit_hash') in idx_data.get('commit_hashes')
+                )
+
+    def remove_ids_fn(self, idx_data, key: str):
+        return idx_data[key]['ids']
+
+    def _base_loader(
+            self,
+            branch: Optional[str] = None,
+            whitelist: Optional[List[str]] = None,
+            blacklist: Optional[List[str]] = None,
+            **kwargs) -> Generator[Document, None, None]:
+        """Index repository files in the vector store using code parsing."""
+        yield from self.loader(
+            branch=branch,
+            whitelist=whitelist,
+            blacklist=blacklist
+        )
+
+    def _extend_data(self, documents: Generator[Document, None, None]):
+        yield from parse_code_files_for_db(documents)
+
+    def _index_tool_params(self):
+        """Return the parameters for indexing data."""
+        return {
+            "branch": (Optional[str], Field(
+                description="Branch to index files from. Defaults to active branch if None.",
+                default=None)),
+            "whitelist": (Optional[List[str]], Field(
+                description='File extensions or paths to include. Defaults to all files if None. Example: ["*.md", "*.java"]',
+                default=None)),
+            "blacklist": (Optional[List[str]], Field(
+                description='File extensions or paths to exclude. Defaults to no exclusions if None. Example: ["*.md", "*.java"]',
+                default=None)),
+        }
+
+    def loader(self,
+               branch: Optional[str] = None,
+               whitelist: Optional[List[str]] = None,
+               blacklist: Optional[List[str]] = None) -> Generator[Document, None, None]:
+        """
+        Generates file content from a branch, respecting whitelist and blacklist patterns.
+
+        Parameters:
+        - branch (Optional[str]): Branch for listing files. Defaults to the current branch if None.
+        - whitelist (Optional[List[str]]): File extensions or paths to include. Defaults to all files if None.
+        - blacklist (Optional[List[str]]): File extensions or paths to exclude. Defaults to no exclusions if None.
+
+        Returns:
+        - generator: Yields content from files matching the whitelist but not the blacklist.
+
+        Example:
+        # Use 'feature-branch', include '.py' files, exclude 'test_' files
+        file_generator = loader(branch='feature-branch', whitelist=['*.py'], blacklist=['*test_*'])
+
+        Notes:
+        - Whitelist and blacklist use Unix shell-style wildcards.
+        - Files must match the whitelist and not the blacklist to be included.
+        """
+
+        _files = self.__handle_get_files("", self.__get_branch(branch))
+        self._log_tool_event(message="Listing files in branch", tool_name="loader")
+        logger.info(f"Files in branch: {_files}")
+
+        def is_whitelisted(file_path: str) -> bool:
+            if whitelist:
+                return (any(fnmatch.fnmatch(file_path, pattern) for pattern in whitelist)
+                        or any(file_path.endswith(f'.{pattern}') for pattern in whitelist))
+            return True
+
+        def is_blacklisted(file_path: str) -> bool:
+            if blacklist:
+                return (any(fnmatch.fnmatch(file_path, pattern) for pattern in blacklist)
+                        or any(file_path.endswith(f'.{pattern}') for pattern in blacklist))
+            return False
+
+        def file_content_generator():
+            self._log_tool_event(message="Reading the files", tool_name="loader")
+            # log the progress of file reading
+            total_files = len(_files)
+            for idx, file in enumerate(_files, 1):
+                if is_whitelisted(file) and not is_blacklisted(file):
+                    # read file ONLY if it matches whitelist and does not match blacklist
+                    try:
+                        file_content = self._read_file(file, self.__get_branch(branch))
+                    except Exception as e:
+                        logger.error(f"Failed to read file {file}: {e}")
+                        file_content = ""
+                    if not file_content:
+                        # empty file, skip
+                        continue
+                    # hash the file content to ensure uniqueness
+                    import hashlib
+                    file_hash = hashlib.sha256(file_content.encode("utf-8")).hexdigest()
+                    yield {"file_name": file,
+                           "file_content": file_content,
+                           "commit_hash": file_hash}
+                if idx % 10 == 0 or idx == total_files:
+                    self._log_tool_event(message=f"{idx} out of {total_files} files have been read", tool_name="loader")
+            self._log_tool_event(message=f"{len(_files)} have been read", tool_name="loader")
+
+        return file_content_generator()
+
+    def __handle_get_files(self, path: str, branch: str):
+        """
+        Handles the retrieval of files from a specific path and branch.
+        This method should be implemented in subclasses to provide the actual file retrieval logic.
+        """
+        _files = self._get_files(path=path, branch=branch)
+        if isinstance(_files, str):
+            try:
+                # Attempt to convert the string to a list using ast.literal_eval
+                _files = ast.literal_eval(_files)
+                # Ensure that the result is actually a list of strings
+                if not isinstance(_files, list) or not all(isinstance(item, str) for item in _files):
+                    raise ValueError("The evaluated result is not a list of strings")
+            except (SyntaxError, ValueError):
+                # Handle the case where the string cannot be converted to a list
+                raise ValueError("Expected a list of strings, but got a string that cannot be converted")
+
+        # Ensure _files is a list of strings
+        if not isinstance(_files, list) or not all(isinstance(item, str) for item in _files):
+            raise ValueError("Expected a list of strings")
+        return _files
+
+    def __get_branch(self, branch):
+        return (branch or getattr(self, 'active_branch', None)
+                or getattr(self, '_active_branch', None) or getattr(self, 'branch', None))
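The loader's include/exclude checks accept either a shell-style glob or a bare extension (so both "*.py" and "py" match Python files, the latter via `endswith('.py')`). A standalone sketch of the same filtering rule:

    import fnmatch
    from typing import List, Optional

    def keep_file(path: str,
                  whitelist: Optional[List[str]] = None,
                  blacklist: Optional[List[str]] = None) -> bool:
        """Mirror of the loader's filter: glob match OR bare-extension match."""
        def matches(patterns: List[str]) -> bool:
            return (any(fnmatch.fnmatch(path, p) for p in patterns)
                    or any(path.endswith(f'.{p}') for p in patterns))
        if whitelist and not matches(whitelist):
            return False
        return not (blacklist and matches(blacklist))

    print(keep_file("src/app.py", whitelist=["*.py"]))                                # True
    print(keep_file("tests/test_app.py", whitelist=["*.py"], blacklist=["*test_*"]))  # False
    print(keep_file("docs/guide.md", whitelist=["md"]))                               # True (bare extension)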
@@ -1,9 +1,7 @@
-from typing import Any, Dict, List, Optional, Union, Tuple
 import logging
-import traceback
-import json
-import re
-from pydantic import BaseModel, model_validator, Field, SecretStr
+from typing import Any, Dict, Optional
+
+from pydantic import model_validator, Field, SecretStr

 from .github_client import GitHubClient
 from .graphql_client_wrapper import GraphQLClientWrapper
@@ -11,28 +9,17 @@ from .schemas import (
     GitHubAuthConfig,
     GitHubRepoConfig
 )
-
-from ..elitea_base import BaseCodeToolApiWrapper
-
-from langchain_core.callbacks import dispatch_custom_event
+from ..code_indexer_toolkit import CodeIndexerToolkit
+from ..utils.available_tools_decorator import extend_with_parent_available_tools

 logger = logging.getLogger(__name__)

 # Import prompts for tools
-from .tool_prompts import (
-    UPDATE_FILE_PROMPT,
-    CREATE_ISSUE_PROMPT,
-    UPDATE_ISSUE_PROMPT,
-    CREATE_ISSUE_ON_PROJECT_PROMPT,
-    UPDATE_ISSUE_ON_PROJECT_PROMPT
-)

 # Create schema models for the new indexing functionality
-from pydantic import create_model
-from typing import Literal


-class AlitaGitHubAPIWrapper(BaseCodeToolApiWrapper):
+class AlitaGitHubAPIWrapper(CodeIndexerToolkit):
     """
     Wrapper for GitHub API that integrates both REST and GraphQL functionality.
     """
@@ -117,7 +104,7 @@ class AlitaGitHubAPIWrapper(BaseCodeToolApiWrapper):
         if "llm" not in values:
             values["llm"] = None

-        return values
+        return super().validate_toolkit(values)

     # Expose GitHub REST client methods directly via property
     @property
@@ -131,7 +118,7 @@ class AlitaGitHubAPIWrapper(BaseCodeToolApiWrapper):
         """Access to GitHub GraphQL client methods"""
         return self.graphql_client_instance

-
+    @extend_with_parent_available_tools
     def get_available_tools(self):
         # this is horrible, I need to think on something better
         if not self.github_client_instance:
@@ -142,12 +129,8 @@ class AlitaGitHubAPIWrapper(BaseCodeToolApiWrapper):
             graphql_tools = GraphQLClientWrapper.model_construct().get_available_tools()
         else:
             graphql_tools = self.graphql_client_instance.get_available_tools()
-
-        # Add vector search tools from base class (includes index_data + search tools)
-        vector_search_tools = self._get_vector_search_tools()

-        tools = github_tools + graphql_tools + vector_search_tools
-        return tools
+        return github_tools + graphql_tools

     def _get_files(self, path: str = "", branch: str = None):
         """Get list of files from GitHub repository."""
@@ -1,9 +1,12 @@
 # api_wrapper.py
 from typing import Any, Dict, List, Optional
 import fnmatch
-from ...tools.elitea_base import BaseCodeToolApiWrapper
+
+from ..code_indexer_toolkit import CodeIndexerToolkit
 from pydantic import create_model, Field, model_validator, SecretStr, PrivateAttr

+from ..utils.available_tools_decorator import extend_with_parent_available_tools
+
 AppendFileModel = create_model(
     "AppendFileModel",
     file_path=(str, Field(description="The path of the file")),
@@ -97,15 +100,19 @@ GetCommitsModel = create_model(
     author=(Optional[str], Field(description="Author name", default=None)),
 )

-class GitLabAPIWrapper(BaseCodeToolApiWrapper):
+class GitLabAPIWrapper(CodeIndexerToolkit):
     url: str
     repository: str
     private_token: SecretStr
     branch: Optional[str] = 'main'
     _git: Any = PrivateAttr()
-    _repo_instance: Any = PrivateAttr()
     _active_branch: Any = PrivateAttr()

+    @staticmethod
+    def _sanitize_url(url: str) -> str:
+        """Remove trailing slash from URL if present."""
+        return url.rstrip('/') if url else url
+
     @model_validator(mode='before')
     @classmethod
     def validate_toolkit(cls, values: Dict) -> Dict:
@@ -116,22 +123,34 @@ class GitLabAPIWrapper(BaseCodeToolApiWrapper):
                 "python-gitlab is not installed. "
                 "Please install it with `pip install python-gitlab`"
             )
-
+        values['repository'] = cls._sanitize_url(values['repository'])
         g = gitlab.Gitlab(
-            url=values['url'],
+            url=cls._sanitize_url(values['url']),
             private_token=values['private_token'],
             keep_base_url=True,
         )

         g.auth()
-        cls._repo_instance = g.projects.get(values.get('repository'))
         cls._git = g
         cls._active_branch = values.get('branch')
-        return values
+        return super().validate_toolkit(values)
+
+    @property
+    def repo_instance(self):
+        if not hasattr(self, "_repo_instance") or self._repo_instance is None:
+            try:
+                if self._git and self.repository:
+                    self._repo_instance = self._git.projects.get(self.repository)
+                else:
+                    self._repo_instance = None
+            except Exception as e:
+                # Only raise when accessed, not during initialization
+                raise ToolException(e)
+        return self._repo_instance

     def set_active_branch(self, branch_name: str) -> str:
         self._active_branch = branch_name
-        self._repo_instance.default_branch = branch_name
+        self.repo_instance.default_branch = branch_name
         return f"Active branch set to {branch_name}"

     def list_branches_in_repo(self, limit: Optional[int] = 20, branch_wildcard: Optional[str] = None) -> List[str]:
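The GitLab project handle is now resolved lazily: `validate_toolkit` no longer calls `projects.get(...)`, and the first access to `repo_instance` fetches and caches the project, so a bad repository name surfaces at use time rather than at toolkit construction. A minimal standalone sketch of the same cache-on-first-access pattern (the client and `fetch_project` names are illustrative):

    class Wrapper:
        def __init__(self, client, repository: str):
            self._client = client
            self.repository = repository
            self._repo_instance = None  # not fetched yet

        @property
        def repo_instance(self):
            # Fetch on first access and cache; later accesses reuse the handle.
            if self._repo_instance is None:
                self._repo_instance = self._client.fetch_project(self.repository)
            return self._repo_instance

    class FakeClient:
        def fetch_project(self, name: str):
            print(f"fetching {name}")  # printed only once
            return {"name": name}

    w = Wrapper(FakeClient(), "group/repo")
    print(w.repo_instance)  # triggers the fetch
    print(w.repo_instance)  # served from cache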
@@ -146,7 +165,7 @@ class GitLabAPIWrapper(BaseCodeToolApiWrapper):
             List[str]: List containing names of branches
         """
         try:
-            branches = self._repo_instance.branches.list(get_all=True)
+            branches = self.repo_instance.branches.list(get_all=True)

             if branch_wildcard:
                 branches = [branch for branch in branches if fnmatch.fnmatch(branch.name, branch_wildcard)]
@@ -173,7 +192,7 @@ class GitLabAPIWrapper(BaseCodeToolApiWrapper):

     def _get_all_files(self, path: str = None, recursive: bool = True, branch: str = None):
         branch = branch if branch else self._active_branch
-        return self._repo_instance.repository_tree(path=path, ref=branch, recursive=recursive, all=True)
+        return self.repo_instance.repository_tree(path=path, ref=branch, recursive=recursive, all=True)

     # overrided for indexer
     def _get_files(self, path: str = None, recursive: bool = True, branch: str = None):
@@ -185,7 +204,7 @@ class GitLabAPIWrapper(BaseCodeToolApiWrapper):
         Get the commit hash of a file in a specific branch.
         """
         try:
-            file = self._repo_instance.files.get(file_path, branch)
+            file = self.repo_instance.files.get(file_path, branch)
             return file.commit_id
         except Exception as e:
             return f"Unable to get commit hash for {file_path} due to error:\n{e}"
@@ -195,7 +214,7 @@ class GitLabAPIWrapper(BaseCodeToolApiWrapper):

     def create_branch(self, branch_name: str) -> str:
         try:
-            self._repo_instance.branches.create(
+            self.repo_instance.branches.create(
                 {
                     'branch': branch_name,
                     'ref': self._active_branch,
@@ -218,7 +237,7 @@ class GitLabAPIWrapper(BaseCodeToolApiWrapper):
         return parsed

     def get_issues(self) -> str:
-        issues = self._repo_instance.issues.list(state="opened")
+        issues = self.repo_instance.issues.list(state="opened")
         if len(issues) > 0:
             parsed_issues = self.parse_issues(issues)
             parsed_issues_str = (
@@ -229,7 +248,7 @@ class GitLabAPIWrapper(BaseCodeToolApiWrapper):
         return "No open issues available"

     def get_issue(self, issue_number: int) -> Dict[str, Any]:
-        issue = self._repo_instance.issues.get(issue_number)
+        issue = self.repo_instance.issues.get(issue_number)
         page = 0
         comments: List[dict] = []
         while len(comments) <= 10:
@@ -255,7 +274,7 @@ class GitLabAPIWrapper(BaseCodeToolApiWrapper):
                 commits are already in the {self.branch} branch"""
         else:
             try:
-                pr = self._repo_instance.mergerequests.create(
+                pr = self.repo_instance.mergerequests.create(
                     {
                         "source_branch": branch,
                         "target_branch": self.branch,
@@ -272,7 +291,7 @@ class GitLabAPIWrapper(BaseCodeToolApiWrapper):
         issue_number = int(comment_query.split("\n\n")[0])
         comment = comment_query[len(str(issue_number)) + 2 :]
         try:
-            issue = self._repo_instance.issues.get(issue_number)
+            issue = self.repo_instance.issues.get(issue_number)
             issue.notes.create({"body": comment})
             return "Commented on issue " + str(issue_number)
         except Exception as e:
@@ -281,7 +300,7 @@ class GitLabAPIWrapper(BaseCodeToolApiWrapper):
     def create_file(self, file_path: str, file_contents: str, branch: str) -> str:
         try:
             self.set_active_branch(branch)
-            self._repo_instance.files.get(file_path, branch)
+            self.repo_instance.files.get(file_path, branch)
             return f"File already exists at {file_path}. Use update_file instead"
         except Exception:
             data = {
@@ -290,13 +309,13 @@ class GitLabAPIWrapper(BaseCodeToolApiWrapper):
                 "file_path": file_path,
                 "content": file_contents,
             }
-            self._repo_instance.files.create(data)
+            self.repo_instance.files.create(data)

             return "Created file " + file_path

     def read_file(self, file_path: str, branch: str) -> str:
         self.set_active_branch(branch)
-        file = self._repo_instance.files.get(file_path, branch)
+        file = self.repo_instance.files.get(file_path, branch)
         return file.decode().decode("utf-8")

     def update_file(self, file_query: str, branch: str) -> str:
@@ -335,7 +354,7 @@ class GitLabAPIWrapper(BaseCodeToolApiWrapper):
                 ],
             }

-            self._repo_instance.commits.create(commit)
+            self.repo_instance.commits.create(commit)
             return "Updated file " + file_path
         except Exception as e:
             return "Unable to update file due to error:\n" + str(e)
@@ -365,7 +384,7 @@ class GitLabAPIWrapper(BaseCodeToolApiWrapper):
                 ],
             }

-            self._repo_instance.commits.create(commit)
+            self.repo_instance.commits.create(commit)
             return "Updated file " + file_path
         except Exception as e:
             return "Unable to update file due to error:\n" + str(e)
@@ -375,20 +394,20 @@ class GitLabAPIWrapper(BaseCodeToolApiWrapper):
             self.set_active_branch(branch)
             if not commit_message:
                 commit_message = f"Delete {file_path}"
-            self._repo_instance.files.delete(file_path, branch, commit_message)
+            self.repo_instance.files.delete(file_path, branch, commit_message)
             return f"Deleted file {file_path}"
         except Exception as e:
             return f"Unable to delete file due to error:\n{e}"

     def get_pr_changes(self, pr_number: int) -> str:
-        mr = self._repo_instance.mergerequests.get(pr_number)
+        mr = self.repo_instance.mergerequests.get(pr_number)
         res = f"title: {mr.title}\ndescription: {mr.description}\n\n"
         for change in mr.changes()["changes"]:
             res += f"diff --git a/{change['old_path']} b/{change['new_path']}\n{change['diff']}\n"
         return res

     def create_pr_change_comment(self, pr_number: int, file_path: str, line_number: int, comment: str) -> str:
-        mr = self._repo_instance.mergerequests.get(pr_number)
+        mr = self.repo_instance.mergerequests.get(pr_number)
         position = {"position_type": "text", "new_path": file_path, "new_line": line_number}
         mr.discussions.create({"body": comment, "position": position})
         return "Comment added"
@@ -405,7 +424,7 @@ class GitLabAPIWrapper(BaseCodeToolApiWrapper):
             params["until"] = until
         if author:
             params["author"] = author
-        commits = self._repo_instance.commits.list(**params)
+        commits = self.repo_instance.commits.list(**params)
         return [
             {
                 "sha": commit.id,
@@ -417,6 +436,7 @@ class GitLabAPIWrapper(BaseCodeToolApiWrapper):
             for commit in commits
         ]

+    @extend_with_parent_available_tools
     def get_available_tools(self):
         return [
             {
@@ -521,4 +541,4 @@ class GitLabAPIWrapper(BaseCodeToolApiWrapper):
                 "description": "Retrieve a list of commits from the repository.",
                 "args_schema": GetCommitsModel,
             }
-        ] + self._get_vector_search_tools()
+        ]
@@ -1330,7 +1330,7 @@ class JiraApiWrapper(NonCodeIndexerToolkit):

         # Use provided JQL query or default to all issues
         if not jql:
-            jql_query = "ORDER BY updated DESC"  # Default to get all issues ordered by update time
+            jql_query = "created >= \"1970-01-01\" ORDER BY updated DESC"  # Default to get all issues ordered by update time
         else:
            jql_query = jql

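The tautological `created >= "1970-01-01"` clause turns the order-only default into a complete, bounded JQL query (some Jira search endpoints reject a bare ORDER BY) while still matching every issue. A small sketch of the same defaulting logic, with a hypothetical helper name:

    from typing import Optional

    def default_jql(jql: Optional[str]) -> str:
        """Return the caller's JQL, or an always-true query ordered by update time."""
        # 'created >= "1970-01-01"' matches every issue, so the ORDER BY stays valid JQL.
        return jql if jql else 'created >= "1970-01-01" ORDER BY updated DESC'

    print(default_jql(None))             # created >= "1970-01-01" ORDER BY updated DESC
    print(default_jql("project = ABC"))  # project = ABC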
@@ -297,7 +297,9 @@ def sanitize_for_postgres(text: str, replacement: str = "") -> str:
     return text.replace("\x00", replacement)


-def file_extension_by_chunker(chunker_name: str) -> str:
+def file_extension_by_chunker(chunker_name: str) -> str | None:
+    if not chunker_name:
+        return None
     name = chunker_name.lower()
     if name == "markdown":
         return ".md"
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: alita_sdk
-Version: 0.3.354
+Version: 0.3.356
 Summary: SDK for building langchain agents using resources from Alita
 Author-email: Artem Rozumenko <artyom.rozumenko@gmail.com>, Mikalai Biazruchka <mikalai_biazruchka@epam.com>, Roman Mitusov <roman_mitusov@epam.com>, Ivan Krakhmaliuk <lifedj27@gmail.com>, Artem Dubrovskiy <ad13box@gmail.com>
 License-Expression: Apache-2.0
@@ -66,7 +66,7 @@ alita_sdk/runtime/langchain/document_loaders/AlitaPDFLoader.py,sha256=olVThKX9Mm
 alita_sdk/runtime/langchain/document_loaders/AlitaPowerPointLoader.py,sha256=CHIaUnP2Alu7D1NHxlL5N98iY7Gqm4tA5wHjBYUsQLc,2833
 alita_sdk/runtime/langchain/document_loaders/AlitaPythonLoader.py,sha256=m_7aq-aCFVb4vXZsJNinfN1hAuyy_S0ylRknv_ahxDc,340
 alita_sdk/runtime/langchain/document_loaders/AlitaQtestLoader.py,sha256=CUVVnisxm7b5yZWV6rn0Q3MEEaO1GWNcfnz5yWz8T0k,13283
-alita_sdk/runtime/langchain/document_loaders/AlitaTableLoader.py,sha256=nI8lyndVZxVAxbjX3yiqyuFQKFE8MjLPyYSyqRWxHqQ,4077
+alita_sdk/runtime/langchain/document_loaders/AlitaTableLoader.py,sha256=EO1nJDRPVwNAe6PNT7U8GhRuKbWUi6tKPtBwOrn_MwM,4102
 alita_sdk/runtime/langchain/document_loaders/AlitaTextLoader.py,sha256=EiCIAF_OxSrbuwgOFk2IpxRMvFbctITt2jAI0g_atpk,3586
 alita_sdk/runtime/langchain/document_loaders/ImageParser.py,sha256=RQ4zGdSw42ec8c6Eb48uFadayWuiT4FbwhGVwhSw60s,1065
 alita_sdk/runtime/langchain/document_loaders/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -96,7 +96,7 @@ alita_sdk/runtime/llms/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3
 alita_sdk/runtime/llms/preloaded.py,sha256=3AaUbZK3d8fvxAQMjR3ftOoYa0SnkCOL1EvdvDCXIHE,11321
 alita_sdk/runtime/toolkits/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 alita_sdk/runtime/toolkits/application.py,sha256=Mn8xwIdlbuyNzroH-WVVWJG0biOUV7u8qS15fQJ_XmI,2186
-alita_sdk/runtime/toolkits/artifact.py,sha256=nFDfEwDuhVLRDL-TIA-Um_-B3ifZjL6sc6q4txnMvcM,3028
+alita_sdk/runtime/toolkits/artifact.py,sha256=4yB5oT6yBjbfScdERMBkqirUy_GDGE0uMq9_loSrEDU,2924
 alita_sdk/runtime/toolkits/configurations.py,sha256=kIDAlnryPQfbZyFxV-9SzN2-Vefzx06TX1BBdIIpN90,141
 alita_sdk/runtime/toolkits/datasource.py,sha256=qk78OdPoReYPCWwahfkKLbKc4pfsu-061oXRryFLP6I,2498
 alita_sdk/runtime/toolkits/prompt.py,sha256=WIpTkkVYWqIqOWR_LlSWz3ug8uO9tm5jJ7aZYdiGRn0,1192
@@ -122,7 +122,7 @@ alita_sdk/runtime/tools/router.py,sha256=P6IGvb5t8f3_lU8gUi31_CGNdTeetTFKvlmG4u-
 alita_sdk/runtime/tools/sandbox.py,sha256=WNz-aUMtkGCPg84dDy_0BPkyp-6YjoYB-xjIEFFrtKw,11601
 alita_sdk/runtime/tools/tool.py,sha256=lE1hGi6qOAXG7qxtqxarD_XMQqTghdywf261DZawwno,5631
 alita_sdk/runtime/tools/vectorstore.py,sha256=8vRhi1lGFEs3unvnflEi2p59U2MfV32lStpEizpDms0,34467
-alita_sdk/runtime/tools/vectorstore_base.py,sha256=7ZkbegFG0XTQBYGsJjtrkK-zrqKwketfx8vSJzuPCug,27292
+alita_sdk/runtime/tools/vectorstore_base.py,sha256=4POq0NZ8FnMANop2JweeRNK9ViWcrpBM1y4Jl22E46E,26801
 alita_sdk/runtime/utils/AlitaCallback.py,sha256=E4LlSBuCHWiUq6W7IZExERHZY0qcmdjzc_rJlF2iQIw,7356
 alita_sdk/runtime/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 alita_sdk/runtime/utils/constants.py,sha256=Xntx1b_uxUzT4clwqHA_U6K8y5bBqf_4lSQwXdcWrp4,13586
@@ -134,13 +134,14 @@ alita_sdk/runtime/utils/toolkit_runtime.py,sha256=MU63Fpxj0b5_r1IUUc0Q3-PN9VwL7r
 alita_sdk/runtime/utils/toolkit_utils.py,sha256=I9QFqnaqfVgN26LUr6s3XlBlG6y0CoHURnCzG7XcwVs,5311
 alita_sdk/runtime/utils/utils.py,sha256=VXNLsdeTmf6snn9EtUyobv4yL-xzLhUcH8P_ORMifYc,675
 alita_sdk/tools/__init__.py,sha256=jUj1ztC2FbkIUB-YYmiqaz_rqW7Il5kWzDPn1mJmj5w,10545
-alita_sdk/tools/base_indexer_toolkit.py,sha256=hRo93pgb8uJbQgxPle5n7CtLbSbY97jfVq2GKkoNzvc,20328
+alita_sdk/tools/base_indexer_toolkit.py,sha256=PyT3BDSn6gNJPXdbZw21tvTbE9WkhJD3m_pFWZJlYbU,23825
+alita_sdk/tools/code_indexer_toolkit.py,sha256=6QvI1by0OFdnKTx5TfNoDJjnMrvnTi9T56xaDxzeleU,7306
 alita_sdk/tools/elitea_base.py,sha256=up3HshASSDfjlHV_HPrs1aD4JIwwX0Ug26WGTzgIYvY,34724
 alita_sdk/tools/non_code_indexer_toolkit.py,sha256=B3QvhpT1F9QidkCcsOi3J_QrTOaNlTxqWFwe90VivQQ,1329
 alita_sdk/tools/ado/__init__.py,sha256=NnNYpNFW0_N_v1td_iekYOoQRRB7PIunbpT2f9ZFJM4,1201
 alita_sdk/tools/ado/utils.py,sha256=PTCludvaQmPLakF2EbCGy66Mro4-rjDtavVP-xcB2Wc,1252
 alita_sdk/tools/ado/repos/__init__.py,sha256=rR-c40Pw_WpQeOXtEuS-COvgRUs1_cTkcJfHlK09N88,5339
-alita_sdk/tools/ado/repos/repos_wrapper.py,sha256=zAvcCPUQ2U0QnQv8btIkqj1pG1KtFHXw1rlc2mVtWEc,49928
+alita_sdk/tools/ado/repos/repos_wrapper.py,sha256=e7KL0BgwM1LTGCVWKZHqxcsCSmkVUlCtGe0Aan4yALE,49864
 alita_sdk/tools/ado/test_plan/__init__.py,sha256=qANjEjxwEEs0aTarH9LaQ745Dv_6iRdXxMKP8RDoeGs,5344
 alita_sdk/tools/ado/test_plan/test_plan_wrapper.py,sha256=MHM1WJUUWIgOUxGPjQUhNUxOj_Et2MAowIbhbU99h4I,22222
 alita_sdk/tools/ado/wiki/__init__.py,sha256=ela6FOuT1fqN3FvHGBflzAh16HS1SSPsJYS2SldRX7A,5272
@@ -160,7 +161,7 @@ alita_sdk/tools/azure_ai/search/api_wrapper.py,sha256=E4p6HPDlwgxfT_i6cvg9rN4Vn_
 alita_sdk/tools/base/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 alita_sdk/tools/base/tool.py,sha256=-N27AodZS49vdPCgFkU-bFS9bxoPopZBnNrmwInx3d0,864
 alita_sdk/tools/bitbucket/__init__.py,sha256=2VAY45Jij5dHkz6UGTmsEmOcLeJMWmcX-WrYIyGXsZY,5549
-alita_sdk/tools/bitbucket/api_wrapper.py,sha256=xHrluV2aCckOK_lGd42fFz1c-pyuZAnC-A_v1SKiM5g,20006
+alita_sdk/tools/bitbucket/api_wrapper.py,sha256=q22g27zoJnhNx_HvP4Q1Tt3-glTow7mSqbAjwpdg5CE,20120
 alita_sdk/tools/bitbucket/bitbucket_constants.py,sha256=UsbhQ1iEvrKoxceTFPWTYhaXS1zSxbmjs1TwY0-P4gw,462
 alita_sdk/tools/bitbucket/cloud_api_wrapper.py,sha256=QHdud-d3xcz3mOP3xb1Htk1sv9QFg7bTm1szdN_zohQ,15517
 alita_sdk/tools/browser/__init__.py,sha256=NvD1gfkuBt9AwtTP_Ag7LneCs0gDIVIMUZw2_SDWkG4,6577
@@ -238,14 +239,14 @@ alita_sdk/tools/elastic/api_wrapper.py,sha256=pl8CqQxteJAGwyOhMcld-ZgtOTFwwbv42O
 alita_sdk/tools/figma/__init__.py,sha256=W6vIMMkZI2Lmpg6_CRRV3oadaIbVI-qTLmKUh6enqWs,4509
 alita_sdk/tools/figma/api_wrapper.py,sha256=yK45guP6oMStTpfNLXRYgIZtNWkuWzgjFm_Vzu-ivNg,33687
 alita_sdk/tools/github/__init__.py,sha256=2rHu0zZyZGnLC5CkHgDIhe14N9yCyaEfrrt7ydH8478,5191
-alita_sdk/tools/github/api_wrapper.py,sha256=uDwYckdnpYRJtb0uZnDkaz2udvdDLVxuCh1tSwspsiU,8411
+alita_sdk/tools/github/api_wrapper.py,sha256=mX23Rro6xnRa35tpeWhKYcRCJx0cDTzIe32pZAKDYno,7986
 alita_sdk/tools/github/github_client.py,sha256=0YkpD6Zm4X46jMNN57ZIypo2YObtgxCGQokJAF-laFs,86597
 alita_sdk/tools/github/graphql_client_wrapper.py,sha256=d3AGjzLGH_hdQV2V8HeAX92dJ4dlnE5OXqUlCO_PBr0,71539
 alita_sdk/tools/github/schemas.py,sha256=TxEWR3SjDKVwzo9i2tLnss_uPAv85Mh7oWjvQvYLDQE,14000
 alita_sdk/tools/github/tool.py,sha256=Jnnv5lenV5ds8AAdyo2m8hSzyJ117HZBjzHC6T1ck-M,1037
 alita_sdk/tools/github/tool_prompts.py,sha256=y6ZW_FpUCE87Uop3WuQAZVRnzxO5t7xjBOI5bCqiluw,30194
 alita_sdk/tools/gitlab/__init__.py,sha256=iis7RHD3YgKWxF_ryTfdtA8RPGV-W8zUfy4BgiTDADw,4540
-alita_sdk/tools/gitlab/api_wrapper.py,sha256=opBIlIizZkBq0XKguTOr2EaIQVm_Ohk93LpMaIwgIlk,21825
+alita_sdk/tools/gitlab/api_wrapper.py,sha256=OW1JD3EyJCZA7iAHrNIwXuyd84Al-kB7A7VP5YE5FaQ,22578
 alita_sdk/tools/gitlab/tools.py,sha256=vOGTlSaGaFmWn6LS6YFP-FuTqUPun9vnv1VrUcUHAZQ,16500
 alita_sdk/tools/gitlab/utils.py,sha256=Z2XiqIg54ouqqt1to-geFybmkCb1I6bpE91wfnINH1I,2320
 alita_sdk/tools/gitlab_org/__init__.py,sha256=PSTsC4BcPoyDv03Wj9VQHrEGUeR8hw4MRarB64VeqFg,3865
@@ -261,7 +262,7 @@ alita_sdk/tools/google/bigquery/tool.py,sha256=Esf9Hsp8I0e7-5EdkFqQ-bid0cfrg-bfS
 alita_sdk/tools/google_places/__init__.py,sha256=QtmBCI0bHDK79u4hsCSWFcUihu-h4EmPSh9Yll7zz3w,3590
 alita_sdk/tools/google_places/api_wrapper.py,sha256=7nZly6nk4f4Tm7s2MVdnnwlb-1_WHRrDhyjDiqoyPjA,4674
 alita_sdk/tools/jira/__init__.py,sha256=G-9qnOYKFWM_adG0QFexh5-2pj_WaxIxxZanB3ARFqI,6339
-alita_sdk/tools/jira/api_wrapper.py,sha256=iavUyh0_ZbuhetEqYZza6skTWQpJL2NVuw_ev0hFB1Q,82719
+alita_sdk/tools/jira/api_wrapper.py,sha256=Z1pL7mTERv9TZFJNewe67kNeWcT5XCb7l8scmz6lx88,82745
 alita_sdk/tools/keycloak/__init__.py,sha256=0WB9yXMUUAHQRni1ghDEmd7GYa7aJPsTVlZgMCM9cQ0,3050
 alita_sdk/tools/keycloak/api_wrapper.py,sha256=cOGr0f3S3-c6tRDBWI8wMnetjoNSxiV5rvC_0VHb8uw,3100
 alita_sdk/tools/llm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -328,7 +329,7 @@ alita_sdk/tools/testrail/__init__.py,sha256=Xg4nVjULL_D8JpIXLYXppnwUfGF4-lguFwKH
 alita_sdk/tools/testrail/api_wrapper.py,sha256=tQcGlFJmftvs5ZiO4tsP19fCo4CrJeq_UEvQR1liVfE,39891
 alita_sdk/tools/utils/__init__.py,sha256=W9rCCUPtHCP5nGAbWp0n5jaNA84572aiRoqKneBnaS4,3330
 alita_sdk/tools/utils/available_tools_decorator.py,sha256=IbrdfeQkswxUFgvvN7-dyLMZMyXLiwvX7kgi3phciCk,273
-alita_sdk/tools/utils/content_parser.py,sha256=eBqUI1HSgZYfPQgnDrz7dnhjKvpgPv9kqrbb_LWTh08,14473
+alita_sdk/tools/utils/content_parser.py,sha256=sXVdSWC1BQCKOzO32x1kGzq9sbZIxtlgCLYZsMYTaho,14525
 alita_sdk/tools/vector_adapters/VectorStoreAdapter.py,sha256=ypBEAkFRGHv5edW0N9rdo1yKurNGQ4pRVEWtrN_7SeA,17656
 alita_sdk/tools/vector_adapters/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 alita_sdk/tools/xray/__init__.py,sha256=eOMWP8VamFbbJgt1xrGpGPqB9ByOTA0Cd3LCaETzGk4,4376
@@ -350,8 +351,8 @@ alita_sdk/tools/zephyr_scale/api_wrapper.py,sha256=kT0TbmMvuKhDUZc0i7KO18O38JM9S
 alita_sdk/tools/zephyr_squad/__init__.py,sha256=0ne8XLJEQSLOWfzd2HdnqOYmQlUliKHbBED5kW_Vias,2895
 alita_sdk/tools/zephyr_squad/api_wrapper.py,sha256=kmw_xol8YIYFplBLWTqP_VKPRhL_1ItDD0_vXTe_UuI,14906
 alita_sdk/tools/zephyr_squad/zephyr_squad_cloud_client.py,sha256=R371waHsms4sllHCbijKYs90C-9Yu0sSR3N4SUfQOgU,5066
-alita_sdk-0.3.354.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
-alita_sdk-0.3.354.dist-info/METADATA,sha256=Xug_UZxh4RE5PZGCbNgcZ4I8ThviJRdOrYiNi46RWLg,19071
-alita_sdk-0.3.354.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-alita_sdk-0.3.354.dist-info/top_level.txt,sha256=0vJYy5p_jK6AwVb1aqXr7Kgqgk3WDtQ6t5C-XI9zkmg,10
-alita_sdk-0.3.354.dist-info/RECORD,,
+alita_sdk-0.3.356.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+alita_sdk-0.3.356.dist-info/METADATA,sha256=IkFZJksYT0vdwMiG6dcF_FAhf4BgHhpUyvz1C6H_qsI,19071
+alita_sdk-0.3.356.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+alita_sdk-0.3.356.dist-info/top_level.txt,sha256=0vJYy5p_jK6AwVb1aqXr7Kgqgk3WDtQ6t5C-XI9zkmg,10
+alita_sdk-0.3.356.dist-info/RECORD,,