alita-sdk 0.3.208__py3-none-any.whl → 0.3.210__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- alita_sdk/runtime/clients/artifact.py +18 -4
- alita_sdk/runtime/langchain/document_loaders/AlitaCSVLoader.py +2 -1
- alita_sdk/runtime/langchain/document_loaders/AlitaDocxMammothLoader.py +3 -3
- alita_sdk/runtime/langchain/document_loaders/AlitaImageLoader.py +8 -4
- alita_sdk/runtime/langchain/document_loaders/AlitaTableLoader.py +1 -1
- alita_sdk/runtime/langchain/langraph_agent.py +9 -6
- alita_sdk/runtime/toolkits/artifact.py +7 -3
- alita_sdk/runtime/toolkits/tools.py +8 -1
- alita_sdk/runtime/tools/application.py +2 -0
- alita_sdk/runtime/tools/artifact.py +65 -8
- alita_sdk/runtime/tools/vectorstore.py +125 -42
- alita_sdk/runtime/utils/utils.py +3 -0
- alita_sdk/tools/ado/__init__.py +8 -0
- alita_sdk/tools/ado/repos/repos_wrapper.py +37 -0
- alita_sdk/tools/ado/test_plan/test_plan_wrapper.py +0 -7
- alita_sdk/tools/ado/work_item/__init__.py +4 -0
- alita_sdk/tools/ado/work_item/ado_wrapper.py +37 -4
- alita_sdk/tools/aws/delta_lake/__init__.py +1 -1
- alita_sdk/tools/bitbucket/__init__.py +13 -1
- alita_sdk/tools/bitbucket/api_wrapper.py +31 -4
- alita_sdk/tools/bitbucket/cloud_api_wrapper.py +31 -0
- alita_sdk/tools/chunkers/code/codeparser.py +18 -10
- alita_sdk/tools/confluence/api_wrapper.py +35 -134
- alita_sdk/tools/confluence/loader.py +30 -28
- alita_sdk/tools/elitea_base.py +112 -11
- alita_sdk/tools/figma/__init__.py +13 -1
- alita_sdk/tools/figma/api_wrapper.py +47 -3
- alita_sdk/tools/github/api_wrapper.py +8 -0
- alita_sdk/tools/github/github_client.py +18 -0
- alita_sdk/tools/gitlab/__init__.py +4 -0
- alita_sdk/tools/gitlab/api_wrapper.py +10 -0
- alita_sdk/tools/google/bigquery/__init__.py +1 -1
- alita_sdk/tools/jira/__init__.py +21 -13
- alita_sdk/tools/jira/api_wrapper.py +285 -5
- alita_sdk/tools/sharepoint/__init__.py +11 -1
- alita_sdk/tools/sharepoint/api_wrapper.py +23 -53
- alita_sdk/tools/testrail/__init__.py +4 -0
- alita_sdk/tools/testrail/api_wrapper.py +28 -56
- alita_sdk/tools/utils/content_parser.py +123 -9
- alita_sdk/tools/xray/__init__.py +8 -1
- alita_sdk/tools/xray/api_wrapper.py +505 -14
- alita_sdk/tools/zephyr_scale/api_wrapper.py +5 -5
- {alita_sdk-0.3.208.dist-info → alita_sdk-0.3.210.dist-info}/METADATA +1 -1
- {alita_sdk-0.3.208.dist-info → alita_sdk-0.3.210.dist-info}/RECORD +47 -47
- {alita_sdk-0.3.208.dist-info → alita_sdk-0.3.210.dist-info}/WHEEL +0 -0
- {alita_sdk-0.3.208.dist-info → alita_sdk-0.3.210.dist-info}/licenses/LICENSE +0 -0
- {alita_sdk-0.3.208.dist-info → alita_sdk-0.3.210.dist-info}/top_level.txt +0 -0
alita_sdk/tools/elitea_base.py
CHANGED
```diff
@@ -2,12 +2,16 @@ import ast
 import fnmatch
 import logging
 import traceback
-from typing import Any, Optional, List, Dict, Generator
+from typing import Any, Optional, List, Literal, Dict, Generator
 
 from langchain_core.documents import Document
 from langchain_core.tools import ToolException
-from pydantic import BaseModel, create_model, Field
+from pydantic import BaseModel, create_model, Field, SecretStr
+
+from alita_sdk.runtime.langchain.interfaces.llm_processor import get_embeddings
+from .chunkers import markdown_chunker
 from .utils import TOOLKIT_SPLITTER
+from ..runtime.utils.utils import IndexerKeywords
 
 logger = logging.getLogger(__name__)
 
```
```diff
@@ -98,6 +102,17 @@ BaseStepbackSearchParams = create_model(
     )),
 )
 
+BaseIndexDataParams = create_model(
+    "indexData",
+    __base__=BaseIndexParams,
+    progress_step=(Optional[int], Field(default=None, ge=0, le=100,
+                                        description="Optional step size for progress reporting during indexing")),
+    clean_index=(Optional[bool], Field(default=False,
+                                       description="Optional flag to enforce clean existing index before indexing new data")),
+    chunking_tool=(Literal['markdown', 'statistical', 'proposal'], Field(description="Name of chunking tool", default="markdown")),
+    chunking_config=(Optional[dict], Field(description="Chunking tool configuration", default_factory=dict)),
+)
+
 
 class BaseToolApiWrapper(BaseModel):
 
```
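The `BaseIndexDataParams` schema above is built with pydantic's dynamic `create_model`, inheriting shared fields via `__base__`. A minimal standalone sketch of the same pattern (the base schema and its field are illustrative stand-ins, not the SDK's actual `BaseIndexParams`):

```python
from typing import Literal, Optional

from pydantic import Field, create_model

# Stand-in for BaseIndexParams (illustrative only).
BaseParams = create_model(
    "BaseParams",
    collection_suffix=(str, Field(description="Suffix for the target collection")),
)

# Extend it the same way BaseIndexDataParams extends BaseIndexParams.
IndexDataParams = create_model(
    "IndexDataParams",
    __base__=BaseParams,
    progress_step=(Optional[int], Field(default=None, ge=0, le=100)),
    clean_index=(Optional[bool], Field(default=False)),
    chunking_tool=(Literal["markdown", "statistical", "proposal"], Field(default="markdown")),
    chunking_config=(Optional[dict], Field(default_factory=dict)),
)

print(IndexDataParams(collection_suffix="docs").model_dump())
# {'collection_suffix': 'docs', 'progress_step': None, 'clean_index': False,
#  'chunking_tool': 'markdown', 'chunking_config': {}}
```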
```diff
@@ -191,12 +206,34 @@ class BaseVectorStoreToolApiWrapper(BaseToolApiWrapper):
 
     doctype: str = "document"
 
-
+    llm: Any = None
+    connection_string: Optional[SecretStr] = None
+    collection_name: Optional[str] = None
+    embedding_model: Optional[str] = "HuggingFaceEmbeddings"
+    embedding_model_params: Optional[Dict[str, Any]] = {"model_name": "sentence-transformers/all-MiniLM-L6-v2"}
+    vectorstore_type: Optional[str] = "PGVector"
+
+    def _index_tool_params(self, **kwargs) -> dict[str, tuple[type, Field]]:
+        """
+        Returns a list of fields for index_data args schema.
+        NOTE: override this method in subclasses to provide specific parameters for certain toolkit.
+        """
+        return {}
+
+    def _get_dependencies_chunker(self, document: Optional[Document] = None):
+        return markdown_chunker
+
+    def _get_dependencies_chunker_config(self, document: Optional[Document] = None):
+        embedding = get_embeddings(self.embedding_model, self.embedding_model_params)
+        #
+        return {'embedding': embedding, 'llm': self.llm}
+
+    def _base_loader(self, **kwargs) -> Generator[Document, None, None]:
         """ Loads documents from a source, processes them,
         and returns a list of Document objects with base metadata: id and created_on."""
         pass
 
-    def _process_document(self, base_document: Document) -> Document:
+    def _process_document(self, base_document: Document) -> Generator[Document, None, None]:
         """ Process an existing base document to extract relevant metadata for full document preparation.
         Used for late processing of documents after we ensure that the document has to be indexed to avoid
         time-consuming operations for documents which might be useless.
```
```diff
@@ -208,6 +245,51 @@ class BaseVectorStoreToolApiWrapper(BaseToolApiWrapper):
         Document: The processed document with metadata."""
         pass
 
+    def get_index_data_tool(self):
+        return {
+            "name": "index_data",
+            "ref": self.index_data,
+            "description": "Loads data to index.",
+            "args_schema": create_model(
+                "IndexData",
+                __base__=BaseIndexDataParams,
+                **self._index_tool_params() if self._index_tool_params() else {}
+            )
+        }
+
+    def index_data(self, **kwargs):
+        from alita_sdk.tools.chunkers import __confluence_chunkers__ as chunkers, __confluence_models__ as models
+        docs = self._base_loader(**kwargs)
+        embedding = get_embeddings(self.embedding_model, self.embedding_model_params)
+        chunking_tool = kwargs.get("chunking_tool")
+        if chunking_tool:
+            # Resolve chunker from the provided chunking_tool name
+            base_chunker = chunkers.get(chunking_tool)
+            # Resolve chunking configuration
+            base_chunking_config = kwargs.get("chunking_config", {})
+            config_model = models.get(chunking_tool)
+            # Set required fields that should come from the instance (and Fallback for chunkers without models)
+            base_chunking_config['embedding'] = embedding
+            base_chunking_config['llm'] = self.llm
+            #
+            if config_model:
+                try:
+                    # Validate the configuration using the appropriate Pydantic model
+                    validated_config = config_model(**base_chunking_config)
+                    base_chunking_config = validated_config.model_dump()
+                except Exception as e:
+                    logger.error(f"Invalid chunking configuration for {chunking_tool}: {e}")
+                    raise ToolException(f"Invalid chunking configuration: {e}")
+            #
+            docs = base_chunker(file_content_generator=docs, config=base_chunking_config)
+        #
+        collection_suffix = kwargs.get("collection_suffix")
+        progress_step = kwargs.get("progress_step")
+        clean_index = kwargs.get("clean_index")
+        vs = self._init_vector_store(collection_suffix, embeddings=embedding)
+        #
+        return vs.index_documents(docs, progress_step=progress_step, clean_index=clean_index)
+
     def _process_documents(self, documents: List[Document]) -> Generator[Document, None, None]:
         """
         Process a list of base documents to extract relevant metadata for full document preparation.
```
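`index_data` is effectively a load → chunk → embed → store pipeline: `_base_loader` yields raw documents, the named chunker (validated against its pydantic config model) splits them, and `_init_vector_store(...).index_documents(...)` persists the result. A minimal sketch of how a subclass plugs in, using a hypothetical `MyWrapper` (the vector-store plumbing is inherited from the SDK as wired above):

```python
from typing import Generator

from langchain_core.documents import Document
from pydantic import Field


class MyWrapper(BaseVectorStoreToolApiWrapper):  # hypothetical subclass
    def _index_tool_params(self) -> dict:
        # Extra fields merged into the generated index_data args schema.
        return {"space": (str, Field(description="Space to crawl"))}

    def _base_loader(self, space: str = "", **kwargs) -> Generator[Document, None, None]:
        # Yield raw documents carrying the base metadata index_data expects.
        yield Document(page_content="# Title\n\nBody text.",
                       metadata={"id": "1", "updated_on": "2025-01-01"})


# Usage (connection details are placeholders):
# wrapper = MyWrapper(connection_string=..., collection_name="demo", llm=some_llm)
# wrapper.index_data(collection_suffix="docs", space="DEMO",
#                    chunking_tool="markdown", clean_index=True)
```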
```diff
@@ -222,10 +304,21 @@ class BaseVectorStoreToolApiWrapper(BaseToolApiWrapper):
             Generator[Document, None, None]: A generator yielding processed documents with metadata.
         """
         for doc in documents:
-
-
-
-
+            # Filter documents to process only those that either:
+            # - do not have a 'chunk_id' in their metadata, or
+            # - have 'chunk_id' explicitly set to 1.
+            # This prevents processing of irrelevant or duplicate chunks, improving efficiency.
+            chunk_id = doc.metadata.get("chunk_id")
+            if chunk_id is None or chunk_id == 1:
+                processed_docs = self._process_document(doc)
+                if processed_docs:  # Only proceed if the list is not empty
+                    for processed_doc in processed_docs:
+                        # map processed document (child) to the original document (parent)
+                        processed_doc.metadata[IndexerKeywords.PARENT.value] = doc.metadata.get('id', None)
+                        if chunker := self._get_dependencies_chunker(processed_doc):
+                            yield from chunker(file_content_generator=iter([processed_doc]), config=self._get_dependencies_chunker_config())
+                        else:
+                            yield processed_doc
 
 
     def _init_vector_store(self, collection_suffix: str = "", embeddings: Optional[Any] = None):
```
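The `chunk_id` gate above means only parent documents (no `chunk_id`) and first chunks reach `_process_document`; later chunks of the same parent are skipped as duplicate work. The filter is easy to check in isolation with stand-in metadata dicts:

```python
metadatas = [
    {"id": "a"},                 # parent document, no chunk_id -> processed
    {"id": "a", "chunk_id": 1},  # first chunk -> processed
    {"id": "a", "chunk_id": 2},  # later chunk -> skipped
]

selected = [m for m in metadatas if m.get("chunk_id") is None or m.get("chunk_id") == 1]
print(selected)  # [{'id': 'a'}, {'id': 'a', 'chunk_id': 1}]
```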
```diff
@@ -382,6 +475,9 @@ class BaseCodeToolApiWrapper(BaseVectorStoreToolApiWrapper):
     def _read_file(self, file_path: str, branch: str):
         raise NotImplementedError("Subclasses should implement this method")
 
+    def _file_commit_hash(self, file_path: str, branch: str):
+        pass
+
     def __handle_get_files(self, path: str, branch: str):
         """
         Handles the retrieval of files from a specific path and branch.
```
```diff
@@ -447,7 +543,8 @@ class BaseCodeToolApiWrapper(BaseVectorStoreToolApiWrapper):
             for file in _files:
                 if is_whitelisted(file) and not is_blacklisted(file):
                     yield {"file_name": file,
-                           "file_content": self._read_file(file, branch=branch or self.active_branch)}
+                           "file_content": self._read_file(file, branch=branch or self.active_branch),
+                           "commit_hash": self._file_commit_hash(file, branch=branch or self.active_branch)}
 
         return parse_code_files_for_db(file_content_generator())
 
```
```diff
@@ -467,7 +564,7 @@ class BaseCodeToolApiWrapper(BaseVectorStoreToolApiWrapper):
             blacklist=blacklist
         )
         vectorstore = self._init_vector_store(collection_suffix)
-        return vectorstore.index_documents(documents)
+        return vectorstore.index_documents(documents, clean_index=False, is_code=True)
 
     def _get_vector_search_tools(self):
         """
```
```diff
@@ -494,7 +591,11 @@ class BaseCodeToolApiWrapper(BaseVectorStoreToolApiWrapper):
 def extend_with_vector_tools(method):
     def wrapper(self, *args, **kwargs):
         tools = method(self, *args, **kwargs)
-        tools.extend(self._get_vector_search_tools())
+        tools.extend(self._get_vector_search_tools())
+        #
+        if isinstance(self, BaseVectorStoreToolApiWrapper):
+            tools.append(self.get_index_data_tool())
+        #
         return tools
 
     return wrapper
```
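`extend_with_vector_tools` is a plain wrap-and-extend decorator: it calls the wrapped `get_available_tools`, appends the vector-search tools, and adds `index_data` only for wrappers that are vector-store backed. The same shape, reduced to a runnable toy (class and tool names here are invented for illustration):

```python
class Indexable:
    """Stand-in for BaseVectorStoreToolApiWrapper."""
    def index_tool(self):
        return {"name": "index_data"}


def extend_with_extras(method):
    def wrapper(self, *args, **kwargs):
        tools = method(self, *args, **kwargs)
        # Conditionally extend based on the wrapper's capabilities.
        if isinstance(self, Indexable):
            tools.append(self.index_tool())
        return tools
    return wrapper


class Toolkit(Indexable):
    @extend_with_extras
    def get_available_tools(self):
        return [{"name": "search"}]


print([t["name"] for t in Toolkit().get_available_tools()])  # ['search', 'index_data']
```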
alita_sdk/tools/figma/__init__.py
CHANGED
```diff
@@ -18,7 +18,15 @@ def get_tools(tool):
            oauth2=tool["settings"].get("oauth2", None),
            global_limit=tool["settings"].get("global_limit", GLOBAL_LIMIT),
            global_regexp=tool["settings"].get("global_regexp", None),
-           toolkit_name=tool.get('toolkit_name')
+           toolkit_name=tool.get('toolkit_name'),
+           # indexer settings
+           llm=tool['settings'].get('llm', None),
+           connection_string = tool['settings'].get('connection_string', None),
+           collection_name=str(tool['id']),
+           doctype='doc',
+           embedding_model="HuggingFaceEmbeddings",
+           embedding_model_params={"model_name": "sentence-transformers/all-MiniLM-L6-v2"},
+           vectorstore_type="PGVector"
        )
        .get_tools()
    )
```
```diff
@@ -45,6 +53,10 @@ class FigmaToolkit(BaseToolkit):
                List[Literal[tuple(selected_tools)]],
                Field(default=[], json_schema_extra={"args_schemas": selected_tools}),
            ),
+           # indexer settings
+           connection_string = (Optional[SecretStr], Field(description="Connection string for vectorstore",
+                                                           default=None,
+                                                           json_schema_extra={'secret': True})),
            __config__=ConfigDict(
                json_schema_extra={
                    "metadata": {
```
alita_sdk/tools/figma/api_wrapper.py
CHANGED
```diff
@@ -1,16 +1,18 @@
+import base64
 import functools
 import json
 import logging
 import re
 from enum import Enum
-from typing import Dict, Optional, Union
+from typing import Dict, Generator, Optional, Union
 
 import requests
 from FigmaPy import FigmaPy
+from langchain_core.documents import Document
 from langchain_core.tools import ToolException
 from pydantic import Field, PrivateAttr, create_model, model_validator, SecretStr
 
-from ..elitea_base import BaseToolApiWrapper
+from ..elitea_base import BaseVectorStoreToolApiWrapper, extend_with_vector_tools
 
 GLOBAL_LIMIT = 10000
 
```
```diff
@@ -226,13 +228,54 @@ class ArgsSchema(Enum):
     )
 
 
-class FigmaApiWrapper(BaseToolApiWrapper):
+class FigmaApiWrapper(BaseVectorStoreToolApiWrapper):
     token: Optional[SecretStr] = Field(default=None)
     oauth2: Optional[SecretStr] = Field(default=None)
     global_limit: Optional[int] = Field(default=GLOBAL_LIMIT)
     global_regexp: Optional[str] = Field(default=None)
     _client: Optional[FigmaPy] = PrivateAttr()
 
+    def _base_loader(self, project_id: str, **kwargs) -> Generator[Document, None, None]:
+        files = json.loads(self.get_project_files(project_id)).get('files', [])
+        for file in files:
+            yield Document(page_content=json.dumps(file), metadata={
+                'id': file.get('key', ''),
+                'file_key': file.get('key', ''),
+                'name': file.get('name', ''),
+                'updated_on': file.get('last_modified', '')
+            })
+
+    def _process_document(self, document: Document) -> Generator[Document, None, None]:
+        file_key = document.metadata.get('id', '')
+        #
+        node_ids = []
+        children = self._client.get_file(file_key).document.get('children', [])
+        if children:
+            nodes = children[0].get('children', [])
+            node_ids = [node['id'] for node in nodes if 'id' in node]
+        images = self._client.get_file_images(file_key, node_ids).images or {}
+
+        # iterate over images values
+        for node_id, image_url in images.items():
+            response = requests.get(image_url)
+            if response.status_code == 200:
+                content_type = response.headers.get('Content-Type', '')
+                if 'text/html' not in content_type.lower():
+                    yield Document(
+                        page_content=base64.b64encode(response.content).decode("utf-8"),
+                        metadata={
+                            'file_key': file_key,
+                            'node_id': node_id,
+                            'image_url': image_url
+                        }
+                    )
+
+    def _index_tool_params(self):
+        """Return the parameters for indexing data."""
+        return {
+            "project_id": (str, Field(description="ID of the project to list files from", examples=["55391681"]))
+        }
+
     def _send_request(
         self,
         method: str,
```
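With these hooks, indexing a Figma project becomes a single inherited `index_data` call: `_base_loader` yields one document per project file, and `_process_document` lazily downloads node renders as base64 image documents. A hedged usage sketch (token, connection string, and project ID are placeholders):

```python
# All values below are placeholders.
wrapper = FigmaApiWrapper(
    token="figd_xxx",
    connection_string="postgresql+psycopg://user:pass@localhost:5432/vectors",
    collection_name="figma_demo",
)

# project_id is declared in _index_tool_params and forwarded to _base_loader.
wrapper.index_data(collection_suffix="figma", project_id="55391681")
```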
```diff
@@ -442,6 +485,7 @@ class FigmaApiWrapper(BaseToolApiWrapper):
         """Retrieves all files for a specified project ID from Figma."""
         return self._client.get_project_files(project_id)
 
+    @extend_with_vector_tools
     def get_available_tools(self):
         return [
             {
```
alita_sdk/tools/github/api_wrapper.py
CHANGED
```diff
@@ -167,6 +167,14 @@ class AlitaGitHubAPIWrapper(BaseCodeToolApiWrapper):
         # Use the GitHub client's method to get files
         return self.github_client_instance._get_files(path, branch or self.active_branch)
 
+    def _file_commit_hash(self, file_path: str, branch: str):
+        """Get the commit hash of a file in the GitHub repository."""
+        if not self.github_client_instance:
+            raise ValueError("GitHub client not initialized")
+
+        # Use the GitHub client's method to get commit hash
+        return self.github_client_instance._file_commit_hash(file_path, branch or self.active_branch)
+
     def _read_file(self, file_path: str, branch: str):
         """Read file content from GitHub repository."""
         if not self.github_client_instance:
```
alita_sdk/tools/github/github_client.py
CHANGED
```diff
@@ -1393,6 +1393,24 @@ class GitHubClient(BaseModel):
         except Exception as e:
             return f"An error occurred while updating the issue: {str(e)}"
 
+    def _file_commit_hash(self, file_path: str, branch: str, repo_name: Optional[str] = None) -> str:
+        """
+        Get the commit hash of a file in a specific branch.
+        Parameters:
+            file_path(str): the file path
+            branch(str): the branch to read the file from
+            repo_name (Optional[str]): Name of the repository in format 'owner/repo'
+
+        Returns:
+            str: The commit hash of the file, or an error message if not found
+        """
+        try:
+            repo = self.github_api.get_repo(repo_name) if repo_name else self.github_repo_instance
+            file = repo.get_contents(file_path, ref=branch)
+            return file.sha
+        except Exception as e:
+            return f"File not found `{file_path}` on branch `{branch}`. Error: {str(e)}"
+
     def _read_file(self, file_path: str, branch: str, repo_name: Optional[str] = None) -> str:
         """
         Read a file from specified branch
```
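One caveat worth knowing: in PyGithub, the `sha` of the `ContentFile` returned by `get_contents(path, ref=branch)` is the git *blob* SHA of the file's content, not a commit SHA, so the value changes exactly when the content changes, which is still sufficient for the change-detection use here despite the method's name. The blob SHA is reproducible locally:

```python
import hashlib


def git_blob_sha(content: bytes) -> str:
    # Git hashes blobs as sha1 over "blob <size>\0<content>".
    header = f"blob {len(content)}\0".encode()
    return hashlib.sha1(header + content).hexdigest()


# Matches `git hash-object` and ContentFile.sha for the same bytes.
print(git_blob_sha(b"hello\n"))  # ce013625030ba8dba906f756967f9e9ca394464a
```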
alita_sdk/tools/gitlab/__init__.py
CHANGED
```diff
@@ -47,6 +47,10 @@ class AlitaGitlabToolkit(BaseToolkit):
            repository=(str, Field(description="GitLab repository", json_schema_extra={'toolkit_name': True, 'max_toolkit_length': AlitaGitlabToolkit.toolkit_max_length})),
            private_token=(SecretStr, Field(description="GitLab private token", json_schema_extra={'secret': True, 'configuration': True})),
            branch=(str, Field(description="Main branch", default="main")),
+           # indexer settings
+           connection_string=(Optional[SecretStr], Field(description="Connection string for vectorstore",
+                                                         default=None,
+                                                         json_schema_extra={'secret': True})),
            selected_tools=(List[Literal[tuple(selected_tools)]], Field(default=[], json_schema_extra={'args_schemas': selected_tools})),
            __config__=ConfigDict(json_schema_extra={
                'metadata': {
```
alita_sdk/tools/gitlab/api_wrapper.py
CHANGED
```diff
@@ -167,6 +167,16 @@ class GitLabAPIWrapper(BaseCodeToolApiWrapper):
         gitlab_files = self._get_all_files(path, recursive, branch)
         return [file['path'] for file in gitlab_files if file['type'] == 'blob']
 
+    def _file_commit_hash(self, file_path: str, branch: str):
+        """
+        Get the commit hash of a file in a specific branch.
+        """
+        try:
+            file = self._repo_instance.files.get(file_path, branch)
+            return file.commit_id
+        except Exception as e:
+            return f"Unable to get commit hash for {file_path} due to error:\n{e}"
+
     def _read_file(self, file_path: str, branch: str):
         return self.read_file(file_path, branch)
 
```
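For comparison, python-gitlab's `files.get(file_path, ref)` returns a `ProjectFile` whose payload (per the GitLab repository-files API) includes `blob_id`, `commit_id` (the commit the ref resolved to), and `last_commit_id` (the last commit that touched the file); the `commit_id` used above therefore moves with the branch head rather than with the individual file. A usage sketch with placeholder values:

```python
import gitlab  # python-gitlab

# Placeholders, not real endpoints or tokens.
gl = gitlab.Gitlab("https://gitlab.example.com", private_token="glpat-xxx")
project = gl.projects.get("group/repo")

f = project.files.get("README.md", ref="main")
print(f.blob_id)         # blob SHA of the file content
print(f.commit_id)       # commit the ref resolved to
print(f.last_commit_id)  # last commit that modified this file
```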
alita_sdk/tools/google/bigquery/__init__.py
CHANGED
```diff
@@ -63,7 +63,7 @@ class BigQueryToolkitConfig(BaseModel):
     dataset: Optional[str] = Field(
         default=None,
         description="BigQuery dataset name",
-        json_schema_extra={"configuration": True},
+        json_schema_extra={"configuration": True, "configuration_title": True},
     )
     table: Optional[str] = Field(
         default=None,
```
alita_sdk/tools/jira/__init__.py
CHANGED
```diff
@@ -11,19 +11,27 @@ name = "jira"
 
 def get_tools(tool):
     return JiraToolkit().get_toolkit(
-
-
-
-
-
-
-
-
-
-
-
-
-
+        selected_tools=tool['settings'].get('selected_tools', []),
+        base_url=tool['settings'].get('base_url'),
+        cloud=tool['settings'].get('cloud', True),
+        api_key=tool['settings'].get('api_key', None),
+        username=tool['settings'].get('username', None),
+        token=tool['settings'].get('token', None),
+        limit=tool['settings'].get('limit', 5),
+        labels=parse_list(tool['settings'].get('labels', [])),
+        additional_fields=tool['settings'].get('additional_fields', []),
+        verify_ssl=tool['settings'].get('verify_ssl', True),
+        # indexer settings
+        llm=tool['settings'].get('llm', None),
+        alita=tool['settings'].get('alita', None),
+        connection_string=tool['settings'].get('connection_string', None),
+        collection_name=f"{tool.get('toolkit_name')}_{str(tool['id'])}",
+        doctype='code',
+        embedding_model="HuggingFaceEmbeddings",
+        embedding_model_params={"model_name": "sentence-transformers/all-MiniLM-L6-v2"},
+        vectorstore_type="PGVector",
+        toolkit_name=tool.get('toolkit_name')
+    ).get_tools()
 
 
 class JiraToolkit(BaseToolkit):
```
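For orientation, `get_tools` consumes a `tool` dict supplied by the runtime; a minimal illustrative payload (all values are placeholders) showing which settings feed the toolkit versus the indexer:

```python
tool = {
    "id": 42,
    "toolkit_name": "my_jira",
    "settings": {
        # toolkit settings
        "base_url": "https://example.atlassian.net",
        "cloud": True,
        "username": "bot@example.com",
        "api_key": "***",
        "selected_tools": [],
        # indexer settings
        "llm": None,
        "connection_string": "postgresql+psycopg://user:pass@localhost:5432/vectors",
    },
}

# get_tools(tool) then builds JiraToolkit with collection_name "my_jira_42"
# and the PGVector/HuggingFaceEmbeddings defaults, as wired above.
```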