alita-sdk 0.3.344__py3-none-any.whl → 0.3.346__py3-none-any.whl

This diff shows the changes between publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.

Potentially problematic release.


This version of alita-sdk might be problematic.

@@ -1,6 +1,6 @@
  import pymupdf
  import fitz
- from langchain_community.document_loaders import PyPDFLoader
+ from langchain_community.document_loaders import PyPDFium2Loader
 
  from .ImageParser import ImageParser
  from .utils import perform_llm_prediction_for_image_bytes, create_temp_file
@@ -23,6 +23,7 @@ class AlitaPDFLoader:
  self.headers = kwargs.get('headers', None)
  self.extraction_mode = kwargs.get('extraction_mode', "plain")
  self.extraction_kwargs = kwargs.get('extraction_kwargs', None)
+ self.images_parser=ImageParser(llm=self.llm, prompt=self.prompt)
 
  def get_content(self):
  if hasattr(self, 'file_path'):
@@ -119,13 +120,13 @@ class AlitaPDFLoader:
  return self._load_docs()
 
  def _load_docs(self):
- docs = PyPDFLoader(file_path=self.file_path,
- password=self.password,
- headers=self.headers,
- extract_images=self.extract_images,
- extraction_mode=self.extraction_mode,
- images_parser=ImageParser(llm=self.llm, prompt=self.prompt),
- extraction_kwargs=self.extraction_kwargs).load()
+ docs = PyPDFium2Loader(
+ file_path = self.file_path,
+ password=self.password,
+ headers=self.headers,
+ extract_images = self.extract_images,
+ images_parser = ImageParser(llm=self.llm, prompt=self.prompt),
+ ).load()
  for doc in docs:
  doc.metadata['chunk_id'] = doc.metadata['page']
  return docs
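
Note: a minimal standalone sketch of the new loader path, with image extraction disabled so no LLM-backed parser is needed; sample.pdf is a placeholder and the chunk-id line mirrors the _load_docs change above.

    from langchain_community.document_loaders import PyPDFium2Loader

    # Load each page as a Document and tag it with the same chunk id convention as _load_docs.
    docs = PyPDFium2Loader(file_path="sample.pdf", extract_images=False).load()
    for doc in docs:
        doc.metadata["chunk_id"] = doc.metadata["page"]
    print(len(docs), docs[0].metadata)
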
@@ -36,15 +36,23 @@ class AlitaPowerPointLoader:
  def read_pptx_slide(self, slide, index):
  text_content = f'Slide: {index}\n'
  for shape in slide.shapes:
- if hasattr(shape, "text"):
- text_content += shape.text + "\n"
+ if hasattr(shape, "text_frame") and shape.text_frame is not None:
+ for paragraph in shape.text_frame.paragraphs:
+ for run in paragraph.runs:
+ if run.hyperlink and run.hyperlink.address:
+ link_text = run.text.strip() or "Link"
+ link_url = run.hyperlink.address
+ text_content += f" [{link_text}]({link_url}) "
+ else:
+ text_content += run.text
+ text_content += "\n"
  elif self.extract_images and shape.shape_type == MSO_SHAPE_TYPE.PICTURE:
  try:
  caption = perform_llm_prediction_for_image_bytes(shape.image.blob, self.llm)
  except:
  caption = "unknown"
  text_content += "\n**Image Transcript:**\n" + caption + "\n--------------------\n"
- return text_content
+ return text_content + "\n"
 
  def load(self):
  if not self.file_path:
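
Note: the new run-level walk relies on python-pptx text frames; a small sketch of the same traversal on a standalone deck (deck.pptx is a placeholder), showing how hyperlinked runs come out as markdown-style links.

    from pptx import Presentation

    for index, slide in enumerate(Presentation("deck.pptx").slides, start=1):
        text = f"Slide: {index}\n"
        for shape in slide.shapes:
            if getattr(shape, "text_frame", None) is None:
                continue
            for paragraph in shape.text_frame.paragraphs:
                for run in paragraph.runs:
                    if run.hyperlink and run.hyperlink.address:
                        # hyperlinked runs become markdown-style links, as in the loader above
                        text += f" [{run.text.strip() or 'Link'}]({run.hyperlink.address}) "
                    else:
                        text += run.text
                text += "\n"
        print(text)
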
@@ -1,4 +1,8 @@
+ from typing import Iterator
+
  from langchain_community.document_loaders.parsers.images import BaseImageBlobParser
+ from langchain_core.documents import Document
+ from langchain_core.documents.base import Blob
 
  from alita_sdk.runtime.langchain.document_loaders.AlitaImageLoader import AlitaImageLoader
 
@@ -8,10 +12,19 @@ class ImageParser(BaseImageBlobParser):
  self.llm = kwargs.get('llm')
  self.prompt = kwargs.get('prompt')
 
+ def lazy_parse(self, blob: Blob) -> Iterator[Document]:
+ try:
+ yield from super().lazy_parse(blob)
+ except Exception:
+ yield Document(page_content="[Image: Unknown]")
+
  def _analyze_image(self, img) -> str:
  from io import BytesIO
 
  byte_stream = BytesIO()
  img.save(byte_stream, format='PNG')
  image_bytes = byte_stream.getvalue()
- return AlitaImageLoader(file_content=image_bytes, file_name="image.png", prompt=self.prompt, llm=self.llm).get_content()
+ try:
+ return AlitaImageLoader(file_content=image_bytes, file_name="image.png", prompt=self.prompt, llm=self.llm).get_content()
+ except Exception:
+ return "Image: unknown"
@@ -1,7 +1,7 @@
  import hashlib
- import json
  import logging
- from typing import Any, Optional, Generator
+ import re
+ from typing import Any, Optional, Generator, List
 
  from langchain_core.documents import Document
  from langchain_core.tools import ToolException
@@ -59,18 +59,53 @@ class ArtifactWrapper(NonCodeIndexerToolkit):
  def create_new_bucket(self, bucket_name: str, expiration_measure = "weeks", expiration_value = 1):
  return self.artifact.client.create_bucket(bucket_name, expiration_measure, expiration_value)
 
+ def _index_tool_params(self):
+ return {
+ 'include_extensions': (Optional[List[str]], Field(
+ description="List of file extensions to include when processing: i.e. ['*.png', '*.jpg']. "
+ "If empty, all files will be processed (except skip_extensions).",
+ default=[])),
+ 'skip_extensions': (Optional[List[str]], Field(
+ description="List of file extensions to skip when processing: i.e. ['*.png', '*.jpg']",
+ default=[])),
+ }
+
  def _base_loader(self, **kwargs) -> Generator[Document, None, None]:
  try:
- all_files = self.list_files(self.bucket, False)
+ all_files = self.list_files(self.bucket, False)['rows']
  except Exception as e:
  raise ToolException(f"Unable to extract files: {e}")
 
- for file in all_files['rows']:
+ include_extensions = kwargs.get('include_extensions', [])
+ skip_extensions = kwargs.get('skip_extensions', [])
+ self._log_tool_event(message=f"Files filtering started. Include extensions: {include_extensions}. "
+ f"Skip extensions: {skip_extensions}", tool_name="loader")
+ # show the progress of filtering
+ total_files = len(all_files) if isinstance(all_files, list) else 0
+ filtered_files_count = 0
+ for file in all_files:
+ filtered_files_count += 1
+ if filtered_files_count % 10 == 0 or filtered_files_count == total_files:
+ self._log_tool_event(message=f"Files filtering progress: {filtered_files_count}/{total_files}",
+ tool_name="loader")
+ file_name = file['name']
+
+ # Check if file should be skipped based on skip_extensions
+ if any(re.match(pattern.replace('*', '.*') + '$', file_name, re.IGNORECASE)
+ for pattern in skip_extensions):
+ continue
+
+ # Check if file should be included based on include_extensions
+ # If include_extensions is empty, process all files (that weren't skipped)
+ if include_extensions and not (any(re.match(pattern.replace('*', '.*') + '$', file_name, re.IGNORECASE)
+ for pattern in include_extensions)):
+ continue
+
  metadata = {
  ("updated_on" if k == "modified" else k): str(v)
  for k, v in file.items()
  }
- metadata['id'] = self.get_hash_from_bucket_and_file_name(self.bucket, file['name'])
+ metadata['id'] = self.get_hash_from_bucket_and_file_name(self.bucket, file_name)
  yield Document(page_content="", metadata=metadata)
 
  def get_hash_from_bucket_and_file_name(self, bucket, file_name):
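
Note: the include/skip checks treat the configured extensions as glob-style patterns, replacing '*' with '.*', anchoring the end, and matching case-insensitively. A minimal sketch of that matching in isolation:

    import re

    def matches(patterns, file_name):
        # same conversion as in _base_loader: '*.png' -> '.*.png$', case-insensitive
        return any(re.match(pattern.replace('*', '.*') + '$', file_name, re.IGNORECASE)
                   for pattern in patterns)

    print(matches(['*.png', '*.jpg'], 'diagram.PNG'))  # True
    print(matches(['*.png', '*.jpg'], 'notes.txt'))    # False
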
@@ -227,9 +227,9 @@ class FigmaApiWrapper(NonCodeIndexerToolkit):
  self._log_tool_event(f"Loading files: {file_keys_include}")
  for file_key in file_keys_include:
  self._log_tool_event(f"Loading file `{file_key}`")
- file = self._client.get_file(file_key)
+ file = self._client.get_file(file_key, geometry='depth=1') # fetch only top-level structure (only pages without inner components)
  if not file:
- raise ToolException(f"Unexpected error while retrieving file {file_key}. Probably file is under editing. Try again later.")
+ raise ToolException(f"Unexpected error while retrieving file {file_key}. Please try specifying the node-id of an inner page.")
  metadata = {
  'id': file_key,
  'file_key': file_key,
@@ -284,20 +284,47 @@ class FigmaApiWrapper(NonCodeIndexerToolkit):
  for child in node['children']:
  texts.extend(self.get_texts_recursive(child))
  return texts
+
+ def _load_pages(self, document: Document):
+ file_key = document.metadata.get('id', '')
+ node_ids_include = document.metadata.pop('figma_pages_include', [])
+ node_ids_exclude = document.metadata.pop('figma_pages_exclude', [])
+ self._log_tool_event(f"Included pages: {node_ids_include}. Excluded pages: {node_ids_exclude}.")
+ if node_ids_include:
+ # try to fetch only specified pages/nodes in one request
+ file = self._get_file_nodes(file_key,','.join(node_ids_include)) # attempt to fetch only specified pages/nodes in one request
+ if file:
+ return [node['document'] for node in file.get('nodes', {}).values() if 'document' in node]
+ else:
+ #
+ file = self._client.get_file(file_key)
+ if file:
+ figma_pages = file.document.get('children', [])
+ return [node for node in figma_pages if ('id' in node and node['id'].replace(':', '-') not in node_ids_exclude)]
+ # fallback to loading all pages and filtering them one by one
+ file = self._client.get_file(file_key, geometry='depth=1')
+ if not file:
+ raise ToolException(
+ f"Unexpected error while retrieving file {file_key}. Please try specifying the node-id of an inner page.")
+ figma_pages_raw = file.document.get('children', [])
+ # extract pages one by one
+ if node_ids_include:
+ return [self._get_file_nodes(file_key, node_id) for node_id in node_ids_include]
+ else:
+ # return [self._get_file_nodes(file_key, page["id"]) for page in figma_pages_raw if ('id' in page and page['id'].replace(':', '-') not in node_ids_exclude)]
+ result = []
+ for page in figma_pages_raw:
+ if 'id' in page and page['id'].replace(':', '-') not in node_ids_exclude:
+ page_res = self._get_file_nodes(file_key, page["id"]).get('nodes', {}).get(page["id"], {}).get("document", {})
+ result.append(page_res)
+ return result
 
  def _process_document(self, document: Document) -> Generator[Document, None, None]:
  file_key = document.metadata.get('id', '')
  self._log_tool_event(f"Loading details (images) for `{file_key}`")
- figma_pages = self._client.get_file(file_key).document.get('children', [])
- node_ids_include = document.metadata.pop('figma_pages_include', [])
- node_ids_exclude = document.metadata.pop('figma_pages_exclude', [])
+ figma_pages = self._load_pages(document)
  node_types_include = [t.strip().lower() for t in document.metadata.pop('figma_nodes_include', [])]
  node_types_exclude = [t.strip().lower() for t in document.metadata.pop('figma_nodes_exclude', [])]
- self._log_tool_event(f"Included pages: {node_ids_include}. Excluded pages: {node_ids_exclude}.")
- if node_ids_include:
- figma_pages = [node for node in figma_pages if ('id' in node and node['id'].replace(':', '-') in node_ids_include)]
- elif node_ids_exclude:
- figma_pages = [node for node in figma_pages if ('id' in node and node['id'].replace(':', '-') not in node_ids_exclude)]
 
  image_nodes = []
  text_nodes = {}
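
Note: _load_pages leans on two Figma REST endpoints: the file endpoint fetched shallowly and the nodes endpoint for individual pages. An illustrative sketch of the raw calls, assuming a personal access token in FIGMA_TOKEN (FILE_KEY is a placeholder):

    import os
    import requests

    headers = {"X-Figma-Token": os.environ["FIGMA_TOKEN"]}
    # shallow fetch: top-level pages only, mirroring the depth-limited get_file call above
    file_meta = requests.get("https://api.figma.com/v1/files/FILE_KEY",
                             params={"depth": 1}, headers=headers).json()
    page_ids = [page["id"] for page in file_meta["document"]["children"]]
    # fetch a single page through the nodes endpoint, as _get_file_nodes does
    page = requests.get("https://api.figma.com/v1/files/FILE_KEY/nodes",
                        params={"ids": page_ids[0]}, headers=headers).json()
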
@@ -609,6 +636,12 @@ class FigmaApiWrapper(NonCodeIndexerToolkit):
  f"files/{file_key}/nodes?ids={str(ids)}", method="get"
  )
 
+ def _get_file_nodes(self, file_key: str, ids: str, **kwargs):
+ """Reads a specified file nodes by field key from Figma."""
+ return self._client.api_request(
+ f"files/{file_key}/nodes?ids={str(ids)}", method="get"
+ )
+
  @process_output
  def get_file(
  self,
@@ -87,7 +87,6 @@ class GitHubClient(BaseModel):
 
  # Using optional variables with None defaults instead of PrivateAttr
  github_api: Optional[Github] = Field(default=None, exclude=True)
- github_repo_instance: Optional[Repository.Repository] = Field(default=None, exclude=True)
 
  # Adding auth config and repo config as optional fields for initialization
  auth_config: Optional[GitHubAuthConfig] = Field(default=None, exclude=True)
@@ -96,6 +95,19 @@ class GitHubClient(BaseModel):
  # Alita instance
  alita: Optional[Any] = Field(default=None, exclude=True)
 
+ @property
+ def github_repo_instance(self) -> Optional[Repository.Repository]:
+ if not hasattr(self, "_github_repo_instance") or self._github_repo_instance is None:
+ try:
+ if self.github_api and self.github_repository:
+ self._github_repo_instance = self.github_api.get_repo(self.github_repository)
+ else:
+ self._github_repo_instance = None
+ except Exception as e:
+ # Only raise when accessed, not during initialization
+ raise ToolException(e)
+ return self._github_repo_instance
+
  @model_validator(mode='before')
  def initialize_github_client(cls, values):
  """
@@ -144,15 +156,6 @@ class GitHubClient(BaseModel):
  else:
  values["github_api"] = Github(base_url=values["github_base_url"], auth=auth)
 
- # Get repository instance
- if values.get("github_repository"):
- values["github_repo_instance"] = values["github_api"].get_repo(values["github_repository"])
- else:
- # Initialize with default authentication if no auth_config provided
- values["github_api"] = Github(base_url=values.get("github_base_url", DEFAULT_BASE_URL))
- if values.get("github_repository"):
- values["github_repo_instance"] = values["github_api"].get_repo(values["github_repository"])
-
  return values
 
  @staticmethod
@@ -10,7 +10,7 @@ from pydantic import SecretStr, create_model, model_validator
  from pydantic.fields import Field, PrivateAttr
  from testrail_api import StatusCodeError, TestRailAPI
 
- from ..chunkers.code.constants import get_file_extension
+ from ..chunkers.code.constants import get_file_extension, image_extensions
  from ..non_code_indexer_toolkit import NonCodeIndexerToolkit
  from ..utils.available_tools_decorator import extend_with_parent_available_tools
  from ...runtime.utils.utils import IndexerKeywords
@@ -117,6 +117,13 @@ getCases = create_model(
  description="A list of case field keys to include in the data output. If None, defaults to ['title', 'id'].",
  ),
  ),
+ suite_id=(Optional[str],
+ Field(
+ default=None,
+ description="[Optional] Suite id for test cases extraction in case "
+ "project is in multiple suite mode (setting 3)",
+ ),
+ ),
  )
 
  getCasesByFilter = create_model(
@@ -323,6 +330,30 @@ class TestrailAPIWrapper(NonCodeIndexerToolkit):
  cls._client = TestRailAPI(url, email, password)
  return super().validate_toolkit(values)
 
+ def _validate_suite_mode_requirements(self, project_id: str, suite_id: Optional[str] = None) -> None:
+ """
+ Validate if project requires suite_id when in multiple suite mode.
+
+ Args:
+ project_id: The TestRail project ID to check
+ suite_id: The suite ID if provided (optional)
+ custom_error_msg: Custom error message to use (optional)
+
+ Raises:
+ ToolException: If project is in multiple suite mode and no suite_id is provided
+ """
+ if suite_id:
+ return # No validation needed if suite_id is already provided
+
+ try:
+ project = self._client.projects.get_project(project_id=project_id)
+ # 1 for single suite mode, 2 for single suite + baselines, 3 for multiple suites
+ suite_mode = project.get('suite_mode', 1)
+ if suite_mode == 3:
+ raise ToolException("Project is in multiple suite mode, please provide suite_id to extract test cases.")
+ except StatusCodeError as e:
+ logger.warning(f"Unable to check project suite mode: {e}")
+
  def add_cases(self, add_test_cases_data: str):
  """Adds new test cases into Testrail per defined parameters.
  add_test_cases_data: str - JSON string which includes list of objects with following parameters:
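
Note: TestRail's suite_mode is 1 for single suite, 2 for single suite plus baselines, and 3 for multiple suites; only mode 3 requires a suite_id. A minimal sketch of the same guard against a raw testrail_api client (URL, credentials and ids are placeholders):

    from testrail_api import TestRailAPI

    client = TestRailAPI("https://example.testrail.io", "user@example.com", "api-key")
    project = client.projects.get_project(project_id=1)
    if project.get("suite_mode", 1) == 3:
        # multiple suite mode: a suite_id is required for case extraction
        cases = client.cases.get_cases(project_id=1, suite_id=2)
    else:
        cases = client.cases.get_cases(project_id=1)
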
@@ -389,7 +420,8 @@ class TestrailAPIWrapper(NonCodeIndexerToolkit):
  return f"Extracted test case:\n{str(extracted_case)}"
 
  def get_cases(
- self, project_id: str, output_format: str = "json", keys: Optional[List[str]] = None
+ self, project_id: str, output_format: str = "json", keys: Optional[List[str]] = None,
+ suite_id: Optional[str] = None
  ) -> Union[str, ToolException]:
  """
  Extracts a list of test cases in the specified format: `json`, `csv`, or `markdown`.
@@ -410,8 +442,15 @@ class TestrailAPIWrapper(NonCodeIndexerToolkit):
  invalid_keys = [key for key in keys if key not in SUPPORTED_KEYS]
 
  try:
- extracted_cases = self._client.cases.get_cases(project_id=project_id)
- cases = extracted_cases.get("cases")
+ # Check if project requires suite_id for multiple suite mode
+ self._validate_suite_mode_requirements(
+ project_id=project_id,
+ suite_id=suite_id
+ )
+
+ extracted_cases = self._client.cases.get_cases(project_id=project_id, suite_id=suite_id)
+ # support old versions of testrail_api
+ cases = extracted_cases.get("cases") if isinstance(extracted_cases, dict) else extracted_cases
 
  if cases is None:
  return ToolException("No test cases found in the extracted data.")
@@ -466,10 +505,18 @@ class TestrailAPIWrapper(NonCodeIndexerToolkit):
  "json_case_arguments must be a JSON string or dictionary."
  )
  self._log_tool_event(message=f"Extract test cases per filter {params}", tool_name='get_cases_by_filter')
+
+ # Check if project requires suite_id when not provided in params
+ suite_id_in_params = params.get('suite_id', None)
+ self._validate_suite_mode_requirements(
+ project_id=project_id,
+ suite_id=str(suite_id_in_params) if suite_id_in_params else None
+ )
+
  extracted_cases = self._client.cases.get_cases(
  project_id=project_id, **params
  )
- self._log_tool_event(message=f"Test cases were extracted", tool_name='get_cases_by_filter')
+ self._log_tool_event(message="Test cases were extracted", tool_name='get_cases_by_filter')
 
  # support old versions of testrail_api
  cases = extracted_cases.get("cases") if isinstance(extracted_cases, dict) else extracted_cases
@@ -542,13 +589,21 @@ class TestrailAPIWrapper(NonCodeIndexerToolkit):
  self._include_attachments = kwargs.get('include_attachments', False)
  self._skip_attachment_extensions = kwargs.get('skip_attachment_extensions', [])
 
+ def _extract_cases_from_response(response):
+ """Extract cases from API response, supporting both old and new testrail_api versions."""
+ return response.get('cases', []) if isinstance(response, dict) else response
+
  try:
+ # Check if project requires suite_id when not provided
+ self._validate_suite_mode_requirements(project_id=project_id, suite_id=suite_id)
+
  if suite_id:
  resp = self._client.cases.get_cases(project_id=project_id, suite_id=int(suite_id))
- cases = resp.get('cases', [])
  else:
  resp = self._client.cases.get_cases(project_id=project_id)
- cases = resp.get('cases', [])
+
+ cases = _extract_cases_from_response(resp)
+
  except StatusCodeError as e:
  raise ToolException(f"Unable to extract test cases: {e}")
  # Apply filters
@@ -603,11 +658,30 @@ class TestrailAPIWrapper(NonCodeIndexerToolkit):
  case_id = base_data.get("id")
 
  # get a list of attachments for the case
- attachments = self._client.attachments.get_attachments_for_case_bulk(case_id=case_id)
+ attachments_response = self._client.attachments.get_attachments_for_case(case_id=case_id)
+
+ # Extract attachments from response - handle both old and new API response formats
+ if isinstance(attachments_response, dict) and 'attachments' in attachments_response:
+ attachments = attachments_response['attachments']
+ else:
+ attachments = attachments_response if isinstance(attachments_response, list) else []
 
  # process each attachment to extract its content
  for attachment in attachments:
- if get_file_extension(attachment['filename']) in self._skip_attachment_extensions:
+ attachment_name = attachment.get('filename') or attachment.get('name')
+ attachment['filename'] = attachment_name
+
+ # Handle filetype: use existing field if present, otherwise extract from filename
+ if 'filetype' not in attachment or not attachment['filetype']:
+ file_extension = get_file_extension(attachment_name)
+ attachment['filetype'] = file_extension.lstrip('.')
+
+ # Handle is_image: use existing field if present, otherwise check file extension
+ if 'is_image' not in attachment:
+ file_extension = get_file_extension(attachment_name)
+ attachment['is_image'] = file_extension in image_extensions
+
+ if get_file_extension(attachment_name) in self._skip_attachment_extensions:
  logger.info(f"Skipping attachment {attachment['filename']} with unsupported extension.")
  continue
 
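
Note: newer TestRail responses describe attachments with 'name', 'filetype' and 'is_image', while older ones only carry 'filename'; the block above normalises both shapes before the skip-extension check. A small sketch of that normalisation on two illustrative payloads (the inline extension handling and image list stand in for get_file_extension and image_extensions):

    image_extensions = {'.png', '.jpg', '.jpeg', '.gif'}  # illustrative subset

    def normalize(attachment: dict) -> dict:
        name = attachment.get('filename') or attachment.get('name')
        attachment['filename'] = name
        ext = '.' + name.rsplit('.', 1)[-1].lower() if '.' in name else ''
        if not attachment.get('filetype'):
            attachment['filetype'] = ext.lstrip('.')
        if 'is_image' not in attachment:
            attachment['is_image'] = ext in image_extensions
        return attachment

    print(normalize({'name': 'screenshot.PNG'}))                                      # fields filled from the name
    print(normalize({'filename': 'log.txt', 'filetype': 'txt', 'is_image': False}))   # existing fields kept
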
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: alita_sdk
- Version: 0.3.344
+ Version: 0.3.346
  Summary: SDK for building langchain agents using resources from Alita
  Author-email: Artem Rozumenko <artyom.rozumenko@gmail.com>, Mikalai Biazruchka <mikalai_biazruchka@epam.com>, Roman Mitusov <roman_mitusov@epam.com>, Ivan Krakhmaliuk <lifedj27@gmail.com>, Artem Dubrovskiy <ad13box@gmail.com>
  License-Expression: Apache-2.0
@@ -82,7 +82,7 @@ Requires-Dist: gitpython==3.1.43; extra == "tools"
  Requires-Dist: atlassian-python-api~=4.0.7; extra == "tools"
  Requires-Dist: jira==3.8.0; extra == "tools"
  Requires-Dist: qtest-swagger-client==0.0.3; extra == "tools"
- Requires-Dist: testrail-api==1.13.2; extra == "tools"
+ Requires-Dist: testrail-api==1.13.4; extra == "tools"
  Requires-Dist: azure-devops==7.1.0b4; extra == "tools"
  Requires-Dist: msrest==0.7.1; extra == "tools"
  Requires-Dist: python-graphql-client~=0.4.3; extra == "tools"
@@ -62,13 +62,13 @@ alita_sdk/runtime/langchain/document_loaders/AlitaImageLoader.py,sha256=QwgBJE-B
  alita_sdk/runtime/langchain/document_loaders/AlitaJSONLoader.py,sha256=Nav2cgCQKOHQi_ZgYYn_iFdP_Os56KVlVR5nHGXecBc,3445
  alita_sdk/runtime/langchain/document_loaders/AlitaJiraLoader.py,sha256=M2q3YThkps0yAZOjfoLcyE7qycVTYKcXEGtpmp0N6C8,10950
  alita_sdk/runtime/langchain/document_loaders/AlitaMarkdownLoader.py,sha256=RGHDfleYTn7AAc3H-yFZrjm06L0Ux14ZtEJpFlVBNCA,2474
- alita_sdk/runtime/langchain/document_loaders/AlitaPDFLoader.py,sha256=usSrPnYQ3dDOJDdg6gBDTnBJnHiqjLxd_kvOBfRyVxY,5946
- alita_sdk/runtime/langchain/document_loaders/AlitaPowerPointLoader.py,sha256=SKAAPo3DfMtRPxICKrPzlXXkC5RfaeiRj7lejLXTi7o,2337
+ alita_sdk/runtime/langchain/document_loaders/AlitaPDFLoader.py,sha256=olVThKX9Mmv4muTW0cAQBkgeNqU4IcdLVhqpBuzwly4,5904
+ alita_sdk/runtime/langchain/document_loaders/AlitaPowerPointLoader.py,sha256=CHIaUnP2Alu7D1NHxlL5N98iY7Gqm4tA5wHjBYUsQLc,2833
  alita_sdk/runtime/langchain/document_loaders/AlitaPythonLoader.py,sha256=m_7aq-aCFVb4vXZsJNinfN1hAuyy_S0ylRknv_ahxDc,340
  alita_sdk/runtime/langchain/document_loaders/AlitaQtestLoader.py,sha256=CUVVnisxm7b5yZWV6rn0Q3MEEaO1GWNcfnz5yWz8T0k,13283
  alita_sdk/runtime/langchain/document_loaders/AlitaTableLoader.py,sha256=nI8lyndVZxVAxbjX3yiqyuFQKFE8MjLPyYSyqRWxHqQ,4077
  alita_sdk/runtime/langchain/document_loaders/AlitaTextLoader.py,sha256=EiCIAF_OxSrbuwgOFk2IpxRMvFbctITt2jAI0g_atpk,3586
- alita_sdk/runtime/langchain/document_loaders/ImageParser.py,sha256=gao5yCCKdDai_Gx7YdEx5U6oMyJYzn69eYmEvWLh-fc,656
+ alita_sdk/runtime/langchain/document_loaders/ImageParser.py,sha256=RQ4zGdSw42ec8c6Eb48uFadayWuiT4FbwhGVwhSw60s,1065
  alita_sdk/runtime/langchain/document_loaders/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  alita_sdk/runtime/langchain/document_loaders/constants.py,sha256=xlOXq2iooepcM41SUehbH4ZUFsdz1gWli_7C9Lt5saI,7528
  alita_sdk/runtime/langchain/document_loaders/utils.py,sha256=9xghESf3axBbwxATyVuS0Yu-TWe8zWZnXgCD1ZVyNW0,2414
@@ -106,7 +106,7 @@ alita_sdk/runtime/toolkits/vectorstore.py,sha256=BGppQADa1ZiLO17fC0uCACTTEvPHlod
  alita_sdk/runtime/tools/__init__.py,sha256=7OA8YPKlEOfXu3-gJA08cyR-VymjSPL-OmbXI-B2xVA,355
  alita_sdk/runtime/tools/agent.py,sha256=m98QxOHwnCRTT9j18Olbb5UPS8-ZGeQaGiUyZJSyFck,3162
  alita_sdk/runtime/tools/application.py,sha256=z3vLZODs-_xEEnZFmGF0fKz1j3VtNJxqsAmg5ovExpQ,3129
- alita_sdk/runtime/tools/artifact.py,sha256=2Jjrhuj7Q-Sc5AKkAG7Pk8cJnGPqnqgtOmE3eDOVX0M,8694
+ alita_sdk/runtime/tools/artifact.py,sha256=9kNZENeGDK4wW3cG0tixmJb0FDJhO-VqujuuuxN8kDo,10682
  alita_sdk/runtime/tools/datasource.py,sha256=pvbaSfI-ThQQnjHG-QhYNSTYRnZB0rYtZFpjCfpzxYI,2443
  alita_sdk/runtime/tools/echo.py,sha256=spw9eCweXzixJqHnZofHE1yWiSUa04L4VKycf3KCEaM,486
  alita_sdk/runtime/tools/function.py,sha256=0iZJ-UxaPbtcXAVX9G5Vsn7vmD7lrz3cBG1qylto1gs,2844
@@ -236,10 +236,10 @@ alita_sdk/tools/custom_open_api/api_wrapper.py,sha256=sDSFpvEqpSvXHGiBISdQQcUecf
  alita_sdk/tools/elastic/__init__.py,sha256=iwnSRppRpzvJ1da2K3Glu8Uu41MhBDCYbguboLkEbW0,2818
  alita_sdk/tools/elastic/api_wrapper.py,sha256=pl8CqQxteJAGwyOhMcld-ZgtOTFwwbv42OITQVe8rM0,1948
  alita_sdk/tools/figma/__init__.py,sha256=W6vIMMkZI2Lmpg6_CRRV3oadaIbVI-qTLmKUh6enqWs,4509
- alita_sdk/tools/figma/api_wrapper.py,sha256=Bgwas5HOX9lzpQuPTJ5E3L2ugtWL4OxOzYSu4b1AWWI,31857
+ alita_sdk/tools/figma/api_wrapper.py,sha256=KbKet1Xvjq1Vynz_jEE1MtEAVtLYNlSCg67u4dfhe90,33681
  alita_sdk/tools/github/__init__.py,sha256=2rHu0zZyZGnLC5CkHgDIhe14N9yCyaEfrrt7ydH8478,5191
  alita_sdk/tools/github/api_wrapper.py,sha256=uDwYckdnpYRJtb0uZnDkaz2udvdDLVxuCh1tSwspsiU,8411
- alita_sdk/tools/github/github_client.py,sha256=nxnSXsDul2PPbWvYZS8TmAFFmR-5ALyakNoV5LN2D4U,86617
+ alita_sdk/tools/github/github_client.py,sha256=0YkpD6Zm4X46jMNN57ZIypo2YObtgxCGQokJAF-laFs,86597
  alita_sdk/tools/github/graphql_client_wrapper.py,sha256=d3AGjzLGH_hdQV2V8HeAX92dJ4dlnE5OXqUlCO_PBr0,71539
  alita_sdk/tools/github/schemas.py,sha256=TxEWR3SjDKVwzo9i2tLnss_uPAv85Mh7oWjvQvYLDQE,14000
  alita_sdk/tools/github/tool.py,sha256=Jnnv5lenV5ds8AAdyo2m8hSzyJ117HZBjzHC6T1ck-M,1037
@@ -325,7 +325,7 @@ alita_sdk/tools/sql/models.py,sha256=AKJgSl_kEEz4fZfw3kbvdGHXaRZ-yiaqfJOB6YOj3i0
  alita_sdk/tools/testio/__init__.py,sha256=NEvQtzsffqAXryaffVk0GpdcxZQ1AMkfeztnxHwNql4,2798
  alita_sdk/tools/testio/api_wrapper.py,sha256=BvmL5h634BzG6p7ajnQLmj-uoAw1gjWnd4FHHu1h--Q,21638
  alita_sdk/tools/testrail/__init__.py,sha256=Xg4nVjULL_D8JpIXLYXppnwUfGF4-lguFwKHmP5VwxM,4696
- alita_sdk/tools/testrail/api_wrapper.py,sha256=PKhtf04C6PFDexGCAJm-hjA9Gpu4crx6EXKT5K-b_Pk,32985
+ alita_sdk/tools/testrail/api_wrapper.py,sha256=kocA4ok7WDN7nGRpH-r7wPiqQaiWfBRrvbHm0Y961L4,36756
  alita_sdk/tools/utils/__init__.py,sha256=W9rCCUPtHCP5nGAbWp0n5jaNA84572aiRoqKneBnaS4,3330
  alita_sdk/tools/utils/available_tools_decorator.py,sha256=IbrdfeQkswxUFgvvN7-dyLMZMyXLiwvX7kgi3phciCk,273
  alita_sdk/tools/utils/content_parser.py,sha256=q0wj__flyFlmzwlvbzAj3wCdHhLXysFbpcpCtrNfsGg,14437
@@ -350,8 +350,8 @@ alita_sdk/tools/zephyr_scale/api_wrapper.py,sha256=kT0TbmMvuKhDUZc0i7KO18O38JM9S
  alita_sdk/tools/zephyr_squad/__init__.py,sha256=0ne8XLJEQSLOWfzd2HdnqOYmQlUliKHbBED5kW_Vias,2895
  alita_sdk/tools/zephyr_squad/api_wrapper.py,sha256=kmw_xol8YIYFplBLWTqP_VKPRhL_1ItDD0_vXTe_UuI,14906
  alita_sdk/tools/zephyr_squad/zephyr_squad_cloud_client.py,sha256=R371waHsms4sllHCbijKYs90C-9Yu0sSR3N4SUfQOgU,5066
- alita_sdk-0.3.344.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
- alita_sdk-0.3.344.dist-info/METADATA,sha256=b_iDXrCrmVAMdEpyYJkUb09r3tHnM9GQQKUAP1KWDgs,19015
- alita_sdk-0.3.344.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- alita_sdk-0.3.344.dist-info/top_level.txt,sha256=0vJYy5p_jK6AwVb1aqXr7Kgqgk3WDtQ6t5C-XI9zkmg,10
- alita_sdk-0.3.344.dist-info/RECORD,,
+ alita_sdk-0.3.346.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+ alita_sdk-0.3.346.dist-info/METADATA,sha256=OoUbeD3TLR5rkU_L-5H3DOb2tB7yJ9JWqmwAjDpYq_E,19015
+ alita_sdk-0.3.346.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ alita_sdk-0.3.346.dist-info/top_level.txt,sha256=0vJYy5p_jK6AwVb1aqXr7Kgqgk3WDtQ6t5C-XI9zkmg,10
+ alita_sdk-0.3.346.dist-info/RECORD,,