alita-sdk 0.3.345__py3-none-any.whl → 0.3.347__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.



@@ -85,7 +85,12 @@ class AlitaClient:
         # This loop iterates over each key-value pair in the arguments dictionary,
         # and if a value is a Pydantic object, it replaces it with its dictionary representation using .dict().
         for arg_name, arg_value in params.get('params', {}).get('arguments', {}).items():
-            if hasattr(arg_value, "dict") and callable(arg_value.dict):
+            if isinstance(arg_value, list):
+                params['params']['arguments'][arg_name] = [
+                    item.dict() if hasattr(item, "dict") and callable(item.dict) else item
+                    for item in arg_value
+                ]
+            elif hasattr(arg_value, "dict") and callable(arg_value.dict):
                 params['params']['arguments'][arg_name] = arg_value.dict()
         #
         response = requests.post(url, headers=self.headers, json=params, verify=False)
@@ -1,6 +1,6 @@
 import pymupdf
 import fitz
-from langchain_community.document_loaders import PyPDFLoader
+from langchain_community.document_loaders import PyPDFium2Loader

 from .ImageParser import ImageParser
 from .utils import perform_llm_prediction_for_image_bytes, create_temp_file
@@ -23,6 +23,7 @@ class AlitaPDFLoader:
         self.headers = kwargs.get('headers', None)
         self.extraction_mode = kwargs.get('extraction_mode', "plain")
         self.extraction_kwargs = kwargs.get('extraction_kwargs', None)
+        self.images_parser=ImageParser(llm=self.llm, prompt=self.prompt)

     def get_content(self):
         if hasattr(self, 'file_path'):
@@ -119,13 +120,13 @@ class AlitaPDFLoader:
         return self._load_docs()

     def _load_docs(self):
-        docs = PyPDFLoader(file_path=self.file_path,
-                           password=self.password,
-                           headers=self.headers,
-                           extract_images=self.extract_images,
-                           extraction_mode=self.extraction_mode,
-                           images_parser=ImageParser(llm=self.llm, prompt=self.prompt),
-                           extraction_kwargs=self.extraction_kwargs).load()
+        docs = PyPDFium2Loader(
+            file_path = self.file_path,
+            password=self.password,
+            headers=self.headers,
+            extract_images = self.extract_images,
+            images_parser = ImageParser(llm=self.llm, prompt=self.prompt),
+        ).load()
         for doc in docs:
             doc.metadata['chunk_id'] = doc.metadata['page']
         return docs
@@ -1,4 +1,8 @@
+from typing import Iterator
+
 from langchain_community.document_loaders.parsers.images import BaseImageBlobParser
+from langchain_core.documents import Document
+from langchain_core.documents.base import Blob

 from alita_sdk.runtime.langchain.document_loaders.AlitaImageLoader import AlitaImageLoader

@@ -8,10 +12,19 @@ class ImageParser(BaseImageBlobParser):
         self.llm = kwargs.get('llm')
         self.prompt = kwargs.get('prompt')

+    def lazy_parse(self, blob: Blob) -> Iterator[Document]:
+        try:
+            yield from super().lazy_parse(blob)
+        except Exception:
+            yield Document(page_content="[Image: Unknown]")
+
     def _analyze_image(self, img) -> str:
         from io import BytesIO

         byte_stream = BytesIO()
         img.save(byte_stream, format='PNG')
         image_bytes = byte_stream.getvalue()
-        return AlitaImageLoader(file_content=image_bytes, file_name="image.png", prompt=self.prompt, llm=self.llm).get_content()
+        try:
+            return AlitaImageLoader(file_content=image_bytes, file_name="image.png", prompt=self.prompt, llm=self.llm).get_content()
+        except Exception:
+            return "Image: unknown"
@@ -1,7 +1,7 @@
 import hashlib
-import json
 import logging
-from typing import Any, Optional, Generator
+import re
+from typing import Any, Optional, Generator, List

 from langchain_core.documents import Document
 from langchain_core.tools import ToolException
@@ -59,18 +59,54 @@ class ArtifactWrapper(NonCodeIndexerToolkit):
     def create_new_bucket(self, bucket_name: str, expiration_measure = "weeks", expiration_value = 1):
         return self.artifact.client.create_bucket(bucket_name, expiration_measure, expiration_value)

+    def _index_tool_params(self):
+        return {
+            'include_extensions': (Optional[List[str]], Field(
+                description="List of file extensions to include when processing: i.e. ['*.png', '*.jpg']. "
+                            "If empty, all files will be processed (except skip_extensions).",
+                default=[])),
+            'skip_extensions': (Optional[List[str]], Field(
+                description="List of file extensions to skip when processing: i.e. ['*.png', '*.jpg']",
+                default=[])),
+        }
+
     def _base_loader(self, **kwargs) -> Generator[Document, None, None]:
+        self._log_tool_event(message=f"Loading the files from artifact's bucket. {kwargs=}", tool_name="loader")
         try:
-            all_files = self.list_files(self.bucket, False)
+            all_files = self.list_files(self.bucket, False)['rows']
         except Exception as e:
             raise ToolException(f"Unable to extract files: {e}")

-        for file in all_files['rows']:
+        include_extensions = kwargs.get('include_extensions', [])
+        skip_extensions = kwargs.get('skip_extensions', [])
+        self._log_tool_event(message=f"Files filtering started. Include extensions: {include_extensions}. "
+                                     f"Skip extensions: {skip_extensions}", tool_name="loader")
+        # show the progress of filtering
+        total_files = len(all_files) if isinstance(all_files, list) else 0
+        filtered_files_count = 0
+        for file in all_files:
+            filtered_files_count += 1
+            if filtered_files_count % 10 == 0 or filtered_files_count == total_files:
+                self._log_tool_event(message=f"Files filtering progress: {filtered_files_count}/{total_files}",
+                                     tool_name="loader")
+            file_name = file['name']
+
+            # Check if file should be skipped based on skip_extensions
+            if any(re.match(pattern.replace('*', '.*') + '$', file_name, re.IGNORECASE)
+                   for pattern in skip_extensions):
+                continue
+
+            # Check if file should be included based on include_extensions
+            # If include_extensions is empty, process all files (that weren't skipped)
+            if include_extensions and not (any(re.match(pattern.replace('*', '.*') + '$', file_name, re.IGNORECASE)
+                                               for pattern in include_extensions)):
+                continue
+
             metadata = {
                 ("updated_on" if k == "modified" else k): str(v)
                 for k, v in file.items()
             }
-            metadata['id'] = self.get_hash_from_bucket_and_file_name(self.bucket, file['name'])
+            metadata['id'] = self.get_hash_from_bucket_and_file_name(self.bucket, file_name)
             yield Document(page_content="", metadata=metadata)

     def get_hash_from_bucket_and_file_name(self, bucket, file_name):
@@ -22,7 +22,7 @@ GLOBAL_REMOVE = []
 GLOBAL_DEPTH_START = 4
 GLOBAL_DEPTH_END = 6
 EXTRA_PARAMS = (
-    Optional[Dict[str, Union[str, int, None]]],
+    Optional[Dict[str, Union[str, int, List, None]]],
     Field(
         description=(
             "Additional parameters for customizing response processing:\n"
@@ -105,7 +105,7 @@ class GitHubClient(BaseModel):
             self._github_repo_instance = None
         except Exception as e:
             # Only raise when accessed, not during initialization
-            return ToolException(e)
+            raise ToolException(e)
         return self._github_repo_instance

     @model_validator(mode='before')
@@ -298,6 +298,18 @@ updateCase = create_model(
     ),
 )

+getSuites = create_model(
+    "getSuites",
+    project_id=(str, Field(description="Project id")),
+    output_format=(
+        str,
+        Field(
+            default="json",
+            description="Desired output format. Supported values: 'json', 'csv', 'markdown'. Defaults to 'json'.",
+        ),
+    ),
+)
+
 SUPPORTED_KEYS = {
     "id", "title", "section_id", "template_id", "type_id", "priority_id", "milestone_id",
     "refs", "created_by", "created_on", "updated_by", "updated_on", "estimate",
@@ -330,29 +342,74 @@ class TestrailAPIWrapper(NonCodeIndexerToolkit):
         cls._client = TestRailAPI(url, email, password)
         return super().validate_toolkit(values)

-    def _validate_suite_mode_requirements(self, project_id: str, suite_id: Optional[str] = None) -> None:
+    def _is_suite_id_required(self, project_id: str) -> bool:
         """
-        Validate if project requires suite_id when in multiple suite mode.
-
+        Returns True if project requires suite_id (multiple suite or baselines mode), otherwise False.
         Args:
             project_id: The TestRail project ID to check
-            suite_id: The suite ID if provided (optional)
-            custom_error_msg: Custom error message to use (optional)
-
-        Raises:
-            ToolException: If project is in multiple suite mode and no suite_id is provided
         """
-        if suite_id:
-            return # No validation needed if suite_id is already provided
-
         try:
             project = self._client.projects.get_project(project_id=project_id)
             # 1 for single suite mode, 2 for single suite + baselines, 3 for multiple suites
             suite_mode = project.get('suite_mode', 1)
-            if suite_mode == 3:
-                raise ToolException("Project is in multiple suite mode, please provide suite_id to extract test cases.")
-        except StatusCodeError as e:
-            logger.warning(f"Unable to check project suite mode: {e}")
+            return suite_mode == 2 or suite_mode == 3
+        except StatusCodeError:
+            return False
+
+    def _fetch_cases_with_suite_handling(
+        self,
+        project_id: str,
+        suite_id: Optional[str] = None,
+        **api_params
+    ) -> List[Dict]:
+        """
+        Unified method to fetch test cases with proper TestRail suite mode handling.
+
+        Args:
+            project_id: The TestRail project ID
+            suite_id: Optional suite ID to filter by
+            **api_params: Additional parameters to pass to the get_cases API call
+
+        Returns:
+            List of test case dictionaries
+        """
+        def _extract_cases_from_response(response):
+            """Extract cases from API response, supporting both old and new testrail_api versions."""
+            return response.get('cases', []) if isinstance(response, dict) else response
+
+        suite_required = self._is_suite_id_required(project_id=project_id)
+        all_cases = []
+
+        if suite_required:
+            # Suite modes 2 & 3: Require suite_id parameter
+            if suite_id:
+                response = self._client.cases.get_cases(
+                    project_id=project_id, suite_id=int(suite_id), **api_params
+                )
+                cases_from_suite = _extract_cases_from_response(response)
+                all_cases.extend(cases_from_suite)
+            else:
+                suites = self._get_raw_suites(project_id)
+                suite_ids = [suite['id'] for suite in suites if 'id' in suite]
+                for current_suite_id in suite_ids:
+                    try:
+                        response = self._client.cases.get_cases(
+                            project_id=project_id, suite_id=int(current_suite_id), **api_params
+                        )
+                        cases_from_suite = _extract_cases_from_response(response)
+                        all_cases.extend(cases_from_suite)
+                    except StatusCodeError:
+                        continue
+        else:
+            # Suite mode 1: Can fetch all cases directly without suite_id
+            try:
+                response = self._client.cases.get_cases(project_id=project_id, **api_params)
+                cases_from_project = _extract_cases_from_response(response)
+                all_cases.extend(cases_from_project)
+            except StatusCodeError as e:
+                logger.warning(f"Unable to fetch cases at project level: {e}")
+
+        return all_cases

     def add_cases(self, add_test_cases_data: str):
         """Adds new test cases into Testrail per defined parameters.
@@ -442,17 +499,10 @@ class TestrailAPIWrapper(NonCodeIndexerToolkit):
         invalid_keys = [key for key in keys if key not in SUPPORTED_KEYS]

         try:
-            # Check if project requires suite_id for multiple suite mode
-            self._validate_suite_mode_requirements(
-                project_id=project_id,
-                suite_id=suite_id
-            )
-
-            extracted_cases = self._client.cases.get_cases(project_id=project_id, suite_id=suite_id)
-            # support old versions of testrail_api
-            cases = extracted_cases.get("cases") if isinstance(extracted_cases, dict) else extracted_cases
+            # Use unified suite handling method
+            cases = self._fetch_cases_with_suite_handling(project_id=project_id, suite_id=suite_id)

-            if cases is None:
+            if not cases:
                 return ToolException("No test cases found in the extracted data.")

             extracted_cases_data = [
@@ -506,31 +556,35 @@ class TestrailAPIWrapper(NonCodeIndexerToolkit):
         )
         self._log_tool_event(message=f"Extract test cases per filter {params}", tool_name='get_cases_by_filter')

-        # Check if project requires suite_id when not provided in params
-        suite_id_in_params = params.get('suite_id', None)
-        self._validate_suite_mode_requirements(
-            project_id=project_id,
-            suite_id=str(suite_id_in_params) if suite_id_in_params else None
-        )
+        # Extract suite_id from params for unified handling
+        suite_id_in_params = params.pop('suite_id', None)
+        suite_id = str(suite_id_in_params) if suite_id_in_params else None

-        extracted_cases = self._client.cases.get_cases(
-            project_id=project_id, **params
+        # Use unified suite handling method with remaining filter parameters
+        cases = self._fetch_cases_with_suite_handling(
+            project_id=project_id,
+            suite_id=suite_id,
+            **params
         )
+
         self._log_tool_event(message="Test cases were extracted", tool_name='get_cases_by_filter')
-        # support old versions of testrail_api
-        cases = extracted_cases.get("cases") if isinstance(extracted_cases, dict) else extracted_cases

-        if cases is None:
+        if not cases:
             return ToolException("No test cases found in the extracted data.")

         if keys is None:
             return self._to_markup(cases, output_format)

-        extracted_cases_data = [
-            {key: case.get(key, "N/A") for key in keys} for case in cases
-        ]
+        extracted_cases_data = []
+        for case in cases:
+            case_dict = {}
+            for key in keys:
+                if key in case:
+                    case_dict[key] = case[key]
+            if case_dict:
+                extracted_cases_data.append(case_dict)

-        if extracted_cases_data is None:
+        if not extracted_cases_data:
             return ToolException("No valid test case data found to format.")

         result = self._to_markup(extracted_cases_data, output_format)
@@ -578,6 +632,40 @@ class TestrailAPIWrapper(NonCodeIndexerToolkit):
         return (
             f"Test case #{case_id} has been updated at '{updated_case['updated_on']}')"
         )
+
+    def _get_raw_suites(self, project_id: str) -> List[Dict]:
+        """
+        Internal helper to get raw suite data from TestRail API.
+        Handles both old and new testrail_api response formats.
+        """
+        suites_response = self._client.suites.get_suites(project_id=project_id)
+        if isinstance(suites_response, dict) and 'suites' in suites_response:
+            return suites_response['suites']
+        else:
+            return suites_response if isinstance(suites_response, list) else []
+
+    def get_suites(self, project_id: str, output_format: str = "json",) -> Union[str, ToolException]:
+        """Extracts a list of test suites for a given project from Testrail"""
+        try:
+            suites = self._get_raw_suites(project_id)
+
+            if not suites:
+                return ToolException("No test suites found for the specified project.")
+
+            suite_dicts = []
+            for suite in suites:
+                if isinstance(suite, dict):
+                    suite_dict = {}
+                    for field in ["id", "name", "description", "project_id", "is_baseline",
+                                  "completed_on", "url", "is_master", "is_completed"]:
+                        if field in suite:
+                            suite_dict[field] = suite[field]
+                    suite_dicts.append(suite_dict)
+                else:
+                    suite_dicts.append({"suite": str(suite)})
+            return self._to_markup(suite_dicts, output_format)
+        except StatusCodeError as e:
+            return ToolException(f"Unable to extract test suites: {e}")

     def _base_loader(self, project_id: str,
                      suite_id: Optional[str] = None,
@@ -589,24 +677,13 @@ class TestrailAPIWrapper(NonCodeIndexerToolkit):
         self._include_attachments = kwargs.get('include_attachments', False)
         self._skip_attachment_extensions = kwargs.get('skip_attachment_extensions', [])

-        def _extract_cases_from_response(response):
-            """Extract cases from API response, supporting both old and new testrail_api versions."""
-            return response.get('cases', []) if isinstance(response, dict) else response
-
         try:
-            # Check if project requires suite_id when not provided
-            self._validate_suite_mode_requirements(project_id=project_id, suite_id=suite_id)
-
-            if suite_id:
-                resp = self._client.cases.get_cases(project_id=project_id, suite_id=int(suite_id))
-            else:
-                resp = self._client.cases.get_cases(project_id=project_id)
-
-            cases = _extract_cases_from_response(resp)
-
+            # Use unified suite handling method
+            cases = self._fetch_cases_with_suite_handling(project_id=project_id, suite_id=suite_id)
         except StatusCodeError as e:
             raise ToolException(f"Unable to extract test cases: {e}")
-        # Apply filters
+
+        # Apply filters
         if section_id is not None:
             cases = [case for case in cases if case.get('section_id') == section_id]
         if title_keyword is not None:
@@ -810,6 +887,12 @@ class TestrailAPIWrapper(NonCodeIndexerToolkit):
                 "ref": self.update_case,
                 "description": self.update_case.__doc__,
                 "args_schema": updateCase,
+            },
+            {
+                "name": "get_suites",
+                "ref": self.get_suites,
+                "description": self.get_suites.__doc__,
+                "args_schema": getSuites,
             }
         ]
         return tools
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: alita_sdk
-Version: 0.3.345
+Version: 0.3.347
 Summary: SDK for building langchain agents using resources from Alita
 Author-email: Artem Rozumenko <artyom.rozumenko@gmail.com>, Mikalai Biazruchka <mikalai_biazruchka@epam.com>, Roman Mitusov <roman_mitusov@epam.com>, Ivan Krakhmaliuk <lifedj27@gmail.com>, Artem Dubrovskiy <ad13box@gmail.com>
 License-Expression: Apache-2.0
@@ -36,7 +36,7 @@ alita_sdk/configurations/zephyr_essential.py,sha256=tUIrh-PRNvdrLBj6rJXqlF-h6oaM
 alita_sdk/runtime/__init__.py,sha256=4W0UF-nl3QF2bvET5lnah4o24CoTwSoKXhuN0YnwvEE,828
 alita_sdk/runtime/clients/__init__.py,sha256=BdehU5GBztN1Qi1Wul0cqlU46FxUfMnI6Vq2Zd_oq1M,296
 alita_sdk/runtime/clients/artifact.py,sha256=TPvROw1qu4IyUEGuf7x40IKRpb5eFZpYGN3-8LfQE0M,3461
-alita_sdk/runtime/clients/client.py,sha256=uG97uviNsbBZvg2h5UbqH2VIW2_jDHwaTa2OKkhwvBc,43313
+alita_sdk/runtime/clients/client.py,sha256=ZOWsv-JJl54lzQ4JzYFBKslt4DI0ExNZ3zQ_U7zA3uE,43590
 alita_sdk/runtime/clients/datasource.py,sha256=HAZovoQN9jBg0_-lIlGBQzb4FJdczPhkHehAiVG3Wx0,1020
 alita_sdk/runtime/clients/prompt.py,sha256=li1RG9eBwgNK_Qf0qUaZ8QNTmsncFrAL2pv3kbxZRZg,1447
 alita_sdk/runtime/langchain/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -62,13 +62,13 @@ alita_sdk/runtime/langchain/document_loaders/AlitaImageLoader.py,sha256=QwgBJE-B
 alita_sdk/runtime/langchain/document_loaders/AlitaJSONLoader.py,sha256=Nav2cgCQKOHQi_ZgYYn_iFdP_Os56KVlVR5nHGXecBc,3445
 alita_sdk/runtime/langchain/document_loaders/AlitaJiraLoader.py,sha256=M2q3YThkps0yAZOjfoLcyE7qycVTYKcXEGtpmp0N6C8,10950
 alita_sdk/runtime/langchain/document_loaders/AlitaMarkdownLoader.py,sha256=RGHDfleYTn7AAc3H-yFZrjm06L0Ux14ZtEJpFlVBNCA,2474
-alita_sdk/runtime/langchain/document_loaders/AlitaPDFLoader.py,sha256=usSrPnYQ3dDOJDdg6gBDTnBJnHiqjLxd_kvOBfRyVxY,5946
+alita_sdk/runtime/langchain/document_loaders/AlitaPDFLoader.py,sha256=olVThKX9Mmv4muTW0cAQBkgeNqU4IcdLVhqpBuzwly4,5904
 alita_sdk/runtime/langchain/document_loaders/AlitaPowerPointLoader.py,sha256=CHIaUnP2Alu7D1NHxlL5N98iY7Gqm4tA5wHjBYUsQLc,2833
 alita_sdk/runtime/langchain/document_loaders/AlitaPythonLoader.py,sha256=m_7aq-aCFVb4vXZsJNinfN1hAuyy_S0ylRknv_ahxDc,340
 alita_sdk/runtime/langchain/document_loaders/AlitaQtestLoader.py,sha256=CUVVnisxm7b5yZWV6rn0Q3MEEaO1GWNcfnz5yWz8T0k,13283
 alita_sdk/runtime/langchain/document_loaders/AlitaTableLoader.py,sha256=nI8lyndVZxVAxbjX3yiqyuFQKFE8MjLPyYSyqRWxHqQ,4077
 alita_sdk/runtime/langchain/document_loaders/AlitaTextLoader.py,sha256=EiCIAF_OxSrbuwgOFk2IpxRMvFbctITt2jAI0g_atpk,3586
-alita_sdk/runtime/langchain/document_loaders/ImageParser.py,sha256=gao5yCCKdDai_Gx7YdEx5U6oMyJYzn69eYmEvWLh-fc,656
+alita_sdk/runtime/langchain/document_loaders/ImageParser.py,sha256=RQ4zGdSw42ec8c6Eb48uFadayWuiT4FbwhGVwhSw60s,1065
 alita_sdk/runtime/langchain/document_loaders/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 alita_sdk/runtime/langchain/document_loaders/constants.py,sha256=xlOXq2iooepcM41SUehbH4ZUFsdz1gWli_7C9Lt5saI,7528
 alita_sdk/runtime/langchain/document_loaders/utils.py,sha256=9xghESf3axBbwxATyVuS0Yu-TWe8zWZnXgCD1ZVyNW0,2414
@@ -106,7 +106,7 @@ alita_sdk/runtime/toolkits/vectorstore.py,sha256=BGppQADa1ZiLO17fC0uCACTTEvPHlod
 alita_sdk/runtime/tools/__init__.py,sha256=7OA8YPKlEOfXu3-gJA08cyR-VymjSPL-OmbXI-B2xVA,355
 alita_sdk/runtime/tools/agent.py,sha256=m98QxOHwnCRTT9j18Olbb5UPS8-ZGeQaGiUyZJSyFck,3162
 alita_sdk/runtime/tools/application.py,sha256=z3vLZODs-_xEEnZFmGF0fKz1j3VtNJxqsAmg5ovExpQ,3129
-alita_sdk/runtime/tools/artifact.py,sha256=2Jjrhuj7Q-Sc5AKkAG7Pk8cJnGPqnqgtOmE3eDOVX0M,8694
+alita_sdk/runtime/tools/artifact.py,sha256=4xA_va11WxO0fQclavKivRo24GI1b5qpsp2YZt7RxGY,10795
 alita_sdk/runtime/tools/datasource.py,sha256=pvbaSfI-ThQQnjHG-QhYNSTYRnZB0rYtZFpjCfpzxYI,2443
 alita_sdk/runtime/tools/echo.py,sha256=spw9eCweXzixJqHnZofHE1yWiSUa04L4VKycf3KCEaM,486
 alita_sdk/runtime/tools/function.py,sha256=0iZJ-UxaPbtcXAVX9G5Vsn7vmD7lrz3cBG1qylto1gs,2844
@@ -236,10 +236,10 @@ alita_sdk/tools/custom_open_api/api_wrapper.py,sha256=sDSFpvEqpSvXHGiBISdQQcUecf
 alita_sdk/tools/elastic/__init__.py,sha256=iwnSRppRpzvJ1da2K3Glu8Uu41MhBDCYbguboLkEbW0,2818
 alita_sdk/tools/elastic/api_wrapper.py,sha256=pl8CqQxteJAGwyOhMcld-ZgtOTFwwbv42OITQVe8rM0,1948
 alita_sdk/tools/figma/__init__.py,sha256=W6vIMMkZI2Lmpg6_CRRV3oadaIbVI-qTLmKUh6enqWs,4509
-alita_sdk/tools/figma/api_wrapper.py,sha256=KbKet1Xvjq1Vynz_jEE1MtEAVtLYNlSCg67u4dfhe90,33681
+alita_sdk/tools/figma/api_wrapper.py,sha256=yK45guP6oMStTpfNLXRYgIZtNWkuWzgjFm_Vzu-ivNg,33687
 alita_sdk/tools/github/__init__.py,sha256=2rHu0zZyZGnLC5CkHgDIhe14N9yCyaEfrrt7ydH8478,5191
 alita_sdk/tools/github/api_wrapper.py,sha256=uDwYckdnpYRJtb0uZnDkaz2udvdDLVxuCh1tSwspsiU,8411
-alita_sdk/tools/github/github_client.py,sha256=IhTYcqByJ_wnYg2GFkLkYaiG2j8kFkL8p8CTIVZwmqY,86598
+alita_sdk/tools/github/github_client.py,sha256=0YkpD6Zm4X46jMNN57ZIypo2YObtgxCGQokJAF-laFs,86597
 alita_sdk/tools/github/graphql_client_wrapper.py,sha256=d3AGjzLGH_hdQV2V8HeAX92dJ4dlnE5OXqUlCO_PBr0,71539
 alita_sdk/tools/github/schemas.py,sha256=TxEWR3SjDKVwzo9i2tLnss_uPAv85Mh7oWjvQvYLDQE,14000
 alita_sdk/tools/github/tool.py,sha256=Jnnv5lenV5ds8AAdyo2m8hSzyJ117HZBjzHC6T1ck-M,1037
@@ -325,7 +325,7 @@ alita_sdk/tools/sql/models.py,sha256=AKJgSl_kEEz4fZfw3kbvdGHXaRZ-yiaqfJOB6YOj3i0
 alita_sdk/tools/testio/__init__.py,sha256=NEvQtzsffqAXryaffVk0GpdcxZQ1AMkfeztnxHwNql4,2798
 alita_sdk/tools/testio/api_wrapper.py,sha256=BvmL5h634BzG6p7ajnQLmj-uoAw1gjWnd4FHHu1h--Q,21638
 alita_sdk/tools/testrail/__init__.py,sha256=Xg4nVjULL_D8JpIXLYXppnwUfGF4-lguFwKHmP5VwxM,4696
-alita_sdk/tools/testrail/api_wrapper.py,sha256=kocA4ok7WDN7nGRpH-r7wPiqQaiWfBRrvbHm0Y961L4,36756
+alita_sdk/tools/testrail/api_wrapper.py,sha256=tQcGlFJmftvs5ZiO4tsP19fCo4CrJeq_UEvQR1liVfE,39891
 alita_sdk/tools/utils/__init__.py,sha256=W9rCCUPtHCP5nGAbWp0n5jaNA84572aiRoqKneBnaS4,3330
 alita_sdk/tools/utils/available_tools_decorator.py,sha256=IbrdfeQkswxUFgvvN7-dyLMZMyXLiwvX7kgi3phciCk,273
 alita_sdk/tools/utils/content_parser.py,sha256=q0wj__flyFlmzwlvbzAj3wCdHhLXysFbpcpCtrNfsGg,14437
@@ -350,8 +350,8 @@ alita_sdk/tools/zephyr_scale/api_wrapper.py,sha256=kT0TbmMvuKhDUZc0i7KO18O38JM9S
 alita_sdk/tools/zephyr_squad/__init__.py,sha256=0ne8XLJEQSLOWfzd2HdnqOYmQlUliKHbBED5kW_Vias,2895
 alita_sdk/tools/zephyr_squad/api_wrapper.py,sha256=kmw_xol8YIYFplBLWTqP_VKPRhL_1ItDD0_vXTe_UuI,14906
 alita_sdk/tools/zephyr_squad/zephyr_squad_cloud_client.py,sha256=R371waHsms4sllHCbijKYs90C-9Yu0sSR3N4SUfQOgU,5066
-alita_sdk-0.3.345.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
-alita_sdk-0.3.345.dist-info/METADATA,sha256=xKGJO9ArLAkIHbt6Ow6scbFIqtp0cqbqca2NPHVk6ao,19015
-alita_sdk-0.3.345.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-alita_sdk-0.3.345.dist-info/top_level.txt,sha256=0vJYy5p_jK6AwVb1aqXr7Kgqgk3WDtQ6t5C-XI9zkmg,10
-alita_sdk-0.3.345.dist-info/RECORD,,
+alita_sdk-0.3.347.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+alita_sdk-0.3.347.dist-info/METADATA,sha256=ZvJklicNWTOf3Q1MrNdtVdLhUEnd4oVDokj_y4J_Ecg,19015
+alita_sdk-0.3.347.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+alita_sdk-0.3.347.dist-info/top_level.txt,sha256=0vJYy5p_jK6AwVb1aqXr7Kgqgk3WDtQ6t5C-XI9zkmg,10
+alita_sdk-0.3.347.dist-info/RECORD,,