alita-sdk 0.3.327__py3-none-any.whl → 0.3.329__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of alita-sdk might be problematic; see the registry's advisory page for more details.

@@ -255,13 +255,29 @@ class StateModifierNode(Runnable):
255
255
  type_of_output = type(state.get(self.output_variables[0])) if self.output_variables else None
256
256
  # Render the template using Jinja
257
257
  import json
258
+ import base64
258
259
  from jinja2 import Environment
259
260
 
260
261
  def from_json(value):
261
- return json.loads(value)
262
+ """Convert JSON string to Python object"""
263
+ try:
264
+ return json.loads(value)
265
+ except (json.JSONDecodeError, TypeError) as e:
266
+ logger.warning(f"Failed to parse JSON value: {e}")
267
+ return value
268
+
269
+ def base64_to_string(value):
270
+ """Convert base64 encoded string to regular string"""
271
+ try:
272
+ return base64.b64decode(value).decode('utf-8')
273
+ except Exception as e:
274
+ logger.warning(f"Failed to decode base64 value: {e}")
275
+ return value
276
+
262
277
 
263
278
  env = Environment()
264
279
  env.filters['from_json'] = from_json
280
+ env.filters['base64ToString'] = base64_to_string
265
281
 
266
282
  template = env.from_string(self.template)
267
283
  rendered_message = template.render(**input_data)
@@ -1,5 +1,6 @@
1
1
  import json
2
2
  import logging
3
+ import re
3
4
  import urllib.parse
4
5
  from typing import Dict, List, Generator, Optional
5
6
 
@@ -7,6 +8,7 @@ from azure.devops.connection import Connection
7
8
  from azure.devops.v7_1.core import CoreClient
8
9
  from azure.devops.v7_1.wiki import WikiClient
9
10
  from azure.devops.v7_1.work_item_tracking import TeamContext, Wiql, WorkItemTrackingClient
11
+ from bs4 import BeautifulSoup
10
12
  from langchain_core.documents import Document
11
13
  from langchain_core.tools import ToolException
12
14
  from msrest.authentication import BasicAuthentication
@@ -15,6 +17,7 @@ from pydantic import model_validator
15
17
  from pydantic.fields import Field
16
18
 
17
19
  from alita_sdk.tools.non_code_indexer_toolkit import NonCodeIndexerToolkit
20
+ from ...utils.content_parser import parse_file_content
18
21
  from ....runtime.utils.utils import IndexerKeywords
19
22
 
20
23
  logger = logging.getLogger(__name__)
@@ -53,7 +56,11 @@ ADOGetWorkItem = create_model(
53
56
  id=(int, Field(description="The work item id")),
54
57
  fields=(Optional[list[str]], Field(description="Comma-separated list of requested fields", default=None)),
55
58
  as_of=(Optional[str], Field(description="AsOf UTC date time string", default=None)),
56
- expand=(Optional[str], Field(description="The expand parameters for work item attributes. Possible options are { None, Relations, Fields, Links, All }.", default=None))
59
+ expand=(Optional[str], Field(description="The expand parameters for work item attributes. Possible options are { None, Relations, Fields, Links, All }.", default=None)),
60
+ parse_attachments=(Optional[bool], Field(description="Value that defines is attachment should be parsed.", default=False)),
61
+ image_description_prompt=(Optional[str],
62
+ Field(description="Prompt which is used for image description", default=None)),
63
+
57
64
  )
58
65
 
59
66
  ADOLinkWorkItem = create_model(
@@ -284,8 +291,23 @@ class AzureDevOpsApiWrapper(NonCodeIndexerToolkit):
284
291
  logger.error(f"Error searching work items: {e}")
285
292
  return ToolException(f"Error searching work items: {e}")
286
293
 
287
-
288
- def get_work_item(self, id: int, fields: Optional[list[str]] = None, as_of: Optional[str] = None, expand: Optional[str] = None):
294
+ def parse_attachment_by_url(self, attachment_url, file_name=None, image_description_prompt=None):
295
+ match = re.search(r'attachments/([\w-]+)(?:\?fileName=([^&]+))?', attachment_url)
296
+ if match:
297
+ attachment_id = match.group(1)
298
+ if not file_name:
299
+ file_name = match.group(2)
300
+ if not file_name:
301
+ raise ToolException("File name must be provided either in the URL or as a parameter.")
302
+ return self.parse_attachment_by_id(attachment_id, file_name, image_description_prompt)
303
+ raise ToolException(f"Attachment '{attachment_url}' was not found.")
304
+
305
+ def parse_attachment_by_id(self, attachment_id, file_name, image_description_prompt):
306
+ file_content = self.get_attachment_content(attachment_id)
307
+ return parse_file_content(file_content=file_content, file_name=file_name,
308
+ llm=self.llm, prompt=image_description_prompt)
309
+
310
+ def get_work_item(self, id: int, fields: Optional[list[str]] = None, as_of: Optional[str] = None, expand: Optional[str] = None, parse_attachments=False, image_description_prompt=None):
289
311
  """Get a single work item by ID."""
290
312
  try:
291
313
  # Validate that the Azure DevOps client is initialized
@@ -313,6 +335,24 @@ class AzureDevOpsApiWrapper(NonCodeIndexerToolkit):
313
335
  for relation in relations_data:
314
336
  parsed_item['relations'].append(relation.as_dict())
315
337
 
338
+ if parse_attachments:
339
+ # describe images in work item fields if present
340
+ for field_name, field_value in fields_data.items():
341
+ if isinstance(field_value, str):
342
+ soup = BeautifulSoup(field_value, 'html.parser')
343
+ images = soup.find_all('img')
344
+ for img in images:
345
+ src = img.get('src')
346
+ if src:
347
+ description = self.parse_attachment_by_url(src, image_description_prompt)
348
+ img['image-description'] = description
349
+ parsed_item[field_name] = str(soup)
350
+ # parse attached documents if present
351
+ if parsed_item['relations']:
352
+ for attachment in parsed_item['relations']:
353
+ attachment['content'] = self.parse_attachment_by_url(attachment['url'], attachment['attributes']['name'], image_description_prompt)
354
+
355
+
316
356
  return parsed_item
317
357
  except Exception as e:
318
358
  logger.error(f"Error getting work item: {e}")
@@ -522,10 +562,13 @@ class AzureDevOpsApiWrapper(NonCodeIndexerToolkit):
522
562
  'attachment_ids': {rel.url.split('/')[-1]:rel.attributes.get('name', '') for rel in wi.relations or [] if rel.rel == 'AttachedFile'}
523
563
  })
524
564
 
565
+ def get_attachment_content(self, attachment_id):
566
+ content_generator = self._client.get_attachment_content(id=attachment_id, download=True)
567
+ return b"".join(content_generator)
568
+
525
569
  def _process_document(self, document: Document) -> Generator[Document, None, None]:
526
570
  for attachment_id, file_name in document.metadata.get('attachment_ids', {}).items():
527
- content_generator = self._client.get_attachment_content(id=attachment_id, download=True)
528
- content = b"".join(x for x in content_generator)
571
+ content = self.get_attachment_content(attachment_id=attachment_id)
529
572
  yield Document(page_content="", metadata={'id': attachment_id, IndexerKeywords.CONTENT_FILE_NAME.value: file_name, IndexerKeywords.CONTENT_IN_BYTES.value: content})
530
573
 
531
574
  def _index_tool_params(self):
@@ -61,11 +61,11 @@ class ArgsSchema(Enum):
61
61
  ),
62
62
  geometry=(
63
63
  Optional[str],
64
- Field(description="Sets to 'paths' to export vector data"),
64
+ Field(description="Sets to 'paths' to export vector data", default=None),
65
65
  ),
66
66
  version=(
67
67
  Optional[str],
68
- Field(description="Sets version of file"),
68
+ Field(description="Sets version of file", default=None),
69
69
  ),
70
70
  extra_params=(
71
71
  Optional[Dict[str, Union[str, int, None]]],
@@ -120,7 +120,8 @@ class ArgsSchema(Enum):
120
120
  client_meta=(
121
121
  Optional[dict],
122
122
  Field(
123
- description="Positioning information of the comment (Vector, FrameOffset, Region, FrameOffsetRegion)"
123
+ description="Positioning information of the comment (Vector, FrameOffset, Region, FrameOffsetRegion)",
124
+ default=None,
124
125
  ),
125
126
  ),
126
127
  extra_params=(
@@ -147,26 +148,28 @@ class ArgsSchema(Enum):
147
148
  ),
148
149
  ),
149
150
  ids=(
150
- str,
151
+ Optional[str],
151
152
  Field(
152
153
  description="Specifies id of file images separated by comma",
153
154
  examples=["8:6,1:7"],
155
+ default="0:0",
154
156
  ),
155
157
  ),
156
158
  scale=(
157
159
  Optional[str],
158
- Field(description="A number between 0.01 and 4, the image scaling factor"),
160
+ Field(description="A number between 0.01 and 4, the image scaling factor", default=None),
159
161
  ),
160
162
  format=(
161
163
  Optional[str],
162
164
  Field(
163
165
  description="A string enum for the image output format",
164
166
  examples=["jpg", "png", "svg", "pdf"],
167
+ default=None,
165
168
  ),
166
169
  ),
167
170
  version=(
168
171
  Optional[str],
169
- Field(description="A specific version ID to use"),
172
+ Field(description="A specific version ID to use", default=None),
170
173
  ),
171
174
  extra_params=(
172
175
  Optional[Dict[str, Union[str, int, None]]],
@@ -307,76 +310,110 @@ class FigmaApiWrapper(NonCodeIndexerToolkit):
307
310
  else:
308
311
  raise ValueError("You must provide at least project_id or file_keys_include.")
309
312
 
313
+ def has_image_representation(self, node):
314
+ node_type = node.get('type', '').lower()
315
+ default_images_types = [
316
+ 'image', 'canvas', 'frame', 'vector', 'table', 'slice', 'sticky', 'shape_with_text', 'connector'
317
+ ]
318
+ # filter nodes of type which has image representation
319
+ # or rectangles with image as background
320
+ if (node_type in default_images_types
321
+ or (node_type == 'rectangle' and 'fills' in node and any(
322
+ fill.get('type') == 'IMAGE' for fill in node['fills'] if isinstance(fill, dict)))):
323
+ return True
324
+ return False
325
+
326
+ def get_texts_recursive(self, node):
327
+ texts = []
328
+ node_type = node.get('type', '').lower()
329
+ if node_type == 'text':
330
+ texts.append(node.get('characters', ''))
331
+ if 'children' in node:
332
+ for child in node['children']:
333
+ texts.extend(self.get_texts_recursive(child))
334
+ return texts
335
+
310
336
  def _process_document(self, document: Document) -> Generator[Document, None, None]:
311
337
  file_key = document.metadata.get('id', '')
312
338
  self._log_tool_event(f"Loading details (images) for `{file_key}`")
313
- #
314
339
  figma_pages = self._client.get_file(file_key).document.get('children', [])
315
340
  node_ids_include = document.metadata.pop('figma_pages_include', [])
316
341
  node_ids_exclude = document.metadata.pop('figma_pages_exclude', [])
317
- node_types_include = [t.lower() for t in document.metadata.pop('figma_nodes_include', [])]
318
- node_types_exclude = [t.lower() for t in document.metadata.pop('figma_nodes_exclude', [])]
342
+ node_types_include = [t.strip().lower() for t in document.metadata.pop('figma_nodes_include', [])]
343
+ node_types_exclude = [t.strip().lower() for t in document.metadata.pop('figma_nodes_exclude', [])]
319
344
  self._log_tool_event(f"Included pages: {node_ids_include}. Excluded pages: {node_ids_exclude}.")
320
345
  if node_ids_include:
321
346
  figma_pages = [node for node in figma_pages if ('id' in node and node['id'].replace(':', '-') in node_ids_include)]
322
347
  elif node_ids_exclude:
323
348
  figma_pages = [node for node in figma_pages if ('id' in node and node['id'].replace(':', '-') not in node_ids_exclude)]
324
349
 
325
- # if node_types_include is not provided, default to 'frame'
326
- # to avoid downloading too many images and nodes which co=annot be rendered as images
327
- if not node_types_include:
328
- node_types_include = ['frame']
329
-
330
- node_ids = [
331
- child['id']
332
- for page in figma_pages
333
- if 'children' in page
334
- for child in page['children']
335
- if 'id' in child
336
- and (
337
- (node_types_include and child.get('type').lower() in node_types_include)
338
- or (node_types_exclude and child.get('type').lower() not in node_types_exclude)
339
- or (not node_types_include and not node_types_exclude)
340
- )
341
- ]
342
-
343
- if not node_ids:
344
- yield from ()
345
- return
346
-
347
- images = self._client.get_file_images(file_key, node_ids).images or {}
348
- total_images = len(images)
349
- if total_images == 0:
350
- logging.info(f"No images found for file {file_key}.")
351
- return
352
- progress_step = max(1, total_images // 10)
353
- for idx, (node_id, image_url) in enumerate(images.items(), 1):
354
- if not image_url:
355
- logging.warning(f"Image URL not found for node_id {node_id} in file {file_key}. Skipping.")
356
- continue
357
- response = requests.get(image_url)
358
- if response.status_code == 200:
359
- content_type = response.headers.get('Content-Type', '')
360
- if 'text/html' not in content_type.lower():
361
- extension = f".{content_type.split('/')[-1]}" if content_type.startswith('image') else '.txt'
362
- page_content = load_content_from_bytes(
363
- file_content=response.content,
364
- extension=extension, llm=self.llm)
350
+ image_nodes = []
351
+ text_nodes = {}
352
+ for page in figma_pages:
353
+ for node in page.get('children', []):
354
+ # filter by node_type if specified any include or exclude
355
+ node_type = node.get('type', '').lower()
356
+ include = node_types_include and node_type in node_types_include
357
+ exclude = node_types_exclude and node_type not in node_types_exclude
358
+ no_filter = not node_types_include and not node_types_exclude
359
+
360
+ if include or exclude or no_filter:
361
+ node_id = node.get('id')
362
+ if node_id:
363
+ if self.has_image_representation(node):
364
+ image_nodes.append(node['id'])
365
+ else:
366
+ text_nodes[node['id']] = self.get_texts_recursive(node)
367
+ # process image nodes
368
+ if image_nodes:
369
+ images = self._client.get_file_images(file_key, image_nodes).images or {}
370
+ total_images = len(images)
371
+ if total_images == 0:
372
+ logging.info(f"No images found for file {file_key}.")
373
+ return
374
+ progress_step = max(1, total_images // 10)
375
+ for idx, (node_id, image_url) in enumerate(images.items(), 1):
376
+ if not image_url:
377
+ logging.warning(f"Image URL not found for node_id {node_id} in file {file_key}. Skipping.")
378
+ continue
379
+ response = requests.get(image_url)
380
+ if response.status_code == 200:
381
+ content_type = response.headers.get('Content-Type', '')
382
+ if 'text/html' not in content_type.lower():
383
+ extension = f".{content_type.split('/')[-1]}" if content_type.startswith('image') else '.txt'
384
+ page_content = load_content_from_bytes(
385
+ file_content=response.content,
386
+ extension=extension, llm=self.llm)
387
+ yield Document(
388
+ page_content=page_content,
389
+ metadata={
390
+ 'id': node_id,
391
+ 'updated_on': document.metadata.get('updated_on', ''),
392
+ 'file_key': file_key,
393
+ 'node_id': node_id,
394
+ 'image_url': image_url,
395
+ 'type': 'image'
396
+ }
397
+ )
398
+ if idx % progress_step == 0 or idx == total_images:
399
+ percent = int((idx / total_images) * 100)
400
+ msg = f"Processed {idx}/{total_images} images ({percent}%) for file {file_key}."
401
+ logging.info(msg)
402
+ self._log_tool_event(msg)
403
+ # process text nodes
404
+ if text_nodes:
405
+ for node_id, texts in text_nodes.items():
406
+ if texts:
365
407
  yield Document(
366
- page_content=page_content,
408
+ page_content="\n".join(texts),
367
409
  metadata={
368
410
  'id': node_id,
369
411
  'updated_on': document.metadata.get('updated_on', ''),
370
412
  'file_key': file_key,
371
413
  'node_id': node_id,
372
- 'image_url': image_url
414
+ 'type': 'text'
373
415
  }
374
416
  )
375
- if idx % progress_step == 0 or idx == total_images:
376
- percent = int((idx / total_images) * 100)
377
- msg = f"Processed {idx}/{total_images} images ({percent}%) for file {file_key}."
378
- logging.info(msg)
379
- self._log_tool_event(msg)
380
417
 
381
418
  def _remove_metadata_keys(self):
382
419
  return super()._remove_metadata_keys() + ['figma_pages_include', 'figma_pages_exclude', 'figma_nodes_include', 'figma_nodes_exclude']
@@ -602,7 +639,7 @@ class FigmaApiWrapper(NonCodeIndexerToolkit):
602
639
  def get_file_images(
603
640
  self,
604
641
  file_key: str,
605
- ids: str = "0:0",
642
+ ids: Optional[str] = "0:0",
606
643
  scale: Optional[str] = None,
607
644
  format: Optional[str] = None,
608
645
  version: Optional[str] = None,
@@ -563,12 +563,14 @@ class JiraApiWrapper(NonCodeIndexerToolkit):
563
563
  Use the appropriate issue link type (e.g., "Test", "Relates", "Blocks").
564
564
  If we use "Test" linktype, the test is inward issue, the story/other issue is outward issue.."""
565
565
 
566
+ comment = "This test is linked to the story."
567
+ comment_body = {"content": [{"content": [{"text": comment,"type": "text"}],"type": "paragraph"}],"type": "doc","version": 1} if self.api_version == "3" else comment
566
568
  link_data = {
567
569
  "type": {"name": f"{linktype}"},
568
570
  "inwardIssue": {"key": f"{inward_issue_key}"},
569
571
  "outwardIssue": {"key": f"{outward_issue_key}"},
570
572
  "comment": {
571
- "body": "This test is linked to the story."
573
+ "body": comment_body
572
574
  }
573
575
  }
574
576
  self._client.create_issue_link(link_data)
@@ -706,6 +708,8 @@ class JiraApiWrapper(NonCodeIndexerToolkit):
706
708
  def add_comments(self, issue_key: str, comment: str):
707
709
  """ Add a comment to a Jira issue."""
708
710
  try:
711
+ if self.api_version == '3':
712
+ comment = {"content": [{"content": [{"text": comment,"type": "text"}],"type": "paragraph"}],"type": "doc","version": 1}
709
713
  self._client.issue_add_comment(issue_key, comment)
710
714
  issue_url = f"{self._client.url}browse/{issue_key}"
711
715
  output = f"Done. Comment is added for issue {issue_key}. You can view it at {issue_url}"
@@ -54,7 +54,7 @@ Be as precise and thorough as possible in your responses. If something is unclea
54
54
 
55
55
 
56
56
  def parse_file_content(file_name=None, file_content=None, is_capture_image: bool = False, page_number: int = None,
57
- sheet_name: str = None, llm=None, file_path: str = None, excel_by_sheets: bool = False) -> str | ToolException:
57
+ sheet_name: str = None, llm=None, file_path: str = None, excel_by_sheets: bool = False, prompt=None) -> str | ToolException:
58
58
  """Parse the content of a file based on its type and return the parsed content.
59
59
 
60
60
  Args:
@@ -71,6 +71,8 @@ def parse_file_content(file_name=None, file_content=None, is_capture_image: bool
71
71
  Raises:
72
72
  ToolException: If the file type is not supported or if there is an error reading the file.
73
73
  """
74
+ if not prompt:
75
+ prompt = image_processing_prompt
74
76
  loader = prepare_loader(
75
77
  file_name=file_name,
76
78
  file_content=file_content,
@@ -79,7 +81,8 @@ def parse_file_content(file_name=None, file_content=None, is_capture_image: bool
79
81
  sheet_name=sheet_name,
80
82
  llm=llm,
81
83
  file_path=file_path,
82
- excel_by_sheets=excel_by_sheets
84
+ excel_by_sheets=excel_by_sheets,
85
+ prompt=prompt
83
86
  )
84
87
 
85
88
  if not loader:
@@ -120,7 +123,7 @@ def load_file_docs(file_name=None, file_content=None, is_capture_image: bool = F
120
123
  return loader.load()
121
124
 
122
125
  def get_loader_kwargs(loader_object, file_name=None, file_content=None, is_capture_image: bool = False, page_number: int = None,
123
- sheet_name: str = None, llm=None, file_path: str = None, excel_by_sheets: bool = False):
126
+ sheet_name: str = None, llm=None, file_path: str = None, excel_by_sheets: bool = False, prompt=None):
124
127
  loader_kwargs = loader_object['kwargs']
125
128
  loader_kwargs.update({
126
129
  "file_path": file_path,
@@ -131,13 +134,15 @@ def get_loader_kwargs(loader_object, file_name=None, file_content=None, is_captu
131
134
  "page_number": page_number,
132
135
  "sheet_name": sheet_name,
133
136
  "excel_by_sheets": excel_by_sheets,
137
+ "prompt": prompt,
134
138
  "row_content": True,
135
139
  "json_documents": False
136
140
  })
137
141
  return loader_kwargs
138
142
 
139
143
  def prepare_loader(file_name=None, file_content=None, is_capture_image: bool = False, page_number: int = None,
140
- sheet_name: str = None, llm=None, file_path: str = None, excel_by_sheets: bool = False):
144
+ sheet_name: str = None, llm=None, file_path: str = None, excel_by_sheets: bool = False,
145
+ prompt=None):
141
146
  if (file_path and (file_name or file_content)) or (not file_path and (not file_name or file_content is None)):
142
147
  raise ToolException("Either (file_name and file_content) or file_path must be provided, but not both.")
143
148
 
@@ -146,7 +151,7 @@ def prepare_loader(file_name=None, file_content=None, is_capture_image: bool = F
146
151
  loader_object = loaders_map.get(extension)
147
152
  if not loader_object:
148
153
  return None
149
- loader_kwargs = get_loader_kwargs(loader_object, file_name, file_content, is_capture_image, page_number, sheet_name, llm, file_path, excel_by_sheets)
154
+ loader_kwargs = get_loader_kwargs(loader_object, file_name, file_content, is_capture_image, page_number, sheet_name, llm, file_path, excel_by_sheets, prompt)
150
155
  loader = loader_object['class'](**loader_kwargs)
151
156
  return loader
152
157
 
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: alita_sdk
3
- Version: 0.3.327
3
+ Version: 0.3.329
4
4
  Summary: SDK for building langchain agents using resources from Alita
5
5
  Author-email: Artem Rozumenko <artyom.rozumenko@gmail.com>, Mikalai Biazruchka <mikalai_biazruchka@epam.com>, Roman Mitusov <roman_mitusov@epam.com>, Ivan Krakhmaliuk <lifedj27@gmail.com>, Artem Dubrovskiy <ad13box@gmail.com>
6
6
  License-Expression: Apache-2.0
@@ -74,8 +74,7 @@ Requires-Dist: paramiko==3.3.1; extra == "tools"
74
74
  Requires-Dist: pygithub==2.3.0; extra == "tools"
75
75
  Requires-Dist: python-gitlab==4.5.0; extra == "tools"
76
76
  Requires-Dist: gitpython==3.1.43; extra == "tools"
77
- Requires-Dist: atlassian-python-api~=3.41; extra == "tools"
78
- Requires-Dist: atlassian_python_api==3.41.16; extra == "tools"
77
+ Requires-Dist: atlassian-python-api~=4.0.7; extra == "tools"
79
78
  Requires-Dist: jira==3.8.0; extra == "tools"
80
79
  Requires-Dist: qtest-swagger-client==0.0.3; extra == "tools"
81
80
  Requires-Dist: testrail-api==1.13.2; extra == "tools"
@@ -44,7 +44,7 @@ alita_sdk/runtime/langchain/assistant.py,sha256=1Eq8BIefp8suhbC9CssoOXtC-plkemoU
44
44
  alita_sdk/runtime/langchain/chat_message_template.py,sha256=kPz8W2BG6IMyITFDA5oeb5BxVRkHEVZhuiGl4MBZKdc,2176
45
45
  alita_sdk/runtime/langchain/constants.py,sha256=eHVJ_beJNTf1WJo4yq7KMK64fxsRvs3lKc34QCXSbpk,3319
46
46
  alita_sdk/runtime/langchain/indexer.py,sha256=0ENHy5EOhThnAiYFc7QAsaTNp9rr8hDV_hTK8ahbatk,37592
47
- alita_sdk/runtime/langchain/langraph_agent.py,sha256=oCUK2f8YdI96AZU0HmETnq78VWUv0weTltCH2q53qEU,46878
47
+ alita_sdk/runtime/langchain/langraph_agent.py,sha256=z_Bontl600nV7ombsomKXtRCuwCJc-5b5P91wapHYo4,47523
48
48
  alita_sdk/runtime/langchain/mixedAgentParser.py,sha256=M256lvtsL3YtYflBCEp-rWKrKtcY1dJIyRGVv7KW9ME,2611
49
49
  alita_sdk/runtime/langchain/mixedAgentRenderes.py,sha256=asBtKqm88QhZRILditjYICwFVKF5KfO38hu2O-WrSWE,5964
50
50
  alita_sdk/runtime/langchain/store_manager.py,sha256=i8Fl11IXJhrBXq1F1ukEVln57B1IBe-tqSUvfUmBV4A,2218
@@ -145,7 +145,7 @@ alita_sdk/tools/ado/test_plan/test_plan_wrapper.py,sha256=tdacv-myDDiMiEaWTpBuWd
145
145
  alita_sdk/tools/ado/wiki/__init__.py,sha256=ela6FOuT1fqN3FvHGBflzAh16HS1SSPsJYS2SldRX7A,5272
146
146
  alita_sdk/tools/ado/wiki/ado_wrapper.py,sha256=dGzhJO2PlrIL74sMRQg9iNNnNAopngCT_rsgLxvKYUY,14958
147
147
  alita_sdk/tools/ado/work_item/__init__.py,sha256=jml_zSkdC7gdGIoX2ZqRgDb45nhT3ZWzNsZ0II0iVJI,5474
148
- alita_sdk/tools/ado/work_item/ado_wrapper.py,sha256=TXl3V46SgGafQaxQKSTD3AN4MoQ3yNuQBwgVZ6-JhSk,28315
148
+ alita_sdk/tools/ado/work_item/ado_wrapper.py,sha256=LTZl9yiqjsoKdy-6zD4as3NCZg1NY1Ogp9LQbiV-IZw,30851
149
149
  alita_sdk/tools/advanced_jira_mining/__init__.py,sha256=GdrFVsyG8h43BnQwBKUtZ_ca_0atP1rQ_0adkd9mssc,4703
150
150
  alita_sdk/tools/advanced_jira_mining/data_mining_wrapper.py,sha256=nZPtuwVWp8VeHw1B8q9kdwf-6ZvHnlXTOGdcIMDkKpw,44211
151
151
  alita_sdk/tools/aws/__init__.py,sha256=tB6GCOg4XGSpR6qgbgAF4MUQ5-YmQCbWurWgrVKEKQ8,181
@@ -235,7 +235,7 @@ alita_sdk/tools/custom_open_api/api_wrapper.py,sha256=sDSFpvEqpSvXHGiBISdQQcUecf
235
235
  alita_sdk/tools/elastic/__init__.py,sha256=iwnSRppRpzvJ1da2K3Glu8Uu41MhBDCYbguboLkEbW0,2818
236
236
  alita_sdk/tools/elastic/api_wrapper.py,sha256=pl8CqQxteJAGwyOhMcld-ZgtOTFwwbv42OITQVe8rM0,1948
237
237
  alita_sdk/tools/figma/__init__.py,sha256=W6vIMMkZI2Lmpg6_CRRV3oadaIbVI-qTLmKUh6enqWs,4509
238
- alita_sdk/tools/figma/api_wrapper.py,sha256=SFuvjhxYgey1qGsO9sakFpY1bK1RSzgAH-uJsAp7FnE,27477
238
+ alita_sdk/tools/figma/api_wrapper.py,sha256=uWW3xCcTD18Fsv5Xf3BNbDVCiA7CEhRXHm6nn9PJCBs,29519
239
239
  alita_sdk/tools/github/__init__.py,sha256=2rHu0zZyZGnLC5CkHgDIhe14N9yCyaEfrrt7ydH8478,5191
240
240
  alita_sdk/tools/github/api_wrapper.py,sha256=uDwYckdnpYRJtb0uZnDkaz2udvdDLVxuCh1tSwspsiU,8411
241
241
  alita_sdk/tools/github/github_client.py,sha256=nxnSXsDul2PPbWvYZS8TmAFFmR-5ALyakNoV5LN2D4U,86617
@@ -260,7 +260,7 @@ alita_sdk/tools/google/bigquery/tool.py,sha256=Esf9Hsp8I0e7-5EdkFqQ-bid0cfrg-bfS
260
260
  alita_sdk/tools/google_places/__init__.py,sha256=QtmBCI0bHDK79u4hsCSWFcUihu-h4EmPSh9Yll7zz3w,3590
261
261
  alita_sdk/tools/google_places/api_wrapper.py,sha256=7nZly6nk4f4Tm7s2MVdnnwlb-1_WHRrDhyjDiqoyPjA,4674
262
262
  alita_sdk/tools/jira/__init__.py,sha256=G-9qnOYKFWM_adG0QFexh5-2pj_WaxIxxZanB3ARFqI,6339
263
- alita_sdk/tools/jira/api_wrapper.py,sha256=kYd6edldpjaEQNKvJjVaPQt8A22K6f3DMZVJk6Tylsc,80916
263
+ alita_sdk/tools/jira/api_wrapper.py,sha256=-juLuxeOCyDKb_-ZS8eTOeUJWEHKcCiBlXyFY2vbL4Q,81296
264
264
  alita_sdk/tools/keycloak/__init__.py,sha256=0WB9yXMUUAHQRni1ghDEmd7GYa7aJPsTVlZgMCM9cQ0,3050
265
265
  alita_sdk/tools/keycloak/api_wrapper.py,sha256=cOGr0f3S3-c6tRDBWI8wMnetjoNSxiV5rvC_0VHb8uw,3100
266
266
  alita_sdk/tools/llm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -327,7 +327,7 @@ alita_sdk/tools/testrail/__init__.py,sha256=Xg4nVjULL_D8JpIXLYXppnwUfGF4-lguFwKH
327
327
  alita_sdk/tools/testrail/api_wrapper.py,sha256=PKhtf04C6PFDexGCAJm-hjA9Gpu4crx6EXKT5K-b_Pk,32985
328
328
  alita_sdk/tools/utils/__init__.py,sha256=W9rCCUPtHCP5nGAbWp0n5jaNA84572aiRoqKneBnaS4,3330
329
329
  alita_sdk/tools/utils/available_tools_decorator.py,sha256=IbrdfeQkswxUFgvvN7-dyLMZMyXLiwvX7kgi3phciCk,273
330
- alita_sdk/tools/utils/content_parser.py,sha256=7k5Ddv3Nzp3UoocgslwwSXi1G9ZR7sXzj6593IDeOcM,14063
330
+ alita_sdk/tools/utils/content_parser.py,sha256=SLwRNNb2oahxM5DW9MJ570NgEFDKXl53bbMZ9021Ee0,14238
331
331
  alita_sdk/tools/vector_adapters/VectorStoreAdapter.py,sha256=ypBEAkFRGHv5edW0N9rdo1yKurNGQ4pRVEWtrN_7SeA,17656
332
332
  alita_sdk/tools/vector_adapters/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
333
333
  alita_sdk/tools/xray/__init__.py,sha256=eOMWP8VamFbbJgt1xrGpGPqB9ByOTA0Cd3LCaETzGk4,4376
@@ -349,8 +349,8 @@ alita_sdk/tools/zephyr_scale/api_wrapper.py,sha256=kT0TbmMvuKhDUZc0i7KO18O38JM9S
349
349
  alita_sdk/tools/zephyr_squad/__init__.py,sha256=0ne8XLJEQSLOWfzd2HdnqOYmQlUliKHbBED5kW_Vias,2895
350
350
  alita_sdk/tools/zephyr_squad/api_wrapper.py,sha256=kmw_xol8YIYFplBLWTqP_VKPRhL_1ItDD0_vXTe_UuI,14906
351
351
  alita_sdk/tools/zephyr_squad/zephyr_squad_cloud_client.py,sha256=R371waHsms4sllHCbijKYs90C-9Yu0sSR3N4SUfQOgU,5066
352
- alita_sdk-0.3.327.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
353
- alita_sdk-0.3.327.dist-info/METADATA,sha256=vGd3pwE273hj9w8I4t8YqRph7n5oird_C76aRf_tjNY,18897
354
- alita_sdk-0.3.327.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
355
- alita_sdk-0.3.327.dist-info/top_level.txt,sha256=0vJYy5p_jK6AwVb1aqXr7Kgqgk3WDtQ6t5C-XI9zkmg,10
356
- alita_sdk-0.3.327.dist-info/RECORD,,
352
+ alita_sdk-0.3.329.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
353
+ alita_sdk-0.3.329.dist-info/METADATA,sha256=BRvAazGNKqm2sPsodjbdSv7A6pfFCEzX_l1jtj7c4DM,18835
354
+ alita_sdk-0.3.329.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
355
+ alita_sdk-0.3.329.dist-info/top_level.txt,sha256=0vJYy5p_jK6AwVb1aqXr7Kgqgk3WDtQ6t5C-XI9zkmg,10
356
+ alita_sdk-0.3.329.dist-info/RECORD,,