alita-sdk 0.3.328__py3-none-any.whl → 0.3.330__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of alita-sdk might be problematic.

alita_sdk/runtime/langchain/document_loaders/AlitaTextLoader.py

@@ -1,4 +1,4 @@
-from typing import Iterator
+from typing import Iterator, Generator
 
 from langchain_core.documents import Document
 
@@ -6,6 +6,9 @@ from langchain_community.document_loaders.base import BaseLoader
 from langchain_community.document_loaders.helpers import detect_file_encodings
 from langchain_core.tools import ToolException
 
+from alita_sdk.tools.chunkers import markdown_chunker
+
+
 class AlitaTextLoader(BaseLoader):
 
     def __init__(self, **kwargs):
@@ -19,6 +22,8 @@ class AlitaTextLoader(BaseLoader):
             raise ToolException("'file_path' or 'file_content' parameter should be provided.")
         self.encoding = kwargs.get('encoding', 'utf-8')
         self.autodetect_encoding = kwargs.get('autodetect_encoding', False)
+        self.max_tokens=kwargs.get('max_tokens', 1024)
+        self.token_overlap = kwargs.get('token_overlap', 10)
 
     def get_content(self):
         text = ""
@@ -59,8 +64,16 @@ class AlitaTextLoader(BaseLoader):
 
         return text
 
+    def generate_document(self, text, metadata) -> Generator[Document, None, None]:
+        yield Document(page_content=text, metadata=metadata)
+
     def lazy_load(self) -> Iterator[Document]:
         """Load from file path."""
         text = self.get_content()
         metadata = {"source": str(self.file_path) if hasattr(self, 'file_path') else self.file_name}
-        yield Document(page_content=text, metadata=metadata)
+        chunks = markdown_chunker(file_content_generator=self.generate_document(text, metadata),
+                                  config={
+                                      "max_tokens": self.max_tokens,
+                                      "token_overlap": self.token_overlap
+                                  })
+        yield from chunks
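
For orientation, a minimal usage sketch of the chunked loader above; the file name and token settings are illustrative and not taken from the package:

    # Iterate chunked Documents from a local text file (hypothetical "notes.md").
    from alita_sdk.runtime.langchain.document_loaders.AlitaTextLoader import AlitaTextLoader

    loader = AlitaTextLoader(file_path="notes.md", max_tokens=512, token_overlap=20)
    for doc in loader.lazy_load():
        # lazy_load() now yields markdown_chunker output instead of a single Document
        print(len(doc.page_content), doc.metadata.get("source"))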

alita_sdk/tools/ado/work_item/ado_wrapper.py

@@ -1,5 +1,6 @@
 import json
 import logging
+import re
 import urllib.parse
 from typing import Dict, List, Generator, Optional
 
@@ -7,6 +8,7 @@ from azure.devops.connection import Connection
 from azure.devops.v7_1.core import CoreClient
 from azure.devops.v7_1.wiki import WikiClient
 from azure.devops.v7_1.work_item_tracking import TeamContext, Wiql, WorkItemTrackingClient
+from bs4 import BeautifulSoup
 from langchain_core.documents import Document
 from langchain_core.tools import ToolException
 from msrest.authentication import BasicAuthentication
@@ -15,6 +17,7 @@ from pydantic import model_validator
 from pydantic.fields import Field
 
 from alita_sdk.tools.non_code_indexer_toolkit import NonCodeIndexerToolkit
+from ...utils.content_parser import parse_file_content
 from ....runtime.utils.utils import IndexerKeywords
 
 logger = logging.getLogger(__name__)
@@ -53,7 +56,11 @@ ADOGetWorkItem = create_model(
     id=(int, Field(description="The work item id")),
     fields=(Optional[list[str]], Field(description="Comma-separated list of requested fields", default=None)),
     as_of=(Optional[str], Field(description="AsOf UTC date time string", default=None)),
-    expand=(Optional[str], Field(description="The expand parameters for work item attributes. Possible options are { None, Relations, Fields, Links, All }.", default=None))
+    expand=(Optional[str], Field(description="The expand parameters for work item attributes. Possible options are { None, Relations, Fields, Links, All }.", default=None)),
+    parse_attachments=(Optional[bool], Field(description="Value that defines is attachment should be parsed.", default=False)),
+    image_description_prompt=(Optional[str],
+                              Field(description="Prompt which is used for image description", default=None)),
+
 )
 
 ADOLinkWorkItem = create_model(
@@ -284,8 +291,23 @@ class AzureDevOpsApiWrapper(NonCodeIndexerToolkit):
             logger.error(f"Error searching work items: {e}")
             return ToolException(f"Error searching work items: {e}")
 
-
-    def get_work_item(self, id: int, fields: Optional[list[str]] = None, as_of: Optional[str] = None, expand: Optional[str] = None):
+    def parse_attachment_by_url(self, attachment_url, file_name=None, image_description_prompt=None):
+        match = re.search(r'attachments/([\w-]+)(?:\?fileName=([^&]+))?', attachment_url)
+        if match:
+            attachment_id = match.group(1)
+            if not file_name:
+                file_name = match.group(2)
+            if not file_name:
+                raise ToolException("File name must be provided either in the URL or as a parameter.")
+            return self.parse_attachment_by_id(attachment_id, file_name, image_description_prompt)
+        raise ToolException(f"Attachment '{attachment_url}' was not found.")
+
+    def parse_attachment_by_id(self, attachment_id, file_name, image_description_prompt):
+        file_content = self.get_attachment_content(attachment_id)
+        return parse_file_content(file_content=file_content, file_name=file_name,
+                                  llm=self.llm, prompt=image_description_prompt)
+
+    def get_work_item(self, id: int, fields: Optional[list[str]] = None, as_of: Optional[str] = None, expand: Optional[str] = None, parse_attachments=False, image_description_prompt=None):
         """Get a single work item by ID."""
         try:
             # Validate that the Azure DevOps client is initialized
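
The attachment-URL parsing above can be exercised in isolation; a standalone check of the same regular expression, with an invented attachment id and file name:

    import re

    pattern = r'attachments/([\w-]+)(?:\?fileName=([^&]+))?'
    url = "https://dev.azure.com/org/_apis/wit/attachments/1a2b3c4d-0000-1111-2222-333344445555?fileName=report.docx"
    match = re.search(pattern, url)
    if match:
        print(match.group(1))  # 1a2b3c4d-0000-1111-2222-333344445555
        print(match.group(2))  # report.docx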

@@ -313,6 +335,24 @@ class AzureDevOpsApiWrapper(NonCodeIndexerToolkit):
             for relation in relations_data:
                 parsed_item['relations'].append(relation.as_dict())
 
+            if parse_attachments:
+                # describe images in work item fields if present
+                for field_name, field_value in fields_data.items():
+                    if isinstance(field_value, str):
+                        soup = BeautifulSoup(field_value, 'html.parser')
+                        images = soup.find_all('img')
+                        for img in images:
+                            src = img.get('src')
+                            if src:
+                                description = self.parse_attachment_by_url(src, image_description_prompt)
+                                img['image-description'] = description
+                        parsed_item[field_name] = str(soup)
+                # parse attached documents if present
+                if parsed_item['relations']:
+                    for attachment in parsed_item['relations']:
+                        attachment['content'] = self.parse_attachment_by_url(attachment['url'], attachment['attributes']['name'], image_description_prompt)
+
+
             return parsed_item
         except Exception as e:
             logger.error(f"Error getting work item: {e}")

@@ -522,10 +562,13 @@ class AzureDevOpsApiWrapper(NonCodeIndexerToolkit):
                 'attachment_ids': {rel.url.split('/')[-1]:rel.attributes.get('name', '') for rel in wi.relations or [] if rel.rel == 'AttachedFile'}
             })
 
+    def get_attachment_content(self, attachment_id):
+        content_generator = self._client.get_attachment_content(id=attachment_id, download=True)
+        return b"".join(content_generator)
+
     def _process_document(self, document: Document) -> Generator[Document, None, None]:
         for attachment_id, file_name in document.metadata.get('attachment_ids', {}).items():
-            content_generator = self._client.get_attachment_content(id=attachment_id, download=True)
-            content = b"".join(x for x in content_generator)
+            content = self.get_attachment_content(attachment_id=attachment_id)
             yield Document(page_content="", metadata={'id': attachment_id, IndexerKeywords.CONTENT_FILE_NAME.value: file_name, IndexerKeywords.CONTENT_IN_BYTES.value: content})
 
     def _index_tool_params(self):
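
The new get_attachment_content helper simply joins the byte chunks streamed by the Azure DevOps client; the same join, sketched with a fake chunk generator standing in for the client call:

    def fake_attachment_chunks():
        yield b"%PDF-1.7\n"
        yield b"...rest of the file..."

    content = b"".join(fake_attachment_chunks())
    print(content[:8])  # b'%PDF-1.7'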

alita_sdk/tools/figma/api_wrapper.py

@@ -61,11 +61,11 @@ class ArgsSchema(Enum):
         ),
         geometry=(
             Optional[str],
-            Field(description="Sets to 'paths' to export vector data"),
+            Field(description="Sets to 'paths' to export vector data", default=None),
         ),
         version=(
             Optional[str],
-            Field(description="Sets version of file"),
+            Field(description="Sets version of file", default=None),
         ),
         extra_params=(
             Optional[Dict[str, Union[str, int, None]]],
@@ -120,7 +120,8 @@ class ArgsSchema(Enum):
         client_meta=(
             Optional[dict],
             Field(
-                description="Positioning information of the comment (Vector, FrameOffset, Region, FrameOffsetRegion)"
+                description="Positioning information of the comment (Vector, FrameOffset, Region, FrameOffsetRegion)",
+                default=None,
             ),
         ),
         extra_params=(
@@ -147,26 +148,28 @@ class ArgsSchema(Enum):
             ),
         ),
         ids=(
-            str,
+            Optional[str],
             Field(
                 description="Specifies id of file images separated by comma",
                 examples=["8:6,1:7"],
+                default="0:0",
             ),
         ),
         scale=(
             Optional[str],
-            Field(description="A number between 0.01 and 4, the image scaling factor"),
+            Field(description="A number between 0.01 and 4, the image scaling factor", default=None),
         ),
         format=(
             Optional[str],
             Field(
                 description="A string enum for the image output format",
                 examples=["jpg", "png", "svg", "pdf"],
+                default=None,
             ),
         ),
         version=(
             Optional[str],
-            Field(description="A specific version ID to use"),
+            Field(description="A specific version ID to use", default=None),
         ),
         extra_params=(
             Optional[Dict[str, Union[str, int, None]]],
@@ -636,7 +639,7 @@ class FigmaApiWrapper(NonCodeIndexerToolkit):
     def get_file_images(
         self,
         file_key: str,
-        ids: str = "0:0",
+        ids: Optional[str] = "0:0",
         scale: Optional[str] = None,
         format: Optional[str] = None,
         version: Optional[str] = None,
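
The added default=None values matter because Pydantic v2 treats an Optional annotation without an explicit default as a required field; a short illustration with invented model names:

    from typing import Optional
    from pydantic import BaseModel, Field, ValidationError

    class WithoutDefault(BaseModel):
        scale: Optional[str] = Field(description="scaling factor")  # still required

    class WithDefault(BaseModel):
        scale: Optional[str] = Field(description="scaling factor", default=None)

    try:
        WithoutDefault()
    except ValidationError as e:
        print(e.errors()[0]["type"])  # missing

    print(WithDefault().scale)  # None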

alita_sdk/tools/utils/content_parser.py

@@ -1,4 +1,5 @@
 import os
+import re
 import tempfile
 from logging import getLogger
 from pathlib import Path
@@ -8,6 +9,7 @@ from langchain_core.documents import Document
 from langchain_core.tools import ToolException
 
 from alita_sdk.runtime.langchain.document_loaders.constants import loaders_map, LoaderProperties
+from ...runtime.langchain.document_loaders.AlitaTextLoader import AlitaTextLoader
 from ...runtime.utils.utils import IndexerKeywords
 
 logger = getLogger(__name__)
@@ -54,7 +56,7 @@ Be as precise and thorough as possible in your responses. If something is unclea
 
 
 def parse_file_content(file_name=None, file_content=None, is_capture_image: bool = False, page_number: int = None,
-                       sheet_name: str = None, llm=None, file_path: str = None, excel_by_sheets: bool = False) -> str | ToolException:
+                       sheet_name: str = None, llm=None, file_path: str = None, excel_by_sheets: bool = False, prompt=None) -> str | ToolException:
     """Parse the content of a file based on its type and return the parsed content.
 
     Args:
@@ -71,6 +73,8 @@ def parse_file_content(file_name=None, file_content=None, is_capture_image: bool
     Raises:
         ToolException: If the file type is not supported or if there is an error reading the file.
     """
+    if not prompt:
+        prompt = image_processing_prompt
     loader = prepare_loader(
         file_name=file_name,
         file_content=file_content,
@@ -79,7 +83,8 @@ def parse_file_content(file_name=None, file_content=None, is_capture_image: bool
         sheet_name=sheet_name,
         llm=llm,
         file_path=file_path,
-        excel_by_sheets=excel_by_sheets
+        excel_by_sheets=excel_by_sheets,
+        prompt=prompt
     )
 
     if not loader:
@@ -120,7 +125,7 @@ def load_file_docs(file_name=None, file_content=None, is_capture_image: bool = F
     return loader.load()
 
 def get_loader_kwargs(loader_object, file_name=None, file_content=None, is_capture_image: bool = False, page_number: int = None,
-                      sheet_name: str = None, llm=None, file_path: str = None, excel_by_sheets: bool = False):
+                      sheet_name: str = None, llm=None, file_path: str = None, excel_by_sheets: bool = False, prompt=None):
     loader_kwargs = loader_object['kwargs']
     loader_kwargs.update({
         "file_path": file_path,
@@ -131,13 +136,15 @@ def get_loader_kwargs(loader_object, file_name=None, file_content=None, is_captu
         "page_number": page_number,
         "sheet_name": sheet_name,
         "excel_by_sheets": excel_by_sheets,
+        "prompt": prompt,
         "row_content": True,
         "json_documents": False
     })
     return loader_kwargs
 
 def prepare_loader(file_name=None, file_content=None, is_capture_image: bool = False, page_number: int = None,
-                   sheet_name: str = None, llm=None, file_path: str = None, excel_by_sheets: bool = False):
+                   sheet_name: str = None, llm=None, file_path: str = None, excel_by_sheets: bool = False,
+                   prompt=None):
     if (file_path and (file_name or file_content)) or (not file_path and (not file_name or file_content is None)):
         raise ToolException("Either (file_name and file_content) or file_path must be provided, but not both.")
 
@@ -146,7 +153,7 @@ def prepare_loader(file_name=None, file_content=None, is_capture_image: bool = F
     loader_object = loaders_map.get(extension)
     if not loader_object:
         return None
-    loader_kwargs = get_loader_kwargs(loader_object, file_name, file_content, is_capture_image, page_number, sheet_name, llm, file_path, excel_by_sheets)
+    loader_kwargs = get_loader_kwargs(loader_object, file_name, file_content, is_capture_image, page_number, sheet_name, llm, file_path, excel_by_sheets, prompt)
    loader = loader_object['class'](**loader_kwargs)
     return loader
 
@@ -226,7 +233,8 @@ def process_content_by_type(content, filename: str, llm=None, chunking_config=No
     """Process the content of a file based on its type using a configured loader."""
     temp_file_path = None
     try:
-        extension = "." + filename.split('.')[-1].lower()
+        match = re.search(r'\.([^.]+)$', filename)
+        extension = f".{match.group(1).lower()}" if match else ".txt"
 
         with tempfile.NamedTemporaryFile(mode='w+b', suffix=extension, delete=False) as temp_file:
             temp_file_path = temp_file.name
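
The regex-based extension lookup also changes behaviour for names without a dot; a quick check of the fallback, with example file names:

    import re

    def file_extension(filename: str) -> str:
        # Same approach as the new process_content_by_type code: trailing extension, else ".txt".
        match = re.search(r'\.([^.]+)$', filename)
        return f".{match.group(1).lower()}" if match else ".txt"

    print(file_extension("Report.Final.DOCX"))  # .docx
    print(file_extension("README"))             # .txt (the old split('.') logic produced ".readme")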

alita_sdk-0.3.328.dist-info/METADATA → alita_sdk-0.3.330.dist-info/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: alita_sdk
-Version: 0.3.328
+Version: 0.3.330
 Summary: SDK for building langchain agents using resources from Alita
 Author-email: Artem Rozumenko <artyom.rozumenko@gmail.com>, Mikalai Biazruchka <mikalai_biazruchka@epam.com>, Roman Mitusov <roman_mitusov@epam.com>, Ivan Krakhmaliuk <lifedj27@gmail.com>, Artem Dubrovskiy <ad13box@gmail.com>
 License-Expression: Apache-2.0

alita_sdk-0.3.328.dist-info/RECORD → alita_sdk-0.3.330.dist-info/RECORD

@@ -67,7 +67,7 @@ alita_sdk/runtime/langchain/document_loaders/AlitaPowerPointLoader.py,sha256=SKA
 alita_sdk/runtime/langchain/document_loaders/AlitaPythonLoader.py,sha256=m_7aq-aCFVb4vXZsJNinfN1hAuyy_S0ylRknv_ahxDc,340
 alita_sdk/runtime/langchain/document_loaders/AlitaQtestLoader.py,sha256=CUVVnisxm7b5yZWV6rn0Q3MEEaO1GWNcfnz5yWz8T0k,13283
 alita_sdk/runtime/langchain/document_loaders/AlitaTableLoader.py,sha256=nI8lyndVZxVAxbjX3yiqyuFQKFE8MjLPyYSyqRWxHqQ,4077
-alita_sdk/runtime/langchain/document_loaders/AlitaTextLoader.py,sha256=uNcV0En49_0u0RYB1sP1XfNspT2Xc5CacuJr9Jqv79Q,2972
+alita_sdk/runtime/langchain/document_loaders/AlitaTextLoader.py,sha256=EiCIAF_OxSrbuwgOFk2IpxRMvFbctITt2jAI0g_atpk,3586
 alita_sdk/runtime/langchain/document_loaders/ImageParser.py,sha256=gao5yCCKdDai_Gx7YdEx5U6oMyJYzn69eYmEvWLh-fc,656
 alita_sdk/runtime/langchain/document_loaders/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 alita_sdk/runtime/langchain/document_loaders/constants.py,sha256=H5oKHDHZw1L0x9sGiYGLmkGM6dH460bvuj-EycmpR6E,7235
@@ -145,7 +145,7 @@ alita_sdk/tools/ado/test_plan/test_plan_wrapper.py,sha256=tdacv-myDDiMiEaWTpBuWd
 alita_sdk/tools/ado/wiki/__init__.py,sha256=ela6FOuT1fqN3FvHGBflzAh16HS1SSPsJYS2SldRX7A,5272
 alita_sdk/tools/ado/wiki/ado_wrapper.py,sha256=dGzhJO2PlrIL74sMRQg9iNNnNAopngCT_rsgLxvKYUY,14958
 alita_sdk/tools/ado/work_item/__init__.py,sha256=jml_zSkdC7gdGIoX2ZqRgDb45nhT3ZWzNsZ0II0iVJI,5474
-alita_sdk/tools/ado/work_item/ado_wrapper.py,sha256=TXl3V46SgGafQaxQKSTD3AN4MoQ3yNuQBwgVZ6-JhSk,28315
+alita_sdk/tools/ado/work_item/ado_wrapper.py,sha256=LTZl9yiqjsoKdy-6zD4as3NCZg1NY1Ogp9LQbiV-IZw,30851
 alita_sdk/tools/advanced_jira_mining/__init__.py,sha256=GdrFVsyG8h43BnQwBKUtZ_ca_0atP1rQ_0adkd9mssc,4703
 alita_sdk/tools/advanced_jira_mining/data_mining_wrapper.py,sha256=nZPtuwVWp8VeHw1B8q9kdwf-6ZvHnlXTOGdcIMDkKpw,44211
 alita_sdk/tools/aws/__init__.py,sha256=tB6GCOg4XGSpR6qgbgAF4MUQ5-YmQCbWurWgrVKEKQ8,181
@@ -235,7 +235,7 @@ alita_sdk/tools/custom_open_api/api_wrapper.py,sha256=sDSFpvEqpSvXHGiBISdQQcUecf
 alita_sdk/tools/elastic/__init__.py,sha256=iwnSRppRpzvJ1da2K3Glu8Uu41MhBDCYbguboLkEbW0,2818
 alita_sdk/tools/elastic/api_wrapper.py,sha256=pl8CqQxteJAGwyOhMcld-ZgtOTFwwbv42OITQVe8rM0,1948
 alita_sdk/tools/figma/__init__.py,sha256=W6vIMMkZI2Lmpg6_CRRV3oadaIbVI-qTLmKUh6enqWs,4509
-alita_sdk/tools/figma/api_wrapper.py,sha256=-vsIy0Y4UFJBjKumRAOqmgS7pEXVn0UjaNSNN7pFsLs,29351
+alita_sdk/tools/figma/api_wrapper.py,sha256=uWW3xCcTD18Fsv5Xf3BNbDVCiA7CEhRXHm6nn9PJCBs,29519
 alita_sdk/tools/github/__init__.py,sha256=2rHu0zZyZGnLC5CkHgDIhe14N9yCyaEfrrt7ydH8478,5191
 alita_sdk/tools/github/api_wrapper.py,sha256=uDwYckdnpYRJtb0uZnDkaz2udvdDLVxuCh1tSwspsiU,8411
 alita_sdk/tools/github/github_client.py,sha256=nxnSXsDul2PPbWvYZS8TmAFFmR-5ALyakNoV5LN2D4U,86617
@@ -327,7 +327,7 @@ alita_sdk/tools/testrail/__init__.py,sha256=Xg4nVjULL_D8JpIXLYXppnwUfGF4-lguFwKH
 alita_sdk/tools/testrail/api_wrapper.py,sha256=PKhtf04C6PFDexGCAJm-hjA9Gpu4crx6EXKT5K-b_Pk,32985
 alita_sdk/tools/utils/__init__.py,sha256=W9rCCUPtHCP5nGAbWp0n5jaNA84572aiRoqKneBnaS4,3330
 alita_sdk/tools/utils/available_tools_decorator.py,sha256=IbrdfeQkswxUFgvvN7-dyLMZMyXLiwvX7kgi3phciCk,273
-alita_sdk/tools/utils/content_parser.py,sha256=7k5Ddv3Nzp3UoocgslwwSXi1G9ZR7sXzj6593IDeOcM,14063
+alita_sdk/tools/utils/content_parser.py,sha256=ZgjzopMY3OyEcypa8NEwB4263HZ08V0fKQZRJXPmlj4,14393
 alita_sdk/tools/vector_adapters/VectorStoreAdapter.py,sha256=ypBEAkFRGHv5edW0N9rdo1yKurNGQ4pRVEWtrN_7SeA,17656
 alita_sdk/tools/vector_adapters/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 alita_sdk/tools/xray/__init__.py,sha256=eOMWP8VamFbbJgt1xrGpGPqB9ByOTA0Cd3LCaETzGk4,4376
@@ -349,8 +349,8 @@ alita_sdk/tools/zephyr_scale/api_wrapper.py,sha256=kT0TbmMvuKhDUZc0i7KO18O38JM9S
 alita_sdk/tools/zephyr_squad/__init__.py,sha256=0ne8XLJEQSLOWfzd2HdnqOYmQlUliKHbBED5kW_Vias,2895
 alita_sdk/tools/zephyr_squad/api_wrapper.py,sha256=kmw_xol8YIYFplBLWTqP_VKPRhL_1ItDD0_vXTe_UuI,14906
 alita_sdk/tools/zephyr_squad/zephyr_squad_cloud_client.py,sha256=R371waHsms4sllHCbijKYs90C-9Yu0sSR3N4SUfQOgU,5066
-alita_sdk-0.3.328.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
-alita_sdk-0.3.328.dist-info/METADATA,sha256=jRVOPK8heveuCV9P6VcjO4DE5h6kMdu2TUXKU6VOH7Q,18835
-alita_sdk-0.3.328.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-alita_sdk-0.3.328.dist-info/top_level.txt,sha256=0vJYy5p_jK6AwVb1aqXr7Kgqgk3WDtQ6t5C-XI9zkmg,10
-alita_sdk-0.3.328.dist-info/RECORD,,
+alita_sdk-0.3.330.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+alita_sdk-0.3.330.dist-info/METADATA,sha256=tTJb4beJGhlKTftR_US7mWBMNQ4PaJbyxkp2bJ2AxOg,18835
+alita_sdk-0.3.330.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+alita_sdk-0.3.330.dist-info/top_level.txt,sha256=0vJYy5p_jK6AwVb1aqXr7Kgqgk3WDtQ6t5C-XI9zkmg,10
+alita_sdk-0.3.330.dist-info/RECORD,,