alita-sdk 0.3.253__py3-none-any.whl → 0.3.254__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -602,16 +602,22 @@ class AlitaClient:
         import logging
         logger = logging.getLogger(__name__)
         toolkit_config_parsed_json = None
+        events_dispatched = []
+
         try:
             toolkit_config_type = toolkit_config.get('type')
-            toolkit_class = get_available_toolkit_models().get(toolkit_config_type)['toolkit_class']
-            toolkit_config_model_class = toolkit_class.toolkit_config_schema()
-            toolkit_config_validated_settings = toolkit_config_model_class(
-                **toolkit_config.get('settings', {})
-            ).model_dump(mode='json')
-
+            available_toolkit_models = get_available_toolkit_models().get(toolkit_config_type)
             toolkit_config_parsed_json = deepcopy(toolkit_config)
-            toolkit_config_parsed_json['settings'] = toolkit_config_validated_settings
+            if available_toolkit_models:
+                toolkit_class = available_toolkit_models['toolkit_class']
+                toolkit_config_model_class = toolkit_class.toolkit_config_schema()
+                toolkit_config_validated_settings = toolkit_config_model_class(
+                    **toolkit_config.get('settings', {})
+                ).model_dump(mode='json')
+                toolkit_config_parsed_json['settings'] = toolkit_config_validated_settings
+            else:
+                logger.warning(f"Toolkit type '{toolkit_config_type}' is not registered; skipping model validation")
+                toolkit_config_parsed_json['settings'] = None
         except Exception as toolkit_config_error:
             logger.error(f"Failed to validate toolkit configuration: {str(toolkit_config_error)}")
             return {
@@ -635,7 +641,6 @@ class AlitaClient:
         # Create RunnableConfig for callback support
         config = None
         callbacks = []
-        events_dispatched = []

         if runtime_config:
             callbacks = runtime_config.get('callbacks', [])
@@ -0,0 +1,73 @@
+import json
+from typing import Iterator
+
+from langchain_core.documents import Document
+
+from langchain_community.document_loaders.base import BaseLoader
+from langchain_community.document_loaders.helpers import detect_file_encodings
+from langchain_core.tools import ToolException
+from langchain_text_splitters import RecursiveJsonSplitter
+
+
+class AlitaJSONLoader(BaseLoader):
+
+    def __init__(self, **kwargs):
+        """Initialize with file path."""
+        if kwargs.get('file_path'):
+            self.file_path = kwargs['file_path']
+        elif kwargs.get('file_content'):
+            self.file_content = kwargs['file_content']
+            self.file_name = kwargs['file_name']
+        else:
+            raise ToolException("'file_path' or 'file_content' parameter should be provided.")
+        self.encoding = kwargs.get('encoding', 'utf-8')
+        self.autodetect_encoding = kwargs.get('autodetect_encoding', False)
+        self.max_tokens = kwargs.get('max_tokens', 512)
+
+    def get_content(self):
+        try:
+            if hasattr(self, 'file_path') and self.file_path:
+                with open(self.file_path, encoding=self.encoding) as f:
+                    return json.load(f)
+            elif hasattr(self, 'file_content') and self.file_content:
+                return json.load(self.file_content)
+            else:
+                raise ValueError("Neither file_path nor file_content is provided.")
+
+        except UnicodeDecodeError as e:
+            if self.autodetect_encoding:
+                if hasattr(self, 'file_path') and self.file_path:
+                    detected_encodings = detect_file_encodings(self.file_path)
+                    for encoding in detected_encodings:
+                        try:
+                            with open(self.file_path, encoding=encoding.encoding) as f:
+                                return f.read()
+                            break
+                        except UnicodeDecodeError:
+                            continue
+                elif hasattr(self, 'file_content') and self.file_content:
+                    detected_encodings = detect_file_encodings(self.file_content)
+                    for encoding in detected_encodings:
+                        try:
+                            return self.file_content.decode(encoding.encoding)
+                        except UnicodeDecodeError:
+                            continue
+                else:
+                    raise ValueError("Neither file_path nor file_content is provided for encoding detection.")
+            else:
+                raise RuntimeError(f"Error loading content with encoding {self.encoding}.") from e
+        except Exception as e:
+            raise RuntimeError(f"Error loading content.") from e
+
+    def lazy_load(self) -> Iterator[Document]:
+        """Load from file path."""
+        content_json = self.get_content()
+
+        if isinstance(content_json, list):
+            data_dict = {str(i): item for i, item in enumerate(content_json)}
+        else:
+            data_dict = content_json
+        chunks = RecursiveJsonSplitter(max_chunk_size=self.max_tokens).split_json(json_data=data_dict)
+        for chunk in chunks:
+            metadata = {"source": str(self.file_path) if hasattr(self, 'file_path') else self.file_name}
+            yield Document(page_content=json.dumps(chunk), metadata=metadata)
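A minimal usage sketch for the new loader; the module path matches the RECORD entry later in this diff, while the file path and max_tokens value are illustrative, not taken from the package:

    # Sketch: chunk a JSON file with the new AlitaJSONLoader
    from alita_sdk.runtime.langchain.document_loaders.AlitaJSONLoader import AlitaJSONLoader

    loader = AlitaJSONLoader(file_path="data/sample.json", max_tokens=256)  # hypothetical input file
    for doc in loader.lazy_load():
        # each Document carries one JSON fragment produced by RecursiveJsonSplitter
        print(doc.metadata["source"], doc.page_content[:80])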
@@ -13,17 +13,18 @@
 # limitations under the License.

 from langchain_community.document_loaders import (
-    UnstructuredMarkdownLoader,
-    AirbyteJSONLoader, UnstructuredHTMLLoader,
-    UnstructuredPowerPointLoader, PythonLoader)
+    UnstructuredMarkdownLoader,
+    AirbyteJSONLoader, UnstructuredHTMLLoader,
+    PythonLoader)

 from .AlitaCSVLoader import AlitaCSVLoader
 from .AlitaDocxMammothLoader import AlitaDocxMammothLoader
 from .AlitaExcelLoader import AlitaExcelLoader
 from .AlitaImageLoader import AlitaImageLoader
+from .AlitaJSONLoader import AlitaJSONLoader
 from .AlitaPDFLoader import AlitaPDFLoader
-from .AlitaTextLoader import AlitaTextLoader
 from .AlitaPowerPointLoader import AlitaPowerPointLoader
+from .AlitaTextLoader import AlitaTextLoader

 loaders_map = {
     '.png': {
@@ -130,11 +131,9 @@ loaders_map = {
         'kwargs': {}
     },
     '.json': {
-        'class': AlitaTextLoader,
+        'class': AlitaJSONLoader,
         'is_multimodal_processing': False,
-        'kwargs': {
-            'autodetect_encoding': True
-        }
+        'kwargs': {}
     },
     '.jsonl': {
         'class': AirbyteJSONLoader,
@@ -154,12 +153,16 @@ loaders_map = {
     '.ppt': {
         'class': AlitaPowerPointLoader,
         'is_multimodal_processing': False,
-        'kwargs': {}
+        'kwargs': {
+            'mode': 'paged'
+        }
     },
     '.pptx': {
         'class': AlitaPowerPointLoader,
         'is_multimodal_processing': False,
-        'kwargs': {}
+        'kwargs': {
+            'mode': 'paged'
+        }
     },
     '.py': {
         'class': PythonLoader,
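As a rough sketch of how loaders_map entries are typically consumed (this is illustrative dispatch code, not the SDK's own):

    # Sketch: pick a loader class and its kwargs for a file by extension
    import os

    file_path = "slides.pptx"  # hypothetical input
    entry = loaders_map[os.path.splitext(file_path)[1].lower()]
    loader = entry['class'](file_path=file_path, **entry['kwargs'])  # e.g. AlitaPowerPointLoader with mode='paged'
    documents = list(loader.lazy_load())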
@@ -1,9 +1,9 @@
 import uuid
 from logging import getLogger
-from typing import Any, Type, Literal, Optional
+from typing import Any, Type, Literal, Optional, Union, List

 from langchain_core.tools import BaseTool
-from pydantic import BaseModel, Field, create_model
+from pydantic import BaseModel, Field, create_model, EmailStr, constr

 logger = getLogger(__name__)

@@ -19,45 +19,73 @@ class McpServerTool(BaseTool):


     @staticmethod
-    def create_pydantic_model_from_schema(schema: dict):
-        fields = {}
-        for field_name, field_info in schema['properties'].items():
-            field_type = field_info['type']
-            field_description = field_info.get('description', '')
-            if field_type == 'string':
-                if 'enum' in field_info:
-                    field_type = Literal[tuple(field_info['enum'])]
-                else:
-                    field_type = str
-            elif field_type == 'integer':
-                field_type = int
-            elif field_type == 'number':
-                field_type = float
-            elif field_type == 'boolean':
-                field_type = bool
-            elif field_type == 'object':  # Dict[str, Any]
-                nested_model = McpServerTool.create_pydantic_model_from_schema(field_info)
-                field_type = nested_model
-            elif field_type == 'array':
-                item_schema = field_info['items']
-                item_type = McpServerTool.create_pydantic_model_from_schema(item_schema) if item_schema['type'] == 'object' else (
-                    str if item_schema['type'] == 'string' else
-                    int if item_schema['type'] == 'integer' else
-                    float if item_schema['type'] == 'number' else
-                    bool if item_schema['type'] == 'boolean' else
-                    None
-                )
-                if item_type is None:
-                    raise ValueError(f"Unsupported array item type: {item_schema['type']}")
-                field_type = list[item_type]
-            else:
-                raise ValueError(f"Unsupported field type: {field_type}")
+    def create_pydantic_model_from_schema(schema: dict, model_name: str = "ArgsSchema"):
+        def parse_type(field: dict, name: str = "Field") -> Any:
+            if "allOf" in field:
+                merged = {}
+                required = set()
+                for idx, subschema in enumerate(field["allOf"]):
+                    sub_type = parse_type(subschema, f"{name}AllOf{idx}")
+                    if hasattr(sub_type, "__fields__"):
+                        merged.update({k: (v.outer_type_, v.default) for k, v in sub_type.__fields__.items()})
+                        required.update({k for k, v in sub_type.__fields__.items() if v.required})
+                if merged:
+                    return create_model(f"{name}AllOf", **merged)
+                return Any
+            if "anyOf" in field or "oneOf" in field:
+                key = "anyOf" if "anyOf" in field else "oneOf"
+                types = [parse_type(sub, f"{name}{key.capitalize()}{i}") for i, sub in enumerate(field[key])]
+                # Check for null type
+                if any(sub.get("type") == "null" for sub in field[key]):
+                    non_null_types = [parse_type(sub, f"{name}{key.capitalize()}{i}")
+                                      for i, sub in enumerate(field[key]) if sub.get("type") != "null"]
+                    if len(non_null_types) == 1:
+                        return Optional[non_null_types[0]]
+                return Union[tuple(types)]
+            t = field.get("type")
+            if isinstance(t, list):
+                if "null" in t:
+                    non_null = [x for x in t if x != "null"]
+                    if len(non_null) == 1:
+                        field = dict(field)
+                        field["type"] = non_null[0]
+                        return Optional[parse_type(field, name)]
+                    return Any
+                return Any
+            if t == "string":
+                if "enum" in field:
+                    return Literal[tuple(field["enum"])]
+                if field.get("format") == "email":
+                    return EmailStr
+                if "pattern" in field:
+                    return constr(regex=field["pattern"])
+                return str
+            if t == "integer":
+                return int
+            if t == "number":
+                return float
+            if t == "boolean":
+                return bool
+            if t == "object":
+                return McpServerTool.create_pydantic_model_from_schema(field, name.capitalize())
+            if t == "array":
+                items = field.get("items", {})
+                return List[parse_type(items, name + "Item")]
+            return Any

-            if field_name in schema.get('required', []):
-                fields[field_name] = (field_type, Field(..., description=field_description))
-            else:
-                fields[field_name] = (Optional[field_type], Field(None, description=field_description))
-        return create_model('DynamicModel', **fields)
+        properties = schema.get("properties", {})
+        required = set(schema.get("required", []))
+        fields = {}
+        for name, prop in properties.items():
+            typ = parse_type(prop, name.capitalize())
+            default = prop.get("default", ... if name in required else None)
+            field_args = {}
+            if "description" in prop:
+                field_args["description"] = prop["description"]
+            if "format" in prop:
+                field_args["format"] = prop["format"]
+            fields[name] = (typ, Field(default, **field_args))
+        return create_model(model_name, **fields)

     def _run(self, *args, **kwargs):
         call_data = {
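A hedged sketch of the reworked converter on a small JSON Schema; the schema and the expected output are illustrative:

    # Sketch: build an args model from a JSON Schema fragment
    from alita_sdk.runtime.tools.mcp_server_tool import McpServerTool

    schema = {
        "type": "object",
        "properties": {
            "query": {"type": "string", "description": "Search text"},
            "limit": {"anyOf": [{"type": "integer"}, {"type": "null"}], "default": 10},
            "tags": {"type": "array", "items": {"type": "string"}},
        },
        "required": ["query"],
    }
    SearchArgs = McpServerTool.create_pydantic_model_from_schema(schema, model_name="SearchArgs")
    print(SearchArgs(query="hello").model_dump())  # expected roughly: {'query': 'hello', 'limit': 10, 'tags': None}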
@@ -1,18 +1,16 @@
 import json
 import math
-import types
-from typing import Any, Optional, List, Dict, Callable, Generator
+from logging import getLogger
+from typing import Any, Optional, List, Dict, Generator

 from langchain_core.documents import Document
-from pydantic import BaseModel, model_validator, Field
-from ..langchain.tools.vector import VectorAdapter
 from langchain_core.messages import HumanMessage
+from pydantic import BaseModel, model_validator, Field
+
 from alita_sdk.tools.elitea_base import BaseToolApiWrapper
 from alita_sdk.tools.vector_adapters.VectorStoreAdapter import VectorStoreAdapterFactory
-from logging import getLogger
-
+from ..langchain.tools.vector import VectorAdapter
 from ..utils.logging import dispatch_custom_event
-from ..utils.utils import IndexerKeywords

 logger = getLogger(__name__)

@@ -212,10 +210,6 @@ class VectorStoreWrapperBase(BaseToolApiWrapper):
             tool_name="_clean_collection"
         )

-    def _add_to_collection(self, entry_id, new_collection_value):
-        """Add a new collection name to the `collection` key in the `metadata` column."""
-        self.vector_adapter.add_to_collection(self, entry_id, new_collection_value)
-
     def index_documents(self, documents: Generator[Document, None, None], collection_suffix: str, progress_step: int = 20, clean_index: bool = True):
         """ Index documents in the vectorstore.

@@ -6,7 +6,6 @@ from langchain_core.documents import Document
 from pydantic import create_model, Field, SecretStr

 # from alita_sdk.runtime.langchain.interfaces.llm_processor import get_embeddings
-from .chunkers import markdown_chunker
 from .utils.content_parser import process_content_by_type
 from .vector_adapters.VectorStoreAdapter import VectorStoreAdapterFactory
 from ..runtime.tools.vectorstore_base import VectorStoreWrapperBase
@@ -141,7 +140,7 @@ class BaseIndexerToolkit(VectorStoreWrapperBase):
     def _base_loader(self, **kwargs) -> Generator[Document, None, None]:
         """ Loads documents from a source, processes them,
         and returns a list of Document objects with base metadata: id and created_on."""
-        pass
+        yield from ()

     def _process_document(self, base_document: Document) -> Generator[Document, None, None]:
         """ Process an existing base document to extract relevant metadata for full document preparation.
@@ -153,7 +152,7 @@ class BaseIndexerToolkit(VectorStoreWrapperBase):

         Returns:
             Document: The processed document with metadata."""
-        pass
+        yield from ()

     def index_data(self, **kwargs):
         collection_suffix = kwargs.get("collection_suffix")
@@ -174,18 +173,20 @@ class BaseIndexerToolkit(VectorStoreWrapperBase):
         return self._save_index(list(documents), collection_suffix=collection_suffix, progress_step=progress_step)

     def _apply_loaders_chunkers(self, documents: Generator[Document, None, None], chunking_tool: str=None, chunking_config=None) -> Generator[Document, None, None]:
-        from alita_sdk.tools.chunkers import __confluence_chunkers__ as chunkers, __confluence_models__ as models
+        from alita_sdk.tools.chunkers import __confluence_chunkers__ as chunkers

         if chunking_config is None:
             chunking_config = {}
         chunking_config['embedding'] = self._embedding
         chunking_config['llm'] = self.llm
-
+
         for document in documents:
             if content_type := document.metadata.get('loader_content_type', None):
                 # apply parsing based on content type and chunk if chunker was applied to parent doc
+                content = document.metadata.pop('loader_content', None)
                 yield from process_content_by_type(
                     document=document,
+                    content=content,
                     extension_source=content_type, llm=self.llm, chunking_config=chunking_config)
             elif chunking_tool:
                 # apply default chunker from toolkit config. No parsing.
@@ -205,9 +206,6 @@ class BaseIndexerToolkit(VectorStoreWrapperBase):
             for dep in dependencies:
                 dep.metadata[IndexerKeywords.PARENT.value] = document.metadata.get('id', None)
                 yield dep
-
-    def _content_loader(self):
-        pass

     def _reduce_duplicates(
             self,
@@ -255,36 +253,6 @@ class BaseIndexerToolkit(VectorStoreWrapperBase):
     def remove_ids_fn(self, idx_data, key: str):
         raise NotImplementedError("Subclasses must implement this method")

-    def _process_documents(self, documents: List[Document]) -> Generator[Document, None, None]:
-        """
-        Process a list of base documents to extract relevant metadata for full document preparation.
-        Used for late processing of documents after we ensure that the documents have to be indexed to avoid
-        time-consuming operations for documents which might be useless.
-        This function passed to index_documents method of vector store and called after _reduce_duplicates method.
-
-        Args:
-            documents (List[Document]): The base documents to process.
-
-        Returns:
-            Generator[Document, None, None]: A generator yielding processed documents with metadata.
-        """
-        for doc in documents:
-            # Filter documents to process only those that either:
-            # - do not have a 'chunk_id' in their metadata, or
-            # - have 'chunk_id' explicitly set to 1.
-            # This prevents processing of irrelevant or duplicate chunks, improving efficiency.
-            chunk_id = doc.metadata.get("chunk_id")
-            if chunk_id is None or chunk_id == 1:
-                processed_docs = self._process_document(doc)
-                if processed_docs:  # Only proceed if the list is not empty
-                    for processed_doc in processed_docs:
-                        # map processed document (child) to the original document (parent)
-                        processed_doc.metadata[IndexerKeywords.PARENT.value] = doc.metadata.get('id', None)
-                        if chunker := self._get_dependencies_chunker(processed_doc):
-                            yield from chunker(file_content_generator=iter([processed_doc]), config=self._get_dependencies_chunker_config())
-                        else:
-                            yield processed_doc
-
     def remove_index(self, collection_suffix: str = ""):
         """Cleans the indexed data in the collection."""
         super()._clean_collection(collection_suffix=collection_suffix)
@@ -20,6 +20,7 @@ def get_tools(tool):
         confluence_configuration=tool['settings']['confluence_configuration'],
         limit=tool['settings'].get('limit', 5),
         labels=parse_list(tool['settings'].get('labels', None)),
+        custom_headers=tool['settings'].get('custom_headers', {}),
         additional_fields=tool['settings'].get('additional_fields', []),
         verify_ssl=tool['settings'].get('verify_ssl', True),
         alita=tool['settings'].get('alita'),
@@ -78,6 +79,8 @@ class ConfluenceToolkit(BaseToolkit):
             number_of_retries=(int, Field(description="Number of retries", default=2)),
             min_retry_seconds=(int, Field(description="Min retry, sec", default=10)),
             max_retry_seconds=(int, Field(description="Max retry, sec", default=60)),
+            # optional field for custom headers as dictionary
+            custom_headers=(Optional[dict], Field(description="Custom headers for API requests", default=None)),
             confluence_configuration=(Optional[ConfluenceConfiguration], Field(description="Confluence Configuration", json_schema_extra={'configuration_types': ['confluence']})),
             pgvector_configuration=(Optional[PgVectorConfiguration], Field(default = None,
                                                                            description="PgVector Configuration",
@@ -223,16 +223,21 @@ class ConfluenceAPIWrapper(BaseVectorStoreToolApiWrapper):
         username = values.get('username')
         token = values.get('token')
         cloud = values.get('cloud')
-        # if values.get('collection_name'):
-        #     values['collection_name'] = shortuuid.encode(values['collection_name'])
         if token and is_cookie_token(token):
             session = requests.Session()
             session.cookies.update(parse_cookie_string(token))
-            values['client'] = Confluence(url=url, session=session, cloud=cloud)
+            client_instance = Confluence(url=url, session=session, cloud=cloud)
         elif token:
-            values['client'] = Confluence(url=url, token=token, cloud=cloud)
+            client_instance = Confluence(url=url, token=token, cloud=cloud)
         else:
-            values['client'] = Confluence(url=url, username=username, password=api_key, cloud=cloud)
+            client_instance = Confluence(url=url, username=username, password=api_key, cloud=cloud)
+
+        custom_headers = values.get('custom_headers', {})
+        logger.info(f"Confluence tool: custom headers length: {len(custom_headers)}")
+        for header, value in custom_headers.items():
+            client_instance._update_header(header, value)
+
+        values['client'] = client_instance
         return values

     def __unquote_confluence_space(self) -> str | None:
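A sketch of the kind of toolkit settings that now carry the extra header map; names and values are purely illustrative, and the configuration block is elided:

    # Sketch: Confluence toolkit settings with the new custom_headers field
    tool = {
        "type": "confluence",
        "settings": {
            "confluence_configuration": {},               # connection details elided
            "limit": 5,
            "verify_ssl": True,
            "custom_headers": {"X-Custom-Trace": "abc"},  # hypothetical header, applied via client_instance._update_header(...)
        },
    }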
@@ -124,6 +124,26 @@ class BaseToolApiWrapper(BaseModel):
     def get_available_tools(self):
         raise NotImplementedError("Subclasses should implement this method")

+    def _log_tool_event(self, message: str, tool_name: str = None):
+        """Log data and dispatch custom event for the tool"""
+
+        try:
+            from langchain_core.callbacks import dispatch_custom_event
+
+            if tool_name is None:
+                tool_name = 'tool_progress'
+            dispatch_custom_event(
+                name="tool_execution_step",
+                data={
+                    "message": message,
+                    "tool_name": tool_name,
+                    "toolkit": self.__class__.__name__,
+                },
+            )
+        except Exception as e:
+            logger.warning(f"Failed to dispatch progress event: {str(e)}")
+
+
     def run(self, mode: str, *args: Any, **kwargs: Any):
         if TOOLKIT_SPLITTER in mode:
             mode = mode.rsplit(TOOLKIT_SPLITTER, maxsplit=1)[1]
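A minimal sketch of how a wrapper subclass might report progress with the new helper; the subclass and tool method are hypothetical:

    # Sketch: emitting progress events from a toolkit wrapper
    from alita_sdk.tools.elitea_base import BaseToolApiWrapper

    class MyApiWrapper(BaseToolApiWrapper):
        def export_report(self, project_id: str) -> str:
            self._log_tool_event(message=f"Exporting report for {project_id}", tool_name="export_report")
            result = "..."  # actual work goes here
            self._log_tool_event(message="Report export finished", tool_name="export_report")
            return result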
@@ -1,6 +1,5 @@
-import json
 import logging
-from typing import Optional, List, Generator, Any
+from typing import Optional, Generator

 from langchain_core.documents import Document
 from langchain_core.tools import ToolException
@@ -8,7 +7,7 @@ from office365.runtime.auth.client_credential import ClientCredential
 from office365.sharepoint.client_context import ClientContext
 from pydantic import Field, PrivateAttr, create_model, model_validator, SecretStr

-from ..elitea_base import BaseVectorStoreToolApiWrapper, extend_with_vector_tools
+from ..non_code_indexer_toolkit import NonCodeIndexerToolkit
 from ..utils.content_parser import parse_file_content

 NoInput = create_model(
@@ -38,7 +37,7 @@ ReadDocument = create_model(
 )


-class SharepointApiWrapper(BaseVectorStoreToolApiWrapper):
+class SharepointApiWrapper(NonCodeIndexerToolkit):
     site_url: str
     client_id: str = None
     client_secret: SecretStr = None
@@ -77,9 +76,8 @@ class SharepointApiWrapper(BaseVectorStoreToolApiWrapper):
                 raise ToolException("You have to define token or client id&secret.")
             logging.info("Successfully authenticated to SharePoint.")
         except Exception as e:
-            logging.error(f"Failed to authenticate with SharePoint: {str(e)}")
-        return values
-
+            logging.error(f"Failed to authenticate with SharePoint: {str(e)}")
+        return super().validate_toolkit(values)


     def read_list(self, list_title, limit: int = 1000):
@@ -161,25 +159,26 @@ class SharepointApiWrapper(BaseVectorStoreToolApiWrapper):
             }
             yield Document(page_content="", metadata=metadata)

-    def _process_document(self, document: Document) -> Generator[Document, None, None]:
-        doc_content = ""
-        try:
-            doc_content = self.read_file(document.metadata['Path'],
-                                         is_capture_image=True,
-                                         excel_by_sheets=True)
-        except Exception as e:
-            logging.error(f"Failed while parsing the file '{document.metadata['Path']}': {e}")
-        if isinstance(doc_content, dict):
-            for page, content in doc_content.items():
-                new_metadata = document.metadata
-                new_metadata['page'] = page
-                yield Document(page_content=str(content), metadata=new_metadata)
-        else:
-            document.page_content = str(doc_content)
-
-    @extend_with_vector_tools
+    def _extend_data(self, documents: Generator[Document, None, None]):
+        for document in documents:
+            try:
+                document.metadata['loader_content'] = self._load_file_content_in_bytes(document.metadata['Path'])
+                document.metadata['loader_content_type'] = document.metadata['Name']
+                yield document
+            except Exception as e:
+                logging.error(f"Failed while parsing the file '{document.metadata['Path']}': {e}")
+                yield document
+
+    def _load_file_content_in_bytes(self, path):
+        file = self._client.web.get_file_by_server_relative_path(path)
+        self._client.load(file).execute_query()
+        file_content = file.read()
+        self._client.execute_query()
+        #
+        return file_content
+
     def get_available_tools(self):
-        return [
+        return super().get_available_tools() + [
             {
                 "name": "read_list",
                 "description": self.read_list.__doc__,
@@ -466,11 +466,11 @@ class TestrailAPIWrapper(BaseVectorStoreToolApiWrapper):
             return ToolException(
                 "json_case_arguments must be a JSON string or dictionary."
             )
-
+        self._log_tool_event(message=f"Extract test cases per filter {params}", tool_name='get_cases_by_filter')
         extracted_cases = self._client.cases.get_cases(
             project_id=project_id, **params
         )
-
+        self._log_tool_event(message=f"Test cases were extracted", tool_name='get_cases_by_filter')
         # support old versions of testrail_api
         cases = extracted_cases.get("cases") if isinstance(extracted_cases, dict) else extracted_cases

@@ -170,14 +170,17 @@ def load_content_from_bytes(file_content: bytes, extension: str = None, loader_e
         if temp_file_path and os.path.exists(temp_file_path):
             os.remove(temp_file_path)

-def process_content_by_type(document: Document, extension_source: str, llm = None, chunking_config={}) -> Generator[Document, None, None]:
+def process_content_by_type(document: Document, content, extension_source: str, llm = None, chunking_config={}) -> Generator[Document, None, None]:
     temp_file_path = None
     try:
         extension = "." + extension_source.split('.')[-1].lower()

         with tempfile.NamedTemporaryFile(mode='w+b', suffix=extension, delete=False) as temp_file:
             temp_file_path = temp_file.name
-            content = document.metadata.pop('loader_content')
+            if content is None:
+                logger.warning("'loader_content' is expected but not found in document metadata.")
+                return
+
             temp_file.write(content)
             temp_file.flush()

@@ -207,7 +210,7 @@ def process_content_by_type(document: Document, extension_source: str, llm = Non
                     docmeta['chunk_id'] = chunk_id
                     docmeta['chunk_type'] = "document"
                     yield Document(
-                        page_content=subchunk,
+                        page_content=sanitize_for_postgres(subchunk),
                         metadata=docmeta
                     )
         else:
@@ -218,9 +221,30 @@ def process_content_by_type(document: Document, extension_source: str, llm = Non
                 docmeta['chunk_id'] = chunk_id
                 docmeta['chunk_type'] = "document"
                 yield Document(
-                    page_content=chunk.page_content,
+                    page_content=sanitize_for_postgres(chunk.page_content),
                     metadata=docmeta
                 )
     finally:
         if temp_file_path and os.path.exists(temp_file_path):
-            os.remove(temp_file_path)
+            os.remove(temp_file_path)
+
+# FIXME copied from langchain_core/utils/strings.py of 0.3.74 version
+# https://github.com/langchain-ai/langchain/pull/32157
+# should be used from langchain_core.utils import sanitize_for_postgres once updated to newer version
+def sanitize_for_postgres(text: str, replacement: str = "") -> str:
+    r"""Sanitize text by removing NUL bytes that are incompatible with PostgreSQL.
+    PostgreSQL text fields cannot contain NUL (0x00) bytes, which can cause
+    psycopg.DataError when inserting documents. This function removes or replaces
+    such characters to ensure compatibility.
+    Args:
+        text: The text to sanitize.
+        replacement: String to replace NUL bytes with. Defaults to empty string.
+    Returns:
+        str: The sanitized text with NUL bytes removed or replaced.
+    Example:
+        >>> sanitize_for_postgres("Hello\\x00world")
+        'Helloworld'
+        >>> sanitize_for_postgres("Hello\\x00world", " ")
+        'Hello world'
+    """
+    return text.replace("\x00", replacement)
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: alita_sdk
-Version: 0.3.253
+Version: 0.3.254
 Summary: SDK for building langchain agents using resources from Alita
 Author-email: Artem Rozumenko <artyom.rozumenko@gmail.com>, Mikalai Biazruchka <mikalai_biazruchka@epam.com>, Roman Mitusov <roman_mitusov@epam.com>, Ivan Krakhmaliuk <lifedjik@gmail.com>, Artem Dubrovskiy <ad13box@gmail.com>
 License-Expression: Apache-2.0
@@ -26,7 +26,7 @@ alita_sdk/configurations/zephyr_enterprise.py,sha256=5W1QEcv62Y5Rk_kApI2QmOwvWZe
 alita_sdk/runtime/__init__.py,sha256=4W0UF-nl3QF2bvET5lnah4o24CoTwSoKXhuN0YnwvEE,828
 alita_sdk/runtime/clients/__init__.py,sha256=BdehU5GBztN1Qi1Wul0cqlU46FxUfMnI6Vq2Zd_oq1M,296
 alita_sdk/runtime/clients/artifact.py,sha256=H3pJAh5G-zWVyJ6YbqHGk4jA8U6HfacQduiTivpJZ3Y,3210
-alita_sdk/runtime/clients/client.py,sha256=HO5mSrrque9HaHdBmQVR639leBNTI1TFA0HYmXdfqLA,43187
+alita_sdk/runtime/clients/client.py,sha256=irj2uTGdIQj8Wd1ZGdi5yDCFm_n9TiRhEhODJz4yI84,43493
 alita_sdk/runtime/clients/datasource.py,sha256=HAZovoQN9jBg0_-lIlGBQzb4FJdczPhkHehAiVG3Wx0,1020
 alita_sdk/runtime/clients/prompt.py,sha256=li1RG9eBwgNK_Qf0qUaZ8QNTmsncFrAL2pv3kbxZRZg,1447
 alita_sdk/runtime/langchain/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -49,6 +49,7 @@ alita_sdk/runtime/langchain/document_loaders/AlitaDocxMammothLoader.py,sha256=nH
 alita_sdk/runtime/langchain/document_loaders/AlitaExcelLoader.py,sha256=YBFYikrOEITfIavU0Xu7BQSNvPCFKzcmbJ_VDeQ6KdI,3078
 alita_sdk/runtime/langchain/document_loaders/AlitaGitRepoLoader.py,sha256=5WXGcyHraSVj3ANHj_U6X4EDikoekrIYtS0Q_QqNIng,2608
 alita_sdk/runtime/langchain/document_loaders/AlitaImageLoader.py,sha256=ogvCmpnS54-D7fP_sSkL1dnhHTmRSD-HA2FFrTNhDEo,6560
+alita_sdk/runtime/langchain/document_loaders/AlitaJSONLoader.py,sha256=1mGZjltnqsSXkp1Jw-lQroyNFiCPpjb9ZbdoqOlqPeU,3354
 alita_sdk/runtime/langchain/document_loaders/AlitaJiraLoader.py,sha256=M2q3YThkps0yAZOjfoLcyE7qycVTYKcXEGtpmp0N6C8,10950
 alita_sdk/runtime/langchain/document_loaders/AlitaPDFLoader.py,sha256=elymFlVGiCkcrIY5FrLxbxnQ9jdt3PPV0yBJGF3pTFE,2858
 alita_sdk/runtime/langchain/document_loaders/AlitaPowerPointLoader.py,sha256=SKAAPo3DfMtRPxICKrPzlXXkC5RfaeiRj7lejLXTi7o,2337
@@ -56,7 +57,7 @@ alita_sdk/runtime/langchain/document_loaders/AlitaQtestLoader.py,sha256=CUVVnisx
 alita_sdk/runtime/langchain/document_loaders/AlitaTableLoader.py,sha256=o0SRFPZ-VskltgThVRX80rT19qtB4gPzxED9SENTNWo,4145
 alita_sdk/runtime/langchain/document_loaders/AlitaTextLoader.py,sha256=uNcV0En49_0u0RYB1sP1XfNspT2Xc5CacuJr9Jqv79Q,2972
 alita_sdk/runtime/langchain/document_loaders/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-alita_sdk/runtime/langchain/document_loaders/constants.py,sha256=Zu_TSxZzcsrJjLEfLSgPEmoZOn97hwI-lfKJR0JKJzk,4535
+alita_sdk/runtime/langchain/document_loaders/constants.py,sha256=gTTHIbJQVpSGaOwQjJwAltZryoDDX7GaqbODI30MwQM,4563
 alita_sdk/runtime/langchain/document_loaders/utils.py,sha256=9xghESf3axBbwxATyVuS0Yu-TWe8zWZnXgCD1ZVyNW0,2414
 alita_sdk/runtime/langchain/interfaces/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 alita_sdk/runtime/langchain/interfaces/kwextractor.py,sha256=kSJA9L8g8UArmHu7Bd9dIO0Rrq86JPUb8RYNlnN68FQ,3072
@@ -100,13 +101,13 @@ alita_sdk/runtime/tools/indexer_tool.py,sha256=whSLPevB4WD6dhh2JDXEivDmTvbjiMV1M
 alita_sdk/runtime/tools/llm.py,sha256=NsrsP-SblyxDdzgMCn9_OBUL0sUGDVS5yqer49V7ciE,15069
 alita_sdk/runtime/tools/loop.py,sha256=uds0WhZvwMxDVFI6MZHrcmMle637cQfBNg682iLxoJA,8335
 alita_sdk/runtime/tools/loop_output.py,sha256=U4hO9PCQgWlXwOq6jdmCGbegtAxGAPXObSxZQ3z38uk,8069
-alita_sdk/runtime/tools/mcp_server_tool.py,sha256=eI8QUt497xblwF4Zhbvi8wCg17yh2yoWjcw_AIzHwGE,2819
+alita_sdk/runtime/tools/mcp_server_tool.py,sha256=trGraI8-AwdbNmTKMjfmlBxgTDMTE4-21heCVtd_lz0,4156
 alita_sdk/runtime/tools/pgvector_search.py,sha256=NN2BGAnq4SsDHIhUcFZ8d_dbEOM8QwB0UwpsWCYruXU,11692
 alita_sdk/runtime/tools/prompt.py,sha256=nJafb_e5aOM1Rr3qGFCR-SKziU9uCsiP2okIMs9PppM,741
 alita_sdk/runtime/tools/router.py,sha256=wCvZjVkdXK9dMMeEerrgKf5M790RudH68pDortnHSz0,1517
 alita_sdk/runtime/tools/tool.py,sha256=lE1hGi6qOAXG7qxtqxarD_XMQqTghdywf261DZawwno,5631
 alita_sdk/runtime/tools/vectorstore.py,sha256=l5wfovwMNvS_RgW-ZHXCh8Cm8gauunRzP0NPkzmshcQ,33852
-alita_sdk/runtime/tools/vectorstore_base.py,sha256=OdJIJkjTmQ0BC-AzAOMP2phAcNATJ8gI5JoBWSSdpNU,27892
+alita_sdk/runtime/tools/vectorstore_base.py,sha256=HFaNk_oBoeZWrQWBrvEsozajHqwjWxsV6RigkQyq-eQ,27586
 alita_sdk/runtime/utils/AlitaCallback.py,sha256=E4LlSBuCHWiUq6W7IZExERHZY0qcmdjzc_rJlF2iQIw,7356
 alita_sdk/runtime/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 alita_sdk/runtime/utils/constants.py,sha256=Xntx1b_uxUzT4clwqHA_U6K8y5bBqf_4lSQwXdcWrp4,13586
@@ -118,8 +119,8 @@ alita_sdk/runtime/utils/toolkit_runtime.py,sha256=MU63Fpxj0b5_r1IUUc0Q3-PN9VwL7r
 alita_sdk/runtime/utils/toolkit_utils.py,sha256=I9QFqnaqfVgN26LUr6s3XlBlG6y0CoHURnCzG7XcwVs,5311
 alita_sdk/runtime/utils/utils.py,sha256=CpEl3LCeLbhzQySz08lkKPm7Auac6IiLF7WB8wmArMI,589
 alita_sdk/tools/__init__.py,sha256=ko5TToGYZFmBrho26DRAVvrkHWxQ2sfs8gVAASinYp8,10611
-alita_sdk/tools/base_indexer_toolkit.py,sha256=qQfMHzsQ2BfusKMV_DNiHOtZVheiQ4gBfy5JXjYi0UY,20231
-alita_sdk/tools/elitea_base.py,sha256=qXSrl0A8KxIuv6796bTkjPpxBm4WQ5zmpskIAwCFfC8,30394
+alita_sdk/tools/base_indexer_toolkit.py,sha256=gOjE1igKyjG1LohMj0XMlj1IGaFp7eEEDqyEG6-xLmc,18405
+alita_sdk/tools/elitea_base.py,sha256=yfDSNKdLtsedp8546KHfDPOPYgpD4ZZ-dvAxs3zoF3o,31071
 alita_sdk/tools/non_code_indexer_toolkit.py,sha256=v9uq1POE1fQKCd152mbqDtF-HSe0qoDj83k4E5LAkMI,1080
 alita_sdk/tools/ado/__init__.py,sha256=bArTObt5cqG1SkijKevWGbsIILHBA3aCStg8Q1jd69k,1243
 alita_sdk/tools/ado/utils.py,sha256=PTCludvaQmPLakF2EbCGy66Mro4-rjDtavVP-xcB2Wc,1252
@@ -211,8 +212,8 @@ alita_sdk/tools/code/linter/api_wrapper.py,sha256=wylpwhAw02Jt8L18CqBq2He5PbwIkx
 alita_sdk/tools/code/loaders/codesearcher.py,sha256=XoXXZtIQZhvjIwZlnl_4wVGHC-3saYzFo5oDR_Zh3EY,529
 alita_sdk/tools/code/sonar/__init__.py,sha256=u8wpgXJ_shToLl3G9-XEtGDor5dhmsnurIImh1-e-U0,3165
 alita_sdk/tools/code/sonar/api_wrapper.py,sha256=nNqxcWN_6W8c0ckj-Er9HkNuAdgQLoWBXh5UyzNutis,2653
-alita_sdk/tools/confluence/__init__.py,sha256=xLsxdBZ62NL0k9NxaV4KnspwmDcucQzcl-tAaz7eLB8,6562
-alita_sdk/tools/confluence/api_wrapper.py,sha256=4WqjVeFWyFeb4-VD5v4_J69pbyjire4Op7cBSKU9EXw,85057
+alita_sdk/tools/confluence/__init__.py,sha256=ClK6fuJr5rsgDhLLA2Ci3zJdBSP3liyUpHML9oQqKFs,6804
+alita_sdk/tools/confluence/api_wrapper.py,sha256=-wQduJUk2wwfBQGQHSWuCnrt35gfp195nSKUFVSPS1s,85218
 alita_sdk/tools/confluence/loader.py,sha256=4bf5qrJMEiJzuZp2NlxO2XObLD1w7fxss_WyMUpe8sg,9290
 alita_sdk/tools/confluence/utils.py,sha256=Lxo6dBD0OlvM4o0JuK6qeB_4LV9BptiwJA9e1vqNcDw,435
 alita_sdk/tools/custom_open_api/__init__.py,sha256=9aT5SPNPWcJC6jMZEM-3rUCXVULj_3-qJLQKmnreKNo,2537
@@ -298,7 +299,7 @@ alita_sdk/tools/servicenow/__init__.py,sha256=hReiTp8yv07eR0O_1KJThzUO2xhWhIWcjU
 alita_sdk/tools/servicenow/api_wrapper.py,sha256=WpH-bBLGFdhehs4g-K-WAkNuaD1CSrwsDpdgB3RG53s,6120
 alita_sdk/tools/servicenow/servicenow_client.py,sha256=Rdqfu-ll-qbnclMzChLZBsfXRDzgoX_FdeI2WLApWxc,3269
 alita_sdk/tools/sharepoint/__init__.py,sha256=Mofg_N-7zFf5mKm3_0D0dhC_H0MX-bk3YQ5Sl3oXokg,4114
-alita_sdk/tools/sharepoint/api_wrapper.py,sha256=TSdKZuLnn3uSkaNuYb7a2xG4w4sQzXbzOO3c8tIlFds,9259
+alita_sdk/tools/sharepoint/api_wrapper.py,sha256=Hcd9YypWMr3upDVJHRxUyPdN4k8joqRQOc_uce2ek1A,9250
 alita_sdk/tools/sharepoint/authorization_helper.py,sha256=n-nL5dlBoLMK70nHu7P2RYCb8C6c9HMA_gEaw8LxuhE,2007
 alita_sdk/tools/sharepoint/utils.py,sha256=fZ1YzAu5CTjKSZeslowpOPH974902S8vCp1Wu7L44LM,446
 alita_sdk/tools/slack/__init__.py,sha256=o8BnDMWGC5qA8pVIyIiflM6T__dZ6qAE1UdtJcvmaxk,3901
@@ -309,9 +310,9 @@ alita_sdk/tools/sql/models.py,sha256=AKJgSl_kEEz4fZfw3kbvdGHXaRZ-yiaqfJOB6YOj3i0
 alita_sdk/tools/testio/__init__.py,sha256=qi12wyJXN02hrUXg08CbijcCL5pi30JMbJfiXjn1Zr0,2646
 alita_sdk/tools/testio/api_wrapper.py,sha256=BvmL5h634BzG6p7ajnQLmj-uoAw1gjWnd4FHHu1h--Q,21638
 alita_sdk/tools/testrail/__init__.py,sha256=0kETjWKLU7R6mugBWsjwEUsh10pipbAeNSGJAO0FBh0,4634
-alita_sdk/tools/testrail/api_wrapper.py,sha256=K-Gc42RH2z-fK4cXi8zQq3s9A4v_pCJkRB3XKLAhypc,32056
+alita_sdk/tools/testrail/api_wrapper.py,sha256=5T-QyTzt-J0rI32xc_E684lCdgyWeHSyeTYiwQwtGyg,32275
 alita_sdk/tools/utils/__init__.py,sha256=155xepXPr4OEzs2Mz5YnjXcBpxSv1X2eznRUVoPtyK0,3268
-alita_sdk/tools/utils/content_parser.py,sha256=0HKQqGTdXHKlcz72GHEwXqLXJsRYXm35F-P1KZz0sNc,10351
+alita_sdk/tools/utils/content_parser.py,sha256=MsBlh97v5aVTuB2bw43J4K2-IJumKOoRbz0zkpjkbhI,11521
 alita_sdk/tools/vector_adapters/VectorStoreAdapter.py,sha256=a6FAsiix_EvATIKUf5YT6vHh5LDyJ5uSP3LJqoxFo04,17367
 alita_sdk/tools/vector_adapters/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 alita_sdk/tools/xray/__init__.py,sha256=GGpbiBdDQ9kMFqJEHYi7XwKpkuMMHi-ZF-IM8yFIgUM,4380
@@ -333,8 +334,8 @@ alita_sdk/tools/zephyr_scale/api_wrapper.py,sha256=JAeWf-RXohsxheUpT0iMDClc_izj-
 alita_sdk/tools/zephyr_squad/__init__.py,sha256=0AI_j27xVO5Gk5HQMFrqPTd4uvuVTpiZUicBrdfEpKg,2796
 alita_sdk/tools/zephyr_squad/api_wrapper.py,sha256=kmw_xol8YIYFplBLWTqP_VKPRhL_1ItDD0_vXTe_UuI,14906
 alita_sdk/tools/zephyr_squad/zephyr_squad_cloud_client.py,sha256=R371waHsms4sllHCbijKYs90C-9Yu0sSR3N4SUfQOgU,5066
-alita_sdk-0.3.253.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
-alita_sdk-0.3.253.dist-info/METADATA,sha256=sOv_LdDPyuyBm4c-1hfZH1XG_V5-MeUIDuTJgmDX8Hk,18897
-alita_sdk-0.3.253.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-alita_sdk-0.3.253.dist-info/top_level.txt,sha256=0vJYy5p_jK6AwVb1aqXr7Kgqgk3WDtQ6t5C-XI9zkmg,10
-alita_sdk-0.3.253.dist-info/RECORD,,
+alita_sdk-0.3.254.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+alita_sdk-0.3.254.dist-info/METADATA,sha256=XLAiAMkY0U1LualoozT6xq8wieW8dvFe0xwIZeM7Dmw,18897
+alita_sdk-0.3.254.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+alita_sdk-0.3.254.dist-info/top_level.txt,sha256=0vJYy5p_jK6AwVb1aqXr7Kgqgk3WDtQ6t5C-XI9zkmg,10
+alita_sdk-0.3.254.dist-info/RECORD,,