alita-sdk 0.3.347__py3-none-any.whl → 0.3.349__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

@@ -42,7 +42,17 @@ class Artifact:
             return f"{data['error']}. {data['content'] if data['content'] else ''}"
         detected = chardet.detect(data)
         if detected['encoding'] is not None:
-            return data.decode(detected['encoding'])
+            try:
+                return data.decode(detected['encoding'])
+            except Exception:
+                logger.error("Error while default encoding")
+                return parse_file_content(file_name=artifact_name,
+                                          file_content=data,
+                                          is_capture_image=is_capture_image,
+                                          page_number=page_number,
+                                          sheet_name=sheet_name,
+                                          excel_by_sheets=excel_by_sheets,
+                                          llm=llm)
         else:
             return parse_file_content(file_name=artifact_name,
                                       file_content=data,
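The fix above only changes behavior when chardet's guess cannot actually decode the bytes: instead of raising, the client now logs and falls through to the binary-file parser. A minimal sketch of the same detect-then-fallback pattern, with a hypothetical `parse_binary` standing in for the SDK's `parse_file_content`:

```python
import logging

import chardet

logger = logging.getLogger(__name__)


def parse_binary(data: bytes) -> str:
    # Hypothetical stand-in for parse_file_content (binary formats: xlsx, docx, pdf, ...).
    return f"<binary payload, {len(data)} bytes>"


def bytes_to_text(data: bytes) -> str:
    detected = chardet.detect(data)
    if detected["encoding"] is not None:
        try:
            # chardet can guess a codec that still fails on some byte sequences.
            return data.decode(detected["encoding"])
        except Exception:
            logger.error("Decoding with detected encoding %r failed", detected["encoding"])
            return parse_binary(data)
    return parse_binary(data)
```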
@@ -69,6 +69,7 @@ class AlitaClient:
         self.configurations_url = f'{self.base_url}{self.api_path}/integrations/integrations/default/{self.project_id}?section=configurations&unsecret=true'
         self.ai_section_url = f'{self.base_url}{self.api_path}/integrations/integrations/default/{self.project_id}?section=ai'
         self.configurations: list = configurations or []
+        self.model_timeout = kwargs.get('model_timeout', 120)
 
     def get_mcp_toolkits(self):
         if user_id := self._get_real_user_id():
@@ -184,6 +185,7 @@ class AlitaClient:
             model=embedding_model,
             api_key=self.auth_token,
             openai_organization=str(self.project_id),
+            request_timeout=self.model_timeout
         )
 
     def get_llm(self, model_name: str, model_config: dict) -> ChatOpenAI:
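Together these two hunks make the embeddings timeout configurable: `AlitaClient` now reads a `model_timeout` kwarg (120 seconds by default) and forwards it as `request_timeout`. A sketch of the resulting call, assuming `langchain_openai.OpenAIEmbeddings` is the class being constructed here:

```python
from langchain_openai import OpenAIEmbeddings

# request_timeout (an alias of `timeout`) bounds each HTTP request to the model API.
embeddings = OpenAIEmbeddings(
    model="text-embedding-3-small",      # placeholder model name
    api_key="<auth token>",
    openai_organization="<project id>",  # the SDK passes str(project_id) here
    request_timeout=120,                 # mirrors the new model_timeout default
)
```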
@@ -12,27 +12,32 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import io
+import os
 from typing import Iterator
 import pandas as pd
 from json import loads
 
 from openpyxl import load_workbook
+from xlrd import open_workbook
 from langchain_core.documents import Document
 from .AlitaTableLoader import AlitaTableLoader
-
-cell_delimeter = " | "
 
-class AlitaExcelLoader(AlitaTableLoader):
+cell_delimiter = " | "
 
+class AlitaExcelLoader(AlitaTableLoader):
     excel_by_sheets: bool = False
     sheet_name: str = None
     return_type: str = 'str'
+    file_name: str = None
 
     def __init__(self, **kwargs):
         if not kwargs.get('file_path'):
             file_content = kwargs.get('file_content')
             if file_content:
+                self.file_name = kwargs.get('file_name')
                 kwargs['file_path'] = io.BytesIO(file_content)
+            else:
+                self.file_name = kwargs.get('file_path')
         super().__init__(**kwargs)
         self.excel_by_sheets = kwargs.get('excel_by_sheets')
         self.return_type = kwargs.get('return_type')
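Remembering `file_name` (or falling back to `file_path`) is what later lets `get_content` pick a reader by extension. A hedged construction sketch for the in-memory case; the kwargs shown are only those visible in this diff, and the base `AlitaTableLoader` may expect more:

```python
from alita_sdk.runtime.langchain.document_loaders.AlitaExcelLoader import AlitaExcelLoader

with open("report.xls", "rb") as fh:
    loader = AlitaExcelLoader(
        file_content=fh.read(),   # wrapped into io.BytesIO as file_path
        file_name="report.xls",   # extension drives the openpyxl/xlrd choice
        excel_by_sheets=True,
        return_type="dict",
    )

content = loader.get_content()    # with excel_by_sheets=True: {sheet_name: parsed rows}
```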
@@ -40,36 +45,82 @@ class AlitaExcelLoader(AlitaTableLoader):
 
     def get_content(self):
         try:
-            # Load the workbook
-            workbook = load_workbook(self.file_path, data_only=True)  # `data_only=True` ensures we get cell values, not formulas
-
-            if self.sheet_name:
-                # If a specific sheet name is provided, parse only that sheet
-                if self.sheet_name in workbook.sheetnames:
-                    sheet_content = self.parse_sheet(workbook[self.sheet_name])
-                    return sheet_content
-                else:
-                    raise ValueError(f"Sheet '{self.sheet_name}' does not exist in the workbook.")
-            elif self.excel_by_sheets:
-                # Parse each sheet individually and return as a dictionary
-                result = {}
-                for sheet_name in workbook.sheetnames:
-                    sheet_content = self.parse_sheet(workbook[sheet_name])
-                    result[sheet_name] = sheet_content
-                return result
+            # Determine file extension
+            file_extension = os.path.splitext(self.file_name)[-1].lower()
+
+            if file_extension == '.xlsx':
+                # Use openpyxl for .xlsx files
+                return self._read_xlsx()
+            elif file_extension == '.xls':
+                # Use xlrd for .xls files
+                return self._read_xls()
             else:
-                # Combine all sheets into a single string result
-                result = []
-                for sheet_name in workbook.sheetnames:
-                    sheet_content = self.parse_sheet(workbook[sheet_name])
-                    result.append(f"====== Sheet name: {sheet_name} ======\n{sheet_content}")
-                return "\n\n".join(result)
+                raise ValueError(f"Unsupported file format: {file_extension}")
         except Exception as e:
             return f"Error reading Excel file: {e}"
 
+    def _read_xlsx(self):
+        """
+        Reads .xlsx files using openpyxl.
+        """
+        workbook = load_workbook(self.file_path, data_only=True)  # `data_only=True` ensures we get cell values, not formulas
+
+        if self.sheet_name:
+            # If a specific sheet name is provided, parse only that sheet
+            if self.sheet_name in workbook.sheetnames:
+                sheet_content = self.parse_sheet(workbook[self.sheet_name])
+                return sheet_content
+            else:
+                raise ValueError(f"Sheet '{self.sheet_name}' does not exist in the workbook.")
+        elif self.excel_by_sheets:
+            # Parse each sheet individually and return as a dictionary
+            result = {}
+            for sheet_name in workbook.sheetnames:
+                sheet_content = self.parse_sheet(workbook[sheet_name])
+                result[sheet_name] = sheet_content
+            return result
+        else:
+            # Combine all sheets into a single string result
+            result = []
+            for sheet_name in workbook.sheetnames:
+                sheet_content = self.parse_sheet(workbook[sheet_name])
+                result.append(f"====== Sheet name: {sheet_name} ======\n{sheet_content}")
+            return "\n\n".join(result)
+
+    def _read_xls(self):
+        """
+        Reads .xls files using xlrd.
+        """
+        workbook = open_workbook(filename=self.file_name, file_contents=self.file_content)
+
+        if self.sheet_name:
+            # If a specific sheet name is provided, parse only that sheet
+            if self.sheet_name in workbook.sheet_names():
+                sheet = workbook.sheet_by_name(self.sheet_name)
+                sheet_content = self.parse_sheet_xls(sheet)
+                return sheet_content
+            else:
+                raise ValueError(f"Sheet '{self.sheet_name}' does not exist in the workbook.")
+        elif self.excel_by_sheets:
+            # Parse each sheet individually and return as a dictionary
+            result = {}
+            for sheet_name in workbook.sheet_names():
+                sheet = workbook.sheet_by_name(sheet_name)
+                sheet_content = self.parse_sheet_xls(sheet)
+                result[sheet_name] = sheet_content
+            return result
+        else:
+            # Combine all sheets into a single string result
+            result = []
+            for sheet_name in workbook.sheet_names():
+                sheet = workbook.sheet_by_name(sheet_name)
+                sheet_content = self.parse_sheet_xls(sheet)
+                result.append(f"====== Sheet name: {sheet_name} ======\n{sheet_content}")
+            return "\n\n".join(result)
+
     def parse_sheet(self, sheet):
         """
-        Parses a single sheet, extracting text and hyperlinks, and formats them.
+        Parses a single .xlsx sheet, extracting text and hyperlinks, and formats them.
         """
         sheet_content = []
 
@@ -85,17 +136,52 @@ class AlitaExcelLoader(AlitaTableLoader):
                 # If no hyperlink, use the cell value (computed value if formula)
                 row_content.append(str(cell.value) if cell.value is not None else "")
             # Join the row content into a single line using `|` as the delimiter
-            sheet_content.append(cell_delimeter.join(row_content))
+            sheet_content.append(cell_delimiter.join(row_content))
+
+        # Format the sheet content based on the return type
+        return self._format_sheet_content(sheet_content)
+
+    def parse_sheet_xls(self, sheet):
+        """
+        Parses a single .xls sheet using xlrd, extracting text and hyperlinks, and formats them.
+        """
+        sheet_content = []
+
+        # Extract hyperlink map (if available)
+        hyperlink_map = getattr(sheet, 'hyperlink_map', {})
+
+        for row_idx in range(sheet.nrows):
+            row_content = []
+            for col_idx in range(sheet.ncols):
+                cell = sheet.cell(row_idx, col_idx)
+                cell_value = cell.value
+
+                # Check if the cell has a hyperlink
+                cell_address = (row_idx, col_idx)
+                if cell_address in hyperlink_map:
+                    hyperlink = hyperlink_map[cell_address].url_or_path
+                    if cell_value:
+                        row_content.append(f"[{cell_value}]({hyperlink})")
+                else:
+                    row_content.append(str(cell_value) if cell_value is not None else "")
+            # Join the row content into a single line using `|` as the delimiter
+            sheet_content.append(cell_delimiter.join(row_content))
 
         # Format the sheet content based on the return type
+        return self._format_sheet_content(sheet_content)
+
+    def _format_sheet_content(self, sheet_content):
+        """
+        Formats the sheet content based on the return type.
+        """
         if self.return_type == 'dict':
             # Convert to a list of dictionaries (each row is a dictionary)
-            headers = sheet_content[0].split(cell_delimeter) if sheet_content else []
+            headers = sheet_content[0].split(cell_delimiter) if sheet_content else []
             data_rows = sheet_content[1:] if len(sheet_content) > 1 else []
-            return [dict(zip(headers, row.split(cell_delimeter))) for row in data_rows]
+            return [dict(zip(headers, row.split(cell_delimiter))) for row in data_rows]
         elif self.return_type == 'csv':
             # Return as CSV (newline-separated rows, comma-separated values)
-            return "\n".join([",".join(row.split(cell_delimeter)) for row in sheet_content])
+            return "\n".join([",".join(row.split(cell_delimiter)) for row in sheet_content])
         else:
             # Default: Return as plain text (newline-separated rows, pipe-separated values)
             return "\n".join(sheet_content)
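The extension split exists because openpyxl only understands `.xlsx`, while xlrd 2.x only reads the legacy binary `.xls` format. A standalone sketch of the xlrd side (not the loader's exact code); `formatting_info=True` is what populates `sheet.hyperlink_map`, which the `getattr(..., 'hyperlink_map', {})` above otherwise defaults to an empty dict:

```python
import xlrd  # xlrd 2.x: .xls only


def xls_sheets_as_rows(path: str) -> dict[str, list[list[str]]]:
    # file_contents=... (as in _read_xls) also works for in-memory bytes.
    book = xlrd.open_workbook(filename=path, formatting_info=True)
    rows_by_sheet = {}
    for name in book.sheet_names():
        sheet = book.sheet_by_name(name)
        rows_by_sheet[name] = [
            [str(sheet.cell(r, c).value) for c in range(sheet.ncols)]
            for r in range(sheet.nrows)
        ]
    return rows_by_sheet
```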
@@ -115,7 +201,7 @@ class AlitaExcelLoader(AlitaTableLoader):
         return docs
 
     def read(self, lazy: bool = False):
-        df = pd.read_excel(self.file_path, sheet_name=None)
+        df = pd.read_excel(self.file_path, sheet_name=None, engine='calamine')
         docs = []
         for key in df.keys():
             if self.raw_content:
@@ -126,7 +212,7 @@ class AlitaExcelLoader(AlitaTableLoader):
         return docs
 
     def read_lazy(self) -> Iterator[dict]:
-        df = pd.read_excel(self.file_path, sheet_name=None)
+        df = pd.read_excel(self.file_path, sheet_name=None, engine='calamine')
         for key in df.keys():
             if self.raw_content:
                 yield df[key].to_string()
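`read` and `read_lazy` now force the `calamine` engine, which is why `python-calamine` appears in the METADATA hunk further down; pandas supports this engine from 2.2 onward, and it covers `.xls`, `.xlsx`, `.xlsb` and `.ods` through a single Rust-backed reader. A small usage sketch:

```python
import io

import pandas as pd

# engine="calamine" requires the python-calamine package (pandas >= 2.2).
with open("report.xlsx", "rb") as fh:
    sheets = pd.read_excel(io.BytesIO(fh.read()), sheet_name=None, engine="calamine")

for name, frame in sheets.items():   # sheet_name=None returns {sheet name: DataFrame}
    print(name, frame.shape)
```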
@@ -1,4 +1,5 @@
 import logging
+import re
 from typing import Union, Any, Optional, Annotated, get_type_hints
 from uuid import uuid4
 from typing import Dict
@@ -274,11 +275,20 @@ class StateModifierNode(Runnable):
                 logger.warning(f"Failed to decode base64 value: {e}")
                 return value
 
+        def split_by_words(value, chunk_size=100):
+            words = value.split()
+            return [" ".join(words[i:i + chunk_size]) for i in range(0, len(words), chunk_size)]
+
+        def split_by_regex(value, pattern):
+            """Splits the provided string using the specified regex pattern."""
+            return re.split(pattern, value)
 
         env = Environment()
         env.filters['from_json'] = from_json
-        env.filters['base64ToString'] = base64_to_string
-
+        env.filters['base64_to_string'] = base64_to_string
+        env.filters['split_by_words'] = split_by_words
+        env.filters['split_by_regex'] = split_by_regex
+
         template = env.from_string(self.template)
         rendered_message = template.render(**input_data)
         result = {}
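The state-modifier template environment now registers three filters: the renamed `base64_to_string` (templates written against the old `base64ToString` name need to follow the rename), plus `split_by_words` and `split_by_regex`. A self-contained sketch of how the two new filters behave in a plain Jinja2 environment:

```python
import re

from jinja2 import Environment


def split_by_words(value, chunk_size=100):
    words = value.split()
    return [" ".join(words[i:i + chunk_size]) for i in range(0, len(words), chunk_size)]


def split_by_regex(value, pattern):
    return re.split(pattern, value)


env = Environment()
env.filters["split_by_words"] = split_by_words
env.filters["split_by_regex"] = split_by_regex

template = env.from_string(
    "{{ (text | split_by_words(3)) | length }} chunks; "
    "sections: {{ text | split_by_regex('; ') }}"
)
# Prints: 2 chunks; sections: ['alpha beta gamma delta', 'epsilon zeta']
print(template.render(text="alpha beta gamma delta; epsilon zeta"))
```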
@@ -3,6 +3,7 @@ import logging
 import re
 from typing import Any, Optional, Generator, List
 
+from langchain_core.callbacks import dispatch_custom_event
 from langchain_core.documents import Document
 from langchain_core.tools import ToolException
 from pydantic import create_model, Field, model_validator
@@ -30,7 +31,21 @@ class ArtifactWrapper(NonCodeIndexerToolkit):
         return self.artifact.list(bucket_name, return_as_string)
 
     def create_file(self, filename: str, filedata: str, bucket_name = None):
-        return self.artifact.create(filename, filedata, bucket_name)
+        result = self.artifact.create(filename, filedata, bucket_name)
+
+        # Dispatch custom event for file creation
+        dispatch_custom_event("file_modified", {
+            "message": f"File '{filename}' created successfully",
+            "filename": filename,
+            "tool_name": "createFile",
+            "toolkit": "artifact",
+            "operation_type": "create",
+            "meta": {
+                "bucket": bucket_name or self.bucket
+            }
+        })
+
+        return result
 
     def read_file(self,
                   filename: str,
@@ -51,10 +66,38 @@ class ArtifactWrapper(NonCodeIndexerToolkit):
         return self.artifact.delete(filename, bucket_name)
 
     def append_data(self, filename: str, filedata: str, bucket_name = None):
-        return self.artifact.append(filename, filedata, bucket_name)
+        result = self.artifact.append(filename, filedata, bucket_name)
+
+        # Dispatch custom event for file append
+        dispatch_custom_event("file_modified", {
+            "message": f"Data appended to file '{filename}' successfully",
+            "filename": filename,
+            "tool_name": "appendData",
+            "toolkit": "artifact",
+            "operation_type": "modify",
+            "meta": {
+                "bucket": bucket_name or self.bucket
+            }
+        })
+
+        return result
 
     def overwrite_data(self, filename: str, filedata: str, bucket_name = None):
-        return self.artifact.overwrite(filename, filedata, bucket_name)
+        result = self.artifact.overwrite(filename, filedata, bucket_name)
+
+        # Dispatch custom event for file overwrite
+        dispatch_custom_event("file_modified", {
+            "message": f"File '{filename}' overwritten successfully",
+            "filename": filename,
+            "tool_name": "overwriteData",
+            "toolkit": "artifact",
+            "operation_type": "modify",
+            "meta": {
+                "bucket": bucket_name or self.bucket
+            }
+        })
+
+        return result
 
     def create_new_bucket(self, bucket_name: str, expiration_measure = "weeks", expiration_value = 1):
         return self.artifact.client.create_bucket(bucket_name, expiration_measure, expiration_value)
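All three write operations now emit a `file_modified` custom event through `langchain_core`, so callers can observe file changes without parsing tool output. A hedged sketch of a handler that collects these events, assuming a `langchain_core` version recent enough to support custom events; the payload keys match the dictionaries above:

```python
from typing import Any
from uuid import UUID

from langchain_core.callbacks import BaseCallbackHandler


class FileModifiedCollector(BaseCallbackHandler):
    """Collects 'file_modified' events dispatched by the artifact toolkit."""

    def __init__(self) -> None:
        self.events: list[dict[str, Any]] = []

    def on_custom_event(self, name: str, data: Any, *, run_id: UUID, **kwargs: Any) -> None:
        if name == "file_modified":
            # data carries message, filename, tool_name, toolkit, operation_type and meta.bucket
            self.events.append(data)


# Attached via the usual runnable config, e.g.:
# agent.invoke({"input": "..."}, config={"callbacks": [FileModifiedCollector()]})
```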
@@ -137,7 +137,7 @@ class VectorStoreWrapper(BaseToolApiWrapper):
     embedding_model_params: dict
     vectorstore_type: str
     vectorstore_params: dict
-    max_docs_per_add: int = 100
+    max_docs_per_add: int = 20
     dataset: str = None
     embedding: Any = None
     vectorstore: Any = None
@@ -135,7 +135,7 @@ class VectorStoreWrapperBase(BaseToolApiWrapper):
     embedding_model: Optional[str] = None
     vectorstore_type: Optional[str] = None
     vectorstore_params: Optional[dict] = None
-    max_docs_per_add: int = 100
+    max_docs_per_add: int = 20
     dataset: Optional[str] = None
     vectorstore: Any = None
     pg_helper: Any = None
@@ -279,7 +279,8 @@ class BaseIndexerToolkit(VectorStoreWrapperBase):
         """Cleans the indexed data in the collection."""
         super()._clean_collection(collection_suffix=collection_suffix)
         return (f"Collection '{collection_suffix}' has been removed from the vector store.\n"
-                f"Available collections: {self.list_collections()}")
+                f"Available collections: {self.list_collections()}") if collection_suffix \
+            else "All collections have been removed from the vector store."
 
     def _build_collection_filter(self, filter: dict | str, collection_suffix: str = "") -> dict:
         """Builds a filter for the collection based on the provided suffix."""
@@ -93,7 +93,7 @@ class PandasWrapper(BaseToolApiWrapper):
         if file_extension in ['csv', 'txt']:
             df = pd.read_csv(file_obj)
         elif file_extension in ['xlsx', 'xls']:
-            df = pd.read_excel(file_obj)
+            df = pd.read_excel(file_obj, engine='calamine')
         elif file_extension == 'parquet':
             df = pd.read_parquet(file_obj)
         elif file_extension == 'json':
@@ -162,35 +162,17 @@ class PandasWrapper(BaseToolApiWrapper):
         """Analyze and process using query on dataset"""
         df = self._get_dataframe(filename)
         code = self.generate_code_with_retries(df, query)
-        dispatch_custom_event(
-            name="thinking_step",
-            data={
-                "message": f"Executing generated code... \n\n```python\n{code}\n```",
-                "tool_name": "process_query",
-                "toolkit": "pandas"
-            }
-        )
+        self._log_tool_event(tool_name="process_query",
+                             message=f"Executing generated code... \n\n```python\n{code}\n```")
         try:
             result = self.execute_code(df, code)
         except Exception as e:
             logger.error(f"Code execution failed: {format_exc()}")
-            dispatch_custom_event(
-                name="thinking_step",
-                data={
-                    "message": f"Code execution failed: {format_exc()}",
-                    "tool_name": "process_query",
-                    "toolkit": "pandas"
-                }
-            )
+            self._log_tool_event(tool_name="process_query",
                                 message=f"Executing generated code... \n\n```python\n{code}\n```")
             raise
-        dispatch_custom_event(
-            name="thinking_step",
-            data={
-                "message": f"Result of code execution... \n\n```\n{result['result']}\n```",
-                "tool_name": "process_query",
-                "toolkit": "pandas"
-            }
-        )
+        self._log_tool_event(tool_name="process_query",
+                             message=f"Executing generated code... \n\n```python\n{code}\n```")
         if result.get("df") is not None:
             df = result.pop("df")
             # Not saving dataframe to artifact repo for now
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: alita_sdk
-Version: 0.3.347
+Version: 0.3.349
 Summary: SDK for building langchain agents using resources from Alita
 Author-email: Artem Rozumenko <artyom.rozumenko@gmail.com>, Mikalai Biazruchka <mikalai_biazruchka@epam.com>, Roman Mitusov <roman_mitusov@epam.com>, Ivan Krakhmaliuk <lifedj27@gmail.com>, Artem Dubrovskiy <ad13box@gmail.com>
 License-Expression: Apache-2.0
@@ -128,6 +128,7 @@ Requires-Dist: textract-py3==2.1.1; extra == "tools"
 Requires-Dist: slack_sdk==3.35.0; extra == "tools"
 Requires-Dist: deltalake==1.0.2; extra == "tools"
 Requires-Dist: google_cloud_bigquery==3.34.0; extra == "tools"
+Requires-Dist: python-calamine==0.5.3; extra == "tools"
 Provides-Extra: community
 Requires-Dist: retry-extended==0.2.3; extra == "community"
 Requires-Dist: pyobjtojson==0.3; extra == "community"
@@ -35,8 +35,8 @@ alita_sdk/configurations/zephyr_enterprise.py,sha256=UaBk3qWcT2-bCzko5HEPvgxArw1
 alita_sdk/configurations/zephyr_essential.py,sha256=tUIrh-PRNvdrLBj6rJXqlF-h6oaMXUQI1wgit07kFBw,752
 alita_sdk/runtime/__init__.py,sha256=4W0UF-nl3QF2bvET5lnah4o24CoTwSoKXhuN0YnwvEE,828
 alita_sdk/runtime/clients/__init__.py,sha256=BdehU5GBztN1Qi1Wul0cqlU46FxUfMnI6Vq2Zd_oq1M,296
-alita_sdk/runtime/clients/artifact.py,sha256=TPvROw1qu4IyUEGuf7x40IKRpb5eFZpYGN3-8LfQE0M,3461
-alita_sdk/runtime/clients/client.py,sha256=ZOWsv-JJl54lzQ4JzYFBKslt4DI0ExNZ3zQ_U7zA3uE,43590
+alita_sdk/runtime/clients/artifact.py,sha256=Tt3aWcxu20bVW6EX7s_iX5CTmcItKhUnkk8Q2gv2vw0,4036
+alita_sdk/runtime/clients/client.py,sha256=T3hmVnT63iLWEGeuJb8k8Httw-sSWUpy6rsrumD0P0w,43699
 alita_sdk/runtime/clients/datasource.py,sha256=HAZovoQN9jBg0_-lIlGBQzb4FJdczPhkHehAiVG3Wx0,1020
 alita_sdk/runtime/clients/prompt.py,sha256=li1RG9eBwgNK_Qf0qUaZ8QNTmsncFrAL2pv3kbxZRZg,1447
 alita_sdk/runtime/langchain/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -44,7 +44,7 @@ alita_sdk/runtime/langchain/assistant.py,sha256=lF46zxtEg8Tnims5gm-24jvjvUoJ28GB
 alita_sdk/runtime/langchain/chat_message_template.py,sha256=kPz8W2BG6IMyITFDA5oeb5BxVRkHEVZhuiGl4MBZKdc,2176
 alita_sdk/runtime/langchain/constants.py,sha256=eHVJ_beJNTf1WJo4yq7KMK64fxsRvs3lKc34QCXSbpk,3319
 alita_sdk/runtime/langchain/indexer.py,sha256=0ENHy5EOhThnAiYFc7QAsaTNp9rr8hDV_hTK8ahbatk,37592
-alita_sdk/runtime/langchain/langraph_agent.py,sha256=5I5l4zpbKeHy9JONwkkrEyFAMzpBY_BBeAOhHPwmWB8,46866
+alita_sdk/runtime/langchain/langraph_agent.py,sha256=XR6M_MufA0HEtfAESbHfXNU3DF45rh_H8z14wDfs1V8,47339
 alita_sdk/runtime/langchain/mixedAgentParser.py,sha256=M256lvtsL3YtYflBCEp-rWKrKtcY1dJIyRGVv7KW9ME,2611
 alita_sdk/runtime/langchain/mixedAgentRenderes.py,sha256=asBtKqm88QhZRILditjYICwFVKF5KfO38hu2O-WrSWE,5964
 alita_sdk/runtime/langchain/store_manager.py,sha256=i8Fl11IXJhrBXq1F1ukEVln57B1IBe-tqSUvfUmBV4A,2218
@@ -56,7 +56,7 @@ alita_sdk/runtime/langchain/document_loaders/AlitaCSVLoader.py,sha256=3ne-a5qIkB
 alita_sdk/runtime/langchain/document_loaders/AlitaConfluenceLoader.py,sha256=NzpoL4C7UzyzLouTSL_xTQw70MitNt-WZz3Eyl7QkTA,8294
 alita_sdk/runtime/langchain/document_loaders/AlitaDirectoryLoader.py,sha256=fKezkgvIcLG7S2PVJp1a8sZd6C4XQKNZKAFC87DbQts,7003
 alita_sdk/runtime/langchain/document_loaders/AlitaDocxMammothLoader.py,sha256=9hi5eHgDIfa9wBWqTuwMM6D6W64czrDTfZl_htooe8Y,5943
-alita_sdk/runtime/langchain/document_loaders/AlitaExcelLoader.py,sha256=P17csHx94JkXiyo1a2V-CrfP2E5XCG4uZC31ulZ_Ab4,5817
+alita_sdk/runtime/langchain/document_loaders/AlitaExcelLoader.py,sha256=YI8QaHRjCl8WtxuQKMXi_iTJBZ6da3OTNgoDFqNjz1g,9294
 alita_sdk/runtime/langchain/document_loaders/AlitaGitRepoLoader.py,sha256=5WXGcyHraSVj3ANHj_U6X4EDikoekrIYtS0Q_QqNIng,2608
 alita_sdk/runtime/langchain/document_loaders/AlitaImageLoader.py,sha256=QwgBJE-BvOasjgT1hYHZc0MP0F_elirUjSzKixoM6fY,6610
 alita_sdk/runtime/langchain/document_loaders/AlitaJSONLoader.py,sha256=Nav2cgCQKOHQi_ZgYYn_iFdP_Os56KVlVR5nHGXecBc,3445
@@ -106,7 +106,7 @@ alita_sdk/runtime/toolkits/vectorstore.py,sha256=BGppQADa1ZiLO17fC0uCACTTEvPHlod
 alita_sdk/runtime/tools/__init__.py,sha256=7OA8YPKlEOfXu3-gJA08cyR-VymjSPL-OmbXI-B2xVA,355
 alita_sdk/runtime/tools/agent.py,sha256=m98QxOHwnCRTT9j18Olbb5UPS8-ZGeQaGiUyZJSyFck,3162
 alita_sdk/runtime/tools/application.py,sha256=z3vLZODs-_xEEnZFmGF0fKz1j3VtNJxqsAmg5ovExpQ,3129
-alita_sdk/runtime/tools/artifact.py,sha256=4xA_va11WxO0fQclavKivRo24GI1b5qpsp2YZt7RxGY,10795
+alita_sdk/runtime/tools/artifact.py,sha256=wh2e9JSVBZzJHhNOANhHFF6BaK0RtuZ3kvhkqTrTbys,12234
 alita_sdk/runtime/tools/datasource.py,sha256=pvbaSfI-ThQQnjHG-QhYNSTYRnZB0rYtZFpjCfpzxYI,2443
 alita_sdk/runtime/tools/echo.py,sha256=spw9eCweXzixJqHnZofHE1yWiSUa04L4VKycf3KCEaM,486
 alita_sdk/runtime/tools/function.py,sha256=0iZJ-UxaPbtcXAVX9G5Vsn7vmD7lrz3cBG1qylto1gs,2844
@@ -121,8 +121,8 @@ alita_sdk/runtime/tools/prompt.py,sha256=nJafb_e5aOM1Rr3qGFCR-SKziU9uCsiP2okIMs9
 alita_sdk/runtime/tools/router.py,sha256=wCvZjVkdXK9dMMeEerrgKf5M790RudH68pDortnHSz0,1517
 alita_sdk/runtime/tools/sandbox.py,sha256=WNz-aUMtkGCPg84dDy_0BPkyp-6YjoYB-xjIEFFrtKw,11601
 alita_sdk/runtime/tools/tool.py,sha256=lE1hGi6qOAXG7qxtqxarD_XMQqTghdywf261DZawwno,5631
-alita_sdk/runtime/tools/vectorstore.py,sha256=UFBAJ_N2F6uB0xxIy1VMx581tHco-xDl7v2Hl6u0Xzw,34468
-alita_sdk/runtime/tools/vectorstore_base.py,sha256=F2EFwq5LFwCpV6U9D5Jq1dxYrV3lxOErLfgWTXqEVRI,27293
+alita_sdk/runtime/tools/vectorstore.py,sha256=8vRhi1lGFEs3unvnflEi2p59U2MfV32lStpEizpDms0,34467
+alita_sdk/runtime/tools/vectorstore_base.py,sha256=7ZkbegFG0XTQBYGsJjtrkK-zrqKwketfx8vSJzuPCug,27292
 alita_sdk/runtime/utils/AlitaCallback.py,sha256=E4LlSBuCHWiUq6W7IZExERHZY0qcmdjzc_rJlF2iQIw,7356
 alita_sdk/runtime/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 alita_sdk/runtime/utils/constants.py,sha256=Xntx1b_uxUzT4clwqHA_U6K8y5bBqf_4lSQwXdcWrp4,13586
@@ -134,7 +134,7 @@ alita_sdk/runtime/utils/toolkit_runtime.py,sha256=MU63Fpxj0b5_r1IUUc0Q3-PN9VwL7r
 alita_sdk/runtime/utils/toolkit_utils.py,sha256=I9QFqnaqfVgN26LUr6s3XlBlG6y0CoHURnCzG7XcwVs,5311
 alita_sdk/runtime/utils/utils.py,sha256=VXNLsdeTmf6snn9EtUyobv4yL-xzLhUcH8P_ORMifYc,675
 alita_sdk/tools/__init__.py,sha256=jUj1ztC2FbkIUB-YYmiqaz_rqW7Il5kWzDPn1mJmj5w,10545
-alita_sdk/tools/base_indexer_toolkit.py,sha256=IKtnJVX27yPu8bBWgbl-5YfUQy4pJPnBoRBFLkqagoc,20228
+alita_sdk/tools/base_indexer_toolkit.py,sha256=hRo93pgb8uJbQgxPle5n7CtLbSbY97jfVq2GKkoNzvc,20328
 alita_sdk/tools/elitea_base.py,sha256=up3HshASSDfjlHV_HPrs1aD4JIwwX0Ug26WGTzgIYvY,34724
 alita_sdk/tools/non_code_indexer_toolkit.py,sha256=B3QvhpT1F9QidkCcsOi3J_QrTOaNlTxqWFwe90VivQQ,1329
 alita_sdk/tools/ado/__init__.py,sha256=NnNYpNFW0_N_v1td_iekYOoQRRB7PIunbpT2f9ZFJM4,1201
@@ -276,7 +276,7 @@ alita_sdk/tools/ocr/api_wrapper.py,sha256=08UF8wj1sR8DcW0z16pw19bgLatLkBF8dySW-D
 alita_sdk/tools/ocr/text_detection.py,sha256=1DBxt54r3_HdEi93QynSIVta3rH3UpIvy799TPtDTtk,23825
 alita_sdk/tools/openapi/__init__.py,sha256=x1U4SGApL6MmNFz9SSsQCv352wMAIdGv0z4eMmYnjCw,4984
 alita_sdk/tools/pandas/__init__.py,sha256=rGenKJH5b9__qM4GerpyLT5YEhNk7W1gA7gn6Zpew04,2748
-alita_sdk/tools/pandas/api_wrapper.py,sha256=froH0h7NPPyFUHWNioESzJ-PQQ522oBM7hNTMfh3qAw,11494
+alita_sdk/tools/pandas/api_wrapper.py,sha256=wn0bagB45Tz_kN0FoKUCIxKcYklMMWTqQP5NOM8_Kwc,11100
 alita_sdk/tools/pandas/dataframe/__init__.py,sha256=iOZRlYDEtwqg2MaYFFxETjN8yHAkUqSNe86cm6ao4LA,108
 alita_sdk/tools/pandas/dataframe/errors.py,sha256=MBzpi5e2p3lNKxiVadzuT5A_DwuTT8cpJ059rXsdabs,320
 alita_sdk/tools/pandas/dataframe/prompts.py,sha256=Zvc-LQ7MifldDTuhxbi-RkpAyO5Gae1wDBtTU6Ygs3k,2884
@@ -350,8 +350,8 @@ alita_sdk/tools/zephyr_scale/api_wrapper.py,sha256=kT0TbmMvuKhDUZc0i7KO18O38JM9S
 alita_sdk/tools/zephyr_squad/__init__.py,sha256=0ne8XLJEQSLOWfzd2HdnqOYmQlUliKHbBED5kW_Vias,2895
 alita_sdk/tools/zephyr_squad/api_wrapper.py,sha256=kmw_xol8YIYFplBLWTqP_VKPRhL_1ItDD0_vXTe_UuI,14906
 alita_sdk/tools/zephyr_squad/zephyr_squad_cloud_client.py,sha256=R371waHsms4sllHCbijKYs90C-9Yu0sSR3N4SUfQOgU,5066
-alita_sdk-0.3.347.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
-alita_sdk-0.3.347.dist-info/METADATA,sha256=ZvJklicNWTOf3Q1MrNdtVdLhUEnd4oVDokj_y4J_Ecg,19015
-alita_sdk-0.3.347.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-alita_sdk-0.3.347.dist-info/top_level.txt,sha256=0vJYy5p_jK6AwVb1aqXr7Kgqgk3WDtQ6t5C-XI9zkmg,10
-alita_sdk-0.3.347.dist-info/RECORD,,
+alita_sdk-0.3.349.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+alita_sdk-0.3.349.dist-info/METADATA,sha256=7-TBm0rWMDR8iCwUI1ixF6841zGe1wyOqfeGEoLkiRU,19071
+alita_sdk-0.3.349.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+alita_sdk-0.3.349.dist-info/top_level.txt,sha256=0vJYy5p_jK6AwVb1aqXr7Kgqgk3WDtQ6t5C-XI9zkmg,10
+alita_sdk-0.3.349.dist-info/RECORD,,