alita-sdk 0.3.372__py3-none-any.whl → 0.3.374__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of alita-sdk has been flagged as potentially problematic. See the registry's advisory page for details.
- alita_sdk/runtime/clients/artifact.py +1 -1
- alita_sdk/runtime/clients/sandbox_client.py +365 -0
- alita_sdk/runtime/langchain/assistant.py +4 -2
- alita_sdk/runtime/langchain/document_loaders/constants.py +46 -89
- alita_sdk/runtime/langchain/langraph_agent.py +12 -0
- alita_sdk/runtime/tools/function.py +71 -0
- alita_sdk/runtime/tools/sandbox.py +16 -18
- alita_sdk/runtime/tools/vectorstore_base.py +40 -20
- alita_sdk/tools/base_indexer_toolkit.py +3 -1
- alita_sdk/tools/utils/__init__.py +17 -0
- alita_sdk/tools/utils/content_parser.py +8 -8
- {alita_sdk-0.3.372.dist-info → alita_sdk-0.3.374.dist-info}/METADATA +1 -1
- {alita_sdk-0.3.372.dist-info → alita_sdk-0.3.374.dist-info}/RECORD +16 -15
- {alita_sdk-0.3.372.dist-info → alita_sdk-0.3.374.dist-info}/WHEEL +0 -0
- {alita_sdk-0.3.372.dist-info → alita_sdk-0.3.374.dist-info}/licenses/LICENSE +0 -0
- {alita_sdk-0.3.372.dist-info → alita_sdk-0.3.374.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,365 @@
|
|
|
1
|
+
import logging
|
|
2
|
+
from typing import Dict, Optional
|
|
3
|
+
from urllib.parse import quote
|
|
4
|
+
|
|
5
|
+
import requests
|
|
6
|
+
from typing import Any
|
|
7
|
+
from json import dumps
|
|
8
|
+
import chardet
|
|
9
|
+
|
|
10
|
+
logger = logging.getLogger(__name__)
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
class ApiDetailsRequestError(Exception):
    """Raised when fetching application/version details from the Alita API fails."""
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
class SandboxArtifact:
|
|
18
|
+
def __init__(self, client: Any, bucket_name: str):
|
|
19
|
+
self.client = client
|
|
20
|
+
self.bucket_name = bucket_name
|
|
21
|
+
if not self.client.bucket_exists(bucket_name):
|
|
22
|
+
self.client.create_bucket(bucket_name)
|
|
23
|
+
|
|
24
|
+
def create(self, artifact_name: str, artifact_data: Any, bucket_name: str = None):
|
|
25
|
+
try:
|
|
26
|
+
if not bucket_name:
|
|
27
|
+
bucket_name = self.bucket_name
|
|
28
|
+
return dumps(self.client.create_artifact(bucket_name, artifact_name, artifact_data))
|
|
29
|
+
except Exception as e:
|
|
30
|
+
logger.error(f'Error: {e}')
|
|
31
|
+
return f'Error: {e}'
|
|
32
|
+
|
|
33
|
+
def get(self,
|
|
34
|
+
artifact_name: str,
|
|
35
|
+
bucket_name: str = None,
|
|
36
|
+
is_capture_image: bool = False,
|
|
37
|
+
page_number: int = None,
|
|
38
|
+
sheet_name: str = None,
|
|
39
|
+
excel_by_sheets: bool = False,
|
|
40
|
+
llm=None):
|
|
41
|
+
if not bucket_name:
|
|
42
|
+
bucket_name = self.bucket_name
|
|
43
|
+
data = self.client.download_artifact(bucket_name, artifact_name)
|
|
44
|
+
if len(data) == 0:
|
|
45
|
+
# empty file might be created
|
|
46
|
+
return ''
|
|
47
|
+
if isinstance(data, dict) and data['error']:
|
|
48
|
+
return f'{data['error']}. {data['content'] if data['content'] else ''}'
|
|
49
|
+
detected = chardet.detect(data)
|
|
50
|
+
return data
|
|
51
|
+
# TODO: add proper handling for binary files (images, pdf, etc.) for sandbox
|
|
52
|
+
# if detected['encoding'] is not None:
|
|
53
|
+
# try:
|
|
54
|
+
# return data.decode(detected['encoding'])
|
|
55
|
+
# except Exception:
|
|
56
|
+
# logger.error('Error while default encoding')
|
|
57
|
+
# return parse_file_content(file_name=artifact_name,
|
|
58
|
+
# file_content=data,
|
|
59
|
+
# is_capture_image=is_capture_image,
|
|
60
|
+
# page_number=page_number,
|
|
61
|
+
# sheet_name=sheet_name,
|
|
62
|
+
# excel_by_sheets=excel_by_sheets,
|
|
63
|
+
# llm=llm)
|
|
64
|
+
# else:
|
|
65
|
+
# return parse_file_content(file_name=artifact_name,
|
|
66
|
+
# file_content=data,
|
|
67
|
+
# is_capture_image=is_capture_image,
|
|
68
|
+
# page_number=page_number,
|
|
69
|
+
# sheet_name=sheet_name,
|
|
70
|
+
# excel_by_sheets=excel_by_sheets,
|
|
71
|
+
# llm=llm)
|
|
72
|
+
|
|
73
|
+
def delete(self, artifact_name: str, bucket_name=None):
|
|
74
|
+
if not bucket_name:
|
|
75
|
+
bucket_name = self.bucket_name
|
|
76
|
+
self.client.delete_artifact(bucket_name, artifact_name)
|
|
77
|
+
|
|
78
|
+
def list(self, bucket_name: str = None, return_as_string=True) -> str | dict:
|
|
79
|
+
if not bucket_name:
|
|
80
|
+
bucket_name = self.bucket_name
|
|
81
|
+
artifacts = self.client.list_artifacts(bucket_name)
|
|
82
|
+
return str(artifacts) if return_as_string else artifacts
|
|
83
|
+
|
|
84
|
+
def append(self, artifact_name: str, additional_data: Any, bucket_name: str = None):
|
|
85
|
+
if not bucket_name:
|
|
86
|
+
bucket_name = self.bucket_name
|
|
87
|
+
data = self.get(artifact_name, bucket_name)
|
|
88
|
+
if data == 'Could not detect encoding':
|
|
89
|
+
return data
|
|
90
|
+
data += f'{additional_data}' if len(data) > 0 else additional_data
|
|
91
|
+
self.client.create_artifact(bucket_name, artifact_name, data)
|
|
92
|
+
return 'Data appended successfully'
|
|
93
|
+
|
|
94
|
+
def overwrite(self, artifact_name: str, new_data: Any, bucket_name: str = None):
|
|
95
|
+
if not bucket_name:
|
|
96
|
+
bucket_name = self.bucket_name
|
|
97
|
+
return self.create(artifact_name, new_data, bucket_name)
|
|
98
|
+
|
|
99
|
+
def get_content_bytes(self,
|
|
100
|
+
artifact_name: str,
|
|
101
|
+
bucket_name: str = None):
|
|
102
|
+
if not bucket_name:
|
|
103
|
+
bucket_name = self.bucket_name
|
|
104
|
+
return self.client.download_artifact(bucket_name, artifact_name)
|
|
105
|
+
|
|
106
|
+
|
|
107
|
+
class SandboxClient:
    """REST client for the Alita platform API, scoped to a single project.

    Wraps prompt-lib, datasource, application, MCP, secret and artifact
    endpoints. All requests are issued with ``verify=False``.
    NOTE(review): TLS verification is disabled throughout — a security risk
    if ``base_url`` ever points outside a trusted network; confirm intent.
    NOTE(review): ``_get_real_user_id`` is referenced below but not defined
    in this module — confirm it is provided elsewhere, otherwise the MCP
    methods raise AttributeError.
    """

    def __init__(self,
                 base_url: str,
                 project_id: int,
                 auth_token: str,
                 api_extra_headers: Optional[dict] = None,
                 configurations: Optional[list] = None,
                 **kwargs):
        """Build endpoint URLs and auth headers for *project_id*.

        Recognized kwargs: ``XSECRET`` (X-SECRET header, default 'secret'),
        ``model_timeout`` (seconds, default 120), ``model_image_generation``
        (model name for image generation; None disables it).
        """
        self.base_url = base_url.rstrip('/')
        self.api_path = '/api/v1'
        self.llm_path = '/llm/v1'
        self.project_id = project_id
        self.auth_token = auth_token
        self.headers = {
            'Authorization': f'Bearer {auth_token}',
            'X-SECRET': kwargs.get('XSECRET', 'secret')
        }
        if api_extra_headers is not None:
            self.headers.update(api_extra_headers)
        # Pre-built endpoint URLs, all scoped to this project.
        self.predict_url = f'{self.base_url}{self.api_path}/prompt_lib/predict/prompt_lib/{self.project_id}'
        self.prompt_versions = f'{self.base_url}{self.api_path}/prompt_lib/version/prompt_lib/{self.project_id}'
        self.prompts = f'{self.base_url}{self.api_path}/prompt_lib/prompt/prompt_lib/{self.project_id}'
        self.datasources = f'{self.base_url}{self.api_path}/datasources/datasource/prompt_lib/{self.project_id}'
        self.datasources_predict = f'{self.base_url}{self.api_path}/datasources/predict/prompt_lib/{self.project_id}'
        self.datasources_search = f'{self.base_url}{self.api_path}/datasources/search/prompt_lib/{self.project_id}'
        self.app = f'{self.base_url}{self.api_path}/applications/application/prompt_lib/{self.project_id}'
        self.mcp_tools_list = f'{self.base_url}{self.api_path}/mcp_sse/tools_list/{self.project_id}'
        self.mcp_tools_call = f'{self.base_url}{self.api_path}/mcp_sse/tools_call/{self.project_id}'
        self.application_versions = f'{self.base_url}{self.api_path}/applications/version/prompt_lib/{self.project_id}'
        self.list_apps_url = f'{self.base_url}{self.api_path}/applications/applications/prompt_lib/{self.project_id}'
        self.integration_details = f'{self.base_url}{self.api_path}/integrations/integration/{self.project_id}'
        self.secrets_url = f'{self.base_url}{self.api_path}/secrets/secret/{self.project_id}'
        self.artifacts_url = f'{self.base_url}{self.api_path}/artifacts/artifacts/default/{self.project_id}'
        self.artifact_url = f'{self.base_url}{self.api_path}/artifacts/artifact/default/{self.project_id}'
        self.bucket_url = f'{self.base_url}{self.api_path}/artifacts/buckets/{self.project_id}'
        self.configurations_url = f'{self.base_url}{self.api_path}/integrations/integrations/default/{self.project_id}?section=configurations&unsecret=true'
        self.ai_section_url = f'{self.base_url}{self.api_path}/integrations/integrations/default/{self.project_id}?section=ai'
        self.image_generation_url = f'{self.base_url}{self.llm_path}/images/generations'
        self.configurations: list = configurations or []
        self.model_timeout = kwargs.get('model_timeout', 120)
        self.model_image_generation = kwargs.get('model_image_generation')

    def get_mcp_toolkits(self):
        """Return the MCP toolkits available to the current user, or [] if no user id."""
        if user_id := self._get_real_user_id():
            url = f'{self.mcp_tools_list}/{user_id}'
            data = requests.get(url, headers=self.headers, verify=False).json()
            return data
        else:
            return []

    def mcp_tool_call(self, params: dict[str, Any]):
        """Invoke an MCP tool; returns the parsed JSON response or raw text."""
        if user_id := self._get_real_user_id():
            url = f'{self.mcp_tools_call}/{user_id}'
            # Pydantic objects are not JSON-serializable: replace any argument
            # (or list element) exposing a callable .dict() with its dict form.
            arguments = params.get('params', {}).get('arguments', {})
            for arg_name, arg_value in arguments.items():
                if isinstance(arg_value, list):
                    params['params']['arguments'][arg_name] = [
                        item.dict() if hasattr(item, 'dict') and callable(item.dict) else item
                        for item in arg_value
                    ]
                elif hasattr(arg_value, 'dict') and callable(arg_value.dict):
                    params['params']['arguments'][arg_name] = arg_value.dict()
            response = requests.post(url, headers=self.headers, json=params, verify=False)
            try:
                return response.json()
            except (ValueError, TypeError):
                # Non-JSON payload: fall back to the raw body.
                return response.text
        else:
            # Was an f-string with no placeholders.
            return 'Error: Could not determine user ID for MCP tool call'

    def get_app_details(self, application_id: int):
        """Fetch the application record for *application_id*."""
        url = f'{self.app}/{application_id}'
        data = requests.get(url, headers=self.headers, verify=False).json()
        return data

    def get_list_of_apps(self):
        """Return all applications as [{'name', 'id'}, ...], paging through the API.

        Stops early (returning what was gathered so far) on the first
        non-OK response.
        """
        apps = []
        limit = 10
        offset = 0
        total_count = None

        while total_count is None or offset < total_count:
            params = {'offset': offset, 'limit': limit}
            resp = requests.get(self.list_apps_url, headers=self.headers, params=params, verify=False)

            if resp.ok:
                data = resp.json()
                total_count = data.get('total')
                apps.extend([{'name': app['name'], 'id': app['id']} for app in data.get('rows', [])])
                offset += limit
            else:
                break

        return apps

    def fetch_available_configurations(self) -> list:
        """Fetch the project's configuration integrations; [] on failure."""
        resp = requests.get(self.configurations_url, headers=self.headers, verify=False)
        if resp.ok:
            return resp.json()
        return []

    def all_models_and_integrations(self):
        """Fetch the 'ai' integrations section; [] on failure."""
        resp = requests.get(self.ai_section_url, headers=self.headers, verify=False)
        if resp.ok:
            return resp.json()
        return []

    def generate_image(self,
                       prompt: str,
                       n: int = 1,
                       size: str = 'auto',
                       quality: str = 'auto',
                       response_format: str = 'b64_json',
                       style: Optional[str] = None) -> dict:
        """Generate image(s) via the LLM gateway.

        Raises ValueError when no image model is configured, and re-raises
        requests errors after logging. 'auto' size/quality are omitted from
        the payload so the backend applies its own defaults.
        """
        if not self.model_image_generation:
            raise ValueError('Image generation model is not configured for this client')

        image_generation_data = {
            'prompt': prompt,
            'model': self.model_image_generation,
            'n': n,
            'response_format': response_format,
        }

        # Only add optional parameters if they have meaningful values.
        if size and size.lower() != 'auto':
            image_generation_data['size'] = size
        if quality and quality.lower() != 'auto':
            image_generation_data['quality'] = quality
        if style:
            image_generation_data['style'] = style

        # Standard headers for image generation.
        image_headers = self.headers.copy()
        image_headers.update({
            'Content-Type': 'application/json',
        })

        logger.info(f'Generating image with model: {self.model_image_generation}, prompt: {prompt[:50]}...')

        try:
            response = requests.post(
                self.image_generation_url,
                headers=image_headers,
                json=image_generation_data,
                verify=False,
                timeout=self.model_timeout
            )
            response.raise_for_status()
            return response.json()
        except requests.exceptions.HTTPError as e:
            logger.error(f'Image generation failed: {e.response.status_code} - {e.response.text}')
            raise
        except requests.exceptions.RequestException as e:
            logger.error(f'Image generation request failed: {e}')
            raise

    def get_app_version_details(self, application_id: int, application_version_id: int) -> dict:
        """Fetch version details, pushing configurations via PATCH.

        Raises ApiDetailsRequestError on any non-OK response.
        """
        url = f'{self.application_versions}/{application_id}/{application_version_id}'
        if self.configurations:
            configs = self.configurations
        else:
            configs = self.fetch_available_configurations()

        resp = requests.patch(url, headers=self.headers, verify=False, json={'configurations': configs})
        if resp.ok:
            return resp.json()
        logger.error(f'Failed to fetch application version details: {resp.status_code} - {resp.text}.'
                     f' Application ID: {application_id}, Version ID: {application_version_id}')
        raise ApiDetailsRequestError(
            f'Failed to fetch application version details for {application_id}/{application_version_id}.')

    def get_integration_details(self, integration_id: str, format_for_model: bool = False):
        """Fetch one integration record. ``format_for_model`` is currently unused."""
        url = f'{self.integration_details}/{integration_id}'
        data = requests.get(url, headers=self.headers, verify=False).json()
        return data

    def unsecret(self, secret_name: str):
        """Resolve a named secret; returns its value or None."""
        url = f'{self.secrets_url}/{secret_name}'
        data = requests.get(url, headers=self.headers, verify=False).json()
        logger.info(f'Unsecret response: {data}')
        return data.get('value', None)

    def artifact(self, bucket_name):
        """Return a SandboxArtifact bound to *bucket_name*."""
        return SandboxArtifact(self, bucket_name)

    def _process_request(self, data: 'requests.Response') -> Dict[str, str]:
        """Map an HTTP response to a payload dict or a uniform error dict."""
        if data.status_code == 403:
            return {'error': 'You are not authorized to access this resource'}
        elif data.status_code == 404:
            return {'error': 'Resource not found'}
        elif data.status_code != 200:
            return {
                'error': 'An error occurred while fetching the resource',
                'content': data.text
            }
        else:
            return data.json()

    # Backward-compatible alias for the original (misspelled) method name.
    _process_requst = _process_request

    def bucket_exists(self, bucket_name):
        """True when *bucket_name* is among the project buckets; False on any error."""
        try:
            resp = self._process_request(
                requests.get(f'{self.bucket_url}', headers=self.headers, verify=False)
            )
            for each in resp.get('rows', []):
                if each['name'] == bucket_name:
                    return True
            return False
        except Exception:
            # Was a bare except; keep the best-effort contract but don't
            # swallow KeyboardInterrupt/SystemExit.
            return False

    def create_bucket(self, bucket_name, expiration_measure='months', expiration_value=1):
        """Create a bucket with the given retention; returns the processed response."""
        post_data = {
            'name': bucket_name,
            'expiration_measure': expiration_measure,
            'expiration_value': expiration_value
        }
        resp = requests.post(f'{self.bucket_url}', headers=self.headers, json=post_data, verify=False)
        return self._process_request(resp)

    def list_artifacts(self, bucket_name: str):
        """List artifacts in *bucket_name* (lowercased, as the API requires)."""
        url = f'{self.artifacts_url}/{bucket_name.lower()}'
        data = requests.get(url, headers=self.headers, verify=False)
        return self._process_request(data)

    def create_artifact(self, bucket_name, artifact_name, artifact_data):
        """Upload *artifact_data* as a multipart file named *artifact_name*."""
        url = f'{self.artifacts_url}/{bucket_name.lower()}'
        data = requests.post(url, headers=self.headers, files={
            'file': (artifact_name, artifact_data)
        }, verify=False)
        return self._process_request(data)

    def download_artifact(self, bucket_name, artifact_name):
        """Download raw artifact bytes, or an error dict on failure.

        Unlike ``_process_request`` this returns ``data.content`` (bytes) on
        success, so it is kept separate.
        """
        url = f'{self.artifact_url}/{bucket_name.lower()}/{artifact_name}'
        data = requests.get(url, headers=self.headers, verify=False)
        if data.status_code == 403:
            return {'error': 'You are not authorized to access this resource'}
        elif data.status_code == 404:
            return {'error': 'Resource not found'}
        elif data.status_code != 200:
            return {
                'error': 'An error occurred while fetching the resource',
                'content': data.content
            }
        return data.content

    def delete_artifact(self, bucket_name, artifact_name):
        """Delete one artifact by filename.

        NOTE(review): bucket_name is not lowercased here (unlike the other
        artifact endpoints) and the filename is pre-quoted before requests
        URL-encodes it again — confirm both against the API.
        """
        url = f'{self.artifact_url}/{bucket_name}'
        data = requests.delete(url, headers=self.headers, verify=False, params={'filename': quote(artifact_name)})
        return self._process_request(data)
|
|
@@ -314,7 +314,8 @@ class Assistant:
|
|
|
314
314
|
memory=checkpointer,
|
|
315
315
|
store=self.store,
|
|
316
316
|
debug=False,
|
|
317
|
-
for_subgraph=False
|
|
317
|
+
for_subgraph=False,
|
|
318
|
+
alita_client=self.alita_client
|
|
318
319
|
)
|
|
319
320
|
|
|
320
321
|
return agent
|
|
@@ -328,7 +329,8 @@ class Assistant:
|
|
|
328
329
|
#
|
|
329
330
|
agent = create_graph(
|
|
330
331
|
client=self.client, tools=self.tools,
|
|
331
|
-
yaml_schema=self.prompt, memory=memory
|
|
332
|
+
yaml_schema=self.prompt, memory=memory,
|
|
333
|
+
alita_client=self.alita_client
|
|
332
334
|
)
|
|
333
335
|
#
|
|
334
336
|
return agent
|
|
@@ -30,10 +30,18 @@ from enum import Enum
|
|
|
30
30
|
|
|
31
31
|
|
|
32
32
|
class LoaderProperties(Enum):
|
|
33
|
-
LLM = '
|
|
33
|
+
LLM = 'use_llm'
|
|
34
|
+
PROMPT_DEFAULT = 'use_default_prompt'
|
|
34
35
|
PROMPT = 'prompt'
|
|
35
|
-
PROMPT_DEFAULT = 'prompt_default'
|
|
36
36
|
|
|
37
|
+
DEFAULT_ALLOWED_BASE = {'max_tokens': 512}
|
|
38
|
+
|
|
39
|
+
DEFAULT_ALLOWED_WITH_LLM = {
|
|
40
|
+
**DEFAULT_ALLOWED_BASE,
|
|
41
|
+
LoaderProperties.LLM.value: False,
|
|
42
|
+
LoaderProperties.PROMPT_DEFAULT.value: False,
|
|
43
|
+
LoaderProperties.PROMPT.value: "",
|
|
44
|
+
}
|
|
37
45
|
|
|
38
46
|
# Image file loaders mapping
|
|
39
47
|
image_loaders_map = {
|
|
@@ -42,66 +50,42 @@ image_loaders_map = {
|
|
|
42
50
|
'mime_type': 'image/png',
|
|
43
51
|
'is_multimodal_processing': True,
|
|
44
52
|
'kwargs': {},
|
|
45
|
-
'allowed_to_override':
|
|
46
|
-
'max_tokens', LoaderProperties.LLM.value,
|
|
47
|
-
LoaderProperties.PROMPT.value,
|
|
48
|
-
LoaderProperties.PROMPT_DEFAULT.value
|
|
49
|
-
],
|
|
53
|
+
'allowed_to_override': DEFAULT_ALLOWED_WITH_LLM,
|
|
50
54
|
},
|
|
51
55
|
'.jpg': {
|
|
52
56
|
'class': AlitaImageLoader,
|
|
53
57
|
'mime_type': 'image/jpeg',
|
|
54
58
|
'is_multimodal_processing': True,
|
|
55
59
|
'kwargs': {},
|
|
56
|
-
'allowed_to_override':
|
|
57
|
-
'max_tokens', LoaderProperties.LLM.value,
|
|
58
|
-
LoaderProperties.PROMPT.value,
|
|
59
|
-
LoaderProperties.PROMPT_DEFAULT.value
|
|
60
|
-
]
|
|
60
|
+
'allowed_to_override': DEFAULT_ALLOWED_WITH_LLM
|
|
61
61
|
},
|
|
62
62
|
'.jpeg': {
|
|
63
63
|
'class': AlitaImageLoader,
|
|
64
64
|
'mime_type': 'image/jpeg',
|
|
65
65
|
'is_multimodal_processing': True,
|
|
66
66
|
'kwargs': {},
|
|
67
|
-
'allowed_to_override':
|
|
68
|
-
'max_tokens', LoaderProperties.LLM.value,
|
|
69
|
-
LoaderProperties.PROMPT.value,
|
|
70
|
-
LoaderProperties.PROMPT_DEFAULT.value
|
|
71
|
-
]
|
|
67
|
+
'allowed_to_override': DEFAULT_ALLOWED_WITH_LLM
|
|
72
68
|
},
|
|
73
69
|
'.gif': {
|
|
74
70
|
'class': AlitaImageLoader,
|
|
75
71
|
'mime_type': 'image/gif',
|
|
76
72
|
'is_multimodal_processing': True,
|
|
77
73
|
'kwargs': {},
|
|
78
|
-
'allowed_to_override':
|
|
79
|
-
'max_tokens', LoaderProperties.LLM.value,
|
|
80
|
-
LoaderProperties.PROMPT.value,
|
|
81
|
-
LoaderProperties.PROMPT_DEFAULT.value
|
|
82
|
-
]
|
|
74
|
+
'allowed_to_override': DEFAULT_ALLOWED_WITH_LLM
|
|
83
75
|
},
|
|
84
76
|
'.bmp': {
|
|
85
77
|
'class': AlitaImageLoader,
|
|
86
78
|
'mime_type': 'image/bmp',
|
|
87
79
|
'is_multimodal_processing': True,
|
|
88
80
|
'kwargs': {},
|
|
89
|
-
'allowed_to_override':
|
|
90
|
-
'max_tokens', LoaderProperties.LLM.value,
|
|
91
|
-
LoaderProperties.PROMPT.value,
|
|
92
|
-
LoaderProperties.PROMPT_DEFAULT.value
|
|
93
|
-
]
|
|
81
|
+
'allowed_to_override': DEFAULT_ALLOWED_WITH_LLM
|
|
94
82
|
},
|
|
95
83
|
'.svg': {
|
|
96
84
|
'class': AlitaImageLoader,
|
|
97
85
|
'mime_type': 'image/svg+xml',
|
|
98
86
|
'is_multimodal_processing': True,
|
|
99
87
|
'kwargs': {},
|
|
100
|
-
'allowed_to_override':
|
|
101
|
-
'max_tokens', LoaderProperties.LLM.value,
|
|
102
|
-
LoaderProperties.PROMPT.value,
|
|
103
|
-
LoaderProperties.PROMPT_DEFAULT.value
|
|
104
|
-
]
|
|
88
|
+
'allowed_to_override': DEFAULT_ALLOWED_WITH_LLM
|
|
105
89
|
}
|
|
106
90
|
}
|
|
107
91
|
|
|
@@ -114,7 +98,7 @@ document_loaders_map = {
|
|
|
114
98
|
'kwargs': {
|
|
115
99
|
'autodetect_encoding': True
|
|
116
100
|
},
|
|
117
|
-
'allowed_to_override':
|
|
101
|
+
'allowed_to_override': DEFAULT_ALLOWED_BASE
|
|
118
102
|
},
|
|
119
103
|
'.yml': {
|
|
120
104
|
'class': AlitaTextLoader,
|
|
@@ -123,7 +107,7 @@ document_loaders_map = {
|
|
|
123
107
|
'kwargs': {
|
|
124
108
|
'autodetect_encoding': True
|
|
125
109
|
},
|
|
126
|
-
'allowed_to_override':
|
|
110
|
+
'allowed_to_override': DEFAULT_ALLOWED_BASE
|
|
127
111
|
},
|
|
128
112
|
'.yaml': {
|
|
129
113
|
'class': AlitaTextLoader,
|
|
@@ -132,7 +116,7 @@ document_loaders_map = {
|
|
|
132
116
|
'kwargs': {
|
|
133
117
|
'autodetect_encoding': True
|
|
134
118
|
},
|
|
135
|
-
'allowed_to_override':
|
|
119
|
+
'allowed_to_override': DEFAULT_ALLOWED_BASE
|
|
136
120
|
},
|
|
137
121
|
'.groovy': {
|
|
138
122
|
'class': AlitaTextLoader,
|
|
@@ -141,14 +125,14 @@ document_loaders_map = {
|
|
|
141
125
|
'kwargs': {
|
|
142
126
|
'autodetect_encoding': True
|
|
143
127
|
},
|
|
144
|
-
'allowed_to_override':
|
|
128
|
+
'allowed_to_override': DEFAULT_ALLOWED_BASE
|
|
145
129
|
},
|
|
146
130
|
'.md': {
|
|
147
131
|
'class': AlitaMarkdownLoader,
|
|
148
132
|
'mime_type': 'text/markdown',
|
|
149
133
|
'is_multimodal_processing': False,
|
|
150
134
|
'kwargs': {},
|
|
151
|
-
'allowed_to_override':
|
|
135
|
+
'allowed_to_override': DEFAULT_ALLOWED_BASE
|
|
152
136
|
},
|
|
153
137
|
'.csv': {
|
|
154
138
|
'class': AlitaCSVLoader,
|
|
@@ -159,7 +143,7 @@ document_loaders_map = {
|
|
|
159
143
|
'raw_content': True,
|
|
160
144
|
'cleanse': False
|
|
161
145
|
},
|
|
162
|
-
'allowed_to_override':
|
|
146
|
+
'allowed_to_override': DEFAULT_ALLOWED_BASE
|
|
163
147
|
},
|
|
164
148
|
'.xlsx': {
|
|
165
149
|
'class': AlitaExcelLoader,
|
|
@@ -171,11 +155,7 @@ document_loaders_map = {
|
|
|
171
155
|
'raw_content': True,
|
|
172
156
|
'cleanse': False
|
|
173
157
|
},
|
|
174
|
-
'allowed_to_override':
|
|
175
|
-
'max_tokens', LoaderProperties.LLM.value,
|
|
176
|
-
LoaderProperties.PROMPT.value,
|
|
177
|
-
LoaderProperties.PROMPT_DEFAULT.value
|
|
178
|
-
]
|
|
158
|
+
'allowed_to_override': DEFAULT_ALLOWED_WITH_LLM
|
|
179
159
|
},
|
|
180
160
|
'.xls': {
|
|
181
161
|
'class': AlitaExcelLoader,
|
|
@@ -186,22 +166,14 @@ document_loaders_map = {
|
|
|
186
166
|
'raw_content': True,
|
|
187
167
|
'cleanse': False
|
|
188
168
|
},
|
|
189
|
-
'allowed_to_override':
|
|
190
|
-
'max_tokens', LoaderProperties.LLM.value,
|
|
191
|
-
LoaderProperties.PROMPT.value,
|
|
192
|
-
LoaderProperties.PROMPT_DEFAULT.value
|
|
193
|
-
]
|
|
169
|
+
'allowed_to_override': DEFAULT_ALLOWED_WITH_LLM
|
|
194
170
|
},
|
|
195
171
|
'.pdf': {
|
|
196
172
|
'class': AlitaPDFLoader,
|
|
197
173
|
'mime_type': 'application/pdf',
|
|
198
174
|
'is_multimodal_processing': False,
|
|
199
175
|
'kwargs': {},
|
|
200
|
-
'allowed_to_override':
|
|
201
|
-
'max_tokens', LoaderProperties.LLM.value,
|
|
202
|
-
LoaderProperties.PROMPT.value,
|
|
203
|
-
LoaderProperties.PROMPT_DEFAULT.value
|
|
204
|
-
]
|
|
176
|
+
'allowed_to_override': DEFAULT_ALLOWED_WITH_LLM
|
|
205
177
|
},
|
|
206
178
|
'.docx': {
|
|
207
179
|
'class': AlitaDocxMammothLoader,
|
|
@@ -211,58 +183,42 @@ document_loaders_map = {
|
|
|
211
183
|
'kwargs': {
|
|
212
184
|
'extract_images': True
|
|
213
185
|
},
|
|
214
|
-
'allowed_to_override':
|
|
215
|
-
'max_tokens', 'mode', LoaderProperties.LLM.value,
|
|
216
|
-
LoaderProperties.PROMPT.value,
|
|
217
|
-
LoaderProperties.PROMPT_DEFAULT.value
|
|
218
|
-
]
|
|
186
|
+
'allowed_to_override': {**DEFAULT_ALLOWED_WITH_LLM, 'mode': 'paged'}
|
|
219
187
|
},
|
|
220
188
|
'.json': {
|
|
221
189
|
'class': AlitaJSONLoader,
|
|
222
190
|
'mime_type': 'application/json',
|
|
223
191
|
'is_multimodal_processing': False,
|
|
224
192
|
'kwargs': {},
|
|
225
|
-
'allowed_to_override':
|
|
193
|
+
'allowed_to_override': DEFAULT_ALLOWED_BASE
|
|
226
194
|
},
|
|
227
195
|
'.jsonl': {
|
|
228
196
|
'class': AirbyteJSONLoader,
|
|
229
197
|
'mime_type': 'application/jsonl',
|
|
230
198
|
'is_multimodal_processing': False,
|
|
231
199
|
'kwargs': {},
|
|
232
|
-
'allowed_to_override':
|
|
200
|
+
'allowed_to_override': DEFAULT_ALLOWED_BASE
|
|
233
201
|
},
|
|
234
202
|
'.htm': {
|
|
235
203
|
'class': UnstructuredHTMLLoader,
|
|
236
204
|
'mime_type': 'text/html',
|
|
237
205
|
'is_multimodal_processing': False,
|
|
238
206
|
'kwargs': {},
|
|
239
|
-
'allowed_to_override':
|
|
240
|
-
'max_tokens', LoaderProperties.LLM.value,
|
|
241
|
-
LoaderProperties.PROMPT.value,
|
|
242
|
-
LoaderProperties.PROMPT_DEFAULT.value
|
|
243
|
-
]
|
|
207
|
+
'allowed_to_override': DEFAULT_ALLOWED_WITH_LLM
|
|
244
208
|
},
|
|
245
209
|
'.html': {
|
|
246
210
|
'class': UnstructuredHTMLLoader,
|
|
247
211
|
'mime_type': 'text/html',
|
|
248
212
|
'is_multimodal_processing': False,
|
|
249
213
|
'kwargs': {},
|
|
250
|
-
'allowed_to_override':
|
|
251
|
-
'max_tokens', LoaderProperties.LLM.value,
|
|
252
|
-
LoaderProperties.PROMPT.value,
|
|
253
|
-
LoaderProperties.PROMPT_DEFAULT.value
|
|
254
|
-
]
|
|
214
|
+
'allowed_to_override': DEFAULT_ALLOWED_WITH_LLM
|
|
255
215
|
},
|
|
256
216
|
'.xml': {
|
|
257
217
|
'class': UnstructuredXMLLoader,
|
|
258
218
|
'mime_type': 'text/xml',
|
|
259
219
|
'is_multimodal_processing': False,
|
|
260
220
|
'kwargs': {},
|
|
261
|
-
'allowed_to_override':
|
|
262
|
-
'max_tokens', LoaderProperties.LLM.value,
|
|
263
|
-
LoaderProperties.PROMPT.value,
|
|
264
|
-
LoaderProperties.PROMPT_DEFAULT.value
|
|
265
|
-
]
|
|
221
|
+
'allowed_to_override': DEFAULT_ALLOWED_WITH_LLM
|
|
266
222
|
},
|
|
267
223
|
'.ppt': {
|
|
268
224
|
'class': AlitaPowerPointLoader,
|
|
@@ -271,11 +227,7 @@ document_loaders_map = {
|
|
|
271
227
|
'kwargs': {
|
|
272
228
|
'mode': 'paged'
|
|
273
229
|
},
|
|
274
|
-
'allowed_to_override':
|
|
275
|
-
'max_tokens', 'mode', LoaderProperties.LLM.value,
|
|
276
|
-
LoaderProperties.PROMPT.value,
|
|
277
|
-
LoaderProperties.PROMPT_DEFAULT.value
|
|
278
|
-
]
|
|
230
|
+
'allowed_to_override': {**DEFAULT_ALLOWED_WITH_LLM, 'mode': 'paged'}
|
|
279
231
|
},
|
|
280
232
|
'.pptx': {
|
|
281
233
|
'class': AlitaPowerPointLoader,
|
|
@@ -285,20 +237,19 @@ document_loaders_map = {
|
|
|
285
237
|
'kwargs': {
|
|
286
238
|
'mode': 'paged'
|
|
287
239
|
},
|
|
288
|
-
'allowed_to_override':
|
|
289
|
-
|
|
290
|
-
|
|
291
|
-
|
|
292
|
-
|
|
293
|
-
|
|
294
|
-
]
|
|
240
|
+
'allowed_to_override': {
|
|
241
|
+
**DEFAULT_ALLOWED_WITH_LLM,
|
|
242
|
+
'mode': 'paged',
|
|
243
|
+
'pages_per_chunk': 5,
|
|
244
|
+
'extract_images': False,
|
|
245
|
+
}
|
|
295
246
|
},
|
|
296
247
|
'.py': {
|
|
297
248
|
'class': AlitaPythonLoader,
|
|
298
249
|
'mime_type': 'text/x-python',
|
|
299
250
|
'is_multimodal_processing': False,
|
|
300
251
|
'kwargs': {},
|
|
301
|
-
'allowed_to_override':
|
|
252
|
+
'allowed_to_override': DEFAULT_ALLOWED_BASE
|
|
302
253
|
}
|
|
303
254
|
}
|
|
304
255
|
|
|
@@ -335,10 +286,16 @@ default_loader_config = {
|
|
|
335
286
|
'mime_type': 'text/plain',
|
|
336
287
|
'is_multimodal_processing': False,
|
|
337
288
|
'kwargs': {},
|
|
338
|
-
'allowed_to_override':
|
|
289
|
+
'allowed_to_override': DEFAULT_ALLOWED_BASE
|
|
339
290
|
}
|
|
340
291
|
|
|
341
292
|
code_loaders_map = {ext: default_loader_config for ext in code_extensions}
|
|
342
293
|
|
|
343
294
|
# Combined mapping for backward compatibility
|
|
344
295
|
loaders_map = {**image_loaders_map, **document_loaders_map, **code_loaders_map}
|
|
296
|
+
|
|
297
|
+
loaders_allowed_to_override = {
|
|
298
|
+
extension: config.get('allowed_to_override')
|
|
299
|
+
for extension, config in loaders_map.items()
|
|
300
|
+
if 'allowed_to_override' in config
|
|
301
|
+
}
|
|
@@ -553,6 +553,18 @@ def create_graph(
|
|
|
553
553
|
input_variables=node.get('input', ['messages']),
|
|
554
554
|
structured_output=node.get('structured_output', False)))
|
|
555
555
|
break
|
|
556
|
+
elif node_type == 'code':
|
|
557
|
+
from ..tools.sandbox import create_sandbox_tool
|
|
558
|
+
sandbox_tool = create_sandbox_tool(stateful=False, allow_net=True)
|
|
559
|
+
code = node.get('code', "return 'Code block is empty'")
|
|
560
|
+
lg_builder.add_node(node_id, FunctionTool(
|
|
561
|
+
tool=sandbox_tool, name=node['id'], return_type='dict',
|
|
562
|
+
output_variables=node.get('output', []),
|
|
563
|
+
input_mapping={'code': {'type': 'fixed', 'value': code}},
|
|
564
|
+
input_variables=node.get('input', ['messages']),
|
|
565
|
+
structured_output=node.get('structured_output', False),
|
|
566
|
+
alita_client=kwargs.get('alita_client', None)
|
|
567
|
+
))
|
|
556
568
|
elif node_type == 'llm':
|
|
557
569
|
output_vars = node.get('output', [])
|
|
558
570
|
output_vars_dict = {
|
|
@@ -1,4 +1,6 @@
|
|
|
1
|
+
import json
|
|
1
2
|
import logging
|
|
3
|
+
from copy import deepcopy
|
|
2
4
|
from json import dumps
|
|
3
5
|
|
|
4
6
|
from langchain_core.callbacks import dispatch_custom_event
|
|
@@ -8,6 +10,7 @@ from langchain_core.tools import BaseTool, ToolException
|
|
|
8
10
|
from typing import Any, Optional, Union, Annotated
|
|
9
11
|
from langchain_core.utils.function_calling import convert_to_openai_tool
|
|
10
12
|
from pydantic import ValidationError
|
|
13
|
+
|
|
11
14
|
from ..langchain.utils import propagate_the_input_mapping
|
|
12
15
|
|
|
13
16
|
logger = logging.getLogger(__name__)
|
|
@@ -21,6 +24,63 @@ class FunctionTool(BaseTool):
|
|
|
21
24
|
input_variables: Optional[list[str]] = None
|
|
22
25
|
input_mapping: Optional[dict[str, dict]] = None
|
|
23
26
|
output_variables: Optional[list[str]] = None
|
|
27
|
+
structured_output: Optional[bool] = False
|
|
28
|
+
alita_client: Optional[Any] = None
|
|
29
|
+
|
|
30
|
+
def _prepare_pyodide_input(self, state: Union[str, dict, ToolCall]) -> str:
|
|
31
|
+
"""Prepare input for PyodideSandboxTool by injecting state into the code block."""
|
|
32
|
+
# add state into the code block here since it might be changed during the execution of the code
|
|
33
|
+
state_copy = deepcopy(state)
|
|
34
|
+
# pickle state
|
|
35
|
+
import pickle
|
|
36
|
+
|
|
37
|
+
del state_copy['messages'] # remove messages to avoid issues with pickling without langchain-core
|
|
38
|
+
serialized_state = pickle.dumps(state_copy)
|
|
39
|
+
# inject state into the code block as alita_state variable
|
|
40
|
+
pyodide_predata = f"""import pickle\nalita_state = pickle.loads({serialized_state})\n"""
|
|
41
|
+
# add classes related to sandbox client
|
|
42
|
+
# read the content of alita_sdk/runtime/cliens/sandbox_client.py
|
|
43
|
+
try:
|
|
44
|
+
with open('alita_sdk/runtime/clients/sandbox_client.py', 'r') as f:
|
|
45
|
+
sandbox_client_code = f.read()
|
|
46
|
+
pyodide_predata += f"\n{sandbox_client_code}\n"
|
|
47
|
+
pyodide_predata += (f"alita_client = SandboxClient(base_url='{self.alita_client.base_url}',"
|
|
48
|
+
f"project_id={self.alita_client.project_id},"
|
|
49
|
+
f"auth_token='{self.alita_client.auth_token}')")
|
|
50
|
+
except FileNotFoundError:
|
|
51
|
+
logger.error("sandbox_client.py not found. Ensure 'alita_sdk/runtime/clients/sandbox_client.py' exists.")
|
|
52
|
+
return pyodide_predata
|
|
53
|
+
|
|
54
|
+
def _handle_pyodide_output(self, tool_result: Any) -> dict:
|
|
55
|
+
"""Handle output processing for PyodideSandboxTool results."""
|
|
56
|
+
tool_result_converted = {}
|
|
57
|
+
|
|
58
|
+
if self.output_variables:
|
|
59
|
+
for var in self.output_variables:
|
|
60
|
+
if isinstance(tool_result, dict) and var in tool_result:
|
|
61
|
+
tool_result_converted[var] = tool_result[var]
|
|
62
|
+
else:
|
|
63
|
+
# handler in case user points to a var that is not in the output of the tool
|
|
64
|
+
tool_result_converted[var] = tool_result.get('result',
|
|
65
|
+
tool_result.get('error') if tool_result.get('error')
|
|
66
|
+
else 'Execution result is missing')
|
|
67
|
+
else:
|
|
68
|
+
tool_result_converted.update({"messages": [{"role": "assistant", "content": dumps(tool_result)}]})
|
|
69
|
+
|
|
70
|
+
if self.structured_output:
|
|
71
|
+
# execute code tool and update state variables
|
|
72
|
+
try:
|
|
73
|
+
result_value = tool_result.get('result', {})
|
|
74
|
+
tool_result_converted.update(result_value if isinstance(result_value, dict)
|
|
75
|
+
else json.loads(result_value))
|
|
76
|
+
except json.JSONDecodeError:
|
|
77
|
+
logger.error(f"JSONDecodeError: {tool_result}")
|
|
78
|
+
|
|
79
|
+
return tool_result_converted
|
|
80
|
+
|
|
81
|
+
def _is_pyodide_tool(self) -> bool:
|
|
82
|
+
"""Check if the current tool is a PyodideSandboxTool."""
|
|
83
|
+
return self.tool.name.lower() == 'pyodide_sandbox'
|
|
24
84
|
|
|
25
85
|
def invoke(
|
|
26
86
|
self,
|
|
@@ -31,8 +91,14 @@ class FunctionTool(BaseTool):
|
|
|
31
91
|
params = convert_to_openai_tool(self.tool).get(
|
|
32
92
|
'function', {'parameters': {}}).get(
|
|
33
93
|
'parameters', {'properties': {}}).get('properties', {})
|
|
94
|
+
|
|
34
95
|
func_args = propagate_the_input_mapping(input_mapping=self.input_mapping, input_variables=self.input_variables,
|
|
35
96
|
state=state)
|
|
97
|
+
|
|
98
|
+
# special handler for PyodideSandboxTool
|
|
99
|
+
if self._is_pyodide_tool():
|
|
100
|
+
code = func_args['code']
|
|
101
|
+
func_args['code'] = f"{self._prepare_pyodide_input(state)}\n{code}"
|
|
36
102
|
try:
|
|
37
103
|
tool_result = self.tool.invoke(func_args, config, **kwargs)
|
|
38
104
|
dispatch_custom_event(
|
|
@@ -44,6 +110,11 @@ class FunctionTool(BaseTool):
|
|
|
44
110
|
}, config=config
|
|
45
111
|
)
|
|
46
112
|
logger.info(f"ToolNode response: {tool_result}")
|
|
113
|
+
|
|
114
|
+
# handler for PyodideSandboxTool
|
|
115
|
+
if self._is_pyodide_tool():
|
|
116
|
+
return self._handle_pyodide_output(tool_result)
|
|
117
|
+
|
|
47
118
|
if not self.output_variables:
|
|
48
119
|
return {"messages": [{"role": "assistant", "content": dumps(tool_result)}]}
|
|
49
120
|
else:
|
|
@@ -1,10 +1,10 @@
|
|
|
1
|
-
import logging
|
|
2
1
|
import asyncio
|
|
2
|
+
import logging
|
|
3
3
|
import subprocess
|
|
4
4
|
import os
|
|
5
|
-
from typing import Any, Type, Optional,
|
|
5
|
+
from typing import Any, Type, Optional, Dict
|
|
6
6
|
from langchain_core.tools import BaseTool
|
|
7
|
-
from pydantic import BaseModel,
|
|
7
|
+
from pydantic import BaseModel, create_model
|
|
8
8
|
from pydantic.fields import FieldInfo
|
|
9
9
|
|
|
10
10
|
logger = logging.getLogger(__name__)
|
|
@@ -190,30 +190,28 @@ class PyodideSandboxTool(BaseTool):
|
|
|
190
190
|
self.session_bytes = result.session_bytes
|
|
191
191
|
self.session_metadata = result.session_metadata
|
|
192
192
|
|
|
193
|
-
|
|
194
|
-
|
|
195
|
-
|
|
193
|
+
result_dict = {}
|
|
194
|
+
|
|
196
195
|
if result.result is not None:
|
|
197
|
-
|
|
198
|
-
|
|
196
|
+
result_dict["result"] = result.result
|
|
197
|
+
|
|
199
198
|
if result.stdout:
|
|
200
|
-
|
|
201
|
-
|
|
199
|
+
result_dict["output"] = result.stdout
|
|
200
|
+
|
|
202
201
|
if result.stderr:
|
|
203
|
-
|
|
204
|
-
|
|
202
|
+
result_dict["error"] = result.stderr
|
|
203
|
+
|
|
205
204
|
if result.status == 'error':
|
|
206
|
-
|
|
207
|
-
|
|
205
|
+
result_dict["status"] = "Execution failed"
|
|
206
|
+
|
|
208
207
|
execution_info = f"Execution time: {result.execution_time:.2f}s"
|
|
209
208
|
if result.session_metadata and 'packages' in result.session_metadata:
|
|
210
209
|
packages = result.session_metadata.get('packages', [])
|
|
211
210
|
if packages:
|
|
212
211
|
execution_info += f", Packages: {', '.join(packages)}"
|
|
213
|
-
|
|
214
|
-
|
|
215
|
-
|
|
216
|
-
return "\n".join(output_parts) if output_parts else "Code executed successfully (no output)"
|
|
212
|
+
|
|
213
|
+
result_dict["execution_info"] = execution_info
|
|
214
|
+
return result_dict
|
|
217
215
|
|
|
218
216
|
except Exception as e:
|
|
219
217
|
logger.error(f"Error executing code in sandbox: {e}")
|
|
@@ -177,6 +177,37 @@ class VectorStoreWrapperBase(BaseToolApiWrapper):
|
|
|
177
177
|
except Exception as e:
|
|
178
178
|
logger.error(f"Failed to initialize PGVectorSearch: {str(e)}")
|
|
179
179
|
|
|
180
|
+
def _similarity_search_with_score(self, query: str, filter: dict = None, k: int = 10):
|
|
181
|
+
"""
|
|
182
|
+
Perform similarity search with proper exception handling for DataException.
|
|
183
|
+
|
|
184
|
+
Args:
|
|
185
|
+
query: Search query string
|
|
186
|
+
filter: Optional filter dictionary
|
|
187
|
+
k: Number of results to return
|
|
188
|
+
|
|
189
|
+
Returns:
|
|
190
|
+
List of (Document, score) tuples
|
|
191
|
+
|
|
192
|
+
Raises:
|
|
193
|
+
ToolException: When DataException occurs or other search errors
|
|
194
|
+
"""
|
|
195
|
+
try:
|
|
196
|
+
return self.vectorstore.similarity_search_with_score(
|
|
197
|
+
query, filter=filter, k=k
|
|
198
|
+
)
|
|
199
|
+
except DataException as dimException:
|
|
200
|
+
exception_str = str(dimException)
|
|
201
|
+
if 'different vector dimensions' in exception_str:
|
|
202
|
+
logger.error(f"Data exception: {exception_str}")
|
|
203
|
+
raise ToolException(f"Global search cannot be completed since collections were indexed using "
|
|
204
|
+
f"different embedding models. Use search within a single collection."
|
|
205
|
+
f"\nDetails: {exception_str}")
|
|
206
|
+
raise ToolException(f"Data exception during search. Possibly invalid filter: {exception_str}")
|
|
207
|
+
except Exception as e:
|
|
208
|
+
logger.error(f"Error during similarity search: {str(e)}")
|
|
209
|
+
raise ToolException(f"Search failed: {str(e)}")
|
|
210
|
+
|
|
180
211
|
def list_collections(self) -> List[str]:
|
|
181
212
|
"""List all collections in the vectorstore."""
|
|
182
213
|
|
|
@@ -311,7 +342,7 @@ class VectorStoreWrapperBase(BaseToolApiWrapper):
|
|
|
311
342
|
}
|
|
312
343
|
|
|
313
344
|
try:
|
|
314
|
-
document_items = self.
|
|
345
|
+
document_items = self._similarity_search_with_score(
|
|
315
346
|
query, filter=document_filter, k=search_top
|
|
316
347
|
)
|
|
317
348
|
# Add document results to unique docs
|
|
@@ -324,15 +355,6 @@ class VectorStoreWrapperBase(BaseToolApiWrapper):
|
|
|
324
355
|
if doc_id not in unique_docs or score > chunk_type_scores.get(doc_id, 0):
|
|
325
356
|
unique_docs[doc_id] = doc
|
|
326
357
|
chunk_type_scores[doc_id] = score
|
|
327
|
-
except DataException as dimException:
|
|
328
|
-
exception_str = str(dimException)
|
|
329
|
-
if 'different vector dimensions' in exception_str:
|
|
330
|
-
logger.error(f"Data exception: {exception_str}")
|
|
331
|
-
raise ToolException(f"Global search cannot be completed since collections were indexed using "
|
|
332
|
-
f"different embedding models. Use search within a single collection."
|
|
333
|
-
f"\nDetails: {exception_str}")
|
|
334
|
-
raise ToolException(f"Data exception during search. Possibly invalid filter: {exception_str}")
|
|
335
|
-
|
|
336
358
|
except Exception as e:
|
|
337
359
|
logger.warning(f"Error searching for document chunks: {str(e)}")
|
|
338
360
|
|
|
@@ -353,18 +375,16 @@ class VectorStoreWrapperBase(BaseToolApiWrapper):
|
|
|
353
375
|
}
|
|
354
376
|
|
|
355
377
|
try:
|
|
356
|
-
chunk_items = self.
|
|
378
|
+
chunk_items = self._similarity_search_with_score(
|
|
357
379
|
query, filter=chunk_filter, k=search_top
|
|
358
380
|
)
|
|
359
|
-
|
|
360
|
-
logger.debug(f"Chunk items for {chunk_type}: {chunk_items[0]}")
|
|
361
|
-
|
|
381
|
+
|
|
362
382
|
for doc, score in chunk_items:
|
|
363
383
|
# Create unique identifier for document
|
|
364
384
|
source = doc.metadata.get('source')
|
|
365
385
|
chunk_id = doc.metadata.get('chunk_id')
|
|
366
386
|
doc_id = f"{source}_{chunk_id}" if source and chunk_id else str(doc.metadata.get('id', id(doc)))
|
|
367
|
-
|
|
387
|
+
|
|
368
388
|
# Store document and its score
|
|
369
389
|
if doc_id not in unique_docs:
|
|
370
390
|
unique_docs[doc_id] = doc
|
|
@@ -384,9 +404,9 @@ class VectorStoreWrapperBase(BaseToolApiWrapper):
|
|
|
384
404
|
doc_filter = {
|
|
385
405
|
"$and": doc_filter_parts
|
|
386
406
|
}
|
|
387
|
-
|
|
407
|
+
|
|
388
408
|
try:
|
|
389
|
-
fetch_items = self.
|
|
409
|
+
fetch_items = self._similarity_search_with_score(
|
|
390
410
|
query, filter=doc_filter, k=1
|
|
391
411
|
)
|
|
392
412
|
if fetch_items:
|
|
@@ -400,7 +420,7 @@ class VectorStoreWrapperBase(BaseToolApiWrapper):
|
|
|
400
420
|
else:
|
|
401
421
|
# Default search behavior (unchanged)
|
|
402
422
|
max_search_results = 30 if search_top * 3 > 30 else search_top * 3
|
|
403
|
-
vector_items = self.
|
|
423
|
+
vector_items = self._similarity_search_with_score(
|
|
404
424
|
query, filter=filter, k=max_search_results
|
|
405
425
|
)
|
|
406
426
|
|
|
@@ -418,7 +438,7 @@ class VectorStoreWrapperBase(BaseToolApiWrapper):
|
|
|
418
438
|
doc_map = OrderedDict(
|
|
419
439
|
sorted(doc_map.items(), key=lambda x: x[1][1], reverse=True)
|
|
420
440
|
)
|
|
421
|
-
|
|
441
|
+
|
|
422
442
|
# Process full-text search if configured
|
|
423
443
|
if full_text_search and full_text_search.get('enabled') and full_text_search.get('fields'):
|
|
424
444
|
language = full_text_search.get('language', 'english')
|
|
@@ -431,7 +451,7 @@ class VectorStoreWrapperBase(BaseToolApiWrapper):
|
|
|
431
451
|
for field_name in full_text_search.get('fields', []):
|
|
432
452
|
try:
|
|
433
453
|
text_results = self.pg_helper.full_text_search(field_name, query)
|
|
434
|
-
|
|
454
|
+
|
|
435
455
|
# Combine text search results with vector results
|
|
436
456
|
for result in text_results:
|
|
437
457
|
doc_id = result['id']
|
|
@@ -7,8 +7,10 @@ from typing import Any, Optional, List, Dict, Generator
|
|
|
7
7
|
from langchain_core.documents import Document
|
|
8
8
|
from pydantic import create_model, Field, SecretStr
|
|
9
9
|
|
|
10
|
+
from .utils import make_json_serializable
|
|
10
11
|
from .utils.content_parser import file_extension_by_chunker, process_document_by_type
|
|
11
12
|
from .vector_adapters.VectorStoreAdapter import VectorStoreAdapterFactory
|
|
13
|
+
from ..runtime.langchain.document_loaders.constants import loaders_allowed_to_override
|
|
12
14
|
from ..runtime.tools.vectorstore_base import VectorStoreWrapperBase
|
|
13
15
|
from ..runtime.utils.utils import IndexerKeywords
|
|
14
16
|
|
|
@@ -93,7 +95,7 @@ BaseIndexDataParams = create_model(
|
|
|
93
95
|
description="Optional flag to enforce clean existing index before indexing new data")),
|
|
94
96
|
progress_step=(Optional[int], Field(default=10, ge=0, le=100,
|
|
95
97
|
description="Optional step size for progress reporting during indexing")),
|
|
96
|
-
chunking_config=(Optional[dict], Field(description="Chunking tool configuration",
|
|
98
|
+
chunking_config=(Optional[dict], Field(description="Chunking tool configuration", default=loaders_allowed_to_override)),
|
|
97
99
|
)
|
|
98
100
|
|
|
99
101
|
|
|
@@ -97,3 +97,20 @@ def check_connection_response(check_fun):
|
|
|
97
97
|
else:
|
|
98
98
|
return f"Service Unreachable: return code {response.status_code}"
|
|
99
99
|
return _wrapper
|
|
100
|
+
|
|
101
|
+
|
|
102
|
+
def make_json_serializable(obj):
|
|
103
|
+
if isinstance(obj, BaseModel):
|
|
104
|
+
return obj.model_dump()
|
|
105
|
+
if isinstance(obj, dict):
|
|
106
|
+
return {k: make_json_serializable(v) for k, v in obj.items()}
|
|
107
|
+
if isinstance(obj, list):
|
|
108
|
+
return [make_json_serializable(i) for i in obj]
|
|
109
|
+
if isinstance(obj, bool):
|
|
110
|
+
return bool(obj)
|
|
111
|
+
if isinstance(obj, (str, int, float)) or obj is None:
|
|
112
|
+
return obj
|
|
113
|
+
# Fallback: handle objects that look like booleans but were not caught above
|
|
114
|
+
if str(obj) in ("True", "False"):
|
|
115
|
+
return str(obj) == "True"
|
|
116
|
+
return str(obj)
|
|
@@ -262,18 +262,18 @@ def process_content_by_type(content, filename: str, llm=None, chunking_config=No
|
|
|
262
262
|
loader_kwargs = loader_config['kwargs']
|
|
263
263
|
# Determine which loader configuration keys are allowed to be overridden by user input.
|
|
264
264
|
# If 'allowed_to_override' is specified in the loader configuration, use it; otherwise, allow all keys in loader_kwargs.
|
|
265
|
-
allowed_to_override = loader_config.get('allowed_to_override',
|
|
265
|
+
allowed_to_override = loader_config.get('allowed_to_override', loader_kwargs)
|
|
266
266
|
# If a chunking_config is provided and contains custom configuration for the current file extension,
|
|
267
|
-
# update loader_kwargs with user-supplied values, but only for keys explicitly permitted in allowed_to_override.
|
|
267
|
+
# update loader_kwargs with user-supplied values, but only for keys explicitly permitted in allowed_to_override and if value differs from default.
|
|
268
268
|
# This ensures that only safe and intended parameters can be customized, preventing accidental or unauthorized changes
|
|
269
269
|
# to critical loader settings.
|
|
270
270
|
if chunking_config and (users_config_for_extension := chunking_config.get(extension, {})):
|
|
271
|
-
for key in set(users_config_for_extension.keys()) & set(allowed_to_override):
|
|
272
|
-
|
|
273
|
-
|
|
274
|
-
|
|
275
|
-
|
|
276
|
-
|
|
271
|
+
for key in set(users_config_for_extension.keys()) & set(allowed_to_override.keys()):
|
|
272
|
+
if users_config_for_extension[key] != allowed_to_override[key]:
|
|
273
|
+
loader_kwargs[key] = users_config_for_extension[key]
|
|
274
|
+
if LoaderProperties.LLM.value in loader_kwargs and loader_kwargs.pop(LoaderProperties.LLM.value):
|
|
275
|
+
loader_kwargs['llm'] = llm
|
|
276
|
+
if LoaderProperties.PROMPT_DEFAULT.value in loader_kwargs and loader_kwargs.pop(LoaderProperties.PROMPT_DEFAULT.value):
|
|
277
277
|
loader_kwargs[LoaderProperties.PROMPT.value] = image_processing_prompt
|
|
278
278
|
loader = loader_cls(file_path=temp_file_path, **loader_kwargs)
|
|
279
279
|
yield from loader.load()
|
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.4
|
|
2
2
|
Name: alita_sdk
|
|
3
|
-
Version: 0.3.
|
|
3
|
+
Version: 0.3.374
|
|
4
4
|
Summary: SDK for building langchain agents using resources from Alita
|
|
5
5
|
Author-email: Artem Rozumenko <artyom.rozumenko@gmail.com>, Mikalai Biazruchka <mikalai_biazruchka@epam.com>, Roman Mitusov <roman_mitusov@epam.com>, Ivan Krakhmaliuk <lifedj27@gmail.com>, Artem Dubrovskiy <ad13box@gmail.com>
|
|
6
6
|
License-Expression: Apache-2.0
|
|
@@ -35,16 +35,17 @@ alita_sdk/configurations/zephyr_enterprise.py,sha256=UaBk3qWcT2-bCzko5HEPvgxArw1
|
|
|
35
35
|
alita_sdk/configurations/zephyr_essential.py,sha256=tUIrh-PRNvdrLBj6rJXqlF-h6oaMXUQI1wgit07kFBw,752
|
|
36
36
|
alita_sdk/runtime/__init__.py,sha256=4W0UF-nl3QF2bvET5lnah4o24CoTwSoKXhuN0YnwvEE,828
|
|
37
37
|
alita_sdk/runtime/clients/__init__.py,sha256=BdehU5GBztN1Qi1Wul0cqlU46FxUfMnI6Vq2Zd_oq1M,296
|
|
38
|
-
alita_sdk/runtime/clients/artifact.py,sha256=
|
|
38
|
+
alita_sdk/runtime/clients/artifact.py,sha256=b7hVuGRROt6qUcT11uAZqzJqslzmlgW-Y6oGsiwNmjI,4029
|
|
39
39
|
alita_sdk/runtime/clients/client.py,sha256=BIF6QSnhlTfsTQ_dQs-QZjeBJHZsOtSuv_q7_ABUUQg,45737
|
|
40
40
|
alita_sdk/runtime/clients/datasource.py,sha256=HAZovoQN9jBg0_-lIlGBQzb4FJdczPhkHehAiVG3Wx0,1020
|
|
41
41
|
alita_sdk/runtime/clients/prompt.py,sha256=li1RG9eBwgNK_Qf0qUaZ8QNTmsncFrAL2pv3kbxZRZg,1447
|
|
42
|
+
alita_sdk/runtime/clients/sandbox_client.py,sha256=OhEasE0MxBBDw4o76xkxVCpNpr3xJ8spQsrsVxMrjUA,16192
|
|
42
43
|
alita_sdk/runtime/langchain/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
43
|
-
alita_sdk/runtime/langchain/assistant.py,sha256=
|
|
44
|
+
alita_sdk/runtime/langchain/assistant.py,sha256=YsxYNoaEidV02VlPwccdHP7PKeRRPp9M3tvUiYIDQ-I,15514
|
|
44
45
|
alita_sdk/runtime/langchain/chat_message_template.py,sha256=kPz8W2BG6IMyITFDA5oeb5BxVRkHEVZhuiGl4MBZKdc,2176
|
|
45
46
|
alita_sdk/runtime/langchain/constants.py,sha256=eHVJ_beJNTf1WJo4yq7KMK64fxsRvs3lKc34QCXSbpk,3319
|
|
46
47
|
alita_sdk/runtime/langchain/indexer.py,sha256=0ENHy5EOhThnAiYFc7QAsaTNp9rr8hDV_hTK8ahbatk,37592
|
|
47
|
-
alita_sdk/runtime/langchain/langraph_agent.py,sha256=
|
|
48
|
+
alita_sdk/runtime/langchain/langraph_agent.py,sha256=R4h_m_7NUgays7lt-F9WvKEOnGr1Yz7OgrmLMiGxurQ,48530
|
|
48
49
|
alita_sdk/runtime/langchain/mixedAgentParser.py,sha256=M256lvtsL3YtYflBCEp-rWKrKtcY1dJIyRGVv7KW9ME,2611
|
|
49
50
|
alita_sdk/runtime/langchain/mixedAgentRenderes.py,sha256=asBtKqm88QhZRILditjYICwFVKF5KfO38hu2O-WrSWE,5964
|
|
50
51
|
alita_sdk/runtime/langchain/store_manager.py,sha256=i8Fl11IXJhrBXq1F1ukEVln57B1IBe-tqSUvfUmBV4A,2218
|
|
@@ -70,7 +71,7 @@ alita_sdk/runtime/langchain/document_loaders/AlitaTableLoader.py,sha256=EO1nJDRP
|
|
|
70
71
|
alita_sdk/runtime/langchain/document_loaders/AlitaTextLoader.py,sha256=EiCIAF_OxSrbuwgOFk2IpxRMvFbctITt2jAI0g_atpk,3586
|
|
71
72
|
alita_sdk/runtime/langchain/document_loaders/ImageParser.py,sha256=RQ4zGdSw42ec8c6Eb48uFadayWuiT4FbwhGVwhSw60s,1065
|
|
72
73
|
alita_sdk/runtime/langchain/document_loaders/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
73
|
-
alita_sdk/runtime/langchain/document_loaders/constants.py,sha256=
|
|
74
|
+
alita_sdk/runtime/langchain/document_loaders/constants.py,sha256=L8sgQsD5LH8kMSWEnGkc00j5FsC2VZQcUV95jpnbQlQ,9007
|
|
74
75
|
alita_sdk/runtime/langchain/document_loaders/utils.py,sha256=9xghESf3axBbwxATyVuS0Yu-TWe8zWZnXgCD1ZVyNW0,2414
|
|
75
76
|
alita_sdk/runtime/langchain/interfaces/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
76
77
|
alita_sdk/runtime/langchain/interfaces/kwextractor.py,sha256=kSJA9L8g8UArmHu7Bd9dIO0Rrq86JPUb8RYNlnN68FQ,3072
|
|
@@ -109,7 +110,7 @@ alita_sdk/runtime/tools/application.py,sha256=z3vLZODs-_xEEnZFmGF0fKz1j3VtNJxqsA
|
|
|
109
110
|
alita_sdk/runtime/tools/artifact.py,sha256=u3szFwZqguHrPZ3tZJ7S_TiZl7cxlT3oHYd6zbdpRDE,13842
|
|
110
111
|
alita_sdk/runtime/tools/datasource.py,sha256=pvbaSfI-ThQQnjHG-QhYNSTYRnZB0rYtZFpjCfpzxYI,2443
|
|
111
112
|
alita_sdk/runtime/tools/echo.py,sha256=spw9eCweXzixJqHnZofHE1yWiSUa04L4VKycf3KCEaM,486
|
|
112
|
-
alita_sdk/runtime/tools/function.py,sha256=
|
|
113
|
+
alita_sdk/runtime/tools/function.py,sha256=0JL9D9NP31uzZ1G5br4Dhfop77l1wiqjx-7L8PHK4PA,6349
|
|
113
114
|
alita_sdk/runtime/tools/graph.py,sha256=MbnZYqdmvZY7SGDp43lOVVIjUt5ARHSgj43mdtBjSjQ,3092
|
|
114
115
|
alita_sdk/runtime/tools/image_generation.py,sha256=8ZH4SoRrbS4EzmtF6cpNMRvuFephCYD2S8uqNC9KGE4,4274
|
|
115
116
|
alita_sdk/runtime/tools/indexer_tool.py,sha256=whSLPevB4WD6dhh2JDXEivDmTvbjiMV1MrPl9cz5eLA,4375
|
|
@@ -120,10 +121,10 @@ alita_sdk/runtime/tools/mcp_server_tool.py,sha256=MhLxZJ44LYrB_0GrojmkyqKoDRaqIH
|
|
|
120
121
|
alita_sdk/runtime/tools/pgvector_search.py,sha256=NN2BGAnq4SsDHIhUcFZ8d_dbEOM8QwB0UwpsWCYruXU,11692
|
|
121
122
|
alita_sdk/runtime/tools/prompt.py,sha256=nJafb_e5aOM1Rr3qGFCR-SKziU9uCsiP2okIMs9PppM,741
|
|
122
123
|
alita_sdk/runtime/tools/router.py,sha256=p7e0tX6YAWw2M2Nq0A_xqw1E2P-Xz1DaJvhUstfoZn4,1584
|
|
123
|
-
alita_sdk/runtime/tools/sandbox.py,sha256=
|
|
124
|
+
alita_sdk/runtime/tools/sandbox.py,sha256=0OjCNsDVO1N0cFNEFVr6GVICSaqGWesUzF6LcYg-Hn0,11349
|
|
124
125
|
alita_sdk/runtime/tools/tool.py,sha256=lE1hGi6qOAXG7qxtqxarD_XMQqTghdywf261DZawwno,5631
|
|
125
126
|
alita_sdk/runtime/tools/vectorstore.py,sha256=8vRhi1lGFEs3unvnflEi2p59U2MfV32lStpEizpDms0,34467
|
|
126
|
-
alita_sdk/runtime/tools/vectorstore_base.py,sha256=
|
|
127
|
+
alita_sdk/runtime/tools/vectorstore_base.py,sha256=wixvgLrC2tQOeIjFMCD-7869K7YfERzk2Tzmo-fgsTE,28350
|
|
127
128
|
alita_sdk/runtime/utils/AlitaCallback.py,sha256=E4LlSBuCHWiUq6W7IZExERHZY0qcmdjzc_rJlF2iQIw,7356
|
|
128
129
|
alita_sdk/runtime/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
129
130
|
alita_sdk/runtime/utils/constants.py,sha256=Xntx1b_uxUzT4clwqHA_U6K8y5bBqf_4lSQwXdcWrp4,13586
|
|
@@ -135,7 +136,7 @@ alita_sdk/runtime/utils/toolkit_runtime.py,sha256=MU63Fpxj0b5_r1IUUc0Q3-PN9VwL7r
|
|
|
135
136
|
alita_sdk/runtime/utils/toolkit_utils.py,sha256=I9QFqnaqfVgN26LUr6s3XlBlG6y0CoHURnCzG7XcwVs,5311
|
|
136
137
|
alita_sdk/runtime/utils/utils.py,sha256=BVEVLkYiiotcUD0XsHyx-wACpHfALsQg7PLZpObqvK8,1008
|
|
137
138
|
alita_sdk/tools/__init__.py,sha256=jUj1ztC2FbkIUB-YYmiqaz_rqW7Il5kWzDPn1mJmj5w,10545
|
|
138
|
-
alita_sdk/tools/base_indexer_toolkit.py,sha256=
|
|
139
|
+
alita_sdk/tools/base_indexer_toolkit.py,sha256=R8PQ1FZijtCT6LPuma68B1X6x0umH7gyROKwtp0xabw,27044
|
|
139
140
|
alita_sdk/tools/code_indexer_toolkit.py,sha256=6QvI1by0OFdnKTx5TfNoDJjnMrvnTi9T56xaDxzeleU,7306
|
|
140
141
|
alita_sdk/tools/elitea_base.py,sha256=up3HshASSDfjlHV_HPrs1aD4JIwwX0Ug26WGTzgIYvY,34724
|
|
141
142
|
alita_sdk/tools/non_code_indexer_toolkit.py,sha256=B3QvhpT1F9QidkCcsOi3J_QrTOaNlTxqWFwe90VivQQ,1329
|
|
@@ -328,9 +329,9 @@ alita_sdk/tools/testio/__init__.py,sha256=NEvQtzsffqAXryaffVk0GpdcxZQ1AMkfeztnxH
|
|
|
328
329
|
alita_sdk/tools/testio/api_wrapper.py,sha256=BvmL5h634BzG6p7ajnQLmj-uoAw1gjWnd4FHHu1h--Q,21638
|
|
329
330
|
alita_sdk/tools/testrail/__init__.py,sha256=Xg4nVjULL_D8JpIXLYXppnwUfGF4-lguFwKHmP5VwxM,4696
|
|
330
331
|
alita_sdk/tools/testrail/api_wrapper.py,sha256=tQcGlFJmftvs5ZiO4tsP19fCo4CrJeq_UEvQR1liVfE,39891
|
|
331
|
-
alita_sdk/tools/utils/__init__.py,sha256=
|
|
332
|
+
alita_sdk/tools/utils/__init__.py,sha256=xB9OQgW65DftadrSpoAAitnEIbIXZKBOCji0NDe7FRM,3923
|
|
332
333
|
alita_sdk/tools/utils/available_tools_decorator.py,sha256=IbrdfeQkswxUFgvvN7-dyLMZMyXLiwvX7kgi3phciCk,273
|
|
333
|
-
alita_sdk/tools/utils/content_parser.py,sha256=
|
|
334
|
+
alita_sdk/tools/utils/content_parser.py,sha256=7ohj8HeL_-rmc-Fv0TS8IpxIQC8tOpfuhyT3XlWx-gQ,15368
|
|
334
335
|
alita_sdk/tools/vector_adapters/VectorStoreAdapter.py,sha256=p_9Cu5eausnfiKNsitbVxwu5eimZHRv3R-OMw7lBrts,19173
|
|
335
336
|
alita_sdk/tools/vector_adapters/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
336
337
|
alita_sdk/tools/xray/__init__.py,sha256=eOMWP8VamFbbJgt1xrGpGPqB9ByOTA0Cd3LCaETzGk4,4376
|
|
@@ -352,8 +353,8 @@ alita_sdk/tools/zephyr_scale/api_wrapper.py,sha256=kT0TbmMvuKhDUZc0i7KO18O38JM9S
|
|
|
352
353
|
alita_sdk/tools/zephyr_squad/__init__.py,sha256=0ne8XLJEQSLOWfzd2HdnqOYmQlUliKHbBED5kW_Vias,2895
|
|
353
354
|
alita_sdk/tools/zephyr_squad/api_wrapper.py,sha256=kmw_xol8YIYFplBLWTqP_VKPRhL_1ItDD0_vXTe_UuI,14906
|
|
354
355
|
alita_sdk/tools/zephyr_squad/zephyr_squad_cloud_client.py,sha256=R371waHsms4sllHCbijKYs90C-9Yu0sSR3N4SUfQOgU,5066
|
|
355
|
-
alita_sdk-0.3.
|
|
356
|
-
alita_sdk-0.3.
|
|
357
|
-
alita_sdk-0.3.
|
|
358
|
-
alita_sdk-0.3.
|
|
359
|
-
alita_sdk-0.3.
|
|
356
|
+
alita_sdk-0.3.374.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
|
|
357
|
+
alita_sdk-0.3.374.dist-info/METADATA,sha256=b-L7XNDZ_LNpW-hoB_pDqOchYCdw9fOUStiXnQfSxUM,19071
|
|
358
|
+
alita_sdk-0.3.374.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
|
|
359
|
+
alita_sdk-0.3.374.dist-info/top_level.txt,sha256=0vJYy5p_jK6AwVb1aqXr7Kgqgk3WDtQ6t5C-XI9zkmg,10
|
|
360
|
+
alita_sdk-0.3.374.dist-info/RECORD,,
|
|
File without changes
|
|
File without changes
|
|
File without changes
|