alita-sdk 0.3.163__py3-none-any.whl → 0.3.165__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
alita_sdk/runtime/langchain/assistant.py

@@ -61,8 +61,8 @@ class Assistant:
                         "Review toolkits configuration or use pipeline as master agent.")
 
         # configure memory store if memory tool is defined
-        # memory_tool = next((tool for tool in data['tools'] if tool['type'] == 'memory'), None)
-        # self._configure_store(memory_tool)
+        memory_tool = next((tool for tool in data['tools'] if tool['type'] == 'memory'), None)
+        self._configure_store(memory_tool)
 
         # Lazy import to avoid circular dependency
         from ..toolkits.tools import get_tools
alita_sdk/runtime/langchain/store_manager.py

@@ -1,6 +1,8 @@
 import threading
 import atexit
 import logging
+from urllib.parse import urlparse, unquote
+
 from psycopg import Connection
 from langgraph.store.postgres import PostgresStore
 
@@ -18,11 +20,30 @@ class StoreManager:
                 cls._instance._stores = {}
         return cls._instance
 
+    def _parse_connection_string(self, conn_str: str) -> dict:
+        """
+        Parse the connection string from SQLAlchemy style to args dict.
+        """
+        if conn_str.startswith("postgresql+psycopg://"):
+            url = conn_str[len("postgresql+psycopg://"):]
+
+        parsed = urlparse(f"//{url}")
+
+        return {
+            "user": unquote(parsed.username) if parsed.username else None,
+            "password": unquote(parsed.password) if parsed.password else None,
+            "host": parsed.hostname,
+            "port": parsed.port,
+            "dbname": parsed.path.lstrip("/") if parsed.path else None
+        }
+
     def get_store(self, conn_str: str) -> PostgresStore:
         store = self._stores.get(conn_str)
         if store is None:
             logger.info(f"Creating new PostgresStore for connection: {conn_str}")
-            conn = Connection.connect(conn_str, autocommit=True, prepare_threshold=0)
+            conn_params = self._parse_connection_string(conn_str)
+            conn_params.update({'autocommit': True, 'prepare_threshold': 0})
+            conn = Connection.connect(**conn_params)
             store = PostgresStore(conn)
             store.setup()
             self._stores[conn_str] = store
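The new `_parse_connection_string` helper turns a SQLAlchemy-style DSN into keyword arguments for `psycopg.Connection.connect`; percent-decoding matters because credentials in such URLs are URL-encoded. A standalone sketch mirroring the helper above (the DSN value is hypothetical):

    from urllib.parse import urlparse, unquote

    conn_str = "postgresql+psycopg://user:p%40ss@db.example.com:5432/alita"  # hypothetical DSN
    if conn_str.startswith("postgresql+psycopg://"):
        conn_str = conn_str[len("postgresql+psycopg://"):]
    parsed = urlparse(f"//{conn_str}")  # leading "//" makes urlparse treat the rest as a netloc
    params = {
        "user": unquote(parsed.username) if parsed.username else None,      # "user"
        "password": unquote(parsed.password) if parsed.password else None,  # "p@ss", percent-decoded
        "host": parsed.hostname,                                            # "db.example.com"
        "port": parsed.port,                                                # 5432
        "dbname": parsed.path.lstrip("/") if parsed.path else None,         # "alita"
    }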
alita_sdk/runtime/toolkits/tools.py

@@ -95,7 +95,7 @@ def get_tools(tools_list: list, alita_client, llm, memory_store: BaseStore = Non
     # Add community tools
     tools += community_tools(tools_list, alita_client, llm)
     # Add alita tools
-    tools += alita_tools(tools_list, alita_client, llm)
+    tools += alita_tools(tools_list, alita_client, llm, memory_store)
     # Add MCP tools
     tools += _mcp_tools(tools_list, alita_client)
 
alita_sdk/tools/__init__.py

@@ -1,5 +1,8 @@
 import logging
 from importlib import import_module
+from typing import Optional
+
+from langgraph.store.base import BaseStore
 
 logger = logging.getLogger(__name__)
 
@@ -74,6 +77,7 @@ _safe_import_tool('carrier', 'carrier', 'get_tools', 'AlitaCarrierToolkit')
 _safe_import_tool('ocr', 'ocr', 'get_tools', 'OCRToolkit')
 _safe_import_tool('pptx', 'pptx', 'get_tools', 'PPTXToolkit')
 _safe_import_tool('postman', 'postman', 'get_tools', 'PostmanToolkit')
+_safe_import_tool('memory', 'memory', 'get_tools', 'MemoryToolkit')
 _safe_import_tool('zephyr_squad', 'zephyr_squad', 'get_tools', 'ZephyrSquadToolkit')
 
 # Log import summary
@@ -81,7 +85,7 @@ available_count = len(AVAILABLE_TOOLS)
 total_attempted = len(AVAILABLE_TOOLS) + len(FAILED_IMPORTS)
 logger.info(f"Tool imports completed: {available_count}/{total_attempted} successful")
 
-def get_tools(tools_list, alita, llm, *args, **kwargs):
+def get_tools(tools_list, alita, llm, store: Optional[BaseStore] = None, *args, **kwargs):
     tools = []
     for tool in tools_list:
         # validate tool name syntax - it cannot be started with _
@@ -91,6 +95,7 @@ def get_tools(tools_list, alita, llm, *args, **kwargs):
 
         tool['settings']['alita'] = alita
         tool['settings']['llm'] = llm
+        tool['settings']['store'] = store
         tool_type = tool['type']
 
         # Check if tool is available and has get_tools function
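Together with the `alita_tools(..., memory_store)` call above, this threads one `BaseStore` instance from the runtime into every toolkit's settings. A hypothetical tool entry as it would look after the injection loop (keys are illustrative; only `store` is new in this release):

    # Hypothetical stand-ins for the real client, model, and store
    alita_client, llm, store = object(), object(), None

    tool = {
        'type': 'memory',
        'id': 42,                          # falls back to the namespace for the memory toolkit
        'toolkit_name': 'project-memory',
        'settings': {'namespace': 'project-42'},
    }

    # What the loop above does for every configured tool
    tool['settings']['alita'] = alita_client
    tool['settings']['llm'] = llm
    tool['settings']['store'] = store      # Optional[BaseStore]; None when no memory store is configured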
alita_sdk/tools/carrier/api_wrapper.py

@@ -80,8 +80,8 @@ class CarrierAPIWrapper(BaseModel):
     def get_report_file_log(self, bucket: str, file_name: str):
         return self._client.get_report_file_log(bucket, file_name)
 
-    def upload_excel_report(self, bucket_name: str, excel_report_name: str):
-        return self._client.upload_excel_report(bucket_name, excel_report_name)
+    def upload_file(self, bucket_name: str, file_name: str):
+        return self._client.upload_file(bucket_name, file_name)
 
     def get_ui_reports_list(self) -> List[Dict[str, Any]]:
         """Get list of UI test reports from the Carrier platform."""
alita_sdk/tools/carrier/backend_reports_tool.py

@@ -1,6 +1,8 @@
 import logging
 from datetime import datetime
 import json
+import zipfile
+from itertools import islice
 import traceback
 from typing import Type
 from langchain_core.tools import BaseTool, ToolException
@@ -78,14 +80,31 @@ class GetReportByIDTool(BaseTool):
 
     def _run(self, report_id: str):
         try:
-            reports = self.api_wrapper.get_reports_list()
-            report_data = {}
-            for report in reports:
-                if report_id == str(report["id"]):
-                    report_data = report
-                    break
-
-            return json.dumps(report_data)
+            report, test_log_file_path, errors_log_file_path = self.api_wrapper.get_report_file_name(report_id)
+            try:
+                with open(errors_log_file_path, mode='r') as f:
+                    # Use islice to read up to 100 lines
+                    errors = list(islice(f, 100))
+                    report["errors_log"] = errors
+                # Archive with errors log file path
+                zip_file_path = f'/tmp/{report["build_id"]}_error_log_archive.zip'
+
+                # Create zip archive
+                with zipfile.ZipFile(zip_file_path, 'w', zipfile.ZIP_DEFLATED) as zipf:
+                    arcname = os.path.basename(errors_log_file_path)
+                    zipf.write(errors_log_file_path, arcname)
+
+                bucket_name = report["name"].replace("_", "").replace(" ", "").lower()
+                self.api_wrapper.upload_file(bucket_name, zip_file_path)
+                report["link_to_errors_file"] = f"{self.api_wrapper.url.rstrip('/')}/api/v1/artifacts/artifact/default/" \
+                                                f"{self.api_wrapper.project_id}/{bucket_name}/" \
+                                                f"{zip_file_path.replace('/tmp/', '')}"
+            except Exception as e:
+                logger.error(e)
+                report["errors_log"] = []
+                report["link_to_errors_file"] = "link is not available"
+
+            return json.dumps(report)
         except Exception:
             stacktrace = traceback.format_exc()
             logger.error(f"Error downloading reports: {stacktrace}")
@@ -164,17 +183,17 @@ class CreateExcelReportTool(BaseTool):
 
     def _process_report_by_id(self, report_id, parameters):
         """Process report using report ID."""
-        report, file_path = self.api_wrapper.get_report_file_name(report_id)
+        report, test_log_file_path, errors_log_file_path = self.api_wrapper.get_report_file_name(report_id)
         carrier_report = f"{self.api_wrapper.url.rstrip('/')}/-/performance/backend/results?result_id={report_id}"
         lg_type = report.get("lg_type")
         excel_report_file_name = f'/tmp/reports_test_results_{report["build_id"]}_excel_report.xlsx'
         bucket_name = report["name"].replace("_", "").replace(" ", "").lower()
 
-        result_stats_j = self._parse_report(file_path, lg_type, parameters["think_time"], is_absolute_file_path=True)
+        result_stats_j = self._parse_report(test_log_file_path, lg_type, parameters["think_time"], is_absolute_file_path=True)
         calc_thr_j = self._calculate_thresholds(result_stats_j, parameters)
 
         return self._generate_and_upload_report(
-            result_stats_j, carrier_report, calc_thr_j, parameters, excel_report_file_name, bucket_name, file_path
+            result_stats_j, carrier_report, calc_thr_j, parameters, excel_report_file_name, bucket_name, test_log_file_path
         )
 
     def _process_report_by_file(self, bucket, file_name, parameters):
@@ -233,7 +252,7 @@ class CreateExcelReportTool(BaseTool):
         excel_reporter_object.prepare_headers_and_titles()
         excel_reporter_object.write_to_excel(result_stats_j, carrier_report, calc_thr_j, parameters["pct"])
 
-        self.api_wrapper.upload_excel_report(bucket_name, excel_report_file_name)
+        self.api_wrapper.upload_file(bucket_name, excel_report_file_name)
 
         # Clean up
         self._cleanup(file_path, excel_report_file_name)
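The error-log handling in `GetReportByIDTool._run` above caps the in-memory read at 100 lines with `itertools.islice` and ships the full file as a zip. A self-contained sketch of that pattern (paths and contents are fabricated for illustration):

    import os
    import zipfile
    from itertools import islice

    log_path = "/tmp/error_example.log"          # hypothetical input
    zip_path = "/tmp/error_example_archive.zip"  # hypothetical output

    # Fabricate a log so the sketch runs on its own
    with open(log_path, "w") as f:
        f.writelines(f"error line {i}\n" for i in range(500))

    # Read at most 100 lines without loading the whole file into memory
    with open(log_path) as f:
        first_lines = list(islice(f, 100))
    assert len(first_lines) == 100

    # Archive the complete file under its basename
    with zipfile.ZipFile(zip_path, "w", zipfile.ZIP_DEFLATED) as zipf:
        zipf.write(log_path, arcname=os.path.basename(log_path))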
alita_sdk/tools/carrier/backend_tests_tool.py

@@ -80,22 +80,28 @@ class RunTestByIDTool(BaseTool):
     description: str = "Execute test plan from the Carrier platform."
     args_schema: Type[BaseModel] = create_model(
         "RunTestByIdInput",
-        test_id=(str, Field(default="", description="Test id to execute")),
+        test_id=(str, Field(default=None, description="Test id to execute")),
+        name=(str, Field(default=None, description="Test name to execute")),
         test_parameters=(dict, Field(default=None, description="Test parameters to override")),
     )
 
-    def _run(self, test_id: str, test_parameters=None):
+    def _run(self, test_id=None, name=None, test_parameters=None):
         try:
+            if not test_id and not name:
+                return {"message": "Please provide test id or test name to start"}
+
             # Fetch test data
             tests = self.api_wrapper.get_tests_list()
-            test_data = {}
-            for test in tests:
-                if test_id == str(test["id"]):
-                    test_data = test
-                    break
+
+            # Find the test data based on test_id or name
+            test_data = next(
+                (test for test in tests if
+                 (test_id and str(test["id"]) == test_id) or (name and str(test["name"]) == name)),
+                None
+            )
 
             if not test_data:
-                raise ValueError(f"Test with id {test_id} not found.")
+                raise ValueError(f"Test with id {test_id} or name {name} not found.")
 
             # Default test parameters
             default_test_parameters = test_data.get("test_parameters", [])
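The lookup now matches on either id or name in a single pass. A minimal standalone illustration of the `next(...)` pattern used above (sample data is hypothetical):

    tests = [{"id": 1, "name": "smoke"}, {"id": 2, "name": "load"}]  # hypothetical test list

    def find_test(tests, test_id=None, name=None):
        # First test matching either criterion, else None
        return next(
            (t for t in tests
             if (test_id and str(t["id"]) == test_id) or (name and str(t["name"]) == name)),
            None,
        )

    assert find_test(tests, test_id="2")["name"] == "load"
    assert find_test(tests, name="smoke")["id"] == 1
    assert find_test(tests) is None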
alita_sdk/tools/carrier/carrier_sdk.py

@@ -128,15 +128,17 @@ class CarrierClient(BaseModel):
         for file_name in file_list:
             if file_name.startswith(report_archive_prefix) and "excel_report" not in file_name:
                 report_files_list.append(file_name)
-        file_path = self.download_and_merge_reports(report_files_list, lg_type, bucket_name, extract_to)
+        test_log_file_path, errors_log_file_path = self.download_and_merge_reports(report_files_list, lg_type, bucket_name, extract_to)
 
-        return report_info, file_path
+        return report_info, test_log_file_path, errors_log_file_path
 
-    def download_and_merge_reports(self, report_files_list: list, lg_type: str, bucket: str, extract_to: str = "/tmp") -> str:
+    def download_and_merge_reports(self, report_files_list: list, lg_type: str, bucket: str, extract_to: str = "/tmp"):
         if lg_type == "jmeter":
             summary_log_file_path = f"summary_{bucket}_jmeter.jtl"
+            error_log_file_path = f"error_{bucket}_jmeter.log"
         else:
             summary_log_file_path = f"summary_{bucket}_simulation.log"
+            error_log_file_path = f"error_{bucket}_simulation.log"
         extracted_reports = []
         for each in report_files_list:
             endpoint = f"api/v1/artifacts/artifact/{self.credentials.project_id}/{bucket}/{each}"
@@ -158,10 +160,21 @@ class CarrierClient(BaseModel):
             os.remove(local_file_path)
             extracted_reports.append(extract_dir)
 
-        # get files from extract_dirs and merge to summary_log_file_path
+        # get files from extract_dirs and merge to summary_log_file_path and error_log_file_path
         self.merge_log_files(summary_log_file_path, extracted_reports, lg_type)
+        try:
+            self.merge_error_files(error_log_file_path, extracted_reports)
+        except Exception as e:
+            logger.error(f"Failed to merge errors log: {e}")
+
+        # Clean up
+        for each in extracted_reports:
+            try:
+                shutil.rmtree(each)
+            except Exception as e:
+                logger.error(e)
 
-        return summary_log_file_path
+        return summary_log_file_path, error_log_file_path
 
     def merge_log_files(self, summary_file, extracted_reports, lg_type):
         with open(summary_file, mode='w') as summary:
@@ -178,11 +191,14 @@ class CarrierClient(BaseModel):
                 else:
                     # Skip the first line (header) for subsequent files
                     summary.writelines(lines[1:])
-        for each in extracted_reports:
-            try:
-                shutil.rmtree(each)
-            except Exception as e:
-                logger.error(e)
+
+    def merge_error_files(self, error_file, extracted_reports):
+        with open(error_file, mode='w') as summary_errors:
+            for i, log_file in enumerate(extracted_reports):
+                report_file = f"{log_file}/simulation-errors.log"
+                with open(report_file, mode='r') as f:
+                    lines = f.readlines()
+                    summary_errors.writelines(lines)
 
     def get_report_file_log(self, bucket: str, file_name: str):
         bucket_endpoint = f"api/v1/artifacts/artifact/default/{self.credentials.project_id}/{bucket}/{file_name}"
@@ -195,10 +211,10 @@ class CarrierClient(BaseModel):
             f.write(response.content)
         return file_path
 
-    def upload_excel_report(self, bucket_name: str, excel_report_name: str):
+    def upload_file(self, bucket_name: str, file_name: str):
         upload_url = f'api/v1/artifacts/artifacts/{self.credentials.project_id}/{bucket_name}'
         full_url = f"{self.credentials.url.rstrip('/')}/{upload_url.lstrip('/')}"
-        files = {'file': open(excel_report_name, 'rb')}
+        files = {'file': open(file_name, 'rb')}
         headers = {'Authorization': f'bearer {self.credentials.token}'}
         s3_config = {'integration_id': 1, 'is_local': False}
         requests.post(full_url, params=s3_config, allow_redirects=True, files=files, headers=headers)
@@ -231,22 +247,22 @@ class CarrierClient(BaseModel):
     def create_ui_test(self, json_body: Dict[str, Any]) -> Dict[str, Any]:
         """Create a new UI test."""
         endpoint = f"api/v1/ui_performance/tests/{self.credentials.project_id}"
-        
+
         # Print full JSON POST body for debugging
         print("=" * 60)
         print("DEBUG: Full JSON POST body for create_ui_test:")
         print("=" * 60)
         print(json.dumps(json_body, indent=2))
         print("=" * 60)
-        
+
         # Use multipart/form-data with data field containing the JSON body
         form_data = {'data': json.dumps(json_body)}
-        
+
         # Temporarily remove Content-Type header to let requests set it for multipart
         original_headers = self.session.headers.copy()
         if 'Content-Type' in self.session.headers:
             del self.session.headers['Content-Type']
-        
+
         try:
             full_url = f"{self.credentials.url.rstrip('/')}/{endpoint.lstrip('/')}"
             response = self.session.post(full_url, data=form_data)
@@ -265,7 +281,7 @@ class CarrierClient(BaseModel):
     def cancel_ui_test(self, test_id: str) -> Dict[str, Any]:
         """Cancel a UI test by setting its status to Canceled."""
         endpoint = f"api/v1/ui_performance/report_status/{self.credentials.project_id}/{test_id}"
-        
+
         cancel_body = {
             "test_status": {
                 "status": "Canceled",
@@ -273,5 +289,5 @@ class CarrierClient(BaseModel):
                 "description": "Test was canceled"
             }
         }
-        
+
         return self.request('put', endpoint, json=cancel_body)
alita_sdk/tools/carrier/create_ui_excel_report_tool.py

@@ -148,7 +148,7 @@ class CreateUIExcelReportTool(BaseTool):
         bucket_name = report_name.replace("_", "").replace(" ", "").lower()
         excel_file_basename = os.path.basename(excel_file_name)
 
-        self.api_wrapper.upload_excel_report(bucket_name, excel_file_name)
+        self.api_wrapper.upload_file(bucket_name, excel_file_name)
 
         # Clean up temporary file
         if os.path.exists(excel_file_name):
alita_sdk/tools/github/github_client.py

@@ -491,8 +491,16 @@ class GitHubClient(BaseModel):
         """
         try:
             patch_content = self.alita.download_artifact(bucket_name, file_name)
-            if not patch_content or not isinstance(patch_content, str):
+
+            if not patch_content:
                 return {"error": "Patch file not found", "message": f"Patch file '{file_name}' not found in bucket '{bucket_name}'."}
+
+            # Convert bytes to string if necessary
+            if isinstance(patch_content, bytes):
+                patch_content = patch_content.decode('utf-8')
+            elif not isinstance(patch_content, str):
+                return {"error": "Invalid patch content", "message": f"Patch file '{file_name}' contains invalid content type."}
+
             # Apply the git patch using the content
             return self.apply_git_patch(patch_content, commit_message, repo_name)
         except Exception as e:
alita_sdk/tools/memory/__init__.py

@@ -15,6 +15,13 @@ from pydantic import create_model, BaseModel, ConfigDict, Field, SecretStr
 
 name = "memory"
 
+def get_tools(tool):
+    return MemoryToolkit().get_toolkit(
+        namespace=tool['settings'].get('namespace', str(tool['id'])),
+        store=tool['settings'].get('store', None),
+        toolkit_name=tool.get('toolkit_name', '')
+    ).get_tools()
+
 class MemoryToolkit(BaseToolkit):
     tools: List[BaseTool] = []
 
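This module-level `get_tools` is the entry point registered via `_safe_import_tool('memory', ...)` above. A hedged sketch of how it would be driven, assuming the runtime has injected a store into the tool's settings (names are illustrative; `InMemoryStore` stands in for the PostgresStore the runtime provides):

    from langgraph.store.memory import InMemoryStore  # stand-in for the injected store

    tool = {
        'id': 7,
        'toolkit_name': 'notes',
        'settings': {'namespace': 'notes-7', 'store': InMemoryStore()},
    }

    # Resolves namespace/store from settings and returns the toolkit's BaseTool list
    memory_tools = get_tools(tool)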
alita_sdk/tools/postman/__init__.py

@@ -1,4 +1,5 @@
 from typing import List, Literal, Optional, Type
+import json
 
 import requests
 from langchain_core.tools import BaseToolkit, BaseTool
@@ -25,6 +26,8 @@ class PostmanAction(BaseAction):
         return v.replace(' ', '')
 
 def get_tools(tool):
+    # Parse environment_config if it's a string (from UI)
+    environment_config = tool['settings'].get('environment_config', {})
     toolkit = PostmanToolkit.get_toolkit(
         selected_tools=tool['settings'].get('selected_tools', []),
         api_key=tool['settings'].get('api_key', None),
@@ -32,6 +35,7 @@ def get_tools(tool):
             'base_url', 'https://api.getpostman.com'),
         collection_id=tool['settings'].get('collection_id', None),
         workspace_id=tool['settings'].get('workspace_id', None),
+        environment_config=environment_config,
         toolkit_name=tool.get('toolkit_name')
     )
     return toolkit.tools
@@ -57,6 +61,9 @@ class PostmanToolkit(BaseToolkit):
             'toolkit_name': True, 'max_toolkit_length': PostmanToolkit.toolkit_max_length})),
         workspace_id=(str, Field(description="Default workspace ID",
                                  default="", json_schema_extra={'configuration': True})),
+        environment_config=(dict, Field(
+            description="JSON configuration for request execution (auth headers, project IDs, base URLs, etc.)",
+            default={})),
         selected_tools=(List[Literal[tuple(selected_tools)]], Field(
             default=[], json_schema_extra={'args_schemas': selected_tools})),
         __config__=ConfigDict(json_schema_extra={'metadata': {
alita_sdk/tools/postman/api_wrapper.py

@@ -256,6 +256,12 @@ PostmanGetRequestScript = create_model(
     script_type=(str, Field(description="The type of script to retrieve: 'test' or 'prerequest'", default="prerequest"))
 )
 
+PostmanExecuteRequest = create_model(
+    "PostmanExecuteRequest",
+    request_path=(str, Field(description="The path to the request in the collection (e.g., 'API/Users/Get User')")),
+    override_variables=(Optional[Dict[str, Any]], Field(description="Optional variables to override environment/collection variables", default=None))
+)
+
 
 class PostmanApiWrapper(BaseToolApiWrapper):
     """Wrapper for Postman API."""
@@ -264,6 +270,7 @@ class PostmanApiWrapper(BaseToolApiWrapper):
     base_url: str = "https://api.getpostman.com"
     collection_id: Optional[str] = None
     workspace_id: Optional[str] = None
+    environment_config: dict = {}
     timeout: int = 30
     session: Any = None
     analyzer: PostmanAnalyzer = None
@@ -318,6 +325,108 @@ class PostmanApiWrapper(BaseToolApiWrapper):
             raise ToolException(
                 f"Invalid JSON response from Postman API: {str(e)}")
 
+    def _apply_authentication(self, headers, params, all_variables, resolve_variables):
+        """Apply authentication based on environment_config auth settings.
+
+        Supports multiple authentication types:
+        - bearer: Bearer token in Authorization header
+        - basic: Basic authentication in Authorization header
+        - api_key: API key in header, query parameter, or cookie
+        - oauth2: OAuth2 access token in Authorization header
+        - custom: Custom headers, cookies, or query parameters
+
+        Required format:
+        environment_config = {
+            "auth": {
+                "type": "bearer|basic|api_key|oauth2|custom",
+                "params": {
+                    # type-specific parameters
+                }
+            }
+        }
+        """
+        import base64
+
+        # Handle structured auth configuration only - no backward compatibility
+        auth_config = self.environment_config.get('auth')
+        if auth_config and isinstance(auth_config, dict):
+            auth_type = auth_config.get('type', '').lower()
+            auth_params = auth_config.get('params', {})
+
+            if auth_type == 'bearer':
+                # Bearer token authentication
+                token = resolve_variables(str(auth_params.get('token', '')))
+                if token:
+                    headers['Authorization'] = f'Bearer {token}'
+
+            elif auth_type == 'basic':
+                # Basic authentication
+                username = resolve_variables(str(auth_params.get('username', '')))
+                password = resolve_variables(str(auth_params.get('password', '')))
+                if username and password:
+                    credentials = base64.b64encode(f"{username}:{password}".encode()).decode()
+                    headers['Authorization'] = f'Basic {credentials}'
+
+            elif auth_type == 'api_key':
+                # API key authentication
+                key_name = resolve_variables(str(auth_params.get('key', '')))
+                key_value = resolve_variables(str(auth_params.get('value', '')))
+                key_location = auth_params.get('in', 'header').lower()
+
+                if key_name and key_value:
+                    if key_location == 'header':
+                        headers[key_name] = key_value
+                    elif key_location == 'query':
+                        params[key_name] = key_value
+                    elif key_location == 'cookie':
+                        # Add to Cookie header
+                        existing_cookies = headers.get('Cookie', '')
+                        new_cookie = f"{key_name}={key_value}"
+                        if existing_cookies:
+                            headers['Cookie'] = f"{existing_cookies}; {new_cookie}"
+                        else:
+                            headers['Cookie'] = new_cookie
+
+            elif auth_type == 'oauth2':
+                # OAuth2 access token
+                access_token = resolve_variables(str(auth_params.get('access_token', '')))
+                if access_token:
+                    headers['Authorization'] = f'Bearer {access_token}'
+
+            elif auth_type == 'custom':
+                # Custom authentication - allows full control
+                custom_headers = auth_params.get('headers', {})
+                custom_cookies = auth_params.get('cookies', {})
+                custom_query = auth_params.get('query', {})
+
+                # Add custom headers
+                for key, value in custom_headers.items():
+                    resolved_key = resolve_variables(str(key))
+                    resolved_value = resolve_variables(str(value))
+                    headers[resolved_key] = resolved_value
+
+                # Add custom query parameters
+                for key, value in custom_query.items():
+                    resolved_key = resolve_variables(str(key))
+                    resolved_value = resolve_variables(str(value))
+                    params[resolved_key] = resolved_value
+
+                # Add custom cookies
+                if custom_cookies:
+                    cookie_parts = []
+                    for key, value in custom_cookies.items():
+                        resolved_key = resolve_variables(str(key))
+                        resolved_value = resolve_variables(str(value))
+                        cookie_parts.append(f"{resolved_key}={resolved_value}")
+
+                    existing_cookies = headers.get('Cookie', '')
+                    new_cookies = "; ".join(cookie_parts)
+                    if existing_cookies:
+                        headers['Cookie'] = f"{existing_cookies}; {new_cookies}"
+                    else:
+                        headers['Cookie'] = new_cookies
+
+
     def get_available_tools(self):
         """Return list of available tools with their configurations."""
         return [
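The docstring above fixes the accepted shape of `environment_config`. For instance, a bearer-token setup would look like the following (all values are placeholders; top-level keys other than `"auth"` act as lowest-priority variables during execution, per `execute_request` below):

    environment_config = {
        "auth": {
            "type": "bearer",
            "params": {"token": "{{api_token}}"},  # resolved via resolve_variables
        },
        # Plain keys become variables available to {{...}} substitution
        "api_token": "secret-token-value",
        "base_url": "https://api.example.com",
    }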
@@ -398,6 +507,13 @@ class PostmanApiWrapper(BaseToolApiWrapper):
                 "args_schema": PostmanAnalyze,
                 "ref": self.analyze
             },
+            {
+                "name": "execute_request",
+                "mode": "execute_request",
+                "description": "Execute a Postman request with environment variables and custom configuration",
+                "args_schema": PostmanExecuteRequest,
+                "ref": self.execute_request
+            },
             # {
             #     "name": "create_collection",
             #     "mode": "create_collection",
@@ -583,6 +699,224 @@ class PostmanApiWrapper(BaseToolApiWrapper):
             logger.error(f"Exception when getting collections: {stacktrace}")
             raise ToolException(f"Unable to get collections: {str(e)}")
 
+    def execute_request(self, request_path: str, override_variables: Dict = None, **kwargs) -> str:
+        """Execute a Postman request with environment variables and custom configuration.
+
+        This method uses the environment_config to make actual HTTP requests
+        using the requests library with structured authentication.
+
+        Args:
+            request_path: The path to the request in the collection
+            override_variables: Optional variables to override environment/collection variables
+
+        Returns:
+            JSON string with comprehensive response data
+        """
+        try:
+            import time
+            from urllib.parse import urlencode, parse_qs, urlparse
+
+            # Get the request from the collection
+            request_item, _, collection_data = self._get_request_item_and_id(request_path)
+            request_data = request_item.get('request', {})
+
+            # Gather all variables from different sources
+            all_variables = {}
+
+            # 1. Start with environment_config variables (lowest priority)
+            all_variables.update(self.environment_config)
+
+            # 2. Add collection variables
+            collection_variables = collection_data.get('variable', [])
+            for var in collection_variables:
+                if isinstance(var, dict) and 'key' in var:
+                    all_variables[var['key']] = var.get('value', '')
+
+            # 3. Add override variables (highest priority)
+            if override_variables:
+                all_variables.update(override_variables)
+
+            # Helper function to resolve variables in strings
+            def resolve_variables(text):
+                if not isinstance(text, str):
+                    return text
+
+                # Replace {{variable}} patterns
+                import re
+                def replace_var(match):
+                    var_name = match.group(1)
+                    return str(all_variables.get(var_name, match.group(0)))
+
+                return re.sub(r'\{\{([^}]+)\}\}', replace_var, text)
+
+            # Prepare the request
+            method = request_data.get('method', 'GET').upper()
+
+            # Handle URL
+            url_data = request_data.get('url', '')
+            if isinstance(url_data, str):
+                url = resolve_variables(url_data)
+                params = {}
+            else:
+                # URL is an object
+                raw_url = resolve_variables(url_data.get('raw', ''))
+                url = raw_url
+
+                # Extract query parameters
+                params = {}
+                query_params = url_data.get('query', [])
+                for param in query_params:
+                    if isinstance(param, dict) and not param.get('disabled', False):
+                        key = resolve_variables(param.get('key', ''))
+                        value = resolve_variables(param.get('value', ''))
+                        if key:
+                            params[key] = value
+
+            # Prepare headers
+            headers = {}
+
+            # Handle authentication from environment_config
+            self._apply_authentication(headers, params, all_variables, resolve_variables)
+
+            # Add headers from request
+            request_headers = request_data.get('header', [])
+            for header in request_headers:
+                if isinstance(header, dict) and not header.get('disabled', False):
+                    key = resolve_variables(header.get('key', ''))
+                    value = resolve_variables(header.get('value', ''))
+                    if key:
+                        headers[key] = value
+
+            # Prepare body
+            body = None
+            content_type = headers.get('Content-Type', '').lower()
+
+            request_body = request_data.get('body', {})
+            if request_body:
+                body_mode = request_body.get('mode', '')
+
+                if body_mode == 'raw':
+                    raw_body = request_body.get('raw', '')
+                    body = resolve_variables(raw_body)
+
+                    # Try to parse as JSON if content type suggests it
+                    if 'application/json' in content_type:
+                        try:
+                            # Validate JSON
+                            json.loads(body)
+                        except json.JSONDecodeError:
+                            logger.warning("Body is not valid JSON despite Content-Type")
+
+                elif body_mode == 'formdata':
+                    # Handle form data
+                    form_data = {}
+                    formdata_items = request_body.get('formdata', [])
+                    for item in formdata_items:
+                        if isinstance(item, dict) and not item.get('disabled', False):
+                            key = resolve_variables(item.get('key', ''))
+                            value = resolve_variables(item.get('value', ''))
+                            if key:
+                                form_data[key] = value
+                    body = form_data
+
+                elif body_mode == 'urlencoded':
+                    # Handle URL encoded data
+                    urlencoded_data = {}
+                    urlencoded_items = request_body.get('urlencoded', [])
+                    for item in urlencoded_items:
+                        if isinstance(item, dict) and not item.get('disabled', False):
+                            key = resolve_variables(item.get('key', ''))
+                            value = resolve_variables(item.get('value', ''))
+                            if key:
+                                urlencoded_data[key] = value
+                    body = urlencode(urlencoded_data)
+                    if 'content-type' not in [h.lower() for h in headers.keys()]:
+                        headers['Content-Type'] = 'application/x-www-form-urlencoded'
+
+            # Execute the request
+            start_time = time.time()
+
+            logger.info(f"Executing {method} request to {url}")
+
+            # Create a new session for this request (separate from Postman API session)
+            exec_session = requests.Session()
+
+            # Prepare request kwargs
+            request_kwargs = {
+                'timeout': self.timeout,
+                'params': params if params else None,
+                'headers': headers if headers else None
+            }
+
+            # Add body based on content type and method
+            if body is not None and method in ['POST', 'PUT', 'PATCH']:
+                if isinstance(body, dict):
+                    # Form data
+                    request_kwargs['data'] = body
+                elif isinstance(body, str):
+                    if 'application/json' in content_type:
+                        request_kwargs['json'] = json.loads(body) if body.strip() else {}
+                    else:
+                        request_kwargs['data'] = body
+                else:
+                    request_kwargs['data'] = body
+
+            # Execute the request
+            response = exec_session.request(method, url, **request_kwargs)
+
+            end_time = time.time()
+            elapsed_time = end_time - start_time
+
+            # Parse response
+            response_data = {
+                "request": {
+                    "path": request_path,
+                    "method": method,
+                    "url": url,
+                    "headers": dict(headers) if headers else {},
+                    "params": dict(params) if params else {},
+                    "body": body if body is not None else None
+                },
+                "response": {
+                    "status_code": response.status_code,
+                    "status_text": response.reason,
+                    "headers": dict(response.headers),
+                    "elapsed_time_seconds": round(elapsed_time, 3),
+                    "size_bytes": len(response.content)
+                },
+                "variables_used": dict(all_variables),
+                "success": response.ok
+            }
+
+            # Add response body
+            try:
+                # Try to parse as JSON
+                response_data["response"]["body"] = response.json()
+                response_data["response"]["content_type"] = "application/json"
+            except json.JSONDecodeError:
+                # Fall back to text
+                try:
+                    response_data["response"]["body"] = response.text
+                    response_data["response"]["content_type"] = "text/plain"
+                except UnicodeDecodeError:
+                    # Binary content
+                    response_data["response"]["body"] = f"<binary content: {len(response.content)} bytes>"
+                    response_data["response"]["content_type"] = "binary"
+
+            # Add error details if request failed
+            if not response.ok:
+                response_data["error"] = {
+                    "message": f"HTTP {response.status_code}: {response.reason}",
+                    "status_code": response.status_code
+                }
+
+            return json.dumps(response_data, indent=2)
+
+        except Exception as e:
+            stacktrace = format_exc()
+            logger.error(f"Exception when executing request: {stacktrace}")
+            raise ToolException(f"Unable to execute request '{request_path}': {str(e)}")
+
     def get_collection(self, **kwargs) -> str:
         """Get a specific collection by ID."""
         try:
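`execute_request` resolves Postman-style `{{variable}}` placeholders before sending, with overrides taking precedence over collection and environment values. A standalone sketch of that substitution step (sample variables are hypothetical):

    import re

    all_variables = {"base_url": "https://api.example.com", "user_id": 42}  # hypothetical

    def resolve_variables(text):
        if not isinstance(text, str):
            return text
        # Replace each {{name}} with its value; unknown names are left intact
        return re.sub(r'\{\{([^}]+)\}\}',
                      lambda m: str(all_variables.get(m.group(1), m.group(0))),
                      text)

    assert resolve_variables("{{base_url}}/users/{{user_id}}") == "https://api.example.com/users/42"
    assert resolve_variables("{{missing}}") == "{{missing}}"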
@@ -1699,6 +2033,7 @@ class PostmanApiWrapper(BaseToolApiWrapper):
             # Check if this is a folder (has 'item' property) or a request
             if 'item' in item:
                 # This is a folder
+
                 result['items'][current_path] = {
                     "type": "folder",
                     "id": item.get('id'),
alita_sdk-0.3.165.dist-info/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: alita_sdk
-Version: 0.3.163
+Version: 0.3.165
 Summary: SDK for building langchain agents using resources from Alita
 Author-email: Artem Rozumenko <artyom.rozumenko@gmail.com>, Mikalai Biazruchka <mikalai_biazruchka@epam.com>, Roman Mitusov <roman_mitusov@epam.com>, Ivan Krakhmaliuk <lifedjik@gmail.com>
 License-Expression: Apache-2.0
@@ -31,8 +31,8 @@ Requires-Dist: langchain_community~=0.3.7; extra == "runtime"
 Requires-Dist: langchain-openai~=0.3.0; extra == "runtime"
 Requires-Dist: langgraph-checkpoint-sqlite~=2.0.0; extra == "runtime"
 Requires-Dist: langgraph-checkpoint-postgres~=2.0.1; extra == "runtime"
-Requires-Dist: langsmith==0.1.144; extra == "runtime"
-Requires-Dist: langgraph~=0.2.53; extra == "runtime"
+Requires-Dist: langsmith>=0.3.45; extra == "runtime"
+Requires-Dist: langgraph>=0.4.8; extra == "runtime"
 Requires-Dist: langchain_chroma~=0.2.2; extra == "runtime"
 Requires-Dist: langchain-unstructured~=0.1.6; extra == "runtime"
 Requires-Dist: langchain-postgres~=0.0.13; extra == "runtime"
@@ -122,6 +122,7 @@ Requires-Dist: yagmail==0.15.293; extra == "tools"
 Requires-Dist: pysnc==1.1.10; extra == "tools"
 Requires-Dist: shortuuid==1.0.13; extra == "tools"
 Requires-Dist: yarl==1.17.1; extra == "tools"
+Requires-Dist: langmem==0.0.27; extra == "tools"
 Provides-Extra: community
 Requires-Dist: retry-extended==0.2.3; extra == "community"
 Requires-Dist: browser-use==0.1.43; extra == "community"
alita_sdk-0.3.165.dist-info/RECORD

@@ -48,14 +48,14 @@ alita_sdk/runtime/clients/client.py,sha256=jbC_M72CybwZgFfMRL6paj-NmICrSuk1vVnVT
 alita_sdk/runtime/clients/datasource.py,sha256=HAZovoQN9jBg0_-lIlGBQzb4FJdczPhkHehAiVG3Wx0,1020
 alita_sdk/runtime/clients/prompt.py,sha256=li1RG9eBwgNK_Qf0qUaZ8QNTmsncFrAL2pv3kbxZRZg,1447
 alita_sdk/runtime/langchain/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-alita_sdk/runtime/langchain/assistant.py,sha256=1G4yBhBc-tXgqerujUVu5Z8T49m_7ov-7zYYsm-jGb4,7511
+alita_sdk/runtime/langchain/assistant.py,sha256=QJEMiEOrFMJ4GpnK24U2pKFblrvdQpKFdfhZsI2wAUI,7507
 alita_sdk/runtime/langchain/chat_message_template.py,sha256=kPz8W2BG6IMyITFDA5oeb5BxVRkHEVZhuiGl4MBZKdc,2176
 alita_sdk/runtime/langchain/constants.py,sha256=eHVJ_beJNTf1WJo4yq7KMK64fxsRvs3lKc34QCXSbpk,3319
 alita_sdk/runtime/langchain/indexer.py,sha256=0ENHy5EOhThnAiYFc7QAsaTNp9rr8hDV_hTK8ahbatk,37592
 alita_sdk/runtime/langchain/langraph_agent.py,sha256=BkrbYMy4BPAvy9uANH3s9ffBzaGewKFK97evN90L5kY,39917
 alita_sdk/runtime/langchain/mixedAgentParser.py,sha256=M256lvtsL3YtYflBCEp-rWKrKtcY1dJIyRGVv7KW9ME,2611
 alita_sdk/runtime/langchain/mixedAgentRenderes.py,sha256=asBtKqm88QhZRILditjYICwFVKF5KfO38hu2O-WrSWE,5964
-alita_sdk/runtime/langchain/store_manager.py,sha256=2MSbk8Zy969R2YjA5lV6qIPoFRPZYAIUHyoK_2apoGw,1420
+alita_sdk/runtime/langchain/store_manager.py,sha256=w5-0GbPGJAw14g0CCD9BKFMznzk1I-iJ5OGj_HZJZgA,2211
 alita_sdk/runtime/langchain/utils.py,sha256=Npferkn10dvdksnKzLJLBI5bNGQyVWTBwqp3vQtUqmY,6631
 alita_sdk/runtime/langchain/agents/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 alita_sdk/runtime/langchain/agents/xml_chat.py,sha256=Mx7PK5T97_GrFCwHHZ3JZP42S7MwtUzV0W-_8j6Amt8,6212
@@ -102,7 +102,7 @@ alita_sdk/runtime/toolkits/artifact.py,sha256=7fTr9VpGd2zwCB3EwW4aqWa5jVKRTunqV3
 alita_sdk/runtime/toolkits/datasource.py,sha256=qk78OdPoReYPCWwahfkKLbKc4pfsu-061oXRryFLP6I,2498
 alita_sdk/runtime/toolkits/prompt.py,sha256=WIpTkkVYWqIqOWR_LlSWz3ug8uO9tm5jJ7aZYdiGRn0,1192
 alita_sdk/runtime/toolkits/subgraph.py,sha256=ZYqI4yVLbEPAjCR8dpXbjbL2ipX598Hk3fL6AgaqFD4,1758
-alita_sdk/runtime/toolkits/tools.py,sha256=gCIEtdeD9u-za-oIZtJ916r9oSR9_0gCWE5FIKynWdU,6148
+alita_sdk/runtime/toolkits/tools.py,sha256=TfnXSDMPlCt-L_CcV59XQNqkH1bzyu0bh9k9CijJWT4,6162
 alita_sdk/runtime/toolkits/vectorstore.py,sha256=BGppQADa1ZiLO17fC0uCACTTEvPHlodEDYEzUcBRbAA,2901
 alita_sdk/runtime/tools/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 alita_sdk/runtime/tools/agent.py,sha256=m98QxOHwnCRTT9j18Olbb5UPS8-ZGeQaGiUyZJSyFck,3162
@@ -129,7 +129,7 @@ alita_sdk/runtime/utils/logging.py,sha256=svPyiW8ztDfhqHFITv5FBCj8UhLxz6hWcqGIY6
 alita_sdk/runtime/utils/save_dataframe.py,sha256=i-E1wp-t4wb17Zq3nA3xYwgSILjoXNizaQAA9opWvxY,1576
 alita_sdk/runtime/utils/streamlit.py,sha256=z4J_bdxkA0zMROkvTB4u379YBRFCkKh-h7PD8RlnZWQ,85644
 alita_sdk/runtime/utils/utils.py,sha256=dM8whOJAuFJFe19qJ69-FLzrUp6d2G-G6L7d4ss2XqM,346
-alita_sdk/tools/__init__.py,sha256=Ttjp6AVkjuh-WqGE_cc_nUwi4qvU1khf7_BVKdwO7gU,9801
+alita_sdk/tools/__init__.py,sha256=iuEQDwXHY8MQvHt6qIkYpXD7VC_3no-iVgDCpHqwIy4,10018
 alita_sdk/tools/elitea_base.py,sha256=NQaIxPX6DVIerHCb18jwUR6maZxxk73NZaTsFHkBQWE,21119
 alita_sdk/tools/ado/__init__.py,sha256=mD6GHcYMTtffPJkJvFPe2rzvye_IRmXmWfI7xYuZhO4,912
 alita_sdk/tools/ado/utils.py,sha256=PTCludvaQmPLakF2EbCGy66Mro4-rjDtavVP-xcB2Wc,1252
@@ -160,12 +160,12 @@ alita_sdk/tools/browser/google_search_rag.py,sha256=QVHFbVwymiJGuno_HLSJOK1c_Mpg
 alita_sdk/tools/browser/utils.py,sha256=4k3YM_f1Kqlhjz9vt2pNsGkvCjhy-EmY3nvcwdFCsLA,2501
 alita_sdk/tools/browser/wiki.py,sha256=Qh3HBFd4dkS2VavXbFJOm4b8SjVSIe5xSD7CY1vEkKE,1126
 alita_sdk/tools/carrier/__init__.py,sha256=pP-nk-dpqOkrvwcRY_szgwqoowyVNl_GobD4Inp-Qus,4435
-alita_sdk/tools/carrier/api_wrapper.py,sha256=smcc1Q7H6U1_18qDYBFpeGnu5cX3OrsUoKaSn3s6vkw,8728
-alita_sdk/tools/carrier/backend_reports_tool.py,sha256=WNZVGBIZusakOdbd7lG6o6xL180VZfER-uDw_SSGupo,11005
-alita_sdk/tools/carrier/backend_tests_tool.py,sha256=arq275qiP9t3ST-MPn7FlxbLLSPiIGEnyPdgJ-AvOoQ,5917
+alita_sdk/tools/carrier/api_wrapper.py,sha256=Y_Qznko3ReDZipDn64_IivehdN92hUpX0HLCfiLpYTw,8696
+alita_sdk/tools/carrier/backend_reports_tool.py,sha256=2Z1DCt6f0XTrQcEoynxMFUnenhtYMuIt_epq4YBSLDE,12234
+alita_sdk/tools/carrier/backend_tests_tool.py,sha256=Y1Va-VxDtnbRvrEdlwe63t-oDvqzQv5jxZW1eH0XeTY,6246
 alita_sdk/tools/carrier/cancel_ui_test_tool.py,sha256=pD1sKEcZGBWJqFpgjeohMk93uuUPWruVJRPVVg90rpo,6438
-alita_sdk/tools/carrier/carrier_sdk.py,sha256=sYdPWcpH8ti0MggOvU2pbsKYiaKR1zuXlbiCtcTfc3A,12913
-alita_sdk/tools/carrier/create_ui_excel_report_tool.py,sha256=sYAz54ILk8CIF_n76zH_hcmbW9xw7oTNnf_d9d-N-_Q,20171
+alita_sdk/tools/carrier/carrier_sdk.py,sha256=wb3d1W6dvo5NRLMiJcBcKy663J6IkbTwpKFTZX30QFQ,13672
+alita_sdk/tools/carrier/create_ui_excel_report_tool.py,sha256=8aSpkyIGXsOBTo8Ye_6p19v8OOl1y7QW47IJxZ6QDgM,20163
 alita_sdk/tools/carrier/create_ui_test_tool.py,sha256=sHi7-D1uqIUHEyoywI92h6MdUVybKfBXs_XttTu-Ck4,8624
 alita_sdk/tools/carrier/excel_reporter.py,sha256=fXptz7iaBDBcFSc8Ah8nZ9CSgugTONc5JMC1XcQEnfM,21487
 alita_sdk/tools/carrier/lighthouse_excel_reporter.py,sha256=mVuU63tl2n-Gntx9RuedjEU0U5AP1APKsSx1DvJs7wk,6684
@@ -228,7 +228,7 @@ alita_sdk/tools/figma/__init__.py,sha256=rtEebf9zj1zUD0bpkN-SupaYpjmHFM01gY8XZNE
 alita_sdk/tools/figma/api_wrapper.py,sha256=G96pEp_qUOouwkM5xMqRg-Ywfx_kEey8NV8iO7YLodE,17190
 alita_sdk/tools/github/__init__.py,sha256=YPpZPPhRUHWKJ9aaMJnkjl9xrnAij1YB9C2TMRnlaTI,6388
 alita_sdk/tools/github/api_wrapper.py,sha256=qyIrwPg07TFsTB1l95soy1xsJIuxfKOWTWUdLZCmTA4,8365
-alita_sdk/tools/github/github_client.py,sha256=YKhLDMq0VF1KM_Get2JKj-YsipwozeSX8xdcCaM4XvI,85395
+alita_sdk/tools/github/github_client.py,sha256=3uM4VHrBOPAgqCQV19mQHgt6mh89gfXenrAt9K2hvVs,85740
 alita_sdk/tools/github/graphql_client_wrapper.py,sha256=d3AGjzLGH_hdQV2V8HeAX92dJ4dlnE5OXqUlCO_PBr0,71539
 alita_sdk/tools/github/schemas.py,sha256=9JfJ3nYdFeT30dOwZH6QZyZYMT8v8HrKq1jOv6Xn-Gs,13739
 alita_sdk/tools/github/tool.py,sha256=Jnnv5lenV5ds8AAdyo2m8hSzyJ117HZBjzHC6T1ck-M,1037
@@ -254,7 +254,7 @@ alita_sdk/tools/llm/llm_utils.py,sha256=v3_lWP_Nk6tJLkj0BYohOun0OWNfvzqLjPdPAMl-
 alita_sdk/tools/localgit/__init__.py,sha256=NScO0Eu-wl-rc63jjD5Qv1RXXB1qukSIJXx-yS_JQLI,2529
 alita_sdk/tools/localgit/local_git.py,sha256=gsAftNcK7nMCd8VsIkwDLs2SoG0MgpYdkQG5tmoynkA,18074
 alita_sdk/tools/localgit/tool.py,sha256=It_B24rMvFPurB355Oy5IShg2BsZTASsEoSS8hu2SXw,998
-alita_sdk/tools/memory/__init__.py,sha256=QBzuOQapovmbcFS4nG39p3g-fUPp3kQrjh8EGk6VmBs,1901
+alita_sdk/tools/memory/__init__.py,sha256=SOB5Lhf8v8v0-IDUXUgb1KNdv5je-ooi6oGor8iYPpI,2148
 alita_sdk/tools/ocr/__init__.py,sha256=pvslKVXyJmK0q23FFDNieuc7RBIuzNXTjTNj-GqhGb0,3335
 alita_sdk/tools/ocr/api_wrapper.py,sha256=08UF8wj1sR8DcW0z16pw19bgLatLkBF8dySW-Ds8iRk,29649
 alita_sdk/tools/ocr/text_detection.py,sha256=1DBxt54r3_HdEi93QynSIVta3rH3UpIvy799TPtDTtk,23825
@@ -278,8 +278,8 @@ alita_sdk/tools/pandas/statsmodels/base_stats.py,sha256=jeKW1KfyaNi4M6wkru2iXHNr
 alita_sdk/tools/pandas/statsmodels/descriptive.py,sha256=APdofBnEiRhMrn6tLKwH076NPp5uHe8VwmblN3lQLfQ,10217
 alita_sdk/tools/pandas/statsmodels/hypothesis_testing.py,sha256=fdNAayMB3W7avMfKJCcbf2_P54vUXbq8KVebOB48348,10508
 alita_sdk/tools/pandas/statsmodels/regression.py,sha256=Y1pWK4u_qzrfA740K-FX0nZ5FREGGPk8mfvykPIYoiI,9164
-alita_sdk/tools/postman/__init__.py,sha256=W0HdtACnTZw6tnzj7_qY_X5RoRyX3czcUSVaZJjBW-Y,4236
-alita_sdk/tools/postman/api_wrapper.py,sha256=DvdZtLPpe6LpsGfsF38UmNyDHjORwWWutisd5AVIogg,78094
+alita_sdk/tools/postman/__init__.py,sha256=mrRwZlmpWh9zMrdNmq1QLu69E-7sA1uwzL8UDs-gaRo,4617
+alita_sdk/tools/postman/api_wrapper.py,sha256=K7Vq06DX7uMeuhpnc3EQfYvc4szY0_ElrRf9P4FLlfk,93501
 alita_sdk/tools/postman/postman_analysis.py,sha256=2d-Oi2UORosIePIUyncSONw9hY7dw8Zc7BQvCd4aqpg,45115
 alita_sdk/tools/pptx/__init__.py,sha256=LNSTQk0BncfdWLXAOGX2WXezG3D4qSEuYwLpokmF9iM,3438
 alita_sdk/tools/pptx/pptx_wrapper.py,sha256=yyCYcTlIY976kJ4VfPo4dyxj4yeii9j9TWP6W8ZIpN8,29195
@@ -326,8 +326,8 @@ alita_sdk/tools/zephyr_scale/api_wrapper.py,sha256=UHVQUVqcBc3SZvDfO78HSuBzwAsRw
 alita_sdk/tools/zephyr_squad/__init__.py,sha256=rq4jOb3lRW2GXvAguk4H1KinO5f-zpygzhBJf-E1Ucw,2773
 alita_sdk/tools/zephyr_squad/api_wrapper.py,sha256=iOMxyE7vOc_LwFB_nBMiSFXkNtvbptA4i-BrTlo7M0A,5854
 alita_sdk/tools/zephyr_squad/zephyr_squad_cloud_client.py,sha256=IYUJoMFOMA70knLhLtAnuGoy3OK80RuqeQZ710oyIxE,3631
-alita_sdk-0.3.163.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
-alita_sdk-0.3.163.dist-info/METADATA,sha256=3Fd8T3ods52lKrlnTjGDxEAn_svB00x64xg65cCi8rY,18667
-alita_sdk-0.3.163.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-alita_sdk-0.3.163.dist-info/top_level.txt,sha256=0vJYy5p_jK6AwVb1aqXr7Kgqgk3WDtQ6t5C-XI9zkmg,10
-alita_sdk-0.3.163.dist-info/RECORD,,
+alita_sdk-0.3.165.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+alita_sdk-0.3.165.dist-info/METADATA,sha256=5JAyaE1u0l1m3LsaJihg9jBQum9oDZF5uKcefAQzB5Y,18714
+alita_sdk-0.3.165.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+alita_sdk-0.3.165.dist-info/top_level.txt,sha256=0vJYy5p_jK6AwVb1aqXr7Kgqgk3WDtQ6t5C-XI9zkmg,10
+alita_sdk-0.3.165.dist-info/RECORD,,