alita-sdk 0.3.602__py3-none-any.whl → 0.3.609__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Note: this release of alita-sdk has been flagged as potentially problematic.

Files changed (32)
  1. alita_sdk/cli/agents.py +108 -826
  2. alita_sdk/cli/testcases/__init__.py +94 -0
  3. alita_sdk/cli/testcases/data_generation.py +119 -0
  4. alita_sdk/cli/testcases/discovery.py +96 -0
  5. alita_sdk/cli/testcases/executor.py +84 -0
  6. alita_sdk/cli/testcases/logger.py +85 -0
  7. alita_sdk/cli/testcases/parser.py +172 -0
  8. alita_sdk/cli/testcases/prompts.py +91 -0
  9. alita_sdk/cli/testcases/reporting.py +125 -0
  10. alita_sdk/cli/testcases/setup.py +108 -0
  11. alita_sdk/cli/testcases/test_runner.py +282 -0
  12. alita_sdk/cli/testcases/utils.py +39 -0
  13. alita_sdk/cli/testcases/validation.py +90 -0
  14. alita_sdk/cli/testcases/workflow.py +196 -0
  15. alita_sdk/configurations/openapi.py +2 -2
  16. alita_sdk/runtime/clients/artifact.py +1 -1
  17. alita_sdk/runtime/tools/artifact.py +253 -8
  18. alita_sdk/runtime/tools/llm.py +12 -11
  19. alita_sdk/tools/bitbucket/api_wrapper.py +31 -30
  20. alita_sdk/tools/bitbucket/cloud_api_wrapper.py +49 -35
  21. alita_sdk/tools/confluence/api_wrapper.py +8 -1
  22. alita_sdk/tools/elitea_base.py +40 -36
  23. alita_sdk/tools/figma/api_wrapper.py +140 -83
  24. alita_sdk/tools/github/github_client.py +18 -10
  25. alita_sdk/tools/github/graphql_client_wrapper.py +1 -0
  26. alita_sdk/tools/utils/text_operations.py +156 -52
  27. {alita_sdk-0.3.602.dist-info → alita_sdk-0.3.609.dist-info}/METADATA +1 -1
  28. {alita_sdk-0.3.602.dist-info → alita_sdk-0.3.609.dist-info}/RECORD +32 -19
  29. {alita_sdk-0.3.602.dist-info → alita_sdk-0.3.609.dist-info}/WHEEL +0 -0
  30. {alita_sdk-0.3.602.dist-info → alita_sdk-0.3.609.dist-info}/entry_points.txt +0 -0
  31. {alita_sdk-0.3.602.dist-info → alita_sdk-0.3.609.dist-info}/licenses/LICENSE +0 -0
  32. {alita_sdk-0.3.602.dist-info → alita_sdk-0.3.609.dist-info}/top_level.txt +0 -0

alita_sdk/cli/testcases/validation.py
@@ -0,0 +1,90 @@
+"""
+Validation utilities for test execution.
+
+Handles JSON extraction, fallback results, and diagnostics.
+"""
+
+import json
+import logging
+from pathlib import Path
+from typing import Dict, Any
+from rich.console import Console
+
+logger = logging.getLogger(__name__)
+console = Console()
+
+
+def extract_json_from_text(text: str) -> dict:
+    """Extract JSON object from text using brace counting."""
+    start_idx = text.find('{')
+    if start_idx == -1:
+        raise ValueError("No JSON found in text")
+
+    brace_count = 0
+    end_idx = -1
+    for i, char in enumerate(text[start_idx:], start=start_idx):
+        if char == '{':
+            brace_count += 1
+        elif char == '}':
+            brace_count -= 1
+            if brace_count == 0:
+                end_idx = i + 1
+                break
+
+    if end_idx == -1:
+        raise ValueError("Could not find matching closing brace")
+
+    return json.loads(text[start_idx:end_idx])
+
+
+def create_fallback_result_for_test(test_case: Dict[str, Any], test_file: Path, reason: str = 'Validation failed') -> Dict[str, Any]:
+    """Create a fallback result for a single test case with detailed step information.
+
+    Args:
+        test_case: Parsed test case data
+        test_file: Path to test case file
+        reason: Reason for fallback
+
+    Returns:
+        Fallback test result dict with step details
+    """
+    fallback_steps = []
+    for step_info in test_case.get('steps', []):
+        fallback_steps.append({
+            'step_number': step_info['number'],
+            'title': step_info['title'],
+            'passed': False,
+            'details': reason
+        })
+
+    return {
+        'title': test_case['name'],
+        'passed': False,
+        'file': test_file.name,
+        'step_results': fallback_steps,
+        'validation_error': reason
+    }
+
+
+def print_validation_diagnostics(validation_output: str) -> None:
+    """Print diagnostic information for validation output.
+
+    Args:
+        validation_output: The validation output to diagnose
+    """
+    console.print(f"\n[bold red]🔍 Diagnostic Information:[/bold red]")
+    console.print(f"[dim]Output length: {len(validation_output)} characters[/dim]")
+
+    # Check for key JSON elements
+    has_json = '{' in validation_output and '}' in validation_output
+    has_fields = 'test_number' in validation_output and 'steps' in validation_output
+
+    console.print(f"[dim]Has JSON structure: {has_json}[/dim]")
+    console.print(f"[dim]Has required fields: {has_fields}[/dim]")
+
+    # Show relevant excerpt
+    if len(validation_output) > 400:
+        console.print(f"\n[red]First 200 chars:[/red] [dim]{validation_output[:200]}[/dim]")
+        console.print(f"[red]Last 200 chars:[/red] [dim]{validation_output[-200:]}[/dim]")
+    else:
+        console.print(f"\n[red]Full output:[/red] [dim]{validation_output}[/dim]")
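
A minimal usage sketch (not part of the diff, assuming the wheel is installed): extract_json_from_text pulls the first balanced JSON object out of a model reply by counting braces, so surrounding prose is ignored; the reply string below is hypothetical. Note that braces inside JSON string values are counted as well, which can cut the scan short.

    from alita_sdk.cli.testcases.validation import extract_json_from_text

    # Hypothetical validator reply that wraps JSON in prose
    reply = 'Validation complete. {"test_number": 1, "steps": [{"passed": true}]} End of report.'

    result = extract_json_from_text(reply)
    # Brace counting starts at the first '{' and stops at its matching '}',
    # so the text before and after the object is discarded before json.loads runs.
    assert result == {"test_number": 1, "steps": [{"passed": True}]}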

alita_sdk/cli/testcases/workflow.py
@@ -0,0 +1,196 @@
+"""
+Main workflow orchestration for test case execution.
+
+Coordinates the entire test execution flow from parsing to reporting.
+"""
+
+import logging
+import uuid
+from pathlib import Path
+from typing import List, Dict, Any, Optional, Tuple
+
+logger = logging.getLogger(__name__)
+
+
+def parse_all_test_cases(
+    test_case_files_list: List[Path],
+    master_log
+) -> List[Dict[str, Any]]:
+    """Parse all test case files.
+
+    Args:
+        test_case_files_list: List of test case files to parse
+        master_log: Log capture instance
+
+    Returns:
+        List of parsed test case dicts with 'file' and 'data' keys
+    """
+    from .parser import parse_test_case
+
+    parsed_test_cases = []
+    for test_file in test_case_files_list:
+        try:
+            test_case = parse_test_case(str(test_file))
+            parsed_test_cases.append({
+                'file': test_file,
+                'data': test_case
+            })
+        except Exception as e:
+            master_log.print(f"[yellow]⚠ Warning: Failed to parse {test_file.name}: {e}[/yellow]")
+            logger.debug(f"Parse error for {test_file.name}: {e}", exc_info=True)
+
+    return parsed_test_cases
+
+
+def filter_test_cases_needing_data_gen(
+    parsed_test_cases: List[Dict[str, Any]]
+) -> List[Dict[str, Any]]:
+    """Filter test cases that need data generation.
+
+    Args:
+        parsed_test_cases: All parsed test cases
+
+    Returns:
+        Filtered list of test cases that require data generation
+    """
+    return [
+        tc for tc in parsed_test_cases
+        if tc['data'].get('generate_test_data', True)
+    ]
+
+
+def execute_all_test_cases(
+    parsed_test_cases: List[Dict[str, Any]],
+    bulk_gen_chat_history: List[Dict[str, str]],
+    test_cases_path: Path,
+    agent_def: Dict[str, Any],
+    validator_def: Optional[Dict[str, Any]],
+    client,
+    config,
+    model: Optional[str],
+    temperature: Optional[float],
+    max_tokens: Optional[int],
+    work_dir: str,
+    master_log,
+    setup_executor_func,
+    verbose: bool = True,
+    debug: bool = False,
+) -> List[Dict[str, Any]]:
+    """Execute all test cases and return results.
+
+    Args:
+        parsed_test_cases: List of parsed test cases
+        bulk_gen_chat_history: Chat history from data generation
+        test_cases_path: Path to test cases directory
+        agent_def: Test runner agent definition
+        validator_def: Validator agent definition (optional)
+        client: API client
+        config: CLI configuration
+        model: Model override
+        temperature: Temperature override
+        max_tokens: Max tokens override
+        work_dir: Working directory
+        master_log: Log capture instance
+        setup_executor_func: Function to setup executor
+
+    Returns:
+        List of test result dicts
+    """
+    from .parser import resolve_toolkit_config_path
+    from .utils import extract_toolkit_name
+    from .executor import cleanup_executor_cache
+    from .test_runner import execute_single_test_case, validate_single_test_case
+    from .validation import create_fallback_result_for_test
+
+    if not parsed_test_cases:
+        master_log.print("[yellow]No test cases to execute[/yellow]")
+        return []
+
+    master_log.print(f"\n[bold yellow]📋 Executing test cases sequentially...[/bold yellow]\n")
+
+    # Show data generation context availability
+    if bulk_gen_chat_history:
+        master_log.print(f"[dim]✓ Data generation history available ({len(bulk_gen_chat_history)} messages) - shared with all test cases[/dim]\n")
+    else:
+        master_log.print(f"[dim]ℹ No data generation history (skipped or disabled)[/dim]\n")
+
+    # Executor caches
+    executor_cache = {}
+    validation_executor_cache = {}
+
+    # Execute each test case sequentially
+    test_results = []
+    total_tests = len(parsed_test_cases)
+
+    for idx, tc_info in enumerate(parsed_test_cases, 1):
+        test_case = tc_info['data']
+        test_file = tc_info['file']
+        test_name = test_case['name']
+
+        try:
+            # Resolve toolkit config path
+            toolkit_config_path = resolve_toolkit_config_path(
+                test_case.get('config_path', ''),
+                test_file,
+                test_cases_path
+            )
+
+            # Use cache key
+            cache_key = toolkit_config_path if toolkit_config_path else '__no_config__'
+
+            # Execute single test case
+            execution_output = execute_single_test_case(
+                tc_info, idx, total_tests, bulk_gen_chat_history, test_cases_path,
+                executor_cache, client, agent_def, config, model, temperature,
+                max_tokens, work_dir, master_log, setup_executor_func,
+                verbose=verbose,
+                debug=debug,
+            )
+
+            if not execution_output:
+                # Create fallback result for failed execution
+                test_results.append({
+                    'title': test_name,
+                    'passed': False,
+                    'file': test_file.name,
+                    'step_results': []
+                })
+                continue
+
+            # Append execution to history for validation
+            from .prompts import build_single_test_execution_prompt
+            validation_chat_history = bulk_gen_chat_history + [
+                {"role": "user", "content": build_single_test_execution_prompt(tc_info, idx)},
+                {"role": "assistant", "content": execution_output}
+            ]
+
+            # Validate test case
+            test_result = validate_single_test_case(
+                tc_info, idx, execution_output, validation_chat_history,
+                validation_executor_cache, cache_key, client, validator_def,
+                agent_def, toolkit_config_path, config, model, temperature,
+                max_tokens, work_dir, master_log, setup_executor_func,
+                verbose=verbose,
+                debug=debug,
+            )
+
+            test_results.append(test_result)
+
+        except Exception as e:
+            logger.debug(f"Test execution failed for {test_name}: {e}", exc_info=True)
+            master_log.print(f"[red]✗ Test execution failed: {e}[/red]")
+
+            # Create fallback result
+            fallback_result = create_fallback_result_for_test(
+                test_case,
+                test_file,
+                f'Test execution failed: {str(e)}'
+            )
+            test_results.append(fallback_result)
+        master_log.print()
+
+    # Cleanup executor caches
+    cleanup_executor_cache(executor_cache, "executor")
+    cleanup_executor_cache(validation_executor_cache, "validation executor")
+
+    return test_results
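
A small illustration (not from the package, assuming the wheel is installed): filter_test_cases_needing_data_gen keeps any parsed case whose generate_test_data flag is true or absent. The test-case dicts below are made up; in the real flow they come from parse_all_test_cases.

    from alita_sdk.cli.testcases.workflow import filter_test_cases_needing_data_gen

    # Hypothetical parse_all_test_cases output: one dict per parsed file
    parsed = [
        {"file": "tc_login.md", "data": {"name": "Login", "generate_test_data": False}},
        {"file": "tc_search.md", "data": {"name": "Search"}},  # flag omitted, defaults to True
    ]

    needing_data = filter_test_cases_needing_data_gen(parsed)
    assert [tc["data"]["name"] for tc in needing_data] == ["Search"]

The filtered list then feeds the data generation step, whose chat history execute_all_test_cases shares with every test case via bulk_gen_chat_history.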

alita_sdk/configurations/openapi.py
@@ -64,7 +64,7 @@ class OpenApiConfiguration(BaseModel):
         ),
     )
     auth_type: Optional[Literal['Basic', 'Bearer', 'Custom']] = Field(
-        default='Bearer',
+        default=None,
         description=(
             "How to apply the API key. "
            "- 'Bearer': sets 'Authorization: Bearer <api_key>' "
@@ -111,7 +111,7 @@ class OpenApiConfiguration(BaseModel):
         )
     )
     method: Optional[Literal['default', 'Basic']] = Field(
-        default='default',
+        default=None,
         description=(
             "Token exchange method for client credentials flow. "
            "'default': Sends client_id and client_secret in POST body (Azure AD, Auth0, most providers). "

alita_sdk/runtime/clients/artifact.py
@@ -65,7 +65,7 @@ class Artifact:
     def delete(self, artifact_name: str, bucket_name = None):
         if not bucket_name:
             bucket_name = self.bucket_name
-        self.client.delete_artifact(bucket_name, artifact_name)
+        return self.client.delete_artifact(bucket_name, artifact_name)
 
     def list(self, bucket_name: str = None, return_as_string = True) -> str|dict:
         if not bucket_name:

alita_sdk/runtime/tools/artifact.py
@@ -21,13 +21,255 @@ from ...runtime.utils.utils import IndexerKeywords, resolve_image_from_cache
 class ArtifactWrapper(NonCodeIndexerToolkit):
     bucket: str
     artifact: Optional[Any] = None
-
-    # Import file operation methods from BaseCodeToolApiWrapper
-    read_file_chunk = BaseCodeToolApiWrapper.read_file_chunk
-    read_multiple_files = BaseCodeToolApiWrapper.read_multiple_files
-    search_file = BaseCodeToolApiWrapper.search_file
-    edit_file = BaseCodeToolApiWrapper.edit_file
-
+
+    # Override file operation methods to support bucket_name parameter
+    # (instead of importing from BaseCodeToolApiWrapper which uses 'branch')
+
+    def read_file_chunk(
+        self,
+        file_path: str,
+        start_line: int,
+        end_line: Optional[int] = None,
+        bucket_name: str = None
+    ) -> str:
+        """
+        Read a specific range of lines from a file in an artifact bucket.
+
+        Args:
+            file_path: Path to the file to read
+            start_line: Starting line number (1-indexed, inclusive)
+            end_line: Ending line number (1-indexed, inclusive). If None, reads to end.
+            bucket_name: Bucket name. If not provided, uses toolkit-configured default bucket.
+
+        Returns:
+            File content for the specified line range
+        """
+        from ...tools.utils.text_operations import apply_line_slice
+
+        # Calculate offset and limit from start_line and end_line
+        offset = start_line
+        limit = (end_line - start_line + 1) if end_line is not None else None
+
+        # Read the file with bucket_name support
+        content = self._read_file(file_path, branch=None, bucket_name=bucket_name, offset=offset, limit=limit)
+
+        # Apply client-side slicing if toolkit doesn't support partial reads
+        return apply_line_slice(content, offset=offset, limit=limit)
+
+    def read_multiple_files(
+        self,
+        file_paths: List[str],
+        bucket_name: str = None,
+        offset: Optional[int] = None,
+        limit: Optional[int] = None
+    ) -> dict:
+        """
+        Read multiple files in batch from an artifact bucket.
+
+        Args:
+            file_paths: List of file paths to read
+            bucket_name: Bucket name. If not provided, uses toolkit-configured default bucket.
+            offset: Starting line number for all files (1-indexed)
+            limit: Number of lines to read from offset for all files
+
+        Returns:
+            Dict mapping file paths to their content
+        """
+        from ...tools.utils.text_operations import apply_line_slice
+
+        results = {}
+        for path in file_paths:
+            try:
+                content = self._read_file(path, branch=None, bucket_name=bucket_name, offset=offset, limit=limit)
+                results[path] = apply_line_slice(content, offset=offset, limit=limit)
+            except Exception as e:
+                results[path] = f"Error reading file: {str(e)}"
+        return results
+
+    def search_file(
+        self,
+        file_path: str,
+        pattern: str,
+        bucket_name: str = None,
+        is_regex: bool = True,
+        context_lines: int = 2
+    ) -> str:
+        """
+        Search for a pattern in a file from an artifact bucket.
+
+        Args:
+            file_path: Path to the file to search
+            pattern: Search pattern. Treated as regex by default unless is_regex=False.
+            bucket_name: Bucket name. If not provided, uses toolkit-configured default bucket.
+            is_regex: Whether pattern is a regex. Default is True for flexible matching.
+            context_lines: Number of lines before/after match to include for context
+
+        Returns:
+            Formatted string with match results and context
+        """
+        from ...tools.utils.text_operations import search_in_content
+
+        content = self._read_file(file_path, branch=None, bucket_name=bucket_name)
+        matches = search_in_content(content, pattern, is_regex=is_regex, context_lines=context_lines)
+
+        if not matches:
+            return f"No matches found for pattern '{pattern}' in {file_path}"
+
+        # Format results
+        results = [f"Found {len(matches)} match(es) in {file_path}:\n"]
+        for match in matches:
+            results.append(f"\n--- Line {match['line_number']} ---")
+            if match['context_before']:
+                results.append("\n".join(f" {l}" for l in match['context_before']))
+            results.append(f"> {match['line_content']}")
+            if match['context_after']:
+                results.append("\n".join(f" {l}" for l in match['context_after']))
+
+        return "\n".join(results)
+
+    def edit_file(
+        self,
+        file_path: str,
+        file_query: str,
+        bucket_name: str = None,
+        commit_message: str = None
+    ) -> str:
+        """
+        Edit a file in an artifact bucket using OLD/NEW markers.
+
+        Args:
+            file_path: Path to the file to edit. Must be a text file.
+            file_query: Edit instructions with OLD/NEW markers.
+            bucket_name: Bucket name. If not provided, uses toolkit-configured default bucket.
+            commit_message: Not used for artifacts (kept for API consistency)
+
+        Returns:
+            Success message or error description
+        """
+        from ...tools.utils.text_operations import parse_old_new_markers, is_text_editable, try_apply_edit
+        from langchain_core.tools import ToolException
+
+        # Validate file type
+        if not is_text_editable(file_path):
+            raise ToolException(f"File '{file_path}' is not a text-editable file type")
+
+        # Read current content
+        content = self._read_file(file_path, branch=None, bucket_name=bucket_name)
+
+        # Parse edit instructions
+        edits = parse_old_new_markers(file_query)
+        if not edits:
+            raise ToolException("No valid OLD/NEW marker pairs found in edit instructions")
+
+        # Apply edits
+        updated_content = content
+        applied_count = 0
+        for old_text, new_text in edits:
+            updated_content, used_fallback = try_apply_edit(updated_content, old_text, new_text, file_path)
+            if updated_content != content or used_fallback:
+                applied_count += 1
+                content = updated_content
+
+        if applied_count == 0:
+            return f"No edits were applied to {file_path}. The OLD blocks may not match the file content."
+
+        # Write updated content
+        self._write_file(file_path, updated_content, branch=None, commit_message=commit_message, bucket_name=bucket_name)
+
+        return f"Successfully applied {applied_count} edit(s) to {file_path}"
+
+    def _get_file_operation_schemas(self):
+        """
+        Returns custom schemas for file operations that use bucket_name instead of branch.
+
+        This method is called by the @extend_with_file_operations decorator to get
+        toolkit-specific schemas for file operation tools.
+        """
+        # Artifact-specific schemas with bucket_name instead of branch
+        ArtifactReadFileChunkInput = create_model(
+            "ArtifactReadFileChunkInput",
+            file_path=(str, Field(description="Path to the file to read")),
+            bucket_name=(Optional[str], Field(
+                description="Bucket name. If not provided, uses toolkit-configured default bucket.",
+                default=None
+            )),
+            start_line=(int, Field(description="Starting line number (1-indexed, inclusive)", ge=1)),
+            end_line=(Optional[int], Field(
+                description="Ending line number (1-indexed, inclusive). If None, reads to end.",
+                default=None,
+                ge=1
+            )),
+        )
+
+        ArtifactReadMultipleFilesInput = create_model(
+            "ArtifactReadMultipleFilesInput",
+            file_paths=(List[str], Field(description="List of file paths to read", min_length=1)),
+            bucket_name=(Optional[str], Field(
+                description="Bucket name. If not provided, uses toolkit-configured default bucket.",
+                default=None
+            )),
+            offset=(Optional[int], Field(
+                description="Starting line number for all files (1-indexed)",
+                default=None,
+                ge=1
+            )),
+            limit=(Optional[int], Field(
+                description="Number of lines to read from offset for all files",
+                default=None,
+                ge=1
+            )),
+        )
+
+        ArtifactSearchFileInput = create_model(
+            "ArtifactSearchFileInput",
+            file_path=(str, Field(description="Path to the file to search")),
+            pattern=(str, Field(description="Search pattern. Treated as regex by default unless is_regex=False.")),
+            bucket_name=(Optional[str], Field(
+                description="Bucket name. If not provided, uses toolkit-configured default bucket.",
+                default=None
+            )),
+            is_regex=(bool, Field(
+                description="Whether pattern is a regex. Default is True for flexible matching.",
+                default=True
+            )),
+            context_lines=(int, Field(
+                description="Number of lines before/after match to include for context",
+                default=2,
+                ge=0
+            )),
+        )
+
+        ArtifactEditFileInput = create_model(
+            "ArtifactEditFileInput",
+            file_path=(str, Field(
+                description="Path to the file to edit. Must be a text file (markdown, txt, csv, json, xml, html, yaml, etc.)"
+            )),
+            file_query=(str, Field(description="""Edit instructions with OLD/NEW markers. Format:
+OLD <<<<
+old content to replace
+>>>> OLD
+NEW <<<<
+new content
+>>>> NEW
+
+Multiple OLD/NEW pairs can be provided for multiple edits.""")),
+            bucket_name=(Optional[str], Field(
+                description="Bucket name. If not provided, uses toolkit-configured default bucket.",
+                default=None
+            )),
+            commit_message=(Optional[str], Field(
+                description="Not used for artifacts (kept for API consistency)",
+                default=None
+            )),
+        )
+
+        return {
+            "read_file_chunk": ArtifactReadFileChunkInput,
+            "read_multiple_files": ArtifactReadMultipleFilesInput,
+            "search_file": ArtifactSearchFileInput,
+            "edit_file": ArtifactEditFileInput,
+        }
+
     @model_validator(mode='before')
     @classmethod
     def validate_toolkit(cls, values):
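
To make the edit_file contract concrete, here is a hedged sketch of the OLD/NEW file_query format described in ArtifactEditFileInput above; the file contents are invented, and the pair-wise return shape of parse_old_new_markers is inferred from its use inside edit_file.

    from alita_sdk.tools.utils.text_operations import parse_old_new_markers

    # Hypothetical edit request: replace one setting in a text artifact
    file_query = (
        "OLD <<<<\n"
        "timeout = 30\n"
        ">>>> OLD\n"
        "NEW <<<<\n"
        "timeout = 60\n"
        ">>>> NEW"
    )

    # edit_file parses the same string internally; iterating (old, new) pairs
    # mirrors the loop shown in the hunk above.
    for old_text, new_text in parse_old_new_markers(file_query):
        print(repr(old_text), "->", repr(new_text))

A configured ArtifactWrapper would pass this string as file_query to edit_file(file_path, file_query, bucket_name=...), which applies each pair via try_apply_edit and writes the result back to the bucket.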

@@ -287,7 +529,10 @@ class ArtifactWrapper(NonCodeIndexerToolkit):
             raise ToolException(f"Unable to write file {file_path}: {str(e)}")
 
     def delete_file(self, filename: str, bucket_name = None):
-        return self.artifact.delete(filename, bucket_name)
+        result = self.artifact.delete(filename, bucket_name)
+        if result and isinstance(result, dict) and result.get('error'):
+            raise ToolException(f'Error (deleteFile): {result.get("error")}')
+        return f'File "{filename}" deleted successfully.'
 
     def append_data(self, filename: str, filedata: str, bucket_name = None):
         result = self.artifact.append(filename, filedata, bucket_name)
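
With this change, delete_file surfaces backend errors instead of returning the raw client response. A behavior sketch, with wrapper standing in for a configured ArtifactWrapper instance and the file and bucket names hypothetical:

    from langchain_core.tools import ToolException

    try:
        message = wrapper.delete_file("report.csv", bucket_name="qa-artifacts")
        print(message)  # 'File "report.csv" deleted successfully.'
    except ToolException as exc:
        # Raised when the underlying client returns a dict like {'error': ...}
        print(f"Delete failed: {exc}")

This pairs with the clients/artifact.py change above, where Artifact.delete now returns the client response so the wrapper has something to inspect.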

alita_sdk/runtime/tools/llm.py
@@ -1,7 +1,7 @@
 import asyncio
 import logging
 from traceback import format_exc
-from typing import Any, Optional, List, Union, Literal
+from typing import Any, Optional, List, Union, Literal, Dict
 
 from langchain_core.messages import HumanMessage, SystemMessage, AIMessage
 from langchain_core.runnables import RunnableConfig
@@ -67,7 +67,7 @@ class LLMNode(BaseTool):
     client: Any = Field(default=None, description='LLM client instance')
     return_type: str = Field(default="str", description='Return type')
     response_key: str = Field(default="messages", description='Response key')
-    structured_output_dict: Optional[dict[str, str]] = Field(default=None, description='Structured output dictionary')
+    structured_output_dict: Optional[Dict[str, Any]] = Field(default=None, description='Structured output dictionary')
     output_variables: Optional[List[str]] = Field(default=None, description='Output variables')
     input_mapping: Optional[dict[str, dict]] = Field(default=None, description='Input mapping')
     input_variables: Optional[List[str]] = Field(default=None, description='Input variables')
@@ -82,7 +82,7 @@ class LLMNode(BaseTool):
         Prepare structured output parameters from structured_output_dict.
 
         Expected self.structured_output_dict formats:
-        - {"field": "str"} / {"field": "list"} / {"field": "list[str]"} / {"field": "any"} ...
+        - {"field": "str"} / {"field": "list"} / {"field": "list[dict]"} / {"field": "any"} ...
         - OR {"field": {"type": "...", "description": "...", "default": ...}} (optional)
 
         Returns:
@@ -93,19 +93,20 @@
         for key, value in (self.structured_output_dict or {}).items():
             # Allow either a plain type string or a dict with details
             if isinstance(value, dict):
-                type_str = (value.get("type") or "any")
+                type_str = str(value.get("type") or "any")
                 desc = value.get("description", "") or ""
                 entry: dict = {"type": type_str, "description": desc}
                 if "default" in value:
                     entry["default"] = value["default"]
             else:
-                type_str = (value or "any") if isinstance(value, str) else "any"
-                entry = {"type": type_str, "description": ""}
+                # Ensure we always have a string type
+                if isinstance(value, str):
+                    type_str = value
+                else:
+                    # If it's already a type object, convert to string representation
+                    type_str = getattr(value, '__name__', 'any')
 
-            # Normalize: only convert the *exact* "list" into "list[str]"
-            # (avoid the old bug where "if 'list' in value" also hits "blacklist", etc.)
-            if isinstance(entry.get("type"), str) and entry["type"].strip().lower() == "list":
-                entry["type"] = "list[str]"
+                entry = {"type": type_str, "description": ""}
 
             struct_params[key] = entry
 
@@ -1146,5 +1147,5 @@
 
         return new_messages, current_completion
 
-    def __get_struct_output_model(self, llm_client, pydantic_model, method: Literal["function_calling", "json_mode", "json_schema"] = "json_schema"):
+    def __get_struct_output_model(self, llm_client, pydantic_model, method: Literal["function_calling", "json_mode", "json_schema"] = "function_calling"):
         return llm_client.with_structured_output(pydantic_model, method=method)
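
As a standalone illustration of the structured-output normalization in the @@ -93,19 +93,20 @@ hunk above (re-implemented here for clarity; the field names are invented and this is not the package code), both accepted structured_output_dict forms collapse to entries with a type, a description, and an optional default:

    structured_output_dict = {
        "summary": "str",                                                 # plain type string
        "issues": "list[dict]",                                           # plain type string
        "score": {"type": "int", "description": "0-100", "default": 0},  # detailed form
    }

    struct_params = {}
    for key, value in structured_output_dict.items():
        if isinstance(value, dict):
            entry = {"type": str(value.get("type") or "any"),
                     "description": value.get("description", "") or ""}
            if "default" in value:
                entry["default"] = value["default"]
        else:
            # Plain strings pass through; a type object would fall back to its __name__
            type_str = value if isinstance(value, str) else getattr(value, "__name__", "any")
            entry = {"type": type_str, "description": ""}
        struct_params[key] = entry

    # struct_params:
    # {"summary": {"type": "str", "description": ""},
    #  "issues": {"type": "list[dict]", "description": ""},
    #  "score": {"type": "int", "description": "0-100", "default": 0}}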