alita-sdk 0.3.376__py3-none-any.whl → 0.3.423__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Note: this version of alita-sdk has been flagged as potentially problematic.

Files changed (50)
  1. alita_sdk/configurations/bitbucket.py +95 -0
  2. alita_sdk/configurations/confluence.py +96 -1
  3. alita_sdk/configurations/gitlab.py +79 -0
  4. alita_sdk/configurations/jira.py +103 -0
  5. alita_sdk/configurations/testrail.py +88 -0
  6. alita_sdk/configurations/xray.py +93 -0
  7. alita_sdk/configurations/zephyr_enterprise.py +93 -0
  8. alita_sdk/configurations/zephyr_essential.py +75 -0
  9. alita_sdk/runtime/clients/client.py +3 -2
  10. alita_sdk/runtime/clients/sandbox_client.py +8 -0
  11. alita_sdk/runtime/langchain/assistant.py +41 -38
  12. alita_sdk/runtime/langchain/constants.py +4 -0
  13. alita_sdk/runtime/langchain/document_loaders/AlitaDocxMammothLoader.py +315 -3
  14. alita_sdk/runtime/langchain/document_loaders/AlitaJSONLoader.py +4 -1
  15. alita_sdk/runtime/langchain/document_loaders/constants.py +28 -12
  16. alita_sdk/runtime/langchain/langraph_agent.py +88 -27
  17. alita_sdk/runtime/langchain/utils.py +24 -4
  18. alita_sdk/runtime/toolkits/application.py +8 -1
  19. alita_sdk/runtime/toolkits/tools.py +80 -49
  20. alita_sdk/runtime/tools/__init__.py +7 -2
  21. alita_sdk/runtime/tools/application.py +7 -0
  22. alita_sdk/runtime/tools/function.py +20 -28
  23. alita_sdk/runtime/tools/graph.py +10 -4
  24. alita_sdk/runtime/tools/image_generation.py +104 -8
  25. alita_sdk/runtime/tools/llm.py +146 -114
  26. alita_sdk/runtime/tools/sandbox.py +166 -63
  27. alita_sdk/runtime/tools/vectorstore.py +3 -2
  28. alita_sdk/runtime/tools/vectorstore_base.py +4 -3
  29. alita_sdk/runtime/utils/utils.py +1 -0
  30. alita_sdk/tools/__init__.py +43 -31
  31. alita_sdk/tools/ado/work_item/ado_wrapper.py +17 -8
  32. alita_sdk/tools/base_indexer_toolkit.py +75 -66
  33. alita_sdk/tools/code_indexer_toolkit.py +13 -3
  34. alita_sdk/tools/confluence/api_wrapper.py +29 -7
  35. alita_sdk/tools/confluence/loader.py +10 -0
  36. alita_sdk/tools/elitea_base.py +7 -7
  37. alita_sdk/tools/gitlab/api_wrapper.py +8 -9
  38. alita_sdk/tools/jira/api_wrapper.py +1 -1
  39. alita_sdk/tools/openapi/__init__.py +10 -1
  40. alita_sdk/tools/qtest/api_wrapper.py +298 -51
  41. alita_sdk/tools/sharepoint/api_wrapper.py +104 -33
  42. alita_sdk/tools/sharepoint/authorization_helper.py +175 -1
  43. alita_sdk/tools/sharepoint/utils.py +8 -2
  44. alita_sdk/tools/utils/content_parser.py +27 -16
  45. alita_sdk/tools/vector_adapters/VectorStoreAdapter.py +19 -6
  46. {alita_sdk-0.3.376.dist-info → alita_sdk-0.3.423.dist-info}/METADATA +1 -1
  47. {alita_sdk-0.3.376.dist-info → alita_sdk-0.3.423.dist-info}/RECORD +50 -50
  48. {alita_sdk-0.3.376.dist-info → alita_sdk-0.3.423.dist-info}/WHEEL +0 -0
  49. {alita_sdk-0.3.376.dist-info → alita_sdk-0.3.423.dist-info}/licenses/LICENSE +0 -0
  50. {alita_sdk-0.3.376.dist-info → alita_sdk-0.3.423.dist-info}/top_level.txt +0 -0

alita_sdk/runtime/tools/function.py

@@ -7,7 +7,7 @@ from langchain_core.callbacks import dispatch_custom_event
 from langchain_core.messages import ToolCall
 from langchain_core.runnables import RunnableConfig
 from langchain_core.tools import BaseTool, ToolException
-from typing import Any, Optional, Union, Annotated
+from typing import Any, Optional, Union
 from langchain_core.utils.function_calling import convert_to_openai_tool
 from pydantic import ValidationError
 
@@ -31,31 +31,10 @@ class FunctionTool(BaseTool):
         """Prepare input for PyodideSandboxTool by injecting state into the code block."""
         # add state into the code block here since it might be changed during the execution of the code
         state_copy = deepcopy(state)
-        # pickle state
-        import pickle
 
         del state_copy['messages']  # remove messages to avoid issues with pickling without langchain-core
-        serialized_state = pickle.dumps(state_copy)
         # inject state into the code block as alita_state variable
-        pyodide_predata = f"""import pickle\nalita_state = pickle.loads({serialized_state})\n"""
-        # add classes related to sandbox client
-        # read the content of alita_sdk/runtime/cliens/sandbox_client.py
-        try:
-            import os
-            from pathlib import Path
-
-            # Get the directory of the current file and construct the path to sandbox_client.py
-            current_dir = Path(__file__).parent
-            sandbox_client_path = current_dir.parent / 'clients' / 'sandbox_client.py'
-
-            with open(sandbox_client_path, 'r') as f:
-                sandbox_client_code = f.read()
-            pyodide_predata += f"\n{sandbox_client_code}\n"
-            pyodide_predata += (f"alita_client = SandboxClient(base_url='{self.alita_client.base_url}',"
-                                f"project_id={self.alita_client.project_id},"
-                                f"auth_token='{self.alita_client.auth_token}')")
-        except FileNotFoundError:
-            logger.error(f"sandbox_client.py not found at {sandbox_client_path}. Ensure the file exists.")
+        pyodide_predata = f"#state dict\nalita_state = {state_copy}\n"
         return pyodide_predata
 
     def _handle_pyodide_output(self, tool_result: Any) -> dict:
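
The rewritten _prepare_pyodide_input drops pickle (and the embedded SandboxClient bootstrap) and injects the state as a plain dict literal. A minimal sketch of the preamble this now produces, assuming a JSON-like state (names and values below are illustrative):

from copy import deepcopy

# Illustrative state; the real one comes from the LangGraph run.
state = {"messages": ["<chat history>"], "task": "summarize", "attempt": 2}
state_copy = deepcopy(state)
del state_copy['messages']  # messages are stripped before injection

pyodide_predata = f"#state dict\nalita_state = {state_copy}\n"
print(pyodide_predata)
# #state dict
# alita_state = {'task': 'summarize', 'attempt': 2}

# Note: this relies on repr(state_copy) being valid Python source, which holds
# for JSON-like values but not for arbitrary objects.
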
@@ -64,6 +43,10 @@ class FunctionTool(BaseTool):
 
         if self.output_variables:
             for var in self.output_variables:
+                if var == "messages":
+                    tool_result_converted.update(
+                        {"messages": [{"role": "assistant", "content": dumps(tool_result)}]})
+                    continue
                 if isinstance(tool_result, dict) and var in tool_result:
                     tool_result_converted[var] = tool_result[var]
                 else:
@@ -111,7 +94,9 @@ class FunctionTool(BaseTool):
         # special handler for PyodideSandboxTool
         if self._is_pyodide_tool():
             code = func_args['code']
-            func_args['code'] = f"{self._prepare_pyodide_input(state)}\n{code}"
+            func_args['code'] = (f"{self._prepare_pyodide_input(state)}\n{code}"
+                                 # handle new lines in the code properly
+                                 .replace('\\n','\\\\n'))
         try:
             tool_result = self.tool.invoke(func_args, config, **kwargs)
             dispatch_custom_event(
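
The added .replace('\\n','\\\\n') doubles backslash-n escape sequences in the combined code so that escapes written inside the user's string literals survive transport into the sandbox. A minimal sketch of the effect (illustrative input):

code = 'print("a\\nb")'                # user code containing a literal \n escape
escaped = code.replace('\\n', '\\\\n')
print(escaped)                          # print("a\\nb")
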
@@ -131,14 +116,21 @@ class FunctionTool(BaseTool):
             if not self.output_variables:
                 return {"messages": [{"role": "assistant", "content": dumps(tool_result)}]}
             else:
-                if self.output_variables[0] == "messages":
-                    return {
+                if "messages" in self.output_variables:
+                    messages_dict = {
                         "messages": [{
                             "role": "assistant",
-                            "content": dumps(tool_result) if not isinstance(tool_result, ToolException) else str(
-                                tool_result)
+                            "content": dumps(tool_result) if not isinstance(tool_result, ToolException)
+                            else str(tool_result)
                         }]
                     }
+                    for var in self.output_variables:
+                        if var != "messages":
+                            if isinstance(tool_result, dict) and var in tool_result:
+                                messages_dict[var] = tool_result[var]
+                            else:
+                                messages_dict[var] = tool_result
+                    return messages_dict
                 else:
                     return { self.output_variables[0]: tool_result }
         except ValidationError:
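
Since "messages" is now matched anywhere in output_variables rather than only at index 0, one tool result can feed the chat stream and named state keys at the same time. A minimal re-statement of the fan-out for illustration, using json.dumps where the SDK uses its own dumps:

from json import dumps

def merge_outputs(tool_result, output_variables):
    # Sketch of the new behaviour: "messages" receives the serialized result,
    # every other variable gets the matching key (or the whole result).
    out = {"messages": [{"role": "assistant", "content": dumps(tool_result)}]}
    for var in output_variables:
        if var != "messages":
            if isinstance(tool_result, dict) and var in tool_result:
                out[var] = tool_result[var]
            else:
                out[var] = tool_result
    return out

print(merge_outputs({"status": "ok"}, ["messages", "status"]))
# {'messages': [{'role': 'assistant', 'content': '{"status": "ok"}'}], 'status': 'ok'}
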
alita_sdk/runtime/tools/graph.py

@@ -47,8 +47,8 @@ def formulate_query(kwargs):
 
 
 class GraphTool(BaseTool):
-    name: str
-    description: str
+    name: str = 'GraphTool'
+    description: str = 'Graph tool for tools'
     graph: CompiledStateGraph
     args_schema: Type[BaseModel] = graphToolSchema
     return_type: str = "str"
@@ -65,10 +65,16 @@ class GraphTool(BaseTool):
         all_kwargs = {**kwargs, **extras, **schema_values}
         if config is None:
             config = {}
-        return self._run(*config, **all_kwargs)
+        # Pass the config to the _run empty or the one passed from the parent executor.
+        return self._run(config, **all_kwargs)
 
     def _run(self, *args, **kwargs):
-        response = self.graph.invoke(formulate_query(kwargs))
+        config = None
+        # From invoke method we are passing only 1 arg so it is safe to do this condition and config assignment.
+        # Default to None is safe because it will be checked also on the langchain side.
+        if args:
+            config = args[0]
+        response = self.graph.invoke(formulate_query(kwargs), config=config)
         if self.return_type == "str":
             return response["output"]
         else:
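
The old call self._run(*config, **all_kwargs) unpacked the config dict into positional arguments (its keys), silently dropping it; passing it through to graph.invoke means callbacks and thread metadata now reach nested graphs. A hedged usage sketch; graph_tool and the config values are illustrative, not part of this diff:

# graph_tool: a GraphTool wrapping a compiled subgraph (assumed to exist).
config = {"configurable": {"thread_id": "demo-thread"}}  # callbacks would ride along here
result = graph_tool.invoke({"task": "review the changes"}, config=config)
# The nested graph run is now traced under the parent executor's config.
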
alita_sdk/runtime/tools/image_generation.py

@@ -2,16 +2,59 @@
 Image generation tool for Alita SDK.
 """
 import logging
-from typing import Optional, Type, Any
-from langchain_core.tools import BaseTool
-from pydantic import BaseModel, Field
+from typing import Optional, Type, Any, List, Literal
+from langchain_core.tools import BaseTool, BaseToolkit
+from pydantic import BaseModel, Field, create_model, ConfigDict
 
 logger = logging.getLogger(__name__)
 
+name = "image_generation"
+
+
+def get_tools(tools_list: list, alita_client=None, llm=None,
+              memory_store=None):
+    """
+    Get image generation tools for the provided tool configurations.
+
+    Args:
+        tools_list: List of tool configurations
+        alita_client: Alita client instance (required for image generation)
+        llm: LLM client instance (unused for image generation)
+        memory_store: Optional memory store instance (unused)
+
+    Returns:
+        List of image generation tools
+    """
+    all_tools = []
+
+    for tool in tools_list:
+        if (tool.get('type') == 'image_generation' or
+                tool.get('toolkit_name') == 'image_generation'):
+            try:
+                if not alita_client:
+                    logger.error("Alita client is required for image "
+                                 "generation tools")
+                    continue
+
+                toolkit_instance = ImageGenerationToolkit.get_toolkit(
+                    client=alita_client,
+                    toolkit_name=tool.get('toolkit_name', '')
+                )
+                all_tools.extend(toolkit_instance.get_tools())
+            except Exception as e:
+                logger.error(f"Error in image generation toolkit "
+                             f"get_tools: {e}")
+                logger.error(f"Tool config: {tool}")
+                raise
+
+    return all_tools
+
 
 class ImageGenerationInput(BaseModel):
     """Input schema for image generation tool."""
-    prompt: str = Field(description="Text prompt describing the image to generate")
+    prompt: str = Field(
+        description="Text prompt describing the image to generate"
+    )
     n: int = Field(
         default=1, description="Number of images to generate (1-10)",
         ge=1, le=10
@@ -22,7 +65,7 @@ class ImageGenerationInput(BaseModel):
     )
     quality: str = Field(
         default="auto",
-        description="Quality of the generated image ('low', 'medium', 'high', 'auto')"
+        description="Quality of the generated image ('low', 'medium', 'high')"
     )
     style: Optional[str] = Field(
         default=None, description="Style of the generated image (optional)"
@@ -69,7 +112,8 @@ class ImageGenerationTool(BaseTool):
             else:
                 content_chunks.append({
                     "type": "text",
-                    "text": f"Generated {len(images)} images for prompt: '{prompt}'"
+                    "text": f"Generated {len(images)} images for "
+                            f"prompt: '{prompt}'"
                 })
 
             # Add image content for each generated image
@@ -85,7 +129,8 @@ class ImageGenerationTool(BaseTool):
                 content_chunks.append({
                     "type": "image_url",
                     "image_url": {
-                        "url": f"data:image/png;base64,{image_data['b64_json']}"
+                        "url": f"data:image/png;base64,"
+                               f"{image_data['b64_json']}"
                     }
                 })
 
@@ -94,7 +139,8 @@ class ImageGenerationTool(BaseTool):
             # Fallback to text response if no images in result
             return [{
                 "type": "text",
-                "text": f"Image generation completed but no images returned: {result}"
+                "text": f"Image generation completed but no images "
+                        f"returned: {result}"
             }]
 
         except Exception as e:
@@ -114,3 +160,53 @@
 def create_image_generation_tool(client):
     """Create an image generation tool with the provided Alita client."""
     return ImageGenerationTool(client=client)
+
+
+class ImageGenerationToolkit(BaseToolkit):
+    """Toolkit for image generation tools."""
+    tools: List[BaseTool] = []
+
+    @staticmethod
+    def toolkit_config_schema() -> BaseModel:
+        """Get the configuration schema for the image generation toolkit."""
+        # Create sample tool to get schema
+        sample_tool = ImageGenerationTool(client=None)
+        selected_tools = {sample_tool.name: sample_tool.args_schema.schema()}
+
+        return create_model(
+            'image_generation',
+            selected_tools=(
+                List[Literal[tuple(selected_tools)]],
+                Field(
+                    default=[],
+                    json_schema_extra={'args_schemas': selected_tools}
+                )
+            ),
+            __config__=ConfigDict(json_schema_extra={
+                'metadata': {
+                    "label": "Image Generation",
+                    "icon_url": "image_generation.svg",
+                    "hidden": True,
+                    "categories": ["internal_tool"],
+                    "extra_categories": ["image generation"],
+                }
+            })
+        )
+
+    @classmethod
+    def get_toolkit(cls, client=None, **kwargs):
+        """
+        Get toolkit with image generation tools.
+
+        Args:
+            client: Alita client instance (required)
+            **kwargs: Additional arguments
+        """
+        if not client:
+            raise ValueError("Alita client is required for image generation")
+
+        tools = [ImageGenerationTool(client=client)]
+        return cls(tools=tools)
+
+    def get_tools(self):
+        return self.tools
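
A hedged usage sketch for the new toolkit, assuming an authenticated Alita client object:

# alita_client: an authenticated Alita client instance (assumed to exist).
toolkit = ImageGenerationToolkit.get_toolkit(client=alita_client)
tools = toolkit.get_tools()            # -> [ImageGenerationTool]

# Building without a client fails fast:
# ImageGenerationToolkit.get_toolkit() raises ValueError.

The module-level get_tools() appears to follow the same signature as the SDK's other tool modules, so image_generation can evidently be resolved from a tools_list configuration like any other toolkit.
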
alita_sdk/runtime/tools/llm.py

@@ -7,6 +7,7 @@ from langchain_core.runnables import RunnableConfig
 from langchain_core.tools import BaseTool, ToolException
 from pydantic import Field
 
+from ..langchain.constants import ELITEA_RS
 from ..langchain.utils import create_pydantic_model, propagate_the_input_mapping
 
 logger = logging.getLogger(__name__)
@@ -30,6 +31,7 @@ class LLMNode(BaseTool):
     structured_output: Optional[bool] = Field(default=False, description='Whether to use structured output')
     available_tools: Optional[List[BaseTool]] = Field(default=None, description='Available tools for binding')
     tool_names: Optional[List[str]] = Field(default=None, description='Specific tool names to filter')
+    steps_limit: Optional[int] = Field(default=25, description='Maximum steps for tool execution')
 
     def get_filtered_tools(self) -> List[BaseTool]:
         """
@@ -88,8 +90,11 @@ class LLMNode(BaseTool):
                 raise ToolException(f"LLMNode requires 'system' and 'task' parameters in input mapping. "
                                     f"Actual params: {func_args}")
             # cast to str in case user passes variable different from str
-            messages = [SystemMessage(content=str(func_args.get('system'))), HumanMessage(content=str(func_args.get('task')))]
-            messages.extend(func_args.get('chat_history', []))
+            messages = [SystemMessage(content=str(func_args.get('system'))), *func_args.get('chat_history', []), HumanMessage(content=str(func_args.get('task')))]
+            # Remove pre-last item if last two messages are same type and content
+            if len(messages) >= 2 and type(messages[-1]) == type(messages[-2]) and messages[-1].content == messages[-2].content:
+                messages.pop(-2)
         else:
             # Flow for chat-based LLM node w/o prompt/task from pipeline but with messages in state
             # verify messages structure
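
The reordering places chat history between the system prompt and the current task, and the new guard drops the task when it merely repeats the last history entry. A runnable sketch of both behaviours:

from langchain_core.messages import SystemMessage, HumanMessage

history = [HumanMessage(content="draft a plan")]
messages = [SystemMessage(content="You are a planner"),
            *history,
            HumanMessage(content="draft a plan")]  # task repeats the last history entry

# Same check as in the diff: drop the pre-last message when the last two
# share type and content.
if len(messages) >= 2 and type(messages[-1]) == type(messages[-2]) \
        and messages[-1].content == messages[-2].content:
    messages.pop(-2)

print([m.content for m in messages])  # ['You are a planner', 'draft a plan']
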
@@ -122,14 +127,25 @@ class LLMNode(BaseTool):
                 }
                 for key, value in (self.structured_output_dict or {}).items()
             }
+            # Add default output field for proper response to user
+            struct_params['elitea_response'] = {'description': 'final output to user', 'type': 'str'}
             struct_model = create_pydantic_model(f"LLMOutput", struct_params)
-            llm = llm_client.with_structured_output(struct_model)
-            completion = llm.invoke(messages, config=config)
-            result = completion.model_dump()
+            completion = llm_client.invoke(messages, config=config)
+            if hasattr(completion, 'tool_calls') and completion.tool_calls:
+                new_messages, _ = self.__perform_tool_calling(completion, messages, llm_client, config)
+                llm = self.__get_struct_output_model(llm_client, struct_model)
+                completion = llm.invoke(new_messages, config=config)
+                result = completion.model_dump()
+            else:
+                llm = self.__get_struct_output_model(llm_client, struct_model)
+                completion = llm.invoke(messages, config=config)
+                result = completion.model_dump()
 
             # Ensure messages are properly formatted
             if result.get('messages') and isinstance(result['messages'], list):
                 result['messages'] = [{'role': 'assistant', 'content': '\n'.join(result['messages'])}]
+            else:
+                result['messages'] = messages + [AIMessage(content=result.get(ELITEA_RS, ''))]
 
             return result
         else:
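
Every structured-output schema now carries an injected elitea_response field, which also backs the new messages fallback. A sketch of the generated model, assuming ELITEA_RS == 'elitea_response' (inferred from the injected key; the constant lives in ..langchain.constants) and one illustrative user-defined field:

from pydantic import BaseModel

class LLMOutput(BaseModel):   # shape create_pydantic_model would produce
    verdict: str              # user-defined structured_output_dict field (illustrative)
    elitea_response: str      # injected 'final output to user' field

result = LLMOutput(verdict="pass", elitea_response="All checks passed.").model_dump()
# result has no 'messages' key, so the node falls back to
# messages + [AIMessage(content=result.get(ELITEA_RS, ''))].
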
@@ -139,115 +155,15 @@ class LLMNode(BaseTool):
             # Handle both tool-calling and regular responses
             if hasattr(completion, 'tool_calls') and completion.tool_calls:
                 # Handle iterative tool-calling and execution
-                new_messages = messages + [completion]
-                max_iterations = 15
-                iteration = 0
-
-                # Continue executing tools until no more tool calls or max iterations reached
-                current_completion = completion
-                while (hasattr(current_completion, 'tool_calls') and
-                       current_completion.tool_calls and
-                       iteration < max_iterations):
-
-                    iteration += 1
-                    logger.info(f"Tool execution iteration {iteration}/{max_iterations}")
-
-                    # Execute each tool call in the current completion
-                    tool_calls = current_completion.tool_calls if hasattr(current_completion.tool_calls, '__iter__') else []
-
-                    for tool_call in tool_calls:
-                        tool_name = tool_call.get('name', '') if isinstance(tool_call, dict) else getattr(tool_call, 'name', '')
-                        tool_args = tool_call.get('args', {}) if isinstance(tool_call, dict) else getattr(tool_call, 'args', {})
-                        tool_call_id = tool_call.get('id', '') if isinstance(tool_call, dict) else getattr(tool_call, 'id', '')
-
-                        # Find the tool in filtered tools
-                        filtered_tools = self.get_filtered_tools()
-                        tool_to_execute = None
-                        for tool in filtered_tools:
-                            if tool.name == tool_name:
-                                tool_to_execute = tool
-                                break
-
-                        if tool_to_execute:
-                            try:
-                                logger.info(f"Executing tool '{tool_name}' with args: {tool_args}")
-                                tool_result = tool_to_execute.invoke(tool_args)
-
-                                # Create tool message with result - preserve structured content
-                                from langchain_core.messages import ToolMessage
-
-                                # Check if tool_result is structured content (list of dicts)
-                                # TODO: need solid check for being compatible with ToolMessage content format
-                                if isinstance(tool_result, list) and all(
-                                    isinstance(item, dict) and 'type' in item for item in tool_result
-                                ):
-                                    # Use structured content directly for multimodal support
-                                    tool_message = ToolMessage(
-                                        content=tool_result,
-                                        tool_call_id=tool_call_id
-                                    )
-                                else:
-                                    # Fallback to string conversion for other tool results
-                                    tool_message = ToolMessage(
-                                        content=str(tool_result),
-                                        tool_call_id=tool_call_id
-                                    )
-                                new_messages.append(tool_message)
-
-                            except Exception as e:
-                                logger.error(f"Error executing tool '{tool_name}': {e}")
-                                # Create error tool message
-                                from langchain_core.messages import ToolMessage
-                                tool_message = ToolMessage(
-                                    content=f"Error executing {tool_name}: {str(e)}",
-                                    tool_call_id=tool_call_id
-                                )
-                                new_messages.append(tool_message)
-                        else:
-                            logger.warning(f"Tool '{tool_name}' not found in available tools")
-                            # Create error tool message for missing tool
-                            from langchain_core.messages import ToolMessage
-                            tool_message = ToolMessage(
-                                content=f"Tool '{tool_name}' not available",
-                                tool_call_id=tool_call_id
-                            )
-                            new_messages.append(tool_message)
-
-                    # Call LLM again with tool results to get next response
-                    try:
-                        current_completion = llm_client.invoke(new_messages, config=config)
-                        new_messages.append(current_completion)
-
-                        # Check if we still have tool calls
-                        if hasattr(current_completion, 'tool_calls') and current_completion.tool_calls:
-                            logger.info(f"LLM requested {len(current_completion.tool_calls)} more tool calls")
-                        else:
-                            logger.info("LLM completed without requesting more tools")
-                            break
-
-                    except Exception as e:
-                        logger.error(f"Error in LLM call during iteration {iteration}: {e}")
-                        # Add error message and break the loop
-                        error_msg = f"Error processing tool results in iteration {iteration}: {str(e)}"
-                        new_messages.append(AIMessage(content=error_msg))
-                        break
-
-                # Log completion status
-                if iteration >= max_iterations:
-                    logger.warning(f"Reached maximum iterations ({max_iterations}) for tool execution")
-                    # Add a warning message to the chat
-                    warning_msg = f"Maximum tool execution iterations ({max_iterations}) reached. Stopping tool execution."
-                    new_messages.append(AIMessage(content=warning_msg))
-                else:
-                    logger.info(f"Tool execution completed after {iteration} iterations")
+                new_messages, current_completion = self.__perform_tool_calling(completion, messages, llm_client, config)
 
-                return {"messages": new_messages}
+                output_msgs = {"messages": new_messages}
+                if self.output_variables:
+                    if self.output_variables[0] == 'messages':
+                        return output_msgs
+                    output_msgs[self.output_variables[0]] = current_completion.content if current_completion else None
+
+                return output_msgs
             else:
                 # Regular text response
                 content = completion.content.strip() if hasattr(completion, 'content') else str(completion)
@@ -273,4 +189,120 @@ class LLMNode(BaseTool):
 
     def _run(self, *args, **kwargs):
         # Legacy support for old interface
-        return self.invoke(kwargs, **kwargs)
+        return self.invoke(kwargs, **kwargs)
+
+    def __perform_tool_calling(self, completion, messages, llm_client, config):
+        # Handle iterative tool-calling and execution
+        new_messages = messages + [completion]
+        iteration = 0
+
+        # Continue executing tools until no more tool calls or max iterations reached
+        current_completion = completion
+        while (hasattr(current_completion, 'tool_calls') and
+               current_completion.tool_calls and
+               iteration < self.steps_limit):
+
+            iteration += 1
+            logger.info(f"Tool execution iteration {iteration}/{self.steps_limit}")
+
+            # Execute each tool call in the current completion
+            tool_calls = current_completion.tool_calls if hasattr(current_completion.tool_calls, '__iter__') else []
+
+            for tool_call in tool_calls:
+                tool_name = tool_call.get('name', '') if isinstance(tool_call, dict) else getattr(tool_call, 'name', '')
+                tool_args = tool_call.get('args', {}) if isinstance(tool_call, dict) else getattr(tool_call, 'args', {})
+                tool_call_id = tool_call.get('id', '') if isinstance(tool_call, dict) else getattr(tool_call, 'id', '')
+
+                # Find the tool in filtered tools
+                filtered_tools = self.get_filtered_tools()
+                tool_to_execute = None
+                for tool in filtered_tools:
+                    if tool.name == tool_name:
+                        tool_to_execute = tool
+                        break
+
+                if tool_to_execute:
+                    try:
+                        logger.info(f"Executing tool '{tool_name}' with args: {tool_args}")
+                        # Pass the underlying config to the tool execution invoke method
+                        # since it may be another agent, graph, etc. to see it properly in thinking steps
+                        tool_result = tool_to_execute.invoke(tool_args, config=config)
+
+                        # Create tool message with result - preserve structured content
+                        from langchain_core.messages import ToolMessage
+
+                        # Check if tool_result is structured content (list of dicts)
+                        # TODO: need solid check for being compatible with ToolMessage content format
+                        if isinstance(tool_result, list) and all(
+                            isinstance(item, dict) and 'type' in item for item in tool_result
+                        ):
+                            # Use structured content directly for multimodal support
+                            tool_message = ToolMessage(
+                                content=tool_result,
+                                tool_call_id=tool_call_id
+                            )
+                        else:
+                            # Fallback to string conversion for other tool results
+                            tool_message = ToolMessage(
+                                content=str(tool_result),
+                                tool_call_id=tool_call_id
+                            )
+                        new_messages.append(tool_message)
+
+                    except Exception as e:
+                        logger.error(f"Error executing tool '{tool_name}': {e}")
+                        # Create error tool message
+                        from langchain_core.messages import ToolMessage
+                        tool_message = ToolMessage(
+                            content=f"Error executing {tool_name}: {str(e)}",
+                            tool_call_id=tool_call_id
+                        )
+                        new_messages.append(tool_message)
+                else:
+                    logger.warning(f"Tool '{tool_name}' not found in available tools")
+                    # Create error tool message for missing tool
+                    from langchain_core.messages import ToolMessage
+                    tool_message = ToolMessage(
+                        content=f"Tool '{tool_name}' not available",
+                        tool_call_id=tool_call_id
+                    )
+                    new_messages.append(tool_message)
+
+            # Call LLM again with tool results to get next response
+            try:
+                current_completion = llm_client.invoke(new_messages, config=config)
+                new_messages.append(current_completion)
+
+                # Check if we still have tool calls
+                if hasattr(current_completion, 'tool_calls') and current_completion.tool_calls:
+                    logger.info(f"LLM requested {len(current_completion.tool_calls)} more tool calls")
+                else:
+                    logger.info("LLM completed without requesting more tools")
+                    break
+
+            except Exception as e:
+                logger.error(f"Error in LLM call during iteration {iteration}: {e}")
+                # Add error message and break the loop
+                error_msg = f"Error processing tool results in iteration {iteration}: {str(e)}"
+                new_messages.append(AIMessage(content=error_msg))
+                break
+
+        # Log completion status
+        if iteration >= self.steps_limit:
+            logger.warning(f"Reached maximum iterations ({self.steps_limit}) for tool execution")
+            # Add a warning message to the chat
+            warning_msg = f"Maximum tool execution iterations ({self.steps_limit}) reached. Stopping tool execution."
+            new_messages.append(AIMessage(content=warning_msg))
+        else:
+            logger.info(f"Tool execution completed after {iteration} iterations")
+
+        return new_messages, current_completion
+
+    def __get_struct_output_model(self, llm_client, pydantic_model):
+        return llm_client.with_structured_output(pydantic_model)