alita-sdk 0.3.182__py3-none-any.whl → 0.3.184__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,46 +1,148 @@
+import json
 import logging
 from traceback import format_exc
-from typing import Any, Optional, Dict, List
+from typing import Any, Optional, Dict, List, Union
 
-from langchain_core.messages import HumanMessage, BaseMessage
+from langchain_core.messages import HumanMessage, BaseMessage, SystemMessage, AIMessage
 from langchain_core.tools import BaseTool, ToolException
+from langchain_core.runnables import RunnableConfig
+from pydantic import Field
 
 from ..langchain.utils import _extract_json, create_pydantic_model, create_params
 
 logger = logging.getLogger(__name__)
 
 
-def create_llm_input(prompt: Dict[str, str], params: Dict[str, Any], kwargs: Dict[str, Any]) -> list[BaseMessage]:
-    logger.info(f"Creating LLM input with prompt: {prompt}, params: {params}, kwargs: {kwargs}")
-    if prompt.get('type') == 'fstring' and params:
+def create_llm_input_with_messages(
+        prompt: Dict[str, str],
+        messages: List[BaseMessage],
+        params: Dict[str, Any]
+) -> List[BaseMessage]:
+    """
+    Create LLM input by combining system prompt with chat history messages.
+
+    Args:
+        prompt: The prompt configuration with template
+        messages: List of chat history messages
+        params: Additional parameters for prompt formatting
+
+    Returns:
+        List of messages to send to LLM
+    """
+    logger.info(f"Creating LLM input with messages: {len(messages)} messages, params: {params}")
+
+    # Build the input messages
+    input_messages = []
+
+    # Add system message from prompt if available
+    if prompt and prompt.get('template'):
         try:
-            return [HumanMessage(content=prompt['value'].format(**params))]
+            system_content = prompt['template'].format(**params) if params else prompt['template']
+            input_messages.append(SystemMessage(content=system_content))
         except KeyError as e:
-            error_msg = f"KeyError in input formatting - make sure you have added all required state variables as input to the node: {e}"
+            error_msg = f"KeyError in prompt formatting: {e}. Available params: {list(params.keys())}"
             logger.error(error_msg)
             raise ToolException(error_msg)
-    else:
-        return kwargs.get("messages") + [HumanMessage(prompt['value'])]
+
+    # Add the chat history messages
+    if messages:
+        input_messages.extend(messages)
+
+    return input_messages
 
 
 class LLMNode(BaseTool):
-    name: str = 'LLMNode'
-    prompt: Dict[str, str]
-    description: str = 'This is tool node for LLM'
-    client: Any = None
-    return_type: str = "str"
-    response_key: str = "messages"
-    structured_output_dict: Optional[dict[str, str]] = None
-    output_variables: Optional[List[str]] = None
-    input_variables: Optional[List[str]] = None
-    structured_output: Optional[bool] = False
+    """Enhanced LLM node with chat history and tool binding support"""
+
+    # Override BaseTool required fields
+    name: str = Field(default='LLMNode', description='Name of the LLM node')
+    description: str = Field(default='This is tool node for LLM with chat history and tool support', description='Description of the LLM node')
+
+    # LLM-specific fields
+    prompt: Dict[str, str] = Field(default_factory=dict, description='Prompt configuration')
+    client: Any = Field(default=None, description='LLM client instance')
+    return_type: str = Field(default="str", description='Return type')
+    response_key: str = Field(default="messages", description='Response key')
+    structured_output_dict: Optional[dict[str, str]] = Field(default=None, description='Structured output dictionary')
+    output_variables: Optional[List[str]] = Field(default=None, description='Output variables')
+    input_variables: Optional[List[str]] = Field(default=None, description='Input variables')
+    structured_output: Optional[bool] = Field(default=False, description='Whether to use structured output')
+    available_tools: Optional[List[BaseTool]] = Field(default=None, description='Available tools for binding')
+    tool_names: Optional[List[str]] = Field(default=None, description='Specific tool names to filter')
 
-    def _run(self, *args, **kwargs):
-        params = create_params(self.input_variables, kwargs)
-        logger.info(f"LLM Node params: {params}")
-        llm_input = create_llm_input(self.prompt, params, kwargs)
+    def get_filtered_tools(self) -> List[BaseTool]:
+        """
+        Filter available tools based on tool_names list.
+
+        Returns:
+            List of filtered tools
+        """
+        if not self.available_tools:
+            return []
+
+        if not self.tool_names:
+            # If no specific tool names provided, return all available tools
+            return self.available_tools
+
+        # Filter tools by name
+        filtered_tools = []
+        available_tool_names = {tool.name: tool for tool in self.available_tools}
+
+        for tool_name in self.tool_names:
+            if tool_name in available_tool_names:
+                filtered_tools.append(available_tool_names[tool_name])
+                logger.debug(f"Added tool '{tool_name}' to LLM node")
+            else:
+                logger.warning(f"Tool '{tool_name}' not found in available tools: {list(available_tool_names.keys())}")
+
+        return filtered_tools
+
+    def invoke(
+        self,
+        state: Union[str, dict],
+        config: Optional[RunnableConfig] = None,
+        **kwargs: Any,
+    ) -> dict:
+        """
+        Invoke the LLM node with proper message handling and tool binding.
+
+        Args:
+            state: The current state containing messages and other variables
+            config: Optional runnable config
+            **kwargs: Additional keyword arguments
+
+        Returns:
+            Updated state with LLM response
+        """
+        # Extract messages from state
+        messages = state.get("messages", []) if isinstance(state, dict) else []
+        logger.info(f"Invoking LLMNode with {len(messages)} messages")
+        logger.info("Messages: %s", messages)
+
+        # Create parameters for prompt formatting from state
+        params = {}
+        if isinstance(state, dict):
+            for var in self.input_variables or []:
+                if var != "messages" and var in state:
+                    params[var] = state[var]
+
+        # Create LLM input with proper message handling
+        llm_input = create_llm_input_with_messages(self.prompt, messages, params)
+
+        # Get the LLM client, potentially with tools bound
+        llm_client = self.client
+
+        if len(self.tool_names or []) > 0:
+            filtered_tools = self.get_filtered_tools()
+            if filtered_tools:
+                logger.info(f"Binding {len(filtered_tools)} tools to LLM: {[t.name for t in filtered_tools]}")
+                llm_client = self.client.bind_tools(filtered_tools)
+            else:
+                logger.warning("No tools to bind to LLM")
+
         try:
-            if self.structured_output and len(self.output_variables) > 0:
+            if self.structured_output and self.output_variables:
+                # Handle structured output
                 struct_params = {
                     key: {
                         "type": 'list[str]' if 'list' in value else value,
@@ -48,27 +150,144 @@ class LLMNode(BaseTool):
                     }
                     for key, value in (self.structured_output_dict or {}).items()
                 }
-                stuct_model = create_pydantic_model(f"LLMOutput", struct_params)
-                llm = self.client.with_structured_output(stuct_model)
-                completion = llm.invoke(llm_input)
+                struct_model = create_pydantic_model(f"LLMOutput", struct_params)
+                llm = llm_client.with_structured_output(struct_model)
+                completion = llm.invoke(llm_input, config=config)
                 result = completion.model_dump()
+
+                # Ensure messages are properly formatted
                 if result.get('messages') and isinstance(result['messages'], list):
                     result['messages'] = [{'role': 'assistant', 'content': '\n'.join(result['messages'])}]
+
                 return result
             else:
-                completion = self.client.invoke(llm_input)
-                result = completion.content.strip()
-                response = _extract_json(result) or {}
-                response_data = {key: response[key] for key in response if key in self.output_variables}
-                if not response_data.get('messages'):
-                    response_data['messages'] = [
-                        {"role": "assistant", "content": response_data.get(self.response_key) or result}]
-                return response_data
-        except ValueError:
-            if self.output_variables:
-                return {self.output_variables[0]: result, "messages": [{"role": "assistant", "content": result}]}
-            else:
-                return {"messages": [{"role": "assistant", "content": result}]}
+                # Handle regular completion
+                completion = llm_client.invoke(llm_input, config=config)
+                logger.info(f"Initial completion: {completion}")
+                # Handle both tool-calling and regular responses
+                if hasattr(completion, 'tool_calls') and completion.tool_calls:
+                    # Handle iterative tool-calling and execution
+                    new_messages = messages + [completion]
+                    max_iterations = 15
+                    iteration = 0
+
+                    # Continue executing tools until no more tool calls or max iterations reached
+                    current_completion = completion
+                    while (hasattr(current_completion, 'tool_calls') and
+                           current_completion.tool_calls and
+                           iteration < max_iterations):
+
+                        iteration += 1
+                        logger.info(f"Tool execution iteration {iteration}/{max_iterations}")
+
+                        # Execute each tool call in the current completion
+                        tool_calls = current_completion.tool_calls if hasattr(current_completion.tool_calls, '__iter__') else []
+
+                        for tool_call in tool_calls:
+                            tool_name = tool_call.get('name', '') if isinstance(tool_call, dict) else getattr(tool_call, 'name', '')
+                            tool_args = tool_call.get('args', {}) if isinstance(tool_call, dict) else getattr(tool_call, 'args', {})
+                            tool_call_id = tool_call.get('id', '') if isinstance(tool_call, dict) else getattr(tool_call, 'id', '')
+
+                            # Find the tool in filtered tools
+                            filtered_tools = self.get_filtered_tools()
+                            tool_to_execute = None
+                            for tool in filtered_tools:
+                                if tool.name == tool_name:
+                                    tool_to_execute = tool
+                                    break
+
+                            if tool_to_execute:
+                                try:
+                                    logger.info(f"Executing tool '{tool_name}' with args: {tool_args}")
+                                    tool_result = tool_to_execute.invoke(tool_args)
+
+                                    # Create tool message with result
+                                    from langchain_core.messages import ToolMessage
+                                    tool_message = ToolMessage(
+                                        content=str(tool_result),
+                                        tool_call_id=tool_call_id
+                                    )
+                                    new_messages.append(tool_message)
+
+                                except Exception as e:
+                                    logger.error(f"Error executing tool '{tool_name}': {e}")
+                                    # Create error tool message
+                                    from langchain_core.messages import ToolMessage
+                                    tool_message = ToolMessage(
+                                        content=f"Error executing {tool_name}: {str(e)}",
+                                        tool_call_id=tool_call_id
+                                    )
+                                    new_messages.append(tool_message)
+                            else:
+                                logger.warning(f"Tool '{tool_name}' not found in available tools")
+                                # Create error tool message for missing tool
+                                from langchain_core.messages import ToolMessage
+                                tool_message = ToolMessage(
+                                    content=f"Tool '{tool_name}' not available",
+                                    tool_call_id=tool_call_id
+                                )
+                                new_messages.append(tool_message)
+
+                        # Call LLM again with tool results to get next response
+                        try:
+                            current_completion = llm_client.invoke(new_messages, config=config)
+                            new_messages.append(current_completion)
+
+                            # Check if we still have tool calls
+                            if hasattr(current_completion, 'tool_calls') and current_completion.tool_calls:
+                                logger.info(f"LLM requested {len(current_completion.tool_calls)} more tool calls")
+                            else:
+                                logger.info("LLM completed without requesting more tools")
+                                break
+
+                        except Exception as e:
+                            logger.error(f"Error in LLM call during iteration {iteration}: {e}")
+                            # Add error message and break the loop
+                            error_msg = f"Error processing tool results in iteration {iteration}: {str(e)}"
+                            new_messages.append(AIMessage(content=error_msg))
+                            break
+
+                    # Log completion status
+                    if iteration >= max_iterations:
+                        logger.warning(f"Reached maximum iterations ({max_iterations}) for tool execution")
+                        # Add a warning message to the chat
+                        warning_msg = f"Maximum tool execution iterations ({max_iterations}) reached. Stopping tool execution."
+                        new_messages.append(AIMessage(content=warning_msg))
+                    else:
+                        logger.info(f"Tool execution completed after {iteration} iterations")
+
+                    return {"messages": new_messages}
+                else:
+                    # Regular text response
+                    content = completion.content.strip() if hasattr(completion, 'content') else str(completion)
+
+                    # Try to extract JSON if output variables are specified (but exclude 'messages' which is handled separately)
+                    json_output_vars = [var for var in (self.output_variables or []) if var != 'messages']
+                    if json_output_vars:
+                        try:
+                            response = _extract_json(content) or {}
+                            response_data = {key: response.get(key) for key in json_output_vars if key in response}
+
+                            # Always add the messages to the response
+                            new_messages = messages + [AIMessage(content=content)]
+                            response_data['messages'] = new_messages
+
+                            return response_data
+                        except (ValueError, json.JSONDecodeError) as e:
+                            # LLM returned non-JSON content, treat as plain text
+                            logger.warning(f"Expected JSON output but got plain text. Output variables specified: {json_output_vars}. Error: {e}")
+                            # Fall through to plain text handling
+
+                    # Simple text response (either no output variables or JSON parsing failed)
+                    new_messages = messages + [AIMessage(content=content)]
+                    return {"messages": new_messages}
+
         except Exception as e:
             logger.error(f"Error in LLM Node: {format_exc()}")
-            return {"messages": [{"role": "assistant", "content": f"Error: {e}"}]}
+            error_msg = f"Error: {e}"
+            new_messages = messages + [AIMessage(content=error_msg)]
+            return {"messages": new_messages}
+
+    def _run(self, *args, **kwargs):
+        # Legacy support for old interface
+        return self.invoke(kwargs, **kwargs)
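Taken together, the node now consumes a LangGraph-style state dict and returns an updated `messages` list instead of formatting a single `HumanMessage`. A minimal usage sketch, assuming `chat_model` is any LangChain chat model that supports `bind_tools` and `search_tool` is a `BaseTool` named `search` (both are placeholders, not part of this diff):

```python
from langchain_core.messages import HumanMessage

node = LLMNode(
    client=chat_model,                                   # assumed chat model instance
    prompt={'template': 'You are a {role} assistant.'},
    input_variables=['role'],
    available_tools=[search_tool],                       # assumed BaseTool
    tool_names=['search'],
)

state = {"role": "release", "messages": [HumanMessage(content="What changed?")]}
result = node.invoke(state)
# result["messages"] holds the original history plus the AI reply and any
# ToolMessages produced by the tool-execution loop (capped at 15 iterations).
```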
@@ -35,6 +35,21 @@ class McpServerTool(BaseTool):
             field_type = float
         elif field_type == 'boolean':
             field_type = bool
+        elif field_type == 'object':  # Dict[str, Any]
+            nested_model = McpServerTool.create_pydantic_model_from_schema(field_info)
+            field_type = nested_model
+        elif field_type == 'array':
+            item_schema = field_info['items']
+            item_type = McpServerTool.create_pydantic_model_from_schema(item_schema) if item_schema['type'] == 'object' else (
+                str if item_schema['type'] == 'string' else
+                int if item_schema['type'] == 'integer' else
+                float if item_schema['type'] == 'number' else
+                bool if item_schema['type'] == 'boolean' else
+                None
+            )
+            if item_type is None:
+                raise ValueError(f"Unsupported array item type: {item_schema['type']}")
+            field_type = list[item_type]
         else:
             raise ValueError(f"Unsupported field type: {field_type}")
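For reference, a sketch of the kind of JSON schema the two new branches can translate (the schema itself is illustrative):

```python
schema = {
    "type": "object",
    "properties": {
        "tags": {"type": "array", "items": {"type": "string"}},   # -> list[str]
        "owner": {                                                # -> nested pydantic model
            "type": "object",
            "properties": {"name": {"type": "string"}},
        },
    },
}
# Nested 'object' schemas recurse through create_pydantic_model_from_schema;
# an array whose items declare an unsupported type raises ValueError.
```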
@@ -14,7 +14,7 @@ logger = logging.getLogger(__name__)
 
 ai_icon = b'<plain_txt_msg:img>iVBORw0KGgo... (base64 PNG icon data, unchanged; elided) ...<!plain_txt_msg>'
 user_icon = b'<plain_txt_msg:img>iVBORw0KGgo... (base64 PNG icon data, unchanged; elided) ...<!plain_txt_msg>'
-agent_types = ["pipeline", "react", "xml", "openai"]
+agent_types = ["pipeline", "react", "xml", "openai", "predict"]
 
 def img_to_txt(filename):
     msg = b"<plain_txt_msg:img>"
@@ -1034,6 +1034,7 @@ def run_streamlit(st, ai_icon=None, user_icon=None):
         st.session_state.messages.append({"role": "user", "content": prompt})
         with st.chat_message("assistant", avatar=ai_icon):
             st_cb = AlitaStreamlitCallback(st)
+            logger.info(st.session_state.messages)
            response = st.session_state.agent_executor.invoke(
                 {"input": prompt, "chat_history": st.session_state.messages[:-1]},
                 { 'callbacks': [st_cb], 'configurable': {"thread_id": st.session_state.thread_id}}
@@ -79,6 +79,7 @@ _safe_import_tool('pptx', 'pptx', 'get_tools', 'PPTXToolkit')
 _safe_import_tool('postman', 'postman', 'get_tools', 'PostmanToolkit')
 _safe_import_tool('memory', 'memory', 'get_tools', 'MemoryToolkit')
 _safe_import_tool('zephyr_squad', 'zephyr_squad', 'get_tools', 'ZephyrSquadToolkit')
+_safe_import_tool('slack', 'slack', 'get_tools', 'SlackToolkit')
 
 # Log import summary
 available_count = len(AVAILABLE_TOOLS)
@@ -10,7 +10,9 @@ from langchain_core.tools import ToolException
 from msrest.authentication import BasicAuthentication
 from pydantic import create_model, PrivateAttr, model_validator, SecretStr
 from pydantic.fields import FieldInfo as Field
+import xml.etree.ElementTree as ET
 
+from ..work_item import AzureDevOpsApiWrapper
 from ...elitea_base import BaseToolApiWrapper
 
 logger = logging.getLogger(__name__)
@@ -101,6 +103,16 @@ TestCaseAddModel = create_model(
     suite_id=(int, Field(description="ID of the test suite to which test cases are to be added"))
 )
 
+TestCaseCreateModel = create_model(
+    "TestCaseCreateModel",
+    project=(str, Field(description="Project ID or project name")),
+    plan_id=(int, Field(description="ID of the test plan to which test cases are to be added")),
+    suite_id=(int, Field(description="ID of the test suite to which test cases are to be added")),
+    title=(str, Field(description="Test case title")),
+    description=(str, Field(description="Test case description")),
+    test_steps=(str, Field(description="""Json array with test steps. Example: [{"action": "Some action", "expectedResult": "Some expectation"},...]""")),
+)
+
 TestCaseGetModel = create_model(
     "TestCaseGetModel",
     project=(str, Field(description="Project ID or project name")),
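Note that `test_steps` is a JSON string rather than a structured argument; a hedged example of a payload matching the schema's own docstring (step texts are illustrative):

```python
import json

test_steps = json.dumps([
    {"action": "Open the login page", "expectedResult": "Login form is displayed"},
    {"action": "Submit valid credentials", "expectedResult": "Dashboard loads"},
])
```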
@@ -202,17 +214,51 @@ class TestPlanApiWrapper(BaseToolApiWrapper):
             logger.error(f"Error getting test suite(s): {e}")
             return ToolException(f"Error getting test suite(s): {e}")
 
-    def add_test_case(self, suite_test_case_create_update_parameters: str, project: str, plan_id: int, suite_id: int):
+    def add_test_case(self, suite_test_case_create_update_parameters, project: str, plan_id: int, suite_id: int):
         """Add a test case to a suite in Azure DevOps."""
         try:
-            params = json.loads(suite_test_case_create_update_parameters)
-            suite_test_case_create_update_params_obj = [SuiteTestCaseCreateUpdateParameters(**param) for param in params]
+            if isinstance(suite_test_case_create_update_parameters, str):
+                suite_test_case_create_update_parameters = json.loads(suite_test_case_create_update_parameters)
+            suite_test_case_create_update_params_obj = [SuiteTestCaseCreateUpdateParameters(**param) for param in suite_test_case_create_update_parameters]
             test_cases = self._client.add_test_cases_to_suite(suite_test_case_create_update_params_obj, project, plan_id, suite_id)
             return [test_case.as_dict() for test_case in test_cases]
         except Exception as e:
             logger.error(f"Error adding test case: {e}")
             return ToolException(f"Error adding test case: {e}")
 
+    def create_test_case(self, project: str, plan_id: int, suite_id: int, title: str, description: str, test_steps: str):
+        """Creates a new test case in specified suite in Azure DevOps."""
+        work_item_wrapper = AzureDevOpsApiWrapper(organization_url=self.organization_url, token=self.token.get_secret_value(), project=project)
+        work_item_json = self.build_ado_test_case(title, description, json.loads(test_steps))
+        created_work_item_id = work_item_wrapper.create_work_item(work_item_json=json.dumps(work_item_json), wi_type="Test Case")['id']
+        return self.add_test_case([{"work_item": {"id": created_work_item_id}}], project, plan_id, suite_id)
+
+    def build_ado_test_case(self, title, description, steps):
+        """
+        :param title: test title
+        :param description: test description
+        :param steps: steps [{"action": ..., "expectedResult": ...}, ...]
+        :return: JSON with ADO fields
+        """
+        steps_elem = ET.Element("steps")
+
+        for idx, step in enumerate(steps, start=1):
+            step_elem = ET.SubElement(steps_elem, "step", id=str(idx), type="Action")
+            action_elem = ET.SubElement(step_elem, "parameterizedString", isformatted="true")
+            action_elem.text = step["action"]
+            expected_elem = ET.SubElement(step_elem, "parameterizedString", isformatted="true")
+            expected_elem.text = step["expectedResult"]
+
+        steps_xml = ET.tostring(steps_elem, encoding="unicode")
+
+        return {
+            "fields": {
+                "System.Title": title,
+                "System.Description": description,
+                "Microsoft.VSTS.TCM.Steps": steps_xml
+            }
+        }
+
     def get_test_case(self, project: str, plan_id: int, suite_id: int, test_case_id: str):
         """Get a test case from a suite in Azure DevOps."""
         try:
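`create_test_case` chains the two wrappers: it builds an ADO work item payload, creates a "Test Case" work item, then attaches it to the suite through the relaxed `add_test_case`. A sketch of a call and of the steps XML that `build_ado_test_case` produces (assuming `wrapper` is a configured `TestPlanApiWrapper` and `test_steps` is the two-step payload shown earlier; `ET.tostring` emits the XML unindented, wrapped here for readability):

```python
wrapper.create_test_case(
    project="MyProject", plan_id=1, suite_id=2,
    title="Login works", description="Smoke test", test_steps=test_steps,
)
# Microsoft.VSTS.TCM.Steps is filled roughly as:
# <steps>
#   <step id="1" type="Action">
#     <parameterizedString isformatted="true">Open the login page</parameterizedString>
#     <parameterizedString isformatted="true">Login form is displayed</parameterizedString>
#   </step>
#   <step id="2" type="Action">...</step>
# </steps>
```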
@@ -280,6 +326,12 @@ class TestPlanApiWrapper(BaseToolApiWrapper):
                 "args_schema": TestCaseAddModel,
                 "ref": self.add_test_case,
             },
+            {
+                "name": "create_test_case",
+                "description": self.create_test_case.__doc__,
+                "args_schema": TestCaseCreateModel,
+                "ref": self.create_test_case,
+            },
             {
                 "name": "get_test_case",
                 "description": self.get_test_case.__doc__,
@@ -150,14 +150,15 @@ class AzureDevOpsApiWrapper(BaseToolApiWrapper):
 
         return parsed_items
 
-    def _transform_work_item(self, work_item_json: str):
+    def _transform_work_item(self, work_item_json):
         try:
             # Convert the input JSON to a Python dictionary
-            params = json.loads(work_item_json)
+            if isinstance(work_item_json, str):
+                work_item_json = json.loads(work_item_json)
         except (json.JSONDecodeError, ValueError) as e:
             raise ToolException(f"Issues during attempt to parse work_item_json: {e}")
 
-        if 'fields' not in params:
+        if 'fields' not in work_item_json:
             raise ToolException("The 'fields' property is missing from the work_item_json.")
 
         # Transform the dictionary into a list of JsonPatchOperation objects
@@ -167,11 +168,11 @@ class AzureDevOpsApiWrapper(BaseToolApiWrapper):
                 "path": f"/fields/{field}",
                 "value": value
             }
-            for field, value in params["fields"].items()
+            for field, value in work_item_json["fields"].items()
         ]
         return patch_document
 
-    def create_work_item(self, work_item_json: str, wi_type="Task"):
+    def create_work_item(self, work_item_json, wi_type="Task"):
         """Create a work item in Azure DevOps."""
         try:
             patch_document = self._transform_work_item(work_item_json)
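`_transform_work_item` now accepts either a JSON string or an already-parsed dict; both paths yield the same JSON-patch document. A sketch of the transformation (the `op` value lives on unchanged lines just above this hunk, so it is assumed here):

```python
work_item_json = {"fields": {"System.Title": "Login works", "System.Description": "Smoke test"}}
# _transform_work_item(work_item_json) -> one patch operation per field:
# [
#     {"op": "add", "path": "/fields/System.Title", "value": "Login works"},
#     {"op": "add", "path": "/fields/System.Description", "value": "Smoke test"},
# ]
```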
@@ -185,7 +186,10 @@ class AzureDevOpsApiWrapper(BaseToolApiWrapper):
                 project=self.project,
                 type=wi_type
             )
-            return f"Work item {work_item.id} created successfully. View it at {work_item.url}."
+            return {
+                "id": work_item.id,
+                "message": f"Work item {work_item.id} created successfully. View it at {work_item.url}."
+            }
         except Exception as e:
             if "unknown value" in str(e):
                 logger.error(f"Unable to create work item due to incorrect assignee: {e}")
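Returning a dict instead of a bare string is what lets the new `create_test_case` read the created work item's ID directly (`wrapper` here is an `AzureDevOpsApiWrapper` instance, named for illustration):

```python
result = wrapper.create_work_item(work_item_json, wi_type="Test Case")
work_item_id = result["id"]   # previously this required parsing the message string
print(result["message"])      # "Work item 123 created successfully. View it at ..."
```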
@@ -70,6 +70,9 @@ class CarrierAPIWrapper(BaseModel):
     def get_integrations(self, name: str):
         return self._client.get_integrations(name)
 
+    def get_available_locations(self):
+        return self._client.get_available_locations()
+
     def run_test(self, test_id: str, json_body):
         return self._client.run_test(test_id, json_body)