alita-sdk 0.3.375__py3-none-any.whl → 0.3.417__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47)
  1. alita_sdk/configurations/bitbucket.py +95 -0
  2. alita_sdk/configurations/confluence.py +96 -1
  3. alita_sdk/configurations/gitlab.py +79 -0
  4. alita_sdk/configurations/jira.py +103 -0
  5. alita_sdk/configurations/testrail.py +88 -0
  6. alita_sdk/configurations/xray.py +93 -0
  7. alita_sdk/configurations/zephyr_enterprise.py +93 -0
  8. alita_sdk/configurations/zephyr_essential.py +75 -0
  9. alita_sdk/runtime/clients/client.py +3 -2
  10. alita_sdk/runtime/langchain/assistant.py +56 -40
  11. alita_sdk/runtime/langchain/constants.py +2 -0
  12. alita_sdk/runtime/langchain/document_loaders/AlitaDocxMammothLoader.py +315 -3
  13. alita_sdk/runtime/langchain/document_loaders/AlitaJSONLoader.py +4 -1
  14. alita_sdk/runtime/langchain/document_loaders/constants.py +28 -12
  15. alita_sdk/runtime/langchain/langraph_agent.py +52 -27
  16. alita_sdk/runtime/langchain/utils.py +15 -4
  17. alita_sdk/runtime/toolkits/application.py +8 -1
  18. alita_sdk/runtime/toolkits/tools.py +79 -49
  19. alita_sdk/runtime/tools/__init__.py +7 -2
  20. alita_sdk/runtime/tools/application.py +7 -0
  21. alita_sdk/runtime/tools/function.py +28 -23
  22. alita_sdk/runtime/tools/graph.py +10 -4
  23. alita_sdk/runtime/tools/image_generation.py +104 -8
  24. alita_sdk/runtime/tools/llm.py +142 -114
  25. alita_sdk/runtime/tools/sandbox.py +166 -63
  26. alita_sdk/runtime/tools/vectorstore.py +2 -1
  27. alita_sdk/runtime/tools/vectorstore_base.py +2 -1
  28. alita_sdk/runtime/utils/utils.py +1 -0
  29. alita_sdk/tools/__init__.py +43 -31
  30. alita_sdk/tools/base_indexer_toolkit.py +54 -60
  31. alita_sdk/tools/code_indexer_toolkit.py +13 -3
  32. alita_sdk/tools/confluence/api_wrapper.py +29 -7
  33. alita_sdk/tools/confluence/loader.py +10 -0
  34. alita_sdk/tools/elitea_base.py +1 -1
  35. alita_sdk/tools/gitlab/api_wrapper.py +8 -9
  36. alita_sdk/tools/jira/api_wrapper.py +1 -1
  37. alita_sdk/tools/qtest/api_wrapper.py +7 -10
  38. alita_sdk/tools/sharepoint/api_wrapper.py +81 -28
  39. alita_sdk/tools/sharepoint/authorization_helper.py +131 -1
  40. alita_sdk/tools/sharepoint/utils.py +8 -2
  41. alita_sdk/tools/utils/content_parser.py +27 -16
  42. alita_sdk/tools/vector_adapters/VectorStoreAdapter.py +18 -5
  43. {alita_sdk-0.3.375.dist-info → alita_sdk-0.3.417.dist-info}/METADATA +1 -1
  44. {alita_sdk-0.3.375.dist-info → alita_sdk-0.3.417.dist-info}/RECORD +47 -47
  45. {alita_sdk-0.3.375.dist-info → alita_sdk-0.3.417.dist-info}/WHEEL +0 -0
  46. {alita_sdk-0.3.375.dist-info → alita_sdk-0.3.417.dist-info}/licenses/LICENSE +0 -0
  47. {alita_sdk-0.3.375.dist-info → alita_sdk-0.3.417.dist-info}/top_level.txt +0 -0
alita_sdk/runtime/tools/image_generation.py (+104 -8)

@@ -2,16 +2,59 @@
 Image generation tool for Alita SDK.
 """
 import logging
-from typing import Optional, Type, Any
-from langchain_core.tools import BaseTool
-from pydantic import BaseModel, Field
+from typing import Optional, Type, Any, List, Literal
+from langchain_core.tools import BaseTool, BaseToolkit
+from pydantic import BaseModel, Field, create_model, ConfigDict
 
 logger = logging.getLogger(__name__)
 
+name = "image_generation"
+
+
+def get_tools(tools_list: list, alita_client=None, llm=None,
+              memory_store=None):
+    """
+    Get image generation tools for the provided tool configurations.
+
+    Args:
+        tools_list: List of tool configurations
+        alita_client: Alita client instance (required for image generation)
+        llm: LLM client instance (unused for image generation)
+        memory_store: Optional memory store instance (unused)
+
+    Returns:
+        List of image generation tools
+    """
+    all_tools = []
+
+    for tool in tools_list:
+        if (tool.get('type') == 'image_generation' or
+                tool.get('toolkit_name') == 'image_generation'):
+            try:
+                if not alita_client:
+                    logger.error("Alita client is required for image "
+                                 "generation tools")
+                    continue
+
+                toolkit_instance = ImageGenerationToolkit.get_toolkit(
+                    client=alita_client,
+                    toolkit_name=tool.get('toolkit_name', '')
+                )
+                all_tools.extend(toolkit_instance.get_tools())
+            except Exception as e:
+                logger.error(f"Error in image generation toolkit "
+                             f"get_tools: {e}")
+                logger.error(f"Tool config: {tool}")
+                raise
+
+    return all_tools
+
 
 class ImageGenerationInput(BaseModel):
     """Input schema for image generation tool."""
-    prompt: str = Field(description="Text prompt describing the image to generate")
+    prompt: str = Field(
+        description="Text prompt describing the image to generate"
+    )
     n: int = Field(
         default=1, description="Number of images to generate (1-10)",
         ge=1, le=10
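
Note: the module now exposes a top-level `get_tools(tools_list, ...)` dispatcher, matching the entrypoint convention the other tool modules in this package use. A minimal usage sketch (not part of the diff; `client` stands in for a configured Alita client instance):

    from alita_sdk.runtime.tools.image_generation import get_tools

    # one config entry per toolkit; matched on 'type' or 'toolkit_name'
    tools_config = [{"type": "image_generation", "toolkit_name": "image_generation"}]
    tools = get_tools(tools_config, alita_client=client)  # -> [ImageGenerationTool]
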
@@ -22,7 +65,7 @@ class ImageGenerationInput(BaseModel):
     )
     quality: str = Field(
         default="auto",
-        description="Quality of the generated image ('low', 'medium', 'high', 'auto')"
+        description="Quality of the generated image ('low', 'medium', 'high')"
     )
     style: Optional[str] = Field(
         default=None, description="Style of the generated image (optional)"
@@ -69,7 +112,8 @@ class ImageGenerationTool(BaseTool):
             else:
                 content_chunks.append({
                     "type": "text",
-                    "text": f"Generated {len(images)} images for prompt: '{prompt}'"
+                    "text": f"Generated {len(images)} images for "
+                            f"prompt: '{prompt}'"
                 })
 
             # Add image content for each generated image
@@ -85,7 +129,8 @@
                     content_chunks.append({
                         "type": "image_url",
                         "image_url": {
-                            "url": f"data:image/png;base64,{image_data['b64_json']}"
+                            "url": f"data:image/png;base64,"
+                                   f"{image_data['b64_json']}"
                         }
                     })
 
@@ -94,7 +139,8 @@
             # Fallback to text response if no images in result
             return [{
                 "type": "text",
-                "text": f"Image generation completed but no images returned: {result}"
+                "text": f"Image generation completed but no images "
+                        f"returned: {result}"
             }]
 
         except Exception as e:
@@ -114,3 +160,53 @@
 def create_image_generation_tool(client):
     """Create an image generation tool with the provided Alita client."""
     return ImageGenerationTool(client=client)
+
+
+class ImageGenerationToolkit(BaseToolkit):
+    """Toolkit for image generation tools."""
+    tools: List[BaseTool] = []
+
+    @staticmethod
+    def toolkit_config_schema() -> BaseModel:
+        """Get the configuration schema for the image generation toolkit."""
+        # Create sample tool to get schema
+        sample_tool = ImageGenerationTool(client=None)
+        selected_tools = {sample_tool.name: sample_tool.args_schema.schema()}
+
+        return create_model(
+            'image_generation',
+            selected_tools=(
+                List[Literal[tuple(selected_tools)]],
+                Field(
+                    default=[],
+                    json_schema_extra={'args_schemas': selected_tools}
+                )
+            ),
+            __config__=ConfigDict(json_schema_extra={
+                'metadata': {
+                    "label": "Image Generation",
+                    "icon_url": "image_generation.svg",
+                    "hidden": True,
+                    "categories": ["internal_tool"],
+                    "extra_categories": ["image generation"],
+                }
+            })
+        )
+
+    @classmethod
+    def get_toolkit(cls, client=None, **kwargs):
+        """
+        Get toolkit with image generation tools.
+
+        Args:
+            client: Alita client instance (required)
+            **kwargs: Additional arguments
+        """
+        if not client:
+            raise ValueError("Alita client is required for image generation")
+
+        tools = [ImageGenerationTool(client=client)]
+        return cls(tools=tools)
+
+    def get_tools(self):
+        return self.tools
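
The new `ImageGenerationToolkit` wraps the existing `ImageGenerationTool`, and `toolkit_config_schema()` registers it with the toolkit registry as a hidden internal tool (`"hidden": True` in the metadata). A sketch of direct use, assuming `client` is a configured Alita client:

    toolkit = ImageGenerationToolkit.get_toolkit(client=client)
    image_tool = toolkit.get_tools()[0]
    # per the tool's _run, the result is a list of content chunks, e.g.
    # [{"type": "text", ...}, {"type": "image_url", "image_url": {"url": "data:image/png;base64,..."}}]
    chunks = image_tool.invoke({"prompt": "a red bicycle", "n": 1})
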
alita_sdk/runtime/tools/llm.py (+142 -114)

@@ -7,6 +7,7 @@ from langchain_core.runnables import RunnableConfig
 from langchain_core.tools import BaseTool, ToolException
 from pydantic import Field
 
+from ..langchain.constants import ELITEA_RS
 from ..langchain.utils import create_pydantic_model, propagate_the_input_mapping
 
 logger = logging.getLogger(__name__)
@@ -30,6 +31,7 @@ class LLMNode(BaseTool):
     structured_output: Optional[bool] = Field(default=False, description='Whether to use structured output')
     available_tools: Optional[List[BaseTool]] = Field(default=None, description='Available tools for binding')
     tool_names: Optional[List[str]] = Field(default=None, description='Specific tool names to filter')
+    steps_limit: Optional[int] = Field(default=25, description='Maximum steps for tool execution')
 
     def get_filtered_tools(self) -> List[BaseTool]:
         """
@@ -88,8 +90,7 @@
                 raise ToolException(f"LLMNode requires 'system' and 'task' parameters in input mapping. "
                                     f"Actual params: {func_args}")
             # cast to str in case user passes variable different from str
-            messages = [SystemMessage(content=str(func_args.get('system'))), HumanMessage(content=str(func_args.get('task')))]
-            messages.extend(func_args.get('chat_history', []))
+            messages = [SystemMessage(content=str(func_args.get('system'))), *func_args.get('chat_history', []), HumanMessage(content=str(func_args.get('task')))]
        else:
             # Flow for chat-based LLM node w/o prompt/task from pipeline but with messages in state
             # verify messages structure
@@ -122,14 +123,25 @@
                     }
                     for key, value in (self.structured_output_dict or {}).items()
                 }
+                # Add default output field for proper response to user
+                struct_params['elitea_response'] = {'description': 'final output to user', 'type': 'str'}
                 struct_model = create_pydantic_model(f"LLMOutput", struct_params)
-                llm = llm_client.with_structured_output(struct_model)
-                completion = llm.invoke(messages, config=config)
-                result = completion.model_dump()
+                completion = llm_client.invoke(messages, config=config)
+                if hasattr(completion, 'tool_calls') and completion.tool_calls:
+                    new_messages, _ = self.__perform_tool_calling(completion, messages, llm_client, config)
+                    llm = self.__get_struct_output_model(llm_client, struct_model)
+                    completion = llm.invoke(new_messages, config=config)
+                    result = completion.model_dump()
+                else:
+                    llm = self.__get_struct_output_model(llm_client, struct_model)
+                    completion = llm.invoke(messages, config=config)
+                    result = completion.model_dump()
 
                 # Ensure messages are properly formatted
                 if result.get('messages') and isinstance(result['messages'], list):
                     result['messages'] = [{'role': 'assistant', 'content': '\n'.join(result['messages'])}]
+                else:
+                    result['messages'] = messages + [AIMessage(content=result.get(ELITEA_RS, ''))]
 
                 return result
             else:
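
This hunk changes the structured-output behavior: previously the model was bound to the output schema immediately, which suppressed tool calls; now the node first invokes the plain model, runs the tool loop if the model requests tools, and only then asks for the structured answer. It also injects a default `elitea_response` field, which `ELITEA_RS` (imported from `..langchain.constants`) likely names, given its use as the fallback message content. A simplified sketch of the flow, assuming `create_pydantic_model` and `llm_client` behave as in the surrounding code and `perform_tool_calling` stands in for the private method:

    struct_params['elitea_response'] = {'description': 'final output to user', 'type': 'str'}
    struct_model = create_pydantic_model("LLMOutput", struct_params)

    completion = llm_client.invoke(messages, config=config)
    if getattr(completion, 'tool_calls', None):
        # resolve tool calls first, then request the structured answer
        messages, _ = perform_tool_calling(completion, messages, llm_client, config)
    result = llm_client.with_structured_output(struct_model).invoke(messages, config=config).model_dump()
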
@@ -139,115 +151,15 @@
             # Handle both tool-calling and regular responses
             if hasattr(completion, 'tool_calls') and completion.tool_calls:
                 # Handle iterative tool-calling and execution
-                new_messages = messages + [completion]
-                max_iterations = 15
-                iteration = 0
-
-                # Continue executing tools until no more tool calls or max iterations reached
-                current_completion = completion
-                while (hasattr(current_completion, 'tool_calls') and
-                       current_completion.tool_calls and
-                       iteration < max_iterations):
-
-                    iteration += 1
-                    logger.info(f"Tool execution iteration {iteration}/{max_iterations}")
-
-                    # Execute each tool call in the current completion
-                    tool_calls = current_completion.tool_calls if hasattr(current_completion.tool_calls,
-                                                                          '__iter__') else []
-
-                    for tool_call in tool_calls:
-                        tool_name = tool_call.get('name', '') if isinstance(tool_call, dict) else getattr(tool_call,
-                                                                                                          'name',
-                                                                                                          '')
-                        tool_args = tool_call.get('args', {}) if isinstance(tool_call, dict) else getattr(tool_call,
-                                                                                                          'args',
-                                                                                                          {})
-                        tool_call_id = tool_call.get('id', '') if isinstance(tool_call, dict) else getattr(
-                            tool_call, 'id', '')
-
-                        # Find the tool in filtered tools
-                        filtered_tools = self.get_filtered_tools()
-                        tool_to_execute = None
-                        for tool in filtered_tools:
-                            if tool.name == tool_name:
-                                tool_to_execute = tool
-                                break
-
-                        if tool_to_execute:
-                            try:
-                                logger.info(f"Executing tool '{tool_name}' with args: {tool_args}")
-                                tool_result = tool_to_execute.invoke(tool_args)
-
-                                # Create tool message with result - preserve structured content
-                                from langchain_core.messages import ToolMessage
-
-                                # Check if tool_result is structured content (list of dicts)
-                                # TODO: need solid check for being compatible with ToolMessage content format
-                                if isinstance(tool_result, list) and all(
-                                    isinstance(item, dict) and 'type' in item for item in tool_result
-                                ):
-                                    # Use structured content directly for multimodal support
-                                    tool_message = ToolMessage(
-                                        content=tool_result,
-                                        tool_call_id=tool_call_id
-                                    )
-                                else:
-                                    # Fallback to string conversion for other tool results
-                                    tool_message = ToolMessage(
-                                        content=str(tool_result),
-                                        tool_call_id=tool_call_id
-                                    )
-                                new_messages.append(tool_message)
-
-                            except Exception as e:
-                                logger.error(f"Error executing tool '{tool_name}': {e}")
-                                # Create error tool message
-                                from langchain_core.messages import ToolMessage
-                                tool_message = ToolMessage(
-                                    content=f"Error executing {tool_name}: {str(e)}",
-                                    tool_call_id=tool_call_id
-                                )
-                                new_messages.append(tool_message)
-                        else:
-                            logger.warning(f"Tool '{tool_name}' not found in available tools")
-                            # Create error tool message for missing tool
-                            from langchain_core.messages import ToolMessage
-                            tool_message = ToolMessage(
-                                content=f"Tool '{tool_name}' not available",
-                                tool_call_id=tool_call_id
-                            )
-                            new_messages.append(tool_message)
-
-                    # Call LLM again with tool results to get next response
-                    try:
-                        current_completion = llm_client.invoke(new_messages, config=config)
-                        new_messages.append(current_completion)
-
-                        # Check if we still have tool calls
-                        if hasattr(current_completion, 'tool_calls') and current_completion.tool_calls:
-                            logger.info(f"LLM requested {len(current_completion.tool_calls)} more tool calls")
-                        else:
-                            logger.info("LLM completed without requesting more tools")
-                            break
-
-                    except Exception as e:
-                        logger.error(f"Error in LLM call during iteration {iteration}: {e}")
-                        # Add error message and break the loop
-                        error_msg = f"Error processing tool results in iteration {iteration}: {str(e)}"
-                        new_messages.append(AIMessage(content=error_msg))
-                        break
-
-                # Log completion status
-                if iteration >= max_iterations:
-                    logger.warning(f"Reached maximum iterations ({max_iterations}) for tool execution")
-                    # Add a warning message to the chat
-                    warning_msg = f"Maximum tool execution iterations ({max_iterations}) reached. Stopping tool execution."
-                    new_messages.append(AIMessage(content=warning_msg))
-                else:
-                    logger.info(f"Tool execution completed after {iteration} iterations")
+                new_messages, current_completion = self.__perform_tool_calling(completion, messages, llm_client, config)
 
-                return {"messages": new_messages}
+                output_msgs = {"messages": new_messages}
+                if self.output_variables:
+                    if self.output_variables[0] == 'messages':
+                        return output_msgs
+                    output_msgs[self.output_variables[0]] = current_completion.content if current_completion else None
+
+                return output_msgs
             else:
                 # Regular text response
                 content = completion.content.strip() if hasattr(completion, 'content') else str(completion)
@@ -273,4 +185,120 @@ class LLMNode(BaseTool):
273
185
 
274
186
  def _run(self, *args, **kwargs):
275
187
  # Legacy support for old interface
276
- return self.invoke(kwargs, **kwargs)
188
+ return self.invoke(kwargs, **kwargs)
189
+
190
+ def __perform_tool_calling(self, completion, messages, llm_client, config):
191
+ # Handle iterative tool-calling and execution
192
+ new_messages = messages + [completion]
193
+ iteration = 0
194
+
195
+ # Continue executing tools until no more tool calls or max iterations reached
196
+ current_completion = completion
197
+ while (hasattr(current_completion, 'tool_calls') and
198
+ current_completion.tool_calls and
199
+ iteration < self.steps_limit):
200
+
201
+ iteration += 1
202
+ logger.info(f"Tool execution iteration {iteration}/{self.steps_limit}")
203
+
204
+ # Execute each tool call in the current completion
205
+ tool_calls = current_completion.tool_calls if hasattr(current_completion.tool_calls,
206
+ '__iter__') else []
207
+
208
+ for tool_call in tool_calls:
209
+ tool_name = tool_call.get('name', '') if isinstance(tool_call, dict) else getattr(tool_call,
210
+ 'name',
211
+ '')
212
+ tool_args = tool_call.get('args', {}) if isinstance(tool_call, dict) else getattr(tool_call,
213
+ 'args',
214
+ {})
215
+ tool_call_id = tool_call.get('id', '') if isinstance(tool_call, dict) else getattr(
216
+ tool_call, 'id', '')
217
+
218
+ # Find the tool in filtered tools
219
+ filtered_tools = self.get_filtered_tools()
220
+ tool_to_execute = None
221
+ for tool in filtered_tools:
222
+ if tool.name == tool_name:
223
+ tool_to_execute = tool
224
+ break
225
+
226
+ if tool_to_execute:
227
+ try:
228
+ logger.info(f"Executing tool '{tool_name}' with args: {tool_args}")
229
+ # Pass the underlying config to the tool execution invoke method
230
+ # since it may be another agent, graph, etc. to see it properly in thinking steps
231
+ tool_result = tool_to_execute.invoke(tool_args, config=config)
232
+
233
+ # Create tool message with result - preserve structured content
234
+ from langchain_core.messages import ToolMessage
235
+
236
+ # Check if tool_result is structured content (list of dicts)
237
+ # TODO: need solid check for being compatible with ToolMessage content format
238
+ if isinstance(tool_result, list) and all(
239
+ isinstance(item, dict) and 'type' in item for item in tool_result
240
+ ):
241
+ # Use structured content directly for multimodal support
242
+ tool_message = ToolMessage(
243
+ content=tool_result,
244
+ tool_call_id=tool_call_id
245
+ )
246
+ else:
247
+ # Fallback to string conversion for other tool results
248
+ tool_message = ToolMessage(
249
+ content=str(tool_result),
250
+ tool_call_id=tool_call_id
251
+ )
252
+ new_messages.append(tool_message)
253
+
254
+ except Exception as e:
255
+ logger.error(f"Error executing tool '{tool_name}': {e}")
256
+ # Create error tool message
257
+ from langchain_core.messages import ToolMessage
258
+ tool_message = ToolMessage(
259
+ content=f"Error executing {tool_name}: {str(e)}",
260
+ tool_call_id=tool_call_id
261
+ )
262
+ new_messages.append(tool_message)
263
+ else:
264
+ logger.warning(f"Tool '{tool_name}' not found in available tools")
265
+ # Create error tool message for missing tool
266
+ from langchain_core.messages import ToolMessage
267
+ tool_message = ToolMessage(
268
+ content=f"Tool '{tool_name}' not available",
269
+ tool_call_id=tool_call_id
270
+ )
271
+ new_messages.append(tool_message)
272
+
273
+ # Call LLM again with tool results to get next response
274
+ try:
275
+ current_completion = llm_client.invoke(new_messages, config=config)
276
+ new_messages.append(current_completion)
277
+
278
+ # Check if we still have tool calls
279
+ if hasattr(current_completion, 'tool_calls') and current_completion.tool_calls:
280
+ logger.info(f"LLM requested {len(current_completion.tool_calls)} more tool calls")
281
+ else:
282
+ logger.info("LLM completed without requesting more tools")
283
+ break
284
+
285
+ except Exception as e:
286
+ logger.error(f"Error in LLM call during iteration {iteration}: {e}")
287
+ # Add error message and break the loop
288
+ error_msg = f"Error processing tool results in iteration {iteration}: {str(e)}"
289
+ new_messages.append(AIMessage(content=error_msg))
290
+ break
291
+
292
+ # Log completion status
293
+ if iteration >= self.steps_limit:
294
+ logger.warning(f"Reached maximum iterations ({self.steps_limit}) for tool execution")
295
+ # Add a warning message to the chat
296
+ warning_msg = f"Maximum tool execution iterations ({self.steps_limit}) reached. Stopping tool execution."
297
+ new_messages.append(AIMessage(content=warning_msg))
298
+ else:
299
+ logger.info(f"Tool execution completed after {iteration} iterations")
300
+
301
+ return new_messages, current_completion
302
+
303
+ def __get_struct_output_model(self, llm_client, pydantic_model):
304
+ return llm_client.with_structured_output(pydantic_model)