alita-sdk 0.3.435__py3-none-any.whl → 0.3.457__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of alita-sdk might be problematic.

Files changed (54)
  1. alita_sdk/runtime/clients/client.py +39 -7
  2. alita_sdk/runtime/langchain/assistant.py +10 -2
  3. alita_sdk/runtime/langchain/langraph_agent.py +57 -15
  4. alita_sdk/runtime/langchain/utils.py +19 -3
  5. alita_sdk/runtime/models/mcp_models.py +4 -0
  6. alita_sdk/runtime/toolkits/artifact.py +5 -6
  7. alita_sdk/runtime/toolkits/mcp.py +258 -150
  8. alita_sdk/runtime/toolkits/tools.py +44 -2
  9. alita_sdk/runtime/tools/function.py +2 -1
  10. alita_sdk/runtime/tools/mcp_remote_tool.py +166 -0
  11. alita_sdk/runtime/tools/mcp_server_tool.py +9 -76
  12. alita_sdk/runtime/tools/vectorstore_base.py +17 -2
  13. alita_sdk/runtime/utils/mcp_oauth.py +164 -0
  14. alita_sdk/runtime/utils/mcp_sse_client.py +405 -0
  15. alita_sdk/runtime/utils/toolkit_utils.py +9 -2
  16. alita_sdk/tools/ado/repos/__init__.py +1 -0
  17. alita_sdk/tools/ado/test_plan/__init__.py +1 -1
  18. alita_sdk/tools/ado/wiki/__init__.py +1 -5
  19. alita_sdk/tools/ado/work_item/__init__.py +1 -5
  20. alita_sdk/tools/base_indexer_toolkit.py +10 -6
  21. alita_sdk/tools/bitbucket/__init__.py +1 -0
  22. alita_sdk/tools/code/sonar/__init__.py +1 -1
  23. alita_sdk/tools/confluence/__init__.py +2 -2
  24. alita_sdk/tools/github/__init__.py +2 -2
  25. alita_sdk/tools/gitlab/__init__.py +2 -1
  26. alita_sdk/tools/gitlab_org/__init__.py +1 -2
  27. alita_sdk/tools/google_places/__init__.py +2 -1
  28. alita_sdk/tools/jira/__init__.py +1 -0
  29. alita_sdk/tools/memory/__init__.py +1 -1
  30. alita_sdk/tools/pandas/__init__.py +1 -1
  31. alita_sdk/tools/postman/__init__.py +2 -1
  32. alita_sdk/tools/pptx/__init__.py +2 -2
  33. alita_sdk/tools/qtest/__init__.py +3 -3
  34. alita_sdk/tools/qtest/api_wrapper.py +374 -29
  35. alita_sdk/tools/rally/__init__.py +1 -2
  36. alita_sdk/tools/report_portal/__init__.py +1 -0
  37. alita_sdk/tools/salesforce/__init__.py +1 -0
  38. alita_sdk/tools/servicenow/__init__.py +2 -3
  39. alita_sdk/tools/sharepoint/__init__.py +1 -0
  40. alita_sdk/tools/slack/__init__.py +1 -0
  41. alita_sdk/tools/sql/__init__.py +2 -1
  42. alita_sdk/tools/testio/__init__.py +1 -0
  43. alita_sdk/tools/testrail/__init__.py +1 -3
  44. alita_sdk/tools/xray/__init__.py +2 -1
  45. alita_sdk/tools/zephyr/__init__.py +2 -1
  46. alita_sdk/tools/zephyr_enterprise/__init__.py +1 -0
  47. alita_sdk/tools/zephyr_essential/__init__.py +1 -0
  48. alita_sdk/tools/zephyr_scale/__init__.py +1 -0
  49. alita_sdk/tools/zephyr_squad/__init__.py +1 -0
  50. {alita_sdk-0.3.435.dist-info → alita_sdk-0.3.457.dist-info}/METADATA +2 -1
  51. {alita_sdk-0.3.435.dist-info → alita_sdk-0.3.457.dist-info}/RECORD +54 -51
  52. {alita_sdk-0.3.435.dist-info → alita_sdk-0.3.457.dist-info}/WHEEL +0 -0
  53. {alita_sdk-0.3.435.dist-info → alita_sdk-0.3.457.dist-info}/licenses/LICENSE +0 -0
  54. {alita_sdk-0.3.435.dist-info → alita_sdk-0.3.457.dist-info}/top_level.txt +0 -0
@@ -303,7 +303,7 @@ class AlitaClient:
                    app_type=None, memory=None, runtime='langchain',
                    application_variables: Optional[dict] = None,
                    version_details: Optional[dict] = None, store: Optional[BaseStore] = None,
-                   llm: Optional[ChatOpenAI] = None):
+                   llm: Optional[ChatOpenAI] = None, mcp_tokens: Optional[dict] = None):
         if tools is None:
             tools = []
         if chat_history is None:
@@ -342,13 +342,16 @@ class AlitaClient:
             app_type = "react"
         elif app_type == 'autogen':
             app_type = "react"
+
+        # LangChainAssistant constructor calls get_tools() which may raise McpAuthorizationRequired
+        # The exception will propagate naturally to the indexer worker's outer handler
         if runtime == 'nonrunnable':
             return LangChainAssistant(self, data, llm, chat_history, app_type,
-                                      tools=tools, memory=memory, store=store)
+                                      tools=tools, memory=memory, store=store, mcp_tokens=mcp_tokens)
         if runtime == 'langchain':
             return LangChainAssistant(self, data, llm,
                                       chat_history, app_type,
-                                      tools=tools, memory=memory, store=store).runnable()
+                                      tools=tools, memory=memory, store=store, mcp_tokens=mcp_tokens).runnable()
         elif runtime == 'llama':
             raise NotImplementedError("LLama runtime is not supported")

@@ -568,7 +571,8 @@ class AlitaClient:
     def predict_agent(self, llm: ChatOpenAI, instructions: str = "You are a helpful assistant.",
                       tools: Optional[list] = None, chat_history: Optional[List[Any]] = None,
                       memory=None, runtime='langchain', variables: Optional[list] = None,
-                      store: Optional[BaseStore] = None, debug_mode: Optional[bool] = False):
+                      store: Optional[BaseStore] = None, debug_mode: Optional[bool] = False,
+                      mcp_tokens: Optional[dict] = None):
         """
         Create a predict-type agent with minimal configuration.

@@ -604,8 +608,20 @@ class AlitaClient:
             'tools': tools, # Tool configs that will be processed by get_tools()
             'variables': variables
         }
-        return LangChainAssistant(self, agent_data, llm,
-                                  chat_history, "predict", memory=memory, store=store, debug_mode=debug_mode).runnable()
+
+        # LangChainAssistant constructor calls get_tools() which may raise McpAuthorizationRequired
+        # The exception will propagate naturally to the indexer worker's outer handler
+        return LangChainAssistant(
+            self,
+            agent_data,
+            llm,
+            chat_history,
+            "predict",
+            memory=memory,
+            store=store,
+            debug_mode=debug_mode,
+            mcp_tokens=mcp_tokens
+        ).runnable()

     def test_toolkit_tool(self, toolkit_config: dict, tool_name: str, tool_params: dict = None,
                           runtime_config: dict = None, llm_model: str = None,
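
For callers, the new mcp_tokens parameter is simply threaded through to get_tools(). A minimal usage sketch follows; it assumes an existing client and llm object, and the shape of the mcp_tokens payload (per-server token dicts) is an assumption, not something this diff defines.

# Sketch only: 'client', 'llm' and 'tool_configs' are assumed to exist;
# the token payload shape below is a guess, not part of the released API.
mcp_tokens = {
    "github-mcp": {"access_token": "<oauth-access-token>", "token_type": "Bearer"},
}

agent = client.predict_agent(
    llm,
    instructions="You are a helpful assistant.",
    tools=tool_configs,        # toolkit configs, processed by get_tools()
    mcp_tokens=mcp_tokens,     # new parameter, forwarded to LangChainAssistant -> get_tools()
)
result = agent.invoke({"input": "List my open pull requests"})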
@@ -746,7 +762,23 @@ class AlitaClient:
         }

         # Instantiate the toolkit with client and LLM support
-        tools = instantiate_toolkit_with_client(toolkit_config, llm, self)
+        try:
+            tools = instantiate_toolkit_with_client(toolkit_config, llm, self)
+        except Exception as toolkit_error:
+            # Re-raise McpAuthorizationRequired to allow proper handling upstream
+            from ..utils.mcp_oauth import McpAuthorizationRequired
+            if isinstance(toolkit_error, McpAuthorizationRequired):
+                raise
+            # For other errors, return error response
+            return {
+                "success": False,
+                "error": f"Failed to instantiate toolkit '{toolkit_config.get('toolkit_name')}': {str(toolkit_error)}",
+                "tool_name": tool_name,
+                "toolkit_config": toolkit_config_parsed_json,
+                "llm_model": llm_model,
+                "events_dispatched": events_dispatched,
+                "execution_time_seconds": 0.0
+            }

         if not tools:
             return {
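
test_toolkit_tool() now separates MCP authorization failures from ordinary instantiation errors: McpAuthorizationRequired propagates to the caller, everything else becomes an error payload. A caller driving the OAuth flow could handle it roughly as below; the tool name is hypothetical and the attributes carried by the exception are not shown in this diff.

from alita_sdk.runtime.utils.mcp_oauth import McpAuthorizationRequired

try:
    # 'client' and 'toolkit_config' are assumed to exist; "list_projects" is a made-up tool name
    result = client.test_toolkit_tool(toolkit_config, tool_name="list_projects")
except McpAuthorizationRequired as auth_exc:
    # Assumed handling: surface the authorization request, obtain tokens out of band,
    # then retry the call with mcp_tokens supplied where applicable.
    print(f"MCP server requires authorization: {auth_exc}")
else:
    if not result.get("success"):
        # "success" / "error" keys come from the error payload built in the new except branch
        print(result["error"])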
@@ -31,7 +31,8 @@ class Assistant:
                 tools: Optional[list] = [],
                 memory: Optional[Any] = None,
                 store: Optional[BaseStore] = None,
-                debug_mode: Optional[bool] = False):
+                debug_mode: Optional[bool] = False,
+                mcp_tokens: Optional[dict] = None):

         self.app_type = app_type
         self.memory = memory
@@ -89,7 +90,14 @@ class Assistant:
             for internal_tool_name in meta.get("internal_tools"):
                 version_tools.append({"type": "internal_tool", "name": internal_tool_name})

-        self.tools = get_tools(version_tools, alita_client=alita, llm=self.client, memory_store=self.store, debug_mode=debug_mode)
+        self.tools = get_tools(
+            version_tools,
+            alita_client=alita,
+            llm=self.client,
+            memory_store=self.store,
+            debug_mode=debug_mode,
+            mcp_tokens=mcp_tokens
+        )
         if tools:
             self.tools += tools
         # Handle prompt setup
@@ -475,10 +475,14 @@ def create_graph(
         if toolkit_name:
             tool_name = f"{clean_string(toolkit_name)}{TOOLKIT_SPLITTER}{tool_name}"
         logger.info(f"Node: {node_id} : {node_type} - {tool_name}")
-        if node_type in ['function', 'tool', 'loop', 'loop_from_tool', 'indexer', 'subgraph', 'pipeline', 'agent']:
+        if node_type in ['function', 'toolkit', 'mcp', 'tool', 'loop', 'loop_from_tool', 'indexer', 'subgraph', 'pipeline', 'agent']:
+            if node_type == 'mcp' and tool_name not in [tool.name for tool in tools]:
+                # MCP is not connected and node cannot be added
+                raise ToolException(f"MCP tool '{tool_name}' not found in the provided tools. "
+                                    f"Make sure it is connected properly. Available tools: {[tool.name for tool in tools]}")
             for tool in tools:
                 if tool.name == tool_name:
-                    if node_type == 'function':
+                    if node_type in ['function', 'toolkit', 'mcp']:
                         lg_builder.add_node(node_id, FunctionTool(
                             tool=tool, name=node_id, return_type='dict',
                             output_variables=node.get('output', []),
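
create_graph() now treats 'toolkit' and 'mcp' node types like 'function' nodes and fails fast when an 'mcp' node references a tool that is not in the connected tool list. A hypothetical node definition that would exercise this path is sketched below; only 'type', 'output' and 'transition' are visible in this hunk, so the remaining keys are assumptions about the graph schema.

# Hypothetical graph node entry (key names other than 'type', 'output' and
# 'transition' are assumptions, not taken from this diff).
node = {
    "id": "fetch_issue",
    "type": "mcp",                 # now routed to FunctionTool, same as 'function'
    "tool": "list_issues",         # must match a connected MCP tool name
    "toolkit_name": "github_mcp",  # prefixed onto the tool name with TOOLKIT_SPLITTER
    "output": ["issues"],
    "transition": "summarize",
}

If the referenced tool is not connected, the new guard raises ToolException listing the available tool names instead of silently skipping the node.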
@@ -643,6 +647,7 @@ def create_graph(
                             default_output=node.get('default_output', 'END')
                         )
                     )
+            continue
         elif node_type == 'state_modifier':
             lg_builder.add_node(node_id, StateModifierNode(
                 template=node.get('template', ''),
@@ -663,7 +668,7 @@ def create_graph(
             lg_builder.add_node(reset_node_id, PrinterNode(
                 input_mapping={'printer': {'type': 'fixed', 'value': ''}}
             ))
-            lg_builder.add_edge(node_id, reset_node_id)
+            lg_builder.add_conditional_edges(node_id, TransitionalEdge(reset_node_id))
             lg_builder.add_conditional_edges(reset_node_id, TransitionalEdge(clean_string(node['transition'])))
             continue
         if node.get('transition'):
@@ -814,35 +819,63 @@ class LangGraphAgentRunnable(CompiledStateGraph):
             input['messages'] = [convert_dict_to_message(msg) for msg in chat_history]

         # handler for LLM node: if no input (Chat perspective), then take last human message
+        # Track if input came from messages to handle content extraction properly
+        input_from_messages = False
         if not input.get('input'):
             if input.get('messages'):
                 input['input'] = [next((msg for msg in reversed(input['messages']) if isinstance(msg, HumanMessage)),
-                                       None)]
+                                        None)]
+                if input['input'] is not None:
+                    input_from_messages = True

         # Append current input to existing messages instead of overwriting
         if input.get('input'):
             if isinstance(input['input'], str):
                 current_message = input['input']
             else:
+                # input can be a list of messages or a single message object
                 current_message = input.get('input')[-1]

             # TODO: add handler after we add 2+ inputs (filterByType, etc.)
             if isinstance(current_message, HumanMessage):
                 current_content = current_message.content
                 if isinstance(current_content, list):
-                    text_contents = [
-                        item['text'] if isinstance(item, dict) and item.get('type') == 'text'
-                        else item if isinstance(item, str)
-                        else None
-                        for item in current_content
-                    ]
-                    text_contents = [text for text in text_contents if text is not None]
-                    input['input'] = ". ".join(text_contents)
+                    # Extract text parts and keep non-text parts (images, etc.)
+                    text_contents = []
+                    non_text_parts = []
+
+                    for item in current_content:
+                        if isinstance(item, dict) and item.get('type') == 'text':
+                            text_contents.append(item['text'])
+                        elif isinstance(item, str):
+                            text_contents.append(item)
+                        else:
+                            # Keep image_url and other non-text content
+                            non_text_parts.append(item)
+
+                    # Set input to the joined text
+                    input['input'] = ". ".join(text_contents) if text_contents else ""
+
+                    # If this message came from input['messages'], update or remove it
+                    if input_from_messages:
+                        if non_text_parts:
+                            # Keep the message but only with non-text content (images, etc.)
+                            current_message.content = non_text_parts
+                        else:
+                            # All content was text, remove this message from the list
+                            input['messages'] = [msg for msg in input['messages'] if msg is not current_message]
+
                 elif isinstance(current_content, str):
                     # on regenerate case
                     input['input'] = current_content
+                    # If from messages and all content is text, remove the message
+                    if input_from_messages:
+                        input['messages'] = [msg for msg in input['messages'] if msg is not current_message]
                 else:
                     input['input'] = str(current_content)
+                    # If from messages, remove since we extracted the content
+                    if input_from_messages:
+                        input['messages'] = [msg for msg in input['messages'] if msg is not current_message]
             elif isinstance(current_message, str):
                 input['input'] = current_message
             else:
@@ -852,9 +885,18 @@ class LangGraphAgentRunnable(CompiledStateGraph):
                 input['messages'] = [convert_dict_to_message(msg) for msg in input['messages']]
                 # Append to existing messages
                 # input['messages'].append(current_message)
-            else:
-                # No existing messages, create new list
-                input['messages'] = [current_message]
+            # else:
+            #     NOTE: Commented out to prevent duplicates with input['input']
+            #     input['messages'] = [current_message]
+
+        # Validate that input is not empty after all processing
+        if not input.get('input'):
+            raise RuntimeError(
+                "Empty input after processing. Cannot send empty string to LLM. "
+                "This likely means the message contained only non-text content "
+                "with no accompanying text."
+            )
+
         logging.info(f"Input: {thread_id} - {input}")
         if self.checkpointer and self.checkpointer.get_tuple(config):
             self.update_state(config, input)
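
The net effect of the reworked input handling is that a multimodal HumanMessage pulled from chat history is split: its text parts become input['input'] while non-text parts (images, etc.) stay behind in input['messages']. A standalone sketch of that splitting logic, using langchain-core message objects and a made-up URL:

from langchain_core.messages import HumanMessage

message = HumanMessage(content=[
    {"type": "text", "text": "Describe this screenshot"},
    {"type": "image_url", "image_url": {"url": "https://example.com/shot.png"}},
])

# Mirror the new extraction: text parts are joined, everything else is kept aside
text_parts, non_text_parts = [], []
for part in message.content:
    if isinstance(part, dict) and part.get("type") == "text":
        text_parts.append(part["text"])
    elif isinstance(part, str):
        text_parts.append(part)
    else:
        non_text_parts.append(part)

graph_input = {"input": ". ".join(text_parts)}
if non_text_parts:
    # Keep the message, but only with its non-text content
    message.content = non_text_parts
    graph_input["messages"] = [message]

print(graph_input["input"])  # -> "Describe this screenshot"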
@@ -2,7 +2,7 @@ import builtins
 import json
 import logging
 import re
-from pydantic import create_model, Field
+from pydantic import create_model, Field, Json
 from typing import Tuple, TypedDict, Any, Optional, Annotated
 from langchain_core.messages import AnyMessage
 from langgraph.graph import add_messages
@@ -208,5 +208,21 @@ def safe_format(template, mapping):
 def create_pydantic_model(model_name: str, variables: dict[str, dict]):
     fields = {}
     for var_name, var_data in variables.items():
-        fields[var_name] = (parse_type(var_data['type']), Field(description=var_data.get('description', None)))
-    return create_model(model_name, **fields)
+        fields[var_name] = (parse_pydantic_type(var_data['type']), Field(description=var_data.get('description', None)))
+    return create_model(model_name, **fields)
+
+def parse_pydantic_type(type_name: str):
+    """
+    Helper function to parse type names into Python types.
+    Extend this function to handle custom types like 'dict' -> Json[Any].
+    """
+    type_mapping = {
+        'str': str,
+        'int': int,
+        'float': float,
+        'bool': bool,
+        'dict': Json[Any], # Map 'dict' to Pydantic's Json type
+        'list': list,
+        'any': Any
+    }
+    return type_mapping.get(type_name, Any)
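
The new parse_pydantic_type() maps the string type names used in graph state definitions onto Python and Pydantic types, with 'dict' becoming Json[Any] so JSON strings are parsed on validation. A small usage sketch of create_pydantic_model with these mappings; the variable names are made up:

from alita_sdk.runtime.langchain.utils import create_pydantic_model

StateModel = create_pydantic_model("GraphState", {
    "task": {"type": "str", "description": "Task description"},
    "attempts": {"type": "int", "description": "Retry counter"},
    "payload": {"type": "dict", "description": "Arbitrary JSON payload"},  # -> Json[Any]
})

# Json[Any] fields accept a JSON string and parse it into Python objects on validation.
state = StateModel(task="index repo", attempts=1, payload='{"branch": "main"}')
print(state.payload)  # {'branch': 'main'}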
@@ -19,6 +19,10 @@ class McpConnectionConfig(BaseModel):
         default=None,
         description="HTTP headers for the connection (JSON object)"
     )
+    session_id: Optional[str] = Field(
+        default=None,
+        description="MCP session ID for stateful SSE servers (managed by client)"
+    )

     @validator('url')
     def validate_url(cls, v):
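
The new session_id field lets the SSE client persist a server-assigned session across calls. A construction sketch follows; only url, headers and session_id are visible in this diff, so the example URL and header values are placeholders and any other fields of the model are not shown.

from alita_sdk.runtime.models.mcp_models import McpConnectionConfig

# Initial connection: no session yet; the client is expected to manage it.
conn = McpConnectionConfig(
    url="https://mcp.example.com/sse",
    headers={"Authorization": "Bearer <token>"},
    session_id=None,
)

# Once the server assigns a session, the stored config can carry it forward
# (assumes the model allows attribute assignment, the Pydantic default).
conn.session_id = "sess-1234"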
@@ -23,11 +23,7 @@ class ArtifactToolkit(BaseToolkit):
         # client = (Any, FieldInfo(description="Client object", required=True, autopopulate=True)),
         bucket=(str, FieldInfo(
             description="Bucket name",
-            pattern=r'^[a-z][a-z0-9-]*$',
-            json_schema_extra={
-                'toolkit_name': True,
-                'max_toolkit_length': ArtifactToolkit.toolkit_max_length
-            }
+            pattern=r'^[a-z][a-z0-9-]*$'
         )),
         selected_tools=(List[Literal[tuple(selected_tools)]], Field(default=[], json_schema_extra={'args_schemas': selected_tools})),
         # indexer settings
@@ -37,7 +33,10 @@ class ArtifactToolkit(BaseToolkit):
         embedding_model=(Optional[str], Field(default=None, description="Embedding configuration.",
                                               json_schema_extra={'configuration_model': 'embedding'})),

-        __config__=ConfigDict(json_schema_extra={'metadata': {"label": "Artifact", "icon_url": None}})
+        __config__=ConfigDict(json_schema_extra={'metadata': {"label": "Artifact",
+                                                              "icon_url": None,
+                                                              "max_length": ArtifactToolkit.toolkit_max_length
+                                                              }})
     )

     @classmethod