alita-sdk 0.3.465__py3-none-any.whl → 0.3.497__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of alita-sdk might be problematic.

Files changed (103)
  1. alita_sdk/cli/agent/__init__.py +5 -0
  2. alita_sdk/cli/agent/default.py +83 -1
  3. alita_sdk/cli/agent_loader.py +22 -4
  4. alita_sdk/cli/agent_ui.py +13 -3
  5. alita_sdk/cli/agents.py +1876 -186
  6. alita_sdk/cli/callbacks.py +96 -25
  7. alita_sdk/cli/cli.py +10 -1
  8. alita_sdk/cli/config.py +151 -9
  9. alita_sdk/cli/context/__init__.py +30 -0
  10. alita_sdk/cli/context/cleanup.py +198 -0
  11. alita_sdk/cli/context/manager.py +731 -0
  12. alita_sdk/cli/context/message.py +285 -0
  13. alita_sdk/cli/context/strategies.py +289 -0
  14. alita_sdk/cli/context/token_estimation.py +127 -0
  15. alita_sdk/cli/input_handler.py +167 -4
  16. alita_sdk/cli/inventory.py +1256 -0
  17. alita_sdk/cli/toolkit.py +14 -17
  18. alita_sdk/cli/toolkit_loader.py +35 -5
  19. alita_sdk/cli/tools/__init__.py +8 -1
  20. alita_sdk/cli/tools/filesystem.py +910 -64
  21. alita_sdk/cli/tools/planning.py +143 -157
  22. alita_sdk/cli/tools/terminal.py +154 -20
  23. alita_sdk/community/__init__.py +64 -8
  24. alita_sdk/community/inventory/__init__.py +224 -0
  25. alita_sdk/community/inventory/config.py +257 -0
  26. alita_sdk/community/inventory/enrichment.py +2137 -0
  27. alita_sdk/community/inventory/extractors.py +1469 -0
  28. alita_sdk/community/inventory/ingestion.py +3172 -0
  29. alita_sdk/community/inventory/knowledge_graph.py +1457 -0
  30. alita_sdk/community/inventory/parsers/__init__.py +218 -0
  31. alita_sdk/community/inventory/parsers/base.py +295 -0
  32. alita_sdk/community/inventory/parsers/csharp_parser.py +907 -0
  33. alita_sdk/community/inventory/parsers/go_parser.py +851 -0
  34. alita_sdk/community/inventory/parsers/html_parser.py +389 -0
  35. alita_sdk/community/inventory/parsers/java_parser.py +593 -0
  36. alita_sdk/community/inventory/parsers/javascript_parser.py +629 -0
  37. alita_sdk/community/inventory/parsers/kotlin_parser.py +768 -0
  38. alita_sdk/community/inventory/parsers/markdown_parser.py +362 -0
  39. alita_sdk/community/inventory/parsers/python_parser.py +604 -0
  40. alita_sdk/community/inventory/parsers/rust_parser.py +858 -0
  41. alita_sdk/community/inventory/parsers/swift_parser.py +832 -0
  42. alita_sdk/community/inventory/parsers/text_parser.py +322 -0
  43. alita_sdk/community/inventory/parsers/yaml_parser.py +370 -0
  44. alita_sdk/community/inventory/patterns/__init__.py +61 -0
  45. alita_sdk/community/inventory/patterns/ast_adapter.py +380 -0
  46. alita_sdk/community/inventory/patterns/loader.py +348 -0
  47. alita_sdk/community/inventory/patterns/registry.py +198 -0
  48. alita_sdk/community/inventory/presets.py +535 -0
  49. alita_sdk/community/inventory/retrieval.py +1403 -0
  50. alita_sdk/community/inventory/toolkit.py +169 -0
  51. alita_sdk/community/inventory/visualize.py +1370 -0
  52. alita_sdk/configurations/bitbucket.py +0 -3
  53. alita_sdk/runtime/clients/client.py +108 -31
  54. alita_sdk/runtime/langchain/assistant.py +4 -2
  55. alita_sdk/runtime/langchain/constants.py +3 -1
  56. alita_sdk/runtime/langchain/document_loaders/AlitaExcelLoader.py +103 -60
  57. alita_sdk/runtime/langchain/document_loaders/constants.py +10 -6
  58. alita_sdk/runtime/langchain/langraph_agent.py +123 -31
  59. alita_sdk/runtime/llms/preloaded.py +2 -6
  60. alita_sdk/runtime/toolkits/__init__.py +2 -0
  61. alita_sdk/runtime/toolkits/application.py +1 -1
  62. alita_sdk/runtime/toolkits/mcp.py +107 -91
  63. alita_sdk/runtime/toolkits/planning.py +173 -0
  64. alita_sdk/runtime/toolkits/tools.py +59 -7
  65. alita_sdk/runtime/tools/artifact.py +46 -17
  66. alita_sdk/runtime/tools/function.py +2 -1
  67. alita_sdk/runtime/tools/llm.py +320 -32
  68. alita_sdk/runtime/tools/mcp_remote_tool.py +23 -7
  69. alita_sdk/runtime/tools/planning/__init__.py +36 -0
  70. alita_sdk/runtime/tools/planning/models.py +246 -0
  71. alita_sdk/runtime/tools/planning/wrapper.py +607 -0
  72. alita_sdk/runtime/tools/vectorstore_base.py +44 -9
  73. alita_sdk/runtime/utils/AlitaCallback.py +106 -20
  74. alita_sdk/runtime/utils/mcp_client.py +465 -0
  75. alita_sdk/runtime/utils/mcp_oauth.py +80 -0
  76. alita_sdk/runtime/utils/mcp_tools_discovery.py +124 -0
  77. alita_sdk/runtime/utils/streamlit.py +6 -10
  78. alita_sdk/runtime/utils/toolkit_utils.py +14 -5
  79. alita_sdk/tools/__init__.py +54 -27
  80. alita_sdk/tools/ado/repos/repos_wrapper.py +1 -2
  81. alita_sdk/tools/base_indexer_toolkit.py +99 -20
  82. alita_sdk/tools/bitbucket/__init__.py +2 -2
  83. alita_sdk/tools/chunkers/__init__.py +3 -1
  84. alita_sdk/tools/chunkers/sematic/json_chunker.py +1 -0
  85. alita_sdk/tools/chunkers/sematic/markdown_chunker.py +97 -6
  86. alita_sdk/tools/chunkers/universal_chunker.py +270 -0
  87. alita_sdk/tools/code/loaders/codesearcher.py +3 -2
  88. alita_sdk/tools/code_indexer_toolkit.py +55 -22
  89. alita_sdk/tools/confluence/api_wrapper.py +63 -14
  90. alita_sdk/tools/elitea_base.py +86 -21
  91. alita_sdk/tools/jira/__init__.py +1 -1
  92. alita_sdk/tools/jira/api_wrapper.py +91 -40
  93. alita_sdk/tools/non_code_indexer_toolkit.py +1 -0
  94. alita_sdk/tools/qtest/__init__.py +1 -1
  95. alita_sdk/tools/sharepoint/api_wrapper.py +2 -2
  96. alita_sdk/tools/vector_adapters/VectorStoreAdapter.py +17 -13
  97. alita_sdk/tools/zephyr_essential/api_wrapper.py +12 -13
  98. {alita_sdk-0.3.465.dist-info → alita_sdk-0.3.497.dist-info}/METADATA +2 -1
  99. {alita_sdk-0.3.465.dist-info → alita_sdk-0.3.497.dist-info}/RECORD +103 -61
  100. {alita_sdk-0.3.465.dist-info → alita_sdk-0.3.497.dist-info}/WHEEL +0 -0
  101. {alita_sdk-0.3.465.dist-info → alita_sdk-0.3.497.dist-info}/entry_points.txt +0 -0
  102. {alita_sdk-0.3.465.dist-info → alita_sdk-0.3.497.dist-info}/licenses/LICENSE +0 -0
  103. {alita_sdk-0.3.465.dist-info → alita_sdk-0.3.497.dist-info}/top_level.txt +0 -0
@@ -12,6 +12,7 @@ from langchain_core.runnables import Runnable
 from langchain_core.runnables import RunnableConfig
 from langchain_core.tools import BaseTool, ToolException
 from langgraph.channels.ephemeral_value import EphemeralValue
+from langgraph.errors import GraphRecursionError
 from langgraph.graph import StateGraph
 from langgraph.graph.graph import END, START
 from langgraph.graph.state import CompiledStateGraph
@@ -171,12 +172,13 @@ Answer only with step name, no need to add descrip in case none of the steps are
     """

     def __init__(self, client, steps: str, description: str = "", decisional_inputs: Optional[list[str]] = [],
-                 default_output: str = 'END'):
+                 default_output: str = 'END', is_node: bool = False):
         self.client = client
         self.steps = ",".join([clean_string(step) for step in steps])
         self.description = description
         self.decisional_inputs = decisional_inputs
         self.default_output = default_output if default_output != 'END' else END
+        self.is_node = is_node

     def invoke(self, state: Annotated[BaseStore, InjectedStore()], config: Optional[RunnableConfig] = None) -> str:
         additional_info = ""
@@ -198,7 +200,8 @@ Answer only with step name, no need to add descrip in case none of the steps are
         dispatch_custom_event(
             "on_decision_edge", {"decisional_inputs": self.decisional_inputs, "state": state}, config=config
         )
-        return result
+        # support of legacy `decision` as part of node
+        return {"router_output": result} if self.is_node else result


 class TransitionalEdge(Runnable):
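
The `is_node` flag exists because LangGraph expects different return shapes from the two roles a `DecisionEdge` can now play: a conditional edge returns the name of the next step, while a node must return a state update. A minimal sketch of the two contracts (the step name is illustrative):

    result = "generate_report"  # step name chosen by the LLM

    # As a conditional edge, the bare string is the routing decision.
    edge_return = result

    # As a node (is_node=True), the value must be a state update, so the
    # decision is written into the 'router_output' channel instead.
    node_return = {"router_output": result}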
@@ -632,19 +635,31 @@ def create_graph(
                 output_variables=output_vars,
                 input_variables=node.get('input', ['messages']),
                 structured_output=node.get('structured_output', False),
+                tool_execution_timeout=node.get('tool_execution_timeout', 900),
                 available_tools=available_tools,
                 tool_names=tool_names,
                 steps_limit=kwargs.get('steps_limit', 25)
             ))
-        elif node_type == 'router':
-            # Add a RouterNode as an independent node
-            lg_builder.add_node(node_id, RouterNode(
-                name=node_id,
-                condition=node.get('condition', ''),
-                routes=node.get('routes', []),
-                default_output=node.get('default_output', 'END'),
-                input_variables=node.get('input', ['messages'])
-            ))
+        elif node_type in ['router', 'decision']:
+            if node_type == 'router':
+                # Add a RouterNode as an independent node
+                lg_builder.add_node(node_id, RouterNode(
+                    name=node_id,
+                    condition=node.get('condition', ''),
+                    routes=node.get('routes', []),
+                    default_output=node.get('default_output', 'END'),
+                    input_variables=node.get('input', ['messages'])
+                ))
+            elif node_type == 'decision':
+                logger.info(f'Adding decision: {node["nodes"]}')
+                lg_builder.add_node(node_id, DecisionEdge(
+                    client, node['nodes'],
+                    node.get('description', ""),
+                    decisional_inputs=node.get('decisional_inputs', ['messages']),
+                    default_output=node.get('default_output', 'END'),
+                    is_node=True
+                ))
+
             # Add a single conditional edge for all routes
             lg_builder.add_conditional_edges(
                 node_id,
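
For orientation, a `decision` entry in the graph schema would presumably look something like the following; the field names come from the loader above, while the node ids and values are hypothetical:

    # Hypothetical node definition consumed by create_graph (values illustrative).
    node = {
        "id": "route_request",
        "type": "decision",                  # now handled alongside 'router'
        "nodes": ["summarize", "escalate"],  # candidate next steps
        "description": "Pick the next step based on the conversation",
        "decisional_inputs": ["messages"],   # state keys shown to the LLM
        "default_output": "END",             # fallback route
    }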
@@ -820,11 +835,19 @@ class LangGraphAgentRunnable(CompiledStateGraph):
         if not config.get("configurable", {}).get("thread_id", ""):
             config["configurable"] = {"thread_id": str(uuid4())}
         thread_id = config.get("configurable", {}).get("thread_id")
+
+        # Check if checkpoint exists early for chat_history handling
+        checkpoint_exists = self.checkpointer and self.checkpointer.get_tuple(config)
+
         # Handle chat history and current input properly
         if input.get('chat_history') and not input.get('messages'):
-            # Convert chat history dict messages to LangChain message objects
-            chat_history = input.pop('chat_history')
-            input['messages'] = [convert_dict_to_message(msg) for msg in chat_history]
+            if checkpoint_exists:
+                # Checkpoint already has conversation history - discard redundant chat_history
+                input.pop('chat_history', None)
+            else:
+                # No checkpoint - convert chat history dict messages to LangChain message objects
+                chat_history = input.pop('chat_history')
+                input['messages'] = [convert_dict_to_message(msg) for msg in chat_history]

         # handler for LLM node: if no input (Chat perspective), then take last human message
         # Track if input came from messages to handle content extraction properly
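
The net effect of the new `checkpoint_exists` guard: `chat_history` only seeds `messages` on the first turn of a thread. A sketch, assuming `agent` is a `LangGraphAgentRunnable` backed by a checkpointer:

    config = {"configurable": {"thread_id": "thread-123"}}

    # First turn: no checkpoint yet, so chat_history is converted to messages.
    agent.invoke({
        "input": "Summarize the report",
        "chat_history": [
            {"role": "user", "content": "Hi"},
            {"role": "assistant", "content": "Hello!"},
        ],
    }, config=config)

    # Later turns on the same thread: the checkpoint already holds the history,
    # so a resent chat_history is now dropped instead of duplicated.
    agent.invoke({"input": "Now list the action items"}, config=config)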
@@ -872,6 +895,16 @@ class LangGraphAgentRunnable(CompiledStateGraph):
                 else:
                     # All content was text, remove this message from the list
                     input['messages'] = [msg for msg in input['messages'] if msg is not current_message]
+            else:
+                # Message came from input['input'], not from input['messages']
+                # If there are non-text parts (images, etc.), preserve them in messages
+                if non_text_parts:
+                    # Initialize messages if it doesn't exist or is empty
+                    if not input.get('messages'):
+                        input['messages'] = []
+                    # Create a new message with only non-text content
+                    non_text_message = HumanMessage(content=non_text_parts)
+                    input['messages'].append(non_text_message)

         elif isinstance(current_content, str):
             # on regenerate case
@@ -906,39 +939,98 @@ class LangGraphAgentRunnable(CompiledStateGraph):
             )

         logging.info(f"Input: {thread_id} - {input}")
-        if self.checkpointer and self.checkpointer.get_tuple(config):
-            self.update_state(config, input)
-            if config.pop("should_continue", False):
-                invoke_input = input
+        try:
+            if self.checkpointer and self.checkpointer.get_tuple(config):
+                if config.pop("should_continue", False):
+                    invoke_input = input
+                else:
+                    self.update_state(config, input)
+                    invoke_input = None
+                result = super().invoke(invoke_input, config=config, *args, **kwargs)
             else:
-                invoke_input = None
-            result = super().invoke(invoke_input, config=config, *args, **kwargs)
-        else:
-            result = super().invoke(input, config=config, *args, **kwargs)
+                result = super().invoke(input, config=config, *args, **kwargs)
+        except GraphRecursionError as e:
+            current_recursion_limit = config.get("recursion_limit", 0)
+            logger.warning("ToolExecutionLimitReached caught in LangGraphAgentRunnable: %s", e)
+            return self._handle_graph_recursion_error(
+                config=config,
+                thread_id=thread_id,
+                current_recursion_limit=current_recursion_limit,
+            )
+
         try:
-            if result.get(PRINTER_NODE_RS) == PRINTER_COMPLETED_STATE:
-                output = next((msg.content for msg in reversed(result['messages']) if not isinstance(msg, HumanMessage)),
-                              result['messages'][-1].content)
+            # Check if printer node output exists
+            printer_output = result.get(PRINTER_NODE_RS)
+            if printer_output == PRINTER_COMPLETED_STATE:
+                # Printer completed, extract last AI message
+                messages = result['messages']
+                output = next(
+                    (msg.content for msg in reversed(messages)
+                     if not isinstance(msg, HumanMessage)),
+                    messages[-1].content
+                )
+            elif printer_output is not None:
+                # Printer node has output (interrupted state)
+                output = printer_output
             else:
-                # used for printer node output - it will be reset by next `reset` node
-                output = result.get(PRINTER_NODE_RS)
-        except:
-            output = list(result.values())[-1]
+                # No printer node, extract last AI message from messages
+                messages = result.get('messages', [])
+                output = next(
+                    (msg.content for msg in reversed(messages)
+                     if not isinstance(msg, HumanMessage)),
+                    None
+                )
+        except Exception:
+            # Fallback: try to get last value or last message
+            output = list(result.values())[-1] if result else None
         config_state = self.get_state(config)
         is_execution_finished = not config_state.next
         if is_execution_finished:
             thread_id = None

+        final_output = f"Assistant run has been completed, but output is None.\nAdding last message if any: {messages[-1] if messages else []}" if is_execution_finished and output is None else output
+
         result_with_state = {
-            "output": output,
+            "output": final_output,
             "thread_id": thread_id,
             "execution_finished": is_execution_finished
         }

         # Include all state values in the result
         if hasattr(config_state, 'values') and config_state.values:
+            # except of key = 'output' which is already included
+            for key, value in config_state.values.items():
+                if key != 'output':
+                    result_with_state[key] = value
+
+        return result_with_state
+
+    def _handle_graph_recursion_error(
+        self,
+        config: RunnableConfig,
+        thread_id: str,
+        current_recursion_limit: int,
+    ) -> dict:
+        """Handle GraphRecursionError by returning a soft-boundary response."""
+        config_state = self.get_state(config)
+        is_execution_finished = False
+
+        friendly_output = (
+            f"Tool step limit {current_recursion_limit} reached for this run. You can continue by sending another "
+            "message or refining your request."
+        )
+
+        result_with_state: dict[str, Any] = {
+            "output": friendly_output,
+            "thread_id": thread_id,
+            "execution_finished": is_execution_finished,
+            "tool_execution_limit_reached": True,
+        }
+
+        if hasattr(config_state, "values") and config_state.values:
             for key, value in config_state.values.items():
-                result_with_state[key] = value
+                if key != "output":
+                    result_with_state[key] = value

         return result_with_state
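Since `execution_finished` stays `False` and the `thread_id` is preserved, a caller can treat the recursion limit as a soft boundary and resume. A sketch under the same `agent`/`config` assumptions as above:

    result = agent.invoke({"input": "Audit every file in the repo"}, config=config)

    if result.get("tool_execution_limit_reached"):
        # The run hit the tool-step/recursion limit, but the thread is resumable.
        result = agent.invoke({"input": "Continue where you left off"}, config=config)
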
@@ -105,8 +105,7 @@ class PreloadedChatModel(BaseChatModel): # pylint: disable=R0903
     model_name: str = ""
     max_tokens: Optional[int] = 256
     temperature: Optional[float] = 0.9
-    top_p: Optional[float] = 0.9
-    top_k: Optional[int] = 20
+    reasoning_effort: Optional[str] = None
     token_limit: Optional[int] = 1024

     _local_streams: Any = PrivateAttr()
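
The sampling knobs `top_p`/`top_k` give way to a single `reasoning_effort` field. A hedged construction sketch, assuming the remaining pydantic fields have defaults; the model name and effort value are assumptions, since the diff only shows the field as an optional string:

    from alita_sdk.runtime.llms.preloaded import PreloadedChatModel

    model = PreloadedChatModel(
        model_name="preloaded-model",  # illustrative
        max_tokens=512,
        temperature=0.7,
        reasoning_effort="medium",     # optional; None leaves it unset
    )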
@@ -252,8 +251,7 @@ class PreloadedChatModel(BaseChatModel): # pylint: disable=R0903
             "return_full_text": False,
             "temperature": self.temperature,
             "do_sample": True,
-            "top_k": self.top_k,
-            "top_p": self.top_p,
+            "reasoning_effort": self.reasoning_effort
         }
         #
         try:
@@ -302,8 +300,6 @@ class PreloadedChatModel(BaseChatModel): # pylint: disable=R0903
             "return_full_text": False,
             "temperature": self.temperature,
             "do_sample": True,
-            "top_k": self.top_k,
-            "top_p": self.top_p,
         }
         #
         while True:
@@ -6,6 +6,7 @@ This module provides various toolkit implementations for LangGraph agents.
 from .application import ApplicationToolkit
 from .artifact import ArtifactToolkit
 from .datasource import DatasourcesToolkit
+from .planning import PlanningToolkit
 from .prompt import PromptToolkit
 from .subgraph import SubgraphToolkit
 from .vectorstore import VectorStoreToolkit
@@ -16,6 +17,7 @@ __all__ = [
     "ApplicationToolkit",
     "ArtifactToolkit",
     "DatasourcesToolkit",
+    "PlanningToolkit",
     "PromptToolkit",
    "SubgraphToolkit",
     "VectorStoreToolkit",
@@ -28,7 +28,7 @@ class ApplicationToolkit(BaseToolkit):
         version_details = client.get_app_version_details(application_id, application_version_id)
         model_settings = {
             "max_tokens": version_details['llm_settings']['max_tokens'],
-            "top_p": version_details['llm_settings']['top_p'],
+            "reasoning_effort": version_details['llm_settings'].get('reasoning_effort'),
             "temperature": version_details['llm_settings']['temperature'],
         }

@@ -6,19 +6,18 @@ Following MCP specification: https://modelcontextprotocol.io/specification/2025-

 import logging
 import re
-import requests
 import asyncio
 from typing import List, Optional, Any, Dict, Literal, ClassVar, Union

 from langchain_core.tools import BaseToolkit, BaseTool
-from pydantic import BaseModel, ConfigDict, Field
+from pydantic import BaseModel, ConfigDict, Field, SecretStr

 from ..tools.mcp_server_tool import McpServerTool
 from ..tools.mcp_remote_tool import McpRemoteTool
 from ..tools.mcp_inspect_tool import McpInspectTool
 from ...tools.utils import TOOLKIT_SPLITTER, clean_string
 from ..models.mcp_models import McpConnectionConfig
-from ..utils.mcp_sse_client import McpSseClient
+from ..utils.mcp_client import McpClient
 from ..utils.mcp_oauth import (
     McpAuthorizationRequired,
     canonical_resource,
@@ -208,28 +207,32 @@ class McpToolkit(BaseToolkit):
                 }
             )
         ),
-        timeout=(
-            Union[int, str], # TODO: remove one I will figure out why UI sends str
+        client_id=(
+            Optional[str],
             Field(
-                default=300,
-                description="Request timeout in seconds (1-3600)"
+                default=None,
+                description="OAuth Client ID (if applicable)"
             )
         ),
-        discovery_mode=(
-            Literal['static', 'dynamic', 'hybrid'],
+        client_secret=(
+            Optional[SecretStr],
             Field(
-                default="dynamic",
-                description="Discovery mode",
-                json_schema_extra={
-                    'tooltip': 'static: use registry, dynamic: live discovery, hybrid: try dynamic first'
-                }
+                default=None,
+                description="OAuth Client Secret (if applicable)"
             )
         ),
-        discovery_interval=(
-            Union[int, str],
+        scopes=(
+            Optional[List[str]],
+            Field(
+                default=None,
+                description="OAuth Scopes (if applicable)"
+            )
+        ),
+        timeout=(
+            Union[int, str], # TODO: remove one I will figure out why UI sends str
             Field(
                 default=300,
-                description="Discovery interval in seconds (60-3600, for periodic discovery)"
+                description="Request timeout in seconds (1-3600)"
             )
         ),
         selected_tools=(
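
With `discovery_mode`/`discovery_interval` gone, a remote-MCP toolkit config would presumably carry its OAuth client registration inline. A hedged sketch of the settings shape; URL and values are illustrative:

    # Hypothetical toolkit settings matching the new schema (values illustrative).
    settings = {
        "url": "https://mcp.example.com/mcp",
        "timeout": 300,
        "client_id": "my-oauth-app",             # new
        "client_secret": "***",                  # new, stored as SecretStr
        "scopes": ["tools:read", "tools:call"],  # new
        "selected_tools": [],                    # empty = all tools
    }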
@@ -259,7 +262,7 @@
         __config__=ConfigDict(
             json_schema_extra={
                 'metadata': {
-                    "label": "Remove MCP",
+                    "label": "Remote MCP",
                     "icon_url": None,
                     "categories": ["other"],
                     "extra_categories": ["remote tools", "sse", "http"],
@@ -275,8 +278,6 @@
         url: str,
         headers: Optional[Dict[str, str]] = None,
         timeout: int = 60,
-        discovery_mode: str = "hybrid",
-        discovery_interval: int = 300,
         selected_tools: List[str] = None,
         enable_caching: bool = True,
         cache_ttl: int = 300,
@@ -297,8 +298,6 @@
             url: MCP server HTTP URL
             headers: HTTP headers for authentication
             timeout: Request timeout in seconds
-            discovery_mode: Discovery mode ('static', 'dynamic', 'hybrid')
-            discovery_interval: Discovery interval in seconds (for periodic discovery)
             selected_tools: List of specific tools to enable (empty = all tools)
             enable_caching: Whether to enable caching
             cache_ttl: Cache TTL in seconds
@@ -317,7 +316,6 @@

         # Convert numeric parameters that may come as strings from UI
         timeout = safe_int(timeout, 60)
-        discovery_interval = safe_int(discovery_interval, 300)
         cache_ttl = safe_int(cache_ttl, 300)

         logger.info(f"Creating MCP toolkit: {toolkit_name}")
@@ -363,8 +361,7 @@
             connection_config=connection_config,
             timeout=timeout,
             selected_tools=selected_tools,
-            client=client,
-            discovery_mode=discovery_mode
+            client=client
         )

         return toolkit
@@ -376,8 +373,7 @@
         connection_config: McpConnectionConfig,
         timeout: int,
         selected_tools: List[str],
-        client,
-        discovery_mode: str = "dynamic"
+        client
     ) -> List[BaseTool]:
         """
         Create tools from a single MCP server. Always performs live discovery when connection config is provided.
@@ -423,19 +419,23 @@

             logger.info(f"Successfully created {len(tools)} MCP tools from toolkit '{toolkit_name}' via direct discovery")

+        except McpAuthorizationRequired:
+            # Authorization is required; surface upstream so the caller can prompt the user
+            logger.info(f"MCP toolkit '{toolkit_name}' requires OAuth authorization")
+            raise
         except Exception as e:
             logger.error(f"Direct discovery failed for MCP toolkit '{toolkit_name}': {e}", exc_info=True)
             logger.error(f"Discovery error details - URL: {connection_config.url}, Timeout: {timeout}s")
-
-            # Fallback to static mode if available and not already static
-            if isinstance(e, McpAuthorizationRequired):
-                # Authorization is required; surface upstream so the caller can prompt the user
+
+            # For new MCP toolkits (no client), don't silently return empty - surface the error
+            # This helps users understand why tool discovery failed
+            if not client:
+                logger.warning(f"No fallback available for toolkit '{toolkit_name}' - re-raising discovery error")
                 raise
-            if client and discovery_mode != "static":
-                logger.info(f"Falling back to static discovery for toolkit '{toolkit_name}'")
-                tools = cls._create_tools_static(toolkit_name, selected_tools, timeout, client)
-            else:
-                logger.warning(f"No fallback available for toolkit '{toolkit_name}' - returning empty tools list")
+
+            # Only fall back to static discovery for existing toolkits with a client
+            logger.info(f"Falling back to static discovery for toolkit '{toolkit_name}'")
+            tools = cls._create_tools_static(toolkit_name, selected_tools, timeout, client)

         # Don't add inspection tool to agent - it's only for internal use by toolkit
         # inspection_tool = cls._create_inspection_tool(
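
Callers can now rely on `McpAuthorizationRequired` propagating out of discovery instead of being swallowed into an empty tool list. A sketch; `load_mcp_tools` is a hypothetical stand-in for whatever entry point wraps the discovery above:

    from alita_sdk.runtime.utils.mcp_oauth import McpAuthorizationRequired

    def load_mcp_tools(settings: dict) -> list:
        # Hypothetical wrapper; simulates a server demanding OAuth consent.
        raise McpAuthorizationRequired()

    try:
        tools = load_mcp_tools({"url": "https://mcp.example.com/mcp"})
    except McpAuthorizationRequired:
        # With client=None there is no static fallback, so the error surfaces
        # here and the UI can start the OAuth flow, then retry discovery.
        tools = []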
@@ -459,28 +459,34 @@
         toolkit_name: str,
         connection_config: McpConnectionConfig,
         timeout: int
-    ) -> List[Dict[str, Any]]:
+    ) -> tuple[List[Dict[str, Any]], Optional[str]]:
         """
         Discover tools and prompts from MCP server using SSE client.
-        Returns list of tool/prompt dictionaries with name, description, and inputSchema.
-        Prompts are converted to tools that can be invoked.
+
+        Returns:
+            Tuple of (tool_list, server_session_id) - session_id may be server-provided
         """
-        session_id = connection_config.session_id
+        initial_session_id = connection_config.session_id

-        if not session_id:
-            logger.warning(f"[MCP Session] No session_id provided for '{toolkit_name}' - server may require it")
-            logger.warning(f"[MCP Session] Frontend should generate a UUID and include it with mcp_tokens")
+        if not initial_session_id:
+            logger.warning(f"[MCP Session] No session_id provided for '{toolkit_name}' - will generate one")

         # Run async discovery in sync context
         try:
-            all_tools = asyncio.run(
+            all_tools, server_session_id = asyncio.run(
                 cls._discover_tools_async(
                     toolkit_name=toolkit_name,
                     connection_config=connection_config,
                     timeout=timeout
                 )
             )
-            return all_tools, session_id
+            # Return tools and the session_id (server-provided or generated)
+            logger.info(f"[MCP Session] Final session_id for '{toolkit_name}': {server_session_id}")
+            return all_tools, server_session_id
+        except McpAuthorizationRequired:
+            # Re-raise auth required exceptions directly
+            logger.info(f"[MCP SSE] Authorization required for '{toolkit_name}'")
+            raise
         except Exception as e:
             logger.error(f"[MCP SSE] Discovery failed for '{toolkit_name}': {e}")
             raise
@@ -491,9 +497,12 @@
         toolkit_name: str,
         connection_config: McpConnectionConfig,
         timeout: int
-    ) -> List[Dict[str, Any]]:
+    ) -> tuple[List[Dict[str, Any]], Optional[str]]:
         """
         Async implementation of tool discovery using SSE client.
+
+        Returns:
+            Tuple of (tool_list, server_session_id)
         """
         all_tools = []
         session_id = connection_config.session_id
@@ -505,65 +514,74 @@
             session_id = str(uuid.uuid4())
             logger.info(f"[MCP SSE] Generated temporary session_id for OAuth: {session_id}")

-        logger.info(f"[MCP SSE] Discovering from {connection_config.url} with session {session_id}")
+        logger.info(f"[MCP] Discovering from {connection_config.url} with session {session_id}")

         # Prepare headers
         headers = {}
         if connection_config.headers:
             headers.update(connection_config.headers)

-        # Create SSE client
-        client = McpSseClient(
+        # Create unified MCP client (auto-detects SSE vs Streamable HTTP)
+        client = McpClient(
             url=connection_config.url,
             session_id=session_id,
             headers=headers,
             timeout=timeout
         )

-        # Initialize MCP session
-        await client.initialize()
-        logger.info(f"[MCP SSE] Session initialized for '{toolkit_name}'")
-
-        # Discover tools
-        tools = await client.list_tools()
-        all_tools.extend(tools)
-        logger.info(f"[MCP SSE] Discovered {len(tools)} tools from '{toolkit_name}'")
-
-        # Discover prompts
-        try:
-            prompts = await client.list_prompts()
-            # Convert prompts to tool format
-            for prompt in prompts:
-                prompt_tool = {
-                    "name": f"prompt_{prompt.get('name', 'unnamed')}",
-                    "description": prompt.get('description', f"Execute prompt: {prompt.get('name')}"),
-                    "inputSchema": {
-                        "type": "object",
-                        "properties": {
-                            "arguments": {
-                                "type": "object",
-                                "description": "Arguments for the prompt template",
-                                "properties": {
-                                    arg.get("name"): {
-                                        "type": "string",
-                                        "description": arg.get("description", ""),
-                                        "required": arg.get("required", False)
+        server_session_id = None
+        async with client:
+            # Initialize MCP session
+            await client.initialize()
+            logger.info(f"[MCP] Session initialized for '{toolkit_name}' (transport={client.detected_transport})")
+
+            # Capture server-provided session_id (from mcp-session-id header)
+            server_session_id = client.server_session_id
+            if server_session_id:
+                logger.info(f"[MCP] Server provided session_id: {server_session_id}")
+
+            # Discover tools
+            tools = await client.list_tools()
+            all_tools.extend(tools)
+            logger.info(f"[MCP] Discovered {len(tools)} tools from '{toolkit_name}'")
+
+            # Discover prompts
+            try:
+                prompts = await client.list_prompts()
+                # Convert prompts to tool format
+                for prompt in prompts:
+                    prompt_tool = {
+                        "name": f"prompt_{prompt.get('name', 'unnamed')}",
+                        "description": prompt.get('description', f"Execute prompt: {prompt.get('name')}"),
+                        "inputSchema": {
+                            "type": "object",
+                            "properties": {
+                                "arguments": {
+                                    "type": "object",
+                                    "description": "Arguments for the prompt template",
+                                    "properties": {
+                                        arg.get("name"): {
+                                            "type": "string",
+                                            "description": arg.get("description", ""),
+                                            "required": arg.get("required", False)
+                                        }
+                                        for arg in prompt.get("arguments", [])
                                     }
-                                    for arg in prompt.get("arguments", [])
                                 }
                             }
-                        }
-                    },
-                    "_mcp_type": "prompt",
-                    "_mcp_prompt_name": prompt.get('name')
-                }
-                all_tools.append(prompt_tool)
-            logger.info(f"[MCP SSE] Discovered {len(prompts)} prompts from '{toolkit_name}'")
-        except Exception as e:
-            logger.warning(f"[MCP SSE] Failed to discover prompts: {e}")
+                        },
+                        "_mcp_type": "prompt",
+                        "_mcp_prompt_name": prompt.get('name')
+                    }
+                    all_tools.append(prompt_tool)
+                logger.info(f"[MCP] Discovered {len(prompts)} prompts from '{toolkit_name}'")
+            except Exception as e:
+                logger.warning(f"[MCP] Failed to discover prompts: {e}")

-        logger.info(f"[MCP SSE] Total discovered {len(all_tools)} items from '{toolkit_name}'")
-        return all_tools
+        logger.info(f"[MCP] Total discovered {len(all_tools)} items from '{toolkit_name}'")
+        # Return tools and server-provided session_id (use server's if available, else the one we sent)
+        final_session_id = server_session_id or session_id
+        return all_tools, final_session_id

     @classmethod
     def _create_tool_from_dict(
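
A discovery sketch using only the `McpClient` surface visible in this diff (context manager, `initialize`, `list_tools`, `detected_transport`, `server_session_id`); the URL and session id are illustrative:

    import asyncio
    from alita_sdk.runtime.utils.mcp_client import McpClient

    async def discover(url: str, session_id: str, timeout: int = 60):
        # The unified client auto-detects SSE vs Streamable HTTP.
        client = McpClient(url=url, session_id=session_id, headers={}, timeout=timeout)
        async with client:
            await client.initialize()
            tools = await client.list_tools()
            # Prefer the server-issued id (mcp-session-id header) if present.
            return tools, client.server_session_id or session_id

    # tools, sid = asyncio.run(discover("https://mcp.example.com/mcp", "local-uuid"))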
@@ -858,8 +876,6 @@ def get_tools(tool_config: dict, alita_client, llm=None, memory_store=None) -> L
         url=url,
         headers=headers,
         timeout=safe_int(settings.get('timeout'), 60),
-        discovery_mode=settings.get('discovery_mode', 'dynamic'),
-        discovery_interval=safe_int(settings.get('discovery_interval'), 300),
         selected_tools=settings.get('selected_tools', []),
         enable_caching=settings.get('enable_caching', True),
         cache_ttl=safe_int(settings.get('cache_ttl'), 300),