alita-sdk 0.3.435__py3-none-any.whl → 0.3.449__py3-none-any.whl

This diff shows the changes between two publicly released versions of this package, as published to one of the supported registries. The information in this diff is provided for informational purposes only and reflects the package contents as they appear in the respective public registries.

Potentially problematic release.


This version of alita-sdk might be problematic. Click here for more details.

@@ -303,7 +303,7 @@ class AlitaClient:
303
303
  app_type=None, memory=None, runtime='langchain',
304
304
  application_variables: Optional[dict] = None,
305
305
  version_details: Optional[dict] = None, store: Optional[BaseStore] = None,
306
- llm: Optional[ChatOpenAI] = None):
306
+ llm: Optional[ChatOpenAI] = None, mcp_tokens: Optional[dict] = None):
307
307
  if tools is None:
308
308
  tools = []
309
309
  if chat_history is None:
@@ -342,13 +342,16 @@ class AlitaClient:
342
342
  app_type = "react"
343
343
  elif app_type == 'autogen':
344
344
  app_type = "react"
345
+
346
+ # LangChainAssistant constructor calls get_tools() which may raise McpAuthorizationRequired
347
+ # The exception will propagate naturally to the indexer worker's outer handler
345
348
  if runtime == 'nonrunnable':
346
349
  return LangChainAssistant(self, data, llm, chat_history, app_type,
347
- tools=tools, memory=memory, store=store)
350
+ tools=tools, memory=memory, store=store, mcp_tokens=mcp_tokens)
348
351
  if runtime == 'langchain':
349
352
  return LangChainAssistant(self, data, llm,
350
353
  chat_history, app_type,
351
- tools=tools, memory=memory, store=store).runnable()
354
+ tools=tools, memory=memory, store=store, mcp_tokens=mcp_tokens).runnable()
352
355
  elif runtime == 'llama':
353
356
  raise NotImplementedError("LLama runtime is not supported")
354
357
 
@@ -568,7 +571,8 @@ class AlitaClient:
568
571
  def predict_agent(self, llm: ChatOpenAI, instructions: str = "You are a helpful assistant.",
569
572
  tools: Optional[list] = None, chat_history: Optional[List[Any]] = None,
570
573
  memory=None, runtime='langchain', variables: Optional[list] = None,
571
- store: Optional[BaseStore] = None, debug_mode: Optional[bool] = False):
574
+ store: Optional[BaseStore] = None, debug_mode: Optional[bool] = False,
575
+ mcp_tokens: Optional[dict] = None):
572
576
  """
573
577
  Create a predict-type agent with minimal configuration.
574
578
 
@@ -604,8 +608,20 @@ class AlitaClient:
604
608
  'tools': tools, # Tool configs that will be processed by get_tools()
605
609
  'variables': variables
606
610
  }
607
- return LangChainAssistant(self, agent_data, llm,
608
- chat_history, "predict", memory=memory, store=store, debug_mode=debug_mode).runnable()
611
+
612
+ # LangChainAssistant constructor calls get_tools() which may raise McpAuthorizationRequired
613
+ # The exception will propagate naturally to the indexer worker's outer handler
614
+ return LangChainAssistant(
615
+ self,
616
+ agent_data,
617
+ llm,
618
+ chat_history,
619
+ "predict",
620
+ memory=memory,
621
+ store=store,
622
+ debug_mode=debug_mode,
623
+ mcp_tokens=mcp_tokens
624
+ ).runnable()
609
625
 
610
626
  def test_toolkit_tool(self, toolkit_config: dict, tool_name: str, tool_params: dict = None,
611
627
  runtime_config: dict = None, llm_model: str = None,
@@ -746,7 +762,23 @@ class AlitaClient:
746
762
  }
747
763
 
748
764
  # Instantiate the toolkit with client and LLM support
749
- tools = instantiate_toolkit_with_client(toolkit_config, llm, self)
765
+ try:
766
+ tools = instantiate_toolkit_with_client(toolkit_config, llm, self)
767
+ except Exception as toolkit_error:
768
+ # Re-raise McpAuthorizationRequired to allow proper handling upstream
769
+ from ..utils.mcp_oauth import McpAuthorizationRequired
770
+ if isinstance(toolkit_error, McpAuthorizationRequired):
771
+ raise
772
+ # For other errors, return error response
773
+ return {
774
+ "success": False,
775
+ "error": f"Failed to instantiate toolkit '{toolkit_config.get('toolkit_name')}': {str(toolkit_error)}",
776
+ "tool_name": tool_name,
777
+ "toolkit_config": toolkit_config_parsed_json,
778
+ "llm_model": llm_model,
779
+ "events_dispatched": events_dispatched,
780
+ "execution_time_seconds": 0.0
781
+ }
750
782
 
751
783
  if not tools:
752
784
  return {
@@ -31,7 +31,8 @@ class Assistant:
31
31
  tools: Optional[list] = [],
32
32
  memory: Optional[Any] = None,
33
33
  store: Optional[BaseStore] = None,
34
- debug_mode: Optional[bool] = False):
34
+ debug_mode: Optional[bool] = False,
35
+ mcp_tokens: Optional[dict] = None):
35
36
 
36
37
  self.app_type = app_type
37
38
  self.memory = memory
@@ -89,7 +90,14 @@ class Assistant:
89
90
  for internal_tool_name in meta.get("internal_tools"):
90
91
  version_tools.append({"type": "internal_tool", "name": internal_tool_name})
91
92
 
92
- self.tools = get_tools(version_tools, alita_client=alita, llm=self.client, memory_store=self.store, debug_mode=debug_mode)
93
+ self.tools = get_tools(
94
+ version_tools,
95
+ alita_client=alita,
96
+ llm=self.client,
97
+ memory_store=self.store,
98
+ debug_mode=debug_mode,
99
+ mcp_tokens=mcp_tokens
100
+ )
93
101
  if tools:
94
102
  self.tools += tools
95
103
  # Handle prompt setup
@@ -19,6 +19,10 @@ class McpConnectionConfig(BaseModel):
19
19
  default=None,
20
20
  description="HTTP headers for the connection (JSON object)"
21
21
  )
22
+ session_id: Optional[str] = Field(
23
+ default=None,
24
+ description="MCP session ID for stateful SSE servers (managed by client)"
25
+ )
22
26
 
23
27
  @validator('url')
24
28
  def validate_url(cls, v):