alita-sdk 0.3.428.post2__py3-none-any.whl → 0.3.430__py3-none-any.whl

This diff shows the changes between publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
@@ -0,0 +1,248 @@
+ """
+ MCP Server Inspection Tool.
+ Allows inspecting available tools, prompts, and resources on an MCP server.
+ """
+
+ import asyncio
+ import json
+ import logging
+ import time
+ from typing import Any, Type, Dict, List, Optional
+
+ from langchain_core.tools import BaseTool
+ from pydantic import BaseModel, Field, ConfigDict
+ import aiohttp
+
+ logger = logging.getLogger(__name__)
+
+
+ class McpInspectInput(BaseModel):
+     """Input schema for MCP server inspection tool."""
+
+     resource_type: str = Field(
+         default="all",
+         description="What to inspect: 'tools', 'prompts', 'resources', or 'all'"
+     )
+
+
+ class McpInspectTool(BaseTool):
+     """Tool for inspecting available tools, prompts, and resources on an MCP server."""
+
+     name: str = "mcp_inspect"
+     description: str = "List available tools, prompts, and resources from the MCP server"
+     args_schema: Type[BaseModel] = McpInspectInput
+     return_type: str = "str"
+
+     # MCP server connection details
+     server_name: str = Field(..., description="Name of the MCP server")
+     server_url: str = Field(..., description="URL of the MCP server")
+     server_headers: Optional[Dict[str, str]] = Field(default=None, description="HTTP headers for authentication")
+     timeout: int = Field(default=30, description="Request timeout in seconds")
+
+     model_config = ConfigDict(arbitrary_types_allowed=True)
+
+     def __getstate__(self):
+         """Custom serialization for pickle compatibility."""
+         state = self.__dict__.copy()
+         # Convert headers dict to regular dict to avoid any reference issues
+         if 'server_headers' in state and state['server_headers'] is not None:
+             state['server_headers'] = dict(state['server_headers'])
+         return state
+
+     def __setstate__(self, state):
+         """Custom deserialization for pickle compatibility."""
+         # Initialize Pydantic internal attributes if needed
+         if '__pydantic_fields_set__' not in state:
+             state['__pydantic_fields_set__'] = set(state.keys())
+         if '__pydantic_extra__' not in state:
+             state['__pydantic_extra__'] = None
+         if '__pydantic_private__' not in state:
+             state['__pydantic_private__'] = None
+
+         # Update object state
+         self.__dict__.update(state)
+
+     def _run(self, resource_type: str = "all") -> str:
+         """Inspect the MCP server for available resources."""
+         try:
+             # Run the async inspection
+             loop = asyncio.get_event_loop()
+             if loop.is_running():
+                 # If we're in an async context, we need to create a new event loop
+                 import concurrent.futures
+                 with concurrent.futures.ThreadPoolExecutor() as executor:
+                     future = executor.submit(self._run_async_inspection, resource_type)
+                     return future.result(timeout=self.timeout)
+             else:
+                 return loop.run_until_complete(self._run_async_inspection(resource_type))
+         except Exception as e:
+             logger.error(f"Error inspecting MCP server '{self.server_name}': {e}")
+             return f"Error inspecting MCP server: {e}"
+
+     def _run_async_inspection(self, resource_type: str) -> str:
+         """Run the async inspection in a new event loop."""
+         return asyncio.run(self._inspect_server(resource_type))
+
+     async def _inspect_server(self, resource_type: str) -> str:
+         """Perform the actual MCP server inspection."""
+         results = {}
+
+         # Determine what to inspect
+         inspect_tools = resource_type in ["all", "tools"]
+         inspect_prompts = resource_type in ["all", "prompts"]
+         inspect_resources = resource_type in ["all", "resources"]
+
+         async with aiohttp.ClientSession(timeout=aiohttp.ClientTimeout(total=self.timeout)) as session:
+
+             # List tools
+             if inspect_tools:
+                 try:
+                     tools = await self._list_tools(session)
+                     results["tools"] = tools
+                 except Exception as e:
+                     logger.warning(f"Failed to list tools from {self.server_name}: {e}")
+                     results["tools"] = {"error": str(e)}
+
+             # List prompts
+             if inspect_prompts:
+                 try:
+                     prompts = await self._list_prompts(session)
+                     results["prompts"] = prompts
+                 except Exception as e:
+                     logger.warning(f"Failed to list prompts from {self.server_name}: {e}")
+                     results["prompts"] = {"error": str(e)}
+
+             # List resources
+             if inspect_resources:
+                 try:
+                     resources = await self._list_resources(session)
+                     results["resources"] = resources
+                 except Exception as e:
+                     logger.warning(f"Failed to list resources from {self.server_name}: {e}")
+                     results["resources"] = {"error": str(e)}
+
+         return self._format_results(results, resource_type)
+
+     async def _list_tools(self, session: aiohttp.ClientSession) -> Dict[str, Any]:
+         """List available tools from the MCP server."""
+         request = {
+             "jsonrpc": "2.0",
+             "id": f"list_tools_{int(time.time())}",
+             "method": "tools/list",
+             "params": {}
+         }
+
+         headers = {"Content-Type": "application/json", **self.server_headers}
+
+         async with session.post(self.server_url, json=request, headers=headers) as response:
+             if response.status != 200:
+                 raise Exception(f"HTTP {response.status}: {await response.text()}")
+
+             data = await response.json()
+
+             if "error" in data:
+                 raise Exception(f"MCP Error: {data['error']}")
+
+             return data.get("result", {})
+
+     async def _list_prompts(self, session: aiohttp.ClientSession) -> Dict[str, Any]:
+         """List available prompts from the MCP server."""
+         request = {
+             "jsonrpc": "2.0",
+             "id": f"list_prompts_{int(time.time())}",
+             "method": "prompts/list",
+             "params": {}
+         }
+
+         headers = {"Content-Type": "application/json", **self.server_headers}
+
+         async with session.post(self.server_url, json=request, headers=headers) as response:
+             if response.status != 200:
+                 raise Exception(f"HTTP {response.status}: {await response.text()}")
+
+             data = await response.json()
+
+             if "error" in data:
+                 raise Exception(f"MCP Error: {data['error']}")
+
+             return data.get("result", {})
+
+     async def _list_resources(self, session: aiohttp.ClientSession) -> Dict[str, Any]:
+         """List available resources from the MCP server."""
+         request = {
+             "jsonrpc": "2.0",
+             "id": f"list_resources_{int(time.time())}",
+             "method": "resources/list",
+             "params": {}
+         }
+
+         headers = {"Content-Type": "application/json", **self.server_headers}
+
+         async with session.post(self.server_url, json=request, headers=headers) as response:
+             if response.status != 200:
+                 raise Exception(f"HTTP {response.status}: {await response.text()}")
+
+             data = await response.json()
+
+             if "error" in data:
+                 raise Exception(f"MCP Error: {data['error']}")
+
+             return data.get("result", {})
+
+     def _format_results(self, results: Dict[str, Any], resource_type: str) -> str:
+         """Format the inspection results for display."""
+         output_lines = [f"=== MCP Server Inspection: {self.server_name} ==="]
+         output_lines.append(f"Server URL: {self.server_url}")
+         output_lines.append("")
+
+         # Format tools
+         if "tools" in results:
+             if "error" in results["tools"]:
+                 output_lines.append(f"❌ TOOLS: Error - {results['tools']['error']}")
+             else:
+                 tools = results["tools"].get("tools", [])
+                 output_lines.append(f"🔧 TOOLS ({len(tools)} available):")
+                 if tools:
+                     for tool in tools:
+                         name = tool.get("name", "Unknown")
+                         desc = tool.get("description", "No description")
+                         output_lines.append(f" • {name}: {desc}")
+                 else:
+                     output_lines.append(" (No tools available)")
+             output_lines.append("")
+
+         # Format prompts
+         if "prompts" in results:
+             if "error" in results["prompts"]:
+                 output_lines.append(f"❌ PROMPTS: Error - {results['prompts']['error']}")
+             else:
+                 prompts = results["prompts"].get("prompts", [])
+                 output_lines.append(f"💬 PROMPTS ({len(prompts)} available):")
+                 if prompts:
+                     for prompt in prompts:
+                         name = prompt.get("name", "Unknown")
+                         desc = prompt.get("description", "No description")
+                         output_lines.append(f" • {name}: {desc}")
+                 else:
+                     output_lines.append(" (No prompts available)")
+             output_lines.append("")
+
+         # Format resources
+         if "resources" in results:
+             if "error" in results["resources"]:
+                 output_lines.append(f"❌ RESOURCES: Error - {results['resources']['error']}")
+             else:
+                 resources = results["resources"].get("resources", [])
+                 output_lines.append(f"📁 RESOURCES ({len(resources)} available):")
+                 if resources:
+                     for resource in resources:
+                         uri = resource.get("uri", "Unknown")
+                         name = resource.get("name", uri)
+                         desc = resource.get("description", "No description")
+                         output_lines.append(f" • {name}: {desc}")
+                         output_lines.append(f" URI: {uri}")
+                 else:
+                     output_lines.append(" (No resources available)")
+             output_lines.append("")
+
+         return "\n".join(output_lines)
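For orientation, here is a minimal usage sketch of the new inspection tool. The import path, server URL, and bearer token are placeholders (the diff does not show where this new module lives), so treat this as an assumption-laden illustration rather than documented API.

```python
# Hypothetical usage of the new McpInspectTool; the import path and all
# connection values below are placeholders, not taken from the package.
from alita_sdk.runtime.tools.mcp_inspect import McpInspectTool  # assumed path

inspect_tool = McpInspectTool(
    server_name="local-mcp",
    server_url="http://localhost:8000/mcp",
    # The _list_* helpers splat server_headers into every JSON-RPC POST,
    # so passing a dict (even an empty one) is the safe choice here.
    server_headers={"Authorization": "Bearer <token>"},
    timeout=30,
)

# Standard LangChain tool invocation; "tools" restricts the inspection to the
# tools/list call, while "all" covers tools, prompts, and resources.
print(inspect_tool.invoke({"resource_type": "tools"}))
```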
@@ -3,7 +3,7 @@ from logging import getLogger
  from typing import Any, Type, Literal, Optional, Union, List

  from langchain_core.tools import BaseTool
- from pydantic import BaseModel, Field, create_model, EmailStr, constr
+ from pydantic import BaseModel, Field, create_model, EmailStr, constr, ConfigDict

  from ...tools.utils import TOOLKIT_SPLITTER

@@ -15,10 +15,60 @@ class McpServerTool(BaseTool):
      description: str
      args_schema: Optional[Type[BaseModel]] = None
      return_type: str = "str"
-     client: Any
+     client: Any = Field(default=None, exclude=True) # Exclude from serialization
      server: str
      tool_timeout_sec: int = 60

+     model_config = ConfigDict(arbitrary_types_allowed=True)
+
+     def __getstate__(self):
+         """Custom serialization to exclude non-serializable objects."""
+         state = self.__dict__.copy()
+         # Remove the client since it contains threading objects that can't be pickled
+         state['client'] = None
+         # Store args_schema as a schema dict instead of the dynamic class
+         if hasattr(self, 'args_schema') and self.args_schema is not None:
+             # Convert the Pydantic model back to schema dict for pickling
+             try:
+                 state['_args_schema_dict'] = self.args_schema.model_json_schema()
+                 state['args_schema'] = None
+             except Exception as e:
+                 logger.warning(f"Failed to serialize args_schema: {e}")
+                 # If conversion fails, just remove it
+                 state['args_schema'] = None
+                 state['_args_schema_dict'] = {}
+         return state
+
+     def __setstate__(self, state):
+         """Custom deserialization to handle missing objects."""
+         # Restore the args_schema from the stored schema dict
+         args_schema_dict = state.pop('_args_schema_dict', {})
+
+         # Initialize required Pydantic internal attributes
+         if '__pydantic_fields_set__' not in state:
+             state['__pydantic_fields_set__'] = set(state.keys())
+         if '__pydantic_extra__' not in state:
+             state['__pydantic_extra__'] = None
+         if '__pydantic_private__' not in state:
+             state['__pydantic_private__'] = None
+
+         # Directly update the object's __dict__ to bypass Pydantic validation
+         self.__dict__.update(state)
+
+         # Recreate the args_schema from the stored dict if available
+         if args_schema_dict:
+             try:
+                 recreated_schema = self.create_pydantic_model_from_schema(args_schema_dict)
+                 self.__dict__['args_schema'] = recreated_schema
+             except Exception as e:
+                 logger.warning(f"Failed to recreate args_schema: {e}")
+                 self.__dict__['args_schema'] = None
+         else:
+             self.__dict__['args_schema'] = None
+
+         # Note: client will be None after unpickling
+         # The toolkit should reinitialize the client when needed
+

      @staticmethod
      def create_pydantic_model_from_schema(schema: dict, model_name: str = "ArgsSchema"):
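The `__getstate__`/`__setstate__` pair above exists so that `McpServerTool` instances survive pickling even though the live MCP client and the dynamically created `args_schema` class do not. A rough round-trip sketch, assuming the class is importable (the import path and constructor values are illustrative, not from the package):

```python
import pickle

# Assumed import path; the diff does not name the file that defines McpServerTool.
from alita_sdk.runtime.tools.mcp_server_tool import McpServerTool

tool = McpServerTool(
    name="server___list_issues",      # illustrative tool name
    description="Example MCP-backed tool",
    server="server",
    client=None,                      # a live MCP client would normally sit here
)

blob = pickle.dumps(tool)      # client is dropped, args_schema is stored as a JSON schema dict
restored = pickle.loads(blob)  # args_schema is rebuilt via create_pydantic_model_from_schema

assert restored.client is None  # the toolkit must re-attach a client before the tool runs
```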
@@ -868,10 +868,24 @@ def run_streamlit(st, ai_icon=None, user_icon=None):
              label = f"{'🔒 ' if is_secret else ''}{'*' if is_required else ''}{field_name.replace('_', ' ').title()}"

              if field_type == 'string':
-                 if is_secret:
+                 # Check if this is an enum field
+                 if field_schema.get('enum'):
+                     # Dropdown for enum values
+                     options = field_schema['enum']
+                     default_index = 0
+                     if default_value and str(default_value) in options:
+                         default_index = options.index(str(default_value))
+                     toolkit_config_values[field_name] = st.selectbox(
+                         label,
+                         options=options,
+                         index=default_index,
+                         help=field_description,
+                         key=f"config_{field_name}_{selected_toolkit_idx}"
+                     )
+                 elif is_secret:
                      toolkit_config_values[field_name] = st.text_input(
                          label,
-                         value=str(default_value) if default_value else '',
+                         value=str(default_value) if default_value else '',
                          help=field_description,
                          type="password",
                          key=f"config_{field_name}_{selected_toolkit_idx}"
@@ -879,7 +893,7 @@ def run_streamlit(st, ai_icon=None, user_icon=None):
                  else:
                      toolkit_config_values[field_name] = st.text_input(
                          label,
-                         value=str(default_value) if default_value else '',
+                         value=str(default_value) if default_value else '',
                          help=field_description,
                          key=f"config_{field_name}_{selected_toolkit_idx}"
                      )
@@ -971,6 +985,23 @@ def run_streamlit(st, ai_icon=None, user_icon=None):
                          key=f"config_{field_name}_{selected_toolkit_idx}"
                      )
                      toolkit_config_values[field_name] = [line.strip() for line in array_input.split('\n') if line.strip()]
+                 elif field_type == 'object':
+                     # Handle object/dict types (like headers)
+                     obj_input = st.text_area(
+                         f"{label} (JSON object)",
+                         value=json.dumps(default_value) if isinstance(default_value, dict) else str(default_value) if default_value else '',
+                         help=f"{field_description} - Enter as JSON object, e.g. {{\"Authorization\": \"Bearer token\"}}",
+                         placeholder='{"key": "value"}',
+                         key=f"config_{field_name}_{selected_toolkit_idx}"
+                     )
+                     try:
+                         if obj_input.strip():
+                             toolkit_config_values[field_name] = json.loads(obj_input)
+                         else:
+                             toolkit_config_values[field_name] = None
+                     except json.JSONDecodeError as e:
+                         st.error(f"Invalid JSON format for {field_name}: {e}")
+                         toolkit_config_values[field_name] = None
          else:
              st.info("This toolkit doesn't require additional configuration.")
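The new `object` branch lets users paste dictionary-valued settings (for example MCP server headers) as JSON in the Streamlit config form. A small sketch of the parse path it implements; the field name below is hypothetical:

```python
import json

# What the 'object' branch does with the text-area input; "server_headers"
# is a hypothetical field name used only for illustration.
obj_input = '{"Authorization": "Bearer <token>", "X-Api-Version": "1"}'

toolkit_config_values = {}
try:
    toolkit_config_values["server_headers"] = json.loads(obj_input) if obj_input.strip() else None
except json.JSONDecodeError:
    # In the Streamlit UI this case is surfaced via st.error and the value is left as None.
    toolkit_config_values["server_headers"] = None

print(toolkit_config_values["server_headers"]["Authorization"])  # Bearer <token>
```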
 
@@ -46,11 +46,14 @@ def instantiate_toolkit_with_client(toolkit_config: Dict[str, Any],
      # Log the configuration being used
      logger.info(f"Instantiating toolkit {toolkit_name} with LLM client")
      logger.debug(f"Toolkit {toolkit_name} configuration: {toolkit_config}")
-
+
+     # Use toolkit type from config, or fall back to lowercase toolkit name
+     toolkit_type = toolkit_config.get('type', toolkit_name.lower())
+
      # Create a tool configuration dict with required fields
      tool_config = {
          'id': toolkit_config.get('id', random.randint(1, 1000000)),
-         'type': toolkit_config.get('type', toolkit_name.lower()),
+         'type': toolkit_config.get('type', toolkit_type),
          'settings': settings,
          'toolkit_name': toolkit_name
      }
@@ -131,6 +131,17 @@ def get_tools(tools_list, alita, llm, store: Optional[BaseStore] = None, *args,
                  logger.error(f"Error getting ADO repos tools: {e}")
                  continue

+         # Handle MCP toolkit (located in runtime/toolkits)
+         if tool_type == 'mcp':
+             try:
+                 from alita_sdk.runtime.toolkits.mcp import get_tools as mcp_get_tools
+                 tools.extend(mcp_get_tools(tool, alita, llm))
+                 logger.debug(f"Successfully loaded MCP tools")
+             except Exception as e:
+                 logger.error(f"Error getting MCP tools: {e}")
+                 raise ToolException(f"Error getting MCP tools: {e}")
+             continue
+
          # Handle standard tools
          if tool_type in AVAILABLE_TOOLS and 'get_tools' in AVAILABLE_TOOLS[tool_type]:
              try:
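The new `mcp` branch routes toolkit entries of type `mcp` to the runtime MCP toolkit instead of the standard `AVAILABLE_TOOLS` registry. A sketch of the kind of entry that would take this path; every key under `settings` is an assumption, since the MCP toolkit's settings schema is not part of this diff:

```python
# Illustrative tools_list entry; only the 'type' key is known to drive the
# dispatch shown above, the 'settings' keys are placeholders.
tool = {
    "type": "mcp",                                       # triggers the new branch
    "toolkit_name": "my_mcp_server",                     # hypothetical toolkit name
    "settings": {
        "server_url": "http://localhost:8000/mcp",       # assumed field
        "headers": {"Authorization": "Bearer <token>"},  # assumed field
    },
}
# get_tools(...) would then call alita_sdk.runtime.toolkits.mcp.get_tools(tool, alita, llm)
# and raise a ToolException if loading the MCP tools fails.
```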
@@ -6,7 +6,7 @@ from langchain_core.prompts import ChatPromptTemplate
  from langchain.text_splitter import TokenTextSplitter

  from typing import Optional, List
- from langchain_core.pydantic_v1 import BaseModel
+ from pydantic import BaseModel
  from ..utils import tiktoken_length

  logger = getLogger(__name__)
@@ -1,16 +1,10 @@
- from typing import Dict, List, Literal, Optional
+ from typing import List, Literal, Optional

  from langchain_core.tools import BaseTool, BaseToolkit
  from pydantic import BaseModel, ConfigDict, Field, create_model

  from ..base.tool import BaseAction
- from .api_wrapper import (
-     FigmaApiWrapper,
-     GLOBAL_LIMIT,
-     DEFAULT_FIGMA_IMAGES_PROMPT,
-     DEFAULT_FIGMA_SUMMARY_PROMPT,
-     DEFAULT_NUMBER_OF_THREADS,
- )
+ from .api_wrapper import FigmaApiWrapper, GLOBAL_LIMIT
  from ..elitea_base import filter_missconfigured_index_tools
  from ..utils import clean_string, TOOLKIT_SPLITTER, get_max_toolkit_length
  from ...configurations.figma import FigmaConfiguration
@@ -34,14 +28,7 @@ def get_tools(tool):
              collection_name=str(tool['toolkit_name']),
              doctype='doc',
              embedding_model=tool['settings'].get('embedding_model'),
-             vectorstore_type="PGVector",
-             # figma summary/image prompt settings (toolkit-level)
-             # TODO disabled until new requirements
-             # apply_images_prompt=tool["settings"].get("apply_images_prompt"),
-             # images_prompt=tool["settings"].get("images_prompt"),
-             # apply_summary_prompt=tool["settings"].get("apply_summary_prompt"),
-             # summary_prompt=tool["settings"].get("summary_prompt"),
-             # number_of_threads=tool["settings"].get("number_of_threads"),
+             vectorstore_type="PGVector"
          )
          .get_tools()
      )
@@ -60,39 +47,6 @@ class FigmaToolkit(BaseToolkit):
          FigmaToolkit.toolkit_max_length = get_max_toolkit_length(selected_tools)
          return create_model(
              name,
-             # TODO disabled until new requirements
-             # apply_images_prompt=(Optional[bool], Field(
-             # description="Enable advanced image processing instructions for Figma image nodes.",
-             # default=True,
-             # )),
-             # images_prompt=(Optional[Dict[str, str]], Field(
-             # description=(
-             # "Instruction for how to analyze image-based nodes "
-             # "(screenshots, diagrams, etc.) during Figma file retrieving. "
-             # "Must contain a single 'prompt' key with the text."
-             # ),
-             # default=DEFAULT_FIGMA_IMAGES_PROMPT,
-             # )),
-             # apply_summary_prompt=(Optional[bool], Field(
-             # description="Enable LLM-based summarization over loaded Figma data.",
-             # default=True,
-             # )),
-             # summary_prompt=(Optional[Dict[str, str]], Field(
-             # description=(
-             # "Instruction for the LLM on how to summarize loaded Figma data. "
-             # "Must contain a single 'prompt' key with the text."
-             # ),
-             # default=DEFAULT_FIGMA_SUMMARY_PROMPT,
-             # )),
-             number_of_threads=(Optional[int], Field(
-                 description=(
-                     "Number of worker threads to use when downloading and processing Figma images. "
-                     f"Valid values are from 1 to 5. Default is {DEFAULT_NUMBER_OF_THREADS}."
-                 ),
-                 default=DEFAULT_NUMBER_OF_THREADS,
-                 ge=1,
-                 le=5,
-             )),
              global_limit=(Optional[int], Field(description="Global limit", default=GLOBAL_LIMIT)),
              global_regexp=(Optional[str], Field(description="Global regex pattern", default=None)),
              selected_tools=(