mseep-lightfast-mcp 0.0.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43)
  1. common/__init__.py +21 -0
  2. common/types.py +182 -0
  3. lightfast_mcp/__init__.py +50 -0
  4. lightfast_mcp/core/__init__.py +14 -0
  5. lightfast_mcp/core/base_server.py +205 -0
  6. lightfast_mcp/exceptions.py +55 -0
  7. lightfast_mcp/servers/__init__.py +1 -0
  8. lightfast_mcp/servers/blender/__init__.py +5 -0
  9. lightfast_mcp/servers/blender/server.py +358 -0
  10. lightfast_mcp/servers/blender_mcp_server.py +82 -0
  11. lightfast_mcp/servers/mock/__init__.py +5 -0
  12. lightfast_mcp/servers/mock/server.py +101 -0
  13. lightfast_mcp/servers/mock/tools.py +161 -0
  14. lightfast_mcp/servers/mock_server.py +78 -0
  15. lightfast_mcp/utils/__init__.py +1 -0
  16. lightfast_mcp/utils/logging_utils.py +69 -0
  17. mseep_lightfast_mcp-0.0.1.dist-info/METADATA +36 -0
  18. mseep_lightfast_mcp-0.0.1.dist-info/RECORD +43 -0
  19. mseep_lightfast_mcp-0.0.1.dist-info/WHEEL +5 -0
  20. mseep_lightfast_mcp-0.0.1.dist-info/entry_points.txt +7 -0
  21. mseep_lightfast_mcp-0.0.1.dist-info/licenses/LICENSE +21 -0
  22. mseep_lightfast_mcp-0.0.1.dist-info/top_level.txt +3 -0
  23. tools/__init__.py +46 -0
  24. tools/ai/__init__.py +8 -0
  25. tools/ai/conversation_cli.py +345 -0
  26. tools/ai/conversation_client.py +399 -0
  27. tools/ai/conversation_session.py +342 -0
  28. tools/ai/providers/__init__.py +11 -0
  29. tools/ai/providers/base_provider.py +64 -0
  30. tools/ai/providers/claude_provider.py +200 -0
  31. tools/ai/providers/openai_provider.py +204 -0
  32. tools/ai/tool_executor.py +257 -0
  33. tools/common/__init__.py +99 -0
  34. tools/common/async_utils.py +419 -0
  35. tools/common/errors.py +222 -0
  36. tools/common/logging.py +252 -0
  37. tools/common/types.py +130 -0
  38. tools/orchestration/__init__.py +15 -0
  39. tools/orchestration/cli.py +320 -0
  40. tools/orchestration/config_loader.py +348 -0
  41. tools/orchestration/server_orchestrator.py +466 -0
  42. tools/orchestration/server_registry.py +187 -0
  43. tools/orchestration/server_selector.py +242 -0
@@ -0,0 +1,204 @@
1
+ """OpenAI AI provider implementation."""
2
+
3
+ import json
4
+ from typing import Any, Dict, List
5
+
6
+ import mcp.types as mcp_types
7
+ import openai
8
+
9
+ from tools.common import (
10
+ ConversationStep,
11
+ OperationStatus,
12
+ Result,
13
+ ToolCall,
14
+ get_logger,
15
+ )
16
+
17
+ from .base_provider import BaseAIProvider
18
+
19
+ logger = get_logger("OpenAIProvider")
20
+
21
+
22
class OpenAIProvider(BaseAIProvider):
    """OpenAI AI provider implementation.

    Bridges the generic conversation abstractions (ConversationStep, ToolCall,
    Result) and OpenAI's chat-completions function-calling API: builds the
    system prompt and tool schemas, issues the request, and parses text and
    tool calls out of the response.
    """

    def __init__(self, api_key: str):
        """Initialize OpenAI provider with an async client."""
        super().__init__(api_key)
        self.client = openai.AsyncOpenAI(api_key=api_key)

    @property
    def provider_name(self) -> str:
        """Get the provider name."""
        return "openai"

    @property
    def default_model(self) -> str:
        """Get the default model for this provider."""
        return "gpt-4o"

    async def generate_step(
        self,
        messages: List[Dict[str, Any]],
        available_tools: Dict[str, tuple[mcp_types.Tool, str]],
        step_number: int,
    ) -> Result[ConversationStep]:
        """Generate a single conversation step with potential tool calls.

        Args:
            messages: Prior conversation messages in OpenAI message format.
            available_tools: Mapping of tool name -> (MCP tool, server name).
            step_number: Sequence number recorded on the resulting step.

        Returns:
            Result wrapping a ConversationStep on success, or a FAILED Result
            with error_code "OPENAI_API_ERROR" — never raises.
        """
        try:
            # Describe the connected servers/tools inside the system prompt.
            tools_context = self.build_tools_context(available_tools)
            system_prompt = f"""You are an AI assistant that can control multiple creative applications through MCP servers.

{tools_context}

You can use the available tools to interact with the connected servers. When you need to perform actions, use the appropriate tools. For conversational responses, respond normally with helpful information."""

            # OpenAI takes the system prompt as the first entry of the
            # messages array (unlike providers with a separate system field).
            formatted_messages = [
                {"role": "system", "content": system_prompt}
            ] + self.format_messages_for_api(messages)

            # Convert each MCP tool into OpenAI's function-calling schema.
            openai_tools = [
                self.format_tool_for_api(mcp_tool, server_name)
                for mcp_tool, server_name in available_tools.values()
            ]

            logger.debug(f"Making OpenAI API call with {len(openai_tools)} tools")

            # Build the request once; `tools`/`tool_choice` are only valid
            # when at least one tool is supplied.
            request_kwargs: Dict[str, Any] = {
                "model": self.default_model,
                "messages": formatted_messages,
                "max_tokens": 4000,
            }
            if openai_tools:
                request_kwargs["tools"] = openai_tools
                request_kwargs["tool_choice"] = "auto"

            response = await self.client.chat.completions.create(**request_kwargs)  # type: ignore

            # Create conversation step from the first (only) choice.
            step = ConversationStep(step_number=step_number)
            message = response.choices[0].message

            if message.content:
                step.text = message.content

            if message.tool_calls:
                for tool_call in self.parse_tool_calls(message):
                    step.add_tool_call(tool_call)

            # A step with neither text nor tool calls is terminal.
            if not step.tool_calls and not step.text:
                step.finish_reason = "stop"

            return Result(status=OperationStatus.SUCCESS, data=step)

        except Exception as e:
            logger.error("OpenAI API call failed", error=e)
            return Result(
                status=OperationStatus.FAILED,
                error=f"OpenAI API error: {e}",
                error_code="OPENAI_API_ERROR",
            )

    def build_tools_context(
        self, available_tools: Dict[str, tuple[mcp_types.Tool, str]]
    ) -> str:
        """Build a human-readable context description of available tools."""
        if not available_tools:
            return "No connected servers or tools available."

        # Group tools by their owning server for a per-server listing.
        tools_by_server: Dict[str, List[mcp_types.Tool]] = {}
        for mcp_tool, server_name in available_tools.values():
            tools_by_server.setdefault(server_name, []).append(mcp_tool)

        tools_desc = []
        for server_name, server_tools in tools_by_server.items():
            tools_desc.append(f"**{server_name} Server**:")
            for tool in server_tools:
                description = tool.description or "No description available"
                tools_desc.append(f"  - {tool.name}: {description}")

        return "Connected Servers and Available Tools:\n" + "\n".join(tools_desc)

    def format_tool_for_api(
        self, mcp_tool: mcp_types.Tool, server_name: str
    ) -> Dict[str, Any]:
        """Convert an MCP tool to OpenAI's function-calling tool format."""
        return {
            "type": "function",
            "function": {
                "name": mcp_tool.name,
                "description": mcp_tool.description
                or f"Call {mcp_tool.name} on {server_name} server",
                # MCP input schemas are JSON Schema, which OpenAI accepts.
                "parameters": mcp_tool.inputSchema,
            },
        }

    def parse_tool_calls(self, message: Any) -> List[ToolCall]:
        """Parse tool calls from an OpenAI response message.

        Tolerates missing/None fields on the SDK objects; malformed argument
        JSON degrades to an empty argument dict rather than raising.
        """
        tool_calls: List[ToolCall] = []

        for tool_call in getattr(message, "tool_calls", None) or []:
            # Guard against arguments being absent or None: the original
            # only caught JSONDecodeError, so json.loads(None) would raise
            # an uncaught TypeError.
            raw_arguments = getattr(tool_call.function, "arguments", None) or "{}"
            try:
                arguments = json.loads(raw_arguments)
            except json.JSONDecodeError:
                arguments = {}

            tool_calls.append(
                ToolCall(
                    id=getattr(tool_call, "id", ""),
                    tool_name=getattr(tool_call.function, "name", ""),
                    arguments=arguments,
                )
            )

        return tool_calls

    def format_messages_for_api(
        self, messages: List[Dict[str, Any]]
    ) -> List[Dict[str, Any]]:
        """Format messages for OpenAI's API.

        OpenAI accepts all message roles directly in the messages array,
        so no transformation is required.
        """
        return messages

    def format_tool_results_for_api(
        self, tool_calls: List[ToolCall], tool_results: List[Any]
    ) -> List[Dict[str, Any]]:
        """Format tool results as OpenAI "tool" role messages.

        Each result becomes a separate {"role": "tool"} message keyed by the
        originating tool_call_id.
        """
        tool_messages = []

        for tool_call, result in zip(tool_calls, tool_results):
            # Prefer the successful payload, falling back to the error text.
            # Compare against None (not truthiness) so falsy-but-valid
            # results such as 0, "", {} or False are serialized instead of
            # being silently replaced by the error message.
            if getattr(result, "result", None) is not None:
                content = json.dumps(result.result)
            elif getattr(result, "error", None) is not None:
                content = result.error
            else:
                content = "No result"

            tool_messages.append(
                {
                    "role": "tool",
                    "tool_call_id": tool_call.id,
                    "content": content,
                }
            )

        return tool_messages
@@ -0,0 +1,257 @@
1
+ """Tool executor for handling MCP tool calls with connection pooling."""
2
+
3
+ import asyncio
4
+ import json
5
+ import time
6
+ from typing import Any, Dict, List, Optional, Tuple
7
+
8
+ import mcp.types as mcp_types
9
+
10
+ from tools.common import (
11
+ ToolCall,
12
+ ToolExecutionError,
13
+ ToolResult,
14
+ ToolTimeoutError,
15
+ get_logger,
16
+ run_concurrent_operations,
17
+ with_correlation_id,
18
+ )
19
+ from tools.common.async_utils import ConnectionPool
20
+
21
+ logger = get_logger("ToolExecutor")
22
+
23
+
24
class ToolExecutor:
    """Handles execution of MCP tool calls with connection pooling and concurrency.

    All execution paths return a ToolResult; failures are reported through the
    result's error/error_code fields instead of raising to the caller.
    """

    def __init__(self, max_concurrent: int = 5, tool_timeout: float = 30.0):
        """Initialize the tool executor.

        Args:
            max_concurrent: Maximum number of tool calls executed in parallel.
            tool_timeout: Per-call timeout in seconds.
        """
        self.max_concurrent = max_concurrent
        self.tool_timeout = tool_timeout
        # Maps tool name -> (MCP tool definition, owning server name).
        self.available_tools: Dict[str, Tuple[mcp_types.Tool, str]] = {}
        self.connection_pool: Optional[ConnectionPool] = None

    async def update_tools(
        self,
        available_tools: Dict[str, Tuple[mcp_types.Tool, str]],
        connection_pool: ConnectionPool,
    ):
        """Update the available tools and connection pool."""
        self.available_tools = available_tools
        self.connection_pool = connection_pool
        logger.info(f"Updated tool executor with {len(available_tools)} tools")

    @with_correlation_id
    async def execute_tool(self, tool_call: ToolCall) -> ToolResult:
        """Execute a single tool call with a timeout.

        Returns:
            A ToolResult carrying either the tool's output or an error with
            one of the codes TOOL_NOT_FOUND, NO_CONNECTION_POOL, or the
            error_code of a timeout/execution failure.
        """
        start_time = time.time()

        # Unknown tool: fail fast without touching the pool.
        if tool_call.tool_name not in self.available_tools:
            return ToolResult(
                id=tool_call.id,
                tool_name=tool_call.tool_name,
                arguments=tool_call.arguments,
                error=f"Tool {tool_call.tool_name} not found",
                error_code="TOOL_NOT_FOUND",
                duration_ms=(time.time() - start_time) * 1000,
            )

        # Only the server name is needed here; the tool definition is unused.
        _mcp_tool, server_name = self.available_tools[tool_call.tool_name]

        if not self.connection_pool:
            return ToolResult(
                id=tool_call.id,
                tool_name=tool_call.tool_name,
                arguments=tool_call.arguments,
                error="Connection pool not available",
                error_code="NO_CONNECTION_POOL",
                duration_ms=(time.time() - start_time) * 1000,
            )

        try:
            # Bound the call so a hung server cannot stall the conversation.
            result = await asyncio.wait_for(
                self._execute_tool_with_connection(tool_call, server_name),
                timeout=self.tool_timeout,
            )

            duration_ms = (time.time() - start_time) * 1000
            result.duration_ms = duration_ms
            result.server_name = server_name

            logger.debug(
                f"Tool {tool_call.tool_name} executed successfully",
                tool_name=tool_call.tool_name,
                server_name=server_name,
                duration_ms=duration_ms,
            )

            return result

        except asyncio.TimeoutError:
            error = ToolTimeoutError(
                f"Tool {tool_call.tool_name} timed out after {self.tool_timeout}s",
                tool_name=tool_call.tool_name,
                server_name=server_name,
            )
            logger.error("Tool execution timed out", error=error)
            return ToolResult(
                id=tool_call.id,
                tool_name=tool_call.tool_name,
                arguments=tool_call.arguments,
                error=str(error),
                error_code=error.error_code,
                server_name=server_name,
                duration_ms=(time.time() - start_time) * 1000,
            )

        except Exception as e:
            execution_error = ToolExecutionError(
                f"Error executing tool {tool_call.tool_name}: {e}",
                tool_name=tool_call.tool_name,
                server_name=server_name,
                cause=e,
            )
            logger.error("Tool execution failed", error=execution_error)
            return ToolResult(
                id=tool_call.id,
                tool_name=tool_call.tool_name,
                arguments=tool_call.arguments,
                error=str(execution_error),
                error_code=execution_error.error_code,
                server_name=server_name,
                duration_ms=(time.time() - start_time) * 1000,
            )

    async def _execute_tool_with_connection(
        self, tool_call: ToolCall, server_name: str
    ) -> ToolResult:
        """Execute the tool using a pooled connection to its server."""
        if not self.connection_pool:
            raise ToolExecutionError(
                "Connection pool not available",
                tool_name=tool_call.tool_name,
                server_name=server_name,
            )
        async with self.connection_pool.get_connection(server_name) as client:
            # Call the tool over MCP and normalize the response.
            mcp_result = await client.call_tool(
                tool_call.tool_name, tool_call.arguments
            )
            return self._convert_mcp_result(mcp_result, tool_call)

    def _convert_mcp_result(self, mcp_result: Any, tool_call: ToolCall) -> ToolResult:
        """Convert MCP tool call result to our ToolResult format."""
        result = ToolResult(
            id=tool_call.id,
            tool_name=tool_call.tool_name,
            arguments=tool_call.arguments,
        )

        # Normalize the various MCP response shapes into a content list.
        if hasattr(mcp_result, "content"):
            content = mcp_result.content
        elif isinstance(mcp_result, list):
            content = mcp_result
        else:
            content = [mcp_result] if mcp_result else []

        if content and len(content) > 0:
            # Only the first content item is surfaced; additional items
            # (if any) are dropped — matches existing caller expectations.
            first_content = content[0]
            if hasattr(first_content, "text"):
                try:
                    # Prefer structured data: parse as JSON when possible.
                    result.result = json.loads(first_content.text)
                except json.JSONDecodeError:
                    # Not JSON — keep the raw text.
                    result.result = first_content.text
            else:
                # Non-text content: record its type and string form.
                result.result = {
                    "type": type(first_content).__name__,
                    "content": str(first_content),
                }
        else:
            result.error = "No result returned"
            result.error_code = "EMPTY_RESULT"

        return result

    async def execute_tools_concurrently(
        self, tool_calls: List[ToolCall]
    ) -> List[ToolResult]:
        """Execute multiple tool calls concurrently.

        Returns:
            One ToolResult per input call, in input order.
        """
        if not tool_calls:
            return []

        logger.info(f"Executing {len(tool_calls)} tools concurrently")

        # Bind each call via a default argument to dodge the late-binding
        # closure pitfall; each zero-arg lambda yields a fresh coroutine.
        operations = [
            lambda tc=tool_call: self.execute_tool(tc) for tool_call in tool_calls
        ]
        operation_names = [f"tool_{tc.tool_name}" for tc in tool_calls]

        # Execute concurrently with bounded parallelism.
        results = await run_concurrent_operations(
            operations,
            max_concurrent=self.max_concurrent,
            operation_names=operation_names,
        )

        # Unwrap Result wrappers into ToolResults. NOTE(review): this relies
        # on run_concurrent_operations returning results in input order (it
        # already pairs operation_names with operations by index), which lets
        # failures keep the originating tool's identity instead of the
        # original id=""/tool_name="unknown" placeholders.
        tool_results = []
        for tool_call, result in zip(tool_calls, results):
            if result.is_success:
                tool_results.append(result.data)
            else:
                tool_results.append(
                    ToolResult(
                        id=tool_call.id,
                        tool_name=tool_call.tool_name,
                        arguments=tool_call.arguments,
                        error=result.error,
                        error_code=result.error_code,
                    )
                )

        successful = sum(1 for tr in tool_results if not tr.error)
        logger.info(
            f"Tool execution completed: {successful}/{len(tool_calls)} successful"
        )

        return tool_results

    def get_available_tools(self) -> Dict[str, str]:
        """Get available tools mapped to their server names."""
        return {
            tool_name: server_name
            for tool_name, (_, server_name) in self.available_tools.items()
        }

    def get_tool_info(self, tool_name: str) -> Optional[Tuple[mcp_types.Tool, str]]:
        """Get information about a specific tool, or None if unknown."""
        return self.available_tools.get(tool_name)

    def validate_tool_call(self, tool_call: ToolCall) -> Tuple[bool, Optional[str]]:
        """Validate a tool call before execution.

        Returns:
            (True, None) when valid, otherwise (False, reason).
        """
        if tool_call.tool_name not in self.available_tools:
            return False, f"Tool {tool_call.tool_name} not found"

        if not tool_call.id:
            return False, "Tool call ID is required"

        if not isinstance(tool_call.arguments, dict):
            return False, "Tool arguments must be a dictionary"

        # Could add more validation here (e.g., schema validation)
        return True, None
@@ -0,0 +1,99 @@
"""Common utilities and types for the tools package."""

# Import shared types from common module
try:
    from common import (
        HealthStatus,
        OperationStatus,
        ServerInfo,
        ServerState,
        ToolCall,
        ToolCallState,
        ToolResult,
    )
except ImportError:
    # Fallback for development/testing: when the package is run from a
    # source checkout, the top-level `common` package may not be on
    # sys.path yet, so add the repository root (three levels up from
    # this file) and retry the same import.
    import sys
    from pathlib import Path

    sys.path.insert(0, str(Path(__file__).parent.parent.parent))
    from common import (
        HealthStatus,
        OperationStatus,
        ServerInfo,
        ServerState,
        ToolCall,
        ToolCallState,
        ToolResult,
    )

# Re-export the sibling submodules' public names so callers can simply
# `from tools.common import X` regardless of which file defines X.
from .async_utils import (
    ConnectionPool,
    RetryManager,
    get_connection_pool,
    run_concurrent_operations,
    shutdown_connection_pool,
)
from .errors import (
    AIProviderError,
    ConfigurationError,
    ConversationError,
    LightfastMCPError,
    ServerConnectionError,
    ServerError,
    ServerStartupError,
    ToolExecutionError,
    ToolNotFoundError,
    ToolTimeoutError,
    ValidationError,
)
from .logging import (
    correlation_id,
    get_logger,
    operation_context,
    with_correlation_id,
    with_operation_context,
)
from .types import (
    ConversationResult,
    ConversationStep,
    Result,
)

# Explicit public API of the tools.common package.
__all__ = [
    # Types
    "OperationStatus",
    "ServerState",
    "Result",
    "ServerInfo",
    "ConversationResult",
    "ConversationStep",
    "ToolCall",
    "ToolCallState",
    "ToolResult",
    "HealthStatus",
    # Errors
    "LightfastMCPError",
    "ConfigurationError",
    "ConversationError",
    "ServerError",
    "ServerStartupError",
    "ServerConnectionError",
    "ToolExecutionError",
    "ToolNotFoundError",
    "ToolTimeoutError",
    "AIProviderError",
    "ValidationError",
    # Logging
    "get_logger",
    "with_correlation_id",
    "with_operation_context",
    "correlation_id",
    "operation_context",
    # Async utilities
    "ConnectionPool",
    "RetryManager",
    "get_connection_pool",
    "run_concurrent_operations",
    "shutdown_connection_pool",
]
+ ]