mseep-lightfast-mcp 0.0.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43)
  1. common/__init__.py +21 -0
  2. common/types.py +182 -0
  3. lightfast_mcp/__init__.py +50 -0
  4. lightfast_mcp/core/__init__.py +14 -0
  5. lightfast_mcp/core/base_server.py +205 -0
  6. lightfast_mcp/exceptions.py +55 -0
  7. lightfast_mcp/servers/__init__.py +1 -0
  8. lightfast_mcp/servers/blender/__init__.py +5 -0
  9. lightfast_mcp/servers/blender/server.py +358 -0
  10. lightfast_mcp/servers/blender_mcp_server.py +82 -0
  11. lightfast_mcp/servers/mock/__init__.py +5 -0
  12. lightfast_mcp/servers/mock/server.py +101 -0
  13. lightfast_mcp/servers/mock/tools.py +161 -0
  14. lightfast_mcp/servers/mock_server.py +78 -0
  15. lightfast_mcp/utils/__init__.py +1 -0
  16. lightfast_mcp/utils/logging_utils.py +69 -0
  17. mseep_lightfast_mcp-0.0.1.dist-info/METADATA +36 -0
  18. mseep_lightfast_mcp-0.0.1.dist-info/RECORD +43 -0
  19. mseep_lightfast_mcp-0.0.1.dist-info/WHEEL +5 -0
  20. mseep_lightfast_mcp-0.0.1.dist-info/entry_points.txt +7 -0
  21. mseep_lightfast_mcp-0.0.1.dist-info/licenses/LICENSE +21 -0
  22. mseep_lightfast_mcp-0.0.1.dist-info/top_level.txt +3 -0
  23. tools/__init__.py +46 -0
  24. tools/ai/__init__.py +8 -0
  25. tools/ai/conversation_cli.py +345 -0
  26. tools/ai/conversation_client.py +399 -0
  27. tools/ai/conversation_session.py +342 -0
  28. tools/ai/providers/__init__.py +11 -0
  29. tools/ai/providers/base_provider.py +64 -0
  30. tools/ai/providers/claude_provider.py +200 -0
  31. tools/ai/providers/openai_provider.py +204 -0
  32. tools/ai/tool_executor.py +257 -0
  33. tools/common/__init__.py +99 -0
  34. tools/common/async_utils.py +419 -0
  35. tools/common/errors.py +222 -0
  36. tools/common/logging.py +252 -0
  37. tools/common/types.py +130 -0
  38. tools/orchestration/__init__.py +15 -0
  39. tools/orchestration/cli.py +320 -0
  40. tools/orchestration/config_loader.py +348 -0
  41. tools/orchestration/server_orchestrator.py +466 -0
  42. tools/orchestration/server_registry.py +187 -0
  43. tools/orchestration/server_selector.py +242 -0
@@ -0,0 +1,342 @@
1
+ """Conversation session management for AI interactions."""
2
+
3
+ import time
4
+ from typing import Any, Dict, List
5
+
6
+ import mcp.types as mcp_types
7
+
8
+ from tools.common import (
9
+ ConversationError,
10
+ ConversationStep,
11
+ OperationStatus,
12
+ Result,
13
+ ToolResult,
14
+ get_logger,
15
+ with_correlation_id,
16
+ with_operation_context,
17
+ )
18
+
19
+ from .providers.base_provider import BaseAIProvider
20
+ from .tool_executor import ToolExecutor
21
+
22
+ logger = get_logger("ConversationSession")
23
+
24
+
25
class ConversationSession:
    """Manages a single conversation session with AI and tool execution.

    A session accumulates provider-formatted chat messages and the
    ConversationStep objects produced for each AI turn. For every turn it
    asks the AI provider for a step, executes any tool calls the step
    contains, and folds the results back into the message history in the
    format the active provider expects (Claude, OpenAI, or generic).
    """

    def __init__(
        self,
        session_id: str,
        max_steps: int,
        ai_provider: BaseAIProvider,
        tool_executor: ToolExecutor,
        available_tools: Dict[str, tuple[mcp_types.Tool, str]],
    ):
        """Initialize a conversation session.

        Args:
            session_id: Unique identifier for this session.
            max_steps: Hard cap on generated steps; the session is marked
                complete once it is reached.
            ai_provider: Provider used to generate each conversation step.
            tool_executor: Executor that runs the AI's tool calls.
            available_tools: Mapping of tool name -> (MCP tool, server name).
        """
        self.session_id = session_id
        self.max_steps = max_steps
        self.ai_provider = ai_provider
        self.tool_executor = tool_executor
        self.available_tools = available_tools

        # Conversation state
        self.messages: List[Dict[str, Any]] = []
        self.steps: List[ConversationStep] = []
        self.current_step_number = 0
        self.is_complete = False

        logger.info(f"Created conversation session {session_id}")

    @with_correlation_id
    @with_operation_context(operation="process_message")
    async def process_message(self, message: str) -> Result[List[ConversationStep]]:
        """Process a user message and generate response steps.

        Appends the message to the conversation, then generates steps until
        the AI stops requesting tools, reports finish_reason "stop", or
        ``max_steps`` is reached.

        Returns:
            Result wrapping the newly generated steps, or a FAILED Result
            if the session was already complete or a step failed.
        """
        if self.is_complete:
            return Result(
                status=OperationStatus.FAILED,
                error="Conversation session is already complete",
                error_code="SESSION_COMPLETE",
            )

        # Add user message to conversation
        self.messages.append({"role": "user", "content": message})

        logger.info(
            f"Processing message in session {self.session_id}",
            session_id=self.session_id,
            message_length=len(message),
        )

        new_steps: List[ConversationStep] = []

        try:
            # Generate steps until completion or max steps reached
            for step_num in range(self.current_step_number, self.max_steps):
                if self.is_complete:
                    break

                step_result = await self._generate_step(step_num)
                if not step_result.is_success:
                    return Result(
                        status=OperationStatus.FAILED,
                        error=f"Failed to generate step {step_num}: {step_result.error}",
                        error_code="STEP_GENERATION_FAILED",
                    )

                step = step_result.data
                if step is None:
                    return Result(
                        status=OperationStatus.FAILED,
                        error=f"Step {step_num} returned None data",
                        error_code="NULL_STEP_DATA",
                    )

                self.steps.append(step)
                new_steps.append(step)
                self.current_step_number += 1

                # Update conversation messages
                await self._update_conversation_messages(step)

                # Check if conversation should continue: a step with no tool
                # calls (or an explicit stop) ends the session.
                if not step.tool_calls or step.finish_reason == "stop":
                    self.is_complete = True
                    break

            # Mark as complete if we've reached max steps
            if self.current_step_number >= self.max_steps:
                self.is_complete = True

            logger.info(
                f"Processed message with {len(new_steps)} steps",
                session_id=self.session_id,
                steps_generated=len(new_steps),
                is_complete=self.is_complete,
            )

            return Result(status=OperationStatus.SUCCESS, data=new_steps)

        except Exception as e:
            error = ConversationError(
                f"Error processing message: {e}",
                session_id=self.session_id,
                step_number=self.current_step_number,
                cause=e,
            )
            logger.error("Message processing failed", error=error)
            return Result(
                status=OperationStatus.FAILED,
                error=str(error),
                error_code=error.error_code,
            )

    async def _generate_step(self, step_number: int) -> Result[ConversationStep]:
        """Generate a single conversation step.

        Asks the AI provider for a step, executes any tool calls it
        contains, records per-step timing, and returns the populated step.
        """
        start_time = time.time()

        try:
            # Generate AI response
            step_result = await self.ai_provider.generate_step(
                messages=self.messages,
                available_tools=self.available_tools,
                step_number=step_number,
            )

            if not step_result.is_success:
                return step_result

            step = step_result.data
            if step is None:
                return Result(
                    status=OperationStatus.FAILED,
                    error=f"AI provider returned None step data for step {step_number}",
                    error_code="NULL_AI_STEP_DATA",
                )

            # Execute any tool calls
            if step.tool_calls:
                logger.debug(
                    f"Executing {len(step.tool_calls)} tool calls",
                    session_id=self.session_id,
                    step_number=step_number,
                )

                tool_results = await self.tool_executor.execute_tools_concurrently(
                    step.tool_calls
                )

                # Add results to step
                for result in tool_results:
                    step.add_tool_result(result)

            # Calculate step duration
            step.duration_ms = (time.time() - start_time) * 1000

            logger.debug(
                f"Generated step {step_number}",
                session_id=self.session_id,
                step_number=step_number,
                has_text=bool(step.text),
                tool_calls=len(step.tool_calls),
                tool_results=len(step.tool_results),
                duration_ms=step.duration_ms,
            )

            return Result(status=OperationStatus.SUCCESS, data=step)

        except Exception as e:
            error = ConversationError(
                f"Error generating step {step_number}: {e}",
                session_id=self.session_id,
                step_number=step_number,
                cause=e,
            )
            logger.error("Step generation failed", error=error)
            return Result(
                status=OperationStatus.FAILED,
                error=str(error),
                error_code=error.error_code,
            )

    async def _update_conversation_messages(self, step: ConversationStep):
        """Update conversation messages with step results.

        Dispatches on the provider name so the history matches the message
        shape that provider's API expects on the next call.
        """
        if self.ai_provider.provider_name == "claude":
            await self._update_messages_claude_format(step)
        elif self.ai_provider.provider_name == "openai":
            await self._update_messages_openai_format(step)
        else:
            # Generic format
            await self._update_messages_generic_format(step)

    async def _update_messages_claude_format(self, step: ConversationStep):
        """Update messages in Claude format.

        Claude expects tool_use blocks inside the assistant message and the
        matching tool_result blocks inside a follow-up user message.
        """
        if step.tool_calls:
            # Claude format: tool calls and results in assistant message content
            content_blocks: List[Dict[str, Any]] = []

            # Add text content if any
            if step.text:
                content_blocks.append({"type": "text", "text": step.text})

            # Add tool use blocks
            for tc in step.tool_calls:
                tool_use_block = {
                    "type": "tool_use",
                    "id": tc.id,
                    "name": tc.tool_name,
                    "input": tc.arguments,
                }
                content_blocks.append(tool_use_block)

            assistant_message = {
                "role": "assistant",
                "content": content_blocks,
            }
            self.messages.append(assistant_message)

            # Add user message with tool results
            tool_result_blocks: List[Dict[str, Any]] = []
            for result in step.tool_results:
                tool_result_blocks.append(
                    {
                        "type": "tool_result",
                        "tool_use_id": result.id,
                        "content": self._format_tool_result_content(result),
                    }
                )

            if tool_result_blocks:
                user_message = {
                    "role": "user",
                    "content": tool_result_blocks,
                }
                self.messages.append(user_message)
        else:
            # Regular text response
            self.messages.append({"role": "assistant", "content": step.text})

    async def _update_messages_openai_format(self, step: ConversationStep):
        """Update messages in OpenAI format.

        OpenAI expects tool calls on the assistant message and one separate
        role="tool" message per result, keyed by tool_call_id.
        """
        if step.tool_calls:
            import json

            # OpenAI format: separate tool call and tool result messages
            assistant_message = {
                "role": "assistant",
                "content": step.text or "",
                "tool_calls": [
                    {
                        "id": tc.id,
                        "type": "function",
                        "function": {
                            "name": tc.tool_name,
                            # Fix: the API requires a JSON-encoded string;
                            # str(dict) yields a single-quoted Python repr
                            # the API cannot parse back.
                            "arguments": json.dumps(tc.arguments),
                        },
                    }
                    for tc in step.tool_calls
                ],
            }
            self.messages.append(assistant_message)

            # Add tool result messages
            for result in step.tool_results:
                tool_message = {
                    "role": "tool",
                    "tool_call_id": result.id,
                    "content": self._format_tool_result_content(result),
                }
                self.messages.append(tool_message)
        else:
            # Regular text response
            self.messages.append({"role": "assistant", "content": step.text})

    async def _update_messages_generic_format(self, step: ConversationStep):
        """Update messages in generic format.

        Simple plain-text summary used for providers with no dedicated
        tool-message format.
        """
        content = step.text or ""

        if step.tool_calls:
            content += f"\n\nTool calls executed: {len(step.tool_calls)}"
            for result in step.tool_results:
                # Fix: compare against None so falsy-but-valid results
                # (0, "", [], {}) are still reported rather than being
                # misclassified, matching _format_tool_result_content.
                if result.result is not None:
                    content += f"\n- {result.tool_name}: {result.result}"
                elif result.error:
                    content += f"\n- {result.tool_name}: Error - {result.error}"

        self.messages.append({"role": "assistant", "content": content})

    def _format_tool_result_content(self, result: ToolResult) -> str:
        """Format tool result content for messages.

        Returns JSON for dict/list results, str() for other values, an
        "Error: ..." string for failures, and "No result" otherwise.
        """
        if result.result is not None:
            if isinstance(result.result, (dict, list)):
                import json

                return json.dumps(result.result)
            else:
                return str(result.result)
        elif result.error:
            return f"Error: {result.error}"
        else:
            return "No result"

    def get_conversation_summary(self) -> Dict[str, Any]:
        """Get a summary of the conversation session.

        Returns:
            Dict with step/message counts, tool-call totals (a call is
            "successful" when its result carries no error), completion
            flag, and the configured max_steps.
        """
        total_tool_calls = sum(len(step.tool_calls) for step in self.steps)
        successful_tool_calls = sum(
            len([r for r in step.tool_results if not r.error]) for step in self.steps
        )

        return {
            "session_id": self.session_id,
            "steps": len(self.steps),
            "messages": len(self.messages),
            "total_tool_calls": total_tool_calls,
            "successful_tool_calls": successful_tool_calls,
            "is_complete": self.is_complete,
            "max_steps": self.max_steps,
        }

    async def close(self):
        """Close the conversation session and clean up resources."""
        logger.info(f"Closing conversation session {self.session_id}")
        self.is_complete = True
        # Could add cleanup logic here if needed
@@ -0,0 +1,11 @@
1
+ """AI provider abstractions for different AI services."""
2
+
3
+ from .base_provider import BaseAIProvider
4
+ from .claude_provider import ClaudeProvider
5
+ from .openai_provider import OpenAIProvider
6
+
7
+ __all__ = [
8
+ "BaseAIProvider",
9
+ "ClaudeProvider",
10
+ "OpenAIProvider",
11
+ ]
@@ -0,0 +1,64 @@
1
+ """Base AI provider interface for different AI services."""
2
+
3
+ from abc import ABC, abstractmethod
4
+ from typing import Any, Dict, List
5
+
6
+ import mcp.types as mcp_types
7
+
8
+ from tools.common import ConversationStep, Result, ToolCall
9
+
10
+
11
class BaseAIProvider(ABC):
    """Abstract contract shared by AI providers (Claude, OpenAI, ...).

    A concrete provider translates between the session's neutral message
    and tool representation and its service-specific API format.
    """

    def __init__(self, api_key: str):
        """Store the API key used to authenticate with the service."""
        self.api_key = api_key

    @property
    @abstractmethod
    def provider_name(self) -> str:
        """Short identifier for this provider (e.g. "claude")."""
        ...

    @property
    @abstractmethod
    def default_model(self) -> str:
        """Model identifier used when the caller does not choose one."""
        ...

    @abstractmethod
    async def generate_step(
        self,
        messages: List[Dict[str, Any]],
        available_tools: Dict[str, tuple[mcp_types.Tool, str]],
        step_number: int,
    ) -> Result[ConversationStep]:
        """Produce one conversation step, possibly containing tool calls."""
        ...

    @abstractmethod
    def build_tools_context(
        self, available_tools: Dict[str, tuple[mcp_types.Tool, str]]
    ) -> str:
        """Render a human-readable summary of the available tools."""
        ...

    @abstractmethod
    def format_tool_for_api(
        self, mcp_tool: mcp_types.Tool, server_name: str
    ) -> Dict[str, Any]:
        """Translate an MCP tool definition into the provider's schema."""
        ...

    @abstractmethod
    def parse_tool_calls(self, response: Any) -> List[ToolCall]:
        """Extract tool calls from a raw provider response object."""
        ...

    @abstractmethod
    def format_messages_for_api(
        self, messages: List[Dict[str, Any]]
    ) -> List[Dict[str, Any]]:
        """Adapt the stored message history to the provider's API shape."""
        ...
@@ -0,0 +1,200 @@
1
+ """Claude AI provider implementation."""
2
+
3
+ import json
4
+ from typing import Any, Dict, List
5
+
6
+ import anthropic
7
+ import mcp.types as mcp_types
8
+
9
+ from tools.common import (
10
+ ConversationStep,
11
+ OperationStatus,
12
+ Result,
13
+ ToolCall,
14
+ get_logger,
15
+ )
16
+
17
+ from .base_provider import BaseAIProvider
18
+
19
+ logger = get_logger("ClaudeProvider")
20
+
21
+
22
class ClaudeProvider(BaseAIProvider):
    """Claude AI provider implementation.

    Wraps the async Anthropic client and converts between the session's
    neutral tool/message representation and the Claude Messages API.
    """

    def __init__(self, api_key: str):
        """Initialize Claude provider with an async Anthropic client."""
        super().__init__(api_key)
        self.client = anthropic.AsyncAnthropic(api_key=api_key)

    @property
    def provider_name(self) -> str:
        """Get the provider name."""
        return "claude"

    @property
    def default_model(self) -> str:
        """Get the default model for this provider."""
        return "claude-3-5-sonnet-20241022"

    async def generate_step(
        self,
        messages: List[Dict[str, Any]],
        available_tools: Dict[str, tuple[mcp_types.Tool, str]],
        step_number: int,
    ) -> Result[ConversationStep]:
        """Generate a single conversation step with potential tool calls.

        Returns:
            SUCCESS Result wrapping a ConversationStep, or a FAILED Result
            with error_code "CLAUDE_API_ERROR" on any exception.
        """
        try:
            # Build system prompt with tools context
            tools_context = self.build_tools_context(available_tools)
            system_prompt = f"""You are an AI assistant that can control multiple creative applications through MCP servers.

{tools_context}

You can use the available tools to interact with the connected servers. When you need to perform actions, use the appropriate tools. For conversational responses, respond normally with helpful information."""

            # Build tools for Claude's native tool calling
            claude_tools = [
                self.format_tool_for_api(mcp_tool, server_name)
                for mcp_tool, server_name in available_tools.values()
            ]

            logger.debug(f"Making Claude API call with {len(claude_tools)} tools")

            # Single call site (the original duplicated the call): include
            # the `tools` parameter only when there is at least one tool.
            request_kwargs: Dict[str, Any] = {
                "model": self.default_model,
                "max_tokens": 4000,
                "system": system_prompt,
                "messages": self.format_messages_for_api(messages),
            }
            if claude_tools:
                request_kwargs["tools"] = claude_tools
            response = await self.client.messages.create(**request_kwargs)  # type: ignore

            # Create conversation step
            step = ConversationStep(step_number=step_number)

            # Parse Claude's response content blocks
            if response.content:
                for content_block in response.content:
                    if content_block.type == "text":
                        step.text = content_block.text
                    elif content_block.type == "tool_use":
                        # Convert Claude's tool call to our format; guard
                        # against a non-dict `input` payload.
                        arguments = content_block.input
                        if not isinstance(arguments, dict):
                            arguments = {}
                        tool_call = ToolCall(
                            id=content_block.id,
                            tool_name=content_block.name,
                            arguments=arguments,
                        )
                        step.add_tool_call(tool_call)

            # Set finish reason if no tool calls and no text
            if not step.tool_calls and not step.text:
                step.finish_reason = "stop"

            return Result(status=OperationStatus.SUCCESS, data=step)

        except Exception as e:
            logger.error("Claude API call failed", error=e)
            return Result(
                status=OperationStatus.FAILED,
                error=f"Claude API error: {e}",
                error_code="CLAUDE_API_ERROR",
            )

    def build_tools_context(
        self, available_tools: Dict[str, tuple[mcp_types.Tool, str]]
    ) -> str:
        """Build a context description of available tools, grouped by server."""
        if not available_tools:
            return "No connected servers or tools available."

        tools_desc = []
        tools_by_server: Dict[str, List[mcp_types.Tool]] = {}

        # Group tools by server
        for tool_name, (mcp_tool, server_name) in available_tools.items():
            tools_by_server.setdefault(server_name, []).append(mcp_tool)

        # Build description
        for server_name, server_tools in tools_by_server.items():
            tools_desc.append(f"**{server_name} Server**:")
            for tool in server_tools:
                description = tool.description or "No description available"
                tools_desc.append(f"  - {tool.name}: {description}")

        return "Connected Servers and Available Tools:\n" + "\n".join(tools_desc)

    def format_tool_for_api(
        self, mcp_tool: mcp_types.Tool, server_name: str
    ) -> Dict[str, Any]:
        """Convert MCP tool to Claude tool format (name/description/input_schema)."""
        return {
            "name": mcp_tool.name,
            "description": mcp_tool.description
            or f"Call {mcp_tool.name} on {server_name} server",
            "input_schema": mcp_tool.inputSchema,
        }

    def parse_tool_calls(self, response: Any) -> List[ToolCall]:
        """Parse tool calls from Claude response (tool_use content blocks)."""
        tool_calls = []

        if hasattr(response, "content"):
            for content_block in response.content:
                if hasattr(content_block, "type") and content_block.type == "tool_use":
                    tool_call = ToolCall(
                        id=getattr(content_block, "id", ""),
                        tool_name=getattr(content_block, "name", ""),
                        arguments=getattr(content_block, "input", {}),
                    )
                    tool_calls.append(tool_call)

        return tool_calls

    def format_messages_for_api(
        self, messages: List[Dict[str, Any]]
    ) -> List[Dict[str, Any]]:
        """Format messages for Claude's API.

        Claude takes the system prompt as a separate `system` parameter,
        so any role="system" messages are stripped from the history.
        """
        formatted_messages = []

        for message in messages:
            if message.get("role") != "system":
                formatted_messages.append(message)

        return formatted_messages

    def format_tool_results_for_api(
        self, tool_calls: List[ToolCall], tool_results: List[Any]
    ) -> List[Dict[str, Any]]:
        """Format tool results for Claude's API.

        Claude expects tool results as `tool_result` blocks (in a user
        message) keyed by the originating tool_use id.
        """
        tool_result_blocks = []

        for tool_call, result in zip(tool_calls, tool_results):
            # Fix: compare against None so falsy-but-valid results
            # (0, "", [], {}) are serialized instead of falling through
            # to the error branch, and always produce a string — the
            # original could emit a None content value when `error` was
            # present as an attribute but unset.
            value = getattr(result, "result", None)
            if value is not None:
                content = json.dumps(value)
            else:
                error = getattr(result, "error", None)
                content = str(error) if error else "No result"

            tool_result_blocks.append(
                {
                    "type": "tool_result",
                    "tool_use_id": tool_call.id,
                    "content": content,
                }
            )

        return tool_result_blocks