casual_mcp-0.1.0-py3-none-any.whl

casual_mcp/providers/ollama_provider.py ADDED
@@ -0,0 +1,72 @@
+ from typing import Any
+
+ import mcp
+ import ollama
+ from ollama import ChatResponse, Client, ResponseError
+
+ from casual_mcp.logging import get_logger
+ from casual_mcp.models.generation_error import GenerationError
+ from casual_mcp.models.messages import AssistantMessage, CasualMcpMessage
+ from casual_mcp.providers.abstract_provider import CasualMcpProvider
+
+ logger = get_logger("providers.ollama")
+
+ def convert_tools(mcp_tools: list[mcp.Tool]) -> list[ollama.Tool]:
+     raise NotImplementedError("under development")
+
+
+ def convert_messages(messages: list[CasualMcpMessage]) -> list[ollama.Message]:
+     raise NotImplementedError("under development")
+
+
+ def convert_tool_calls(response_tool_calls: list[ollama.Message.ToolCall]) -> list[dict[str, Any]]:
+     raise NotImplementedError("under development")
+
+
+ class OllamaProvider(CasualMcpProvider):
+     def __init__(self, model: str, endpoint: str | None = None):
+         self.model = model
+         self.client = Client(
+             host=endpoint,
+         )
+
+     async def generate(
+         self,
+         messages: list[CasualMcpMessage],
+         tools: list[mcp.Tool]
+     ) -> CasualMcpMessage:
+         logger.info("Start Generating")
+         logger.debug(f"Model: {self.model}")
+
+         # Convert tools to Ollama format
+         converted_tools = convert_tools(tools)
+         logger.debug(f"Converted Tools: {converted_tools}")
+         logger.info(f"Adding {len(converted_tools)} tools")
+
+         # Convert Messages to Ollama format
+         converted_messages = convert_messages(messages)
+         logger.debug(f"Converted Messages: {converted_messages}")
+
+         # Call Ollama API
+         try:
+             response: ChatResponse = self.client.chat(
+                 model=self.model, messages=converted_messages, stream=False, tools=converted_tools
+             )
+         except ResponseError as e:
+             if e.status_code == 404:
+                 logger.info(f"Model {self.model} not found, pulling")
+                 self.client.pull(self.model)
+                 return await self.generate(messages, tools)
+
+             raise
+         except Exception as e:
+             logger.warning(f"Error in Generation: {e}")
+             raise GenerationError(str(e)) from e
+
+         # Convert any tool calls
+         tool_calls = []
+         if hasattr(response.message, "tool_calls") and response.message.tool_calls:
+             logger.debug(f"Assistant requested {len(response.message.tool_calls)} tool calls")
+             tool_calls = convert_tool_calls(response.message.tool_calls)
+
+         return AssistantMessage(content=response.message.content, tool_calls=tool_calls)
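
Editor's note: the three converters in this file are still stubs, so generate raises as soon as it runs. A minimal sketch of what convert_tool_calls might look like, assuming the ToolCall shape from ollama-python 0.4 (function.name, plus function.arguments as a mapping) and the dict layout the OpenAI provider below emits; the synthesized call_{i} IDs are an assumption, since Ollama responses carry no call IDs:

import json

def convert_tool_calls(response_tool_calls: list[ollama.Message.ToolCall]) -> list[dict[str, Any]]:
    tool_calls = []
    for i, call in enumerate(response_tool_calls):
        tool_calls.append({
            "id": f"call_{i}",  # assumption: Ollama returns no call IDs, so synthesize one
            "type": "function",
            "function": {
                "name": call.function.name,
                # arguments arrive as a mapping; serialize to a JSON string to
                # match the convention used by the OpenAI provider below
                "arguments": json.dumps(dict(call.function.arguments)),
            },
        })
    return tool_calls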
casual_mcp/providers/openai_provider.py ADDED
@@ -0,0 +1,179 @@
+ from typing import Any
+
+ import mcp
+ from openai import OpenAI
+ from openai.types.chat import (
+     ChatCompletionAssistantMessageParam,
+     ChatCompletionMessageParam,
+     ChatCompletionMessageToolCall,
+     ChatCompletionMessageToolCallParam,
+     ChatCompletionSystemMessageParam,
+     ChatCompletionToolMessageParam,
+     ChatCompletionToolParam,
+     ChatCompletionUserMessageParam,
+ )
+
+ from casual_mcp.logging import get_logger
+ from casual_mcp.models.generation_error import GenerationError
+ from casual_mcp.models.messages import AssistantMessage, CasualMcpMessage
+ from casual_mcp.models.tool_call import AssistantToolCall, AssistantToolCallFunction
+ from casual_mcp.providers.abstract_provider import CasualMcpProvider
+
+ logger = get_logger("providers.openai")
+
+ MessageType = dict[str, Any]
+
+
+ def convert_tools(mcp_tools: list[mcp.Tool]) -> list[ChatCompletionToolParam]:
+     logger.info("Converting MCP tools to OpenAI format")
+     tools = []
+
+     # Convert all the tools
+     for mcp_tool in mcp_tools:
+         if mcp_tool.name and mcp_tool.description:
+             # Convert the tool
+             tool = convert_tool(mcp_tool)
+             tools.append(tool)
+         else:
+             logger.warning(
+                 f"Tool missing attributes: name = {mcp_tool.name}, description = {mcp_tool.description}"  # noqa: E501
+             )
+
+     return tools
+
+
+ def convert_tool(mcp_tool: mcp.Tool) -> ChatCompletionToolParam:
+     logger.debug(f"Converting: {mcp_tool.name}")
+     tool = {
+         "type": "function",
+         "function": {
+             "name": mcp_tool.name,
+             "description": mcp_tool.description,
+             "parameters": {
+                 "type": "object",
+                 "properties": mcp_tool.inputSchema["properties"],
+                 "required": mcp_tool.inputSchema.get("required", []),
+             },
+         },
+     }
+     return ChatCompletionToolParam(**tool)
+
+
+ def convert_messages(messages: list[CasualMcpMessage]) -> list[ChatCompletionMessageParam]:
+     if not messages:
+         return []
+
+     logger.info("Converting messages to OpenAI format")
+
+     openai_messages: list[ChatCompletionMessageParam] = []
+     for msg in messages:
+         match msg.role:
+             case "assistant":
+                 tool_calls = None
+                 if msg.tool_calls:
+                     tool_calls = []
+                     for tool_call in msg.tool_calls:
+                         function = {
+                             "name": tool_call.function.name,
+                             "arguments": tool_call.function.arguments
+                         }
+                         tool_calls.append(
+                             ChatCompletionMessageToolCallParam(
+                                 id=tool_call.id,
+                                 type=tool_call.type,
+                                 function=function
+                             )
+                         )
+                 message = ChatCompletionAssistantMessageParam(
+                     role="assistant", content=msg.content
+                 )
+                 if tool_calls:
+                     message["tool_calls"] = tool_calls
+                 openai_messages.append(message)
+             case "system":
+                 openai_messages.append(
+                     ChatCompletionSystemMessageParam(role="system", content=msg.content)
+                 )
+             case "tool":
+                 openai_messages.append(
+                     ChatCompletionToolMessageParam(
+                         role="tool", content=msg.content, tool_call_id=msg.tool_call_id
+                     )
+                 )
+             case "user":
+                 openai_messages.append(
+                     ChatCompletionUserMessageParam(role="user", content=msg.content)
+                 )
+
+     return openai_messages
+
+
+ def convert_tool_calls(
+     response_tool_calls: list[ChatCompletionMessageToolCall],
+ ) -> list[AssistantToolCall]:
+     tool_calls = []
+
+     for tool in response_tool_calls:
+         logger.debug(f"Convert Tool: {tool}")
+
+         # Create the tool object in the format expected by client.py
+         tool_call = AssistantToolCall(
+             id=tool.id,
+             function=AssistantToolCallFunction(
+                 name=tool.function.name,
+                 type="function",
+                 arguments=tool.function.arguments
+             )
+         )
+         tool_calls.append(tool_call)
+
+     return tool_calls
+
+
+ class OpenAiProvider(CasualMcpProvider):
+     def __init__(self, model: str, api_key: str, tools: list[mcp.Tool], endpoint: str | None = None):
+
+         # Convert MCP Tools to OpenAI format
+         self.tools = convert_tools(tools)
+         logger.debug(f"Converted Tools: {self.tools}")
+         logger.info(f"Adding {len(self.tools)} tools")
+         self.model = model
+         self.client = OpenAI(
+             base_url=endpoint,
+             api_key=api_key,
+         )
+
+     async def generate(
+         self,
+         messages: list[CasualMcpMessage],
+         tools: list[mcp.Tool]
+     ) -> AssistantMessage:
+         logger.info("Start Generating")
+         logger.debug(f"Model: {self.model}")
+
+         # Convert Messages to OpenAI format
+         converted_messages = convert_messages(messages)
+         logger.debug(f"Converted Messages: {converted_messages}")
+         logger.info(f"Adding {len(converted_messages)} messages")
+
+         # Call OpenAI API
+         try:
+             result = self.client.chat.completions.create(
+                 model=self.model, messages=converted_messages, tools=self.tools
+             )
+
+             response = result.choices[0]
+         except Exception as e:
+             logger.warning(f"Error in Generation: {e}")
+             raise GenerationError(str(e)) from e
+
+         logger.debug(response)
+
+         # Convert any tool calls
+         tool_calls = None
+         if hasattr(response.message, "tool_calls") and response.message.tool_calls:
+             logger.debug(f"Assistant requested {len(response.message.tool_calls)} tool calls")
+             tool_calls = convert_tool_calls(response.message.tool_calls)
+             logger.debug(f"Converted {len(tool_calls)} tool calls")
+
+         return AssistantMessage(content=response.message.content, tool_calls=tool_calls)
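
Editor's note: to make the tool mapping concrete, a small hedged example of what convert_tool produces; the weather tool is invented, and the mcp.Tool field names (name, description, inputSchema) follow the accesses in this file:

import mcp

weather_tool = mcp.Tool(
    name="get_weather",
    description="Look up the current weather for a city",
    inputSchema={
        "type": "object",
        "properties": {"city": {"type": "string"}},
        "required": ["city"],
    },
)

# convert_tool(weather_tool) would yield:
# {
#     "type": "function",
#     "function": {
#         "name": "get_weather",
#         "description": "Look up the current weather for a city",
#         "parameters": {
#             "type": "object",
#             "properties": {"city": {"type": "string"}},
#             "required": ["city"],
#         },
#     },
# }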
@@ -0,0 +1,53 @@
+ import os
+ from typing import TypeAlias
+
+ import mcp
+
+ from casual_mcp.logging import get_logger
+ from casual_mcp.models.model_config import ModelConfig
+ from casual_mcp.providers.ollama_provider import OllamaProvider
+ from casual_mcp.providers.openai_provider import OpenAiProvider
+
+ logger = get_logger("providers.factory")
+
+ LLMProvider: TypeAlias = OpenAiProvider | OllamaProvider
+
+ class ProviderFactory:
+     def __init__(self):
+         self.providers: dict[str, LLMProvider] = {}
+         self.tools: list[mcp.Tool] = []
+
+
+     def set_tools(self, tools: list[mcp.Tool]):
+         self.tools = tools
+
+
+     def get_provider(self, name: str, config: ModelConfig) -> LLMProvider:
+         if name in self.providers:
+             return self.providers[name]
+
+         match config.provider:
+             case "ollama":
+                 endpoint = str(config.endpoint) if config.endpoint else None
+                 logger.info(f"Creating Ollama provider for {config.model} at {endpoint}")
+                 provider = OllamaProvider(config.model, endpoint=endpoint)
+
+             case "openai":
+                 endpoint = None
+                 if config.endpoint:
+                     endpoint = str(config.endpoint)
+
+                 logger.info(f"Creating OpenAI provider for {config.model} at {endpoint}")
+                 api_key = os.getenv("OPEN_AI_API_KEY")
+                 provider = OpenAiProvider(
+                     config.model,
+                     api_key,
+                     self.tools,
+                     endpoint=endpoint,
+                 )
+
+             case _:
+                 raise ValueError(f"Unknown provider: {config.provider}")
+
+         self.providers[name] = provider
+         return provider
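
Editor's note: a hedged usage sketch of the factory, assuming ModelConfig exposes the provider, model, and endpoint fields read above; the registry name, model tag, and URL are placeholders, and mcp_tools is a list of mcp.Tool prepared elsewhere:

factory = ProviderFactory()
factory.set_tools(mcp_tools)  # required before requesting an "openai" provider

config = ModelConfig(provider="ollama", model="llama3.2", endpoint="http://localhost:11434")
provider = factory.get_provider("local-llama", config)

# Inside an async context; generate() is a coroutine on both providers
reply = await provider.generate(messages, mcp_tools)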
casual_mcp/utils.py ADDED
@@ -0,0 +1,90 @@
+ import json
+ from pathlib import Path
+
+ import mcp
+ from jinja2 import Environment, FileSystemLoader
+ from pydantic import ValidationError
+
+ from casual_mcp.models.config import Config
+ from casual_mcp.models.tool_call import AssistantToolCall
+
+
+ def load_config(path: str | Path) -> Config:
+     path = Path(path)
+
+     if not path.exists():
+         raise FileNotFoundError(f"Config file not found: {path}")
+
+     try:
+         with path.open("r", encoding="utf-8") as f:
+             raw_data = json.load(f)
+
+         return Config(**raw_data)
+     except ValidationError as ve:
+         raise ValueError(f"Invalid config:\n{ve}") from ve
+     except json.JSONDecodeError as je:
+         raise ValueError(f"Could not parse config JSON:\n{je}") from je
+
+ def format_tool_call_result(
+     tool_call: AssistantToolCall,
+     result: str,
+     style: str = "function_result",
+     include_id: bool = False
+ ) -> str:
+     """
+     Format a tool call and result into a prompt-friendly string.
+
+     Supported styles:
+     - "result": Only the result text
+     - "function_result": function → result
+     - "function_args_result": function(args) → result
+
+     Set include_id to add the tool call ID above the result.
+
+     Args:
+         tool_call (AssistantToolCall): Tool call
+         result (str): Output of the tool
+         style (str): One of the supported formatting styles
+         include_id (bool): Whether to include the tool call ID
+
+     Returns:
+         str: Formatted content string
+     """
+     func_name = tool_call.function.name
+
+     if style == "result":
+         result_str = result
+
+     elif style == "function_result":
+         result_str = f"{func_name} → {result}"
+
+     elif style == "function_args_result":
+         args = json.loads(tool_call.function.arguments)
+         arg_string = ", ".join(f"{k}={repr(v)}" for k, v in args.items())
+         result_str = f"{func_name}({arg_string}) → {result}"
+
+     else:
+         raise ValueError(f"Unsupported style: {style}")
+
+     if include_id:
+         return f"ID: {tool_call.id}\n{result_str}"
+
+     return result_str
+
+
+ def render_system_prompt(template_name: str, tools: list[mcp.Tool], extra: dict | None = None) -> str:
+     """
+     Renders a system prompt template with tool definitions.
+
+     :param template_name: e.g. 'stealth_tools_prompt.j2'
+     :param tools: list of mcp.Tool objects with 'name' and 'description' (at minimum)
+     :param extra: optional additional variables for template
+     :return: rendered system prompt
+     """
+     template_dir = Path("prompt-templates").resolve()
+     env = Environment(loader=FileSystemLoader(template_dir), autoescape=False)
+     template = env.get_template(template_name)
+     context = {"tools": tools}
+     if extra:
+         context.update(extra)
+     return template.render(**context)
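
Editor's note: to show the three formatting styles side by side, a hedged example; the AssistantToolCall construction mirrors convert_tool_calls in the OpenAI provider (so a default type field is assumed), and the call values are invented:

from casual_mcp.models.tool_call import AssistantToolCall, AssistantToolCallFunction

call = AssistantToolCall(
    id="call_0",
    function=AssistantToolCallFunction(
        name="add", type="function", arguments='{"a": 2, "b": 3}'
    ),
)

format_tool_call_result(call, "5", style="result")                # '5'
format_tool_call_result(call, "5", style="function_result")       # 'add → 5'
format_tool_call_result(call, "5", style="function_args_result")  # 'add(a=2, b=3) → 5'
format_tool_call_result(call, "5", include_id=True)               # 'ID: call_0\nadd → 5'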