casual-mcp 0.1.0__py3-none-any.whl → 0.3.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
casual_mcp/__init__.py CHANGED
@@ -1,13 +1,13 @@
  from . import models
  from .mcp_tool_chat import McpToolChat
- from .multi_server_mcp_client import MultiServerMCPClient
  from .providers.provider_factory import ProviderFactory
- from .utils import load_config
+ from .utils import load_config, load_mcp_client, render_system_prompt

  __all__ = [
      "McpToolChat",
-     "MultiServerMCPClient",
      "ProviderFactory",
      "load_config",
+     "load_mcp_client",
+     "render_system_prompt",
      "models",
  ]
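
The `__init__.py` hunk above is the 0.3.0 migration in miniature: `MultiServerMCPClient` leaves the public API, and `load_mcp_client` and `render_system_prompt` are exported in its place. A minimal sketch of the new import surface, using only the names visible in this hunk:

```python
# Sketch of the 0.3.0 public API, per the __all__ list above.
from casual_mcp import (
    McpToolChat,           # recursive tool-calling chat loop
    ProviderFactory,       # now constructed with an MCP client (see main.py)
    load_config,           # loads the config file into a validated object
    load_mcp_client,       # new: builds a FastMCP Client from that config
    render_system_prompt,  # new export: renders a Jinja2 system prompt
    models,                # typed message and config models
)
```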
casual_mcp/cli.py CHANGED
@@ -3,6 +3,7 @@ import uvicorn
  from rich.console import Console
  from rich.table import Table

+ from casual_mcp.models.mcp_server_config import RemoteServerConfig
  from casual_mcp.utils import load_config

  app = typer.Typer()
@@ -26,23 +27,22 @@ def servers():
      """
      Return a table of all configured servers
      """
-     config = load_config('config.json')
-     table = Table("Name", "Type", "Path / Package / Url", "Env")
+     config = load_config('casual_mcp_config.json')
+     table = Table("Name", "Type", "Command / Url", "Env")

      for name, server in config.servers.items():
+         type = 'local'
+         if isinstance(server, RemoteServerConfig):
+             type = 'remote'
+
          path = ''
-         match server.type:
-             case 'python':
-                 path = server.path
-             case 'node':
-                 path = server.path
-             case 'http':
-                 path = server.url
-             case 'uvx':
-                 path = server.package
+         if isinstance(server, RemoteServerConfig):
+             path = server.url
+         else:
+             path = f"{server.command} {' '.join(server.args)}"
          env = ''

-         table.add_row(name, server.type, path, env)
+         table.add_row(name, type, path, env)

      console.print(table)

@@ -51,7 +51,7 @@ def models():
      """
      Return a table of all configured models
      """
-     config = load_config('config.json')
+     config = load_config('casual_mcp_config.json')
      table = Table("Name", "Provider", "Model", "Endpoint")

      for name, model in config.models.items():
casual_mcp/main.py CHANGED
@@ -6,16 +6,16 @@ from dotenv import load_dotenv
  from fastapi import FastAPI, HTTPException
  from pydantic import BaseModel, Field

- from casual_mcp import McpToolChat, MultiServerMCPClient
+ from casual_mcp import McpToolChat
  from casual_mcp.logging import configure_logging, get_logger
- from casual_mcp.models.messages import CasualMcpMessage
+ from casual_mcp.models.messages import ChatMessage
  from casual_mcp.providers.provider_factory import ProviderFactory
- from casual_mcp.utils import load_config, render_system_prompt
+ from casual_mcp.utils import load_config, load_mcp_client, render_system_prompt

  load_dotenv()
- config = load_config("config.json")
- mcp_client = MultiServerMCPClient(namespace_tools=config.namespace_tools)
- provider_factory = ProviderFactory()
+ config = load_config("casual_mcp_config.json")
+ mcp_client = load_mcp_client(config)
+ provider_factory = ProviderFactory(mcp_client)

  app = FastAPI()

@@ -32,6 +32,7 @@ You must not speculate or guess about dates — if a date is given to you by a t
  Always present information as current and factual.
  """

+
  class GenerateRequest(BaseModel):
      session_id: str | None = Field(
          default=None, title="Session to use"
@@ -42,11 +43,20 @@ class GenerateRequest(BaseModel):
      system_prompt: str | None = Field(
          default=None, title="System Prompt to use"
      )
-     user_prompt: str = Field(
+     prompt: str = Field(
          title="User Prompt"
      )
-     messages: list[CasualMcpMessage] | None = Field(
-         default=None, title="Previous messages to supply to the LLM"
+
+
+ class ChatRequest(BaseModel):
+     model: str = Field(
+         title="Model to use"
+     )
+     system_prompt: str | None = Field(
+         default=None, title="System Prompt to use"
+     )
+     messages: list[ChatMessage] = Field(
+         title="Previous messages to supply to the LLM"
      )

  sys.path.append(str(Path(__file__).parent.resolve()))
@@ -55,47 +65,11 @@ sys.path.append(str(Path(__file__).parent.resolve()))
  configure_logging(os.getenv("LOG_LEVEL", 'INFO'))
  logger = get_logger("main")

- async def perform_chat(
-     model,
-     user,
-     system: str | None = None,
-     messages: list[CasualMcpMessage] = None,
-     session_id: str | None = None
- ) -> list[CasualMcpMessage]:
-     # Get Provider from Model Config
-     model_config = config.models[model]
-     provider = provider_factory.get_provider(model, model_config)
-
-     if not system:
-         if (model_config.template):
-             system = render_system_prompt(
-                 f"{model_config.template}.j2",
-                 await mcp_client.list_tools()
-             )
-         else:
-             system = default_system_prompt
-
-     chat = McpToolChat(mcp_client, provider, system)
-     return await chat.chat(
-         prompt=user,
-         messages=messages,
-         session_id=session_id
-     )
-

  @app.post("/chat")
- async def chat(req: GenerateRequest):
-     if len(mcp_client.tools) == 0:
-         await mcp_client.load_config(config.servers)
-         provider_factory.set_tools(await mcp_client.list_tools())
-
-     messages = await perform_chat(
-         req.model,
-         system=req.system_prompt,
-         user=req.user_prompt,
-         messages=req.messages,
-         session_id=req.session_id
-     )
+ async def chat(req: ChatRequest):
+     chat = await get_chat(req.model, req.system_prompt)
+     messages = await chat.chat(req.messages)

      return {
          "messages": messages,
@@ -103,16 +77,43 @@ async def chat(req: GenerateRequest):
      }


- # This endpoint will either go away or be used for something else, don't use it
  @app.post("/generate")
- async def generate_response(req: GenerateRequest):
-     return await chat(req)
+ async def generate(req: GenerateRequest):
+     chat = await get_chat(req.model, req.system_prompt)
+     messages = await chat.generate(
+         req.prompt,
+         req.session_id
+     )

+     return {
+         "messages": messages,
+         "response": messages[len(messages) - 1].content
+     }

- @app.get("/chat/session/{session_id}")
- async def get_chat_session(session_id):
+
+ @app.get("/generate/session/{session_id}")
+ async def get_generate_session(session_id):
      session = McpToolChat.get_session(session_id)
      if not session:
          raise HTTPException(status_code=404, detail="Session not found")

      return session
+
+
+ async def get_chat(model: str, system: str | None = None) -> McpToolChat:
+     # Get Provider from Model Config
+     model_config = config.models[model]
+     provider = await provider_factory.get_provider(model, model_config)
+
+     # Get the system prompt
+     if not system:
+         if (model_config.template):
+             async with mcp_client:
+                 system = render_system_prompt(
+                     f"{model_config.template}.j2",
+                     await mcp_client.list_tools()
+                 )
+         else:
+             system = default_system_prompt
+
+     return McpToolChat(mcp_client, provider, system)
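
A hedged sketch of exercising the reshaped endpoints with `requests`, assuming the server above is running on `localhost:8000` and that a model named `gpt-4.1-nano` is configured (both assumptions, not part of this diff); the request and response shapes follow `ChatRequest`, `GenerateRequest`, and the returned dicts above:

```python
import requests  # assumed available; any HTTP client works

# /chat now takes a full message list (ChatRequest)
chat_resp = requests.post(
    "http://localhost:8000/chat",  # assumed host/port
    json={
        "model": "gpt-4.1-nano",   # assumed model name from your config
        "messages": [{"role": "user", "content": "What time is it in London?"}],
    },
)
print(chat_resp.json()["response"])

# /generate takes a plain prompt plus an optional session_id (GenerateRequest)
gen_resp = requests.post(
    "http://localhost:8000/generate",
    json={
        "model": "gpt-4.1-nano",
        "prompt": "And in Tokyo?",
        "session_id": "my-session",  # optional; the server replays prior messages
    },
)
print(gen_resp.json()["response"])
```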
casual_mcp/mcp_tool_chat.py CHANGED
@@ -1,64 +1,102 @@
+ import json
+ import os
+
+ from fastmcp import Client

  from casual_mcp.logging import get_logger
- from casual_mcp.models.messages import CasualMcpMessage, SystemMessage, UserMessage
- from casual_mcp.multi_server_mcp_client import MultiServerMCPClient
+ from casual_mcp.models.messages import (
+     ChatMessage,
+     SystemMessage,
+     ToolResultMessage,
+     UserMessage,
+ )
+ from casual_mcp.models.tool_call import AssistantToolCall
  from casual_mcp.providers.provider_factory import LLMProvider
+ from casual_mcp.utils import format_tool_call_result

  logger = get_logger("mcp_tool_chat")
- sessions: dict[str, list[CasualMcpMessage]] = {}
+ sessions: dict[str, list[ChatMessage]] = {}
+
+
+ def get_session_messages(session_id: str | None):
+     global sessions
+
+     if not sessions.get(session_id):
+         logger.info(f"Starting new session {session_id}")
+         sessions[session_id] = []
+     else:
+         logger.info(
+             f"Retrieving session {session_id} of length {len(sessions[session_id])}"
+         )
+     return sessions[session_id].copy()
+
+
+ def add_messages_to_session(session_id: str, messages: list[ChatMessage]):
+     global sessions
+     sessions[session_id].extend(messages.copy())


  class McpToolChat:
-     def __init__(self, tool_client: MultiServerMCPClient, provider: LLMProvider, system: str):
+     def __init__(self, mcp_client: Client, provider: LLMProvider, system: str = None):
          self.provider = provider
-         self.tool_client = tool_client
+         self.mcp_client = mcp_client
          self.system = system

      @staticmethod
-     def get_session(session_id) -> list[CasualMcpMessage] | None:
+     def get_session(session_id) -> list[ChatMessage] | None:
          global sessions
          return sessions.get(session_id)

-     async def chat(
+     async def generate(
          self,
-         prompt: str | None = None,
-         messages: list[CasualMcpMessage] = None,
+         prompt: str,
          session_id: str | None = None
-     ) -> list[CasualMcpMessage]:
-         global sessions
+     ) -> list[ChatMessage]:
+         # Fetch the session if we have a session ID
+         if session_id:
+             messages = get_session_messages(session_id)
+         else:
+             messages: list[ChatMessage] = []

-         # todo: check that we have a prompt or that there is a user message in messages
+         # Add the prompt as a user message
+         user_message = UserMessage(content=prompt)
+         messages.append(user_message)

-         # If we have a session ID then create if new and fetch it
+         # Add the user message to the session
          if session_id:
-             if not sessions.get(session_id):
-                 logger.info(f"Starting new session {session_id}")
-                 sessions[session_id] = []
-             else:
-                 logger.info(
-                     f"Retrieving session {session_id} of length {len(sessions[session_id])}"
-                 )
-             messages = sessions[session_id].copy()
+             add_messages_to_session(session_id, [user_message])

-         logger.info("Start Chat")
-         tools = await self.tool_client.list_tools()
+         # Perform Chat
+         response = await self.chat(messages=messages)

-         if messages is None or len(messages) == 0:
-             message_history = []
-             messages = [SystemMessage(content=self.system)]
-         else:
-             message_history = messages.copy()
+         # Add responses to session
+         if session_id:
+             add_messages_to_session(session_id, response)
+
+         return response

-         if prompt:
-             messages.append(UserMessage(content=prompt))

-         response: str | None = None
+     async def chat(
+         self,
+         messages: list[ChatMessage]
+     ) -> list[ChatMessage]:
+         # Add a system message if required
+         has_system_message = any(message.role == 'system' for message in messages)
+         if self.system and not has_system_message:
+             # Insert the system message at the start of the messages
+             messages.insert(0, SystemMessage(content=self.system))
+
+         logger.info("Start Chat")
+         async with self.mcp_client:
+             tools = await self.mcp_client.list_tools()
+
+         response_messages: list[ChatMessage] = []
          while True:
              logger.info("Calling the LLM")
              ai_message = await self.provider.generate(messages, tools)
-             response = ai_message.content

              # Add the assistant's message
+             response_messages.append(ai_message)
              messages.append(ai_message)

              if not ai_message.tool_calls:
@@ -69,22 +107,47 @@ class McpToolChat:
              result_count = 0
              for tool_call in ai_message.tool_calls:
                  try:
-                     result = await self.tool_client.execute(tool_call)
+                     result = await self.execute(tool_call)
                  except Exception as e:
                      logger.error(e)
                      return messages
                  if result:
                      messages.append(result)
+                     response_messages.append(result)
                      result_count = result_count + 1

              logger.info(f"Added {result_count} tool results")

-         logger.debug(f"""Final Response:
-         {response} """)
+         logger.debug(f"Final Response: {response_messages[-1].content}")

-         new_messages = [item for item in messages if item not in message_history]
-         if session_id:
-             sessions[session_id].extend(new_messages)
+         return response_messages
+
+
+     async def execute(self, tool_call: AssistantToolCall):
+         tool_name = tool_call.function.name
+         tool_args = json.loads(tool_call.function.arguments)
+         try:
+             async with self.mcp_client:
+                 result = await self.mcp_client.call_tool(tool_name, tool_args)
+         except Exception as e:
+             if isinstance(e, ValueError):
+                 logger.warning(e)
+             else:
+                 logger.error(f"Error calling tool: {e}")
+
+             return ToolResultMessage(
+                 name=tool_call.function.name,
+                 tool_call_id=tool_call.id,
+                 content=str(e),
+             )
+
+         logger.debug(f"Tool Call Result: {result}")

-         return new_messages
+         result_format = os.getenv('TOOL_RESULT_FORMAT', 'result')
+         content = format_tool_call_result(tool_call, result[0].text, style=result_format)

+         return ToolResultMessage(
+             name=tool_call.function.name,
+             tool_call_id=tool_call.id,
+             content=content,
+         )
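
The rewrite above splits the old all-in-one `chat()` into `generate()` (string prompt in, optional in-memory session) and `chat()` (caller-managed message list). A sketch of the two entry points, assuming a config file and model name as in the README later in this diff:

```python
import asyncio

from casual_mcp import McpToolChat, ProviderFactory, load_config, load_mcp_client
from casual_mcp.models import UserMessage

async def main():
    config = load_config("casual_mcp_config.json")  # assumed config file
    mcp_client = load_mcp_client(config)
    factory = ProviderFactory(mcp_client)
    provider = await factory.get_provider("gpt-4.1-nano", config.models["gpt-4.1-nano"])
    chat = McpToolChat(mcp_client, provider, "You are helpful.")

    # generate(): wraps the prompt in a UserMessage; with a session_id, the
    # prompt and responses are stored in memory and replayed on the next call
    await chat.generate("What time is it in London?", "demo-session")
    await chat.generate("And in Tokyo?", "demo-session")

    # chat(): the caller owns the message list; the constructor's system
    # prompt is inserted only when the list has no system message
    await chat.chat([UserMessage(content="What time is it in Paris?")])

asyncio.run(main())
```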
casual_mcp/models/__init__.py CHANGED
@@ -1,13 +1,11 @@
  from .mcp_server_config import (
-     HttpMcpServerConfig,
      McpServerConfig,
-     NodeMcpServerConfig,
-     PythonMcpServerConfig,
-     UvxMcpServerConfig,
+     RemoteServerConfig,
+     StdioServerConfig,
  )
  from .messages import (
      AssistantMessage,
-     CasualMcpMessage,
+     ChatMessage,
      SystemMessage,
      ToolResultMessage,
      UserMessage,
@@ -22,12 +20,10 @@ __all__ = [
      "AssistantMessage",
      "ToolResultMessage",
      "SystemMessage",
-     "CasualMcpMessage",
+     "ChatMessage",
      "ModelConfig",
      "OpenAIModelConfig",
      "McpServerConfig",
-     "PythonMcpServerConfig",
-     "UvxMcpServerConfig",
-     "NodeMcpServerConfig",
-     "HttpMcpServerConfig",
+     "StdioServerConfig",
+     "RemoteServerConfig",
  ]
casual_mcp/models/mcp_server_config.py CHANGED
@@ -1,39 +1,20 @@
- from typing import Literal
+ from typing import Any, Literal

- from pydantic import BaseModel
+ from pydantic import BaseModel, Field


- class BaseMcpServerConfig(BaseModel):
-     type: Literal["python", "node", "http", "uvx"]
-     system_prompt: str | None = None
+ class StdioServerConfig(BaseModel):
+     command: str
+     args: list[str] = Field(default_factory=list)
+     env: dict[str, Any] = Field(default_factory=dict)
+     cwd: str | None = None
+     transport: Literal["stdio"] = "stdio"


- class PythonMcpServerConfig(BaseMcpServerConfig):
-     type: Literal["python"] = "python"
-     path: str
-     env: dict[str, str] | None = None
-
-
- class UvxMcpServerConfig(BaseMcpServerConfig):
-     type: Literal["uvx"] = "uvx"
-     package: str
-     env: dict[str, str] | None = None
-
-
- class NodeMcpServerConfig(BaseMcpServerConfig):
-     type: Literal["node"] = "node"
-     path: str
-     env: dict[str, str] | None = None
-
-
- class HttpMcpServerConfig(BaseMcpServerConfig):
-     type: Literal["http"] = "http"
+ class RemoteServerConfig(BaseModel):
      url: str
+     headers: dict[str, str] = Field(default_factory=dict)
+     transport: Literal["streamable-http", "sse", "http"] | None = None


- McpServerConfig = (
-     PythonMcpServerConfig
-     | NodeMcpServerConfig
-     | HttpMcpServerConfig
-     | UvxMcpServerConfig
- )
+ McpServerConfig = StdioServerConfig | RemoteServerConfig
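
The config schema above collapses four transport-specific classes into two: `StdioServerConfig` for local subprocess servers and `RemoteServerConfig` for anything reachable by URL. A minimal sketch of constructing both, using only the fields defined in this hunk:

```python
from casual_mcp.models.mcp_server_config import RemoteServerConfig, StdioServerConfig

# Local server: launched as a subprocess and spoken to over stdio
local = StdioServerConfig(command="python", args=["mcp-servers/time/server.py"])

# Remote server: identified by its URL; transport is optional
remote = RemoteServerConfig(url="http://localhost:5050/mcp")

print(local.transport)      # "stdio" (fixed literal on the model)
print(remote.model_dump())  # plain dict, as consumed by load_mcp_client in utils.py
```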
casual_mcp/models/messages.py CHANGED
@@ -28,4 +28,4 @@ class UserMessage(BaseModel):
      content: str | None


- CasualMcpMessage: TypeAlias = AssistantMessage | SystemMessage | ToolResultMessage | UserMessage
+ ChatMessage: TypeAlias = AssistantMessage | SystemMessage | ToolResultMessage | UserMessage
casual_mcp/providers/abstract_provider.py CHANGED
@@ -2,14 +2,14 @@ from abc import ABC, abstractmethod

  import mcp

- from casual_mcp.models.messages import CasualMcpMessage
+ from casual_mcp.models.messages import ChatMessage


  class CasualMcpProvider(ABC):
      @abstractmethod
      async def generate(
          self,
-         messages: list[CasualMcpMessage],
+         messages: list[ChatMessage],
          tools: list[mcp.Tool]
-     ) -> CasualMcpMessage:
+     ) -> ChatMessage:
          pass
casual_mcp/providers/ollama_provider.py CHANGED
@@ -6,7 +6,7 @@ from ollama import ChatResponse, Client, ResponseError

  from casual_mcp.logging import get_logger
  from casual_mcp.models.generation_error import GenerationError
- from casual_mcp.models.messages import AssistantMessage, CasualMcpMessage
+ from casual_mcp.models.messages import AssistantMessage, ChatMessage
  from casual_mcp.providers.abstract_provider import CasualMcpProvider

  logger = get_logger("providers.ollama")
@@ -15,7 +15,7 @@ def convert_tools(mcp_tools: list[mcp.Tool]) -> list[ollama.Tool]:
      raise Exception({"message": "under development"})


- def convert_messages(messages: list[CasualMcpMessage]) -> list[ollama.Message]:
+ def convert_messages(messages: list[ChatMessage]) -> list[ollama.Message]:
      raise Exception({"message": "under development"})


@@ -32,9 +32,9 @@ class OllamaProvider(CasualMcpProvider):

      async def generate(
          self,
-         messages: list[CasualMcpMessage],
+         messages: list[ChatMessage],
          tools: list[mcp.Tool]
-     ) -> CasualMcpMessage:
+     ) -> ChatMessage:
          logger.info("Start Generating")
          logger.debug(f"Model: {self.model}")

casual_mcp/providers/openai_provider.py CHANGED
@@ -15,7 +15,7 @@ from openai.types.chat import (

  from casual_mcp.logging import get_logger
  from casual_mcp.models.generation_error import GenerationError
- from casual_mcp.models.messages import AssistantMessage, CasualMcpMessage
+ from casual_mcp.models.messages import AssistantMessage, ChatMessage
  from casual_mcp.models.tool_call import AssistantToolCall, AssistantToolCallFunction
  from casual_mcp.providers.abstract_provider import CasualMcpProvider

@@ -59,7 +59,7 @@ def convert_tool(mcp_tool: mcp.Tool) -> ChatCompletionToolParam | None:
      return ChatCompletionToolParam(**tool)


- def convert_messages(messages: list[CasualMcpMessage]) -> list[ChatCompletionMessageParam]:
+ def convert_messages(messages: list[ChatMessage]) -> list[ChatCompletionMessageParam]:
      if not messages:
          return messages

@@ -144,7 +144,7 @@ class OpenAiProvider(CasualMcpProvider):

      async def generate(
          self,
-         messages: list[CasualMcpMessage],
+         messages: list[ChatMessage],
          tools: list[mcp.Tool]
      ) -> AssistantMessage:
          logger.info("Start Generating")
casual_mcp/providers/provider_factory.py CHANGED
@@ -2,6 +2,7 @@ import os
  from typing import TypeAlias

  import mcp
+ from fastmcp import Client

  from casual_mcp.logging import get_logger
  from casual_mcp.models.model_config import ModelConfig
@@ -13,15 +14,22 @@ logger = get_logger("providers.factory")
  LLMProvider: TypeAlias = OpenAiProvider | OllamaProvider

  class ProviderFactory:
-     def __init__(self):
-         self.providers: dict[str, LLMProvider] = {}
+     providers: dict[str, LLMProvider] = {}
+     tools: list[mcp.Tool] = None
+
+     def __init__(self, mcp_client: Client):
+         self.mcp_client = mcp_client


      def set_tools(self, tools: list[mcp.Tool]):
          self.tools = tools


-     def get_provider(self, name: str, config: ModelConfig) -> LLMProvider:
+     async def get_provider(self, name: str, config: ModelConfig) -> LLMProvider:
+         if not self.tools:
+             async with self.mcp_client:
+                 self.tools = await self.mcp_client.list_tools()
+
          if self.providers.get(name):
              return self.providers.get(name)

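
As rewritten, the factory fetches the tool list lazily on the first `get_provider` call (inside the client's context manager) and caches provider instances per model name. A short sketch of that behavior, assuming a config and model name (`lm-qwen-3`) as in the README example in this diff:

```python
import asyncio

from casual_mcp import ProviderFactory, load_config, load_mcp_client

async def main():
    config = load_config("casual_mcp_config.json")  # assumed config file
    factory = ProviderFactory(load_mcp_client(config))

    # First call opens the MCP client once to fetch tools, then caches them
    p1 = await factory.get_provider("lm-qwen-3", config.models["lm-qwen-3"])
    # Second call for the same name returns the cached provider instance
    p2 = await factory.get_provider("lm-qwen-3", config.models["lm-qwen-3"])
    assert p1 is p2

asyncio.run(main())
```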
casual_mcp/utils.py CHANGED
@@ -2,6 +2,7 @@ import json
  from pathlib import Path

  import mcp
+ from fastmcp import Client
  from jinja2 import Environment, FileSystemLoader
  from pydantic import ValidationError

@@ -9,6 +10,14 @@ from casual_mcp.models.config import Config
  from casual_mcp.models.tool_call import AssistantToolCall


+ def load_mcp_client(config: Config) -> Client:
+     servers = {
+         key: value.model_dump()
+         for key, value in config.servers.items()
+     }
+     return Client(servers)
+
+
  def load_config(path: str | Path) -> Config:
      path = Path(path)

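`load_mcp_client` dumps each validated server config to a plain dict and hands the whole mapping to a single FastMCP `Client`. A sketch of the intended wiring, mirroring how `main.py` and `McpToolChat` use the client elsewhere in this diff:

```python
import asyncio

from casual_mcp import load_config, load_mcp_client

async def main():
    config = load_config("casual_mcp_config.json")  # assumed config file
    client = load_mcp_client(config)  # one FastMCP Client across all servers

    # Connections are opened inside the context manager, matching the
    # `async with self.mcp_client:` pattern used throughout this diff
    async with client:
        tools = await client.list_tools()
        print([tool.name for tool in tools])

asyncio.run(main())
```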
casual_mcp-0.1.0.dist-info/METADATA → casual_mcp-0.3.0.dist-info/METADATA CHANGED
@@ -1,16 +1,18 @@
  Metadata-Version: 2.4
  Name: casual-mcp
- Version: 0.1.0
+ Version: 0.3.0
  Summary: Multi-server MCP client for LLM tool orchestration
  Author: Alex Stansfield
  License: MIT
+ Project-URL: Homepage, https://github.com/AlexStansfield/casual-mcp
+ Project-URL: Repository, https://github.com/AlexStansfield/casual-mcp
+ Project-URL: Issue Tracker, https://github.com/AlexStansfield/casual-mcp/issues
  Requires-Python: >=3.10
  Description-Content-Type: text/markdown
  License-File: LICENSE
- Requires-Dist: amadeus>=12.0.0
  Requires-Dist: dateparser>=1.2.1
  Requires-Dist: fastapi>=0.115.12
- Requires-Dist: fastmcp>=2.3.4
+ Requires-Dist: fastmcp>=2.5.1
  Requires-Dist: jinja2>=3.1.6
  Requires-Dist: ollama>=0.4.8
  Requires-Dist: openai>=1.78.0
@@ -34,7 +36,7 @@ Dynamic: license-file
  **Casual MCP** is a Python framework for building, evaluating, and serving LLMs with tool-calling capabilities using [Model Context Protocol (MCP)](https://modelcontextprotocol.io).
  It includes:

- - ✅ A multi-server MCP client
+ - ✅ A multi-server MCP client using [FastMCP](https://github.com/jlowin/fastmcp)
  - ✅ Provider support for OpenAI (and OpenAI compatible APIs)
  - ✅ A recursive tool-calling chat loop
  - ✅ System prompt templating with Jinja2
@@ -98,11 +100,11 @@ Here is a list of functions in JSON format that you can invoke:
  ]
  ```

- ## ⚙️ Configuration File (`config.json`)
+ ## ⚙️ Configuration File (`casual_mcp_config.json`)

  📄 See the [Programmatic Usage](#-programmatic-usage) section to build configs and messages with typed models.

- The CLI and API can be configured using a `config.json` file that defines:
+ The CLI and API can be configured using a `casual_mcp_config.json` file that defines:

  - 🔧 Available **models** and their providers
  - 🧰 Available **MCP tool servers**
@@ -112,7 +114,6 @@ The CLI and API can be configured using a `config.json` file that defines:

  ```json
  {
-   "namespaced_tools": false,
    "models": {
      "lm-qwen-3": {
        "provider": "openai",
@@ -127,11 +128,10 @@ The CLI and API can be configured using a `config.json` file that defines:
    },
    "servers": {
      "time": {
-       "type": "python",
-       "path": "mcp-servers/time/server.py"
+       "command": "python",
+       "args": ["mcp-servers/time/server.py"]
      },
      "weather": {
-       "type": "http",
        "url": "http://localhost:5050/mcp"
      }
    }
@@ -142,25 +142,31 @@

  Each model has:

- - `provider`: `"openai"` or `"ollama"`
+ - `provider`: `"openai"` (more to come)
  - `model`: the model name (e.g., `gpt-4.1`, `qwen3-8b`)
  - `endpoint`: required for custom OpenAI-compatible backends (e.g., LM Studio)
  - `template`: optional name used to apply model-specific tool calling formatting

  ### 🔹 `servers`

- Each server has:
+ Servers can either be local (over stdio) or remote.

- - `type`: `"python"`, `"http"`, `"node"`, or `"uvx"`
- - For `python`/`node`: `path` to the script
- - For `http`: `url` to the remote MCP endpoint
- - For `uvx`: `package` for the package to run
- - Optional: `env` for subprocess environments, `system_prompt` to override server prompt
+ #### Local Config:
+ - `command`: the command to run the server, e.g. `python`, `npm`
+ - `args`: the arguments to pass to the server as a list, e.g. `["time/server.py"]`
+ - Optional: `env` for subprocess environments, `system_prompt` to override the server prompt

- ### 🔹 `namespaced_tools`
+ #### Remote Config:
+ - `url`: the URL of the MCP server
+ - Optional: `transport`: the type of transport, `http`, `sse`, or `streamable-http`. Defaults to `http`

- If `true`, tools will be prefixed by server name (e.g., `weather-get_weather`).
- Useful for disambiguating tool names across servers and avoiding name collision if multiple servers have the same tool name.
+ ## Environment Variables
+
+ There are two environment variables:
+ - `OPEN_AI_API_KEY`: required when using the `openai` provider; if using a local model with an OpenAI-compatible API it can be any string
+ - `TOOL_RESULT_FORMAT`: adjusts the format of the tool result given back to the LLM. Options are `result`, `function_result`, `function_args_result`. Defaults to `result`
+
+ You can set them using `export` or by creating a `.env` file.

  ## 🛠 CLI Reference

@@ -178,12 +184,12 @@ Loads the config and outputs the list of MCP servers you have configured.
  ```
  $ casual-mcp servers
  ┏━━━━━━━━━┳━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━┓
- ┃ Name    ┃ Type   ┃ Path / Package / Url          ┃ Env ┃
+ ┃ Name    ┃ Type   ┃ Command / Url                 ┃ Env ┃
  ┡━━━━━━━━━╇━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━┩
- │ math    │ python │ mcp-servers/math/server.py    │     │
- │ time    │ python │ mcp-servers/time-v2/server.py │     │
- │ weather │ python │ mcp-servers/weather/server.py │     │
- │ words   │ python │ mcp-servers/words/server.py   │     │
+ │ math    │ local  │ mcp-servers/math/server.py    │     │
+ │ time    │ local  │ mcp-servers/time-v2/server.py │     │
+ │ weather │ local  │ mcp-servers/weather/server.py │     │
+ │ words   │ remote │ https://localhost:3000/mcp    │     │
  └─────────┴────────┴───────────────────────────────┴─────┘
  ```

@@ -217,57 +223,68 @@ Orchestrates LLM interaction with tools using a recursive loop.

  ```python
  from casual_mcp import McpToolChat
+ from casual_mcp.models import SystemMessage, UserMessage

  chat = McpToolChat(mcp_client, provider, system_prompt)
- response = await chat.chat(prompt="What time is it in London?")
- ```

- #### `MultiServerMCPClient`
- Connects to multiple MCP tool servers and manages available tools.
+ # Generate method takes a user prompt
+ response = await chat.generate("What time is it in London?")

- ```python
- from casual_mcp import MultiServerMCPClient
+ # Generate method with a session
+ response = await chat.generate("What time is it in London?", "my-session-id")

- mcp_client = MultiServerMCPClient()
- await mcp_client.load_config(config["servers"])
- tools = await mcp_client.list_tools()
+ # Chat method takes a list of chat messages
+ # Note: a system message in the list takes precedence, so no system prompt is set here
+ chat = McpToolChat(mcp_client, provider)
+ messages = [
+     SystemMessage(content="You are a cool dude who likes to help the user"),
+     UserMessage(content="What time is it in London?")
+ ]
+ response = await chat.chat(messages)
  ```

  #### `ProviderFactory`
  Instantiates LLM providers based on the selected model config.

  ```python
- from casual_mcp.providers.provider_factory import ProviderFactory
+ from casual_mcp import ProviderFactory

- provider_factory = ProviderFactory()
- provider = provider_factory.get_provider("lm-qwen-3", model_config)
+ provider_factory = ProviderFactory(mcp_client)
+ provider = await provider_factory.get_provider("lm-qwen-3", model_config)
  ```

  #### `load_config`
- Loads your `config.json` into a validated config object.
+ Loads your `casual_mcp_config.json` into a validated config object.

  ```python
- from casual_mcp.utils import load_config
+ from casual_mcp import load_config

- config = load_config("config.json")
+ config = load_config("casual_mcp_config.json")
+ ```
+
+ #### `load_mcp_client`
+ Creates a multi-server FastMCP client from the config object.
+
+ ```python
+ from casual_mcp import load_mcp_client
+
+ mcp_client = load_mcp_client(config)
  ```

  #### Model and Server Configs

  Exported models:
- - PythonMcpServerConfig
- - UvxMcpServerConfig
- - NodeMcpServerConfig
- - HttpMcpServerConfig
+ - StdioServerConfig
+ - RemoteServerConfig
  - OpenAIModelConfig

  Use these types to build valid configs:

  ```python
- from casual_mcp.models import OpenAIModelConfig, PythonMcpServerConfig
+ from casual_mcp.models import OpenAIModelConfig, StdioServerConfig

- model = OpenAIModelConfig( model="llama3", endpoint="http://...")
- server = PythonMcpServerConfig(path="time/server.py")
+ model = OpenAIModelConfig(model="llama3", endpoint="http://...")
+ server = StdioServerConfig(command="python", args=["time/server.py"])
  ```

  #### Chat Messages
@@ -292,7 +309,7 @@ messages = [
  ### Example

  ```python
- from casual_mcp import McpToolChat, MultiServerMCPClient, load_config, ProviderFactory
+ from casual_mcp import McpToolChat, load_config, load_mcp_client, ProviderFactory
  from casual_mcp.models import SystemMessage, UserMessage

  model = "gpt-4.1-nano"
@@ -304,20 +321,18 @@ Respond naturally and confidently, as if you already know all the facts."""),
  ]

  # Load the Config from the File
- config = load_config("config.json")
+ config = load_config("casual_mcp_config.json")

- # Setup the MultiServer MCP Client
- mcp_client = MultiServerMCPClient()
- await mcp_client.load_config(config.servers)
+ # Setup the MCP Client
+ mcp_client = load_mcp_client(config)

  # Get the Provider for the Model
- provider_factory.set_tools(await mcp_client.list_tools())
- provider_factory = ProviderFactory()
- provider = provider_factory.get_provider(model, config.models[model])
+ provider_factory = ProviderFactory(mcp_client)
+ provider = await provider_factory.get_provider(model, config.models[model])

  # Perform the Chat and Tool calling
- chat = McpToolChat(mcp_client, provider, system_prompt)
- response_messages = await chat.chat(messages=messages)
+ chat = McpToolChat(mcp_client, provider)
+ response_messages = await chat.chat(messages)
  ```

  ## 🚀 API Usage
@@ -328,25 +343,56 @@ response_messages = await chat.chat(messages=messages)
  casual-mcp serve --host 0.0.0.0 --port 8000
  ```

- You can then POST to `/chat` to trigger tool-calling LLM responses.
+ ### Chat
+
+ #### Endpoint: `POST /chat`

- The request takes a json body consisting of:
+ #### Request Body:
  - `model`: the LLM model to use
- - `user_prompt`: optional, the latest user message (required if messages isn't provided)
- - `messages`: optional, list of chat messages (system, assistant, user, etc) that you can pass to the api, allowing you to keep your own chat session in the client calling the api
- - `session_id`: an optional ID that stores all the messages from the session and provides them back to the LLM for context
+ - `messages`: list of chat messages (system, assistant, user, etc.) that you can pass to the API, allowing you to keep your own chat session in the client calling the API

- You can either pass in a `user_prompt` or a list of `messages` depending on your use case.
+ #### Example:
+ ```
+ {
+     "model": "gpt-4.1-nano",
+     "messages": [
+         {
+             "role": "user",
+             "content": "can you explain what the word consistent means?"
+         }
+     ]
+ }
+ ```
+
+ ### Generate
+
+ The generate endpoint allows you to send a user prompt as a string.
+
+ It also supports sessions, which keep a record of all messages in the session and feed them back into the LLM for context. Sessions are stored in memory, so they are cleared when the server is restarted.
+
+ #### Endpoint: `POST /generate`

- Example:
+ #### Request Body:
+ - `model`: the LLM model to use
+ - `prompt`: the user prompt
+ - `session_id`: an optional ID that stores all the messages from the session and provides them back to the LLM for context
+
+ #### Example:
  ```
  {
-     "session_id": "my-test-session",
+     "session_id": "my-session",
      "model": "gpt-4o-mini",
-     "user_prompt": "can you explain what the word consistent means?"
+     "prompt": "can you explain what the word consistent means?"
  }
  ```

+ ### Get Session
+
+ Get all the messages from a session.
+
+ #### Endpoint: `GET /generate/session/{session_id}`
+
+
  ## License

  This software is released under the [MIT License](LICENSE)
casual_mcp-0.3.0.dist-info/RECORD ADDED
@@ -0,0 +1,24 @@
+ casual_mcp/__init__.py,sha256=pInJdGkFqSH8RwbQq-9mc96GWIQjLrtExeXnTYGtNHw,327
+ casual_mcp/cli.py,sha256=TSk12nXJH86f0WAR_u5hIJV9IAHeGHrkgFs7ZZ63Lug,1627
+ casual_mcp/logging.py,sha256=o3rvT8GLJKGlu0ieeC9TY_SRSEUY-VO8jRQZjx-sSvY,863
+ casual_mcp/main.py,sha256=AzqQ6SUJsyKyMaqd3HIxLDozoftMd27KQAQNsfM9e2I,3385
+ casual_mcp/mcp_tool_chat.py,sha256=BebLuo2F4nStd4vVO3BftfG8Sa6Zlx11UBuMezpbtIE,4897
+ casual_mcp/utils.py,sha256=Nea0aRbPyjqm7mIjffJtGP2NssE7BsdPleO-yiuAWPE,2964
+ casual_mcp/models/__init__.py,sha256=qlKylcCyRJOSIVteU2feiLOigZoY-m-soVGp4NALM_c,538
+ casual_mcp/models/config.py,sha256=ITu3WAPMad7i2CS3ljkHapjT8lLm7k6HFUF6N73U1oo,294
+ casual_mcp/models/generation_error.py,sha256=n1mF3vc1Sg_9yIe603G1nTP395Tht8JMKHqdMWFNAn0,259
+ casual_mcp/models/mcp_server_config.py,sha256=0OHsHUEKxRoCl21lsye4E5GoCNmdZWIZCOOthcTpdsE,539
+ casual_mcp/models/messages.py,sha256=7C0SoCC6Ee970iHprpCpsKsQrwvM66e39o96wfYm1Y8,683
+ casual_mcp/models/model_config.py,sha256=gN5hNDfbur_bHgrji87CcU2WgNZO-F3eveK4pVWVSAE,435
+ casual_mcp/models/tool_call.py,sha256=BKMxcmyW7EmNoG1jgS9PXXvf6RQIHf7wB8fElEbc4gA,271
+ casual_mcp/providers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ casual_mcp/providers/abstract_provider.py,sha256=TTEP3FeTxOtbD0By_k17UxS8cqxYCOGNRTRxYRrqGwc,292
+ casual_mcp/providers/ollama_provider.py,sha256=IUSJFBtEYmza_-_7bk5YZKqed3N67l8A2lZEmHPiyHo,2581
+ casual_mcp/providers/openai_provider.py,sha256=uSjoqM-X9bVp_RVM8Ip6lqjZ7q3DdN0-p7o2HKrWxMI,6138
+ casual_mcp/providers/provider_factory.py,sha256=CyFHJ0mU2tjHqj04btF0SL0B3pf12LAJ52Msqsbnv_g,1766
+ casual_mcp-0.3.0.dist-info/licenses/LICENSE,sha256=U3Zu2tkrh5vXdy7gIdE8WJGM9D4gGp3hohAAWdre-yo,1058
+ casual_mcp-0.3.0.dist-info/METADATA,sha256=ULZbRBwX0FVKjfS2pPu3JWwvD5btiIkA6qOqWoWRa_0,12902
+ casual_mcp-0.3.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ casual_mcp-0.3.0.dist-info/entry_points.txt,sha256=X48Np2cwl-SlRQdV26y2vPZ-2tJaODgZeVtfpHho-zg,50
+ casual_mcp-0.3.0.dist-info/top_level.txt,sha256=K4CiI0Jf8PHICjuQVm32HuNMB44kp8Lb02bbbdiH5bo,11
+ casual_mcp-0.3.0.dist-info/RECORD,,
casual_mcp-0.1.0.dist-info/WHEEL → casual_mcp-0.3.0.dist-info/WHEEL CHANGED
@@ -1,5 +1,5 @@
  Wheel-Version: 1.0
- Generator: setuptools (80.8.0)
+ Generator: setuptools (80.9.0)
  Root-Is-Purelib: true
  Tag: py3-none-any

casual_mcp/multi_server_mcp_client.py DELETED
@@ -1,170 +0,0 @@
- import json
- import os
-
- import mcp
- from fastmcp import Client
- from fastmcp.client.logging import LogMessage
- from fastmcp.client.transports import (
-     ClientTransport,
-     NodeStdioTransport,
-     PythonStdioTransport,
-     StreamableHttpTransport,
-     UvxStdioTransport,
- )
-
- from casual_mcp.logging import get_logger
- from casual_mcp.models.mcp_server_config import McpServerConfig
- from casual_mcp.models.messages import ToolResultMessage
- from casual_mcp.models.tool_call import AssistantToolCall, AssistantToolCallFunction
- from casual_mcp.utils import format_tool_call_result
-
- logger = get_logger("multi_server_mcp_client")
-
-
- async def my_log_handler(params: LogMessage):
-     logger.log(params.level, params.data)
-
-
- def get_server_transport(config: McpServerConfig) -> ClientTransport:
-     match config.type:
-         case 'python':
-             return PythonStdioTransport(
-                 script_path=config.path,
-                 env=config.env
-             )
-         case 'node':
-             return NodeStdioTransport(
-                 script_path=config.path,
-                 env=config.env
-             )
-         case 'http':
-             return StreamableHttpTransport(
-                 url=config.url
-             )
-         case 'uvx':
-             return UvxStdioTransport(
-                 tool_name=config.package,
-                 env_vars=config.env
-             )
-
-
- class MultiServerMCPClient:
-     def __init__(self, namespace_tools: bool = False):
-         self.servers: dict[str, Client] = {}  # Map server names to client connections
-         self.tools_map = {}  # Map tool names to server names
-         self.tools: list[mcp.types.Tool] = []
-         self.system_prompts: list[str] = []
-         self.namespace_tools = namespace_tools
-
-     async def load_config(self, config: dict[str, McpServerConfig]):
-         # Load the servers from config
-         logger.info("Loading server config")
-         for name, server_config in config.items():
-             transport = get_server_transport(server_config)
-             await self.connect_to_server(
-                 transport,
-                 name,
-                 system_prompt=server_config.system_prompt
-             )
-
-
-     async def connect_to_server_script(self, path, name, env={}):
-         # Connect via stdio to a local script
-         transport = PythonStdioTransport(
-             script_path=path,
-             env=env,
-         )
-
-         return await self.connect_to_server(transport, name)
-
-     async def connect_to_server(self, server, name, system_prompt: str = None):
-         """Connect to an MCP server and register its tools."""
-         logger.debug(f"Connecting to server {name}")
-
-         async with Client(
-             server,
-             log_handler=my_log_handler,
-         ) as server_client:
-             # Store the connection
-             self.servers[name] = server_client
-
-             # Fetch tools and map them to this server
-             tools = await server_client.list_tools()
-
-             # If we are namespacing servers then change the tool names
-             for tool in tools:
-                 if self.namespace_tools:
-                     tool.name = f"{name}-{tool.name}"
-                 else:
-                     if self.tools_map.get(tool.name):
-                         raise SystemError(
-                             f"Tool name collision {name}:{tool.name} already added by {self.tools_map[tool.name]}"  # noqa: E501
-                         )
-
-                 self.tools_map[tool.name] = name
-             self.tools.extend(tools)
-
-             if system_prompt:
-                 prompt = await server_client.get_prompt(system_prompt)
-                 if prompt:
-                     self.system_prompts.append(prompt)
-
-             return tools
-
-     async def list_tools(self):
-         """Fetch and aggregate tools from all connected servers."""
-         return self.tools
-
-     async def call_tool(self, function: AssistantToolCallFunction):
-         """Route a tool call to the appropriate server."""
-         tool_name = function.name
-         tool_args = json.loads(function.arguments)
-
-         # Find which server has this tool
-         server_name = self.tools_map.get(tool_name)
-
-         # Remove the server name if the tools are namespaced
-         if self.namespace_tools:
-             tool_name = tool_name.removeprefix(f"{server_name}-")
-         else:
-             tool_name = tool_name
-
-         if not self.tools_map.get(tool_name):
-             raise ValueError(f"Tool not found: {tool_name}")
-
-         logger.info(f"Calling tool {tool_name}")
-
-         server_client = self.servers[server_name]
-         async with server_client:
-             return await server_client.call_tool(tool_name, tool_args)
-
-
-     async def execute(self, tool_call: AssistantToolCall):
-         try:
-             result = await self.call_tool(tool_call.function)
-         except Exception as e:
-             if isinstance(e, ValueError):
-                 logger.warning(e)
-             else:
-                 logger.error(f"Error calling tool: {e}")
-
-             return ToolResultMessage(
-                 name=tool_call.function.name,
-                 tool_call_id=tool_call.id,
-                 content=str(e),
-             )
-
-         logger.debug(f"Tool Call Result: {result}")
-
-         result_format = os.getenv('TOOL_RESULT_FORMAT', 'result')
-         content = format_tool_call_result(tool_call, result[0].text, style=result_format)
-
-         return ToolResultMessage(
-             name=tool_call.function.name,
-             tool_call_id=tool_call.id,
-             content=content,
-         )
-
-
-     def get_system_prompts(self) -> list[str]:
-         return self.system_prompts
casual_mcp-0.1.0.dist-info/RECORD DELETED
@@ -1,25 +0,0 @@
- casual_mcp/__init__.py,sha256=UZTKF9qlKijDh2SRCbpz6nPi0now7hi4-VOJBnl7tTk,323
- casual_mcp/cli.py,sha256=s5-Mr2XNlzNcsfGwtwP25YBQYzf-orvDIu9gqwrVCI8,1561
- casual_mcp/logging.py,sha256=o3rvT8GLJKGlu0ieeC9TY_SRSEUY-VO8jRQZjx-sSvY,863
- casual_mcp/main.py,sha256=x-jJUltW4p4j0Vx-LaixbY0Oik6QZ81K2wdDeTNytME,3497
- casual_mcp/mcp_tool_chat.py,sha256=pIAQD-ghyLSGuLzXiG5Sv81-NHaNb5NVqRojJalwS1o,3113
- casual_mcp/multi_server_mcp_client.py,sha256=RrLO7wFGzkUgzmliagkOAx16lrvEG323MGPEU7Sw56o,5615
- casual_mcp/utils.py,sha256=8ekPpIfcqheMMjjKGe6lk81AWKpmCAixOXx_KJXGRAQ,2758
- casual_mcp/models/__init__.py,sha256=hHT-GBD0YMjHdJ4QGVefXQZsHu3bPd1vlizVdfYXoQ0,660
- casual_mcp/models/config.py,sha256=ITu3WAPMad7i2CS3ljkHapjT8lLm7k6HFUF6N73U1oo,294
- casual_mcp/models/generation_error.py,sha256=n1mF3vc1Sg_9yIe603G1nTP395Tht8JMKHqdMWFNAn0,259
- casual_mcp/models/mcp_server_config.py,sha256=o4uxq9JnrLRRHe0KNsaYE3P03wJdW1EmX18fmF7SoTQ,857
- casual_mcp/models/messages.py,sha256=5UASrYqlXeqaziDT8Zsej0kA7Ofce0109YlFAyQDuTY,688
- casual_mcp/models/model_config.py,sha256=gN5hNDfbur_bHgrji87CcU2WgNZO-F3eveK4pVWVSAE,435
- casual_mcp/models/tool_call.py,sha256=BKMxcmyW7EmNoG1jgS9PXXvf6RQIHf7wB8fElEbc4gA,271
- casual_mcp/providers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- casual_mcp/providers/abstract_provider.py,sha256=kACSVgPY9qTqD1IgIWY9HkS8US2B0Nm7MyGJk0GLfDk,307
- casual_mcp/providers/ollama_provider.py,sha256=DKX9QTDl9DspWJSghuQgOzHZgjmTVtf5uyRH_DeOgQc,2601
- casual_mcp/providers/openai_provider.py,sha256=VIymU3Jimncne0c1fyowy4BFBpyfK70eG-2rP_YGDzc,6153
- casual_mcp/providers/provider_factory.py,sha256=Bub4y4uHFc23VCOeRicT_Fi54IdcjHUgYAWPo1oiSo4,1519
- casual_mcp-0.1.0.dist-info/licenses/LICENSE,sha256=U3Zu2tkrh5vXdy7gIdE8WJGM9D4gGp3hohAAWdre-yo,1058
- casual_mcp-0.1.0.dist-info/METADATA,sha256=PCJXGER8P8MgnqNBNRPVSysqumU4654KfUpUUyNLEdE,11576
- casual_mcp-0.1.0.dist-info/WHEEL,sha256=zaaOINJESkSfm_4HQVc5ssNzHCPXhJm0kEUakpsEHaU,91
- casual_mcp-0.1.0.dist-info/entry_points.txt,sha256=X48Np2cwl-SlRQdV26y2vPZ-2tJaODgZeVtfpHho-zg,50
- casual_mcp-0.1.0.dist-info/top_level.txt,sha256=K4CiI0Jf8PHICjuQVm32HuNMB44kp8Lb02bbbdiH5bo,11
- casual_mcp-0.1.0.dist-info/RECORD,,