casual-mcp 0.1.0__tar.gz → 0.3.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (32)
  1. {casual_mcp-0.1.0/src/casual_mcp.egg-info → casual_mcp-0.3.0}/PKG-INFO +112 -66
  2. {casual_mcp-0.1.0 → casual_mcp-0.3.0}/README.md +107 -63
  3. {casual_mcp-0.1.0 → casual_mcp-0.3.0}/pyproject.toml +16 -3
  4. {casual_mcp-0.1.0 → casual_mcp-0.3.0}/src/casual_mcp/__init__.py +3 -3
  5. {casual_mcp-0.1.0 → casual_mcp-0.3.0}/src/casual_mcp/cli.py +13 -13
  6. {casual_mcp-0.1.0 → casual_mcp-0.3.0}/src/casual_mcp/main.py +54 -53
  7. casual_mcp-0.3.0/src/casual_mcp/mcp_tool_chat.py +153 -0
  8. {casual_mcp-0.1.0 → casual_mcp-0.3.0}/src/casual_mcp/models/__init__.py +6 -10
  9. casual_mcp-0.3.0/src/casual_mcp/models/mcp_server_config.py +20 -0
  10. {casual_mcp-0.1.0 → casual_mcp-0.3.0}/src/casual_mcp/models/messages.py +1 -1
  11. {casual_mcp-0.1.0 → casual_mcp-0.3.0}/src/casual_mcp/providers/abstract_provider.py +3 -3
  12. {casual_mcp-0.1.0 → casual_mcp-0.3.0}/src/casual_mcp/providers/ollama_provider.py +4 -4
  13. {casual_mcp-0.1.0 → casual_mcp-0.3.0}/src/casual_mcp/providers/openai_provider.py +3 -3
  14. {casual_mcp-0.1.0 → casual_mcp-0.3.0}/src/casual_mcp/providers/provider_factory.py +11 -3
  15. {casual_mcp-0.1.0 → casual_mcp-0.3.0}/src/casual_mcp/utils.py +9 -0
  16. {casual_mcp-0.1.0 → casual_mcp-0.3.0/src/casual_mcp.egg-info}/PKG-INFO +112 -66
  17. {casual_mcp-0.1.0 → casual_mcp-0.3.0}/src/casual_mcp.egg-info/SOURCES.txt +0 -1
  18. {casual_mcp-0.1.0 → casual_mcp-0.3.0}/src/casual_mcp.egg-info/requires.txt +1 -2
  19. casual_mcp-0.1.0/src/casual_mcp/mcp_tool_chat.py +0 -90
  20. casual_mcp-0.1.0/src/casual_mcp/models/mcp_server_config.py +0 -39
  21. casual_mcp-0.1.0/src/casual_mcp/multi_server_mcp_client.py +0 -170
  22. {casual_mcp-0.1.0 → casual_mcp-0.3.0}/LICENSE +0 -0
  23. {casual_mcp-0.1.0 → casual_mcp-0.3.0}/setup.cfg +0 -0
  24. {casual_mcp-0.1.0 → casual_mcp-0.3.0}/src/casual_mcp/logging.py +0 -0
  25. {casual_mcp-0.1.0 → casual_mcp-0.3.0}/src/casual_mcp/models/config.py +0 -0
  26. {casual_mcp-0.1.0 → casual_mcp-0.3.0}/src/casual_mcp/models/generation_error.py +0 -0
  27. {casual_mcp-0.1.0 → casual_mcp-0.3.0}/src/casual_mcp/models/model_config.py +0 -0
  28. {casual_mcp-0.1.0 → casual_mcp-0.3.0}/src/casual_mcp/models/tool_call.py +0 -0
  29. {casual_mcp-0.1.0 → casual_mcp-0.3.0}/src/casual_mcp/providers/__init__.py +0 -0
  30. {casual_mcp-0.1.0 → casual_mcp-0.3.0}/src/casual_mcp.egg-info/dependency_links.txt +0 -0
  31. {casual_mcp-0.1.0 → casual_mcp-0.3.0}/src/casual_mcp.egg-info/entry_points.txt +0 -0
  32. {casual_mcp-0.1.0 → casual_mcp-0.3.0}/src/casual_mcp.egg-info/top_level.txt +0 -0
PKG-INFO

@@ -1,16 +1,18 @@
  Metadata-Version: 2.4
  Name: casual-mcp
- Version: 0.1.0
+ Version: 0.3.0
  Summary: Multi-server MCP client for LLM tool orchestration
  Author: Alex Stansfield
  License: MIT
+ Project-URL: Homepage, https://github.com/AlexStansfield/casual-mcp
+ Project-URL: Repository, https://github.com/AlexStansfield/casual-mcp
+ Project-URL: Issue Tracker, https://github.com/AlexStansfield/casual-mcp/issues
  Requires-Python: >=3.10
  Description-Content-Type: text/markdown
  License-File: LICENSE
- Requires-Dist: amadeus>=12.0.0
  Requires-Dist: dateparser>=1.2.1
  Requires-Dist: fastapi>=0.115.12
- Requires-Dist: fastmcp>=2.3.4
+ Requires-Dist: fastmcp>=2.5.1
  Requires-Dist: jinja2>=3.1.6
  Requires-Dist: ollama>=0.4.8
  Requires-Dist: openai>=1.78.0
@@ -34,7 +36,7 @@ Dynamic: license-file
  **Casual MCP** is a Python framework for building, evaluating, and serving LLMs with tool-calling capabilities using [Model Context Protocol (MCP)](https://modelcontextprotocol.io).
  It includes:

- - ✅ A multi-server MCP client
+ - ✅ A multi-server MCP client using [FastMCP](https://github.com/jlowin/fastmcp)
  - ✅ Provider support for OpenAI (and OpenAI compatible APIs)
  - ✅ A recursive tool-calling chat loop
  - ✅ System prompt templating with Jinja2
@@ -98,11 +100,11 @@ Here is a list of functions in JSON format that you can invoke:
  ]
  ```

- ## ⚙️ Configuration File (`config.json`)
+ ## ⚙️ Configuration File (`casual_mcp_config.json`)

  📄 See the [Programmatic Usage](#-programmatic-usage) section to build configs and messages with typed models.

- The CLI and API can be configured using a `config.json` file that defines:
+ The CLI and API can be configured using a `casual_mcp_config.json` file that defines:

  - 🔧 Available **models** and their providers
  - 🧰 Available **MCP tool servers**
@@ -112,7 +114,6 @@ The CLI and API can be configured using a `config.json` file that defines:

  ```json
  {
- "namespaced_tools": false,
  "models": {
  "lm-qwen-3": {
  "provider": "openai",
@@ -127,11 +128,10 @@ The CLI and API can be configured using a `config.json` file that defines:
  },
  "servers": {
  "time": {
- "type": "python",
- "path": "mcp-servers/time/server.py"
+ "command": "python",
+ "args": ["mcp-servers/time/server.py"]
  },
  "weather": {
- "type": "http",
  "url": "http://localhost:5050/mcp"
  }
  }
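To make the new server format concrete, here is a minimal sketch of writing a complete `casual_mcp_config.json` in the 0.3.0 shape from Python, assembled from the fragments in the hunks above. The `command`/`args` and `url` server fields come from the diff; the concrete model name, endpoint, and server paths are illustrative assumptions only.

```python
# Sketch only: writes an illustrative casual_mcp_config.json in the 0.3.0
# format shown above. Field names mirror the diff; values are placeholders.
import json

config = {
    "models": {
        "lm-qwen-3": {
            "provider": "openai",
            "model": "qwen3-8b",                     # assumed model name
            "endpoint": "http://localhost:1234/v1",  # assumed OpenAI-compatible endpoint
        }
    },
    "servers": {
        "time": {
            "command": "python",
            "args": ["mcp-servers/time/server.py"],
        },
        "weather": {
            "url": "http://localhost:5050/mcp",
        },
    },
}

with open("casual_mcp_config.json", "w") as f:
    json.dump(config, f, indent=2)
```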
@@ -142,25 +142,31 @@ The CLI and API can be configured using a `config.json` file that defines:

  Each model has:

- - `provider`: `"openai"` or `"ollama"`
+ - `provider`: `"openai"` (more to come)
  - `model`: the model name (e.g., `gpt-4.1`, `qwen3-8b`)
  - `endpoint`: required for custom OpenAI-compatible backends (e.g., LM Studio)
  - `template`: optional name used to apply model-specific tool calling formatting

  ### 🔹 `servers`

- Each server has:
+ Servers can either be local (over stdio) or remote.

- - `type`: `"python"`, `"http"`, `"node"`, or `"uvx"`
- - For `python`/`node`: `path` to the script
- - For `http`: `url` to the remote MCP endpoint
- - For `uvx`: `package` for the package to run
- - Optional: `env` for subprocess environments, `system_prompt` to override server prompt
+ #### Local Config:
+ - `command`: the command used to run the server, e.g. `python`, `npm`
+ - `args`: the arguments to pass to the server, as a list, e.g. `["time/server.py"]`
+ - Optional: `env` for subprocess environments, `system_prompt` to override the server prompt

- ### 🔹 `namespaced_tools`
+ #### Remote Config:
+ - `url`: the URL of the MCP server
+ - Optional: `transport`: the type of transport, `http`, `sse`, or `streamable-http`. Defaults to `http`

- If `true`, tools will be prefixed by server name (e.g., `weather-get_weather`).
- Useful for disambiguating tool names across servers and avoiding name collision if multiple servers have the same tool name.
+ ## Environment Variables
+
+ There are two environment variables:
+ - `OPEN_AI_API_KEY`: required when using the `openai` provider; if using a local model with an OpenAI-compatible API it can be any string
+ - `TOOL_RESULT_FORMAT`: adjusts the format of the tool result given back to the LLM. Options are `result`, `function_result`, `function_args_result`. Defaults to `result`
+
+ You can set them using `export` or by creating a `.env` file.
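As a small illustration of the environment variables described just above, here is a sketch of setting both from Python instead of `export` or a `.env` file; the variable names and accepted values come from the hunk, and the key value is a placeholder.

```python
# Sketch: set the two environment variables used by casual-mcp. For a local
# model behind an OpenAI-compatible API the key can be any string.
import os

os.environ["OPEN_AI_API_KEY"] = "placeholder-key"
os.environ["TOOL_RESULT_FORMAT"] = "function_result"  # or "result" / "function_args_result"
```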

  ## 🛠 CLI Reference

@@ -178,12 +184,12 @@ Loads the config and outputs the list of MCP servers you have configured.
  ```
  $ casual-mcp servers
  ┏━━━━━━━━━┳━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━┓
- ┃ Name ┃ Type ┃ Path / Package / Url ┃ Env ┃
+ ┃ Name ┃ Type ┃ Command / Url ┃ Env ┃
  ┡━━━━━━━━━╇━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━┩
- │ math │ python │ mcp-servers/math/server.py │ │
- │ time │ python │ mcp-servers/time-v2/server.py │ │
- │ weather │ python │ mcp-servers/weather/server.py │ │
- │ words │ python │ mcp-servers/words/server.py │ │
+ │ math │ local │ mcp-servers/math/server.py │ │
+ │ time │ local │ mcp-servers/time-v2/server.py │ │
+ │ weather │ local │ mcp-servers/weather/server.py │ │
+ │ words │ remote │ https://localhost:3000/mcp │ │
  └─────────┴────────┴───────────────────────────────┴─────┘
  ```

@@ -217,57 +223,68 @@ Orchestrates LLM interaction with tools using a recursive loop.

  ```python
  from casual_mcp import McpToolChat
+ from casual_mcp.models import SystemMessage, UserMessage

  chat = McpToolChat(mcp_client, provider, system_prompt)
- response = await chat.chat(prompt="What time is it in London?")
- ```

- #### `MultiServerMCPClient`
- Connects to multiple MCP tool servers and manages available tools.
+ # Generate method takes a user prompt
+ response = await chat.generate("What time is it in London?")

- ```python
- from casual_mcp import MultiServerMCPClient
+ # Generate method with a session
+ response = await chat.generate("What time is it in London?", "my-session-id")

- mcp_client = MultiServerMCPClient()
- await mcp_client.load_config(config["servers"])
- tools = await mcp_client.list_tools()
+ # Chat method that takes a list of chat messages
+ # note: the system prompt argument is not needed when a system message is included in messages
+ chat = McpToolChat(mcp_client, provider)
+ messages = [
+ SystemMessage(content="You are a cool dude who likes to help the user"),
+ UserMessage(content="What time is it in London?")
+ ]
+ response = await chat.chat(messages)
  ```

  #### `ProviderFactory`
  Instantiates LLM providers based on the selected model config.

  ```python
- from casual_mcp.providers.provider_factory import ProviderFactory
+ from casual_mcp import ProviderFactory

- provider_factory = ProviderFactory()
- provider = provider_factory.get_provider("lm-qwen-3", model_config)
+ provider_factory = ProviderFactory(mcp_client)
+ provider = await provider_factory.get_provider("lm-qwen-3", model_config)
  ```

  #### `load_config`
- Loads your `config.json` into a validated config object.
+ Loads your `casual_mcp_config.json` into a validated config object.

  ```python
- from casual_mcp.utils import load_config
+ from casual_mcp import load_config

- config = load_config("config.json")
+ config = load_config("casual_mcp_config.json")
+ ```
+
+ #### `load_mcp_client`
+ Creates a multi-server FastMCP client from the config object.
+
+ ```python
+ from casual_mcp import load_mcp_client
+
+ mcp_client = load_mcp_client(config)
  ```

  #### Model and Server Configs

  Exported models:
- - PythonMcpServerConfig
- - UvxMcpServerConfig
- - NodeMcpServerConfig
- - HttpMcpServerConfig
+ - StdioServerConfig
+ - RemoteServerConfig
  - OpenAIModelConfig

  Use these types to build valid configs:

  ```python
- from casual_mcp.models import OpenAIModelConfig, PythonMcpServerConfig
+ from casual_mcp.models import OpenAIModelConfig, StdioServerConfig

- model = OpenAIModelConfig( model="llama3", endpoint="http://...")
- server = PythonMcpServerConfig(path="time/server.py")
+ model = OpenAIModelConfig(model="llama3", endpoint="http://...")
+ server = StdioServerConfig(command="python", args=["time/server.py"])
  ```

  #### Chat Messages
@@ -292,7 +309,7 @@ messages = [
  ### Example

  ```python
- from casual_mcp import McpToolChat, MultiServerMCPClient, load_config, ProviderFactory
+ from casual_mcp import McpToolChat, load_config, load_mcp_client, ProviderFactory
  from casual_mcp.models import SystemMessage, UserMessage

  model = "gpt-4.1-nano"
@@ -304,20 +321,18 @@ Respond naturally and confidently, as if you already know all the facts."""),
  ]

  # Load the Config from the File
- config = load_config("config.json")
+ config = load_config("casual_mcp_config.json")

- # Setup the MultiServer MCP Client
- mcp_client = MultiServerMCPClient()
- await mcp_client.load_config(config.servers)
+ # Setup the MCP Client
+ mcp_client = load_mcp_client(config)

  # Get the Provider for the Model
- provider_factory.set_tools(await mcp_client.list_tools())
- provider_factory = ProviderFactory()
- provider = provider_factory.get_provider(model, config.models[model])
+ provider_factory = ProviderFactory(mcp_client)
+ provider = await provider_factory.get_provider(model, config.models[model])

  # Perform the Chat and Tool calling
- chat = McpToolChat(mcp_client, provider, system_prompt)
- response_messages = await chat.chat(messages=messages)
+ chat = McpToolChat(mcp_client, provider)
+ response_messages = await chat.chat(messages)
  ```
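Since the new example above uses `await` at the top level, here is a hedged sketch of wrapping it in an `asyncio` entry point so it runs as a script. The casual_mcp calls mirror the added lines in the hunk; the `main()` wrapper, the message contents, and the final `print` are illustrative additions.

```python
# Sketch: running the 0.3.0-style example above as a script. The casual_mcp
# calls follow the diff; the asyncio wrapper and message text are assumptions.
import asyncio

from casual_mcp import McpToolChat, ProviderFactory, load_config, load_mcp_client
from casual_mcp.models import SystemMessage, UserMessage


async def main() -> None:
    model = "gpt-4.1-nano"
    messages = [
        SystemMessage(content="You are a helpful assistant."),  # example system prompt
        UserMessage(content="What time is it in London?"),
    ]

    # Load the config and build the multi-server FastMCP client
    config = load_config("casual_mcp_config.json")
    mcp_client = load_mcp_client(config)

    # Get the provider for the selected model
    provider_factory = ProviderFactory(mcp_client)
    provider = await provider_factory.get_provider(model, config.models[model])

    # Perform the chat and tool calling
    chat = McpToolChat(mcp_client, provider)
    response_messages = await chat.chat(messages)
    print(response_messages)


if __name__ == "__main__":
    asyncio.run(main())
```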

  ## 🚀 API Usage
@@ -328,25 +343,56 @@ response_messages = await chat.chat(messages=messages)
  casual-mcp serve --host 0.0.0.0 --port 8000
  ```

- You can then POST to `/chat` to trigger tool-calling LLM responses.
+ ### Chat
+
+ #### Endpoint: `POST /chat`

- The request takes a json body consisting of:
+ #### Request Body:
  - `model`: the LLM model to use
- - `user_prompt`: optional, the latest user message (required if messages isn't provided)
- - `messages`: optional, list of chat messages (system, assistant, user, etc) that you can pass to the api, allowing you to keep your own chat session in the client calling the api
- - `session_id`: an optional ID that stores all the messages from the session and provides them back to the LLM for context
+ - `messages`: list of chat messages (system, assistant, user, etc.) that you can pass to the API, allowing you to keep your own chat session in the client calling the API

- You can either pass in a `user_prompt` or a list of `messages` depending on your use case.
+ #### Example:
+ ```
+ {
+ "model": "gpt-4.1-nano",
+ "messages": [
+ {
+ "role": "user",
+ "content": "can you explain what the word consistent means?"
+ }
+ ]
+ }
+ ```
+
+ ### Generate
+
+ The generate endpoint allows you to send a user prompt as a string.
+
+ It also supports sessions, which keep a record of all messages in the session and feed them back to the LLM for context. Sessions are stored in memory, so they are cleared when the server is restarted.
+
+ #### Endpoint: `POST /generate`

- Example:
+ #### Request Body:
+ - `model`: the LLM model to use
+ - `prompt`: the user prompt
+ - `session_id`: an optional ID that stores all the messages from the session and provides them back to the LLM for context
+
+ #### Example:
  ```
  {
- "session_id": "my-test-session",
+ "session_id": "my-session",
  "model": "gpt-4o-mini",
- "user_prompt": "can you explain what the word consistent means?"
+ "prompt": "can you explain what the word consistent means?"
  }
  ```
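As a usage illustration for the `/generate` endpoint above, here is a sketch using the `requests` library (not a casual-mcp dependency) against a locally running `casual-mcp serve` instance on port 8000, matching the serve example earlier. The request fields come from the hunk; the response handling is an assumption, since the response schema is not shown in this diff. The session lookup uses the Get Session endpoint described just below.

```python
# Sketch: call a local casual-mcp API. Body fields (model, prompt, session_id)
# come from the diff; responses are printed raw because their shape is not
# documented here.
import requests

BASE_URL = "http://localhost:8000"  # assumes `casual-mcp serve --host 0.0.0.0 --port 8000`

resp = requests.post(
    f"{BASE_URL}/generate",
    json={
        "session_id": "my-session",
        "model": "gpt-4o-mini",
        "prompt": "can you explain what the word consistent means?",
    },
)
print(resp.json())

# Fetch all stored messages for the session
history = requests.get(f"{BASE_URL}/generate/session/my-session")
print(history.json())
```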

+ ### Get Session
+
+ Get all the messages from a session
+
+ #### Endpoint: `GET /generate/session/{session_id}`
+
+

  ## License
  This software is released under the [MIT License](LICENSE)

{casual_mcp-0.1.0 → casual_mcp-0.3.0}/README.md

@@ -6,7 +6,7 @@
  **Casual MCP** is a Python framework for building, evaluating, and serving LLMs with tool-calling capabilities using [Model Context Protocol (MCP)](https://modelcontextprotocol.io).
  It includes:

- - ✅ A multi-server MCP client
+ - ✅ A multi-server MCP client using [FastMCP](https://github.com/jlowin/fastmcp)
  - ✅ Provider support for OpenAI (and OpenAI compatible APIs)
  - ✅ A recursive tool-calling chat loop
  - ✅ System prompt templating with Jinja2
@@ -70,11 +70,11 @@ Here is a list of functions in JSON format that you can invoke:
  ]
  ```

- ## ⚙️ Configuration File (`config.json`)
+ ## ⚙️ Configuration File (`casual_mcp_config.json`)

  📄 See the [Programmatic Usage](#-programmatic-usage) section to build configs and messages with typed models.

- The CLI and API can be configured using a `config.json` file that defines:
+ The CLI and API can be configured using a `casual_mcp_config.json` file that defines:

  - 🔧 Available **models** and their providers
  - 🧰 Available **MCP tool servers**
@@ -84,7 +84,6 @@ The CLI and API can be configured using a `config.json` file that defines:

  ```json
  {
- "namespaced_tools": false,
  "models": {
  "lm-qwen-3": {
  "provider": "openai",
@@ -99,11 +98,10 @@ The CLI and API can be configured using a `config.json` file that defines:
  },
  "servers": {
  "time": {
- "type": "python",
- "path": "mcp-servers/time/server.py"
+ "command": "python",
+ "args": ["mcp-servers/time/server.py"]
  },
  "weather": {
- "type": "http",
  "url": "http://localhost:5050/mcp"
  }
  }
@@ -114,25 +112,31 @@ The CLI and API can be configured using a `config.json` file that defines:

  Each model has:

- - `provider`: `"openai"` or `"ollama"`
+ - `provider`: `"openai"` (more to come)
  - `model`: the model name (e.g., `gpt-4.1`, `qwen3-8b`)
  - `endpoint`: required for custom OpenAI-compatible backends (e.g., LM Studio)
  - `template`: optional name used to apply model-specific tool calling formatting

  ### 🔹 `servers`

- Each server has:
+ Servers can either be local (over stdio) or remote.

- - `type`: `"python"`, `"http"`, `"node"`, or `"uvx"`
- - For `python`/`node`: `path` to the script
- - For `http`: `url` to the remote MCP endpoint
- - For `uvx`: `package` for the package to run
- - Optional: `env` for subprocess environments, `system_prompt` to override server prompt
+ #### Local Config:
+ - `command`: the command used to run the server, e.g. `python`, `npm`
+ - `args`: the arguments to pass to the server, as a list, e.g. `["time/server.py"]`
+ - Optional: `env` for subprocess environments, `system_prompt` to override the server prompt

- ### 🔹 `namespaced_tools`
+ #### Remote Config:
+ - `url`: the URL of the MCP server
+ - Optional: `transport`: the type of transport, `http`, `sse`, or `streamable-http`. Defaults to `http`

- If `true`, tools will be prefixed by server name (e.g., `weather-get_weather`).
- Useful for disambiguating tool names across servers and avoiding name collision if multiple servers have the same tool name.
+ ## Environment Variables
+
+ There are two environment variables:
+ - `OPEN_AI_API_KEY`: required when using the `openai` provider; if using a local model with an OpenAI-compatible API it can be any string
+ - `TOOL_RESULT_FORMAT`: adjusts the format of the tool result given back to the LLM. Options are `result`, `function_result`, `function_args_result`. Defaults to `result`
+
+ You can set them using `export` or by creating a `.env` file.

  ## 🛠 CLI Reference

@@ -150,12 +154,12 @@ Loads the config and outputs the list of MCP servers you have configured.
  ```
  $ casual-mcp servers
  ┏━━━━━━━━━┳━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━┓
- ┃ Name ┃ Type ┃ Path / Package / Url ┃ Env ┃
+ ┃ Name ┃ Type ┃ Command / Url ┃ Env ┃
  ┡━━━━━━━━━╇━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━┩
- │ math │ python │ mcp-servers/math/server.py │ │
- │ time │ python │ mcp-servers/time-v2/server.py │ │
- │ weather │ python │ mcp-servers/weather/server.py │ │
- │ words │ python │ mcp-servers/words/server.py │ │
+ │ math │ local │ mcp-servers/math/server.py │ │
+ │ time │ local │ mcp-servers/time-v2/server.py │ │
+ │ weather │ local │ mcp-servers/weather/server.py │ │
+ │ words │ remote │ https://localhost:3000/mcp │ │
  └─────────┴────────┴───────────────────────────────┴─────┘
  ```

@@ -189,57 +193,68 @@ Orchestrates LLM interaction with tools using a recursive loop.

  ```python
  from casual_mcp import McpToolChat
+ from casual_mcp.models import SystemMessage, UserMessage

  chat = McpToolChat(mcp_client, provider, system_prompt)
- response = await chat.chat(prompt="What time is it in London?")
- ```

- #### `MultiServerMCPClient`
- Connects to multiple MCP tool servers and manages available tools.
+ # Generate method takes a user prompt
+ response = await chat.generate("What time is it in London?")

- ```python
- from casual_mcp import MultiServerMCPClient
+ # Generate method with a session
+ response = await chat.generate("What time is it in London?", "my-session-id")

- mcp_client = MultiServerMCPClient()
- await mcp_client.load_config(config["servers"])
- tools = await mcp_client.list_tools()
+ # Chat method that takes a list of chat messages
+ # note: the system prompt argument is not needed when a system message is included in messages
+ chat = McpToolChat(mcp_client, provider)
+ messages = [
+ SystemMessage(content="You are a cool dude who likes to help the user"),
+ UserMessage(content="What time is it in London?")
+ ]
+ response = await chat.chat(messages)
  ```

  #### `ProviderFactory`
  Instantiates LLM providers based on the selected model config.

  ```python
- from casual_mcp.providers.provider_factory import ProviderFactory
+ from casual_mcp import ProviderFactory

- provider_factory = ProviderFactory()
- provider = provider_factory.get_provider("lm-qwen-3", model_config)
+ provider_factory = ProviderFactory(mcp_client)
+ provider = await provider_factory.get_provider("lm-qwen-3", model_config)
  ```

  #### `load_config`
- Loads your `config.json` into a validated config object.
+ Loads your `casual_mcp_config.json` into a validated config object.

  ```python
- from casual_mcp.utils import load_config
+ from casual_mcp import load_config

- config = load_config("config.json")
+ config = load_config("casual_mcp_config.json")
+ ```
+
+ #### `load_mcp_client`
+ Creates a multi-server FastMCP client from the config object.
+
+ ```python
+ from casual_mcp import load_mcp_client
+
+ mcp_client = load_mcp_client(config)
  ```

  #### Model and Server Configs

  Exported models:
- - PythonMcpServerConfig
- - UvxMcpServerConfig
- - NodeMcpServerConfig
- - HttpMcpServerConfig
+ - StdioServerConfig
+ - RemoteServerConfig
  - OpenAIModelConfig

  Use these types to build valid configs:

  ```python
- from casual_mcp.models import OpenAIModelConfig, PythonMcpServerConfig
+ from casual_mcp.models import OpenAIModelConfig, StdioServerConfig

- model = OpenAIModelConfig( model="llama3", endpoint="http://...")
- server = PythonMcpServerConfig(path="time/server.py")
+ model = OpenAIModelConfig(model="llama3", endpoint="http://...")
+ server = StdioServerConfig(command="python", args=["time/server.py"])
  ```

  #### Chat Messages
@@ -264,7 +279,7 @@ messages = [
  ### Example

  ```python
- from casual_mcp import McpToolChat, MultiServerMCPClient, load_config, ProviderFactory
+ from casual_mcp import McpToolChat, load_config, load_mcp_client, ProviderFactory
  from casual_mcp.models import SystemMessage, UserMessage

  model = "gpt-4.1-nano"
@@ -276,20 +291,18 @@ Respond naturally and confidently, as if you already know all the facts."""),
  ]

  # Load the Config from the File
- config = load_config("config.json")
+ config = load_config("casual_mcp_config.json")

- # Setup the MultiServer MCP Client
- mcp_client = MultiServerMCPClient()
- await mcp_client.load_config(config.servers)
+ # Setup the MCP Client
+ mcp_client = load_mcp_client(config)

  # Get the Provider for the Model
- provider_factory.set_tools(await mcp_client.list_tools())
- provider_factory = ProviderFactory()
- provider = provider_factory.get_provider(model, config.models[model])
+ provider_factory = ProviderFactory(mcp_client)
+ provider = await provider_factory.get_provider(model, config.models[model])

  # Perform the Chat and Tool calling
- chat = McpToolChat(mcp_client, provider, system_prompt)
- response_messages = await chat.chat(messages=messages)
+ chat = McpToolChat(mcp_client, provider)
+ response_messages = await chat.chat(messages)
  ```

  ## 🚀 API Usage
@@ -300,25 +313,56 @@ response_messages = await chat.chat(messages=messages)
  casual-mcp serve --host 0.0.0.0 --port 8000
  ```

- You can then POST to `/chat` to trigger tool-calling LLM responses.
+ ### Chat
+
+ #### Endpoint: `POST /chat`

- The request takes a json body consisting of:
+ #### Request Body:
  - `model`: the LLM model to use
- - `user_prompt`: optional, the latest user message (required if messages isn't provided)
- - `messages`: optional, list of chat messages (system, assistant, user, etc) that you can pass to the api, allowing you to keep your own chat session in the client calling the api
- - `session_id`: an optional ID that stores all the messages from the session and provides them back to the LLM for context
+ - `messages`: list of chat messages (system, assistant, user, etc.) that you can pass to the API, allowing you to keep your own chat session in the client calling the API

- You can either pass in a `user_prompt` or a list of `messages` depending on your use case.
+ #### Example:
+ ```
+ {
+ "model": "gpt-4.1-nano",
+ "messages": [
+ {
+ "role": "user",
+ "content": "can you explain what the word consistent means?"
+ }
+ ]
+ }
+ ```
+
+ ### Generate
+
+ The generate endpoint allows you to send a user prompt as a string.
+
+ It also supports sessions, which keep a record of all messages in the session and feed them back to the LLM for context. Sessions are stored in memory, so they are cleared when the server is restarted.
+
+ #### Endpoint: `POST /generate`

- Example:
+ #### Request Body:
+ - `model`: the LLM model to use
+ - `prompt`: the user prompt
+ - `session_id`: an optional ID that stores all the messages from the session and provides them back to the LLM for context
+
+ #### Example:
  ```
  {
- "session_id": "my-test-session",
+ "session_id": "my-session",
  "model": "gpt-4o-mini",
- "user_prompt": "can you explain what the word consistent means?"
+ "prompt": "can you explain what the word consistent means?"
  }
  ```

+ ### Get Session
+
+ Get all the messages from a session
+
+ #### Endpoint: `GET /generate/session/{session_id}`
+
+
  ## License

  This software is released under the [MIT License](LICENSE)

{casual_mcp-0.1.0 → casual_mcp-0.3.0}/pyproject.toml

@@ -1,16 +1,15 @@
  [project]
  name = "casual-mcp"
- version = "0.1.0"
+ version = "0.3.0"
  description = "Multi-server MCP client for LLM tool orchestration"
  readme = "README.md"
  requires-python = ">=3.10"
  license = { text = "MIT" }
  authors = [{ name = "Alex Stansfield" }]
  dependencies = [
- "amadeus>=12.0.0",
  "dateparser>=1.2.1",
  "fastapi>=0.115.12",
- "fastmcp>=2.3.4",
+ "fastmcp>=2.5.1",
  "jinja2>=3.1.6",
  "ollama>=0.4.8",
  "openai>=1.78.0",
@@ -20,6 +19,11 @@ dependencies = [
  "uvicorn>=0.34.2",
  ]

+ [project.urls]
+ "Homepage" = "https://github.com/AlexStansfield/casual-mcp"
+ "Repository" = "https://github.com/AlexStansfield/casual-mcp"
+ "Issue Tracker" = "https://github.com/AlexStansfield/casual-mcp/issues"
+
  [project.optional-dependencies]
  dev = [
  "ruff",
@@ -59,3 +63,12 @@ dev = [
  "pytest>=8.3.5",
  "ruff>=0.11.10",
  ]
+
+ [tool.bumpversion]
+ current_version = "0.3.0"
+ commit = true
+ tag = true
+
+ [tool.bumpversion.file."pyproject.toml"]
+ search = 'version = "{current_version}"'
+ replace = 'version = "{new_version}"'

{casual_mcp-0.1.0 → casual_mcp-0.3.0}/src/casual_mcp/__init__.py

@@ -1,13 +1,13 @@
  from . import models
  from .mcp_tool_chat import McpToolChat
- from .multi_server_mcp_client import MultiServerMCPClient
  from .providers.provider_factory import ProviderFactory
- from .utils import load_config
+ from .utils import load_config, load_mcp_client, render_system_prompt

  __all__ = [
  "McpToolChat",
- "MultiServerMCPClient",
  "ProviderFactory",
  "load_config",
+ "load_mcp_client",
+ "render_system_prompt",
  "models",
  ]