casual-mcp 0.2.2__tar.gz → 0.3.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {casual_mcp-0.2.2/src/casual_mcp.egg-info → casual_mcp-0.3.0}/PKG-INFO +72 -28
- {casual_mcp-0.2.2 → casual_mcp-0.3.0}/README.md +71 -27
- {casual_mcp-0.2.2 → casual_mcp-0.3.0}/pyproject.toml +2 -2
- {casual_mcp-0.2.2 → casual_mcp-0.3.0}/src/casual_mcp/__init__.py +2 -1
- {casual_mcp-0.2.2 → casual_mcp-0.3.0}/src/casual_mcp/main.py +48 -44
- {casual_mcp-0.2.2 → casual_mcp-0.3.0}/src/casual_mcp/mcp_tool_chat.py +56 -33
- {casual_mcp-0.2.2 → casual_mcp-0.3.0/src/casual_mcp.egg-info}/PKG-INFO +72 -28
- {casual_mcp-0.2.2 → casual_mcp-0.3.0}/LICENSE +0 -0
- {casual_mcp-0.2.2 → casual_mcp-0.3.0}/setup.cfg +0 -0
- {casual_mcp-0.2.2 → casual_mcp-0.3.0}/src/casual_mcp/cli.py +0 -0
- {casual_mcp-0.2.2 → casual_mcp-0.3.0}/src/casual_mcp/logging.py +0 -0
- {casual_mcp-0.2.2 → casual_mcp-0.3.0}/src/casual_mcp/models/__init__.py +0 -0
- {casual_mcp-0.2.2 → casual_mcp-0.3.0}/src/casual_mcp/models/config.py +0 -0
- {casual_mcp-0.2.2 → casual_mcp-0.3.0}/src/casual_mcp/models/generation_error.py +0 -0
- {casual_mcp-0.2.2 → casual_mcp-0.3.0}/src/casual_mcp/models/mcp_server_config.py +0 -0
- {casual_mcp-0.2.2 → casual_mcp-0.3.0}/src/casual_mcp/models/messages.py +0 -0
- {casual_mcp-0.2.2 → casual_mcp-0.3.0}/src/casual_mcp/models/model_config.py +0 -0
- {casual_mcp-0.2.2 → casual_mcp-0.3.0}/src/casual_mcp/models/tool_call.py +0 -0
- {casual_mcp-0.2.2 → casual_mcp-0.3.0}/src/casual_mcp/providers/__init__.py +0 -0
- {casual_mcp-0.2.2 → casual_mcp-0.3.0}/src/casual_mcp/providers/abstract_provider.py +0 -0
- {casual_mcp-0.2.2 → casual_mcp-0.3.0}/src/casual_mcp/providers/ollama_provider.py +0 -0
- {casual_mcp-0.2.2 → casual_mcp-0.3.0}/src/casual_mcp/providers/openai_provider.py +0 -0
- {casual_mcp-0.2.2 → casual_mcp-0.3.0}/src/casual_mcp/providers/provider_factory.py +0 -0
- {casual_mcp-0.2.2 → casual_mcp-0.3.0}/src/casual_mcp/utils.py +0 -0
- {casual_mcp-0.2.2 → casual_mcp-0.3.0}/src/casual_mcp.egg-info/SOURCES.txt +0 -0
- {casual_mcp-0.2.2 → casual_mcp-0.3.0}/src/casual_mcp.egg-info/dependency_links.txt +0 -0
- {casual_mcp-0.2.2 → casual_mcp-0.3.0}/src/casual_mcp.egg-info/entry_points.txt +0 -0
- {casual_mcp-0.2.2 → casual_mcp-0.3.0}/src/casual_mcp.egg-info/requires.txt +0 -0
- {casual_mcp-0.2.2 → casual_mcp-0.3.0}/src/casual_mcp.egg-info/top_level.txt +0 -0
{casual_mcp-0.2.2/src/casual_mcp.egg-info → casual_mcp-0.3.0}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: casual-mcp
-Version: 0.2.2
+Version: 0.3.0
 Summary: Multi-server MCP client for LLM tool orchestration
 Author: Alex Stansfield
 License: MIT
@@ -36,7 +36,7 @@ Dynamic: license-file
 **Casual MCP** is a Python framework for building, evaluating, and serving LLMs with tool-calling capabilities using [Model Context Protocol (MCP)](https://modelcontextprotocol.io).
 It includes:

-- ✅ A multi-server MCP client
+- ✅ A multi-server MCP client using [FastMCP](https://github.com/jlowin/fastmcp)
 - ✅ Provider support for OpenAI (and OpenAI compatible APIs)
 - ✅ A recursive tool-calling chat loop
 - ✅ System prompt templating with Jinja2
@@ -151,12 +151,12 @@ Each model has:

 Servers can either be local (over stdio) or remote.

-Local Config:
+#### Local Config:
 - `command`: the command to run the server, e.g `python`, `npm`
 - `args`: the arguments to pass to the server as a list, e.g `["time/server.py"]`
 - Optional: `env`: for subprocess environments, `system_prompt` to override server prompt

-Remote Config:
+#### Remote Config:
 - `url`: the url of the mcp server
 - Optional: `transport`: the type of transport, `http`, `sse`, `streamable-http`. Defaults to `http`

@@ -184,12 +184,12 @@ Loads the config and outputs the list of MCP servers you have configured.
 ```
 $ casual-mcp servers
 ┏━━━━━━━━━┳━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━┓
-┃ Name ┃ Type ┃
+┃ Name    ┃ Type   ┃ Command / Url                 ┃ Env ┃
 ┡━━━━━━━━━╇━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━┩
-│ math │
-│ time │
-│ weather │
-│ words │
+│ math    │ local  │ mcp-servers/math/server.py    │     │
+│ time    │ local  │ mcp-servers/time-v2/server.py │     │
+│ weather │ local  │ mcp-servers/weather/server.py │     │
+│ words   │ remote │ https://localhost:3000/mcp    │     │
 └─────────┴────────┴───────────────────────────────┴─────┘
 ```
@@ -223,9 +223,24 @@ Orchestrates LLM interaction with tools using a recursive loop.

 ```python
 from casual_mcp import McpToolChat
+from casual_mcp.models import SystemMessage, UserMessage

 chat = McpToolChat(mcp_client, provider, system_prompt)
-
+
+# Generate method to take user prompt
+response = await chat.generate("What time is it in London?")
+
+# Generate method with session
+response = await chat.generate("What time is it in London?", "my-session-id")
+
+# Chat method that takes list of chat messages
+# note: system prompt ignored if sent in messages so no need to set
+chat = McpToolChat(mcp_client, provider)
+messages = [
+    SystemMessage(content="You are a cool dude who likes to help the user"),
+    UserMessage(content="What time is it in London?")
+]
+response = await chat.chat(messages)
 ```

 #### `ProviderFactory`
@@ -234,8 +249,8 @@ Instantiates LLM providers based on the selected model config.
 ```python
 from casual_mcp import ProviderFactory

-provider_factory = ProviderFactory()
-provider = provider_factory.get_provider("lm-qwen-3", model_config)
+provider_factory = ProviderFactory(mcp_client)
+provider = await provider_factory.get_provider("lm-qwen-3", model_config)
 ```

 #### `load_config`
@@ -308,18 +323,16 @@ Respond naturally and confidently, as if you already know all the facts."""),
 # Load the Config from the File
 config = load_config("casual_mcp_config.json")

-# Setup the
+# Setup the MCP Client
 mcp_client = load_mcp_client(config)
-await mcp_client.load_config(config.servers)

 # Get the Provider for the Model
-provider_factory
-
-provider = provider_factory.get_provider(model, config.models[model])
+provider_factory = ProviderFactory(mcp_client)
+provider = await provider_factory.get_provider(model, config.models[model])

 # Perform the Chat and Tool calling
-chat = McpToolChat(mcp_client, provider
-response_messages = await chat.chat(messages=messages)
+chat = McpToolChat(mcp_client, provider)
+response_messages = await chat.chat(messages)
 ```

 ## 🚀 API Usage
@@ -330,25 +343,56 @@ response_messages = await chat.chat(messages=messages)
 casual-mcp serve --host 0.0.0.0 --port 8000
 ```

-
+### Chat
+
+#### Endpoint: `POST /chat`

-
+#### Request Body:
 - `model`: the LLM model to use
-- `
-
-
+- `messages`: list of chat messages (system, assistant, user, etc) that you can pass to the api, allowing you to keep your own chat session in the client calling the api
+
+#### Example:
+```
+{
+  "model": "gpt-4.1-nano",
+  "messages": [
+    {
+      "role": "user",
+      "content": "can you explain what the word consistent means?"
+    }
+  ]
+}
+```
+
+### Generate
+
+The generate endpoint allows you to send a user prompt as a string.
+
+It also support sessions that keep a record of all messages in the session and feeds them back into the LLM for context. Sessions are stored in memory so are cleared when the server is restarted
+
+#### Endpoint: `POST /generate`

-
+#### Request Body:
+- `model`: the LLM model to use
+- `prompt`: the user prompt
+- `session_id`: an optional ID that stores all the messages from the session and provides them back to the LLM for context

-Example:
+#### Example:
 ```
 {
-  "session_id": "my-
+  "session_id": "my-session",
   "model": "gpt-4o-mini",
-  "
+  "prompt": "can you explain what the word consistent means?"
 }
 ```

+### Get Session
+
+Get all the messages from a session
+
+#### Endpoint: `GET /generate/session/{session_id}`
+
+
 ## License

 This software is released under the [MIT License](LICENSE)
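The Local Config and Remote Config fields documented above combine into a `casual_mcp_config.json` along these lines. This is only a hand-written sketch to illustrate the fields, not a file from the package: the top-level `servers` key, the server names and the paths are assumptions pieced together from the README excerpts and the `casual-mcp servers` table.

```python
# Illustrative only: writes a minimal config mixing one local (stdio) and one remote server.
import json

config = {
    "servers": {
        "math": {                                    # local server, launched over stdio
            "command": "python",
            "args": ["mcp-servers/math/server.py"],
            "env": {"LOG_LEVEL": "INFO"},            # optional
        },
        "words": {                                   # remote server
            "url": "https://localhost:3000/mcp",
            "transport": "http",                     # optional, defaults to "http"
        },
    }
}

with open("casual_mcp_config.json", "w") as f:
    json.dump(config, f, indent=2)
```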
{casual_mcp-0.2.2 → casual_mcp-0.3.0}/README.md

@@ -6,7 +6,7 @@
 **Casual MCP** is a Python framework for building, evaluating, and serving LLMs with tool-calling capabilities using [Model Context Protocol (MCP)](https://modelcontextprotocol.io).
 It includes:

-- ✅ A multi-server MCP client
+- ✅ A multi-server MCP client using [FastMCP](https://github.com/jlowin/fastmcp)
 - ✅ Provider support for OpenAI (and OpenAI compatible APIs)
 - ✅ A recursive tool-calling chat loop
 - ✅ System prompt templating with Jinja2
@@ -121,12 +121,12 @@ Each model has:

 Servers can either be local (over stdio) or remote.

-Local Config:
+#### Local Config:
 - `command`: the command to run the server, e.g `python`, `npm`
 - `args`: the arguments to pass to the server as a list, e.g `["time/server.py"]`
 - Optional: `env`: for subprocess environments, `system_prompt` to override server prompt

-Remote Config:
+#### Remote Config:
 - `url`: the url of the mcp server
 - Optional: `transport`: the type of transport, `http`, `sse`, `streamable-http`. Defaults to `http`

@@ -154,12 +154,12 @@ Loads the config and outputs the list of MCP servers you have configured.
 ```
 $ casual-mcp servers
 ┏━━━━━━━━━┳━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━┓
-┃ Name ┃ Type ┃
+┃ Name    ┃ Type   ┃ Command / Url                 ┃ Env ┃
 ┡━━━━━━━━━╇━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━┩
-│ math │
-│ time │
-│ weather │
-│ words │
+│ math    │ local  │ mcp-servers/math/server.py    │     │
+│ time    │ local  │ mcp-servers/time-v2/server.py │     │
+│ weather │ local  │ mcp-servers/weather/server.py │     │
+│ words   │ remote │ https://localhost:3000/mcp    │     │
 └─────────┴────────┴───────────────────────────────┴─────┘
 ```
@@ -193,9 +193,24 @@ Orchestrates LLM interaction with tools using a recursive loop.

 ```python
 from casual_mcp import McpToolChat
+from casual_mcp.models import SystemMessage, UserMessage

 chat = McpToolChat(mcp_client, provider, system_prompt)
-
+
+# Generate method to take user prompt
+response = await chat.generate("What time is it in London?")
+
+# Generate method with session
+response = await chat.generate("What time is it in London?", "my-session-id")
+
+# Chat method that takes list of chat messages
+# note: system prompt ignored if sent in messages so no need to set
+chat = McpToolChat(mcp_client, provider)
+messages = [
+    SystemMessage(content="You are a cool dude who likes to help the user"),
+    UserMessage(content="What time is it in London?")
+]
+response = await chat.chat(messages)
 ```

 #### `ProviderFactory`
@@ -204,8 +219,8 @@ Instantiates LLM providers based on the selected model config.
 ```python
 from casual_mcp import ProviderFactory

-provider_factory = ProviderFactory()
-provider = provider_factory.get_provider("lm-qwen-3", model_config)
+provider_factory = ProviderFactory(mcp_client)
+provider = await provider_factory.get_provider("lm-qwen-3", model_config)
 ```

 #### `load_config`
@@ -278,18 +293,16 @@ Respond naturally and confidently, as if you already know all the facts."""),
 # Load the Config from the File
 config = load_config("casual_mcp_config.json")

-# Setup the
+# Setup the MCP Client
 mcp_client = load_mcp_client(config)
-await mcp_client.load_config(config.servers)

 # Get the Provider for the Model
-provider_factory
-
-provider = provider_factory.get_provider(model, config.models[model])
+provider_factory = ProviderFactory(mcp_client)
+provider = await provider_factory.get_provider(model, config.models[model])

 # Perform the Chat and Tool calling
-chat = McpToolChat(mcp_client, provider
-response_messages = await chat.chat(messages=messages)
+chat = McpToolChat(mcp_client, provider)
+response_messages = await chat.chat(messages)
 ```

 ## 🚀 API Usage
@@ -300,25 +313,56 @@ response_messages = await chat.chat(messages=messages)
 casual-mcp serve --host 0.0.0.0 --port 8000
 ```

-
+### Chat
+
+#### Endpoint: `POST /chat`

-
+#### Request Body:
 - `model`: the LLM model to use
-- `
-
-
+- `messages`: list of chat messages (system, assistant, user, etc) that you can pass to the api, allowing you to keep your own chat session in the client calling the api
+
+#### Example:
+```
+{
+  "model": "gpt-4.1-nano",
+  "messages": [
+    {
+      "role": "user",
+      "content": "can you explain what the word consistent means?"
+    }
+  ]
+}
+```
+
+### Generate
+
+The generate endpoint allows you to send a user prompt as a string.
+
+It also support sessions that keep a record of all messages in the session and feeds them back into the LLM for context. Sessions are stored in memory so are cleared when the server is restarted
+
+#### Endpoint: `POST /generate`

-
+#### Request Body:
+- `model`: the LLM model to use
+- `prompt`: the user prompt
+- `session_id`: an optional ID that stores all the messages from the session and provides them back to the LLM for context

-Example:
+#### Example:
 ```
 {
-  "session_id": "my-
+  "session_id": "my-session",
   "model": "gpt-4o-mini",
-  "
+  "prompt": "can you explain what the word consistent means?"
 }
 ```

+### Get Session
+
+Get all the messages from a session
+
+#### Endpoint: `GET /generate/session/{session_id}`
+
+
 ## License

 This software is released under the [MIT License](LICENSE)
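Put together, the Generate and Get Session endpoints added to the README can be exercised like this. A minimal sketch, assuming the API was started with `casual-mcp serve --host 0.0.0.0 --port 8000`, that a `gpt-4o-mini` model exists in your config, and that `httpx` is installed; the `response` key is the one returned by the new handler in `main.py` further down this page.

```python
import httpx

BASE_URL = "http://localhost:8000"

# Send a prompt; reusing the same session_id feeds earlier messages back to the LLM.
reply = httpx.post(
    f"{BASE_URL}/generate",
    json={
        "session_id": "my-session",
        "model": "gpt-4o-mini",
        "prompt": "can you explain what the word consistent means?",
    },
    timeout=60,
)
print(reply.json()["response"])

# Retrieve every message stored for the session (held in memory, cleared on restart).
history = httpx.get(f"{BASE_URL}/generate/session/my-session", timeout=60)
print(history.json())
```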
{casual_mcp-0.2.2 → casual_mcp-0.3.0}/pyproject.toml

@@ -1,6 +1,6 @@
 [project]
 name = "casual-mcp"
-version = "0.2.2"
+version = "0.3.0"
 description = "Multi-server MCP client for LLM tool orchestration"
 readme = "README.md"
 requires-python = ">=3.10"
@@ -65,7 +65,7 @@ dev = [
 ]

 [tool.bumpversion]
-current_version = "0.2.2"
+current_version = "0.3.0"
 commit = true
 tag = true

{casual_mcp-0.2.2 → casual_mcp-0.3.0}/src/casual_mcp/__init__.py

@@ -1,12 +1,13 @@
 from . import models
 from .mcp_tool_chat import McpToolChat
 from .providers.provider_factory import ProviderFactory
-from .utils import load_config, load_mcp_client
+from .utils import load_config, load_mcp_client, render_system_prompt

 __all__ = [
     "McpToolChat",
     "ProviderFactory",
     "load_config",
     "load_mcp_client",
+    "render_system_prompt",
     "models",
 ]
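The newly exported `render_system_prompt` is used in `main.py` (the next diff) to build a system prompt from a Jinja2 template plus the server's tool list. A rough usage sketch, mirroring that call; the template name `qwen.j2` is purely illustrative and the async entry point is an assumption.

```python
import asyncio

from casual_mcp import load_config, load_mcp_client, render_system_prompt


async def build_system_prompt() -> str:
    config = load_config("casual_mcp_config.json")
    mcp_client = load_mcp_client(config)

    # Collect the tools so the template can describe them, as main.py does.
    async with mcp_client:
        tools = await mcp_client.list_tools()

    return render_system_prompt("qwen.j2", tools)


if __name__ == "__main__":
    print(asyncio.run(build_system_prompt()))
```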
{casual_mcp-0.2.2 → casual_mcp-0.3.0}/src/casual_mcp/main.py

@@ -32,6 +32,7 @@ You must not speculate or guess about dates — if a date is given to you by a t
 Always present information as current and factual.
 """

+
 class GenerateRequest(BaseModel):
     session_id: str | None = Field(
         default=None, title="Session to use"
@@ -42,11 +43,20 @@ class GenerateRequest(BaseModel):
     system_prompt: str | None = Field(
         default=None, title="System Prompt to use"
     )
-
+    prompt: str = Field(
         title="User Prompt"
     )
-
-
+
+
+class ChatRequest(BaseModel):
+    model: str = Field(
+        title="Model to user"
+    )
+    system_prompt: str | None = Field(
+        default=None, title="System Prompt to use"
+    )
+    messages: list[ChatMessage] = Field(
+        title="Previous messages to supply to the LLM"
     )

 sys.path.append(str(Path(__file__).parent.resolve()))
@@ -55,44 +65,11 @@ sys.path.append(str(Path(__file__).parent.resolve()))
 configure_logging(os.getenv("LOG_LEVEL", 'INFO'))
 logger = get_logger("main")

-async def perform_chat(
-    model,
-    user,
-    system: str | None = None,
-    messages: list[ChatMessage] = None,
-    session_id: str | None = None
-) -> list[ChatMessage]:
-    # Get Provider from Model Config
-    model_config = config.models[model]
-    provider = await provider_factory.get_provider(model, model_config)
-
-    if not system:
-        if (model_config.template):
-            async with mcp_client:
-                system = render_system_prompt(
-                    f"{model_config.template}.j2",
-                    await mcp_client.list_tools()
-                )
-        else:
-            system = default_system_prompt
-
-    chat = McpToolChat(mcp_client, provider, system)
-    return await chat.chat(
-        prompt=user,
-        messages=messages,
-        session_id=session_id
-    )
-

 @app.post("/chat")
-async def chat(req: GenerateRequest):
-
-
-        system=req.system_prompt,
-        user=req.user_prompt,
-        messages=req.messages,
-        session_id=req.session_id
-    )
+async def chat(req: ChatRequest):
+    chat = await get_chat(req.model, req.system_prompt)
+    messages = await chat.chat(req.messages)

     return {
         "messages": messages,
@@ -100,16 +77,43 @@ async def chat(req: GenerateRequest):
     }


-# This endpoint will either go away or be used for something else, don't use it
 @app.post("/generate")
-async def
-
+async def generate(req: GenerateRequest):
+    chat = await get_chat(req.model, req.system_prompt)
+    messages = await chat.generate(
+        req.prompt,
+        req.session_id
+    )
+
+    return {
+        "messages": messages,
+        "response": messages[len(messages) - 1].content
+    }


-@app.get("/
-async def
+@app.get("/generate/session/{session_id}")
+async def get_generate_session(session_id):
     session = McpToolChat.get_session(session_id)
     if not session:
         raise HTTPException(status_code=404, detail="Session not found")

     return session
+
+
+async def get_chat(model: str, system: str | None = None) -> McpToolChat:
+    # Get Provider from Model Config
+    model_config = config.models[model]
+    provider = await provider_factory.get_provider(model, model_config)
+
+    # Get the system prompt
+    if not system:
+        if (model_config.template):
+            async with mcp_client:
+                system = render_system_prompt(
+                    f"{model_config.template}.j2",
+                    await mcp_client.list_tools()
+                )
+        else:
+            system = default_system_prompt
+
+    return McpToolChat(mcp_client, provider, system)
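The new `ChatRequest` model above accepts a full message list, so a client can keep its own history and post it on every turn. A sketch, assuming the same locally running server as in the earlier example; only the `messages` key of the reply is confirmed by the handler above.

```python
import httpx

payload = {
    "model": "gpt-4.1-nano",
    "messages": [
        {"role": "system", "content": "You are a helpful assistant"},
        {"role": "user", "content": "can you explain what the word consistent means?"},
    ],
}

# The server inserts its own system prompt only when none is supplied in messages.
reply = httpx.post("http://localhost:8000/chat", json=payload, timeout=60)
for message in reply.json()["messages"]:
    print(message)
```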
{casual_mcp-0.2.2 → casual_mcp-0.3.0}/src/casual_mcp/mcp_tool_chat.py

@@ -18,8 +18,26 @@ logger = get_logger("mcp_tool_chat")
 sessions: dict[str, list[ChatMessage]] = {}


+def get_session_messages(session_id: str | None):
+    global sessions
+
+    if not sessions.get(session_id):
+        logger.info(f"Starting new session {session_id}")
+        sessions[session_id] = []
+    else:
+        logger.info(
+            f"Retrieving session {session_id} of length {len(sessions[session_id])}"
+        )
+    return sessions[session_id].copy()
+
+
+def add_messages_to_session(session_id: str, messages: list[ChatMessage]):
+    global sessions
+    sessions[session_id].extend(messages.copy())
+
+
 class McpToolChat:
-    def __init__(self, mcp_client: Client, provider: LLMProvider, system: str):
+    def __init__(self, mcp_client: Client, provider: LLMProvider, system: str = None):
         self.provider = provider
         self.mcp_client = mcp_client
         self.system = system
@@ -29,47 +47,56 @@ class McpToolChat:
         global sessions
         return sessions.get(session_id)

-    async def
+    async def generate(
         self,
-        prompt: str
-        messages: list[ChatMessage] = None,
+        prompt: str,
         session_id: str | None = None
     ) -> list[ChatMessage]:
-
+        # Fetch the session if we have a session ID
+        if session_id:
+            messages = get_session_messages(session_id)
+        else:
+            messages: list[ChatMessage] = []

-        #
+        # Add the prompt as a user message
+        user_message = UserMessage(content=prompt)
+        messages.append(user_message)

-        #
+        # Add the user message to the session
         if session_id:
-
-
-
-
-
-
-
-
+            add_messages_to_session(session_id, [user_message])
+
+        # Perform Chat
+        response = await self.chat(messages=messages)
+
+        # Add responses to session
+        if session_id:
+            add_messages_to_session(session_id, response)
+
+        return response
+
+
+    async def chat(
+        self,
+        messages: list[ChatMessage]
+    ) -> list[ChatMessage]:
+        # Add a system message if required
+        has_system_message = any(message.role == 'system' for message in messages)
+        if self.system and not has_system_message:
+            # Insert the system message at the start of the messages
+            messages.insert(0, SystemMessage(content=self.system))

         logger.info("Start Chat")
         async with self.mcp_client:
             tools = await self.mcp_client.list_tools()

-
-            message_history = []
-            messages = [SystemMessage(content=self.system)]
-        else:
-            message_history = messages.copy()
-
-        if prompt:
-            messages.append(UserMessage(content=prompt))
-
-        response: str | None = None
+            response_messages: list[ChatMessage] = []
         while True:
             logger.info("Calling the LLM")
             ai_message = await self.provider.generate(messages, tools)
-            response = ai_message.content

             # Add the assistant's message
+            response_messages.append(ai_message)
             messages.append(ai_message)

             if not ai_message.tool_calls:
@@ -86,18 +113,14 @@ class McpToolChat:
                    return messages
                if result:
                    messages.append(result)
+                   response_messages.append(result)
                    result_count = result_count + 1

            logger.info(f"Added {result_count} tool results")

-       logger.debug(f"
-           {response} """)
-
-       new_messages = [item for item in messages if item not in message_history]
-       if session_id:
-           sessions[session_id].extend(new_messages)
+       logger.debug(f"Final Response: {response_messages[-1].content}")

-       return
+       return response_messages


     async def execute(self, tool_call: AssistantToolCall):
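The session helpers introduced above mean `generate()` now records both the user prompt and the model's replies under a `session_id`, and replays them on the next call. A sketch of that behaviour at the library level, assuming `mcp_client` and `provider` were built as in the README quick-start earlier on this page.

```python
from casual_mcp import McpToolChat


async def demo(mcp_client, provider) -> None:
    chat = McpToolChat(mcp_client, provider, "You are a helpful assistant")

    # First call creates the in-memory session and stores the exchange.
    await chat.generate("What time is it in London?", "demo-session")

    # Second call fetches the stored messages first, so the LLM sees the earlier turn.
    await chat.generate("And what about Tokyo?", "demo-session")

    # The same data the API exposes via GET /generate/session/{session_id}.
    for message in McpToolChat.get_session("demo-session"):
        print(message)
```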
{casual_mcp-0.2.2 → casual_mcp-0.3.0/src/casual_mcp.egg-info}/PKG-INFO

The changes to this copy of PKG-INFO are identical to the PKG-INFO diff shown at the top of this page.
All remaining files listed above are unchanged between 0.2.2 and 0.3.0.