sokrates-mcp 0.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
File without changes
sokrates_mcp/main.py ADDED
@@ -0,0 +1,332 @@
1
+ # main.py - MCP Server for sokrates library
2
+
3
+ # This script sets up an MCP server using the FastMCP framework to provide tools for prompt refinement and execution workflows.
4
+ # It includes several tools that can be used to refine prompts, execute them with external LLMs, break down tasks,
5
+ # generate ideas, perform code reviews, and list available models/providers.
6
+ #
7
+ # Main Purpose
8
+ # The primary purpose of this script is to create a robust MCP server that facilitates interaction with large language models
9
+ # through various prompt engineering workflows. It provides APIs for refining prompts, executing them externally,
10
+ # breaking down complex tasks, generating ideas, performing code reviews, and listing available models/providers.
11
+ #
12
+ # Parameters
13
+ # - `refine_prompt`: Refines a given prompt by enriching it with additional context.
14
+ # - `prompt` (str): The input prompt to be refined.
15
+ # - `refinement_type` (str, optional): Type of refinement ('code' or 'default'). Default is 'default'.
16
+ # - `provider` (str, optional): Name of the provider to use for refinement. Default is 'default'.
17
+ # - `model` (str, optional): Model name for refinement. Default is 'default'.
18
+ #
19
+ # - `refine_and_execute_external_prompt`: Refines a prompt and executes it with an external LLM.
20
+ # - `prompt` (str): The input prompt to be refined and executed.
21
+ # - `provider` (str, optional): Name of the provider to use for LLM interactions. Default is 'default'.
22
+ # - `refinement_model` (str, optional): Model for refinement. Default is 'default'.
23
+ # - `execution_model` (str, optional): Model for execution. Default is 'default'.
24
+ # - `refinement_type` (str, optional): Type of refinement ('code' or 'default'). Default is 'default'.
25
+ #
26
+ # - `handover_prompt`: Hands over a prompt to an external LLM for processing.
27
+ # - `prompt` (str): The prompt to be executed externally.
28
+ # - `provider` (str, optional): Name of the provider to use for LLM interactions. Default is 'default'.
29
+ # - `model` (str, optional): Model name for execution. Default is 'default'.
30
+ # - `temperature` (float, optional): Temperature for the external execution. Default is 0.7.
31
+ #
32
+ # - `breakdown_task`: Breaks down a task into sub-tasks with complexity ratings.
33
+ # - `task` (str): The full task description to break down.
34
+ # - `provider` (str, optional): Name of the provider to use for LLM interactions. Default is 'default'.
35
+ # - `model` (str, optional): Model name for processing. Default is 'default'.
36
+ #
37
+ # - `generate_random_ideas`: Generates random ideas on a random topic.
38
+ # - `idea_count` (int, optional): Number of ideas to generate. Default is 1.
39
+ # - `provider` (str, optional): Name of the provider to use for LLM interactions. Default is 'default'.
40
+ # - `model` (str, optional): Model name for generation. Default is 'default'.
41
+ # - `temperature` (float, optional): Temperature for idea generation. Default is 0.7.
42
+ #
43
+ # - `generate_ideas_on_topic`: Generates ideas on a specific topic.
44
+ # - `topic` (str): The topic to generate ideas for.
45
+ # - `provider` (str, optional): Name of the provider to use for LLM interactions. Default is 'default'.
46
+ # - `model` (str, optional): Model name for generation. Default is 'default'.
47
+ # - `idea_count` (int, optional): Number of ideas to generate. Default is 1.
48
+ # - `temperature` (float, optional): Temperature for idea generation. Default is 0.7.
49
+ #
50
+ # - `generate_code_review`: Generates a code review in markdown format.
51
+ # - `source_directory` (str): Directory containing the source files to review.
+ # - `source_file_paths` (list): List of source file paths to be reviewed.
52
+ # - `target_directory` (str): Directory to store the resulting review files.
53
+ # - `provider` (str, optional): Name of the provider to use for LLM interactions. Default is 'default'.
54
+ # - `model` (str, optional): Model name for code review generation. Default is 'default'.
55
+ # - `review_type` (str, optional): Type of review ('style', 'security', 'performance', 'quality'). Default is 'quality'.
56
+ #
57
+ # - `list_available_models_for_provider`: Lists all available large language models for a specific provider.
58
+ # - `provider_name` (str, optional): Name of the provider to list models for. Default is empty (uses default).
59
+ #
60
+ # - `list_available_providers`: Lists all configured and available API providers.
61
+ #
62
+ # Usage Examples
63
+ # ```python
64
+ # # Refine a prompt
65
+ # await refine_prompt("Write a Python function to sort a list", refinement_type="code")
66
+ #
67
+ # # Refine and execute a prompt with an external LLM
68
+ # await refine_and_execute_external_prompt(
69
+ # "Generate a summary of the following text: ...",
70
+ # refinement_model="model1",
71
+ # execution_model="model2"
72
+ # )
73
+ #
74
+ # # Hand over a prompt to an external LLM
75
+ # await handover_prompt("Translate this text to French: ...")
76
+ #
77
+ # # Break down a task into sub-tasks
78
+ # await breakdown_task("Implement user authentication system")
79
+ #
80
+ # # Generate random ideas
81
+ # await generate_random_ideas(idea_count=3)
82
+ #
83
+ # # Generate ideas on a topic
84
+ # await generate_ideas_on_topic("AI in healthcare", idea_count=5)
85
+ #
86
+ # # Generate code review
87
+ # await generate_code_review(
88
+ # source_file_paths=["/path/to/file1.py", "/path/to/file2.py"],
89
+ # target_directory="/path/to/reviews"
90
+ # )
91
+ #
92
+ # # List available models for a provider
93
+ # await list_available_models_for_provider("my-provider")
94
+ #
95
+ # # List all available providers
96
+ # await list_available_providers()
97
+ # ```
98
+ #
99
+
100
+ from typing import Annotated, Optional
101
+ from pydantic import Field
102
+ from .mcp_config import MCPConfig
103
+ from .workflow import Workflow
104
+ from fastmcp import FastMCP, Context
105
+ import logging
106
+ import os
107
+ import argparse
108
+
109
+ MCP_NAME = "sokrates-mcp"
110
+ VERSION = "0.2.0"
111
+ DEFAULT_PROVIDER_IDENTIFIER = "default"
112
+ DEFAULT_MODEL_IDENTIFIER = "default"
113
+ DEFAULT_REFINEMENT_TYPE = "default"
114
+ DEFAULT_CODE_REVIEW_TYPE = "quality"
115
+
116
+ config = MCPConfig()
117
+ workflow = Workflow(config)
118
+
119
+ # Configure logging for better visibility of fastmcp operations
120
+ log_file_path = os.path.expanduser("~/.sokrates-mcp/server.log")
121
+ os.makedirs(os.path.dirname(log_file_path), exist_ok=True)
122
+ logging.basicConfig(level=logging.INFO, filename=log_file_path, filemode='a', format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
123
+ logger = logging.getLogger(__name__)
124
+
125
+ # Initialize the MCP Server
126
+ mcp = FastMCP(
127
+ name=MCP_NAME,
128
+ instructions="A MCP server for using sokrates python library's tools: prompt refinement and improvement workflows.",
129
+ version=VERSION
130
+ )
131
+
132
+ # -------------------------------------------------------------------------
133
+
134
+ @mcp.tool(
135
+ name="refine_prompt",
136
+ description="Refines a given prompt by enriching the prompt with additional context and improving clarity for further processing by large language models. A prompt received like this can be sent further directly after receiving the response. The refinement_type can be used to improve the results: e.g. for a coding task this should be set to the code type.",
137
+ tags={"prompt","refinement"}
138
+ )
139
+ async def refine_prompt(prompt: Annotated[str, Field(description="Input prompt that should be refined")],
140
+ ctx: Context,
141
+ refinement_type: Annotated[str, Field(description="The type of the refinement. This could be 'code' (for refining coding tasks) or 'default' . The default type is: default", default=DEFAULT_REFINEMENT_TYPE)],
142
+ provider: Annotated[str, Field(description="The name of the provider to use for the prompt refinement process. The default model name is 'default', which will pick the server's default provider configured.", default=DEFAULT_PROVIDER_IDENTIFIER)],
143
+ model: Annotated[str, Field(description="The name of the model that should be used for the prompt refinement process. The default model name is 'default', which will pick the server's default model.", default=DEFAULT_MODEL_IDENTIFIER)],
144
+ ) -> str:
145
+ """
146
+ Refines a given prompt by enriching the input prompt with additional context and improving clarity
147
+ for further processing by large language models.
148
+
149
+ Args:
150
+ prompt (str): The input prompt to be refined.
151
+ ctx (Context): The MCP context object.
152
+ refinement_type (str, optional): Type of refinement ('code' or 'default'). Default is 'default'.
153
+ provider (str, optional): Name of the provider to use for refinement. Default is 'default'.
154
+ model (str, optional): Model name for refinement. Default is 'default'.
155
+
156
+ Returns:
157
+ str: The refined prompt.
158
+
159
+ This function delegates the actual refinement work to the workflow.refine_prompt method.
160
+ """
161
+ return await workflow.refine_prompt(prompt=prompt, ctx=ctx, provider=provider, model=model, refinement_type=refinement_type)
162
+
163
+ # -------------------------------------------------------------------------
164
+
165
+ @mcp.tool(
166
+ name="refine_and_execute_external_prompt",
167
+ description="Refines a given prompt by enriching the input prompt with additional context and then executes the prompt with an external llm. It delivers back the exection result of the refined prompt on the external llm. The refinement_type can be used to improve the results: e.g. for a coding task this should be set to the code type.",
168
+ tags={"prompt","refinement","external_processing"}
169
+ )
170
+ async def refine_and_execute_external_prompt(prompt: Annotated[str, Field(description="Input prompt that should be refined and then processed.")],
171
+ ctx: Context,
172
+ provider: Annotated[str, Field(description="The name of the provider to use for LLM interactions. The default model name is 'default', which will pick the server's default provider configured.", default=DEFAULT_PROVIDER_IDENTIFIER)],
173
+ refinement_model: Annotated[str, Field(description="[Optional] The name of the model that should be used for the prompt refinement process. The default refinement model name is 'default', which will pick the server's default model.", default=DEFAULT_MODEL_IDENTIFIER)],
174
+ execution_model: Annotated[str, Field(description="[Optional] The name of the external model that should be used for the execution of the refined prompt. The default execution model name is 'default', which will pick the server's default model.", default=DEFAULT_MODEL_IDENTIFIER)],
175
+ refinement_type: Annotated[str, Field(description="The type of the refinement. This could be 'code' (for refining coding tasks) or 'default' for any general refinement tasks. The default type is: default", default=DEFAULT_REFINEMENT_TYPE)],
176
+ ) -> str:
177
+ """
178
+ Refines a given prompt and executes it with an external LLM.
179
+
180
+ Args:
181
+ prompt (str): The input prompt to be refined and executed.
182
+ ctx (Context): The MCP context object.
183
+ provider (str, optional): Name of the provider to use for LLM interactions. Default is 'default'.
184
+ refinement_model (str, optional): Model for refinement. Default is 'default'.
185
+ execution_model (str, optional): Model for execution. Default is 'default'.
186
+ refinement_type (str, optional): Type of refinement ('code' or 'default'). Default is 'default'.
187
+
188
+ Returns:
189
+ str: The execution result of the refined prompt from the external LLM.
190
+
191
+ This function first refines the prompt and then executes it with an external LLM.
192
+ """
193
+ return await workflow.refine_and_execute_external_prompt(prompt=prompt, ctx=ctx, provider=provider, refinement_model=refinement_model, execution_model=execution_model, refinement_type=refinement_type)
194
+
195
+ # -------------------------------------------------------------------------
196
+
197
+ @mcp.tool(
198
+ name="handover_prompt",
199
+ description="Hands over a prompt to an external llm for processing and delivers back the processed result.",
200
+ tags={"prompt","refinement"}
201
+ )
202
+ async def handover_prompt(prompt: Annotated[str, Field(description="Prompt that should be executed externally.")],
203
+ ctx: Context,
204
+ provider: Annotated[str, Field(description="The name of the provider to use for LLM interactions. The default model name is 'default', which will pick the server's default provider configured.", default=DEFAULT_PROVIDER_IDENTIFIER)],
205
+ temperature: Annotated[float, Field(description="[Optional] The temperature of the llm to use for generating the ideas. The default value is 0.7 .", default=0.7)],
206
+ model: Annotated[str, Field(description="[Optional] The name of the model that should be used for the external prompt processing. The default model name is 'default', which will pick the server's default model.", default=DEFAULT_MODEL_IDENTIFIER)],
207
+ ) -> str:
208
+ """
209
+ Hands over a prompt to an external LLM for processing.
210
+
211
+ Args:
212
+ prompt (str): The prompt to be executed externally.
213
+ ctx (Context): The MCP context object.
214
+ provider (str, optional): Name of the provider to use for LLM interactions. Default is 'default'.
215
+ model (str, optional): Model name for execution. Default is 'default'.
216
+ temperature (float, optional): Temperature to use for the external execution. Default is 0.7.
217
+
218
+ Returns:
219
+ str: The processed result from the external LLM.
220
+
221
+ This function delegates the prompt execution to an external LLM and returns the result.
222
+ """
223
+ return await workflow.handover_prompt(prompt=prompt, ctx=ctx, provider=provider, model=model, temperature=temperature)
224
+
225
+ # -------------------------------------------------------------------------
226
+
227
+ @mcp.tool(
228
+ name="breakdown_task",
229
+ description="Breaks down a task into sub-tasks back a json list of sub-tasks with complexity ratings.",
230
+ tags={"prompt","task","breakdown"}
231
+ )
232
+ async def breakdown_task(task: Annotated[str, Field(description="The full task description to break down further.")],
233
+ ctx: Context,
234
+ provider: Annotated[str, Field(description="The name of the provider to use for LLM interactions. The default model name is 'default', which will pick the server's default provider configured.", default=DEFAULT_PROVIDER_IDENTIFIER)],
235
+ model: Annotated[str, Field(description="[Optional] The name of the model that should be used for the external prompt processing. The default model name is 'default', which will pick the server's default model.", default=DEFAULT_MODEL_IDENTIFIER)],
236
+ ) -> str:
237
+ """
238
+ Breaks down a task into sub-tasks and returns a JSON list of sub-tasks with complexity ratings.
239
+
240
+ Args:
241
+ task (str): The full task description to break down.
242
+ ctx (Context): The MCP context object.
243
+ provider (str, optional): Name of the provider to use for LLM interactions. Default is 'default'.
244
+ model (str, optional): Model name for processing. Default is 'default'.
245
+
246
+ Returns:
247
+ str: A JSON string containing the list of sub-tasks with complexity ratings.
248
+
249
+ This function uses an LLM to analyze the task and break it down into manageable sub-tasks.
250
+ """
251
+ return await workflow.breakdown_task(task=task, ctx=ctx, provider=provider, model=model)
252
+
253
+ @mcp.tool(
254
+ name="generate_random_ideas",
255
+ description="Invents and generates a random topic an generates the provided count of ideas on the topic.",
256
+ tags={"idea", "generator","invention","random"}
257
+ )
258
+ async def generate_random_ideas(ctx: Context,
259
+ idea_count: Annotated[int, Field(description="[Optional] The number of ideas to generate. The default value is 1.", default=1)],
260
+ provider: Annotated[str, Field(description="The name of the provider to use for LLM interactions. The default model name is 'default', which will pick the server's default provider configured.", default=DEFAULT_PROVIDER_IDENTIFIER)],
261
+ model: Annotated[str, Field(description="[Optional] The name of the model that should be used for the generation. The default model name is 'default', which will pick the server's default model.", default=DEFAULT_MODEL_IDENTIFIER)],
262
+ temperature: Annotated[float, Field(description="[Optional] The temperature of the llm to use for generating the ideas. The default value is 0.7 .", default=0.7)]
263
+ ) -> str:
264
+ return await workflow.generate_random_ideas(ctx=ctx, provider=provider, model=model, idea_count=idea_count, temperature=temperature)
265
+
266
+ @mcp.tool(
267
+ name="generate_ideas_on_topic",
268
+ description="Generates the provided count of ideas on the provided topic.",
269
+ tags={"idea","generator", "idea generation", "invention"}
270
+ )
271
+ async def generate_ideas_on_topic(
272
+ ctx: Context,
273
+ topic: Annotated[str, Field(description="The topic to generate ideas for.")],
274
+ provider: Annotated[str, Field(description="The name of the provider to use for LLM interactions. The default model name is 'default', which will pick the server's default provider configured.", default=DEFAULT_PROVIDER_IDENTIFIER)],
275
+ model: Annotated[str, Field(description="[Optional] The name of the model that should be used for the generation. The default model name is 'default', which will pick the server's default model.", default=DEFAULT_MODEL_IDENTIFIER)],
276
+ idea_count: Annotated[int, Field(description="[Optional] The number of ideas to generate. The default value is 1.", default=1)],
277
+ temperature: Annotated[float, Field(description="The temperature of the llm to use for generating the ideas. The default value is 0.7 .", default=0.7)]
278
+ ) -> str:
279
+ return await workflow.generate_ideas_on_topic(ctx=ctx, provider=provider, model=model, topic=topic, idea_count=idea_count, temperature=temperature)
280
+
281
+ @mcp.tool(
282
+ name="generate_code_review",
283
+ description="Generates a code review in markdown format in a file on the local file system and returns the path to the code review. It supports multiple types of code reviews.",
284
+ tags={"coding","review","markdown","file"}
285
+ )
286
+ async def generate_code_review(
287
+ ctx: Context,
288
+ source_directory: Annotated[str, Field(description="The absolute directory path containing source files to create reviews for. This should contain source files on the local filesystem.")],
289
+ source_file_paths: Annotated[list, Field(description="A list of absolute source file paths that should be reviewed. The paths should be absolute paths in the local filesystem.")],
290
+ target_directory: Annotated[str, Field(description="The directory to store the resulting review markdown files. This should point to the desired target path for the markdown files on the local filesystem.")],
291
+ provider: Annotated[str, Field(description="The name of the provider to use for LLM interactions. The default model name is 'default', which will pick the server's default provider configured.", default=DEFAULT_PROVIDER_IDENTIFIER)],
292
+ model: Annotated[str, Field(description="[Optional] The name of the model that should be used for the generation. The default model name is 'default', which will pick the server's default model.", default=DEFAULT_MODEL_IDENTIFIER)],
293
+ review_type: Annotated[str, Field(description="[Optional] The type of review to execute. Choices are: 'style', 'security', 'performance', 'quality' . The default is 'quality'", default=DEFAULT_CODE_REVIEW_TYPE)]
294
+ ) -> str:
295
+ return await workflow.generate_code_review(ctx=ctx, provider=provider, model=model, review_type=review_type, source_directory=source_directory, source_file_paths=source_file_paths, target_directory=target_directory)
296
+
297
+ @mcp.tool(
298
+ name="list_available_models_for_provider",
299
+ description="Lists all available large language models and the target api endpoint configured as provider for the sokrates-mcp server.",
300
+ tags={"external","llm","models","list"}
301
+ )
302
+ async def list_available_models_for_provider(ctx: Context, provider_name: Annotated[str, Field(description="The provider name to list the available models for", default="")]) -> str:
303
+ return await workflow.list_available_models_for_provider(ctx=ctx, provider_name=provider_name)
304
+
305
+ @mcp.tool(
306
+ name="list_available_providers",
307
+ description="Lists all configured and available API providers for large language models for the sokrates-mcp server.",
308
+ tags={"external","llm","providers","list"}
309
+ )
310
+ async def list_available_providers(ctx: Context):
311
+ return await workflow.list_available_providers(ctx=ctx)
312
+
313
+ def main():
314
+ # Set up argument parsing
315
+ parser = argparse.ArgumentParser(description='Sokrates MCP Server')
316
+ parser.add_argument('--transport', choices=['stdio', 'sse', 'http'], default='stdio',
317
+ help='Transport method (default: stdio)')
318
+ parser.add_argument('--host', type=str, default="127.0.0.1",
319
+ help='Host for HTTP and SSE transport (default: 127.0.0.1)')
320
+ parser.add_argument('--port', type=int, default=8000,
321
+ help='Port number for HTTP and SSE transport (default: 8000)')
322
+
323
+ args = parser.parse_args()
324
+
325
+ # Run the MCP server with specified transport and port
326
+ if args.transport == 'stdio':
327
+ mcp.run(transport=args.transport)
328
+ else:
329
+ mcp.run(transport=args.transport, port=args.port, host=args.host)
330
+
331
+ if __name__ == "__main__":
332
+ main()
@@ -0,0 +1,236 @@
1
+ # MCP Configuration Module
2
+ #
3
+ # This module provides configuration management for the MCP server.
4
+ # It loads configuration from a YAML file and sets default values if needed.
5
+ #
6
+ # Parameters:
7
+ # - config_file_path: Path to the YAML configuration file (default: ~/.sokrates-mcp/config.yml)
8
+ # - api_endpoint: API endpoint URL (default: http://localhost:1234/v1)
9
+ # - api_key: API key for authentication (default: mykey)
10
+ # - model: Model name to use (default: qwen/qwen3-4b-2507)
11
+ # - verbose: Enable verbose logging (default: False)
12
+ #
13
+ # Usage example:
14
+ # config = MCPConfig(api_endpoint="https://api.example.com", model="my-model")
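+ #
+ # Besides constructor arguments, the YAML config file can define multiple providers.
+ # As an illustration, an example providers section of ~/.sokrates-mcp/config.yml
+ # (taken from the template in the project README) looks like:
+ #
+ # default_provider: local
+ # providers:
+ #   - name: local
+ #     type: openai
+ #     api_endpoint: http://localhost:1234/v1
+ #     api_key: "not-required"
+ #     default_model: "qwen/qwen3-4b-2507"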
15
+ import os
16
+ import yaml
17
+ import logging
18
+ from urllib.parse import urlparse
19
+ from pathlib import Path
20
+ from sokrates import Config
21
+
22
+ DEFAULT_API_ENDPOINT = "http://localhost:1234/v1"
23
+ DEFAULT_API_KEY = "mykey"
24
+ DEFAULT_MODEL = "qwen/qwen3-4b-2507"
25
+ DEFAULT_PROVIDER_NAME = "default"
26
+ DEFAULT_PROVIDER_TYPE = "openai"
27
+ DEFAULT_PROVIDER_CONFIGURATION = {
28
+ "name": DEFAULT_PROVIDER_NAME,
29
+ "type": DEFAULT_PROVIDER_TYPE,
30
+ "api_endpoint": DEFAULT_API_ENDPOINT,
31
+ "api_key": DEFAULT_API_KEY,
32
+ "default_model": DEFAULT_MODEL
33
+ }
34
+
35
+ class MCPConfig:
36
+ """Configuration management class for MCP server.
37
+
38
+ This class handles loading configuration from a YAML file and provides
39
+ default values for various parameters.
40
+
41
+ Attributes:
42
+ CONFIG_FILE_PATH (str): Default path to the configuration file
43
+ DEFAULT_PROMPTS_DIRECTORY (str): Default directory for prompts
44
+ DEFAULT_REFINEMENT_PROMPT_FILENAME (str): Default refinement prompt filename
45
+ DEFAULT_REFINEMENT_CODING_PROMPT_FILENAME (str): Default refinement coding prompt filename
46
+ PROVIDER_TYPES (list): List of supported provider types
47
+ """
48
+ CONFIG_FILE_PATH = os.path.expanduser("~/.sokrates-mcp/config.yml")
49
+ DEFAULT_PROMPTS_DIRECTORY = Config().prompts_directory
50
+ DEFAULT_REFINEMENT_PROMPT_FILENAME = "refine-prompt.md"
51
+ DEFAULT_REFINEMENT_CODING_PROMPT_FILENAME = "refine-coding-v3.md"
52
+ PROVIDER_TYPES = [
53
+ "openai"
54
+ ]
55
+
56
+ def __init__(self, config_file_path=CONFIG_FILE_PATH, api_endpoint=DEFAULT_API_ENDPOINT, api_key=DEFAULT_API_KEY, model=DEFAULT_MODEL, verbose=False):
57
+ """Initialize MCP configuration.
58
+
59
+ Args:
60
+ config_file_path (str): Path to the YAML configuration file.
61
+ Defaults to CONFIG_FILE_PATH.
62
+ api_endpoint (str): API endpoint URL. Defaults to DEFAULT_API_ENDPOINT.
63
+ api_key (str): API key for authentication. Defaults to DEFAULT_API_KEY.
64
+ model (str): Model name to use. Defaults to DEFAULT_MODEL.
65
+ verbose (bool): Enable verbose logging. Defaults to False.
66
+
67
+ Returns:
68
+ None
69
+
70
+ Side Effects:
71
+ Initializes instance attributes with values from config file or defaults
72
+ Sets up logging based on verbose parameter
73
+ """
74
+ self.logger = logging.getLogger(__name__)
75
+ self.config_file_path = config_file_path
76
+ config_data = self._load_config_from_file(self.config_file_path)
77
+
78
+ prompts_directory = config_data.get("prompts_directory", self.DEFAULT_PROMPTS_DIRECTORY)
79
+ if not self._ensure_directory_exists(prompts_directory):
80
+ raise ValueError(f"Invalid prompts directory: {prompts_directory}")
81
+ self.prompts_directory = prompts_directory
82
+
83
+ refinement_prompt_filename = config_data.get("refinement_prompt_filename", self.DEFAULT_REFINEMENT_PROMPT_FILENAME)
84
+ if not os.path.exists(os.path.join(prompts_directory, refinement_prompt_filename)):
85
+ raise FileNotFoundError(f"Refinement prompt file not found: {refinement_prompt_filename}")
86
+ self.refinement_prompt_filename = refinement_prompt_filename
87
+
88
+ refinement_coding_prompt_filename = config_data.get("refinement_coding_prompt_filename", self.DEFAULT_REFINEMENT_CODING_PROMPT_FILENAME)
89
+ if not os.path.exists(os.path.join(prompts_directory, refinement_coding_prompt_filename)):
90
+ raise FileNotFoundError(f"Refinement coding prompt file not found: {refinement_coding_prompt_filename}")
91
+ self.refinement_coding_prompt_filename = refinement_coding_prompt_filename
92
+
93
+
94
+ self._configure_providers(config_data=config_data)
95
+ self.logger.info(f"Configuration loaded from {self.config_file_path}:")
96
+ self.logger.info(f" Prompts Directory: {self.prompts_directory}")
97
+ self.logger.info(f" Refinement Prompt Filename: {self.refinement_prompt_filename}")
98
+ self.logger.info(f" Refinement Coding Prompt Filename: {self.refinement_coding_prompt_filename}")
99
+ self.logger.info(f" Default Provider: {self.default_provider}")
100
+ for prov in self.providers:
101
+ self.logger.info(f"Configured provider name: {prov["name"]} , api_endpoint: {prov["api_endpoint"]} , default_model: {prov["default_model"]}")
102
+
103
+ def available_providers(self):
104
+ return list(map(lambda prov: {'name': prov['name'], 'api_endpoint': prov['api_endpoint'], 'type': prov['type']}, self.providers))
105
+
106
+ def get_provider_by_name(self, provider_name):
107
+ providers = list(filter(lambda x: x['name'] == provider_name, self.providers))
108
+ return providers[0] if providers else None
109
+
110
+ def get_default_provider(self):
111
+ return self.get_provider_by_name(self.default_provider)
112
+
113
+ def _configure_providers(self, config_data):
114
+ # configure defaults if no providers could be loaded from config_data
115
+ self.providers = config_data.get("providers", {})
116
+ if len(self.providers) < 1:
117
+ self.providers = [
118
+ DEFAULT_PROVIDER_CONFIGURATION
119
+ ]
120
+ self.default_provider = DEFAULT_PROVIDER_NAME
121
+ return
122
+
123
+ provider_names = []
124
+ for provider in self.providers:
125
+ if provider.get("name") in provider_names:
126
+ raise ValueError("Duplicate provider names in the config providers section")
127
+ self._validate_provider(provider)
128
+ provider_names.append(provider['name'])
129
+
130
+ if not config_data.get('default_provider'):
131
+ raise ValueError(f"No default_provider was configured at the root level of the config file in {self.config_file_path}")
132
+ self.default_provider = config_data['default_provider']
133
+
134
+ def _validate_provider(self, provider):
135
+ self._validate_provider_name(provider.get("name", ""))
136
+ self._validate_provider_type(provider.get("type", ""))
137
+ self._validate_url(provider.get("api_endpoint", ""))
138
+ self._validate_api_key(provider.get("api_key", ""))
139
+ self._validate_model_name(provider.get("default_model", ""))
140
+
141
+ def _validate_provider_name(self, provider_name):
142
+ if len(provider_name) < 1:
143
+ raise ValueError(f"The provider name: {provider_name} is not a valid provider name")
144
+
145
+ def _validate_provider_type(self, provider_type):
146
+ if provider_type not in self.PROVIDER_TYPES:
147
+ raise ValueError(f"The provider type: {provider_type} is not supported by sokrates-mcp")
148
+
149
+ def _validate_url(self, url):
150
+ """Validate URL format.
151
+
152
+ Args:
153
+ url (str): URL to validate
154
+
155
+ Raises:
+ ValueError: If the URL is not a valid http(s) API endpoint
157
+ """
158
+ result = urlparse(url)
+ if not (result.scheme in ['http', 'https'] and result.netloc):
+ raise ValueError(f"The api_endpoint: {url} is not a valid llm API endpoint")
163
+
164
+ def _validate_api_key(self, api_key):
165
+ """Validate API key format.
166
+
167
+ Args:
168
+ api_key (str): API key to validate
169
+
170
+ Raises:
+ ValueError: If the API key is empty
172
+ """
173
+ if len(api_key) < 1:
174
+ raise ValueError("The api key is empty")
175
+
176
+ def _validate_model_name(self, model):
177
+ """Validate model name format.
178
+
179
+ Args:
180
+ model (str): Model name to validate
181
+
182
+ Raises:
+ ValueError: If the model name is empty
184
+ """
185
+ if len(model) < 1:
186
+ raise ValueError("The model is empty")
187
+
188
+ def _ensure_directory_exists(self, directory_path):
189
+ """Ensure directory exists and is valid.
190
+
191
+ Args:
192
+ directory_path (str): Directory path to check/validate
193
+
194
+ Returns:
195
+ bool: True if directory exists or was created successfully, False otherwise
196
+ """
197
+ try:
198
+ path = Path(directory_path)
199
+ if not path.exists():
200
+ path.mkdir(parents=True, exist_ok=True)
201
+ return path.is_dir()
202
+ except Exception as e:
203
+ self.logger.error(f"Error ensuring directory exists: {e}")
204
+ return False
205
+
206
+ def _load_config_from_file(self, config_file_path):
207
+ """Load configuration data from a YAML file.
208
+
209
+ Args:
210
+ config_file_path (str): Path to the YAML configuration file
211
+
212
+ Returns:
213
+ dict: Parsed configuration data or empty dict if file doesn't exist
214
+ or cannot be parsed
215
+
216
+ Side Effects:
217
+ Logs error messages if file reading or parsing fails
218
+ """
219
+ try:
220
+ # Ensure config directory exists
221
+ Path(config_file_path).parent.mkdir(parents=True, exist_ok=True)
222
+
223
+ if os.path.exists(config_file_path):
224
+ with open(config_file_path, 'r') as f:
225
+ return yaml.safe_load(f) or {}
226
+ else:
227
+ self.logger.warning(f"Config file not found at {config_file_path}. Using defaults.")
228
+ # Create empty config file
229
+ with open(config_file_path, 'w') as f:
230
+ yaml.dump({}, f)
231
+ return {}
232
+ except yaml.YAMLError as e:
233
+ self.logger.error(f"Error parsing YAML config file {config_file_path}: {e}")
234
+ except Exception as e:
235
+ self.logger.error(f"Error reading config file {config_file_path}: {e}")
236
+ return {}
@@ -0,0 +1,293 @@
1
+ from fastmcp import Context
2
+ from .mcp_config import MCPConfig
3
+ from sokrates import FileHelper, RefinementWorkflow, LLMApi, PromptRefiner, IdeaGenerationWorkflow
4
+ from sokrates.coding.code_review_workflow import run_code_review
5
+ from pathlib import Path
6
+ from typing import List
7
+ class Workflow:
8
+
9
+ WORKFLOW_COMPLETION_MESSAGE = "Workflow completed."
10
+
11
+ def __init__(self, config: MCPConfig):
12
+ """Initialize the workflow with configuration.
13
+
14
+ Args:
15
+ config (MCPConfig): The MCP configuration object
16
+ """
17
+ self.config = config
18
+ default_provider = self.config.get_default_provider()
19
+ self.default_model = default_provider['default_model']
20
+ self.default_api_endpoint = default_provider['api_endpoint']
21
+ self.default_api_key = default_provider['api_key']
22
+
23
+ self.prompt_refiner = PromptRefiner()
24
+
25
+ def _get_model(self, provider, model=''):
26
+ if not model or model == 'default':
27
+ return provider['default_model']
28
+ return model
29
+
30
+ def _get_provider(self, provider_name: str = ''):
31
+ if not provider_name or provider_name == 'default':
32
+ provider = self.config.get_default_provider()
33
+ else:
34
+ provider = self.config.get_provider_by_name(provider_name)
35
+
36
+ if not provider:
37
+ raise ValueError(f"Provider '{provider_name}' not found in configuration")
38
+ return provider
39
+
40
+ def _initialize_refinement_workflow(self, provider_name: str = '', model: str = ''):
41
+ provider = self._get_provider(provider_name)
42
+ model = self._get_model(provider=provider, model=model)
43
+ refinement_workflow = RefinementWorkflow(api_endpoint=provider['api_endpoint'], api_key=provider['api_key'], model=model)
44
+ return refinement_workflow
45
+
46
+ def load_refinement_prompt(self, refinement_type : str = 'default'):
47
+ """Load a refinement prompt based on the refinement type.
48
+
49
+ Args:
50
+ refinement_type (str): Type of refinement ('code' or 'default'). Default is 'default'.
51
+
52
+ Returns:
53
+ str: The content of the refinement prompt file.
54
+ """
55
+ path=self.config.prompts_directory
56
+
57
+ if refinement_type == 'code' or refinement_type == 'coding':
58
+ refinement_prompt_file = str(Path(f"{path}/{self.config.refinement_coding_prompt_filename}").resolve())
59
+ else:
60
+ refinement_prompt_file = str(Path(f"{path}/{self.config.refinement_prompt_filename}").resolve())
61
+
62
+ return FileHelper.read_file(refinement_prompt_file, verbose=False)
63
+
64
+ async def refine_prompt(self, prompt: str, ctx: Context, provider: str, model: str, refinement_type: str = 'default') -> str:
65
+ """Refine a given prompt by enriching it with additional context.
66
+
67
+ Args:
68
+ prompt (str): The input prompt to be refined.
69
+ ctx (Context): The MCP context object.
70
+ provider (str): Name of the provider to use for refinement.
71
+ model (str): Model name for refinement.
72
+ refinement_type (str, optional): Type of refinement ('code' or 'default'). Default is 'default'.
73
+
74
+ Returns:
75
+ str: The refined prompt.
76
+ """
77
+ refinement_prompt = self.load_refinement_prompt(refinement_type)
78
+ workflow = self._initialize_refinement_workflow(provider_name=provider, model=model)
79
+
80
+ await ctx.info(f"Prompt refinement and execution workflow started with refinement model: {workflow.model} . Waiting for the response from the LLM...")
81
+ refined = workflow.refine_prompt(input_prompt=prompt, refinement_prompt=refinement_prompt)
82
+ await ctx.info(self.WORKFLOW_COMPLETION_MESSAGE)
83
+ return refined
84
+
85
+ async def refine_and_execute_external_prompt(self, prompt: str, ctx: Context, provider: str, refinement_model: str, execution_model: str, refinement_type: str = 'default') -> str:
86
+ """Refine a given prompt and execute it with an external LLM.
87
+
88
+ Args:
89
+ prompt (str): The input prompt to be refined and executed.
90
+ ctx (Context): The MCP context object.
91
+ provider (str): Name of the provider to use for LLM interactions.
92
+ refinement_model (str): Model for refinement.
93
+ execution_model (str): Model for execution.
94
+ refinement_type (str, optional): Type of refinement ('code' or 'default'). Default is 'default'.
95
+
96
+ Returns:
97
+ str: The execution result of the refined prompt from the external LLM.
98
+ """
99
+ refinement_prompt = self.load_refinement_prompt(refinement_type)
100
+
101
+ prov = self._get_provider(provider)
102
+ refinement_model = self._get_model(provider=prov, model=refinement_model)
103
+ execution_model = self._get_model(provider=prov, model=execution_model)
104
+
105
+ workflow = self._initialize_refinement_workflow(provider_name=provider, model=execution_model)
106
+ await ctx.info(f"Prompt refinement and execution workflow started with refinement model: {refinement_model} and execution model {execution_model} . Waiting for the responses from the LLMs...")
107
+ result = workflow.refine_and_send_prompt(input_prompt=prompt, refinement_prompt=refinement_prompt, refinement_model=refinement_model, execution_model=execution_model)
108
+ await ctx.info(self.WORKFLOW_COMPLETION_MESSAGE)
109
+ return result
110
+
111
+ async def handover_prompt(self, prompt: str, ctx: Context, provider: str, model: str, temperature=0.7) -> str:
112
+ """Hands over a prompt to an external LLM for processing.
113
+
114
+ Args:
115
+ prompt (str): The prompt to be executed externally.
116
+ ctx (Context): The MCP context object.
117
+ provider (str): Name of the provider to use for LLM interactions.
118
+ model (str): Model name for execution.
119
+ temperature (float, optional): Temperature to use for the external execution. Default is 0.7.
120
+
121
+ Returns:
122
+ str: The processed result from the external LLM.
123
+ """
124
+ refiner = PromptRefiner()
125
+
126
+ prov = self._get_provider(provider)
127
+ model = self._get_model(provider=prov, model=model)
128
+ llm_api = LLMApi(api_endpoint=prov['api_endpoint'], api_key=prov['api_key'])
129
+
130
+ await ctx.info(f"External prompt execution workflow started with model: {model}. Waiting for the response from the LLM...")
+ result = llm_api.send(prompt, model=model, temperature=temperature)
+ result = refiner.clean_response(result)
134
+ await ctx.info(self.WORKFLOW_COMPLETION_MESSAGE)
135
+ return result
136
+
137
+ async def breakdown_task(self, task: str, ctx: Context, provider: str, model: str) -> str:
138
+ """Breaks down a task into sub-tasks with complexity ratings.
139
+
140
+ Args:
141
+ task (str): The full task description to break down.
142
+ ctx (Context): The MCP context object.
143
+ provider (str): Name of the provider to use for LLM interactions.
144
+ model (str): Model name for processing.
145
+
146
+ Returns:
147
+ str: A JSON string containing the list of sub-tasks with complexity ratings.
148
+ """
149
+ workflow = self._initialize_refinement_workflow(provider_name=provider, model=model)
150
+ await ctx.info(f"Task break-down started with model: {workflow.model} . Waiting for the response from the LLM...")
151
+ result = workflow.breakdown_task(task=task)
152
+ await ctx.info(self.WORKFLOW_COMPLETION_MESSAGE)
153
+ return result
154
+
155
+ async def generate_random_ideas(self, ctx: Context, provider: str, idea_count: int = 1, temperature: float = 0.7, model: str = None) -> str:
156
+ """Generate random ideas on a random topic.
157
+
158
+ Args:
159
+ ctx (Context): The MCP context object.
160
+ provider (str): Name of the provider to use for LLM interactions.
161
+ idea_count (int, optional): Number of ideas to generate. Default is 1.
162
+ temperature (float, optional): Temperature for idea generation. Default is 0.7.
163
+ model (str, optional): Model name for generation. Default is 'default'.
164
+
165
+ Returns:
166
+ str: Generated ideas separated by ---.
167
+ """
168
+ prov = self._get_provider(provider)
169
+ model = self._get_model(provider=prov, model=model)
170
+ await ctx.info(f"Task `generate random ideas` started at provider: {prov['name']} with model: {model} , idea_count: {idea_count} and temperature: {temperature}. Waiting for the response from the LLM...")
171
+
172
+ idea_generation_workflow = IdeaGenerationWorkflow(api_endpoint=prov['api_endpoint'],
173
+ api_key=prov['api_key'],
174
+ idea_count=idea_count,
175
+ temperature=temperature,
176
+ generator_llm_model=model,
177
+ refinement_llm_model=model,
178
+ execution_llm_model=model,
179
+ topic_generation_llm_model=model
180
+ )
181
+ results = idea_generation_workflow.run()
182
+ result_text = "\n---\n".join(results)
183
+ await ctx.info(self.WORKFLOW_COMPLETION_MESSAGE)
184
+ return result_text
185
+
186
+ async def generate_ideas_on_topic(self, ctx: Context, topic: str, provider: str, model: str, idea_count: int = 1, temperature: float = 0.7) -> str:
187
+ """Generate ideas on a specific topic.
188
+
189
+ Args:
190
+ ctx (Context): The MCP context object.
191
+ topic (str): The topic to generate ideas for.
192
+ provider (str): Name of the provider to use for LLM interactions.
193
+ model (str): Model name for generation.
194
+ idea_count (int, optional): Number of ideas to generate. Default is 1.
195
+ temperature (float, optional): Temperature for idea generation. Default is 0.7.
196
+
197
+ Returns:
198
+ str: Generated ideas separated by ---.
199
+ """
200
+ prov = self._get_provider(provider)
201
+ model = self._get_model(provider=prov, model=model)
202
+
203
+ await ctx.info(f"Task `generate ideas on topic` started with topic: '{topic}' , model: {model} , idea_count: {idea_count} and temperature: {temperature}. Waiting for the response from the LLM...")
204
+ idea_generation_workflow = IdeaGenerationWorkflow(api_endpoint=prov['api_endpoint'],
205
+ api_key=prov['api_key'],
206
+ topic=topic,
207
+ idea_count=idea_count,
208
+ temperature=temperature,
209
+ generator_llm_model=model,
210
+ refinement_llm_model=model,
211
+ execution_llm_model=model,
212
+ topic_generation_llm_model=model
213
+ )
214
+ results = idea_generation_workflow.run()
215
+ result_text = "\n---\n".join(results)
216
+ await ctx.info(self.WORKFLOW_COMPLETION_MESSAGE)
217
+ return result_text
218
+
219
+ async def generate_code_review(self, ctx: Context, source_directory: str, source_file_paths: List[str], target_directory: str, provider: str, model:str, review_type:str):
220
+ """Generate a code review in markdown format.
221
+
222
+ Args:
223
+ ctx (Context): The MCP context object.
224
+ source_directory (str): Directory containing the source files to be reviewed.
+ source_file_paths (list): List of source file paths to be reviewed.
225
+ target_directory (str): Directory to store the resulting review files.
226
+ provider (str): Name of the provider to use for LLM interactions.
227
+ model (str): Model name for code review generation.
228
+ review_type (str): Type of review ('style', 'security', 'performance', 'quality'). Default is 'quality'.
229
+
230
+ Returns:
231
+ str: Success message with path to generated files.
232
+ """
233
+ prov = self._get_provider(provider)
234
+ model = self._get_model(provider=prov, model=model)
235
+
236
+ await ctx.info(f"Generating code review of type: {review_type} - using model: {model} ...")
237
+ run_code_review(file_paths=source_file_paths,
238
+ directory_path=source_directory,
239
+ output_dir=target_directory,
240
+ review_type=review_type,
241
+ api_endpoint=prov['api_endpoint'],
242
+ api_key=prov['api_key'],
243
+ model=model)
244
+ # TODO: also include some basic info of the review results (e.g. the complete review file list)
245
+ # so that the caller gains more information about the result and file locations
246
+ await ctx.info(self.WORKFLOW_COMPLETION_MESSAGE)
247
+ return f"Successfully generated review files in {target_directory} ."
248
+
249
+
250
+ async def list_available_providers(self, ctx: Context) -> str:
251
+ """List all configured and available API providers.
252
+
253
+ Args:
254
+ ctx (Context): The MCP context object.
255
+
256
+ Returns:
257
+ str: Formatted list of configured providers.
258
+ """
259
+ providers = self.config.available_providers()
260
+ result = "# Configured providers"
261
+ for prov in providers:
262
+ prov_string = f"-{prov['name']} : type: {prov['type']} - api_endpoint: {prov['api_endpoint']}"
263
+ result = f"{result}\n{prov_string}"
264
+ await ctx.info(self.WORKFLOW_COMPLETION_MESSAGE)
265
+ return result
266
+
267
+ async def list_available_models_for_provider(self, ctx: Context, provider_name: str = "") -> str:
268
+ """List all available large language models for a specific provider.
269
+
270
+ Args:
271
+ ctx (Context): The MCP context object.
272
+ provider_name (str, optional): Name of the provider to list models for. Default is empty (uses default).
273
+
274
+ Returns:
275
+ str: Formatted list of available models and API endpoint.
276
+ """
277
+ await ctx.info(f"Retrieving endpoint information and list of available models for configured provider {provider_name} ...")
278
+ if not provider_name:
279
+ provider = self.config.get_default_provider()
280
+ else:
281
+ provider = self.config.get_provider_by_name(provider_name)
282
+
283
+ llm_api = LLMApi(api_endpoint=provider['api_endpoint'], api_key=provider['api_key'])
284
+ models = llm_api.list_models()
285
+ if not models:
286
+ return "# No models available"
287
+
288
+ api_headline = f"# Target API Endpoint\n{provider['api_endpoint']}\n"
289
+
290
+ model_list = "\n".join([f"- {model}" for model in models])
291
+ result = f"{api_headline}\n# List of available models\n{model_list}"
292
+ await ctx.info(self.WORKFLOW_COMPLETION_MESSAGE)
293
+ return result
@@ -0,0 +1,307 @@
1
+ Metadata-Version: 2.4
2
+ Name: sokrates-mcp
3
+ Version: 0.2.0
4
+ Summary: A templated MCP server for demonstration and quick start.
5
+ Author-email: Julian Weber <julianweberdev@gmail.com>
6
+ License: MIT License
7
+
8
+ Copyright (c) 2025 Julian Weber
9
+
10
+ Permission is hereby granted, free of charge, to any person obtaining a copy
11
+ of this software and associated documentation files (the "Software"), to deal
12
+ in the Software without restriction, including without limitation the rights
13
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
14
+ copies of the Software, and to permit persons to whom the Software is
15
+ furnished to do so, subject to the following conditions:
16
+
17
+ The above copyright notice and this permission notice shall be included in all
18
+ copies or substantial portions of the Software.
19
+
20
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
23
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
24
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
25
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
26
+ SOFTWARE.
27
+ Project-URL: Homepage, https://github.com/Kubementat/sokrates-mcp
28
+ Project-URL: Repository, https://github.com/Kubementat/sokrates-mcp
29
+ Keywords: mcp,llm,tools,system-monitoring,ai,prompt refinement,idea generation
30
+ Requires-Python: >=3.10
31
+ Description-Content-Type: text/markdown
32
+ License-File: LICENSE
33
+ Requires-Dist: fastmcp
34
+ Requires-Dist: sokrates
35
+ Requires-Dist: pydantic
36
+ Requires-Dist: PyYAML
37
+ Dynamic: license-file
38
+
39
+ # sokrates-mcp
40
+
41
+ An MCP server offering tools for prompt refinement and execution workflows, built on the FastMCP framework and the `sokrates` Python library.
42
+
43
+ ## Features
44
+
45
+ - Multiple provider/API support
46
+ - Available Model/Provider listing
47
+ - Prompt refinement with different types (code/default)
48
+ - External LLM processing
49
+ - Task breakdown into sub-tasks
50
+ - Create code reviews for Python source files
51
+ - Generate random ideas
52
+ - Generate ideas on a topic
53
+
54
+ Have a look at the [sokrates library](https://github.com/Kubementat/sokrates).
55
+
56
+ ## Installation & Setup
57
+
58
+ ### Prerequisites
59
+
60
+ Ensure you have:
61
+ * Python 3.10+
62
+ * uv (fast package installer)
63
+
64
+ ### Install from PyPI
65
+ ```bash
66
+ pip install sokrates-mcp
67
+
68
+ # or using uv (recommended)
69
+ ## basic version:
70
+ uv pip install sokrates-mcp
71
+ ```
72
+
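+ Because the package ships a `sokrates-mcp` console script (see the wheel's `entry_points.txt`), it can also be run ad hoc without a permanent install, which is what the uvx-based MCP client configuration further below relies on:
+ ```bash
+ uvx sokrates-mcp
+ ```
+ 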
73
+ ### Alternative - Local installation from git
74
+
75
+ 1. Clone the repository:
76
+ ```bash
77
+ git clone https://github.com/Kubementat/sokrates-mcp.git
78
+ cd sokrates-mcp
79
+ ```
80
+
81
+ 2. Install dependencies using pyproject.toml:
82
+ ```bash
83
+ uv sync
84
+ ```
85
+
86
+ ### Setup Server Configuration File
87
+
88
+ #### Via git installed version
89
+ ```bash
90
+ mkdir $HOME/.sokrates-mcp
91
+ cp config.yml.example $HOME/.sokrates-mcp/config.yml
92
+ # edit the endpoints according to your use case
93
+ vim $HOME/.sokrates-mcp/config.yml
94
+ ```
95
+
96
+ #### From scratch
97
+ Create the configuration file:
98
+ ```bash
99
+ mkdir $HOME/.sokrates-mcp
100
+ vim $HOME/.sokrates-mcp/config.yml
101
+ ```
102
+
103
+ Then use this as a template and adjust it to your use case:
104
+ ```yaml
105
+ refinement_prompt_filename: refine-prompt.md
106
+ refinement_coding_prompt_filename: refine-coding-v3.md
107
+
108
+ # providers
109
+ default_provider: local
110
+ providers:
111
+ - name: local
112
+ type: openai
113
+ api_endpoint: http://localhost:1234/v1
114
+ api_key: "not-required"
115
+ default_model: "qwen/qwen3-4b-2507"
116
+ - name: external
117
+ type: openai
118
+ api_endpoint: http://CHANGEME/v1
119
+ api_key: CHANGEME
120
+ default_model: CHANGEME
121
+ ```
122
+
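+ The two refinement prompt templates named above are resolved against the `sokrates` library's default prompts directory. According to `mcp_config.py`, the config file also accepts an optional `prompts_directory` key to load your own templates instead; the directory must contain the files referenced by `refinement_prompt_filename` and `refinement_coding_prompt_filename`:
+ ```yaml
+ # optional: load the refinement prompt templates from a custom directory
+ prompts_directory: /absolute/path/to/your/prompts
+ ```
+ 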
123
+ ### Setup as MCP server in other tools (example: LM Studio)
124
+
125
+ #### For local Git installed version
126
+ ```json
127
+ {
128
+ "mcpServers": {
129
+ "sokrates": {
130
+ "command": "uv",
131
+ "args": [
132
+ "run",
133
+ "sokrates-mcp"
134
+ ],
135
+ "cwd": "YOUR_PATH_TO_sokrates-mcp",
136
+ "timeout": 600000
137
+ }
138
+ }
139
+ }
140
+ ```
141
+
142
+ #### Via uvx
143
+ ```json
144
+ {
145
+ "mcpServers": {
146
+ "sokrates": {
147
+ "command": "uvx",
148
+ "args": [
149
+ "sokrates-mcp"
150
+ ]
151
+ }
152
+ }
153
+ }
154
+ ```
155
+
156
+ ## Usage Examples
157
+
158
+ ### Starting the Server
159
+
160
+ ```bash
161
+ uv run sokrates-mcp
162
+ ```
163
+
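+ By default the server speaks MCP over stdio. Based on the command line options defined in `main.py`, it can also be exposed over HTTP or SSE, for example:
+ ```bash
+ uv run sokrates-mcp --transport http --host 127.0.0.1 --port 8000
+ ```
+ 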
164
+ ### Listing available command line options
165
+ ```bash
166
+ uv run sokrates-mcp --help
167
+ ```
168
+
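+ The server writes its log to `~/.sokrates-mcp/server.log` (see the logging setup in `main.py`), which is useful when it is launched by another tool over stdio and has no visible console output:
+ ```bash
+ tail -f ~/.sokrates-mcp/server.log
+ ```
+ 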
169
+ ## Architecture & Technical Details
170
+
171
+ The server follows a modular design pattern:
172
+ 1. Tools are registered in `main.py` using FastMCP decorators
173
+ 2. Dependency management via pyproject.toml
174
+ 3. Configuration files stored in `$HOME/.sokrates-mcp/` directory
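+ 
+ Each tool follows the same FastMCP registration pattern. The snippet below is a trimmed, self-contained sketch of that pattern (docstrings and most parameters omitted; the real `main.py` delegates to `Workflow.refine_prompt` instead of echoing the prompt):
+ ```python
+ from fastmcp import FastMCP, Context
+ 
+ mcp = FastMCP(name="sokrates-mcp", version="0.2.0")
+ 
+ @mcp.tool(
+     name="refine_prompt",
+     description="Refines a given prompt by enriching it with additional context.",
+     tags={"prompt", "refinement"},
+ )
+ async def refine_prompt(prompt: str, ctx: Context) -> str:
+     # main.py delegates this call to Workflow.refine_prompt;
+     # the sketch simply echoes the prompt to stay self-contained.
+     await ctx.info("refining prompt ...")
+     return prompt
+ 
+ if __name__ == "__main__":
+     mcp.run(transport="stdio")
+ ```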
175
+
176
+
177
+ ## Contributing Guidelines
178
+
179
+ 1. Fork the repository and create feature branches
180
+ 2. Follow PEP8 style guide with 4-space indentation
181
+ 3. Submit pull requests with:
182
+ - Clear description of changes
183
+ - Updated tests (see Testing section)
184
+ - Documentation updates
185
+
186
+ ## Available Tools
187
+
188
+ ### main.py
189
+
190
+ - **refine_prompt**: Refines a given prompt by enriching it with additional context.
191
+ - Parameters:
192
+ - `prompt` (str): The input prompt to be refined
193
+ - `refinement_type` (str, optional): Type of refinement ('code' or 'default'). Default is 'default'
194
+ - `model` (str, optional): Model name for refinement. Default is 'default'
195
+
196
+ - **refine_and_execute_external_prompt**: Refines a prompt and executes it with an external LLM.
197
+ - Parameters:
198
+ - `prompt` (str): The input prompt to be refined and executed
199
+ - `refinement_model` (str, optional): Model for refinement. Default is 'default'
200
+ - `execution_model` (str, optional): Model for execution. Default is 'default'
201
+ - `refinement_type` (str, optional): Type of refinement ('code' or 'default'). Default is 'default'
202
+
203
+ - **handover_prompt**: Hands over a prompt to an external LLM for processing.
204
+ - Parameters:
205
+ - `prompt` (str): The prompt to be executed externally
206
+ - `model` (str, optional): Model name for execution. Default is 'default'
207
+
208
+ - **breakdown_task**: Breaks down a task into sub-tasks with complexity ratings.
209
+ - Parameters:
210
+ - `task` (str): The full task description to break down
211
+ - `model` (str, optional): Model name for processing. Default is 'default'
212
+
213
+ - **generate_random_ideas**: Invents a random topic and generates the requested number of ideas on it.
+ 
+ - **generate_ideas_on_topic**: Generates the requested number of ideas on a given topic.
+ 
+ - **generate_code_review**: Generates code reviews in markdown format for the given source files.
+ 
+ - **list_available_models_for_provider**: Lists all available large language models for a configured provider.
+ 
+ - **list_available_providers**: Lists all configured and available API providers.
214
+
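+ The tools can be called from any MCP-capable client. As an illustration only (not part of the published package), a minimal session with the FastMCP Python client could look like the sketch below; it assumes the server was started with `--transport http --port 8000` and uses the streamable-HTTP default path `/mcp`, which may differ between FastMCP versions:
+ ```python
+ import asyncio
+ from fastmcp import Client
+ 
+ async def main():
+     # Assumption: server started via `uv run sokrates-mcp --transport http --port 8000`
+     async with Client("http://127.0.0.1:8000/mcp") as client:
+         tools = await client.list_tools()
+         print("Available tools:", [tool.name for tool in tools])
+         result = await client.call_tool(
+             "refine_prompt",
+             {"prompt": "Write a Python function to sort a list", "refinement_type": "code"},
+         )
+         print(result)
+ 
+ asyncio.run(main())
+ ```
+ 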
215
+ ### mcp_config.py
216
+
217
+ - **MCPConfig** class: Manages configuration settings for the MCP server.
218
+ - Parameters:
219
+ - `config_file_path` (str, optional): Path to YAML config file
220
+ - `api_endpoint` (str, optional): API endpoint URL
221
+ - `api_key` (str, optional): API key for authentication
222
+ - `model` (str, optional): Model name
223
+
224
+ ### workflow.py
225
+
226
+ - **Workflow** class: Implements the business logic for prompt refinement and execution.
227
+ - Methods:
228
+ - `refine_prompt`: Refines a given prompt
229
+ - `refine_and_execute_external_prompt`: Refines and executes a prompt with an external LLM
230
+ - `handover_prompt`: Hands over a prompt to an external LLM for processing
231
+ - `breakdown_task`: Breaks down a task into sub-tasks
232
+ - `generate_random_ideas`, `generate_ideas_on_topic`, `generate_code_review`: Idea generation and code review workflows
+ - `list_available_models_for_provider`: Lists all available models for a provider
+ - `list_available_providers`: Lists all configured providers
233
+
234
+ ## Project Structure
235
+
236
+ - `src/sokrates_mcp/main.py`: Sets up the MCP server and registers tools
237
+ - `src/sokrates_mcp/mcp_config.py`: Configuration management
238
+ - `src/sokrates_mcp/workflow.py`: Business logic for prompt refinement and execution
239
+ - `pyproject.toml`: Dependency management
240
+
241
+
242
+ ## Script List
243
+
244
+ ### `main.py`
245
+ Sets up an MCP server using the FastMCP framework to provide tools for prompt refinement and execution workflows.
246
+ #### Usage
247
+ - `uv run sokrates-mcp` - Start the MCP server (default transport: stdio; use `--transport http --port 8000` for HTTP)
248
+ - `uv run fastmcp dev src/sokrates_mcp/main.py` - Run in development mode with auto-reload
249
+
250
+ ### `mcp_config.py`
251
+ Provides configuration management for the MCP server. Loads configuration from a YAML file and sets default values if needed.
252
+ #### Usage
253
+ - Import and use in other scripts:
254
+ ```python
255
+ from sokrates_mcp.mcp_config import MCPConfig
256
+ config = MCPConfig(api_endpoint="https://api.example.com", model="my-model")
257
+ ```
258
+
259
+ ### `workflow.py`
260
+ Implements the business logic for prompt refinement and execution workflows. Contains methods to refine prompts, execute them with external LLMs, break down tasks, etc.
261
+ #### Usage
262
+ - Import and use in other scripts:
263
+ ```python
264
+ from sokrates_mcp.workflow import Workflow
265
+ from sokrates_mcp.mcp_config import MCPConfig
266
+
267
+ config = MCPConfig()
268
+ workflow = Workflow(config)
269
+ # ctx is the fastmcp Context object that the MCP server passes into each tool call
+ result = await workflow.refine_prompt(prompt="Write a Python function to sort a list", ctx=ctx, provider="default", model="default", refinement_type="code")
270
+ ```
271
+
272
+ ### `src/mcp_client_example.py`
273
+ Demonstrates a basic Model Context Protocol (MCP) client using the fastmcp library. Defines a simple model and registers it with the client.
274
+
275
+ #### Usage
276
+ - Run as a standalone script:
277
+ ```bash
278
+ python src/mcp_client_example.py
279
+ ```
280
+ - Or use with an ASGI server like Uvicorn:
281
+ ```bash
282
+ uvicorn src.mcp_client_example:main --factory
283
+ ```
284
+
285
+ **Common Error:**
286
+ If you see "ModuleNotFoundError: fastmcp", ensure:
287
+ 1. Dependencies are installed (`uv pip install .`)
288
+ 2. Python virtual environment is activated
289
+
290
+ ## Changelog
291
+
292
+ **0.2.0 (Aug 2025)**
293
+ - First published version
294
+ - Update to latest sokrates library version
295
+ - bugfixes and cleanup
296
+ - multi provider/API support in the configuration file
297
+
298
+ **0.1.5 (July 2025)**
299
+ - Updated README with comprehensive documentation
300
+ - Added tool descriptions and usage examples
301
+ - Improved project structure overview
302
+
303
+ **0.1.0 (March 7, 2025)**
304
+ - Initial release with refinement tools
305
+ - Basic FastMCP integration
306
+
307
+ Bug reports and feature requests: [GitHub Issues](https://github.com/Kubementat/sokrates-mcp/issues)
@@ -0,0 +1,12 @@
1
+ sokrates_mcp/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
2
+ sokrates_mcp/main.py,sha256=2zIm3lkP3xJyS-_w6oJb-qovdjjCw0D4oocAapgdzVA,20697
3
+ sokrates_mcp/mcp_config.py,sha256=5LA72MwmoM8LpUNx4cUkU4e5Xif6nhL16I68JfRelAE,10089
4
+ sokrates_mcp/workflow.py,sha256=OyiLFbh3bj8fBQYPt1YNjcj9HY3v--xu03PZVmGJgig,13439
5
+ sokrates_mcp-0.2.0.dist-info/licenses/LICENSE,sha256=OgJ7nuNhaIefjDRK0wTGOErJ_c1984Eg9oUweycmal0,1068
6
+ sokrates_mcp_client/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
7
+ sokrates_mcp_client/mcp_client_example.py,sha256=L5_xH0u7lt0k0t_eiFFhN9FVU__seFhxHfRixdy14PU,3866
8
+ sokrates_mcp-0.2.0.dist-info/METADATA,sha256=vTdQwxkRk-1NaiVcIDIryvsLlBxWS6tuLGjlyIm-m8A,9475
9
+ sokrates_mcp-0.2.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
10
+ sokrates_mcp-0.2.0.dist-info/entry_points.txt,sha256=7gYOgoyRs_mE6dmwMJrAtrMns2mxv4ZbqXBznRh3sUc,56
11
+ sokrates_mcp-0.2.0.dist-info/top_level.txt,sha256=Nbwxz5Mm6LVkglOxqt4ZyEO5A6D4VjjN8c6d-fQyc3k,33
12
+ sokrates_mcp-0.2.0.dist-info/RECORD,,
@@ -0,0 +1,5 @@
1
+ Wheel-Version: 1.0
2
+ Generator: setuptools (80.9.0)
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-any
5
+
@@ -0,0 +1,2 @@
1
+ [console_scripts]
2
+ sokrates-mcp = sokrates_mcp.main:main
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2025 Julian Weber
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
@@ -0,0 +1,2 @@
1
+ sokrates_mcp
2
+ sokrates_mcp_client
File without changes
@@ -0,0 +1,97 @@
1
+ # -*- coding: utf-8 -*-
2
+ """
3
+ This script demonstrates a basic Model Context Protocol (MCP) client
4
+ using the fastmcp library. It defines a simple model and registers it
5
+ with the client, making it ready to receive requests.
6
+ """
7
+
8
+ import logging
9
+ from fastmcp import Client, Model
10
+ from fastmcp.context import Context
11
+ from fastmcp.model import ModelInput, ModelOutput
12
+
13
+ # Configure logging for better visibility of fastmcp operations
14
+ logging.basicConfig(level=logging.INFO)
15
+ logger = logging.getLogger(__name__)
16
+
17
+ class ExampleModel(Model):
18
+ """
19
+ A simple example model that processes text input and returns a modified text.
20
+ This model demonstrates how to define a model and implement its 'call' method.
21
+ """
22
+
23
+ def __init__(self):
24
+ super().__init__()
25
+ self.name = "example-model"
26
+ self.version = "1.0.0"
27
+ logger.info(f"Initialized {self.name} v{self.version}")
28
+
29
+ async def call(self, inputs: ModelInput, context: Context) -> ModelOutput:
30
+ """
31
+ The core method where the model's logic resides.
32
+ It takes ModelInput and Context, and returns ModelOutput.
33
+
34
+ Args:
35
+ inputs (ModelInput): The input data for the model.
36
+ Expected to contain a 'text' field.
37
+ context (Context): The context object providing access to
38
+ session information, logging, etc.
39
+
40
+ Returns:
41
+ ModelOutput: The output data from the model.
42
+ Contains a 'processed_text' field.
43
+ """
44
+ logger.info(f"Model '{self.name}' received a call.")
45
+
46
+ # Access input data. ModelInput is typically a dictionary-like object.
47
+ input_text = inputs.get("text", "No text provided")
48
+ logger.info(f"Input text: '{input_text}'")
49
+
50
+ # Simulate some processing
51
+ processed_text = f"Processed: {input_text.upper()} (by {self.name})"
52
+
53
+ # You can also access context information, e.g., session ID
54
+ session_id = context.session_id
55
+ logger.info(f"Processing for session ID: {session_id}")
56
+
57
+ # Return the processed output as a ModelOutput object
58
+ return ModelOutput({"processed_text": processed_text})
59
+
60
+ async def main():
61
+ """
62
+ Main function to initialize and run the fastmcp client.
63
+ """
64
+ logger.info("Starting FastMCP client setup...")
65
+
66
+ # Create an instance of the FastMCP client
67
+ # You can specify the host and port where the client will listen for requests.
68
+ # By default, it listens on 0.0.0.0:8000
69
+ client = Client(host="0.0.0.0", port=8000)
70
+ logger.info(f"FastMCP client initialized on {client.host}:{client.port}")
71
+
72
+ # Create an instance of your custom model
73
+ example_model = ExampleModel()
74
+
75
+ # Register the model with the client
76
+ # The model will be accessible via its name (e.g., "example-model")
77
+ client.register_model(example_model)
78
+ logger.info(f"Model '{example_model.name}' registered with the client.")
79
+
80
+ # Start the client. This will block and listen for incoming requests.
81
+ # For a real application, you might integrate this into a larger ASGI server
82
+ # or a systemd service.
83
+ logger.info("FastMCP client is starting to listen for requests...")
84
+ await client.start()
85
+
86
+ if __name__ == "__main__":
87
+ # To run this script, you would typically use an ASGI server like Uvicorn:
88
+ # uvicorn your_script_name:main --factory
89
+ #
90
+ # However, for a simple direct run to see it initialize, you can use:
91
+ import asyncio
92
+ asyncio.run(main())
93
+ # Note: Running directly with asyncio.run(main()) will start the server,
94
+ # but you'll need to send requests to it from another process.
95
+ # For proper testing, use `uvicorn your_script_name:client.app` after
96
+ # changing `await client.start()` to `return client.app` in `main()`
97
+ # and importing `main` as the ASGI app.