phone-a-friend-mcp-server 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,53 @@
1
+ import asyncio
2
+ import logging
3
+ import os
4
+ import sys
5
+
6
+ import click
7
+
8
+ from phone_a_friend_mcp_server.config import PhoneAFriendConfig
9
+ from phone_a_friend_mcp_server.server import serve
10
+
11
+
12
+ @click.command()
13
+ @click.option("-v", "--verbose", count=True, help="Increase verbosity")
14
+ @click.option("--api-key", help="API key for external AI services")
15
+ @click.option("--model", help="Model to use (e.g., 'gpt-4', 'anthropic/claude-3.5-sonnet')")
16
+ @click.option("--provider", help="Provider type ('openai', 'openrouter', 'anthropic', 'google')")
17
+ @click.option("--base-url", help="Base URL for API")
18
+ def main(verbose: int, api_key: str | None = None, model: str | None = None, provider: str | None = None, base_url: str | None = None) -> None:
19
+ """MCP server for Phone-a-Friend AI consultation"""
20
+ logging_level = logging.WARN
21
+ if verbose == 1:
22
+ logging_level = logging.INFO
23
+ elif verbose >= 2:
24
+ logging_level = logging.DEBUG
25
+
26
+ logging.basicConfig(level=logging_level, stream=sys.stderr)
27
+
28
+ # Read environment variables with proper precedence
29
+ config_api_key = (
30
+ api_key
31
+ or os.environ.get("OPENROUTER_API_KEY")
32
+ or os.environ.get("OPENAI_API_KEY")
33
+ or os.environ.get("ANTHROPIC_API_KEY")
34
+ or os.environ.get("GOOGLE_API_KEY")
35
+ or os.environ.get("GEMINI_API_KEY")
36
+ )
37
+ config_model = model or os.environ.get("PHONE_A_FRIEND_MODEL")
38
+ config_provider = provider or os.environ.get("PHONE_A_FRIEND_PROVIDER")
39
+ config_base_url = base_url or os.environ.get("PHONE_A_FRIEND_BASE_URL")
40
+
41
+ # Initialize configuration
42
+ try:
43
+ config = PhoneAFriendConfig(api_key=config_api_key, model=config_model, provider=config_provider, base_url=config_base_url)
44
+ except ValueError as e:
45
+ click.echo(f"Configuration error: {e}", err=True)
46
+ sys.exit(1)
47
+
48
+ # Start the server
49
+ asyncio.run(serve(config))
50
+
51
+
52
+ if __name__ == "__main__":
53
+ main()
@@ -0,0 +1,3 @@
1
+ from phone_a_friend_mcp_server import main
2
+
3
+ main()
@@ -0,0 +1 @@
1
+ # Client package for Phone-a-Friend MCP server
@@ -0,0 +1,48 @@
1
+ class PhoneAFriendConfig:
2
+ """Centralized configuration for Phone-a-Friend MCP server."""
3
+
4
+ def __init__(self, api_key: str | None = None, model: str | None = None, base_url: str | None = None, provider: str | None = None) -> None:
5
+ """Initialize configuration with provided values.
6
+
7
+ Args:
8
+ api_key: API key for external AI services
9
+ model: Model to use (e.g., 'gpt-4', 'anthropic/claude-3.5-sonnet')
10
+ base_url: Custom base URL for API (optional, providers use defaults)
11
+ provider: Provider type ('openai', 'openrouter', 'anthropic')
12
+ """
13
+ self.api_key = api_key
14
+ self.provider = provider or self._detect_provider()
15
+ self.model = model or self._get_default_model()
16
+ self.base_url = base_url # Only use if explicitly provided
17
+
18
+ # Validate required configuration
19
+ if not self.api_key:
20
+ raise ValueError(f"Missing required API key for {self.provider}. Set {self._get_env_var_name()} environment variable or pass --api-key")
21
+
22
+ def _detect_provider(self) -> str:
23
+ """Detect provider based on available environment variables."""
24
+ import os
25
+
26
+ if os.environ.get("OPENROUTER_API_KEY"):
27
+ return "openrouter"
28
+ elif os.environ.get("ANTHROPIC_API_KEY"):
29
+ return "anthropic"
30
+ elif os.environ.get("GOOGLE_API_KEY") or os.environ.get("GEMINI_API_KEY"):
31
+ return "google"
32
+ elif os.environ.get("OPENAI_API_KEY"):
33
+ return "openai"
34
+ else:
35
+ # Default to OpenAI
36
+ return "openai"
37
+
38
+ def _get_default_model(self) -> str:
39
+ """Get default model based on provider."""
40
+ models = {"openai": "o3", "openrouter": "anthropic/claude-4-opus", "anthropic": "claude-4-opus", "google": "gemini-2.5-pro-preview-05-06"}
41
+ if self.provider not in models:
42
+ raise ValueError(f"Unknown provider: {self.provider}. Supported providers: {list(models.keys())}")
43
+ return models[self.provider]
44
+
45
+ def _get_env_var_name(self) -> str:
46
+ """Get environment variable name for the provider."""
47
+ env_vars = {"openai": "OPENAI_API_KEY", "openrouter": "OPENROUTER_API_KEY", "anthropic": "ANTHROPIC_API_KEY", "google": "GOOGLE_API_KEY or GEMINI_API_KEY"}
48
+ return env_vars.get(self.provider, "OPENAI_API_KEY")
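For context, a minimal usage sketch of the `PhoneAFriendConfig` class above (assumes the package is installed; key values are placeholders):

```python
import os

from phone_a_friend_mcp_server.config import PhoneAFriendConfig

# Placeholder key for illustration only.
os.environ["OPENROUTER_API_KEY"] = "sk-or-..."

# With no explicit provider/model, the provider is detected from the
# environment variable and the provider's default model is filled in.
config = PhoneAFriendConfig(api_key=os.environ["OPENROUTER_API_KEY"])
print(config.provider)  # "openrouter"
print(config.model)     # "anthropic/claude-4-opus"

# Explicit arguments always override detection and defaults.
config = PhoneAFriendConfig(api_key="sk-...", provider="openai", model="o3")
```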
@@ -0,0 +1,87 @@
1
+ import logging
2
+ from typing import Any
3
+
4
+ import anyio
5
+ from mcp.server import Server
6
+ from mcp.server.stdio import stdio_server
7
+ from mcp.types import Prompt, Resource, ResourceTemplate, TextContent, Tool
8
+
9
+ from phone_a_friend_mcp_server.config import PhoneAFriendConfig
10
+ from phone_a_friend_mcp_server.tools.tool_manager import ToolManager
11
+
12
+ logger = logging.getLogger(__name__)
13
+
14
+
15
+ async def serve(config: PhoneAFriendConfig) -> None:
16
+ """Start the Phone-a-Friend MCP server.
17
+
18
+ Args:
19
+ config: Configuration object with settings
20
+ """
21
+ server = Server("phone-a-friend-mcp-server")
22
+ tool_manager = ToolManager(config)
23
+
24
+ @server.list_tools()
25
+ async def list_tools() -> list[Tool]:
26
+ """List all available tools."""
27
+ try:
28
+ return tool_manager.list_tools()
29
+ except Exception as e:
30
+ logger.error("Failed to list tools: %s", e)
31
+ raise
32
+
33
+ @server.list_resources()
34
+ async def list_resources() -> list[Resource]:
35
+ """List available resources (returns empty list for now)."""
36
+ logger.info("Resources list requested - returning empty list")
37
+ return []
38
+
39
+ @server.list_resource_templates()
40
+ async def list_resource_templates() -> list[ResourceTemplate]:
41
+ """List available resource templates (returns empty list for now)."""
42
+ logger.info("Resource templates list requested - returning empty list")
43
+ return []
44
+
45
+ @server.list_prompts()
46
+ async def list_prompts() -> list[Prompt]:
47
+ """List available prompts (returns empty list for now)."""
48
+ logger.info("Prompts list requested - returning empty list")
49
+ return []
50
+
51
+ @server.call_tool()
52
+ async def call_tool(name: str, arguments: dict[str, Any]) -> list[TextContent]:
53
+ """Execute a tool with the given arguments."""
54
+ try:
55
+ logger.info(f"Calling tool: {name} with arguments: {arguments}")
56
+ tool = tool_manager.get_tool(name)
57
+ result = await tool.run(**arguments)
58
+
59
+ if isinstance(result, dict):
60
+ formatted_result = ""
61
+ for key, value in result.items():
62
+ if isinstance(value, list):
63
+ formatted_result += f"{key.title()}:\n"
64
+ for item in value:
65
+ formatted_result += f" • {item}\n"
66
+ else:
67
+ formatted_result += f"{key.title()}: {value}\n"
68
+ return [TextContent(type="text", text=formatted_result.strip())]
69
+ else:
70
+ return [TextContent(type="text", text=str(result))]
71
+
72
+ except Exception as e:
73
+ logger.error("Tool execution failed: %s", e)
74
+ error_msg = f"Error executing tool '{name}': {str(e)}"
75
+ return [TextContent(type="text", text=error_msg)]
76
+
77
+ # Start the server
78
+ options = server.create_initialization_options()
79
+ async with stdio_server() as (read_stream, write_stream):
80
+ try:
81
+ logger.info("Starting Phone-a-Friend MCP server...")
82
+ await server.run(read_stream, write_stream, options, raise_exceptions=True)
83
+ except anyio.BrokenResourceError:
84
+ logger.error("BrokenResourceError: Stream was closed unexpectedly. Exiting gracefully.")
85
+ except Exception as e:
86
+ logger.error(f"Unexpected error in server.run: {e}")
87
+ raise
@@ -0,0 +1 @@
1
+ # Tools package for Phone-a-Friend MCP server
@@ -0,0 +1,34 @@
1
+ from abc import ABC, abstractmethod
2
+ from typing import Any
3
+
4
+ from phone_a_friend_mcp_server.config import PhoneAFriendConfig
5
+
6
+
7
+ class BaseTool(ABC):
8
+ """Base class for all Phone-a-Friend tools."""
9
+
10
+ def __init__(self, config: PhoneAFriendConfig):
11
+ self.config = config
12
+
13
+ @abstractmethod
14
+ async def run(self, **kwargs) -> dict[str, Any]:
15
+ """Execute the tool with given parameters."""
16
+ pass
17
+
18
+ @property
19
+ @abstractmethod
20
+ def name(self) -> str:
21
+ """Tool name."""
22
+ pass
23
+
24
+ @property
25
+ @abstractmethod
26
+ def description(self) -> str:
27
+ """Tool description."""
28
+ pass
29
+
30
+ @property
31
+ @abstractmethod
32
+ def parameters(self) -> dict[str, Any]:
33
+ """Tool parameters schema."""
34
+ pass
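For context, a minimal sketch of a concrete `BaseTool` subclass; `EchoTool` is hypothetical and not part of the package:

```python
from typing import Any

from phone_a_friend_mcp_server.tools.base_tools import BaseTool


class EchoTool(BaseTool):
    """Hypothetical example tool, shown only to illustrate the interface."""

    @property
    def name(self) -> str:
        return "echo"

    @property
    def description(self) -> str:
        return "Echo back the provided text."

    @property
    def parameters(self) -> dict[str, Any]:
        return {
            "type": "object",
            "properties": {"text": {"type": "string"}},
            "required": ["text"],
        }

    async def run(self, **kwargs) -> dict[str, Any]:
        return {"status": "success", "echo": kwargs.get("text", "")}
```

To be exposed over MCP, such a tool would also have to be registered in `ToolManager._initialize_tools` (see the tool_manager.py hunk below).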
@@ -0,0 +1,202 @@
1
+ import os
2
+ from typing import Any
3
+
4
+ import aiofiles
5
+
6
+ from phone_a_friend_mcp_server.tools.base_tools import BaseTool
7
+
8
+
9
+ class FaxAFriendTool(BaseTool):
10
+ """
11
+ Fax-a-Friend: Generate a master prompt file for manual AI consultation.
12
+
13
+ ⚠️ ONLY USE WHEN EXPLICITLY REQUESTED BY USER ⚠️
14
+
15
+ This tool creates a comprehensive master prompt and saves it to a file for manual
16
+ copy-paste into external AI interfaces. It uses the same prompt structure as the
17
+ phone_a_friend tool but requires manual intervention to get the AI response.
18
+ """
19
+
20
+ @property
21
+ def name(self) -> str:
22
+ return "fax_a_friend"
23
+
24
+ @property
25
+ def description(self) -> str:
26
+ return """🚨 **USE ONLY WHEN USER ASKS TO "fax a friend".**
27
+
28
+ Purpose: pair-programming caliber *coding help* — reviews, debugging,
29
+ refactors, design, migrations.
30
+
31
+ This tool creates a file for manual AI consultation. After file creation,
32
+ wait for the user to return with the external AI's response.
33
+
34
+ Hard restrictions:
35
+ • Generated prompt includes *only* the two context blocks you send.
36
+ • No memory, no internet, no tools.
37
+ • You must spell out every fact it should rely on.
38
+
39
+ Required I/O format:
40
+ ```
41
+ <file_tree>
42
+ .
43
+ ├── Dockerfile
44
+ ├── some_doc_file.md
45
+ ├── LICENSE
46
+ ├── pyproject.toml
47
+ ├── README.md
48
+ ├── src
49
+ │ └── some_module
50
+ │ ├── **init**.py
51
+ │ ├── **main**.py
52
+ │ ├── client
53
+ │ │ └── **init**.py
54
+ │ ├── config.py
55
+ │ ├── server.py
56
+ │ └── tools
57
+ │ ├── **init**.py
58
+ │ ├── base_tools.py
59
+ │ └── tool_manager.py
60
+ ├── tests
61
+ │ ├── **init**.py
62
+ │ └── test_tools.py
63
+ └── uv.lock
64
+ </file_tree>
65
+
66
+ <file="src/some_module/server.py">
67
+ # full source here …
68
+ </file>
69
+ ```
70
+ The generated prompt expects AI to reply in the same XML structure, adding or
71
+ replacing <file="…"> blocks as needed. Commentary goes outside those tags."""
72
+
73
+ @property
74
+ def parameters(self) -> dict[str, Any]:
75
+ return {
76
+ "type": "object",
77
+ "properties": {
78
+ "all_related_context": {
79
+ "type": "string",
80
+ "description": (
81
+ "MANDATORY. Everything the friend AI needs to see:\n"
82
+ "- The full <file_tree> block (ASCII tree).\n"
83
+ '- One or more <file="…"> blocks with the current code.\n'
84
+ "- Known constraints (Python version, allowed deps, runtime limits, etc.).\n"
85
+ "- Any failing test output or traceback.\n"
86
+ "If it's not here, the friend AI can't use it."
87
+ ),
88
+ },
89
+ "any_additional_context": {
90
+ "type": "string",
91
+ "description": (
92
+ "Optional extras that help but aren't core code:\n"
93
+ "- Style guides, architecture docs, API specs.\n"
94
+ "- Performance targets, security rules, deployment notes.\n"
95
+ "- Similar past solutions or reference snippets.\n"
96
+ "Skip it if there's nothing useful."
97
+ ),
98
+ },
99
+ "task": {
100
+ "type": "string",
101
+ "description": (
102
+ "Plain-English ask. Be surgical.\n"
103
+ "Good examples:\n"
104
+ "- Refactor synchronous Flask app to async Quart. Keep py3.10.\n"
105
+ "- Identify and fix memory leak in src/cache.py.\n"
106
+ "- Add unit tests for edge cases in utils/math.py.\n"
107
+ 'Bad: vague stuff like "make code better".'
108
+ ),
109
+ },
110
+ },
111
+ "required": ["all_related_context", "task"],
112
+ }
113
+
114
+ async def run(self, **kwargs) -> dict[str, Any]:
115
+ all_related_context = kwargs.get("all_related_context", "")
116
+ any_additional_context = kwargs.get("any_additional_context", "")
117
+ task = kwargs.get("task", "")
118
+
119
+ # Create master prompt using the same logic as phone_a_friend
120
+ master_prompt = self._create_master_prompt(all_related_context, any_additional_context, task)
121
+
122
+ try:
123
+ # Write to fax_a_friend.md in current working directory
124
+ file_path = "fax_a_friend.md"
125
+
126
+ async with aiofiles.open(file_path, "w", encoding="utf-8") as f:
127
+ await f.write(master_prompt)
128
+
129
+ # Get absolute path for user reference
130
+ abs_path = os.path.abspath(file_path)
131
+
132
+ return {
133
+ "status": "success",
134
+ "file_path": abs_path,
135
+ "file_name": "fax_a_friend.md",
136
+ "prompt_length": len(master_prompt),
137
+ "context_length": len(all_related_context + any_additional_context),
138
+ "task": task,
139
+ "instructions": self._get_manual_workflow_instructions(abs_path),
140
+ }
141
+
142
+ except Exception as e:
143
+ return {"status": "failed", "error": str(e), "file_path": "fax_a_friend.md", "context_length": len(all_related_context + any_additional_context), "task": task}
144
+
145
+ def _create_master_prompt(self, all_related_context: str, any_additional_context: str, task: str) -> str:
146
+ """Create a comprehensive prompt identical to PhoneAFriendTool's version."""
147
+
148
+ prompt_parts = [
149
+ "You are a highly capable AI assistant being consulted for critical thinking, complex reasoning and pair-programming caliber coding help.",
150
+ "You have no memory of previous conversations, so all necessary context is provided below.",
151
+ "",
152
+ "=== TASK ===",
153
+ task,
154
+ "",
155
+ "=== ALL RELATED CONTEXT ===",
156
+ all_related_context,
157
+ ]
158
+
159
+ if any_additional_context.strip():
160
+ prompt_parts.extend(
161
+ [
162
+ "",
163
+ "=== ADDITIONAL CONTEXT ===",
164
+ any_additional_context,
165
+ ]
166
+ )
167
+
168
+ prompt_parts.extend(
169
+ [
170
+ "",
171
+ "=== INSTRUCTIONS ===",
172
+ "- Analyze the code and requirements step-by-step.",
173
+ "- Show your reasoning and propose concrete changes.",
174
+ '- Provide updated code using the XML format (<file_tree> plus <file="…"> blocks).',
175
+ "- Be explicit and practical.",
176
+ "",
177
+ "Please provide your analysis and updated code:",
178
+ ]
179
+ )
180
+
181
+ return "\n".join(prompt_parts)
182
+
183
+ def _get_manual_workflow_instructions(self, file_path: str) -> str:
184
+ """Generate clear instructions for the manual workflow."""
185
+ return f"""
186
+ 🚨 MANUAL INTERVENTION REQUIRED 🚨
187
+
188
+ Your master prompt has been saved to: {file_path}
189
+
190
+ NEXT STEPS - Please follow these instructions:
191
+
192
+ 1. 📂 Open the file: {file_path}
193
+ 2. 📋 Copy the ENTIRE prompt content from the file
194
+ 3. 🤖 Paste it into your preferred AI chat interface (ChatGPT, Claude, Gemini, etc.)
195
+ 4. ⏳ Wait for the AI's response
196
+ 5. 📝 Copy the AI's complete response
197
+ 6. 🔄 Return to this conversation and provide the AI's response
198
+
199
+ The prompt is ready for any external AI service. Simply copy and paste the entire content.
200
+
201
+ 💡 TIP: You can use the same prompt with multiple AI services to compare responses!
202
+ """
@@ -0,0 +1,216 @@
1
+ from typing import Any
2
+
3
+ from pydantic_ai import Agent
4
+ from pydantic_ai.models.anthropic import AnthropicModel
5
+ from pydantic_ai.models.google import GoogleModel
6
+ from pydantic_ai.models.openai import OpenAIModel
7
+ from pydantic_ai.providers.anthropic import AnthropicProvider
8
+ from pydantic_ai.providers.google import GoogleProvider
9
+ from pydantic_ai.providers.openai import OpenAIProvider
10
+ from pydantic_ai.providers.openrouter import OpenRouterProvider
11
+
12
+ from phone_a_friend_mcp_server.tools.base_tools import BaseTool
13
+
14
+
15
+ class PhoneAFriendTool(BaseTool):
16
+ """
17
+ Phone-a-Friend: Consult an external AI for critical thinking and complex reasoning.
18
+
19
+ ⚠️ ONLY USE WHEN EXPLICITLY REQUESTED BY USER ⚠️
20
+
21
+ This tool sends your problem to an external AI model for analysis and gets back a response.
22
+ The external AI has no memory of previous conversations, so you must provide all relevant context.
23
+ """
24
+
25
+ @property
26
+ def name(self) -> str:
27
+ return "phone_a_friend"
28
+
29
+ @property
30
+ def description(self) -> str:
31
+ return """🚨 **USE ONLY WHEN USER ASKS TO "phone a friend".**
32
+
33
+ Purpose: pair-programming caliber *coding help* — reviews, debugging,
34
+ refactors, design, migrations.
35
+
36
+ Hard restrictions:
37
+ • Friend AI sees *only* the two context blocks you send.
38
+ • No memory, no internet, no tools.
39
+ • You must spell out every fact it should rely on.
40
+
41
+ Required I/O format:
42
+ ```
43
+ <file_tree>
44
+ .
45
+ ├── Dockerfile
46
+ ├── some_doc_file.md
47
+ ├── LICENSE
48
+ ├── pyproject.toml
49
+ ├── README.md
50
+ ├── src
51
+ │ └── some_module
52
+ │ ├── **init**.py
53
+ │ ├── **main**.py
54
+ │ ├── client
55
+ │ │ └── **init**.py
56
+ │ ├── config.py
57
+ │ ├── server.py
58
+ │ └── tools
59
+ │ ├── **init**.py
60
+ │ ├── base_tools.py
61
+ │ └── tool_manager.py
62
+ ├── tests
63
+ │ ├── **init**.py
64
+ │ └── test_tools.py
65
+ └── uv.lock
66
+ </file_tree>
67
+
68
+ <file="src/some_module/server.py">
69
+ # full source here …
70
+ </file>
71
+ ```
72
+ The friend AI must reply in the same XML structure, adding or
73
+ replacing <file="…"> blocks as needed. Commentary goes outside those tags."""
74
+
75
+ @property
76
+ def parameters(self) -> dict[str, Any]:
77
+ return {
78
+ "type": "object",
79
+ "properties": {
80
+ "all_related_context": {
81
+ "type": "string",
82
+ "description": (
83
+ "MANDATORY. Everything the friend AI needs to see:\n"
84
+ "- The full <file_tree> block (ASCII tree).\n"
85
+ '- One or more <file="…"> blocks with the current code.\n'
86
+ "- Known constraints (Python version, allowed deps, runtime limits, etc.).\n"
87
+ "- Any failing test output or traceback.\n"
88
+ "If it's not here, the friend AI can't use it."
89
+ ),
90
+ },
91
+ "any_additional_context": {
92
+ "type": "string",
93
+ "description": (
94
+ "Optional extras that help but aren't core code:\n"
95
+ "- Style guides, architecture docs, API specs.\n"
96
+ "- Performance targets, security rules, deployment notes.\n"
97
+ "- Similar past solutions or reference snippets.\n"
98
+ "Skip it if there's nothing useful."
99
+ ),
100
+ },
101
+ "task": {
102
+ "type": "string",
103
+ "description": (
104
+ "Plain-English ask. Be surgical.\n"
105
+ "Good examples:\n"
106
+ "- Refactor synchronous Flask app to async Quart. Keep py3.10.\n"
107
+ "- Identify and fix memory leak in src/cache.py.\n"
108
+ "- Add unit tests for edge cases in utils/math.py.\n"
109
+ 'Bad: vague stuff like "make code better".'
110
+ ),
111
+ },
112
+ },
113
+ "required": ["all_related_context", "task"],
114
+ }
115
+
116
+ async def run(self, **kwargs) -> dict[str, Any]:
117
+ all_related_context = kwargs.get("all_related_context", "")
118
+ any_additional_context = kwargs.get("any_additional_context", "")
119
+ task = kwargs.get("task", "")
120
+
121
+ # Create master prompt for external AI
122
+ master_prompt = self._create_master_prompt(all_related_context, any_additional_context, task)
123
+
124
+ try:
125
+ # Create Pydantic-AI agent with appropriate provider
126
+ agent = self._create_agent()
127
+
128
+ # Send to external AI
129
+ result = await agent.run(master_prompt)
130
+
131
+ return {
132
+ "response": result.data,
133
+ "status": "success",
134
+ "provider": self.config.provider,
135
+ "model": self.config.model,
136
+ "context_length": len(all_related_context + any_additional_context),
137
+ "task": task,
138
+ }
139
+
140
+ except Exception as e:
141
+ return {
142
+ "error": str(e),
143
+ "status": "failed",
144
+ "provider": self.config.provider,
145
+ "model": self.config.model,
146
+ "context_length": len(all_related_context + any_additional_context),
147
+ "task": task,
148
+ "master_prompt": master_prompt, # Include for debugging
149
+ }
150
+
151
+ def _create_agent(self) -> Agent:
152
+ """Create Pydantic-AI agent with appropriate provider."""
153
+ if self.config.provider == "openrouter":
154
+ # OpenRouter has its own dedicated provider
155
+ provider_kwargs = {"api_key": self.config.api_key}
156
+ if self.config.base_url:
157
+ provider_kwargs["base_url"] = self.config.base_url
158
+ provider = OpenRouterProvider(**provider_kwargs)
159
+ model = OpenAIModel(self.config.model, provider=provider)
160
+ elif self.config.provider == "anthropic":
161
+ # Use Anthropic directly
162
+ provider_kwargs = {"api_key": self.config.api_key}
163
+ provider = AnthropicProvider(**provider_kwargs)
164
+ model = AnthropicModel(self.config.model, provider=provider)
165
+ elif self.config.provider == "google":
166
+ # Use Google/Gemini directly
167
+ provider_kwargs = {"api_key": self.config.api_key}
168
+ provider = GoogleProvider(**provider_kwargs)
169
+ model = GoogleModel(self.config.model, provider=provider)
170
+ else:
171
+ # Default to OpenAI
172
+ provider_kwargs = {"api_key": self.config.api_key}
173
+ if self.config.base_url:
174
+ provider_kwargs["base_url"] = self.config.base_url
175
+ provider = OpenAIProvider(**provider_kwargs)
176
+ model = OpenAIModel(self.config.model, provider=provider)
177
+
178
+ return Agent(model)
179
+
180
+ def _create_master_prompt(self, all_related_context: str, any_additional_context: str, task: str) -> str:
181
+ """Create a comprehensive prompt for the external AI."""
182
+
183
+ prompt_parts = [
184
+ "You are a highly capable AI assistant being consulted for critical thinking, complex reasoning and pair-programming caliber coding help.",
185
+ "You have no memory of previous conversations, so all necessary context is provided below.",
186
+ "",
187
+ "=== TASK ===",
188
+ task,
189
+ "",
190
+ "=== ALL RELATED CONTEXT ===",
191
+ all_related_context,
192
+ ]
193
+
194
+ if any_additional_context.strip():
195
+ prompt_parts.extend(
196
+ [
197
+ "",
198
+ "=== ADDITIONAL CONTEXT ===",
199
+ any_additional_context,
200
+ ]
201
+ )
202
+
203
+ prompt_parts.extend(
204
+ [
205
+ "",
206
+ "=== INSTRUCTIONS ===",
207
+ "- Analyze the code and requirements step-by-step.",
208
+ "- Show your reasoning and propose concrete changes.",
209
+ '- Provide updated code using the XML format (<file_tree> plus <file="…"> blocks).',
210
+ "- Be explicit and practical.",
211
+ "",
212
+ "Please provide your analysis and updated code:",
213
+ ]
214
+ )
215
+
216
+ return "\n".join(prompt_parts)
@@ -0,0 +1,43 @@
1
+ from mcp.types import Tool
2
+
3
+ from phone_a_friend_mcp_server.config import PhoneAFriendConfig
4
+ from phone_a_friend_mcp_server.tools.base_tools import BaseTool
5
+ from phone_a_friend_mcp_server.tools.fax_tool import FaxAFriendTool
6
+ from phone_a_friend_mcp_server.tools.phone_tool import PhoneAFriendTool
7
+
8
+
9
+ class ToolManager:
10
+ """Manages all available tools for the Phone-a-Friend MCP server."""
11
+
12
+ def __init__(self, config: PhoneAFriendConfig):
13
+ self.config = config
14
+ self._tools: dict[str, BaseTool] = {}
15
+ self._initialize_tools()
16
+
17
+ def _initialize_tools(self):
18
+ """Initialize all available tools."""
19
+ tools = [
20
+ PhoneAFriendTool(self.config),
21
+ FaxAFriendTool(self.config),
22
+ ]
23
+
24
+ for tool in tools:
25
+ self._tools[tool.name] = tool
26
+
27
+ def get_tool(self, name: str) -> BaseTool:
28
+ """Get a tool by name."""
29
+ if name not in self._tools:
30
+ raise ValueError(f"Tool '{name}' not found")
31
+ return self._tools[name]
32
+
33
+ def list_tools(self) -> list[Tool]:
34
+ """List all available tools in MCP format."""
35
+ mcp_tools = []
36
+ for tool in self._tools.values():
37
+ mcp_tool = Tool(name=tool.name, description=tool.description, inputSchema=tool.parameters)
38
+ mcp_tools.append(mcp_tool)
39
+ return mcp_tools
40
+
41
+ def get_tool_names(self) -> list[str]:
42
+ """Get list of all tool names."""
43
+ return list(self._tools.keys())
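For context, a minimal sketch of how `ToolManager` is used (mirroring server.py above; the key value is a placeholder):

```python
from phone_a_friend_mcp_server.config import PhoneAFriendConfig
from phone_a_friend_mcp_server.tools.tool_manager import ToolManager

config = PhoneAFriendConfig(api_key="sk-or-...")  # placeholder key
manager = ToolManager(config)

print(manager.get_tool_names())     # ["phone_a_friend", "fax_a_friend"]
for tool in manager.list_tools():   # mcp.types.Tool objects with inputSchema
    print(tool.name)

fax = manager.get_tool("fax_a_friend")  # raises ValueError for unknown names
```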
@@ -0,0 +1,320 @@
1
+ Metadata-Version: 2.4
2
+ Name: phone-a-friend-mcp-server
3
+ Version: 0.1.0
4
+ Summary: MCP Server for Phone-a-Friend assistance
5
+ Project-URL: GitHub, https://github.com/abhishekbhakat/phone-a-friend-mcp-server
6
+ Project-URL: Issues, https://github.com/abhishekbhakat/phone-a-friend-mcp-server/issues
7
+ Author-email: Abhishek Bhakat <abhishek.bhakat@hotmail.com>
8
+ License-Expression: MIT
9
+ License-File: LICENSE
10
+ Classifier: Development Status :: 3 - Alpha
11
+ Classifier: Operating System :: OS Independent
12
+ Classifier: Programming Language :: Python :: 3.10
13
+ Classifier: Programming Language :: Python :: 3.11
14
+ Classifier: Programming Language :: Python :: 3.12
15
+ Requires-Python: >=3.11
16
+ Requires-Dist: aiofiles>=24.1.0
17
+ Requires-Dist: aiohttp>=3.12.7
18
+ Requires-Dist: click>=8.2.1
19
+ Requires-Dist: mcp>=1.9.2
20
+ Requires-Dist: pydantic-ai-slim[anthropic,google,openai]>=0.2.14
21
+ Requires-Dist: pydantic>=2.11.5
22
+ Requires-Dist: pyyaml>=6.0.0
23
+ Provides-Extra: dev
24
+ Requires-Dist: build>=1.2.2.post1; extra == 'dev'
25
+ Requires-Dist: pre-commit>=4.2.0; extra == 'dev'
26
+ Requires-Dist: pytest-asyncio>=0.26.0; extra == 'dev'
27
+ Requires-Dist: pytest-mock>=3.14.1; extra == 'dev'
28
+ Requires-Dist: pytest>=8.3.4; extra == 'dev'
29
+ Requires-Dist: ruff>=0.11.12; extra == 'dev'
30
+ Description-Content-Type: text/markdown
31
+
32
+ # Phone-a-Friend MCP Server 🧠📞
33
+
34
+ An AI-to-AI consultation system that enables one AI to "phone a friend" (another AI) for critical thinking, long context reasoning, and complex problem solving via OpenRouter, OpenAI, Anthropic, or Google.
35
+
36
+ ## The Problem 🤔
37
+
38
+ Sometimes an AI encounters complex problems that require:
39
+ - **Deep critical thinking** beyond immediate capabilities
40
+ - **Long context reasoning** with extensive information
41
+ - **Multi-step analysis** that benefits from external perspective
42
+ - **Specialized expertise** from different AI models
43
+
44
+ ## The Solution
45
+
46
+ Phone-a-Friend MCP Server creates a **two-step consultation process**:
47
+
48
+ 1. **Context + Reasoning**: Package all relevant context and send to external AI for deep analysis
49
+ 2. **Extract Actionable Insights**: Process the reasoning response into usable format for the primary AI
50
+
51
+ This enables AI systems to leverage other AI models as "consultants" for complex reasoning tasks.
52
+
53
+ ## Architecture 🏗️
54
+
55
+ ```
56
+ Primary AI → Phone-a-Friend MCP → Provider API (OpenRouter / OpenAI / Anthropic / Google) → External AI → Processed Response → Primary AI
57
+ ```
58
+
59
+ **Sequential Workflow:**
60
+ 1. `analyze_context` - Gather and structure all relevant context
61
+ 2. `get_critical_thinking` - Send context to external AI via OpenRouter for reasoning
62
+ 3. `extract_actionable_insights` - Process response into actionable format
63
+
64
+ ## When to Use 🎯
65
+
66
+ **Ideal for:**
67
+ - Complex multi-step problems requiring deep analysis
68
+ - Situations needing long context reasoning (>100k tokens)
69
+ - Cross-domain expertise consultation
70
+ - Critical decision-making with high stakes
71
+ - Problems requiring multiple perspectives
72
+
73
+ **Not needed for:**
74
+ - Simple factual questions
75
+ - Basic reasoning tasks
76
+ - Quick responses
77
+ - Well-defined procedural tasks
78
+
79
+ ## Installation 🚀
80
+
81
+ 1. Clone the repository:
82
+ ```bash
83
+ git clone https://github.com/abhishekbhakat/phone-a-friend-mcp-server.git
84
+ cd phone-a-friend-mcp-server
85
+ ```
86
+
87
+ 2. Install dependencies:
88
+ ```bash
89
+ uv pip install -e .
90
+ ```
91
+
92
+ 3. Configure your preferred AI provider:
93
+
94
+ **OpenRouter (recommended - access to multiple models):**
95
+ ```bash
96
+ export OPENROUTER_API_KEY="your-openrouter-key"
97
+ # Model will auto-select based on provider
98
+ ```
99
+
100
+ **OpenAI:**
101
+ ```bash
102
+ export OPENAI_API_KEY="your-openai-key"
103
+ # Uses latest available model by default
104
+ ```
105
+
106
+ **Anthropic:**
107
+ ```bash
108
+ export ANTHROPIC_API_KEY="your-anthropic-key"
109
+ # Uses latest available model by default
110
+ ```
111
+
112
+ **Google/Gemini:**
113
+ ```bash
114
+ export GOOGLE_API_KEY="your-google-key" # or GEMINI_API_KEY
115
+ # Uses latest available model by default
116
+ ```
117
+
118
+ ## Usage 💡
119
+
120
+ ### Command Line
121
+ ```bash
122
+ # Start the server
123
+ phone-a-friend-mcp-server
124
+
125
+ # With verbose logging
126
+ phone-a-friend-mcp-server -v
127
+
128
+ # With specific provider (uses optimal model automatically)
129
+ phone-a-friend-mcp-server --provider anthropic
130
+ phone-a-friend-mcp-server --provider google
131
+
132
+ # Override with custom model if needed
133
+ phone-a-friend-mcp-server --provider anthropic --model "your-preferred-model"
134
+ ```
135
+
136
+ ### Environment Variables
137
+ ```bash
138
+ # Auto-detects provider based on available API keys
139
+ export OPENROUTER_API_KEY="your-openrouter-key" # Preferred
140
+ export OPENAI_API_KEY="your-openai-key" # Default fallback
141
+ export ANTHROPIC_API_KEY="your-anthropic-key" # Direct Anthropic
142
+ export GOOGLE_API_KEY="your-google-key" # Google/Gemini
143
+
144
+ # Optional overrides (only if you want to override auto-selection)
145
+ export PHONE_A_FRIEND_MODEL="your-preferred-model"
146
+ export PHONE_A_FRIEND_PROVIDER="your-preferred-provider"
147
+ ```
148
+
149
+ ## Model Selection 🤖
150
+
151
+ The system automatically selects the most capable model for each provider:
152
+ - **OpenAI**: Latest reasoning model
153
+ - **Anthropic**: Latest Claude model
154
+ - **Google**: Latest Gemini Pro model
155
+ - **OpenRouter**: Access to latest models from all providers
156
+
157
+ You can override the auto-selection by setting `PHONE_A_FRIEND_MODEL` environment variable or using the `--model` CLI option.
158
+
159
+ ## Available Tools 🛠️
160
+
161
+ ### phone_a_friend
162
+ Consult an external AI for critical thinking and complex reasoning via the configured provider (OpenRouter, OpenAI, Anthropic, or Google).
163
+
164
+ **IMPORTANT:** The external AI is very smart but has NO MEMORY of previous conversations.
165
+ The quality of the response depends ENTIRELY on the quality and completeness of the context you provide.
166
+
167
+ **Parameters:**
168
+ - `all_related_context` (required): ALL context directly related to the problem. Include:
169
+ - Background information and history
170
+ - Previous attempts and their outcomes
171
+ - Stakeholders and their perspectives
172
+ - Constraints, requirements, and limitations
173
+ - Current situation and circumstances
174
+ - Any relevant data, metrics, or examples
175
+ - Timeline and deadlines
176
+ - Success criteria and goals
177
+
178
+ - `any_additional_context` (optional): ANY additional context that might be helpful. Include:
179
+ - Relevant documentation, specifications, or guidelines
180
+ - Industry standards or best practices
181
+ - Similar cases or precedents
182
+ - Technical details or domain knowledge
183
+ - Regulatory or compliance requirements
184
+ - Tools, resources, or technologies available
185
+ - Budget or resource constraints
186
+ - Organizational context or culture
187
+
188
+ - `task` (required): The specific task or question for the external AI. Be clear about:
189
+ - What exactly you need help with
190
+ - What type of analysis or reasoning you want
191
+ - What format you prefer for the response
192
+ - What decisions need to be made
193
+ - What problems need to be solved
194
+
195
+ **Example Usage:**
196
+ ```
197
+ all_related_context: "We're a SaaS startup with 50 employees. Our customer churn rate increased from 5% to 12% over the last quarter. We recently changed our pricing model and added new features. Customer support tickets increased 40%. Our main competitors are offering similar features at lower prices."
198
+
199
+ any_additional_context: "Industry benchmark for SaaS churn is 6-8%. Our pricing increased by 30%. New features include AI analytics and advanced reporting. Customer feedback mentions complexity and cost concerns."
200
+
201
+ task: "Analyze the churn increase and provide a comprehensive action plan to reduce it back to 5% within 6 months. Include specific strategies, timeline, and success metrics."
202
+ ```
203
+
204
+ The system will automatically route this to the most capable AI model available based on your configured provider.
205
+
206
+ ## Claude Desktop Configuration 🖥️
207
+
208
+ To use Phone-a-Friend MCP server with Claude Desktop, add this configuration to your `claude_desktop_config.json` file:
209
+
210
+ ### Configuration File Location
211
+ - **macOS**: `~/Library/Application Support/Claude/claude_desktop_config.json`
212
+ - **Windows**: `%APPDATA%\Claude\claude_desktop_config.json`
213
+
214
+ ### Configuration
215
+
216
+ **Option 1: Using uv (Recommended)**
217
+ ```json
218
+ {
219
+ "mcpServers": {
220
+ "phone-a-friend": {
221
+ "command": "uvx",
222
+ "args": [
223
+ "run",
224
+ "--refresh",
225
+ "phone-a-friend-mcp-server",
226
+ ],
227
+ "env": {
228
+ "OPENROUTER_API_KEY": "your-openrouter-api-key",
229
+ "PHONE_A_FRIEND_MODEL": "anthropic/claude-4-opus"
230
+ }
231
+ }
232
+ }
233
+ }
234
+ ```
235
+
236
+ ### Environment Variables in Configuration
237
+
238
+ You can configure different AI providers directly in the Claude Desktop config:
239
+
240
+ ```json
241
+ {
242
+ "mcpServers": {
243
+ "phone-a-friend": {
244
+ "command": "phone-a-friend-mcp-server",
245
+ "env": {
246
+ "OPENROUTER_API_KEY": "your-openrouter-api-key",
247
+ "PHONE_A_FRIEND_MODEL": "anthropic/claude-4-opus"
248
+ }
249
+ }
250
+ }
251
+ }
252
+ ```
253
+
254
+ **Alternative Providers:**
255
+ ```json
256
+ {
257
+ "mcpServers": {
258
+ "phone-a-friend-openai": {
259
+ "command": "phone-a-friend-mcp-server",
260
+ "env": {
261
+ "OPENAI_API_KEY": "your-openai-api-key"
262
+ }
263
+ },
264
+ "phone-a-friend-anthropic": {
265
+ "command": "phone-a-friend-mcp-server",
266
+ "env": {
267
+ "ANTHROPIC_API_KEY": "your-anthropic-api-key"
268
+ }
269
+ },
270
+ "phone-a-friend-google": {
271
+ "command": "phone-a-friend-mcp-server",
272
+ "env": {
273
+ "GOOGLE_API_KEY": "your-google-api-key"
274
+ }
275
+ }
276
+ }
277
+ }
278
+ ```
279
+
280
+ ### Setup Steps
281
+
282
+ 1. **Install Phone-a-Friend MCP Server** (see Installation section above)
283
+ 2. **Open Claude Desktop Settings** → Developer → Edit Config
284
+ 3. **Add the configuration** (choose one of the options above)
285
+ 4. **Replace paths and API keys** with your actual values
286
+ 5. **Restart Claude Desktop**
287
+ 6. **Look for the 🔨 hammer icon** in the input box to confirm the server is connected
288
+
289
+ ### Troubleshooting
290
+
291
+ If the server doesn't appear in Claude Desktop:
292
+
293
+ 1. **Check logs**:
294
+ - macOS: `~/Library/Logs/Claude/mcp*.log`
295
+ - Windows: `%APPDATA%\Claude\logs\mcp*.log`
296
+
297
+ 2. **Verify paths** are absolute and correct
298
+ 3. **Test manually** in terminal:
299
+ ```bash
300
+ phone-a-friend-mcp-server -v
301
+ ```
302
+ 4. **Restart Claude Desktop** completely
303
+ 5. **Check API keys** are valid and have sufficient credits
304
+
305
+ ## Development 🔧
306
+
307
+ ### Running Tests
308
+ ```bash
309
+ pytest
310
+ ```
311
+
312
+ ### Code Formatting
313
+ ```bash
314
+ ruff format .
315
+ ruff check .
316
+ ```
317
+
318
+ ## License 📄
319
+
320
+ MIT License - see LICENSE file for details.
@@ -0,0 +1,15 @@
1
+ phone_a_friend_mcp_server/__init__.py,sha256=RaayGu6L95bFNEioVLZwFifnKMl9-yhUU7glBInuqXA,1895
2
+ phone_a_friend_mcp_server/__main__.py,sha256=A-8-jkY2FK2foabew5I-Wk2A54IwzWZcydlQKfiR-p4,51
3
+ phone_a_friend_mcp_server/config.py,sha256=Wfs68Zw7xXhAQ-77z3gblqsnmqO5bed-f2ggJkvgzUM,2356
4
+ phone_a_friend_mcp_server/server.py,sha256=ppx8QxQvJihcOzJkrJFlh9qyZ0fvI_eGP0TgYUC0Vcw,3394
5
+ phone_a_friend_mcp_server/client/__init__.py,sha256=fsa8DXjz4rzYXmOUAdLdTpTwPSlZ3zobmBGXqnCEaWs,47
6
+ phone_a_friend_mcp_server/tools/__init__.py,sha256=jtuvmcStXzbaM8wuhOKC8M8mBqDjHr-ypZ2ct1Rgi7Q,46
7
+ phone_a_friend_mcp_server/tools/base_tools.py,sha256=DMjFq0E3TO9a9I7QY4wQ_B4-SntdXzSZzrYymFzSmVE,765
8
+ phone_a_friend_mcp_server/tools/fax_tool.py,sha256=vzcGITygR49q9cu7Fw7SYcER7u_bKY6FKfvwBGKrRGs,7573
9
+ phone_a_friend_mcp_server/tools/phone_tool.py,sha256=zvZOU9TgC7PR6nm7rj13PPH0dRnBN9XYk2bf2frIWpE,8356
10
+ phone_a_friend_mcp_server/tools/tool_manager.py,sha256=VVtENC-n3D4GV6Cy3l9--30SJi06mJdyEiG7F_mfP7I,1474
11
+ phone_a_friend_mcp_server-0.1.0.dist-info/METADATA,sha256=jXvQP4I_B2uhleQWo-gIcvWntCEJfeVe0Bz2VxLI_sE,10048
12
+ phone_a_friend_mcp_server-0.1.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
13
+ phone_a_friend_mcp_server-0.1.0.dist-info/entry_points.txt,sha256=c_08XI-vG07VmUT3mtzyuCQjaus5l1NBl4q00Q3jLug,86
14
+ phone_a_friend_mcp_server-0.1.0.dist-info/licenses/LICENSE,sha256=-8bInetillKZC0qZDT8RWYIOrph3HIU5cr5N4Pg7bBE,1065
15
+ phone_a_friend_mcp_server-0.1.0.dist-info/RECORD,,
@@ -0,0 +1,4 @@
1
+ Wheel-Version: 1.0
2
+ Generator: hatchling 1.27.0
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-any
@@ -0,0 +1,2 @@
1
+ [console_scripts]
2
+ phone-a-friend-mcp-server = phone_a_friend_mcp_server.__main__:main
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2025 Abhishek
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.