phone-a-friend-mcp-server 0.1.0__py3-none-any.whl → 0.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- phone_a_friend_mcp_server/__init__.py +4 -7
- phone_a_friend_mcp_server/config.py +48 -8
- phone_a_friend_mcp_server/server.py +18 -13
- phone_a_friend_mcp_server/tools/fax_tool.py +62 -35
- phone_a_friend_mcp_server/tools/phone_tool.py +39 -40
- phone_a_friend_mcp_server/utils/__init__.py +1 -0
- phone_a_friend_mcp_server/utils/context_builder.py +93 -0
- phone_a_friend_mcp_server-0.2.0.dist-info/METADATA +176 -0
- phone_a_friend_mcp_server-0.2.0.dist-info/RECORD +17 -0
- phone_a_friend_mcp_server-0.1.0.dist-info/METADATA +0 -320
- phone_a_friend_mcp_server-0.1.0.dist-info/RECORD +0 -15
- {phone_a_friend_mcp_server-0.1.0.dist-info → phone_a_friend_mcp_server-0.2.0.dist-info}/WHEEL +0 -0
- {phone_a_friend_mcp_server-0.1.0.dist-info → phone_a_friend_mcp_server-0.2.0.dist-info}/entry_points.txt +0 -0
- {phone_a_friend_mcp_server-0.1.0.dist-info → phone_a_friend_mcp_server-0.2.0.dist-info}/licenses/LICENSE +0 -0

phone_a_friend_mcp_server/__init__.py
@@ -15,7 +15,8 @@ from phone_a_friend_mcp_server.server import serve
 @click.option("--model", help="Model to use (e.g., 'gpt-4', 'anthropic/claude-3.5-sonnet')")
 @click.option("--provider", help="Provider type ('openai', 'openrouter', 'anthropic', 'google')")
 @click.option("--base-url", help="Base URL for API")
-def main(verbose: int, api_key: str = None, model: str = None, provider: str = None, base_url: str = None) -> None:
+@click.option("--temperature", type=float, help="Temperature for the model (0.0-2.0). Lower values = more deterministic, higher = more creative")
+def main(verbose: int, api_key: str = None, model: str = None, provider: str = None, base_url: str = None, temperature: float = None) -> None:
     """MCP server for Phone-a-Friend AI consultation"""
     logging_level = logging.WARN
     if verbose == 1:
@@ -25,7 +26,6 @@ def main(verbose: int, api_key: str = None, model: str = None, provider: str = N

     logging.basicConfig(level=logging_level, stream=sys.stderr)

-    # Read environment variables with proper precedence
     config_api_key = (
         api_key
         or os.environ.get("OPENROUTER_API_KEY")
@@ -37,15 +37,12 @@ def main(verbose: int, api_key: str = None, model: str = None, provider: str = N
     config_model = model or os.environ.get("PHONE_A_FRIEND_MODEL")
     config_provider = provider or os.environ.get("PHONE_A_FRIEND_PROVIDER")
     config_base_url = base_url or os.environ.get("PHONE_A_FRIEND_BASE_URL")
-
-    # Initialize configuration
+    config_temperature = temperature
     try:
-        config = PhoneAFriendConfig(api_key=config_api_key, model=config_model, provider=config_provider, base_url=config_base_url)
+        config = PhoneAFriendConfig(api_key=config_api_key, model=config_model, provider=config_provider, base_url=config_base_url, temperature=config_temperature)
     except ValueError as e:
         click.echo(f"Configuration error: {e}", err=True)
         sys.exit(1)
-
-    # Start the server
     asyncio.run(serve(config))



phone_a_friend_mcp_server/config.py
@@ -1,28 +1,30 @@
+import os
+
+
 class PhoneAFriendConfig:
     """Centralized configuration for Phone-a-Friend MCP server."""

-    def __init__(self, api_key: str | None = None, model: str | None = None, base_url: str | None = None, provider: str | None = None) -> None:
+    def __init__(self, api_key: str | None = None, model: str | None = None, base_url: str | None = None, provider: str | None = None, temperature: float | None = None) -> None:
         """Initialize configuration with provided values.

         Args:
             api_key: API key for external AI services
             model: Model to use (e.g., 'gpt-4', 'anthropic/claude-3.5-sonnet')
             base_url: Custom base URL for API (optional, providers use defaults)
-            provider: Provider type ('openai', 'openrouter', 'anthropic')
+            provider: Provider type ('openai', 'openrouter', 'anthropic', 'google')
+            temperature: Temperature value for the model (0.0-2.0), overrides defaults
         """
         self.api_key = api_key
         self.provider = provider or self._detect_provider()
         self.model = model or self._get_default_model()
-        self.base_url = base_url
+        self.base_url = base_url
+        self.temperature = self._validate_temperature(temperature)

-        # Validate required configuration
         if not self.api_key:
             raise ValueError(f"Missing required API key for {self.provider}. Set {self._get_env_var_name()} environment variable or pass --api-key")

     def _detect_provider(self) -> str:
         """Detect provider based on available environment variables."""
-        import os
-
         if os.environ.get("OPENROUTER_API_KEY"):
             return "openrouter"
         elif os.environ.get("ANTHROPIC_API_KEY"):
@@ -32,12 +34,11 @@ class PhoneAFriendConfig:
         elif os.environ.get("OPENAI_API_KEY"):
             return "openai"
         else:
-            # Default to OpenAI
             return "openai"

     def _get_default_model(self) -> str:
         """Get default model based on provider."""
-        models = {"openai": "o3", "openrouter": "anthropic/claude-4-opus", "anthropic": "claude-4-opus", "google": "gemini-2.5-pro-preview-05-06"}
+        models = {"openai": "o3", "openrouter": "anthropic/claude-4-opus", "anthropic": "claude-4-opus", "google": "gemini-2.5-pro-preview-06-05"}
         if self.provider not in models:
             raise ValueError(f"Unknown provider: {self.provider}. Supported providers: {list(models.keys())}")
         return models[self.provider]
@@ -46,3 +47,42 @@ class PhoneAFriendConfig:
         """Get environment variable name for the provider."""
         env_vars = {"openai": "OPENAI_API_KEY", "openrouter": "OPENROUTER_API_KEY", "anthropic": "ANTHROPIC_API_KEY", "google": "GOOGLE_API_KEY or GEMINI_API_KEY"}
         return env_vars.get(self.provider, "OPENAI_API_KEY")
+
+    def _validate_temperature(self, temperature: float | None) -> float | None:
+        """Validate temperature value or get from environment variable."""
+        temp_value = temperature
+        if temp_value is None:
+            env_temp = os.environ.get("PHONE_A_FRIEND_TEMPERATURE")
+            if env_temp is not None:
+                try:
+                    temp_value = float(env_temp)
+                except ValueError:
+                    raise ValueError(f"Invalid temperature value in PHONE_A_FRIEND_TEMPERATURE: {env_temp}")
+
+        if temp_value is None:
+            temp_value = self._get_default_temperature_for_model()
+
+        if temp_value is not None:
+            if not isinstance(temp_value, int | float):
+                raise ValueError(f"Temperature must be a number, got {type(temp_value).__name__}")
+            if not (0.0 <= temp_value <= 2.0):
+                raise ValueError(f"Temperature must be between 0.0 and 2.0, got {temp_value}")
+
+        return temp_value
+
+    def _get_default_temperature_for_model(self) -> float | None:
+        """Get default temperature for specific models that benefit from it."""
+        default_temperatures = {
+            "gemini-2.5-pro-preview-06-05": 0.0,
+            "gemini-2.5-pro": 0.0,
+        }
+
+        return default_temperatures.get(self.model)
+
+    def get_temperature(self) -> float | None:
+        """Get the temperature setting for the current model.
+
+        Returns:
+            Temperature value if set, None otherwise
+        """
+        return self.temperature

phone_a_friend_mcp_server/server.py
@@ -12,6 +12,22 @@ from phone_a_friend_mcp_server.tools.tool_manager import ToolManager
 logger = logging.getLogger(__name__)


+def _format_tool_result(result: Any) -> str:
+    """Format tool result for display."""
+    if isinstance(result, dict):
+        formatted_result = ""
+        for key, value in result.items():
+            if isinstance(value, list):
+                formatted_result += f"{key.title()}:\n"
+                for item in value:
+                    formatted_result += f" • {item}\n"
+            else:
+                formatted_result += f"{key.title()}: {value}\n"
+        return formatted_result.strip()
+    else:
+        return str(result)
+
+
 async def serve(config: PhoneAFriendConfig) -> None:
     """Start the Phone-a-Friend MCP server.

@@ -55,19 +71,8 @@ async def serve(config: PhoneAFriendConfig) -> None:
             logger.info(f"Calling tool: {name} with arguments: {arguments}")
             tool = tool_manager.get_tool(name)
             result = await tool.run(**arguments)
-
-            if isinstance(result, dict):
-                formatted_result = ""
-                for key, value in result.items():
-                    if isinstance(value, list):
-                        formatted_result += f"{key.title()}:\n"
-                        for item in value:
-                            formatted_result += f" • {item}\n"
-                    else:
-                        formatted_result += f"{key.title()}: {value}\n"
-                return [TextContent(type="text", text=formatted_result.strip())]
-            else:
-                return [TextContent(type="text", text=str(result))]
+            formatted_result = _format_tool_result(result)
+            return [TextContent(type="text", text=formatted_result)]

         except Exception as e:
             logger.error("Tool execution failed: %s", e)
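
`_format_tool_result` is what the `call_tool` handler now delegates to: dict results become `Key: value` lines, list values become bulleted sub-lines, and anything else falls back to `str()`. A rough illustration with a made-up result dict (the keys here are hypothetical, and the helper is private to `server.py`):

```python
from phone_a_friend_mcp_server.server import _format_tool_result

result = {"status": "success", "next_steps": ["Open fax_a_friend.md", "Paste it into the external AI"]}
print(_format_tool_result(result))
# Status: success
# Next_Steps:
#  • Open fax_a_friend.md
#  • Paste it into the external AI
```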

phone_a_friend_mcp_server/tools/fax_tool.py
@@ -4,6 +4,7 @@ from typing import Any
 import aiofiles

 from phone_a_friend_mcp_server.tools.base_tools import BaseTool
+from phone_a_friend_mcp_server.utils.context_builder import build_code_context


 class FaxAFriendTool(BaseTool):
@@ -23,16 +24,22 @@ class FaxAFriendTool(BaseTool):

     @property
     def description(self) -> str:
-        return """
+        return """🚨🚨🚨 **EXCLUSIVE USE ONLY** 🚨🚨🚨
+
+**USE ONLY WHEN USER EXPLICITLY ASKS TO "fax a friend"**
+**DO NOT use as fallback if phone_a_friend fails**
+**DO NOT auto-switch between fax/phone tools**
+**If this tool fails, ask user for guidance - do NOT try phone_a_friend**

 Purpose: pair-programming caliber *coding help* — reviews, debugging,
 refactors, design, migrations.

 This tool creates a file for manual AI consultation. After file creation,
 wait for the user to return with the external AI's response.
+Replies must be exhaustively detailed. Do **NOT** include files ignored by .gitignore (e.g., *.pyc).

 Hard restrictions:
-  • Generated prompt includes *only* the
+  • Generated prompt includes *only* the context you provide.
   • No memory, no internet, no tools.
   • You must spell out every fact it should rely on.

@@ -78,22 +85,17 @@ replacing <file="…"> blocks as needed. Commentary goes outside those tags."""
                 "all_related_context": {
                     "type": "string",
                     "description": (
-                        "MANDATORY.
-                        "
-
-                        "- Known constraints (Python version, allowed deps, runtime limits, etc.).\n"
-                        "- Any failing test output or traceback.\n"
-                        "If it's not here, the friend AI can't use it."
+                        "MANDATORY. General, non-code context for the friend AI. "
+                        "Include known constraints (Python version, allowed deps, etc.), "
+                        "failing test output, or tracebacks. DO NOT include file contents here."
                     ),
                 },
-                "
-                    "type": "
+                "file_list": {
+                    "type": "array",
+                    "items": {"type": "string"},
                     "description": (
-                        "
-                        "
-                        "- Performance targets, security rules, deployment notes.\n"
-                        "- Similar past solutions or reference snippets.\n"
-                        "Skip it if there's nothing useful."
+                        "MANDATORY. A list of file paths or glob patterns to be included in the code context. "
+                        "The tool will automatically read these files, filter them against .gitignore, and build the context."
                     ),
                 },
                 "task": {
@@ -107,42 +109,53 @@ replacing <file="…"> blocks as needed. Commentary goes outside those tags."""
                         'Bad: vague stuff like "make code better".'
                     ),
                 },
+                "output_directory": {
+                    "type": "string",
+                    "description": (
+                        "Directory path where the fax_a_friend.md file will be created.\n"
+                        "Recommended: Use the user's current working directory for convenience.\n"
+                        "Must be a valid, writable directory path.\n"
+                        "Examples: '/tmp', '~/Documents', './output', '/Users/username/Desktop'"
+                    ),
+                },
             },
-            "required": ["all_related_context", "task"],
+            "required": ["all_related_context", "file_list", "task", "output_directory"],
         }

     async def run(self, **kwargs) -> dict[str, Any]:
         all_related_context = kwargs.get("all_related_context", "")
-
+        file_list = kwargs.get("file_list", [])
         task = kwargs.get("task", "")
+        output_directory = kwargs.get("output_directory", "")

-
-        master_prompt = self._create_master_prompt(all_related_context,
+        code_context = build_code_context(file_list)
+        master_prompt = self._create_master_prompt(all_related_context, code_context, task)

         try:
-
-
+            output_dir = self._prepare_output_directory(output_directory)
+
+            file_path = os.path.join(output_dir, "fax_a_friend.md")

             async with aiofiles.open(file_path, "w", encoding="utf-8") as f:
                 await f.write(master_prompt)

-            # Get absolute path for user reference
             abs_path = os.path.abspath(file_path)

             return {
                 "status": "success",
                 "file_path": abs_path,
                 "file_name": "fax_a_friend.md",
+                "output_directory": output_dir,
                 "prompt_length": len(master_prompt),
-                "context_length": len(
+                "context_length": len(master_prompt),
                 "task": task,
                 "instructions": self._get_manual_workflow_instructions(abs_path),
             }

         except Exception as e:
-            return {"status": "failed", "error": str(e), "
+            return {"status": "failed", "error": str(e), "output_directory": output_directory, "context_length": len(master_prompt), "task": task}

-    def _create_master_prompt(self, all_related_context: str,
+    def _create_master_prompt(self, all_related_context: str, code_context: str, task: str) -> str:
         """Create a comprehensive prompt identical to PhoneAFriendTool's version."""

         prompt_parts = [
@@ -152,23 +165,19 @@ replacing <file="…"> blocks as needed. Commentary goes outside those tags."""
             "=== TASK ===",
             task,
             "",
-            "===
+            "=== GENERAL CONTEXT ===",
             all_related_context,
+            "",
+            "=== CODE CONTEXT ===",
+            code_context,
         ]

-        if any_additional_context.strip():
-            prompt_parts.extend(
-                [
-                    "",
-                    "=== ADDITIONAL CONTEXT ===",
-                    any_additional_context,
-                ]
-            )
-
         prompt_parts.extend(
             [
                 "",
                 "=== INSTRUCTIONS ===",
+                "- Provide exhaustive, step-by-step reasoning.",
+                "- Never include files matching .gitignore patterns.",
                 "- Analyze the code and requirements step-by-step.",
                 "- Show your reasoning and propose concrete changes.",
                 '- Provide updated code using the XML format (<file_tree> plus <file="…"> blocks).',
@@ -180,6 +189,24 @@ replacing <file="…"> blocks as needed. Commentary goes outside those tags."""

         return "\n".join(prompt_parts)

+    def _prepare_output_directory(self, output_directory: str) -> str:
+        """Validate and prepare the output directory."""
+        if not output_directory:
+            raise ValueError("output_directory parameter is required")
+
+        expanded_path = os.path.expanduser(output_directory)
+        resolved_path = os.path.abspath(expanded_path)
+
+        try:
+            os.makedirs(resolved_path, exist_ok=True)
+        except OSError as e:
+            raise ValueError(f"Cannot create directory '{resolved_path}': {e}")
+
+        if not os.access(resolved_path, os.W_OK):
+            raise ValueError(f"Directory '{resolved_path}' is not writable")
+
+        return resolved_path
+
     def _get_manual_workflow_instructions(self, file_path: str) -> str:
         """Generate clear instructions for the manual workflow."""
         return f"""
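
With the schema change, a `fax_a_friend` call now carries four required fields; the tool expands the globs itself, filters against `.gitignore`, and writes `fax_a_friend.md` into the output directory (created if missing). A plausible invocation sketch; the constructor signature is assumed from the tools' use of `self.config`, and the paths and key are illustrative:

```python
import asyncio

from phone_a_friend_mcp_server.config import PhoneAFriendConfig
from phone_a_friend_mcp_server.tools.fax_tool import FaxAFriendTool


async def main() -> None:
    config = PhoneAFriendConfig(api_key="sk-test", provider="openai")
    tool = FaxAFriendTool(config)  # assumption: BaseTool takes the config object
    result = await tool.run(
        all_related_context="Python 3.11, failing pytest output pasted here ...",
        file_list=["phone_a_friend_mcp_server/**/*.py"],
        task="Review the temperature handling for edge cases.",
        output_directory="./fax-output",
    )
    print(result["status"], result.get("file_path"))


asyncio.run(main())
```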

phone_a_friend_mcp_server/tools/phone_tool.py
@@ -10,6 +10,7 @@ from pydantic_ai.providers.openai import OpenAIProvider
 from pydantic_ai.providers.openrouter import OpenRouterProvider

 from phone_a_friend_mcp_server.tools.base_tools import BaseTool
+from phone_a_friend_mcp_server.utils.context_builder import build_code_context


 class PhoneAFriendTool(BaseTool):
@@ -28,15 +29,21 @@ class PhoneAFriendTool(BaseTool):

     @property
     def description(self) -> str:
-        return """
+        return """🚨🚨🚨 **EXCLUSIVE USE ONLY** 🚨🚨🚨
+
+**USE ONLY WHEN USER EXPLICITLY ASKS TO "phone a friend"**
+**DO NOT use as fallback if fax_a_friend fails**
+**DO NOT auto-switch between phone/fax tools**
+**If this tool fails, ask user for guidance - do NOT try fax_a_friend**

 Purpose: pair-programming caliber *coding help* — reviews, debugging,
 refactors, design, migrations.

 Hard restrictions:
-  • Friend AI sees *only* the
+  • Friend AI sees *only* the context you provide.
   • No memory, no internet, no tools.
   • You must spell out every fact it should rely on.
+Replies must be exhaustively detailed. Do **NOT** include files ignored by .gitignore (e.g., *.pyc).

 Required I/O format:
 ```
@@ -80,22 +87,17 @@ replacing <file="…"> blocks as needed. Commentary goes outside those tags."""
                 "all_related_context": {
                     "type": "string",
                     "description": (
-                        "MANDATORY.
-                        "
-
-                        "- Known constraints (Python version, allowed deps, runtime limits, etc.).\n"
-                        "- Any failing test output or traceback.\n"
-                        "If it's not here, the friend AI can't use it."
+                        "MANDATORY. General, non-code context for the friend AI. "
+                        "Include known constraints (Python version, allowed deps, etc.), "
+                        "failing test output, or tracebacks. DO NOT include file contents here."
                     ),
                 },
-                "
-                    "type": "
+                "file_list": {
+                    "type": "array",
+                    "items": {"type": "string"},
                     "description": (
-                        "
-                        "
-                        "- Performance targets, security rules, deployment notes.\n"
-                        "- Similar past solutions or reference snippets.\n"
-                        "Skip it if there's nothing useful."
+                        "MANDATORY. A list of file paths or glob patterns to be included in the code context. "
+                        "The tool will automatically read these files, filter them against .gitignore, and build the context."
                     ),
                 },
                 "task": {
@@ -110,65 +112,66 @@ replacing <file="…"> blocks as needed. Commentary goes outside those tags."""
                     ),
                 },
             },
-            "required": ["all_related_context", "task"],
+            "required": ["all_related_context", "file_list", "task"],
         }

     async def run(self, **kwargs) -> dict[str, Any]:
         all_related_context = kwargs.get("all_related_context", "")
-
+        file_list = kwargs.get("file_list", [])
         task = kwargs.get("task", "")

-
-        master_prompt = self._create_master_prompt(all_related_context,
+        code_context = build_code_context(file_list)
+        master_prompt = self._create_master_prompt(all_related_context, code_context, task)

         try:
-            # Create Pydantic-AI agent with appropriate provider
             agent = self._create_agent()
+            temperature = self.config.get_temperature()

-
-
+            if temperature is not None:
+                result = await agent.run(master_prompt, model_settings={"temperature": temperature})
+            else:
+                result = await agent.run(master_prompt)

             return {
                 "response": result.data,
                 "status": "success",
                 "provider": self.config.provider,
                 "model": self.config.model,
-                "
+                "temperature": temperature,
+                "context_length": len(master_prompt),
                 "task": task,
             }

         except Exception as e:
+            temperature = self.config.get_temperature()
             return {
                 "error": str(e),
                 "status": "failed",
                 "provider": self.config.provider,
                 "model": self.config.model,
-                "
+                "temperature": temperature,
+                "context_length": len(master_prompt),
                 "task": task,
-                "master_prompt": master_prompt,
+                "master_prompt": master_prompt,
             }

     def _create_agent(self) -> Agent:
         """Create Pydantic-AI agent with appropriate provider."""
         if self.config.provider == "openrouter":
-            # OpenRouter has its own dedicated provider
             provider_kwargs = {"api_key": self.config.api_key}
             if self.config.base_url:
                 provider_kwargs["base_url"] = self.config.base_url
             provider = OpenRouterProvider(**provider_kwargs)
             model = OpenAIModel(self.config.model, provider=provider)
         elif self.config.provider == "anthropic":
-            # Use Anthropic directly
             provider_kwargs = {"api_key": self.config.api_key}
             provider = AnthropicProvider(**provider_kwargs)
             model = AnthropicModel(self.config.model, provider=provider)
         elif self.config.provider == "google":
-            # Use Google/Gemini directly
             provider_kwargs = {"api_key": self.config.api_key}
             provider = GoogleProvider(**provider_kwargs)
             model = GoogleModel(self.config.model, provider=provider)
         else:
-            # Default to OpenAI
             provider_kwargs = {"api_key": self.config.api_key}
             if self.config.base_url:
                 provider_kwargs["base_url"] = self.config.base_url
@@ -177,7 +180,7 @@ replacing <file="…"> blocks as needed. Commentary goes outside those tags."""

         return Agent(model)

-    def _create_master_prompt(self, all_related_context: str,
+    def _create_master_prompt(self, all_related_context: str, code_context: str, task: str) -> str:
         """Create a comprehensive prompt for the external AI."""

         prompt_parts = [
@@ -187,23 +190,19 @@ replacing <file="…"> blocks as needed. Commentary goes outside those tags."""
             "=== TASK ===",
             task,
             "",
-            "===
+            "=== GENERAL CONTEXT ===",
             all_related_context,
+            "",
+            "=== CODE CONTEXT ===",
+            code_context,
         ]

-        if any_additional_context.strip():
-            prompt_parts.extend(
-                [
-                    "",
-                    "=== ADDITIONAL CONTEXT ===",
-                    any_additional_context,
-                ]
-            )
-
         prompt_parts.extend(
             [
                 "",
                 "=== INSTRUCTIONS ===",
+                "- Provide exhaustive, step-by-step reasoning.",
+                "- Never include files matching .gitignore patterns.",
                 "- Analyze the code and requirements step-by-step.",
                 "- Show your reasoning and propose concrete changes.",
                 '- Provide updated code using the XML format (<file_tree> plus <file="…"> blocks).',

phone_a_friend_mcp_server/utils/__init__.py
@@ -0,0 +1 @@
+# This file makes the `utils` directory a Python package.

phone_a_friend_mcp_server/utils/context_builder.py
@@ -0,0 +1,93 @@
+import glob
+import os
+
+import pathspec
+
+
+def load_gitignore(base_dir: str) -> pathspec.PathSpec:
+    """Loads .gitignore patterns from the specified base directory."""
+    gitignore_path = os.path.join(base_dir, ".gitignore")
+    patterns = []
+    if os.path.exists(gitignore_path):
+        with open(gitignore_path, encoding="utf-8") as f:
+            patterns = f.read().splitlines()
+    return pathspec.PathSpec.from_lines("gitwildmatch", patterns)
+
+
+def filter_paths(paths: list[str], spec: pathspec.PathSpec, base_dir: str = ".") -> list[str]:
+    """Filters out paths that match the .gitignore spec and non-text files."""
+    filtered_paths = []
+    for path in paths:
+        normalized_path = os.path.normpath(os.path.join(base_dir, path))
+        if not spec.match_file(normalized_path) and os.path.isfile(normalized_path):
+            try:
+                with open(normalized_path, encoding="utf-8") as f:
+                    f.read(1024)
+                filtered_paths.append(path)
+            except (OSError, UnicodeDecodeError):
+                pass
+    return filtered_paths
+
+
+def build_file_tree(paths: list[str], base_dir: str = ".") -> str:
+    """Builds an ASCII file tree from a list of paths."""
+    tree = {}
+    for path in paths:
+        parts = path.split(os.sep)
+        current_level = tree
+        for part in parts:
+            if part not in current_level:
+                current_level[part] = {}
+            current_level = current_level[part]
+
+    def generate_tree_lines(d, prefix=""):
+        lines = []
+        entries = sorted(d.keys())
+        for i, entry in enumerate(entries):
+            connector = "├── " if i < len(entries) - 1 else "└── "
+            lines.append(prefix + connector + entry)
+            if d[entry]:
+                extension = "│   " if i < len(entries) - 1 else "    "
+                lines.extend(generate_tree_lines(d[entry], prefix + extension))
+        return lines
+
+    tree_lines = [base_dir] + generate_tree_lines(tree)
+    return "\n".join(tree_lines)
+
+
+def build_file_blocks(paths: list[str], base_dir: str = ".") -> str:
+    """Creates <file="..." /> blocks with the full source code for each file."""
+    blocks = []
+    for path in paths:
+        full_path = os.path.join(base_dir, path)
+        try:
+            with open(full_path, encoding="utf-8") as f:
+                content = f.read()
+            blocks.append(f'<file="{path}">\n{content}\n</file>')
+        except (OSError, UnicodeDecodeError) as e:
+            blocks.append(f'<file="{path}">\nError reading file: {e}\n</file>')
+    return "\n\n".join(blocks)
+
+
+def build_code_context(file_list: list[str], base_dir: str = ".") -> str:
+    """
+    Builds the complete code context, including a file tree and file content blocks,
+    filtering out ignored and binary files.
+    """
+    spec = load_gitignore(base_dir)
+
+    all_files = []
+    for pattern in file_list:
+        all_files.extend(glob.glob(pattern, recursive=True))
+
+    unique_files = sorted(list(set(all_files)))
+
+    filtered_files = filter_paths(unique_files, spec, base_dir)
+
+    if not filtered_files:
+        return "No files to display. Check your `file_list` and `.gitignore`."
+
+    file_tree = build_file_tree(filtered_files, base_dir)
+    file_blocks = build_file_blocks(filtered_files, base_dir)
+
+    return f"<file_tree>\n{file_tree}\n</file_tree>\n\n{file_blocks}"
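
`build_code_context` is the piece both tools now share: glob expansion, `.gitignore` filtering via `pathspec`, a text-file sniff to skip binaries, then an ASCII `<file_tree>` followed by `<file="...">` blocks. A small usage sketch; the paths are illustrative:

```python
from phone_a_friend_mcp_server.utils.context_builder import build_code_context

# Globs are expanded with glob.glob(..., recursive=True), deduplicated, and
# filtered against .gitignore before the tree and file blocks are assembled.
context = build_code_context(["phone_a_friend_mcp_server/**/*.py", "pyproject.toml"])
print(context.splitlines()[0])  # "<file_tree>" unless nothing survived filtering
```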

phone_a_friend_mcp_server-0.2.0.dist-info/METADATA
@@ -0,0 +1,176 @@
+Metadata-Version: 2.4
+Name: phone-a-friend-mcp-server
+Version: 0.2.0
+Summary: MCP Server for Phone-a-Friend assistance
+Project-URL: GitHub, https://github.com/abhishekbhakat/phone-a-friend-mcp-server
+Project-URL: Issues, https://github.com/abhishekbhakat/phone-a-friend-mcp-server/issues
+Author-email: Abhishek Bhakat <abhishek.bhakat@hotmail.com>
+License-Expression: MIT
+License-File: LICENSE
+Classifier: Development Status :: 3 - Alpha
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Requires-Python: >=3.11
+Requires-Dist: aiofiles>=24.1.0
+Requires-Dist: aiohttp>=3.12.7
+Requires-Dist: click>=8.2.1
+Requires-Dist: mcp>=1.9.2
+Requires-Dist: pathspec>=0.12.1
+Requires-Dist: pydantic-ai-slim[anthropic,google,openai]>=0.2.14
+Requires-Dist: pydantic>=2.11.5
+Requires-Dist: pyyaml>=6.0.0
+Description-Content-Type: text/markdown
+
+# Phone-a-Friend MCP Server 🧠📞
+
+An AI-to-AI consultation system that enables one AI to "phone a friend" (another AI) for critical thinking, long context reasoning, and complex problem solving via OpenRouter.
+
+## The Problem 🤔
+
+Sometimes an AI encounters complex problems that require:
+- **Deep critical thinking** beyond immediate capabilities
+- **Long context reasoning** with extensive information
+- **Multi-step analysis** that benefits from external perspective
+- **Specialized expertise** from different AI models
+
+## The Solution �
+
+Phone-a-Friend MCP Server creates a **two-step consultation process**:
+
+1. **Context + Reasoning**: Package all relevant context and send to external AI for deep analysis
+2. **Extract Actionable Insights**: Process the reasoning response into usable format for the primary AI
+
+This enables AI systems to leverage other AI models as "consultants" for complex reasoning tasks.
+
+## Architecture 🏗️
+
+```
+Primary AI → Phone-a-Friend MCP → OpenRouter → External AI (GPT-4, Claude, etc.) → Processed Response → Primary AI
+```
+
+**Sequential Workflow:**
+1. `analyze_context` - Gather and structure all relevant context
+2. `get_critical_thinking` - Send context to external AI via OpenRouter for reasoning
+3. `extract_actionable_insights` - Process response into actionable format
+
+## When to Use 🎯
+
+**Ideal for:**
+- Complex multi-step problems requiring deep analysis
+- Situations needing long context reasoning (>100k tokens)
+- Cross-domain expertise consultation
+- Critical decision-making with high stakes
+- Problems requiring multiple perspectives
+
+## Quick Start ⚡
+
+Configure your MCP client (e.g., Claude Desktop) using the JSON block below—no cloning or manual installation required.
+The `uv` runner will automatically download and execute the server package if it isn't present.
+
+Add the following JSON configuration to your MCP client and replace `<YOUR_API_KEY>` with your key:
+
+```json
+{
+  "mcpServers": {
+    "phone-a-friend": {
+      "command": "uv",
+      "args": [
+        "run",
+        "phone-a-friend-mcp-server",
+        "--provider", "openai",
+        "--api-key", "<YOUR_API_KEY>"
+      ]
+    }
+  }
+}
+```
+> That's it! You can now use the `phone_a_friend` tool in any compatible client. For more options, see the Advanced Configuration section.
+
+## Available Tools 🛠️
+
+### phone_a_friend
+📞 Consult external AI for critical thinking and complex reasoning. Makes API calls to get responses.
+
+### fax_a_friend
+📠 Generate master prompt file for manual AI consultation. Creates file for copy-paste workflow.
+
+**Parameters**
+
+*phone_a_friend*
+
+- `all_related_context` (required): General, non-code context such as constraints, tracebacks, or high-level requirements.
+- `file_list` (required): Array of file paths or glob patterns. **Just pass the paths** – the server automatically reads those files (skips anything in `.gitignore` or non-text/binary) and builds the full code context for the external AI.
+- `task` (required): A clear, specific description of what you want the external AI to do.
+
+*fax_a_friend*
+
+- `all_related_context` (required): Same as above.
+- `file_list` (required): Same as above.
+- `task` (required): Same as above.
+- `output_directory` (required): Directory where the generated `fax_a_friend.md` master prompt file will be saved.
+
+## Advanced Configuration 🔧
+
+This section covers all configuration options, including environment variables, CLI flags, and model selection.
+
+### Providers and API Keys
+
+The server can be configured via CLI flags or environment variables.
+
+| Provider | CLI Flag | Environment Variable |
+| :--- | :--- | :--- |
+| OpenAI | `--provider openai` | `OPENAI_API_KEY` |
+| OpenRouter | `--provider openrouter` | `OPENROUTER_API_KEY` |
+| Anthropic | `--provider anthropic` | `ANTHROPIC_API_KEY` |
+| Google | `--provider google` | `GOOGLE_API_KEY` |
+
+**CLI Example:**
+```bash
+phone-a-friend-mcp-server --provider openai --api-key "sk-..."
+```
+
+**Environment Variable Example:**
+```bash
+export OPENAI_API_KEY="sk-..."
+phone-a-friend-mcp-server
+```
+
+### Model Selection
+
+You can override the default model for each provider.
+
+| Provider | Default Model |
+| :--- | :--- |
+| **OpenAI** | `o3` |
+| **Anthropic** | `Claude 4 Opus` |
+| **Google** | `Gemini 2.5 Pro Preview 05-06` |
+| **OpenRouter**| `anthropic/claude-4-opus` |
+
+**Override with CLI:**
+```bash
+phone-a-friend-mcp-server --model "gpt-4-turbo"
+```
+
+**Override with Environment Variable:**
+```bash
+export PHONE_A_FRIEND_MODEL="gpt-4-turbo"
+```
+
+### Additional Options
+
+| Feature | CLI Flag | Environment Variable | Default |
+| :--- | :--- | :--- | :--- |
+| **Temperature** | `--temperature 0.5` | `PHONE_A_FRIEND_TEMPERATURE` | `0.4` |
+| **Base URL** | `--base-url ...` | `PHONE_A_FRIEND_BASE_URL` | Provider default |
+
+## Use Cases 🎯
+
+1. In-depth Reasoning for Vibe Coding
+2. For complex algorithms, data structures, or mathematical computations
+3. Frontend Development with React, Vue, CSS, or modern frontend frameworks
+
+## License 📄
+
+MIT License - see LICENSE file for details.

phone_a_friend_mcp_server-0.2.0.dist-info/RECORD
@@ -0,0 +1,17 @@
+phone_a_friend_mcp_server/__init__.py,sha256=9sn_dPrIzLz4W7_Ww--o8aUxIhUI3YGNrxPa26pKShw,2025
+phone_a_friend_mcp_server/__main__.py,sha256=A-8-jkY2FK2foabew5I-Wk2A54IwzWZcydlQKfiR-p4,51
+phone_a_friend_mcp_server/config.py,sha256=AP8ZU5VEJjr73QUNAoPKidAhO2O_WOGO9mmj7-rTAz0,3988
+phone_a_friend_mcp_server/server.py,sha256=z-O20j-j2oHFfFK8o0u9kn-MR8Q-Te0lRZOQfLkYUbM,3448
+phone_a_friend_mcp_server/client/__init__.py,sha256=fsa8DXjz4rzYXmOUAdLdTpTwPSlZ3zobmBGXqnCEaWs,47
+phone_a_friend_mcp_server/tools/__init__.py,sha256=jtuvmcStXzbaM8wuhOKC8M8mBqDjHr-ypZ2ct1Rgi7Q,46
+phone_a_friend_mcp_server/tools/base_tools.py,sha256=DMjFq0E3TO9a9I7QY4wQ_B4-SntdXzSZzrYymFzSmVE,765
+phone_a_friend_mcp_server/tools/fax_tool.py,sha256=esQwxt-j69CiBRQHaVVShRbwEHdcvAppwO1uBD6UCQs,8836
+phone_a_friend_mcp_server/tools/phone_tool.py,sha256=4OLRGThNQO8aAFWL8Bz5J40RPKqDqSZuGXpJc0oRL94,8396
+phone_a_friend_mcp_server/tools/tool_manager.py,sha256=VVtENC-n3D4GV6Cy3l9--30SJi06mJdyEiG7F_mfP7I,1474
+phone_a_friend_mcp_server/utils/__init__.py,sha256=1IwFDtwJ76i1O7_iM4LLGqwgtt11y0PIV0DWubh8nLU,58
+phone_a_friend_mcp_server/utils/context_builder.py,sha256=bw3FwnRg1WQsXjxyKPwLpqrTObPkmQnZ_MtBzaz6WnM,3398
+phone_a_friend_mcp_server-0.2.0.dist-info/METADATA,sha256=wMrRoQMFcWN8DZoHDV1eqHKmsCziQxy3H7dsxIccepY,6126
+phone_a_friend_mcp_server-0.2.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+phone_a_friend_mcp_server-0.2.0.dist-info/entry_points.txt,sha256=c_08XI-vG07VmUT3mtzyuCQjaus5l1NBl4q00Q3jLug,86
+phone_a_friend_mcp_server-0.2.0.dist-info/licenses/LICENSE,sha256=-8bInetillKZC0qZDT8RWYIOrph3HIU5cr5N4Pg7bBE,1065
+phone_a_friend_mcp_server-0.2.0.dist-info/RECORD,,

phone_a_friend_mcp_server-0.1.0.dist-info/METADATA
@@ -1,320 +0,0 @@
-Metadata-Version: 2.4
-Name: phone-a-friend-mcp-server
-Version: 0.1.0
-Summary: MCP Server for Phone-a-Friend assistance
-Project-URL: GitHub, https://github.com/abhishekbhakat/phone-a-friend-mcp-server
-Project-URL: Issues, https://github.com/abhishekbhakat/phone-a-friend-mcp-server/issues
-Author-email: Abhishek Bhakat <abhishek.bhakat@hotmail.com>
-License-Expression: MIT
-License-File: LICENSE
-Classifier: Development Status :: 3 - Alpha
-Classifier: Operating System :: OS Independent
-Classifier: Programming Language :: Python :: 3.10
-Classifier: Programming Language :: Python :: 3.11
-Classifier: Programming Language :: Python :: 3.12
-Requires-Python: >=3.11
-Requires-Dist: aiofiles>=24.1.0
-Requires-Dist: aiohttp>=3.12.7
-Requires-Dist: click>=8.2.1
-Requires-Dist: mcp>=1.9.2
-Requires-Dist: pydantic-ai-slim[anthropic,google,openai]>=0.2.14
-Requires-Dist: pydantic>=2.11.5
-Requires-Dist: pyyaml>=6.0.0
-Provides-Extra: dev
-Requires-Dist: build>=1.2.2.post1; extra == 'dev'
-Requires-Dist: pre-commit>=4.2.0; extra == 'dev'
-Requires-Dist: pytest-asyncio>=0.26.0; extra == 'dev'
-Requires-Dist: pytest-mock>=3.14.1; extra == 'dev'
-Requires-Dist: pytest>=8.3.4; extra == 'dev'
-Requires-Dist: ruff>=0.11.12; extra == 'dev'
-Description-Content-Type: text/markdown
-
-# Phone-a-Friend MCP Server 🧠📞
-
-An AI-to-AI consultation system that enables one AI to "phone a friend" (another AI) for critical thinking, long context reasoning, and complex problem solving via OpenRouter.
-
-## The Problem 🤔
-
-Sometimes an AI encounters complex problems that require:
-- **Deep critical thinking** beyond immediate capabilities
-- **Long context reasoning** with extensive information
-- **Multi-step analysis** that benefits from external perspective
-- **Specialized expertise** from different AI models
-
-## The Solution �
-
-Phone-a-Friend MCP Server creates a **two-step consultation process**:
-
-1. **Context + Reasoning**: Package all relevant context and send to external AI for deep analysis
-2. **Extract Actionable Insights**: Process the reasoning response into usable format for the primary AI
-
-This enables AI systems to leverage other AI models as "consultants" for complex reasoning tasks.
-
-## Architecture 🏗️
-
-```
-Primary AI → Phone-a-Friend MCP → OpenRouter → External AI (GPT-4, Claude, etc.) → Processed Response → Primary AI
-```
-
-**Sequential Workflow:**
-1. `analyze_context` - Gather and structure all relevant context
-2. `get_critical_thinking` - Send context to external AI via OpenRouter for reasoning
-3. `extract_actionable_insights` - Process response into actionable format
-
-## When to Use 🎯
-
-**Ideal for:**
-- Complex multi-step problems requiring deep analysis
-- Situations needing long context reasoning (>100k tokens)
-- Cross-domain expertise consultation
-- Critical decision-making with high stakes
-- Problems requiring multiple perspectives
-
-**Not needed for:**
-- Simple factual questions
-- Basic reasoning tasks
-- Quick responses
-- Well-defined procedural tasks
-
-## Installation 🚀
-
-1. Clone the repository:
-```bash
-git clone https://github.com/abhishekbhakat/phone-a-friend-mcp-server.git
-cd phone-a-friend-mcp-server
-```
-
-2. Install dependencies:
-```bash
-uv pip install -e .
-```
-
-3. Configure your preferred AI provider:
-
-**OpenRouter (recommended - access to multiple models):**
-```bash
-export OPENROUTER_API_KEY="your-openrouter-key"
-# Model will auto-select based on provider
-```
-
-**OpenAI:**
-```bash
-export OPENAI_API_KEY="your-openai-key"
-# Uses latest available model by default
-```
-
-**Anthropic:**
-```bash
-export ANTHROPIC_API_KEY="your-anthropic-key"
-# Uses latest available model by default
-```
-
-**Google/Gemini:**
-```bash
-export GOOGLE_API_KEY="your-google-key"  # or GEMINI_API_KEY
-# Uses latest available model by default
-```
-
-## Usage 💡
-
-### Command Line
-```bash
-# Start the server
-phone-a-friend-mcp-server
-
-# With verbose logging
-phone-a-friend-mcp-server -v
-
-# With specific provider (uses optimal model automatically)
-phone-a-friend-mcp-server --provider anthropic
-phone-a-friend-mcp-server --provider google
-
-# Override with custom model if needed
-phone-a-friend-mcp-server --provider anthropic --model "your-preferred-model"
-```
-
-### Environment Variables
-```bash
-# Auto-detects provider based on available API keys
-export OPENROUTER_API_KEY="your-openrouter-key"  # Preferred
-export OPENAI_API_KEY="your-openai-key"  # Default fallback
-export ANTHROPIC_API_KEY="your-anthropic-key"  # Direct Anthropic
-export GOOGLE_API_KEY="your-google-key"  # Google/Gemini
-
-# Optional overrides (only if you want to override auto-selection)
-export PHONE_A_FRIEND_MODEL="your-preferred-model"
-export PHONE_A_FRIEND_PROVIDER="your-preferred-provider"
-```
-
-## Model Selection 🤖
-
-The system automatically selects the most capable model for each provider:
-- **OpenAI**: Latest reasoning model
-- **Anthropic**: Latest Claude model
-- **Google**: Latest Gemini Pro model
-- **OpenRouter**: Access to latest models from all providers
-
-You can override the auto-selection by setting `PHONE_A_FRIEND_MODEL` environment variable or using the `--model` CLI option.
-
-## Available Tools 🛠️
-
-### phone_a_friend
-Consult an external AI for critical thinking and complex reasoning via OpenRouter.
-
-**IMPORTANT:** The external AI is very smart but has NO MEMORY of previous conversations.
-The quality of the response depends ENTIRELY on the quality and completeness of the context you provide.
-
-**Parameters:**
-- `all_related_context` (required): ALL context directly related to the problem. Include:
-  - Background information and history
-  - Previous attempts and their outcomes
-  - Stakeholders and their perspectives
-  - Constraints, requirements, and limitations
-  - Current situation and circumstances
-  - Any relevant data, metrics, or examples
-  - Timeline and deadlines
-  - Success criteria and goals
-
-- `any_additional_context` (optional): ANY additional context that might be helpful. Include:
-  - Relevant documentation, specifications, or guidelines
-  - Industry standards or best practices
-  - Similar cases or precedents
-  - Technical details or domain knowledge
-  - Regulatory or compliance requirements
-  - Tools, resources, or technologies available
-  - Budget or resource constraints
-  - Organizational context or culture
-
-- `task` (required): The specific task or question for the external AI. Be clear about:
-  - What exactly you need help with
-  - What type of analysis or reasoning you want
-  - What format you prefer for the response
-  - What decisions need to be made
-  - What problems need to be solved
-
-**Example Usage:**
-```
-all_related_context: "We're a SaaS startup with 50 employees. Our customer churn rate increased from 5% to 12% over the last quarter. We recently changed our pricing model and added new features. Customer support tickets increased 40%. Our main competitors are offering similar features at lower prices."
-
-any_additional_context: "Industry benchmark for SaaS churn is 6-8%. Our pricing increased by 30%. New features include AI analytics and advanced reporting. Customer feedback mentions complexity and cost concerns."
-
-task: "Analyze the churn increase and provide a comprehensive action plan to reduce it back to 5% within 6 months. Include specific strategies, timeline, and success metrics."
-```
-
-The system will automatically route this to the most capable AI model available based on your configured provider.
-
-## Claude Desktop Configuration 🖥️
-
-To use Phone-a-Friend MCP server with Claude Desktop, add this configuration to your `claude_desktop_config.json` file:
-
-### Configuration File Location
-- **macOS**: `~/Library/Application Support/Claude/claude_desktop_config.json`
-- **Windows**: `%APPDATA%\Claude\claude_desktop_config.json`
-
-### Configuration
-
-**Option 1: Using uv (Recommended)**
-```json
-{
-  "mcpServers": {
-    "phone-a-friend": {
-      "command": "uvx",
-      "args": [
-        "run",
-        "--refresh",
-        "phone-a-friend-mcp-server",
-      ],
-      "env": {
-        "OPENROUTER_API_KEY": "your-openrouter-api-key",
-        "PHONE_A_FRIEND_MODEL": "anthropic/claude-4-opus"
-      }
-    }
-  }
-}
-```
-
-### Environment Variables in Configuration
-
-You can configure different AI providers directly in the Claude Desktop config:
-
-```json
-{
-  "mcpServers": {
-    "phone-a-friend": {
-      "command": "phone-a-friend-mcp-server",
-      "env": {
-        "OPENROUTER_API_KEY": "your-openrouter-api-key",
-        "PHONE_A_FRIEND_MODEL": "anthropic/claude-4-opus"
-      }
-    }
-  }
-}
-```
-
-**Alternative Providers:**
-```json
-{
-  "mcpServers": {
-    "phone-a-friend-openai": {
-      "command": "phone-a-friend-mcp-server",
-      "env": {
-        "OPENAI_API_KEY": "your-openai-api-key"
-      }
-    },
-    "phone-a-friend-anthropic": {
-      "command": "phone-a-friend-mcp-server",
-      "env": {
-        "ANTHROPIC_API_KEY": "your-anthropic-api-key"
-      }
-    },
-    "phone-a-friend-google": {
-      "command": "phone-a-friend-mcp-server",
-      "env": {
-        "GOOGLE_API_KEY": "your-google-api-key"
-      }
-    }
-  }
-}
-```
-
-### Setup Steps
-
-1. **Install Phone-a-Friend MCP Server** (see Installation section above)
-2. **Open Claude Desktop Settings** → Developer → Edit Config
-3. **Add the configuration** (choose one of the options above)
-4. **Replace paths and API keys** with your actual values
-5. **Restart Claude Desktop**
-6. **Look for the 🔨 hammer icon** in the input box to confirm the server is connected
-
-### Troubleshooting
-
-If the server doesn't appear in Claude Desktop:
-
-1. **Check logs**:
-   - macOS: `~/Library/Logs/Claude/mcp*.log`
-   - Windows: `%APPDATA%\Claude\logs\mcp*.log`
-
-2. **Verify paths** are absolute and correct
-3. **Test manually** in terminal:
-   ```bash
-   phone-a-friend-mcp-server -v
-   ```
-4. **Restart Claude Desktop** completely
-5. **Check API keys** are valid and have sufficient credits
-
-## Development 🔧
-
-### Running Tests
-```bash
-pytest
-```
-
-### Code Formatting
-```bash
-ruff format .
-ruff check .
-```
-
-## License 📄
-
-MIT License - see LICENSE file for details.
@@ -1,15 +0,0 @@
|
|
1
|
-
phone_a_friend_mcp_server/__init__.py,sha256=RaayGu6L95bFNEioVLZwFifnKMl9-yhUU7glBInuqXA,1895
|
2
|
-
phone_a_friend_mcp_server/__main__.py,sha256=A-8-jkY2FK2foabew5I-Wk2A54IwzWZcydlQKfiR-p4,51
|
3
|
-
phone_a_friend_mcp_server/config.py,sha256=Wfs68Zw7xXhAQ-77z3gblqsnmqO5bed-f2ggJkvgzUM,2356
|
4
|
-
phone_a_friend_mcp_server/server.py,sha256=ppx8QxQvJihcOzJkrJFlh9qyZ0fvI_eGP0TgYUC0Vcw,3394
|
5
|
-
phone_a_friend_mcp_server/client/__init__.py,sha256=fsa8DXjz4rzYXmOUAdLdTpTwPSlZ3zobmBGXqnCEaWs,47
|
6
|
-
phone_a_friend_mcp_server/tools/__init__.py,sha256=jtuvmcStXzbaM8wuhOKC8M8mBqDjHr-ypZ2ct1Rgi7Q,46
|
7
|
-
phone_a_friend_mcp_server/tools/base_tools.py,sha256=DMjFq0E3TO9a9I7QY4wQ_B4-SntdXzSZzrYymFzSmVE,765
|
8
|
-
phone_a_friend_mcp_server/tools/fax_tool.py,sha256=vzcGITygR49q9cu7Fw7SYcER7u_bKY6FKfvwBGKrRGs,7573
|
9
|
-
phone_a_friend_mcp_server/tools/phone_tool.py,sha256=zvZOU9TgC7PR6nm7rj13PPH0dRnBN9XYk2bf2frIWpE,8356
|
10
|
-
phone_a_friend_mcp_server/tools/tool_manager.py,sha256=VVtENC-n3D4GV6Cy3l9--30SJi06mJdyEiG7F_mfP7I,1474
|
11
|
-
phone_a_friend_mcp_server-0.1.0.dist-info/METADATA,sha256=jXvQP4I_B2uhleQWo-gIcvWntCEJfeVe0Bz2VxLI_sE,10048
|
12
|
-
phone_a_friend_mcp_server-0.1.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
|
13
|
-
phone_a_friend_mcp_server-0.1.0.dist-info/entry_points.txt,sha256=c_08XI-vG07VmUT3mtzyuCQjaus5l1NBl4q00Q3jLug,86
|
14
|
-
phone_a_friend_mcp_server-0.1.0.dist-info/licenses/LICENSE,sha256=-8bInetillKZC0qZDT8RWYIOrph3HIU5cr5N4Pg7bBE,1065
|
15
|
-
phone_a_friend_mcp_server-0.1.0.dist-info/RECORD,,
|

Renamed without content changes:
- {phone_a_friend_mcp_server-0.1.0.dist-info → phone_a_friend_mcp_server-0.2.0.dist-info}/WHEEL
- {phone_a_friend_mcp_server-0.1.0.dist-info → phone_a_friend_mcp_server-0.2.0.dist-info}/entry_points.txt
- {phone_a_friend_mcp_server-0.1.0.dist-info → phone_a_friend_mcp_server-0.2.0.dist-info}/licenses/LICENSE