architectgbt-mcp 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +133 -0
- package/dist/index.d.ts +2 -0
- package/dist/index.js +51 -0
- package/dist/templates/index.d.ts +6 -0
- package/dist/templates/index.js +116 -0
- package/dist/tools/get-recommendation.d.ts +37 -0
- package/dist/tools/get-recommendation.js +94 -0
- package/dist/tools/get-template.d.ts +25 -0
- package/dist/tools/get-template.js +78 -0
- package/dist/tools/list-models.d.ts +32 -0
- package/dist/tools/list-models.js +61 -0
- package/package.json +31 -0
package/README.md
ADDED
|
@@ -0,0 +1,133 @@
|
|
|
1
|
+
# ArchitectGBT MCP Server
|
|
2
|
+
|
|
3
|
+
AI model recommendation engine for Cursor, Claude Desktop, and Windsurf.
|
|
4
|
+
|
|
5
|
+
Get instant AI model recommendations without leaving your IDE.
|
|
6
|
+
|
|
7
|
+
## Features
|
|
8
|
+
|
|
9
|
+
- 🎯 **Smart Recommendations** - Get the best AI model for your use case
|
|
10
|
+
- 📝 **Code Templates** - Production-ready integration code
|
|
11
|
+
- 📊 **Model Database** - Compare 50+ AI models with pricing
|
|
12
|
+
|
|
13
|
+
## Installation
|
|
14
|
+
|
|
15
|
+
### For Cursor
|
|
16
|
+
|
|
17
|
+
Add to your `~/.cursor/mcp.json`:
|
|
18
|
+
|
|
19
|
+
```json
|
|
20
|
+
{
|
|
21
|
+
"mcpServers": {
|
|
22
|
+
"architectgbt": {
|
|
23
|
+
"command": "npx",
|
|
24
|
+
"args": ["architectgbt-mcp"]
|
|
25
|
+
}
|
|
26
|
+
}
|
|
27
|
+
}
|
|
28
|
+
```
|
|
29
|
+
|
|
30
|
+
### For Claude Desktop
|
|
31
|
+
|
|
32
|
+
Add to your Claude Desktop config (`%APPDATA%\Claude\claude_desktop_config.json` on Windows):
|
|
33
|
+
|
|
34
|
+
```json
|
|
35
|
+
{
|
|
36
|
+
"mcpServers": {
|
|
37
|
+
"architectgbt": {
|
|
38
|
+
"command": "npx",
|
|
39
|
+
"args": ["architectgbt-mcp"]
|
|
40
|
+
}
|
|
41
|
+
}
|
|
42
|
+
}
|
|
43
|
+
```
|
|
44
|
+
|
|
45
|
+
### For Windsurf
|
|
46
|
+
|
|
47
|
+
Add to your MCP configuration:
|
|
48
|
+
|
|
49
|
+
```json
|
|
50
|
+
{
|
|
51
|
+
"mcpServers": {
|
|
52
|
+
"architectgbt": {
|
|
53
|
+
"command": "npx",
|
|
54
|
+
"args": ["architectgbt-mcp"]
|
|
55
|
+
}
|
|
56
|
+
}
|
|
57
|
+
}
|
|
58
|
+
```
|
|
59
|
+
|
|
60
|
+
## Available Tools
|
|
61
|
+
|
|
62
|
+
### `get_ai_recommendation`
|
|
63
|
+
|
|
64
|
+
Get personalized AI model recommendations based on your project description.
|
|
65
|
+
|
|
66
|
+
**Example prompts:**
|
|
67
|
+
- "What AI model should I use for a customer support chatbot?"
|
|
68
|
+
- "Recommend a model for code generation on a budget"
|
|
69
|
+
- "Best model for document analysis with 100K context?"
|
|
70
|
+
|
|
71
|
+
### `get_code_template`
|
|
72
|
+
|
|
73
|
+
Get production-ready integration code for any AI model.
|
|
74
|
+
|
|
75
|
+
**Example prompts:**
|
|
76
|
+
- "Give me a TypeScript template for Claude"
|
|
77
|
+
- "Python code for OpenAI GPT-4"
|
|
78
|
+
- "Gemini integration in TypeScript"
|
|
79
|
+
|
|
80
|
+
### `list_models`
|
|
81
|
+
|
|
82
|
+
List available AI models with pricing information.
|
|
83
|
+
|
|
84
|
+
**Example prompts:**
|
|
85
|
+
- "List all Anthropic models"
|
|
86
|
+
- "Show me the cheapest models"
|
|
87
|
+
- "What OpenAI models are available?"
|
|
88
|
+
|
|
89
|
+
## Development
|
|
90
|
+
|
|
91
|
+
```bash
|
|
92
|
+
# Install dependencies
|
|
93
|
+
npm install
|
|
94
|
+
|
|
95
|
+
# Run in development mode
|
|
96
|
+
npm run dev
|
|
97
|
+
|
|
98
|
+
# Build for production
|
|
99
|
+
npm run build
|
|
100
|
+
|
|
101
|
+
# Start production server
|
|
102
|
+
npm start
|
|
103
|
+
```
|
|
104
|
+
|
|
105
|
+
## Local Testing
|
|
106
|
+
|
|
107
|
+
To test locally before publishing, update your MCP config to point to the built file:
|
|
108
|
+
|
|
109
|
+
```json
|
|
110
|
+
{
|
|
111
|
+
"mcpServers": {
|
|
112
|
+
"architectgbt": {
|
|
113
|
+
"command": "node",
|
|
114
|
+
"args": ["/absolute/path/to/architectgbt-mcp/dist/index.js"]
|
|
115
|
+
}
|
|
116
|
+
}
|
|
117
|
+
}
|
|
118
|
+
```
|
|
119
|
+
|
|
120
|
+
## Environment Variables
|
|
121
|
+
|
|
122
|
+
| Variable | Description | Default |
|
|
123
|
+
|----------|-------------|---------|
|
|
124
|
+
| `ARCHITECTGBT_API_URL` | API base URL | `https://architectgbt.com` |
|
|
125
|
+
|
|
126
|
+
## License
|
|
127
|
+
|
|
128
|
+
MIT
|
|
129
|
+
|
|
130
|
+
## Links
|
|
131
|
+
|
|
132
|
+
- [ArchitectGBT Website](https://architectgbt.com)
|
|
133
|
+
- [MCP Documentation](https://modelcontextprotocol.io)
|
package/dist/index.d.ts
ADDED
package/dist/index.js
ADDED
|
#!/usr/bin/env node
// ArchitectGBT MCP server entry point.
// Registers the three tools (recommendation, code template, model list)
// and serves them over stdio per the Model Context Protocol.
import { Server } from "@modelcontextprotocol/sdk/server/index.js";
import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
import { CallToolRequestSchema, ListToolsRequestSchema, } from "@modelcontextprotocol/sdk/types.js";
import { getRecommendationTool, handleGetRecommendation } from "./tools/get-recommendation.js";
import { getTemplateTool, handleGetTemplate } from "./tools/get-template.js";
import { listModelsTool, handleListModels } from "./tools/list-models.js";

const server = new Server({
    name: "architectgbt-mcp",
    version: "0.1.0",
}, {
    capabilities: {
        tools: {},
    },
});

// Advertise the available tools to clients (tools/list).
server.setRequestHandler(ListToolsRequestSchema, async () => {
    return {
        tools: [getRecommendationTool, getTemplateTool, listModelsTool],
    };
});

// Dispatch tool calls to the matching handler. Failures are surfaced as
// MCP `isError` results so the server itself never crashes on a bad call.
server.setRequestHandler(CallToolRequestSchema, async (request) => {
    const { name, arguments: args } = request.params;
    try {
        switch (name) {
            case "get_ai_recommendation":
                return await handleGetRecommendation(args);
            case "get_code_template":
                return await handleGetTemplate(args);
            case "list_models":
                return await handleListModels(args);
            default:
                throw new Error(`Unknown tool: ${name}`);
        }
    }
    catch (error) {
        const message = error instanceof Error ? error.message : "Unknown error";
        return {
            content: [{ type: "text", text: `Error: ${message}` }],
            isError: true,
        };
    }
});

// Start the server on stdio. Log to stderr — stdout carries the protocol.
async function main() {
    const transport = new StdioServerTransport();
    await server.connect(transport);
    console.error("ArchitectGBT MCP server running on stdio");
}

// BUG FIX: `main().catch(console.error)` logged fatal startup errors but
// left the process exiting with code 0, so MCP clients and supervisors
// could not detect the failure. Exit non-zero instead.
main().catch((error) => {
    console.error(error);
    process.exit(1);
});
|
// Static, offline code templates served by the `get_code_template` tool.
// Keyed by provider (anthropic | openai | google), then by language
// (typescript | python). Each leaf provides:
//   install - shell command installing the provider SDK
//   envVars - environment variables the snippet expects
//   code    - a complete chat() helper for that SDK
//   usage   - a minimal example invoking chat()
// NOTE(review): the `code`/`usage` strings are emitted verbatim to users;
// model IDs pinned here (claude-sonnet-4-20250514, gpt-4o,
// gemini-2.0-flash) will need periodic refresh.
export const templates = {
    anthropic: {
        typescript: {
            install: "npm install @anthropic-ai/sdk",
            envVars: ["ANTHROPIC_API_KEY=your-api-key"],
            code: `import Anthropic from "@anthropic-ai/sdk";

const client = new Anthropic({
  apiKey: process.env.ANTHROPIC_API_KEY,
});

export async function chat(message: string): Promise<string> {
  const response = await client.messages.create({
    model: "claude-sonnet-4-20250514",
    max_tokens: 1024,
    messages: [{ role: "user", content: message }],
  });

  const textBlock = response.content[0];
  if (textBlock.type === "text") {
    return textBlock.text;
  }
  throw new Error("Unexpected response type");
}`,
            usage: `const answer = await chat("What is the capital of France?");
console.log(answer);`,
        },
        python: {
            install: "pip install anthropic",
            envVars: ["ANTHROPIC_API_KEY=your-api-key"],
            code: `import anthropic

client = anthropic.Anthropic()

def chat(message: str) -> str:
    response = client.messages.create(
        model="claude-sonnet-4-20250514",
        max_tokens=1024,
        messages=[{"role": "user", "content": message}]
    )
    return response.content[0].text`,
            usage: `answer = chat("What is the capital of France?")
print(answer)`,
        },
    },
    openai: {
        typescript: {
            install: "npm install openai",
            envVars: ["OPENAI_API_KEY=your-api-key"],
            code: `import OpenAI from "openai";

const client = new OpenAI({
  apiKey: process.env.OPENAI_API_KEY,
});

export async function chat(message: string): Promise<string> {
  const response = await client.chat.completions.create({
    model: "gpt-4o",
    messages: [{ role: "user", content: message }],
  });

  return response.choices[0].message.content || "";
}`,
            usage: `const answer = await chat("What is the capital of France?");
console.log(answer);`,
        },
        python: {
            install: "pip install openai",
            envVars: ["OPENAI_API_KEY=your-api-key"],
            code: `from openai import OpenAI

client = OpenAI()

def chat(message: str) -> str:
    response = client.chat.completions.create(
        model="gpt-4o",
        messages=[{"role": "user", "content": message}]
    )
    return response.choices[0].message.content`,
            usage: `answer = chat("What is the capital of France?")
print(answer)`,
        },
    },
    google: {
        typescript: {
            install: "npm install @google/generative-ai",
            envVars: ["GOOGLE_API_KEY=your-api-key"],
            code: `import { GoogleGenerativeAI } from "@google/generative-ai";

const genAI = new GoogleGenerativeAI(process.env.GOOGLE_API_KEY!);

export async function chat(message: string): Promise<string> {
  const model = genAI.getGenerativeModel({ model: "gemini-2.0-flash" });
  const result = await model.generateContent(message);
  return result.response.text();
}`,
            usage: `const answer = await chat("What is the capital of France?");
console.log(answer);`,
        },
        python: {
            install: "pip install google-generativeai",
            envVars: ["GOOGLE_API_KEY=your-api-key"],
            code: `import google.generativeai as genai
import os

genai.configure(api_key=os.environ["GOOGLE_API_KEY"])

def chat(message: str) -> str:
    model = genai.GenerativeModel("gemini-2.0-flash")
    response = model.generate_content(message)
    return response.text`,
            usage: `answer = chat("What is the capital of France?")
print(answer)`,
        },
    },
};
|
// Type declarations for tools/get-recommendation.js (compiler-generated
// shapes; keep in sync with the implementation rather than hand-editing).
export declare const getRecommendationTool: {
    name: string;
    description: string;
    inputSchema: {
        type: "object";
        properties: {
            prompt: {
                type: string;
                description: string;
            };
            budget: {
                type: string;
                enum: string[];
                description: string;
            };
            priority: {
                type: string;
                enum: string[];
                description: string;
            };
        };
        required: string[];
    };
};
// Resolves to either a success payload (no `isError`) or a failure payload
// (`isError: boolean`), each carrying text content blocks.
export declare function handleGetRecommendation(args: unknown): Promise<{
    content: {
        type: string;
        text: string;
    }[];
    isError?: undefined;
} | {
    content: {
        type: string;
        text: string;
    }[];
    isError: boolean;
}>;
|
import { z } from "zod";

// Base URL of the ArchitectGBT API; ARCHITECTGBT_API_URL overrides the
// public endpoint (useful for self-hosted or local testing).
const API_BASE = process.env.ARCHITECTGBT_API_URL || "https://architectgbt.com";

// MCP tool descriptor advertised via tools/list (JSON-Schema input spec).
export const getRecommendationTool = {
    name: "get_ai_recommendation",
    description: "Analyze a project description and recommend the best AI model with pricing, reasoning, and alternatives. Use this when someone asks which AI model to use for their project.",
    inputSchema: {
        type: "object",
        properties: {
            prompt: {
                type: "string",
                description: "Description of what you want to build (e.g., 'customer support chatbot for e-commerce')",
            },
            budget: {
                type: "string",
                enum: ["low", "medium", "high", "unlimited"],
                description: "Budget constraint for API costs",
            },
            priority: {
                type: "string",
                enum: ["cost", "speed", "quality", "balanced"],
                description: "What matters most for this project",
            },
        },
        required: ["prompt"],
    },
};

// Runtime validation mirroring inputSchema above; only `prompt` is required.
const InputSchema = z.object({
    prompt: z.string(),
    budget: z.enum(["low", "medium", "high", "unlimited"]).optional(),
    priority: z.enum(["cost", "speed", "quality", "balanced"]).optional(),
});
/**
 * Tool-call handler for `get_ai_recommendation`.
 * Validates the arguments with InputSchema, POSTs them to the ArchitectGBT
 * recommend endpoint, and renders the JSON response as markdown text.
 * Network/API failures are returned as an `isError` payload, never thrown;
 * invalid arguments raise a zod error for the caller to surface.
 */
export async function handleGetRecommendation(args) {
    const input = InputSchema.parse(args);
    try {
        const payload = {
            prompt: input.prompt,
            budget: input.budget,
            priority: input.priority,
        };
        const response = await fetch(`${API_BASE}/api/recommend`, {
            method: "POST",
            headers: { "Content-Type": "application/json" },
            body: JSON.stringify(payload),
        });
        if (!response.ok) {
            throw new Error(`API error: ${response.status}`);
        }
        const data = await response.json();
        return {
            content: [{ type: "text", text: formatRecommendation(data) }],
        };
    }
    catch (error) {
        const message = error instanceof Error ? error.message : "Unknown error";
        return {
            content: [
                {
                    type: "text",
                    text: `Failed to get recommendation: ${message}. Please try again.`,
                },
            ],
            isError: true,
        };
    }
}
69
|
+
function formatRecommendation(data) {
|
|
70
|
+
const { recommendation, reasoning, alternatives, model } = data;
|
|
71
|
+
let result = `## 🎯 AI Model Recommendation\n\n`;
|
|
72
|
+
if (model) {
|
|
73
|
+
result += `### Recommended: ${model.name}\n`;
|
|
74
|
+
result += `- **Provider:** ${model.provider}\n`;
|
|
75
|
+
result += `- **Model ID:** ${model.model_id || "N/A"}\n`;
|
|
76
|
+
if (model.input_price || model.output_price) {
|
|
77
|
+
result += `- **Pricing:** $${model.input_price}/1M input, $${model.output_price}/1M output\n`;
|
|
78
|
+
}
|
|
79
|
+
if (model.context_window) {
|
|
80
|
+
result += `- **Context Window:** ${model.context_window.toLocaleString()} tokens\n`;
|
|
81
|
+
}
|
|
82
|
+
}
|
|
83
|
+
if (reasoning) {
|
|
84
|
+
result += `\n### Why This Model?\n${reasoning}\n`;
|
|
85
|
+
}
|
|
86
|
+
if (alternatives && alternatives.length > 0) {
|
|
87
|
+
result += `\n### Alternatives\n`;
|
|
88
|
+
alternatives.forEach((alt, i) => {
|
|
89
|
+
result += `${i + 1}. **${alt.name}** - ${alt.reason || alt.description || ""}\n`;
|
|
90
|
+
});
|
|
91
|
+
}
|
|
92
|
+
result += `\n---\n*Powered by [ArchitectGBT](https://architectgbt.com)*`;
|
|
93
|
+
return result;
|
|
94
|
+
}
|
|
// Type declarations for tools/get-template.js (compiler-generated shapes;
// keep in sync with the implementation rather than hand-editing).
export declare const getTemplateTool: {
    name: string;
    description: string;
    inputSchema: {
        type: "object";
        properties: {
            model: {
                type: string;
                description: string;
            };
            language: {
                type: string;
                enum: string[];
                description: string;
            };
        };
        required: string[];
    };
};
// Always resolves with text content; a template-lookup miss is reported
// inside the text rather than via an error flag.
export declare function handleGetTemplate(args: unknown): Promise<{
    content: {
        type: string;
        text: string;
    }[];
}>;
|
import { z } from "zod";
import { templates } from "../templates/index.js";

// MCP tool descriptor advertised via tools/list (JSON-Schema input spec).
export const getTemplateTool = {
    name: "get_code_template",
    description: "Get a production-ready code template for integrating a specific AI model. Returns working code with setup instructions.",
    inputSchema: {
        type: "object",
        properties: {
            model: {
                type: "string",
                description: "The AI model name (e.g., 'Claude', 'GPT-4', 'Gemini')",
            },
            language: {
                type: "string",
                enum: ["typescript", "python"],
                description: "Programming language for the template",
            },
        },
        required: ["model"],
    },
};

// Runtime validation; `language` defaults to TypeScript when omitted.
const InputSchema = z.object({
    model: z.string(),
    language: z.enum(["typescript", "python"]).default("typescript"),
});
/**
 * Tool-call handler for `get_code_template`.
 * Maps a free-form model name onto one of the bundled provider templates
 * (substring match, case-insensitive) and renders the install / env /
 * code / usage sections as one markdown document.
 */
export async function handleGetTemplate(args) {
    const input = InputSchema.parse(args);
    const modelKey = input.model.toLowerCase();
    const lang = input.language;
    // Keyword → provider routing table; first matching row wins.
    const routes = [
        [["claude", "anthropic"], templates.anthropic],
        [["gpt", "openai"], templates.openai],
        [["gemini", "google"], templates.google],
    ];
    let template = null;
    for (const [keywords, provider] of routes) {
        if (keywords.some((kw) => modelKey.includes(kw))) {
            template = provider[lang];
            break;
        }
    }
    if (!template) {
        return {
            content: [
                {
                    type: "text",
                    text: `No template found for "${input.model}" in ${lang}. Available: Claude/Anthropic, GPT/OpenAI, Gemini/Google.`,
                },
            ],
        };
    }
    // Assemble the markdown line by line; joined output is identical to the
    // previous single template literal.
    const sections = [
        `## 📝 Code Template: ${input.model} (${lang})`,
        ``,
        `### Installation`,
        `\`\`\`bash`,
        template.install,
        `\`\`\``,
        ``,
        `### Environment Variables`,
        `\`\`\``,
        template.envVars.join("\n"),
        `\`\`\``,
        ``,
        `### Code`,
        `\`\`\`${lang}`,
        template.code,
        `\`\`\``,
        ``,
        `### Usage Example`,
        `\`\`\`${lang}`,
        template.usage,
        `\`\`\``,
        ``,
        `---`,
        `*Powered by [ArchitectGBT](https://architectgbt.com)*`,
    ];
    return {
        content: [{ type: "text", text: sections.join("\n") }],
    };
}
|
// Type declarations for tools/list-models.js (compiler-generated shapes;
// keep in sync with the implementation rather than hand-editing).
export declare const listModelsTool: {
    name: string;
    description: string;
    inputSchema: {
        type: "object";
        properties: {
            provider: {
                type: string;
                enum: string[];
                description: string;
            };
            limit: {
                type: string;
                description: string;
            };
        };
        required: never[];
    };
};
// Resolves to either a success payload (no `isError`) or a failure payload
// (`isError: boolean`), each carrying text content blocks.
export declare function handleListModels(args: unknown): Promise<{
    content: {
        type: string;
        text: string;
    }[];
    isError?: undefined;
} | {
    content: {
        type: string;
        text: string;
    }[];
    isError: boolean;
}>;
|
import { z } from "zod";

// Base URL of the ArchitectGBT API; ARCHITECTGBT_API_URL overrides the
// public endpoint (useful for self-hosted or local testing).
const API_BASE = process.env.ARCHITECTGBT_API_URL || "https://architectgbt.com";

// MCP tool descriptor advertised via tools/list (JSON-Schema input spec).
export const listModelsTool = {
    name: "list_models",
    description: "List available AI models with optional filtering by provider or capability.",
    inputSchema: {
        type: "object",
        properties: {
            provider: {
                type: "string",
                enum: ["OpenAI", "Anthropic", "Google", "Meta", "Mistral", "all"],
                description: "Filter by provider",
            },
            limit: {
                type: "number",
                description: "Maximum number of models to return (default: 10)",
            },
        },
        required: [],
    },
};

// Runtime validation; `limit` defaults to 10, provider is optional.
const InputSchema = z.object({
    provider: z
        .enum(["OpenAI", "Anthropic", "Google", "Meta", "Mistral", "all"])
        .optional(),
    limit: z.number().default(10),
});

/**
 * Tool-call handler for `list_models`.
 * Fetches the model catalog, applies the optional provider filter and the
 * limit, and renders a markdown pricing table.
 * Failures are returned as an `isError` payload, never thrown; invalid
 * arguments raise a zod error for the caller to surface.
 */
export async function handleListModels(args) {
    const input = InputSchema.parse(args);
    try {
        const response = await fetch(`${API_BASE}/api/models`);
        if (!response.ok) {
            throw new Error(`API error: ${response.status}`);
        }
        let models = await response.json();
        // ROBUSTNESS: the code below assumes an array; fail with a clear
        // message instead of a TypeError if the API shape ever changes.
        if (!Array.isArray(models)) {
            throw new Error("Unexpected API response: expected an array of models");
        }
        // Filter by provider if specified (case-insensitive).
        if (input.provider && input.provider !== "all") {
            const wanted = input.provider.toLowerCase();
            models = models.filter((m) => m.provider?.toLowerCase() === wanted);
        }
        // Limit results.
        models = models.slice(0, input.limit);
        // Format as a markdown table.
        let result = `## 📊 Available AI Models\n\n`;
        result += `| Model | Provider | Input $/1M | Output $/1M |\n`;
        result += `|-------|----------|------------|-------------|\n`;
        models.forEach((m) => {
            // BUG FIX: `price || "?"` rendered a legitimate price of 0 as
            // "$?"; `??` only falls back when the price is null/undefined.
            result += `| ${m.name} | ${m.provider} | $${m.input_price ?? "?"} | $${m.output_price ?? "?"} |\n`;
        });
        result += `\n*Showing ${models.length} models. Use \`get_ai_recommendation\` for personalized suggestions.*`;
        return {
            content: [{ type: "text", text: result }],
        };
    }
    catch (error) {
        const message = error instanceof Error ? error.message : "Unknown error";
        return {
            content: [{ type: "text", text: `Failed to list models: ${message}` }],
            isError: true,
        };
    }
}
|
package/package.json
ADDED
|
@@ -0,0 +1,31 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "architectgbt-mcp",
|
|
3
|
+
"version": "0.1.0",
|
|
4
|
+
"description": "MCP server for AI model recommendations from ArchitectGBT",
|
|
5
|
+
"type": "module",
|
|
6
|
+
"main": "dist/index.js",
|
|
7
|
+
"bin": {
|
|
8
|
+
"architectgbt-mcp": "./dist/index.js"
|
|
9
|
+
},
|
|
10
|
+
"scripts": {
|
|
11
|
+
"build": "tsc",
|
|
12
|
+
"start": "node dist/index.js",
|
|
13
|
+
"dev": "tsx src/index.ts"
|
|
14
|
+
},
|
|
15
|
+
"keywords": ["mcp", "ai", "model", "recommendation", "architectgbt", "cursor", "claude"],
|
|
16
|
+
"author": "ArchitectGBT",
|
|
17
|
+
"license": "MIT",
|
|
18
|
+
"dependencies": {
|
|
19
|
+
"@modelcontextprotocol/sdk": "^1.0.0",
|
|
20
|
+
"zod": "^3.22.0"
|
|
21
|
+
},
|
|
22
|
+
"devDependencies": {
|
|
23
|
+
"@types/node": "^20.0.0",
|
|
24
|
+
"tsx": "^4.0.0",
|
|
25
|
+
"typescript": "^5.0.0"
|
|
26
|
+
},
|
|
27
|
+
"files": ["dist"],
|
|
28
|
+
"engines": {
|
|
29
|
+
"node": ">=18"
|
|
30
|
+
}
|
|
31
|
+
}
|