architectgbt-mcp 0.2.0 → 0.2.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CURSOR_SETUP.md +135 -0
- package/TESTING.md +138 -0
- package/dist/tools/list-models.js +21 -9
- package/package.json +26 -13
- package/src/index.ts +64 -0
- package/src/templates/index.ts +123 -0
- package/src/tools/get-recommendation.ts +161 -0
- package/src/tools/get-template.ts +87 -0
- package/src/tools/list-models.ts +104 -0
- package/tsconfig.json +16 -0
package/CURSOR_SETUP.md
ADDED
@@ -0,0 +1,135 @@
# Cursor IDE Setup for ArchitectGBT MCP

## Quick Start

1. **Open Cursor Settings**
   - Press `Ctrl+Shift+J` (Windows/Linux) or `Cmd+Shift+J` (Mac)
   - Or go to: Settings → Features → Model Context Protocol

2. **Add MCP Server Configuration**

   Create or edit `.cursor/mcp.json` in your project:

   ```json
   {
     "mcpServers": {
       "architectgbt": {
         "command": "npx",
         "args": ["-y", "architectgbt-mcp@0.2.0"]
       }
     }
   }
   ```

3. **Restart Cursor**
   - Close and reopen Cursor to load the MCP server

## Usage

### Free Tier (No API Key)
Get 3 AI model recommendations per day without authentication:

```
Ask Claude: "recommend an AI model for building a chatbot with 100k daily users"
```

The MCP server will automatically use the anonymous endpoint.

### Pro Tier (Unlimited Access)

1. **Generate API Key**
   - Sign in at [architectgbt.com](https://architectgbt.com)
   - Go to Settings → API Keys
   - Click "Generate New Key"
   - Copy the key (shown only once!)

2. **Set Environment Variable**

   **Windows (PowerShell):**
   ```powershell
   $env:ARCHITECTGBT_API_KEY = "agbt_your_key_here"
   ```

   **macOS/Linux:**
   ```bash
   export ARCHITECTGBT_API_KEY=agbt_your_key_here
   ```

   **Permanent Setup:**
   - Windows: Add to System Environment Variables
   - macOS/Linux: Add to `~/.bashrc` or `~/.zshrc`

3. **Restart Cursor** with environment variable loaded
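Alternatively, you can put the key in the MCP config itself instead of relying on a shell environment variable. The server's own rate-limit message (see `src/tools/get-recommendation.ts` below) suggests exactly this shape; a sketch, with a placeholder key:

```json
{
  "mcpServers": {
    "architectgbt": {
      "command": "npx",
      "args": ["-y", "architectgbt-mcp"],
      "env": {
        "ARCHITECTGBT_API_KEY": "your_api_key_here"
      }
    }
  }
}
```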
## Available Tools

### 1. list_models
Lists all available AI models with pricing

**Example:**
```
"Show me all available AI models"
"What models support vision?"
```

### 2. get_ai_recommendation
Get personalized model recommendations based on requirements

**Example:**
```
"Recommend a model for:
- Building a code assistant
- Budget: $500/month
- Needs: function calling, code completion"
```
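Behind the scenes, a request like the one above is translated into a structured tool call. Going by the input schema in `src/tools/get-recommendation.ts`, the arguments look roughly like this (values are illustrative; only `prompt` is required, `budget` and `priority` are optional enums):

```json
{
  "prompt": "code assistant with function calling and code completion",
  "budget": "medium",
  "priority": "balanced"
}
```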
### 3. get_code_template
Get ready-to-use code templates

**Example:**
```
"Show me the Claude template in TypeScript"
"Get the OpenAI template in Python"
```

## Pricing

- **Free**: 3 recommendations/day (no API key needed)
- **Pro Monthly ($15)**: 200 credits + unlimited MCP
- **Pro Annual ($150)**: 150 credits/month + unlimited MCP

[View full pricing →](https://architectgbt.com#pricing)

## Troubleshooting

### MCP Server Not Found
```bash
# Force install latest version
npx -y architectgbt-mcp@latest
```

### Rate Limited (Free Tier)
```
Error: Rate limit exceeded. Limit: 3 requests per day.
```
- Wait 24 hours, or
- Upgrade to Pro for unlimited access

### API Key Invalid
```
Error: Invalid API key
```
- Check the key is correct (starts with `agbt_`)
- Verify the environment variable is set
- Restart Cursor after setting the variable

### Connection Failed
- Check internet connection
- Verify firewall allows npm/npx
- Try manual test: `npx -y architectgbt-mcp@0.2.0`

## Support

- 📧 Email: support@architectgbt.com
- 🐛 Issues: [github.com/3rdbrain/architectgbt-mcp](https://github.com/3rdbrain/architectgbt-mcp/issues)
- 📚 Docs: [architectgbt.com/docs](https://architectgbt.com)
package/TESTING.md
ADDED
@@ -0,0 +1,138 @@
# Testing ArchitectGBT MCP Integration

## Quick Test Guide

### 1. Set Your API Key

**Windows (PowerShell):**
```powershell
$env:ARCHITECTGBT_API_KEY = "your_api_key_here"
```

**macOS/Linux:**
```bash
export ARCHITECTGBT_API_KEY=your_api_key_here
```

### 2. Configure Cursor IDE

Create or edit `.cursor/mcp.json` in your project:

```json
{
  "mcpServers": {
    "architectgbt": {
      "command": "npx",
      "args": ["-y", "architectgbt-mcp@0.2.0"]
    }
  }
}
```

### 3. Restart Cursor

Close and reopen Cursor IDE to load the MCP server with your API key.

### 4. Test Commands

#### Test 1: List Models
Ask Claude in Cursor:
```
Show me all available AI models
```

Expected: List of models with pricing

#### Test 2: Get Recommendation
Ask Claude:
```
Recommend an AI model for building a chatbot with 100k daily users and $500/month budget
```

Expected: Top 3 model recommendations with cost breakdown

#### Test 3: Get Template
Ask Claude:
```
Show me the Claude template in TypeScript
```

Expected: Template code and setup instructions

### 5. Verify Unlimited Access

With your API key set:
- ✅ No rate limits (unlimited recommendations)
- ✅ Usage tracked in your dashboard
- ✅ `last_used_at` updates in Settings → API Keys

### 6. Test Without API Key

Remove the environment variable:
```powershell
Remove-Item Env:\ARCHITECTGBT_API_KEY
```
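(On macOS/Linux, the equivalent is `unset ARCHITECTGBT_API_KEY` in the shell that launches Cursor.)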
Restart Cursor and try again:
- ✅ First 3 requests work (anonymous tier)
- ✅ 4th request shows rate limit error

## Troubleshooting

### API Key Not Working

1. **Check environment variable is set:**
   ```powershell
   echo $env:ARCHITECTGBT_API_KEY
   ```

2. **Verify format:**
   - Must start with `agbt_`
   - Should be 37+ characters long

3. **Check Cursor loaded it:**
   - Restart Cursor after setting the variable
   - Check Cursor's MCP logs

### MCP Server Not Found

```powershell
# Test MCP server directly
npx -y architectgbt-mcp@0.2.0
```

### Still Getting Rate Limited

- Verify API key is in environment variables
- Check it matches the key in your dashboard
- Ensure Cursor was restarted after setting the variable

## Production Testing Checklist

- [ ] Anonymous access works (3/day limit)
- [ ] API key provides unlimited access
- [ ] Usage count increments in dashboard
- [ ] last_used_at updates after each request
- [ ] Rate limit resets after 24 hours
- [ ] All 3 tools work (list_models, get_ai_recommendation, get_code_template)
- [ ] Error messages are user-friendly
- [ ] MCP works in both Cursor and Claude Desktop

## API Endpoints Being Used

When using MCP with an API key:
- **GET** `https://architectgbt.com/api/models` - List models
- **POST** `https://architectgbt.com/api/recommend` - Get recommendations

Headers sent:
```
Authorization: Bearer agbt_your_key_here
Content-Type: application/json
```
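To sanity-check a key outside of Cursor, you can replay the same request the MCP server sends (see `src/tools/get-recommendation.ts`). A minimal sketch, assuming Node 18+ for the built-in `fetch`; run with `npx tsx check-key.ts` (the file name and prompt are illustrative):

```typescript
// check-key.ts — replays the POST the MCP server makes for recommendations.
const apiKey = process.env.ARCHITECTGBT_API_KEY;

// The server uses /api/recommend with a key and /api/recommend/public without one.
const endpoint = apiKey
  ? "https://architectgbt.com/api/recommend"
  : "https://architectgbt.com/api/recommend/public";

const response = await fetch(endpoint, {
  method: "POST",
  headers: {
    "Content-Type": "application/json",
    // Authorization is only sent when a key is present, mirroring the server code.
    ...(apiKey ? { Authorization: `Bearer ${apiKey}` } : {}),
  },
  body: JSON.stringify({
    prompt: "customer support chatbot for e-commerce",
    budget: "medium",     // optional: low | medium | high | unlimited
    priority: "balanced", // optional: cost | speed | quality | balanced
  }),
});

console.log("status:", response.status);
console.log("remaining today:", response.headers.get("X-RateLimit-Remaining"));
console.log(await response.json());
```

A 401 points to an invalid key, and a 429 means the free-tier daily limit was hit — the same cases the MCP tool reports.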
## Next Steps

1. Monitor usage in Settings → API Keys
2. Check MCP usage analytics (coming soon)
3. Share MCP package with team members
4. Upgrade to Pro for unlimited access ($15/month)
package/dist/tools/list-models.js
CHANGED
@@ -48,17 +48,29 @@ export async function handleListModels(args) {
     }
     // Limit results
     models = models.slice(0, input.limit);
-    //
-
-    result += `| Model | Provider | Input $/1M | Output $/1M |\n`;
-    result += `|-------|----------|------------|-------------|\n`;
+    // Group by provider for better readability
+    const groupedModels = {};
     models.forEach((m) => {
-
-
-
-
+        const provider = m.provider || "Unknown";
+        if (!groupedModels[provider]) {
+            groupedModels[provider] = [];
+        }
+        groupedModels[provider].push(m);
     });
-
+    // Format output with grouping
+    let result = `Available AI Models (${models.length} total)\n`;
+    result += `${"=".repeat(70)}\n\n`;
+    Object.entries(groupedModels).forEach(([provider, providerModels]) => {
+        result += `${provider}:\n`;
+        providerModels.forEach((m) => {
+            const inputPer1M = m.input_cost_per_1k ? (m.input_cost_per_1k * 1000).toFixed(2) : "?";
+            const outputPer1M = m.output_cost_per_1k ? (m.output_cost_per_1k * 1000).toFixed(2) : "?";
+            result += `  • ${m.name.padEnd(30)} $${inputPer1M.padStart(6)} / $${outputPer1M.padStart(6)} (in/out per 1M tokens)\n`;
+        });
+        result += `\n`;
+    });
+    result += `${"=".repeat(70)}\n`;
+    result += `💡 Tip: Use get_ai_recommendation for personalized model suggestions based on your needs.`;
     return {
         content: [{ type: "text", text: result }],
     };
package/package.json
CHANGED
@@ -1,31 +1,44 @@
 {
   "name": "architectgbt-mcp",
-  "version": "0.2.
-  "description": "
+  "version": "0.2.1",
+  "description": "Model Context Protocol server for ArchitectGBT - AI architecture recommendations",
   "type": "module",
-  "main": "dist/index.js",
   "bin": {
     "architectgbt-mcp": "./dist/index.js"
   },
+  "main": "./dist/index.js",
   "scripts": {
-    "build": "tsc",
-    "
-    "dev": "tsx src/index.ts"
+    "build": "tsc && node -e \"require('fs').chmodSync('./dist/index.js', '755')\"",
+    "prepare": "npm run build",
+    "dev": "tsx src/index.ts",
+    "test": "echo 'No tests yet'"
   },
-  "keywords": [
+  "keywords": [
+    "mcp",
+    "model-context-protocol",
+    "ai",
+    "architecture",
+    "recommendations",
+    "openai",
+    "anthropic",
+    "claude"
+  ],
   "author": "ArchitectGBT",
   "license": "MIT",
+  "repository": {
+    "type": "git",
+    "url": "https://github.com/yourusername/architectgbt-mcp.git"
+  },
   "dependencies": {
     "@modelcontextprotocol/sdk": "^1.0.0",
-    "zod": "^3.
+    "zod": "^3.23.8"
   },
   "devDependencies": {
     "@types/node": "^20.0.0",
-    "tsx": "^4.
-    "typescript": "^5.
+    "tsx": "^4.7.0",
+    "typescript": "^5.3.0"
   },
-  "files": ["dist"],
   "engines": {
-    "node": ">=18"
+    "node": ">=18.0.0"
   }
-}
+}
package/src/index.ts
ADDED
@@ -0,0 +1,64 @@
#!/usr/bin/env node

import { Server } from "@modelcontextprotocol/sdk/server/index.js";
import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
import {
  CallToolRequestSchema,
  ListToolsRequestSchema,
} from "@modelcontextprotocol/sdk/types.js";

import { getRecommendationTool, handleGetRecommendation } from "./tools/get-recommendation.js";
import { getTemplateTool, handleGetTemplate } from "./tools/get-template.js";
import { listModelsTool, handleListModels } from "./tools/list-models.js";

const server = new Server(
  {
    name: "architectgbt-mcp",
    version: "0.1.0",
  },
  {
    capabilities: {
      tools: {},
    },
  }
);

// List available tools
server.setRequestHandler(ListToolsRequestSchema, async () => {
  return {
    tools: [getRecommendationTool, getTemplateTool, listModelsTool],
  };
});

// Handle tool calls
server.setRequestHandler(CallToolRequestSchema, async (request) => {
  const { name, arguments: args } = request.params;

  try {
    switch (name) {
      case "get_ai_recommendation":
        return await handleGetRecommendation(args);
      case "get_code_template":
        return await handleGetTemplate(args);
      case "list_models":
        return await handleListModels(args);
      default:
        throw new Error(`Unknown tool: ${name}`);
    }
  } catch (error) {
    const message = error instanceof Error ? error.message : "Unknown error";
    return {
      content: [{ type: "text", text: `Error: ${message}` }],
      isError: true,
    };
  }
});

// Start the server
async function main() {
  const transport = new StdioServerTransport();
  await server.connect(transport);
  console.error("ArchitectGBT MCP server running on stdio");
}

main().catch(console.error);
package/src/templates/index.ts
ADDED
@@ -0,0 +1,123 @@
export const templates: Record<string, Record<string, {
  install: string;
  envVars: string[];
  code: string;
  usage: string;
}>> = {
  anthropic: {
    typescript: {
      install: "npm install @anthropic-ai/sdk",
      envVars: ["ANTHROPIC_API_KEY=your-api-key"],
      code: `import Anthropic from "@anthropic-ai/sdk";

const client = new Anthropic({
  apiKey: process.env.ANTHROPIC_API_KEY,
});

export async function chat(message: string): Promise<string> {
  const response = await client.messages.create({
    model: "claude-sonnet-4-20250514",
    max_tokens: 1024,
    messages: [{ role: "user", content: message }],
  });

  const textBlock = response.content[0];
  if (textBlock.type === "text") {
    return textBlock.text;
  }
  throw new Error("Unexpected response type");
}`,
      usage: `const answer = await chat("What is the capital of France?");
console.log(answer);`,
    },
    python: {
      install: "pip install anthropic",
      envVars: ["ANTHROPIC_API_KEY=your-api-key"],
      code: `import anthropic

client = anthropic.Anthropic()

def chat(message: str) -> str:
    response = client.messages.create(
        model="claude-sonnet-4-20250514",
        max_tokens=1024,
        messages=[{"role": "user", "content": message}]
    )
    return response.content[0].text`,
      usage: `answer = chat("What is the capital of France?")
print(answer)`,
    },
  },

  openai: {
    typescript: {
      install: "npm install openai",
      envVars: ["OPENAI_API_KEY=your-api-key"],
      code: `import OpenAI from "openai";

const client = new OpenAI({
  apiKey: process.env.OPENAI_API_KEY,
});

export async function chat(message: string): Promise<string> {
  const response = await client.chat.completions.create({
    model: "gpt-4o",
    messages: [{ role: "user", content: message }],
  });

  return response.choices[0].message.content || "";
}`,
      usage: `const answer = await chat("What is the capital of France?");
console.log(answer);`,
    },
    python: {
      install: "pip install openai",
      envVars: ["OPENAI_API_KEY=your-api-key"],
      code: `from openai import OpenAI

client = OpenAI()

def chat(message: str) -> str:
    response = client.chat.completions.create(
        model="gpt-4o",
        messages=[{"role": "user", "content": message}]
    )
    return response.choices[0].message.content`,
      usage: `answer = chat("What is the capital of France?")
print(answer)`,
    },
  },

  google: {
    typescript: {
      install: "npm install @google/generative-ai",
      envVars: ["GOOGLE_API_KEY=your-api-key"],
      code: `import { GoogleGenerativeAI } from "@google/generative-ai";

const genAI = new GoogleGenerativeAI(process.env.GOOGLE_API_KEY!);

export async function chat(message: string): Promise<string> {
  const model = genAI.getGenerativeModel({ model: "gemini-2.0-flash" });
  const result = await model.generateContent(message);
  return result.response.text();
}`,
      usage: `const answer = await chat("What is the capital of France?");
console.log(answer);`,
    },
    python: {
      install: "pip install google-generativeai",
      envVars: ["GOOGLE_API_KEY=your-api-key"],
      code: `import google.generativeai as genai
import os

genai.configure(api_key=os.environ["GOOGLE_API_KEY"])

def chat(message: str) -> str:
    model = genai.GenerativeModel("gemini-2.0-flash")
    response = model.generate_content(message)
    return response.text`,
      usage: `answer = chat("What is the capital of France?")
print(answer)`,
    },
  },
};
package/src/tools/get-recommendation.ts
ADDED
@@ -0,0 +1,161 @@
import { z } from "zod";

const API_BASE = process.env.ARCHITECTGBT_API_URL || "https://architectgbt.com";
const API_KEY = process.env.ARCHITECTGBT_API_KEY;

export const getRecommendationTool = {
  name: "get_ai_recommendation",
  description:
    "Analyze a project description and recommend the best AI model with pricing, reasoning, and alternatives. Free tier: 3 recommendations/day. Add ARCHITECTGBT_API_KEY for unlimited access.",
  inputSchema: {
    type: "object" as const,
    properties: {
      prompt: {
        type: "string",
        description:
          "Description of what you want to build (e.g., 'customer support chatbot for e-commerce')",
      },
      budget: {
        type: "string",
        enum: ["low", "medium", "high", "unlimited"],
        description: "Budget constraint for API costs",
      },
      priority: {
        type: "string",
        enum: ["cost", "speed", "quality", "balanced"],
        description: "What matters most for this project",
      },
    },
    required: ["prompt"],
  },
};

const InputSchema = z.object({
  prompt: z.string(),
  budget: z.enum(["low", "medium", "high", "unlimited"]).optional(),
  priority: z.enum(["cost", "speed", "quality", "balanced"]).optional(),
});

export async function handleGetRecommendation(args: unknown) {
  const input = InputSchema.parse(args);

  try {
    // Determine which endpoint to use
    const endpoint = API_KEY ? `${API_BASE}/api/recommend` : `${API_BASE}/api/recommend/public`;

    // Build headers
    const headers: HeadersInit = {
      "Content-Type": "application/json",
    };

    // Add API key if available
    if (API_KEY) {
      headers["Authorization"] = `Bearer ${API_KEY}`;
    }

    const response = await fetch(endpoint, {
      method: "POST",
      headers,
      body: JSON.stringify({
        prompt: input.prompt,
        budget: input.budget,
        priority: input.priority,
      }),
    });

    if (!response.ok) {
      // Handle rate limiting for free tier
      if (response.status === 429) {
        const data = await response.json();
        const resetHeader = response.headers.get('X-RateLimit-Reset');
        const resetTime = resetHeader ? new Date(resetHeader).toLocaleString() : 'tomorrow';

        return {
          content: [
            {
              type: "text",
              text: `🚫 **Daily Limit Reached**\n\n${data.error?.message || 'You\'ve used all 3 free recommendations today.'}\n\nResets at: ${resetTime}\n\n**Get unlimited access:**\n1. Visit https://architectgbt.com\n2. Sign up for free (10 recommendations/month)\n3. Generate an API key from Settings\n4. Add to your MCP config:\n\`\`\`json\n{\n  "mcpServers": {\n    "architectgbt": {\n      "command": "npx",\n      "args": ["-y", "architectgbt-mcp"],\n      "env": {\n        "ARCHITECTGBT_API_KEY": "your_api_key_here"\n      }\n    }\n  }\n}\n\`\`\`\n\n💡 Pro tip: Upgrade to Pro ($15/mo) for unlimited recommendations!`,
            },
          ],
        };
      }

      // Handle authentication requirement
      if (response.status === 401 || response.status === 405) {
        return {
          content: [
            {
              type: "text",
              text: `❌ **API Key Invalid or Expired**\n\nYour API key is not valid. To fix this:\n\n1. Visit https://architectgbt.com/settings\n2. Generate a new API key\n3. Update your MCP config\n\n**Without an API key:**\nYou can still use the free tier (3 recommendations/day). Remove the ARCHITECTGBT_API_KEY from your config.`,
            },
          ],
        };
      }
      throw new Error(`API error: ${response.status}`);
    }

    const data = await response.json();

    // Show rate limit info if present
    const remaining = response.headers.get('X-RateLimit-Remaining');
    const limit = response.headers.get('X-RateLimit-Limit');

    // Format the response nicely
    let result = formatRecommendation(data);

    // Add rate limit footer for free tier
    if (remaining !== null && limit !== null && !API_KEY) {
      result += `\n\n---\n📊 **Free Tier:** ${remaining}/${limit} recommendations remaining today\n💎 Get unlimited access at https://architectgbt.com`;
    }

    return {
      content: [{ type: "text", text: result }],
    };
  } catch (error) {
    const message = error instanceof Error ? error.message : "Unknown error";
    return {
      content: [
        {
          type: "text",
          text: `Failed to get recommendation: ${message}. Please try again.`,
        },
      ],
      isError: true,
    };
  }
}

function formatRecommendation(data: any): string {
  const { recommendation, reasoning, alternatives, model } = data;

  let result = `## 🎯 AI Model Recommendation\n\n`;

  if (model) {
    result += `### Recommended: ${model.name}\n`;
    result += `- **Provider:** ${model.provider}\n`;
    result += `- **Model ID:** ${model.model_id || "N/A"}\n`;

    if (model.input_price || model.output_price) {
      result += `- **Pricing:** $${model.input_price}/1M input, $${model.output_price}/1M output\n`;
    }

    if (model.context_window) {
      result += `- **Context Window:** ${model.context_window.toLocaleString()} tokens\n`;
    }
  }

  if (reasoning) {
    result += `\n### Why This Model?\n${reasoning}\n`;
  }

  if (alternatives && alternatives.length > 0) {
    result += `\n### Alternatives\n`;
    alternatives.forEach((alt: any, i: number) => {
      result += `${i + 1}. **${alt.name}** - ${alt.reason || alt.description || ""}\n`;
    });
  }

  result += `\n---\n*Powered by [ArchitectGBT](https://architectgbt.com)*`;

  return result;
}
package/src/tools/get-template.ts
ADDED
@@ -0,0 +1,87 @@
import { z } from "zod";
import { templates } from "../templates/index.js";

export const getTemplateTool = {
  name: "get_code_template",
  description:
    "Get a production-ready code template for integrating a specific AI model. Returns working code with setup instructions.",
  inputSchema: {
    type: "object" as const,
    properties: {
      model: {
        type: "string",
        description:
          "The AI model name (e.g., 'Claude', 'GPT-4', 'Gemini')",
      },
      language: {
        type: "string",
        enum: ["typescript", "python"],
        description: "Programming language for the template",
      },
    },
    required: ["model"],
  },
};

const InputSchema = z.object({
  model: z.string(),
  language: z.enum(["typescript", "python"]).default("typescript"),
});

export async function handleGetTemplate(args: unknown) {
  const input = InputSchema.parse(args);

  const modelKey = input.model.toLowerCase();
  const lang = input.language;

  // Find matching template
  let template = null;

  if (modelKey.includes("claude") || modelKey.includes("anthropic")) {
    template = templates.anthropic[lang];
  } else if (modelKey.includes("gpt") || modelKey.includes("openai")) {
    template = templates.openai[lang];
  } else if (modelKey.includes("gemini") || modelKey.includes("google")) {
    template = templates.google[lang];
  }

  if (!template) {
    return {
      content: [
        {
          type: "text",
          text: `No template found for "${input.model}" in ${lang}. Available: Claude/Anthropic, GPT/OpenAI, Gemini/Google.`,
        },
      ],
    };
  }

  const result = `## 📝 Code Template: ${input.model} (${lang})

### Installation
\`\`\`bash
${template.install}
\`\`\`

### Environment Variables
\`\`\`
${template.envVars.join("\n")}
\`\`\`

### Code
\`\`\`${lang}
${template.code}
\`\`\`

### Usage Example
\`\`\`${lang}
${template.usage}
\`\`\`

---
*Powered by [ArchitectGBT](https://architectgbt.com)*`;

  return {
    content: [{ type: "text", text: result }],
  };
}
package/src/tools/list-models.ts
ADDED
@@ -0,0 +1,104 @@
import { z } from "zod";

const API_BASE = process.env.ARCHITECTGBT_API_URL || "https://architectgbt.com";

export const listModelsTool = {
  name: "list_models",
  description:
    "List available AI models with optional filtering by provider or capability.",
  inputSchema: {
    type: "object" as const,
    properties: {
      provider: {
        type: "string",
        enum: ["OpenAI", "Anthropic", "Google", "Meta", "Mistral", "all"],
        description: "Filter by provider",
      },
      limit: {
        type: "number",
        description: "Maximum number of models to return (default: 10)",
      },
    },
    required: [],
  },
};

const InputSchema = z.object({
  provider: z
    .enum(["OpenAI", "Anthropic", "Google", "Meta", "Mistral", "all"])
    .optional(),
  limit: z.number().default(10),
});

export async function handleListModels(args: unknown) {
  const input = InputSchema.parse(args);

  try {
    const response = await fetch(`${API_BASE}/api/models`);

    if (!response.ok) {
      throw new Error(`API error: ${response.status}`);
    }

    const responseData = await response.json();

    // Handle the API response structure: { success: true, data: [...] }
    let models = responseData.success && Array.isArray(responseData.data)
      ? responseData.data
      : Array.isArray(responseData)
        ? responseData
        : [];

    if (!Array.isArray(models)) {
      throw new Error(`Expected array but got ${typeof models}. API might have changed.`);
    }

    // Filter by provider if specified
    if (input.provider && input.provider !== "all") {
      models = models.filter(
        (m: any) =>
          m.provider?.toLowerCase() === input.provider?.toLowerCase()
      );
    }

    // Limit results
    models = models.slice(0, input.limit);

    // Group by provider for better readability
    const groupedModels: Record<string, any[]> = {};
    models.forEach((m: any) => {
      const provider = m.provider || "Unknown";
      if (!groupedModels[provider]) {
        groupedModels[provider] = [];
      }
      groupedModels[provider].push(m);
    });

    // Format output with grouping
    let result = `Available AI Models (${models.length} total)\n`;
    result += `${"=".repeat(70)}\n\n`;

    Object.entries(groupedModels).forEach(([provider, providerModels]) => {
      result += `${provider}:\n`;
      providerModels.forEach((m: any) => {
        const inputPer1M = m.input_cost_per_1k ? (m.input_cost_per_1k * 1000).toFixed(2) : "?";
        const outputPer1M = m.output_cost_per_1k ? (m.output_cost_per_1k * 1000).toFixed(2) : "?";
        result += `  • ${m.name.padEnd(30)} $${inputPer1M.padStart(6)} / $${outputPer1M.padStart(6)} (in/out per 1M tokens)\n`;
      });
      result += `\n`;
    });

    result += `${"=".repeat(70)}\n`;
    result += `💡 Tip: Use get_ai_recommendation for personalized model suggestions based on your needs.`;

    return {
      content: [{ type: "text", text: result }],
    };
  } catch (error) {
    const message = error instanceof Error ? error.message : "Unknown error";
    return {
      content: [{ type: "text", text: `Failed to list models: ${message}` }],
      isError: true,
    };
  }
}
package/tsconfig.json
ADDED
@@ -0,0 +1,16 @@
{
  "compilerOptions": {
    "target": "ES2022",
    "module": "NodeNext",
    "moduleResolution": "NodeNext",
    "outDir": "./dist",
    "rootDir": "./src",
    "strict": true,
    "esModuleInterop": true,
    "skipLibCheck": true,
    "forceConsistentCasingInFileNames": true,
    "declaration": true
  },
  "include": ["src/**/*"],
  "exclude": ["node_modules", "dist"]
}