architectgbt-mcp 0.2.0 → 0.2.2

@@ -0,0 +1,135 @@
1
+ # Cursor IDE Setup for ArchitectGBT MCP
2
+
3
+ ## Quick Start
4
+
5
+ 1. **Open Cursor Settings**
6
+ - Press `Ctrl+Shift+J` (Windows/Linux) or `Cmd+Shift+J` (Mac)
7
+ - Or go to: Settings → Features → Model Context Protocol
8
+
9
+ 2. **Add MCP Server Configuration**
10
+
11
+ Create or edit `.cursor/mcp.json` in your project:
12
+
13
+ ```json
14
+ {
15
+ "mcpServers": {
16
+ "architectgbt": {
17
+ "command": "npx",
18
+ "args": ["-y", "architectgbt-mcp@0.2.0"]
19
+ }
20
+ }
21
+ }
22
+ ```
23
+
24
+ 3. **Restart Cursor**
25
+ - Close and reopen Cursor to load the MCP server
26
+
27
+ ## Usage
28
+
29
+ ### Free Tier (No API Key)
30
+ Get 3 AI model recommendations per day without authentication:
31
+
32
+ ```
33
+ Ask Claude: "recommend an AI model for building a chatbot with 100k daily users"
34
+ ```
35
+
36
+ The MCP server will automatically use the anonymous endpoint.
37
+
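Under the hood, the endpoint is chosen by whether `ARCHITECTGBT_API_KEY` is present; a condensed excerpt from the recommendation tool source shown later in this diff:

```typescript
// Condensed from the recommendation tool source shown later in this diff:
// without a key, the public (rate-limited) endpoint is used.
const API_BASE = process.env.ARCHITECTGBT_API_URL || "https://architectgbt.com";
const API_KEY = process.env.ARCHITECTGBT_API_KEY;

const endpoint = API_KEY
  ? `${API_BASE}/api/recommend`         // authenticated, unlimited
  : `${API_BASE}/api/recommend/public`; // anonymous, 3 requests/day
```

Anonymous responses also carry `X-RateLimit-Remaining` and `X-RateLimit-Limit` headers, which the tool appends to its output so you can see how many free requests remain.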
38
+ ### Pro Tier (Unlimited Access)
39
+
40
+ 1. **Generate API Key**
41
+ - Sign in at [architectgbt.com](https://architectgbt.com)
42
+ - Go to Settings → API Keys
43
+ - Click "Generate New Key"
44
+ - Copy the key (shown only once!)
45
+
46
+ 2. **Set Environment Variable**
47
+
48
+ **Windows (PowerShell):**
49
+ ```powershell
50
+ $env:ARCHITECTGBT_API_KEY = "agbt_your_key_here"
51
+ ```
52
+
53
+ **macOS/Linux:**
54
+ ```bash
55
+ export ARCHITECTGBT_API_KEY=agbt_your_key_here
56
+ ```
57
+
58
+ **Permanent Setup:**
59
+ - Windows: Add to System Environment Variables
60
+ - macOS/Linux: Add to `~/.bashrc` or `~/.zshrc`
61
+
62
+ 3. **Restart Cursor** with environment variable loaded
63
+
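Once loaded, the key is read from the environment and attached to every API call as a Bearer token; a condensed excerpt from the tool source shown later in this diff:

```typescript
// Condensed from the recommendation tool source shown later in this diff.
const API_KEY = process.env.ARCHITECTGBT_API_KEY;

const headers: HeadersInit = { "Content-Type": "application/json" };
if (API_KEY) {
  headers["Authorization"] = `Bearer ${API_KEY}`;
}
```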
64
+ ## Available Tools
65
+
66
+ ### 1. list_models
67
+ Lists all available AI models with pricing
68
+
69
+ **Example:**
70
+ ```
71
+ "Show me all available AI models"
72
+ "What models support vision?"
73
+ ```
74
+
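Requests like these can also be narrowed with structured arguments; a sketch matching the `list_models` input schema defined later in this diff (the values here are illustrative):

```typescript
// Illustrative arguments for list_models; both fields are optional and
// limit defaults to 10 (see the tool's input schema later in this diff).
const listModelsArgs = {
  provider: "Anthropic", // "OpenAI" | "Anthropic" | "Google" | "Meta" | "Mistral" | "all"
  limit: 5,
};
```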
75
+ ### 2. get_ai_recommendation
76
+ Get personalized model recommendations based on requirements
77
+
78
+ **Example:**
79
+ ```
80
+ "Recommend a model for:
81
+ - Building a code assistant
82
+ - Budget: $500/month
83
+ - Needs: function calling, code completion"
84
+ ```
85
+
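Behind a prompt like this, the MCP client sends structured arguments; a sketch matching the `get_ai_recommendation` input schema later in this diff (the budget and priority values are illustrative assumptions):

```typescript
// Illustrative arguments for get_ai_recommendation; only `prompt` is required
// (see the tool's input schema later in this diff).
const recommendationArgs = {
  prompt: "code assistant with function calling and code completion",
  budget: "medium",     // "low" | "medium" | "high" | "unlimited"
  priority: "balanced", // "cost" | "speed" | "quality" | "balanced"
};
```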
86
+ ### 3. get_code_template
87
+ Get a ready-to-use integration template for a specific AI model (Claude, GPT, or Gemini) in TypeScript or Python
88
+
89
+ **Example:**
90
+ ```
91
+ "Show me the Next.js SaaS template"
92
+ "Get the FastAPI template"
93
+ ```
94
+
95
+ ## Pricing
96
+
97
+ - **Free**: 3 recommendations/day (no API key needed)
98
+ - **Pro Monthly ($15)**: 200 credits + unlimited MCP
99
+ - **Pro Annual ($150)**: 150 credits/month + unlimited MCP
100
+
101
+ [View full pricing →](https://architectgbt.com#pricing)
102
+
103
+ ## Troubleshooting
104
+
105
+ ### MCP Server Not Found
106
+ ```bash
107
+ # Force install latest version
108
+ npx -y architectgbt-mcp@latest
109
+ ```
110
+
111
+ ### Rate Limited (Free Tier)
112
+ ```
113
+ Error: Rate limit exceeded. Limit: 3 requests per day.
114
+ ```
115
+ - Wait 24 hours, or
116
+ - Upgrade to Pro for unlimited access
117
+
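For reference, this is how the server turns a 429 response into that message; condensed from the recommendation tool source shown later in this diff:

```typescript
// Condensed from the recommendation tool source shown later in this diff:
// the reset time comes from the X-RateLimit-Reset header when present.
function describeRateLimit(response: Response): string {
  const resetHeader = response.headers.get("X-RateLimit-Reset");
  const resetTime = resetHeader ? new Date(resetHeader).toLocaleString() : "tomorrow";
  return `Daily limit reached. Resets at: ${resetTime}`;
}
```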
118
+ ### API Key Invalid
119
+ ```
120
+ Error: Invalid API key
121
+ ```
122
+ - Check key is correct (starts with `agbt_`)
123
+ - Verify environment variable is set
124
+ - Restart Cursor after setting variable
125
+
126
+ ### Connection Failed
127
+ - Check internet connection
128
+ - Verify firewall allows npm/npx
129
+ - Try manual test: `npx -y architectgbt-mcp@0.2.0`
130
+
131
+ ## Support
132
+
133
+ - 📧 Email: support@architectgbt.com
134
+ - 🐛 Issues: [github.com/3rdbrain/architectgbt-mcp](https://github.com/3rdbrain/architectgbt-mcp/issues)
135
+ - 📚 Docs: [architectgbt.com/docs](https://architectgbt.com)
package/TESTING.md ADDED
@@ -0,0 +1,138 @@
1
+ # Testing ArchitectGBT MCP Integration
2
+
3
+ ## Quick Test Guide
4
+
5
+ ### 1. Set Your API Key
6
+
7
+ **Windows (PowerShell):**
8
+ ```powershell
9
+ $env:ARCHITECTGBT_API_KEY = "your_api_key_here"
10
+ ```
11
+
12
+ **macOS/Linux:**
13
+ ```bash
14
+ export ARCHITECTGBT_API_KEY=your_api_key_here
15
+ ```
16
+
17
+ ### 2. Configure Cursor IDE
18
+
19
+ Create or edit `.cursor/mcp.json` in your project:
20
+
21
+ ```json
22
+ {
23
+ "mcpServers": {
24
+ "architectgbt": {
25
+ "command": "npx",
26
+ "args": ["-y", "architectgbt-mcp@0.2.0"]
27
+ }
28
+ }
29
+ }
30
+ ```
31
+
32
+ ### 3. Restart Cursor
33
+
34
+ Close and reopen Cursor IDE to load the MCP server with your API key.
35
+
36
+ ### 4. Test Commands
37
+
38
+ #### Test 1: List Models
39
+ Ask Claude in Cursor:
40
+ ```
41
+ Show me all available AI models
42
+ ```
43
+
44
+ Expected: List of models with pricing
45
+
46
+ #### Test 2: Get Recommendation
47
+ Ask Claude:
48
+ ```
49
+ Recommend an AI model for building a chatbot with 100k daily users and $500/month budget
50
+ ```
51
+
52
+ Expected: Top 3 model recommendations with cost breakdown
53
+
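The cost breakdown maps onto fields the MCP formatter reads from the API response; a sketch of that shape, inferred from the formatter code elsewhere in this diff (the real API may return additional fields):

```typescript
// Sketch of the response fields the formatter reads; inferred, not exhaustive.
interface RecommendationResponse {
  recommendations: Array<{
    model_name?: string;
    name?: string;
    provider: string;
    match_score?: number;
    estimated_cost?: {
      total_cost_usd?: number;
      input_tokens?: number;
      output_tokens?: number;
    };
    reasoning?: string;
    pros?: string[];
    cons?: string[];
  }>;
  analysis_summary?: string;
}
```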
54
+ #### Test 3: Get Template
55
+ Ask Claude:
56
+ ```
57
+ Show me the Claude TypeScript template
58
+ ```
59
+
60
+ Expected: Template code and description
61
+
62
+ ### 5. Verify Unlimited Access
63
+
64
+ With your API key set:
65
+ - ✅ No rate limits (unlimited recommendations)
66
+ - ✅ Usage tracked in your dashboard
67
+ - ✅ `last_used_at` updates in Settings → API Keys
68
+
69
+ ### 6. Test Without API Key
70
+
71
+ Remove the environment variable:
72
+ ```powershell
73
+ Remove-Item Env:\ARCHITECTGBT_API_KEY
74
+ ```
75
+
76
+ Restart Cursor and try again:
77
+ - ✅ First 3 requests work (anonymous tier)
78
+ - ✅ 4th request shows rate limit error
79
+
80
+ ## Troubleshooting
81
+
82
+ ### API Key Not Working
83
+
84
+ 1. **Check environment variable is set:**
85
+ ```powershell
86
+ echo $env:ARCHITECTGBT_API_KEY
87
+ ```
88
+
89
+ 2. **Verify format:**
90
+ - Must start with `agbt_`
91
+ - Should be 37+ characters long (a quick check is sketched after this list)
92
+
93
+ 3. **Check Cursor loaded it:**
94
+ - Restart Cursor after setting the variable
95
+ - Check Cursor's MCP logs
96
+
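If in doubt, the two format rules above can be checked with a tiny script; this helper is hypothetical and not part of the package:

```typescript
// Hypothetical helper (not part of the package): encodes the format rules above.
function looksLikeValidKey(key: string | undefined): boolean {
  return typeof key === "string" && key.startsWith("agbt_") && key.length >= 37;
}

console.log(looksLikeValidKey(process.env.ARCHITECTGBT_API_KEY));
```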
97
+ ### MCP Server Not Found
98
+
99
+ ```powershell
100
+ # Test MCP server directly
101
+ npx -y architectgbt-mcp@0.2.0
102
+ ```
103
+
104
+ ### Still Getting Rate Limited
105
+
106
+ - Verify API key is in environment variables
107
+ - Check it matches the key in your dashboard
108
+ - Ensure Cursor was restarted after setting the variable
109
+
110
+ ## Production Testing Checklist
111
+
112
+ - [ ] Anonymous access works (3/day limit)
113
+ - [ ] API key provides unlimited access
114
+ - [ ] Usage count increments in dashboard
115
+ - [ ] `last_used_at` updates after each request
116
+ - [ ] Rate limit resets after 24 hours
117
+ - [ ] All 3 tools work (list_models, get_ai_recommendation, get_code_template)
118
+ - [ ] Error messages are user-friendly
119
+ - [ ] MCP works in both Cursor and Claude Desktop
120
+
121
+ ## API Endpoints Being Used
122
+
123
+ When using MCP with API key:
124
+ - **GET** `https://architectgbt.com/api/models` - List models
125
+ - **POST** `https://architectgbt.com/api/recommend` - Get recommendations
126
+
127
+ Headers sent:
128
+ ```
129
+ Authorization: Bearer agbt_your_key_here
130
+ Content-Type: application/json
131
+ ```
132
+
133
+ ## Next Steps
134
+
135
+ 1. Monitor usage in Settings → API Keys
136
+ 2. Check MCP usage analytics (coming soon)
137
+ 3. Share MCP package with team members
138
+ 4. Upgrade to Pro for unlimited access ($15/month)
@@ -108,28 +108,87 @@ export async function handleGetRecommendation(args) {
108
108
  }
109
109
  }
110
110
  function formatRecommendation(data) {
111
- const { recommendation, reasoning, alternatives, model } = data;
112
- let result = `## 🎯 AI Model Recommendation\n\n`;
113
- if (model) {
114
- result += `### Recommended: ${model.name}\n`;
115
- result += `- **Provider:** ${model.provider}\n`;
116
- result += `- **Model ID:** ${model.model_id || "N/A"}\n`;
117
- if (model.input_price || model.output_price) {
118
- result += `- **Pricing:** $${model.input_price}/1M input, $${model.output_price}/1M output\n`;
119
- }
120
- if (model.context_window) {
121
- result += `- **Context Window:** ${model.context_window.toLocaleString()} tokens\n`;
111
+ // Handle conversational/off-topic responses
112
+ if (data.off_topic || data.needs_clarification) {
113
+ let result = data.message || '';
114
+ if (data.questions && Array.isArray(data.questions) && data.questions.length > 0) {
115
+ result += `\n\n📋 To help me recommend the perfect AI model, please tell me:\n`;
116
+ data.questions.forEach((q, i) => {
117
+ result += `${i + 1}. ${q}\n`;
118
+ });
122
119
  }
120
+ return result;
121
+ }
122
+ const recommendations = data.recommendations || [];
123
+ if (recommendations.length === 0) {
124
+ return `❌ No recommendations found. Try describing your project in more detail.`;
125
+ }
126
+ let result = `🎯 AI Model Recommendation — Analysis Complete!\n`;
127
+ result += `${"=".repeat(70)}\n\n`;
128
+ // Main recommendation
129
+ const top = recommendations[0];
130
+ result += `✨ TOP MATCH (${top.match_score || top.score || 95}% match)\n\n`;
131
+ result += `${top.model_name || top.name}\n`;
132
+ result += `Provider: ${top.provider}\n`;
133
+ // Pricing
134
+ if (top.estimated_cost) {
135
+ const cost = top.estimated_cost;
136
+ result += `Estimated Cost: $${cost.total_cost_usd?.toFixed(4) || '0.0000'}\n`;
137
+ result += ` └─ ${cost.input_tokens?.toLocaleString() || '0'} input + ${cost.output_tokens?.toLocaleString() || '0'} output tokens\n`;
138
+ }
139
+ else if (top.input_price !== undefined && top.output_price !== undefined) {
140
+ result += `Pricing: $${top.input_price}/1M input • $${top.output_price}/1M output\n`;
141
+ }
142
+ // Capabilities
143
+ if (top.capabilities?.context_window || top.context_window) {
144
+ const contextWindow = top.capabilities?.context_window || top.context_window;
145
+ result += `Context Window: ${contextWindow.toLocaleString()} tokens\n`;
146
+ }
147
+ // Reasoning
148
+ if (top.reasoning) {
149
+ result += `\n💡 Why this model?\n${top.reasoning}\n`;
123
150
  }
124
- if (reasoning) {
125
- result += `\n### Why This Model?\n${reasoning}\n`;
151
+ // Pros and Cons
152
+ if (top.pros || top.cons) {
153
+ result += `\n`;
154
+ if (top.pros && top.pros.length > 0) {
155
+ result += `✅ Pros:\n`;
156
+ top.pros.forEach((pro) => {
157
+ result += ` • ${pro}\n`;
158
+ });
159
+ }
160
+ if (top.cons && top.cons.length > 0) {
161
+ result += `⚠️ Cons:\n`;
162
+ top.cons.forEach((con) => {
163
+ result += ` • ${con}\n`;
164
+ });
165
+ }
126
166
  }
127
- if (alternatives && alternatives.length > 0) {
128
- result += `\n### Alternatives\n`;
129
- alternatives.forEach((alt, i) => {
130
- result += `${i + 1}. **${alt.name}** - ${alt.reason || alt.description || ""}\n`;
167
+ // Alternative recommendations
168
+ if (recommendations.length > 1) {
169
+ result += `\n${"─".repeat(70)}\n`;
170
+ result += `\nAlternative Options:\n\n`;
171
+ recommendations.slice(1, 3).forEach((rec, i) => {
172
+ result += `${i + 2}. ${rec.model_name || rec.name} (${rec.match_score || rec.score || '??'}% match)\n`;
173
+ result += ` Provider: ${rec.provider}\n`;
174
+ if (rec.estimated_cost) {
175
+ result += ` Cost: $${rec.estimated_cost.total_cost_usd?.toFixed(4) || '0.0000'}\n`;
176
+ }
177
+ else if (rec.input_price !== undefined) {
178
+ result += ` Pricing: $${rec.input_price}/1M in • $${rec.output_price}/1M out\n`;
179
+ }
180
+ if (rec.reasoning) {
181
+ result += ` Reason: ${rec.reasoning.substring(0, 150)}${rec.reasoning.length > 150 ? '...' : ''}\n`;
182
+ }
183
+ result += `\n`;
131
184
  });
132
185
  }
133
- result += `\n---\n*Powered by [ArchitectGBT](https://architectgbt.com)*`;
186
+ // Analysis summary
187
+ if (data.analysis_summary) {
188
+ result += `${"─".repeat(70)}\n`;
189
+ result += `\n📊 Analysis Summary:\n${data.analysis_summary}\n\n`;
190
+ }
191
+ result += `${"=".repeat(70)}\n`;
192
+ result += `💎 Powered by ArchitectGBT • https://architectgbt.com`;
134
193
  return result;
135
194
  }
@@ -48,17 +48,29 @@ export async function handleListModels(args) {
48
48
  }
49
49
  // Limit results
50
50
  models = models.slice(0, input.limit);
51
- // Format output
52
- let result = `## 📊 Available AI Models\n\n`;
53
- result += `| Model | Provider | Input $/1M | Output $/1M |\n`;
54
- result += `|-------|----------|------------|-------------|\n`;
51
+ // Group by provider for better readability
52
+ const groupedModels = {};
55
53
  models.forEach((m) => {
56
- // Convert per-1K pricing to per-1M for display
57
- const inputPer1M = m.input_cost_per_1k ? (m.input_cost_per_1k * 1000).toFixed(2) : "?";
58
- const outputPer1M = m.output_cost_per_1k ? (m.output_cost_per_1k * 1000).toFixed(2) : "?";
59
- result += `| ${m.name} | ${m.provider} | $${inputPer1M} | $${outputPer1M} |\n`;
54
+ const provider = m.provider || "Unknown";
55
+ if (!groupedModels[provider]) {
56
+ groupedModels[provider] = [];
57
+ }
58
+ groupedModels[provider].push(m);
60
59
  });
61
- result += `\n*Showing ${models.length} models. Use \`get_ai_recommendation\` for personalized suggestions.*`;
60
+ // Format output with grouping
61
+ let result = `Available AI Models (${models.length} total)\n`;
62
+ result += `${"=".repeat(70)}\n\n`;
63
+ Object.entries(groupedModels).forEach(([provider, providerModels]) => {
64
+ result += `${provider}:\n`;
65
+ providerModels.forEach((m) => {
66
+ const inputPer1M = m.input_cost_per_1k ? (m.input_cost_per_1k * 1000).toFixed(2) : "?";
67
+ const outputPer1M = m.output_cost_per_1k ? (m.output_cost_per_1k * 1000).toFixed(2) : "?";
68
+ result += ` • ${m.name.padEnd(30)} $${inputPer1M.padStart(6)} / $${outputPer1M.padStart(6)} (in/out per 1M tokens)\n`;
69
+ });
70
+ result += `\n`;
71
+ });
72
+ result += `${"=".repeat(70)}\n`;
73
+ result += `💡 Tip: Use get_ai_recommendation for personalized model suggestions based on your needs.`;
62
74
  return {
63
75
  content: [{ type: "text", text: result }],
64
76
  };
package/package.json CHANGED
@@ -1,31 +1,44 @@
1
1
  {
2
2
  "name": "architectgbt-mcp",
3
- "version": "0.2.0",
4
- "description": "MCP server for AI model recommendations from ArchitectGBT",
3
+ "version": "0.2.2",
4
+ "description": "Model Context Protocol server for ArchitectGBT - AI architecture recommendations",
5
5
  "type": "module",
6
- "main": "dist/index.js",
7
6
  "bin": {
8
7
  "architectgbt-mcp": "./dist/index.js"
9
8
  },
9
+ "main": "./dist/index.js",
10
10
  "scripts": {
11
- "build": "tsc",
12
- "start": "node dist/index.js",
13
- "dev": "tsx src/index.ts"
11
+ "build": "tsc && node -e \"require('fs').chmodSync('./dist/index.js', '755')\"",
12
+ "prepare": "npm run build",
13
+ "dev": "tsx src/index.ts",
14
+ "test": "echo 'No tests yet'"
14
15
  },
15
- "keywords": ["mcp", "ai", "model", "recommendation", "architectgbt", "cursor", "claude"],
16
+ "keywords": [
17
+ "mcp",
18
+ "model-context-protocol",
19
+ "ai",
20
+ "architecture",
21
+ "recommendations",
22
+ "openai",
23
+ "anthropic",
24
+ "claude"
25
+ ],
16
26
  "author": "ArchitectGBT",
17
27
  "license": "MIT",
28
+ "repository": {
29
+ "type": "git",
30
+ "url": "https://github.com/yourusername/architectgbt-mcp.git"
31
+ },
18
32
  "dependencies": {
19
33
  "@modelcontextprotocol/sdk": "^1.0.0",
20
- "zod": "^3.22.0"
34
+ "zod": "^3.23.8"
21
35
  },
22
36
  "devDependencies": {
23
37
  "@types/node": "^20.0.0",
24
- "tsx": "^4.0.0",
25
- "typescript": "^5.0.0"
38
+ "tsx": "^4.7.0",
39
+ "typescript": "^5.3.0"
26
40
  },
27
- "files": ["dist"],
28
41
  "engines": {
29
- "node": ">=18"
42
+ "node": ">=18.0.0"
30
43
  }
31
- }
44
+ }
package/src/index.ts ADDED
@@ -0,0 +1,64 @@
1
+ #!/usr/bin/env node
2
+
3
+ import { Server } from "@modelcontextprotocol/sdk/server/index.js";
4
+ import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
5
+ import {
6
+ CallToolRequestSchema,
7
+ ListToolsRequestSchema,
8
+ } from "@modelcontextprotocol/sdk/types.js";
9
+
10
+ import { getRecommendationTool, handleGetRecommendation } from "./tools/get-recommendation.js";
11
+ import { getTemplateTool, handleGetTemplate } from "./tools/get-template.js";
12
+ import { listModelsTool, handleListModels } from "./tools/list-models.js";
13
+
14
+ const server = new Server(
15
+ {
16
+ name: "architectgbt-mcp",
17
+ version: "0.1.0",
18
+ },
19
+ {
20
+ capabilities: {
21
+ tools: {},
22
+ },
23
+ }
24
+ );
25
+
26
+ // List available tools
27
+ server.setRequestHandler(ListToolsRequestSchema, async () => {
28
+ return {
29
+ tools: [getRecommendationTool, getTemplateTool, listModelsTool],
30
+ };
31
+ });
32
+
33
+ // Handle tool calls
34
+ server.setRequestHandler(CallToolRequestSchema, async (request) => {
35
+ const { name, arguments: args } = request.params;
36
+
37
+ try {
38
+ switch (name) {
39
+ case "get_ai_recommendation":
40
+ return await handleGetRecommendation(args);
41
+ case "get_code_template":
42
+ return await handleGetTemplate(args);
43
+ case "list_models":
44
+ return await handleListModels(args);
45
+ default:
46
+ throw new Error(`Unknown tool: ${name}`);
47
+ }
48
+ } catch (error) {
49
+ const message = error instanceof Error ? error.message : "Unknown error";
50
+ return {
51
+ content: [{ type: "text", text: `Error: ${message}` }],
52
+ isError: true,
53
+ };
54
+ }
55
+ });
56
+
57
+ // Start the server
58
+ async function main() {
59
+ const transport = new StdioServerTransport();
60
+ await server.connect(transport);
61
+ console.error("ArchitectGBT MCP server running on stdio");
62
+ }
63
+
64
+ main().catch(console.error);
@@ -0,0 +1,123 @@
1
+ export const templates: Record<string, Record<string, {
2
+ install: string;
3
+ envVars: string[];
4
+ code: string;
5
+ usage: string;
6
+ }>> = {
7
+ anthropic: {
8
+ typescript: {
9
+ install: "npm install @anthropic-ai/sdk",
10
+ envVars: ["ANTHROPIC_API_KEY=your-api-key"],
11
+ code: `import Anthropic from "@anthropic-ai/sdk";
12
+
13
+ const client = new Anthropic({
14
+ apiKey: process.env.ANTHROPIC_API_KEY,
15
+ });
16
+
17
+ export async function chat(message: string): Promise<string> {
18
+ const response = await client.messages.create({
19
+ model: "claude-sonnet-4-20250514",
20
+ max_tokens: 1024,
21
+ messages: [{ role: "user", content: message }],
22
+ });
23
+
24
+ const textBlock = response.content[0];
25
+ if (textBlock.type === "text") {
26
+ return textBlock.text;
27
+ }
28
+ throw new Error("Unexpected response type");
29
+ }`,
30
+ usage: `const answer = await chat("What is the capital of France?");
31
+ console.log(answer);`,
32
+ },
33
+ python: {
34
+ install: "pip install anthropic",
35
+ envVars: ["ANTHROPIC_API_KEY=your-api-key"],
36
+ code: `import anthropic
37
+
38
+ client = anthropic.Anthropic()
39
+
40
+ def chat(message: str) -> str:
41
+ response = client.messages.create(
42
+ model="claude-sonnet-4-20250514",
43
+ max_tokens=1024,
44
+ messages=[{"role": "user", "content": message}]
45
+ )
46
+ return response.content[0].text`,
47
+ usage: `answer = chat("What is the capital of France?")
48
+ print(answer)`,
49
+ },
50
+ },
51
+
52
+ openai: {
53
+ typescript: {
54
+ install: "npm install openai",
55
+ envVars: ["OPENAI_API_KEY=your-api-key"],
56
+ code: `import OpenAI from "openai";
57
+
58
+ const client = new OpenAI({
59
+ apiKey: process.env.OPENAI_API_KEY,
60
+ });
61
+
62
+ export async function chat(message: string): Promise<string> {
63
+ const response = await client.chat.completions.create({
64
+ model: "gpt-4o",
65
+ messages: [{ role: "user", content: message }],
66
+ });
67
+
68
+ return response.choices[0].message.content || "";
69
+ }`,
70
+ usage: `const answer = await chat("What is the capital of France?");
71
+ console.log(answer);`,
72
+ },
73
+ python: {
74
+ install: "pip install openai",
75
+ envVars: ["OPENAI_API_KEY=your-api-key"],
76
+ code: `from openai import OpenAI
77
+
78
+ client = OpenAI()
79
+
80
+ def chat(message: str) -> str:
81
+ response = client.chat.completions.create(
82
+ model="gpt-4o",
83
+ messages=[{"role": "user", "content": message}]
84
+ )
85
+ return response.choices[0].message.content`,
86
+ usage: `answer = chat("What is the capital of France?")
87
+ print(answer)`,
88
+ },
89
+ },
90
+
91
+ google: {
92
+ typescript: {
93
+ install: "npm install @google/generative-ai",
94
+ envVars: ["GOOGLE_API_KEY=your-api-key"],
95
+ code: `import { GoogleGenerativeAI } from "@google/generative-ai";
96
+
97
+ const genAI = new GoogleGenerativeAI(process.env.GOOGLE_API_KEY!);
98
+
99
+ export async function chat(message: string): Promise<string> {
100
+ const model = genAI.getGenerativeModel({ model: "gemini-2.0-flash" });
101
+ const result = await model.generateContent(message);
102
+ return result.response.text();
103
+ }`,
104
+ usage: `const answer = await chat("What is the capital of France?");
105
+ console.log(answer);`,
106
+ },
107
+ python: {
108
+ install: "pip install google-generativeai",
109
+ envVars: ["GOOGLE_API_KEY=your-api-key"],
110
+ code: `import google.generativeai as genai
111
+ import os
112
+
113
+ genai.configure(api_key=os.environ["GOOGLE_API_KEY"])
114
+
115
+ def chat(message: str) -> str:
116
+ model = genai.GenerativeModel("gemini-2.0-flash")
117
+ response = model.generate_content(message)
118
+ return response.text`,
119
+ usage: `answer = chat("What is the capital of France?")
120
+ print(answer)`,
121
+ },
122
+ },
123
+ };
@@ -0,0 +1,225 @@
1
+ import { z } from "zod";
2
+
3
+ const API_BASE = process.env.ARCHITECTGBT_API_URL || "https://architectgbt.com";
4
+ const API_KEY = process.env.ARCHITECTGBT_API_KEY;
5
+
6
+ export const getRecommendationTool = {
7
+ name: "get_ai_recommendation",
8
+ description:
9
+ "Analyze a project description and recommend the best AI model with pricing, reasoning, and alternatives. Free tier: 3 recommendations/day. Add ARCHITECTGBT_API_KEY for unlimited access.",
10
+ inputSchema: {
11
+ type: "object" as const,
12
+ properties: {
13
+ prompt: {
14
+ type: "string",
15
+ description:
16
+ "Description of what you want to build (e.g., 'customer support chatbot for e-commerce')",
17
+ },
18
+ budget: {
19
+ type: "string",
20
+ enum: ["low", "medium", "high", "unlimited"],
21
+ description: "Budget constraint for API costs",
22
+ },
23
+ priority: {
24
+ type: "string",
25
+ enum: ["cost", "speed", "quality", "balanced"],
26
+ description: "What matters most for this project",
27
+ },
28
+ },
29
+ required: ["prompt"],
30
+ },
31
+ };
32
+
33
+ const InputSchema = z.object({
34
+ prompt: z.string(),
35
+ budget: z.enum(["low", "medium", "high", "unlimited"]).optional(),
36
+ priority: z.enum(["cost", "speed", "quality", "balanced"]).optional(),
37
+ });
38
+
39
+ export async function handleGetRecommendation(args: unknown) {
40
+ const input = InputSchema.parse(args);
41
+
42
+ try {
43
+ // Determine which endpoint to use
44
+ const endpoint = API_KEY ? `${API_BASE}/api/recommend` : `${API_BASE}/api/recommend/public`;
45
+
46
+ // Build headers
47
+ const headers: HeadersInit = {
48
+ "Content-Type": "application/json",
49
+ };
50
+
51
+ // Add API key if available
52
+ if (API_KEY) {
53
+ headers["Authorization"] = `Bearer ${API_KEY}`;
54
+ }
55
+
56
+ const response = await fetch(endpoint, {
57
+ method: "POST",
58
+ headers,
59
+ body: JSON.stringify({
60
+ prompt: input.prompt,
61
+ budget: input.budget,
62
+ priority: input.priority,
63
+ }),
64
+ });
65
+
66
+ if (!response.ok) {
67
+ // Handle rate limiting for free tier
68
+ if (response.status === 429) {
69
+ const data = await response.json();
70
+ const resetHeader = response.headers.get('X-RateLimit-Reset');
71
+ const resetTime = resetHeader ? new Date(resetHeader).toLocaleString() : 'tomorrow';
72
+
73
+ return {
74
+ content: [
75
+ {
76
+ type: "text",
77
+ text: `🚫 **Daily Limit Reached**\n\n${data.error?.message || 'You\'ve used all 3 free recommendations today.'}\n\nResets at: ${resetTime}\n\n**Get unlimited access:**\n1. Visit https://architectgbt.com\n2. Sign up for free (10 recommendations/month)\n3. Generate an API key from Settings\n4. Add to your MCP config:\n\`\`\`json\n{\n "mcpServers": {\n "architectgbt": {\n "command": "npx",\n "args": ["-y", "architectgbt-mcp"],\n "env": {\n "ARCHITECTGBT_API_KEY": "your_api_key_here"\n }\n }\n }\n}\n\`\`\`\n\n💡 Pro tip: Upgrade to Pro ($15/mo) for unlimited recommendations!`,
78
+ },
79
+ ],
80
+ };
81
+ }
82
+
83
+ // Handle authentication requirement
84
+ if (response.status === 401 || response.status === 405) {
85
+ return {
86
+ content: [
87
+ {
88
+ type: "text",
89
+ text: `❌ **API Key Invalid or Expired**\n\nYour API key is not valid. To fix this:\n\n1. Visit https://architectgbt.com/settings\n2. Generate a new API key\n3. Update your MCP config\n\n**Without an API key:**\nYou can still use the free tier (3 recommendations/day). Remove the ARCHITECTGBT_API_KEY from your config.`,
90
+ },
91
+ ],
92
+ };
93
+ }
94
+ throw new Error(`API error: ${response.status}`);
95
+ }
96
+
97
+ const data = await response.json();
98
+
99
+ // Show rate limit info if present
100
+ const remaining = response.headers.get('X-RateLimit-Remaining');
101
+ const limit = response.headers.get('X-RateLimit-Limit');
102
+
103
+ // Format the response nicely
104
+ let result = formatRecommendation(data);
105
+
106
+ // Add rate limit footer for free tier
107
+ if (remaining !== null && limit !== null && !API_KEY) {
108
+ result += `\n\n---\n📊 **Free Tier:** ${remaining}/${limit} recommendations remaining today\n💎 Get unlimited access at https://architectgbt.com`;
109
+ }
110
+
111
+ return {
112
+ content: [{ type: "text", text: result }],
113
+ };
114
+ } catch (error) {
115
+ const message = error instanceof Error ? error.message : "Unknown error";
116
+ return {
117
+ content: [
118
+ {
119
+ type: "text",
120
+ text: `Failed to get recommendation: ${message}. Please try again.`,
121
+ },
122
+ ],
123
+ isError: true,
124
+ };
125
+ }
126
+ }
127
+
128
+ function formatRecommendation(data: any): string {
129
+ // Handle conversational/off-topic responses
130
+ if (data.off_topic || data.needs_clarification) {
131
+ let result = data.message || '';
132
+
133
+ if (data.questions && Array.isArray(data.questions) && data.questions.length > 0) {
134
+ result += `\n\n📋 To help me recommend the perfect AI model, please tell me:\n`;
135
+ data.questions.forEach((q: string, i: number) => {
136
+ result += `${i + 1}. ${q}\n`;
137
+ });
138
+ }
139
+
140
+ return result;
141
+ }
142
+
143
+ const recommendations = data.recommendations || [];
144
+
145
+ if (recommendations.length === 0) {
146
+ return `❌ No recommendations found. Try describing your project in more detail.`;
147
+ }
148
+
149
+ let result = `🎯 AI Model Recommendation — Analysis Complete!\n`;
150
+ result += `${"=".repeat(70)}\n\n`;
151
+
152
+ // Main recommendation
153
+ const top = recommendations[0];
154
+ result += `✨ TOP MATCH (${top.match_score || top.score || 95}% match)\n\n`;
155
+ result += `${top.model_name || top.name}\n`;
156
+ result += `Provider: ${top.provider}\n`;
157
+
158
+ // Pricing
159
+ if (top.estimated_cost) {
160
+ const cost = top.estimated_cost;
161
+ result += `Estimated Cost: $${cost.total_cost_usd?.toFixed(4) || '0.0000'}\n`;
162
+ result += ` └─ ${cost.input_tokens?.toLocaleString() || '0'} input + ${cost.output_tokens?.toLocaleString() || '0'} output tokens\n`;
163
+ } else if (top.input_price !== undefined && top.output_price !== undefined) {
164
+ result += `Pricing: $${top.input_price}/1M input • $${top.output_price}/1M output\n`;
165
+ }
166
+
167
+ // Capabilities
168
+ if (top.capabilities?.context_window || top.context_window) {
169
+ const contextWindow = top.capabilities?.context_window || top.context_window;
170
+ result += `Context Window: ${contextWindow.toLocaleString()} tokens\n`;
171
+ }
172
+
173
+ // Reasoning
174
+ if (top.reasoning) {
175
+ result += `\n💡 Why this model?\n${top.reasoning}\n`;
176
+ }
177
+
178
+ // Pros and Cons
179
+ if (top.pros || top.cons) {
180
+ result += `\n`;
181
+ if (top.pros && top.pros.length > 0) {
182
+ result += `✅ Pros:\n`;
183
+ top.pros.forEach((pro: string) => {
184
+ result += ` • ${pro}\n`;
185
+ });
186
+ }
187
+ if (top.cons && top.cons.length > 0) {
188
+ result += `⚠️ Cons:\n`;
189
+ top.cons.forEach((con: string) => {
190
+ result += ` • ${con}\n`;
191
+ });
192
+ }
193
+ }
194
+
195
+ // Alternative recommendations
196
+ if (recommendations.length > 1) {
197
+ result += `\n${"─".repeat(70)}\n`;
198
+ result += `\nAlternative Options:\n\n`;
199
+
200
+ recommendations.slice(1, 3).forEach((rec: any, i: number) => {
201
+ result += `${i + 2}. ${rec.model_name || rec.name} (${rec.match_score || rec.score || '??'}% match)\n`;
202
+ result += ` Provider: ${rec.provider}\n`;
203
+ if (rec.estimated_cost) {
204
+ result += ` Cost: $${rec.estimated_cost.total_cost_usd?.toFixed(4) || '0.0000'}\n`;
205
+ } else if (rec.input_price !== undefined) {
206
+ result += ` Pricing: $${rec.input_price}/1M in • $${rec.output_price}/1M out\n`;
207
+ }
208
+ if (rec.reasoning) {
209
+ result += ` Reason: ${rec.reasoning.substring(0, 150)}${rec.reasoning.length > 150 ? '...' : ''}\n`;
210
+ }
211
+ result += `\n`;
212
+ });
213
+ }
214
+
215
+ // Analysis summary
216
+ if (data.analysis_summary) {
217
+ result += `${"─".repeat(70)}\n`;
218
+ result += `\n📊 Analysis Summary:\n${data.analysis_summary}\n\n`;
219
+ }
220
+
221
+ result += `${"=".repeat(70)}\n`;
222
+ result += `💎 Powered by ArchitectGBT • https://architectgbt.com`;
223
+
224
+ return result;
225
+ }
@@ -0,0 +1,87 @@
1
+ import { z } from "zod";
2
+ import { templates } from "../templates/index.js";
3
+
4
+ export const getTemplateTool = {
5
+ name: "get_code_template",
6
+ description:
7
+ "Get a production-ready code template for integrating a specific AI model. Returns working code with setup instructions.",
8
+ inputSchema: {
9
+ type: "object" as const,
10
+ properties: {
11
+ model: {
12
+ type: "string",
13
+ description:
14
+ "The AI model name (e.g., 'Claude', 'GPT-4', 'Gemini')",
15
+ },
16
+ language: {
17
+ type: "string",
18
+ enum: ["typescript", "python"],
19
+ description: "Programming language for the template",
20
+ },
21
+ },
22
+ required: ["model"],
23
+ },
24
+ };
25
+
26
+ const InputSchema = z.object({
27
+ model: z.string(),
28
+ language: z.enum(["typescript", "python"]).default("typescript"),
29
+ });
30
+
31
+ export async function handleGetTemplate(args: unknown) {
32
+ const input = InputSchema.parse(args);
33
+
34
+ const modelKey = input.model.toLowerCase();
35
+ const lang = input.language;
36
+
37
+ // Find matching template
38
+ let template = null;
39
+
40
+ if (modelKey.includes("claude") || modelKey.includes("anthropic")) {
41
+ template = templates.anthropic[lang];
42
+ } else if (modelKey.includes("gpt") || modelKey.includes("openai")) {
43
+ template = templates.openai[lang];
44
+ } else if (modelKey.includes("gemini") || modelKey.includes("google")) {
45
+ template = templates.google[lang];
46
+ }
47
+
48
+ if (!template) {
49
+ return {
50
+ content: [
51
+ {
52
+ type: "text",
53
+ text: `No template found for "${input.model}" in ${lang}. Available: Claude/Anthropic, GPT/OpenAI, Gemini/Google.`,
54
+ },
55
+ ],
56
+ };
57
+ }
58
+
59
+ const result = `## 📝 Code Template: ${input.model} (${lang})
60
+
61
+ ### Installation
62
+ \`\`\`bash
63
+ ${template.install}
64
+ \`\`\`
65
+
66
+ ### Environment Variables
67
+ \`\`\`
68
+ ${template.envVars.join("\n")}
69
+ \`\`\`
70
+
71
+ ### Code
72
+ \`\`\`${lang}
73
+ ${template.code}
74
+ \`\`\`
75
+
76
+ ### Usage Example
77
+ \`\`\`${lang}
78
+ ${template.usage}
79
+ \`\`\`
80
+
81
+ ---
82
+ *Powered by [ArchitectGBT](https://architectgbt.com)*`;
83
+
84
+ return {
85
+ content: [{ type: "text", text: result }],
86
+ };
87
+ }
@@ -0,0 +1,104 @@
1
+ import { z } from "zod";
2
+
3
+ const API_BASE = process.env.ARCHITECTGBT_API_URL || "https://architectgbt.com";
4
+
5
+ export const listModelsTool = {
6
+ name: "list_models",
7
+ description:
8
+ "List available AI models with optional filtering by provider or capability.",
9
+ inputSchema: {
10
+ type: "object" as const,
11
+ properties: {
12
+ provider: {
13
+ type: "string",
14
+ enum: ["OpenAI", "Anthropic", "Google", "Meta", "Mistral", "all"],
15
+ description: "Filter by provider",
16
+ },
17
+ limit: {
18
+ type: "number",
19
+ description: "Maximum number of models to return (default: 10)",
20
+ },
21
+ },
22
+ required: [],
23
+ },
24
+ };
25
+
26
+ const InputSchema = z.object({
27
+ provider: z
28
+ .enum(["OpenAI", "Anthropic", "Google", "Meta", "Mistral", "all"])
29
+ .optional(),
30
+ limit: z.number().default(10),
31
+ });
32
+
33
+ export async function handleListModels(args: unknown) {
34
+ const input = InputSchema.parse(args);
35
+
36
+ try {
37
+ const response = await fetch(`${API_BASE}/api/models`);
38
+
39
+ if (!response.ok) {
40
+ throw new Error(`API error: ${response.status}`);
41
+ }
42
+
43
+ const responseData = await response.json();
44
+
45
+ // Handle the API response structure: { success: true, data: [...] }
46
+ let models = responseData.success && Array.isArray(responseData.data)
47
+ ? responseData.data
48
+ : Array.isArray(responseData)
49
+ ? responseData
50
+ : [];
51
+
52
+ if (!Array.isArray(models)) {
53
+ throw new Error(`Expected array but got ${typeof models}. API might have changed.`);
54
+ }
55
+
56
+ // Filter by provider if specified
57
+ if (input.provider && input.provider !== "all") {
58
+ models = models.filter(
59
+ (m: any) =>
60
+ m.provider?.toLowerCase() === input.provider?.toLowerCase()
61
+ );
62
+ }
63
+
64
+ // Limit results
65
+ models = models.slice(0, input.limit);
66
+
67
+ // Group by provider for better readability
68
+ const groupedModels: Record<string, any[]> = {};
69
+ models.forEach((m: any) => {
70
+ const provider = m.provider || "Unknown";
71
+ if (!groupedModels[provider]) {
72
+ groupedModels[provider] = [];
73
+ }
74
+ groupedModels[provider].push(m);
75
+ });
76
+
77
+ // Format output with grouping
78
+ let result = `Available AI Models (${models.length} total)\n`;
79
+ result += `${"=".repeat(70)}\n\n`;
80
+
81
+ Object.entries(groupedModels).forEach(([provider, providerModels]) => {
82
+ result += `${provider}:\n`;
83
+ providerModels.forEach((m: any) => {
84
+ const inputPer1M = m.input_cost_per_1k ? (m.input_cost_per_1k * 1000).toFixed(2) : "?";
85
+ const outputPer1M = m.output_cost_per_1k ? (m.output_cost_per_1k * 1000).toFixed(2) : "?";
86
+ result += ` • ${m.name.padEnd(30)} $${inputPer1M.padStart(6)} / $${outputPer1M.padStart(6)} (in/out per 1M tokens)\n`;
87
+ });
88
+ result += `\n`;
89
+ });
90
+
91
+ result += `${"=".repeat(70)}\n`;
92
+ result += `💡 Tip: Use get_ai_recommendation for personalized model suggestions based on your needs.`;
93
+
94
+ return {
95
+ content: [{ type: "text", text: result }],
96
+ };
97
+ } catch (error) {
98
+ const message = error instanceof Error ? error.message : "Unknown error";
99
+ return {
100
+ content: [{ type: "text", text: `Failed to list models: ${message}` }],
101
+ isError: true,
102
+ };
103
+ }
104
+ }
package/tsconfig.json ADDED
@@ -0,0 +1,16 @@
1
+ {
2
+ "compilerOptions": {
3
+ "target": "ES2022",
4
+ "module": "NodeNext",
5
+ "moduleResolution": "NodeNext",
6
+ "outDir": "./dist",
7
+ "rootDir": "./src",
8
+ "strict": true,
9
+ "esModuleInterop": true,
10
+ "skipLibCheck": true,
11
+ "forceConsistentCasingInFileNames": true,
12
+ "declaration": true
13
+ },
14
+ "include": ["src/**/*"],
15
+ "exclude": ["node_modules", "dist"]
16
+ }