pqs-mcp-server 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +33 -0
- package/index.js +177 -0
- package/package.json +36 -0
package/README.md
ADDED
|
@@ -0,0 +1,33 @@
|
|
|
1
|
+
# PQS MCP Server
|
|
2
|
+
|
|
3
|
+
The world's first named AI prompt quality score — as an MCP server.
|
|
4
|
+
|
|
5
|
+
Score, optimize, and compare LLM prompts before they hit any model. Built on PEEM, RAGAS, G-Eval, and MT-Bench frameworks.
|
|
6
|
+
|
|
7
|
+
## Install
|
|
8
|
+
|
|
9
|
+
Add to your Claude Desktop config (`~/Library/Application Support/Claude/claude_desktop_config.json`):
|
|
10
|
+
```json
|
|
11
|
+
{
|
|
12
|
+
"mcpServers": {
|
|
13
|
+
"pqs": {
|
|
14
|
+
"command": "npx",
|
|
15
|
+
"args": ["pqs-mcp-server"]
|
|
16
|
+
}
|
|
17
|
+
}
|
|
18
|
+
}
|
|
19
|
+
```
|
|
20
|
+
|
|
21
|
+
## Tools
|
|
22
|
+
|
|
23
|
+
- **score_prompt** — Free. Score any prompt, get grade + percentile. No API key needed.
|
|
24
|
+
- **optimize_prompt** — $0.025 USDC via x402. Full dimension breakdown + optimized prompt.
|
|
25
|
+
- **compare_models** — $0.50 USDC via x402. Claude vs GPT-4o head-to-head.
|
|
26
|
+
|
|
27
|
+
## Get an API Key
|
|
28
|
+
|
|
29
|
+
https://pqs.onchainintel.net
|
|
30
|
+
|
|
31
|
+
## Built by
|
|
32
|
+
|
|
33
|
+
John / OnChainIntel — @OnChainAIIntel
|
package/index.js
ADDED
|
@@ -0,0 +1,177 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
|
|
3
|
+
import { Server } from "@modelcontextprotocol/sdk/server/index.js";
|
|
4
|
+
import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
|
|
5
|
+
import {
|
|
6
|
+
CallToolRequestSchema,
|
|
7
|
+
ListToolsRequestSchema,
|
|
8
|
+
} from "@modelcontextprotocol/sdk/types.js";
|
|
9
|
+
|
|
10
|
+
// Base URL of the hosted PQS (Prompt Quality Score) HTTP API.
const PQS_BASE = "https://pqs.onchainintel.net";

// MCP server identity + capabilities. Only tools are advertised
// (no prompts/resources), matching the handlers registered below.
const server = new Server(
  { name: "pqs-mcp-server", version: "1.0.0" },
  { capabilities: { tools: {} } }
);
|
|
23
|
+
|
|
24
|
+
// Every PQS tool accepts the same vertical enum; only the wording of the
// per-tool description differs, so it is passed in by the builders below.
const PQS_VERTICALS = [
  "software",
  "content",
  "business",
  "education",
  "science",
  "crypto",
  "general",
];

// Builds the "vertical" input-schema property with a tool-specific description.
const verticalProperty = (description) => ({
  type: "string",
  enum: PQS_VERTICALS,
  description,
});

// Builds the "prompt" input-schema property with a tool-specific description.
const promptProperty = (description) => ({ type: "string", description });

// The two paid tools share an identical api_key property.
const apiKeyProperty = {
  type: "string",
  description: "PQS API key for authentication. Get one at pqs.onchainintel.net",
};

// Advertise the tool catalog: one free scorer plus two paid (x402) tools.
server.setRequestHandler(ListToolsRequestSchema, async () => ({
  tools: [
    {
      name: "score_prompt",
      description:
        "Score any LLM prompt for quality using PQS (Prompt Quality Score). Returns a grade (A-F), score out of 40, and percentile. Free tier — no payment required. Use this before sending any prompt to an LLM to check if it is worth running.",
      inputSchema: {
        type: "object",
        properties: {
          prompt: promptProperty("The prompt to score"),
          vertical: verticalProperty(
            "The domain context for scoring. Defaults to general."
          ),
        },
        required: ["prompt"],
      },
    },
    {
      name: "optimize_prompt",
      description:
        "Score AND optimize any LLM prompt using PQS. Returns the original score, an optimized version of the prompt, and dimension-by-dimension breakdown across 8 quality dimensions based on PEEM, RAGAS, G-Eval, and MT-Bench frameworks. Costs $0.025 USDC via x402. Use this when you want to improve a prompt before running it.",
      inputSchema: {
        type: "object",
        properties: {
          prompt: promptProperty("The prompt to optimize"),
          vertical: verticalProperty(
            "The domain context for optimization. Defaults to general."
          ),
          api_key: apiKeyProperty,
        },
        required: ["prompt", "api_key"],
      },
    },
    {
      name: "compare_models",
      description:
        "Compare how Claude vs GPT-4o handles the same prompt using PQS. Both models are scored head-to-head by a third model judge. Returns winner, scores, and recommendation on which model to use for this prompt type. Costs $0.50 USDC via x402.",
      inputSchema: {
        type: "object",
        properties: {
          prompt: promptProperty("The prompt to compare across models"),
          vertical: verticalProperty("The domain context. Defaults to general."),
          api_key: apiKeyProperty,
        },
        required: ["prompt", "api_key"],
      },
    },
  ],
}));
|
|
98
|
+
|
|
99
|
+
// Maps each tool name to its PQS API endpoint and whether the request
// must carry an X-API-Key header (the free scorer does not).
const TOOL_ENDPOINTS = {
  score_prompt: { path: "/api/score/free", requiresKey: false },
  optimize_prompt: { path: "/api/score/full", requiresKey: true },
  compare_models: { path: "/api/score/compare", requiresKey: true },
};

/**
 * Dispatches tool calls to the PQS HTTP API.
 *
 * All three tools POST the same { prompt, vertical } payload; only the
 * endpoint path and auth header differ, so the request logic is shared.
 * Network failures and non-2xx responses are reported to the client as
 * MCP error results (isError: true) instead of crashing the handler —
 * the original code let fetch()/response.json() throw on a down server
 * or a non-JSON body (e.g. an x402 payment-required or HTML error page).
 */
server.setRequestHandler(CallToolRequestSchema, async (request) => {
  const { name, arguments: args } = request.params;

  const endpoint = TOOL_ENDPOINTS[name];
  if (!endpoint) {
    throw new Error(`Unknown tool: ${name}`);
  }

  const headers = { "Content-Type": "application/json" };
  if (endpoint.requiresKey) {
    headers["X-API-Key"] = args.api_key;
  }

  try {
    const response = await fetch(`${PQS_BASE}${endpoint.path}`, {
      method: "POST",
      headers,
      body: JSON.stringify({
        prompt: args.prompt,
        // || (not ??) on purpose: an empty-string vertical is not a valid
        // enum value and should fall back to "general" as well.
        vertical: args.vertical || "general",
      }),
    });

    // Read the body as text first so a non-JSON payload does not throw;
    // pretty-print it when it is valid JSON, pass it through otherwise.
    const raw = await response.text();
    let text;
    try {
      text = JSON.stringify(JSON.parse(raw), null, 2);
    } catch {
      text = raw;
    }

    if (!response.ok) {
      return {
        content: [
          {
            type: "text",
            text: `PQS API error (HTTP ${response.status}): ${text}`,
          },
        ],
        isError: true,
      };
    }

    return {
      content: [{ type: "text", text }],
    };
  } catch (err) {
    // Transport-level failure: DNS, refused connection, timeout, etc.
    return {
      content: [
        { type: "text", text: `Failed to reach PQS API: ${err.message}` },
      ],
      isError: true,
    };
  }
});
|
|
170
|
+
|
|
171
|
+
/**
 * Connects the MCP server to a stdio transport and leaves it running.
 * Logs to stderr only — stdout is reserved for the MCP protocol stream.
 */
async function main() {
  const transport = new StdioServerTransport();
  await server.connect(transport);
  console.error("PQS MCP Server running on stdio");
}

// Exit non-zero on startup failure so the launching client (e.g. Claude
// Desktop) can detect a broken server; the original `.catch(console.error)`
// logged the error but still exited 0.
main().catch((err) => {
  console.error(err);
  process.exit(1);
});
|
package/package.json
ADDED
|
@@ -0,0 +1,36 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "pqs-mcp-server",
|
|
3
|
+
"version": "1.0.0",
|
|
4
|
+
"description": "PQS (Prompt Quality Score) MCP server \u2014 score, optimize, and compare LLM prompts before they hit any model. x402-native, built on PEEM, RAGAS, G-Eval, and MT-Bench.",
|
|
5
|
+
"main": "index.js",
|
|
6
|
+
"scripts": {
|
|
7
|
+
"start": "node index.js"
|
|
8
|
+
},
|
|
9
|
+
"keywords": [
|
|
10
|
+
"mcp",
|
|
11
|
+
"prompt",
|
|
12
|
+
"quality",
|
|
13
|
+
"score",
|
|
14
|
+
"llm",
|
|
15
|
+
"x402",
|
|
16
|
+
"pqs",
|
|
17
|
+
"ai",
|
|
18
|
+
"claude",
|
|
19
|
+
"gpt"
|
|
20
|
+
],
|
|
21
|
+
"author": "OnChainIntel",
|
|
22
|
+
"license": "MIT",
|
|
23
|
+
"type": "module",
|
|
24
|
+
"dependencies": {
|
|
25
|
+
"@modelcontextprotocol/sdk": "^1.29.0",
|
|
26
|
+
"node-fetch": "^3.3.2"
|
|
27
|
+
},
|
|
28
|
+
"bin": {
|
|
29
|
+
"pqs-mcp-server": "./index.js"
|
|
30
|
+
},
|
|
31
|
+
"homepage": "https://pqs.onchainintel.net",
|
|
32
|
+
"repository": {
|
|
33
|
+
"type": "git",
|
|
34
|
+
"url": "https://github.com/OnChainAIIntel/pqs-mcp-server"
|
|
35
|
+
}
|
|
36
|
+
}
|