@arizeai/phoenix-mcp 2.1.11 → 2.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +2 -0
- package/build/index.js +2 -0
- package/build/supportTools.js +73 -0
- package/package.json +2 -2
package/README.md
CHANGED
|
@@ -14,6 +14,7 @@
|
|
|
14
14
|
|
|
15
15
|
[](https://github.com/Arize-ai/phoenix/blob/main/js/packages/phoenix-mcp/LICENSE)
|
|
16
16
|
<img src="https://badge.mcpx.dev?status=on" title="MCP Enabled"/>
|
|
17
|
+
<a href="https://cursor.com/install-mcp?name=phoenix&config=eyJjb21tYW5kIjoibnB4IC15IEBhcml6ZWFpL3Bob2VuaXgtbWNwQGxhdGVzdCAtLWJhc2VVcmwgaHR0cDovL2xvY2FsaG9zdDo2MDA2IC0tYXBpS2V5IHlvdXItYXBpLWtleSJ9"><img src="https://cursor.com/deeplink/mcp-install-dark.svg" alt="Add Arize Phoenix MCP server to Cursor" height=20 /></a>
|
|
17
18
|
|
|
18
19
|
Phoenix MCP Server is an implementation of the Model Context Protocol for the Arize Phoenix platform. It provides a unified interface to Phoenix's capabilities.
|
|
19
20
|
|
|
@@ -46,6 +47,7 @@ This MCP server can be used using `npx` and can be directly integrated with clie
|
|
|
46
47
|
]
|
|
47
48
|
}
|
|
48
49
|
}
|
|
50
|
+
}
|
|
49
51
|
```
|
|
50
52
|
|
|
51
53
|
## Development
|
package/build/index.js
CHANGED
|
@@ -10,6 +10,7 @@ import { initializePromptTools } from "./promptTools.js";
|
|
|
10
10
|
import { initializeProjectTools } from "./projectTools.js";
|
|
11
11
|
import { initializeSpanTools } from "./spanTools.js";
|
|
12
12
|
import { initializeReadmeResources } from "./readmeResource.js";
|
|
13
|
+
import { initializeSupportTools } from "./supportTools.js";
|
|
13
14
|
const argv = minimist(process.argv.slice(2));
|
|
14
15
|
const headers = argv.apiKey
|
|
15
16
|
? {
|
|
@@ -38,6 +39,7 @@ initializeExperimentTools({ client, server });
|
|
|
38
39
|
initializeDatasetTools({ client, server });
|
|
39
40
|
initializeProjectTools({ client, server });
|
|
40
41
|
initializeSpanTools({ client, server });
|
|
42
|
+
initializeSupportTools({ server });
|
|
41
43
|
async function main() {
|
|
42
44
|
// Initialize readme resources first
|
|
43
45
|
if (process.env.DANGEROUSLY_READ_README_FILES === "true") {
|
|
@@ -0,0 +1,73 @@
|
|
|
1
|
+
import { Client } from "@modelcontextprotocol/sdk/client/index.js";
|
|
2
|
+
import { StreamableHTTPClientTransport } from "@modelcontextprotocol/sdk/client/streamableHttp.js";
|
|
3
|
+
import z from "zod";
|
|
4
|
+
const PHOENIX_SUPPORT_DESCRIPTION = `Get help with Phoenix and OpenInference.
|
|
5
|
+
|
|
6
|
+
- Tracing AI applications via OpenInference and OpenTelemetry
|
|
7
|
+
- Phoenix datasets, experiments, and prompt management
|
|
8
|
+
- Phoenix evals and annotations
|
|
9
|
+
|
|
10
|
+
Use this tool when you need assistance with Phoenix features, troubleshooting,
|
|
11
|
+
or best practices.
|
|
12
|
+
|
|
13
|
+
Expected return:
|
|
14
|
+
Expert guidance about how to use and integrate Phoenix`;
|
|
15
|
+
/**
 * Builds an MCP client and connects it to the RunLLM support server over
 * streamable HTTP, identifying itself as the "arize-phoenix" assistant via a
 * request header.
 *
 * @returns {Promise<Client>} A connected MCP client; the caller owns it and
 *   is responsible for closing it.
 */
async function createRunLLMClient() {
    const runLLMEndpoint = new URL("https://mcp.runllm.com/mcp/");
    // The RunLLM server routes questions by assistant name.
    const requestInit = {
        headers: {
            "assistant-name": "arize-phoenix",
        },
    };
    const httpTransport = new StreamableHTTPClientTransport(runLLMEndpoint, { requestInit });
    const mcpClient = new Client({
        name: "runllm-client",
        version: "1.0.0",
    });
    await mcpClient.connect(httpTransport);
    return mcpClient;
}
|
|
33
|
+
/**
 * Calls the "chat" tool on the RunLLM MCP server and returns its text reply.
 *
 * @param {{ question: string }} params - The user's support question.
 * @returns {Promise<string>} All text content items joined with newlines, or
 *   a fallback message when the server returns no text content.
 */
export async function callRunLLMChat({ question, }) {
    const client = await createRunLLMClient();
    try {
        // Call the chat tool with the user's question
        const result = await client.callTool({
            name: "chat",
            arguments: {
                message: question,
            },
        });
        // There's usually only one content item, but we'll handle multiple for safety
        if (result.content && Array.isArray(result.content)) {
            const textContent = result.content
                .filter((item) => item.type === "text")
                .map((item) => item.text)
                .join("\n");
            if (textContent) {
                return textContent;
            }
        }
        return "No response received from support";
    }
    finally {
        // A fresh client (and HTTP transport) is created for every call;
        // close it so each invocation doesn't leak an open connection.
        await client.close();
    }
}
|
|
57
|
+
/**
 * Registers the "phoenix-support" tool on the given MCP server. The tool
 * forwards the caller's question to the RunLLM backend and returns the
 * answer as a single text content item.
 *
 * @param {{ server: object }} params - The MCP server to register the tool on.
 */
export const initializeSupportTools = async ({ server, }) => {
    const inputSchema = {
        question: z
            .string()
            .describe("Your question about Arize Phoenix, OpenInference, or related topics"),
    };
    const handleQuestion = async ({ question }) => {
        const answer = await callRunLLMChat({ question });
        return {
            content: [{ type: "text", text: answer }],
        };
    };
    server.tool("phoenix-support", PHOENIX_SUPPORT_DESCRIPTION, inputSchema, handleQuestion);
};
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@arizeai/phoenix-mcp",
|
|
3
|
-
"version": "2.1.11",
|
|
3
|
+
"version": "2.2.0",
|
|
4
4
|
"description": "A MCP server for Arize Phoenix",
|
|
5
5
|
"bin": {
|
|
6
6
|
"@arizeai/phoenix-mcp": "./build/index.js"
|
|
@@ -16,7 +16,7 @@
|
|
|
16
16
|
"author": "oss@arize.com",
|
|
17
17
|
"license": "Apache-2.0",
|
|
18
18
|
"dependencies": {
|
|
19
|
-
"@modelcontextprotocol/sdk": "^1.
|
|
19
|
+
"@modelcontextprotocol/sdk": "^1.13.3",
|
|
20
20
|
"glob": "^11.0.1",
|
|
21
21
|
"minimist": "^1.2.8",
|
|
22
22
|
"zod": "^3.24.2",
|