@arizeai/phoenix-mcp 2.1.10 → 2.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +3 -0
- package/build/index.js +4 -0
- package/build/spanTools.js +143 -0
- package/build/supportTools.js +73 -0
- package/package.json +3 -3
package/README.md
CHANGED
|
@@ -14,12 +14,14 @@
|
|
|
14
14
|
|
|
15
15
|
[](https://github.com/Arize-ai/phoenix/blob/main/js/packages/phoenix-mcp/LICENSE)
|
|
16
16
|
<img src="https://badge.mcpx.dev?status=on" title="MCP Enabled"/>
|
|
17
|
+
<a href="https://cursor.com/install-mcp?name=phoenix&config=eyJjb21tYW5kIjoibnB4IC15IEBhcml6ZWFpL3Bob2VuaXgtbWNwQGxhdGVzdCAtLWJhc2VVcmwgaHR0cDovL2xvY2FsaG9zdDo2MDA2IC0tYXBpS2V5IHlvdXItYXBpLWtleSJ9"><img src="https://cursor.com/deeplink/mcp-install-dark.svg" alt="Add Arize Phoenix MCP server to Cursor" height=20 /></a>
|
|
17
18
|
|
|
18
19
|
Phoenix MCP Server is an implementation of the Model Context Protocol for the Arize Phoenix platform. It provides a unified interface to Phoenix's capabilities.
|
|
19
20
|
|
|
20
21
|
You can use Phoenix MCP Server for:
|
|
21
22
|
|
|
22
23
|
- **Projects Management**: List and explore projects that organize your observability data
|
|
24
|
+
- **Spans & Annotations**: Retrieve spans and their annotations for analysis and debugging
|
|
23
25
|
- **Prompts Management**: Create, list, update, and iterate on prompts
|
|
24
26
|
- **Datasets**: Explore datasets, and synthesize new examples
|
|
25
27
|
- **Experiments**: Pull experiment results and visualize them with the help of an LLM
|
|
@@ -45,6 +47,7 @@ This MCP server can be used using `npx` and can be directly integrated with clie
|
|
|
45
47
|
]
|
|
46
48
|
}
|
|
47
49
|
}
|
|
50
|
+
}
|
|
48
51
|
```
|
|
49
52
|
|
|
50
53
|
## Development
|
package/build/index.js
CHANGED
|
@@ -8,7 +8,9 @@ import { initializeDatasetTools } from "./datasetTools.js";
|
|
|
8
8
|
import { initializeExperimentTools } from "./experimentTools.js";
|
|
9
9
|
import { initializePromptTools } from "./promptTools.js";
|
|
10
10
|
import { initializeProjectTools } from "./projectTools.js";
|
|
11
|
+
import { initializeSpanTools } from "./spanTools.js";
|
|
11
12
|
import { initializeReadmeResources } from "./readmeResource.js";
|
|
13
|
+
import { initializeSupportTools } from "./supportTools.js";
|
|
12
14
|
const argv = minimist(process.argv.slice(2));
|
|
13
15
|
const headers = argv.apiKey
|
|
14
16
|
? {
|
|
@@ -36,6 +38,8 @@ initializePromptTools({ client, server });
|
|
|
36
38
|
initializeExperimentTools({ client, server });
|
|
37
39
|
initializeDatasetTools({ client, server });
|
|
38
40
|
initializeProjectTools({ client, server });
|
|
41
|
+
initializeSpanTools({ client, server });
|
|
42
|
+
initializeSupportTools({ server });
|
|
39
43
|
async function main() {
|
|
40
44
|
// Initialize readme resources first
|
|
41
45
|
if (process.env.DANGEROUSLY_READ_README_FILES === "true") {
|
|
@@ -0,0 +1,143 @@
|
|
|
1
|
+
import z from "zod";
|
|
2
|
+
const GET_SPANS_DESCRIPTION = `Get spans from a project with filtering criteria.

Spans represent individual operations or units of work within a trace. They contain timing information,
attributes, and context about the operation being performed.

Example usage:
Get recent spans from project "my-project"
Get spans in a time range from project "my-project"

Expected return:
Object containing spans array and optional next cursor for pagination.
Example: {
"spans": [
{
"id": "span123",
"name": "http_request",
"context": {
"trace_id": "trace456",
"span_id": "span123"
},
"start_time": "2024-01-01T12:00:00Z",
"end_time": "2024-01-01T12:00:01Z",
"attributes": {
"http.method": "GET",
"http.url": "/api/users"
}
}
],
"nextCursor": "cursor_for_pagination"
}`;
const GET_SPAN_ANNOTATIONS_DESCRIPTION = `Get span annotations for a list of span IDs.

Span annotations provide additional metadata, scores, or labels for spans. They can be created
by humans, LLMs, or code and help in analyzing and categorizing spans.

Example usage:
Get annotations for spans ["span1", "span2"] from project "my-project"
Get quality score annotations for span "span1" from project "my-project"

Expected return:
Object containing annotations array and optional next cursor for pagination.
Example: {
"annotations": [
{
"id": "annotation123",
"span_id": "span1",
"name": "quality_score",
"result": {
"label": "good",
"score": 0.95,
"explanation": null
},
"annotator_kind": "LLM",
"metadata": {
"model": "gpt-4"
}
}
],
"nextCursor": "cursor_for_pagination"
}`;
/**
 * Registers the span-related MCP tools ("get-spans" and
 * "get-span-annotations") on the given server, backed by the Phoenix REST
 * client.
 *
 * @param {{ client: object, server: object }} deps - Phoenix API client and MCP server.
 */
export const initializeSpanTools = ({ client, server, }) => {
    server.tool("get-spans", GET_SPANS_DESCRIPTION, {
        projectName: z.string(),
        startTime: z.string().optional(),
        endTime: z.string().optional(),
        cursor: z.string().optional(),
        limit: z.number().min(1).max(1000).default(100).optional(),
    }, async ({ projectName, startTime, endTime, cursor, limit = 100 }) => {
        // Only forward optional filters the caller actually supplied;
        // snake_case keys match the Phoenix REST query parameters.
        const query = {
            limit,
            ...(cursor && { cursor }),
            ...(startTime && { start_time: startTime }),
            ...(endTime && { end_time: endTime }),
        };
        const response = await client.GET("/v1/projects/{project_identifier}/spans", {
            params: {
                path: {
                    project_identifier: projectName,
                },
                query,
            },
        });
        const spans = response.data?.data ?? [];
        const nextCursor = response.data?.next_cursor ?? null;
        return {
            content: [
                {
                    type: "text",
                    text: JSON.stringify({ spans, nextCursor }, null, 2),
                },
            ],
        };
    });
    server.tool("get-span-annotations", GET_SPAN_ANNOTATIONS_DESCRIPTION, {
        projectName: z.string(),
        spanIds: z.array(z.string()),
        includeAnnotationNames: z.array(z.string()).optional(),
        excludeAnnotationNames: z.array(z.string()).optional(),
        cursor: z.string().optional(),
        limit: z.number().min(1).max(1000).default(100).optional(),
    }, async ({ projectName, spanIds, includeAnnotationNames, excludeAnnotationNames, cursor, limit = 100, }) => {
        // Required span_ids plus whatever optional filters were provided.
        const query = {
            span_ids: spanIds,
            limit,
            ...(cursor && { cursor }),
            ...(includeAnnotationNames && {
                include_annotation_names: includeAnnotationNames,
            }),
            ...(excludeAnnotationNames && {
                exclude_annotation_names: excludeAnnotationNames,
            }),
        };
        const response = await client.GET("/v1/projects/{project_identifier}/span_annotations", {
            params: {
                path: {
                    project_identifier: projectName,
                },
                query,
            },
        });
        const annotations = response.data?.data ?? [];
        const nextCursor = response.data?.next_cursor ?? null;
        return {
            content: [
                {
                    type: "text",
                    text: JSON.stringify({ annotations, nextCursor }, null, 2),
                },
            ],
        };
    });
};
|
|
@@ -0,0 +1,73 @@
|
|
|
1
|
+
import { Client } from "@modelcontextprotocol/sdk/client/index.js";
|
|
2
|
+
import { StreamableHTTPClientTransport } from "@modelcontextprotocol/sdk/client/streamableHttp.js";
|
|
3
|
+
import z from "zod";
|
|
4
|
+
const PHOENIX_SUPPORT_DESCRIPTION = `Get help with Phoenix and OpenInference.
|
|
5
|
+
|
|
6
|
+
- Tracing AI applications via OpenInference and OpenTelemetry
|
|
7
|
+
- Phoenix datasets, experiments, and prompt management
|
|
8
|
+
- Phoenix evals and annotations
|
|
9
|
+
|
|
10
|
+
Use this tool when you need assistance with Phoenix features, troubleshooting,
|
|
11
|
+
or best practices.
|
|
12
|
+
|
|
13
|
+
Expected return:
|
|
14
|
+
Expert guidance about how to use and integrate Phoenix`;
|
|
15
|
+
/**
 * Creates an MCP client connected to the RunLLM server via HTTP.
 *
 * @returns {Promise<Client>} A connected MCP client.
 */
async function createRunLLMClient() {
    // The assistant-name header routes questions to the Phoenix assistant.
    const runLLMEndpoint = new URL("https://mcp.runllm.com/mcp/");
    const transport = new StreamableHTTPClientTransport(runLLMEndpoint, {
        requestInit: {
            headers: {
                "assistant-name": "arize-phoenix",
            },
        },
    });
    const runLLMClient = new Client({
        name: "runllm-client",
        version: "1.0.0",
    });
    await runLLMClient.connect(transport);
    return runLLMClient;
}
|
|
33
|
+
/**
 * Calls the "chat" tool on the RunLLM MCP server and returns its text reply.
 *
 * A fresh client (and HTTP transport) is created per call; it is always
 * closed afterwards so repeated support questions do not leak connections.
 *
 * @param {{ question: string }} args - The user's support question.
 * @returns {Promise<string>} Concatenated text content from the chat tool,
 *   or a fallback message when no text content is returned.
 */
export async function callRunLLMChat({ question, }) {
    const client = await createRunLLMClient();
    try {
        // Call the chat tool with the user's question
        const result = await client.callTool({
            name: "chat",
            arguments: {
                message: question,
            },
        });
        // There's usually only one content item, but we'll handle multiple for safety
        if (result.content && Array.isArray(result.content)) {
            const textContent = result.content
                .filter((item) => item.type === "text")
                .map((item) => item.text)
                .join("\n");
            if (textContent) {
                return textContent;
            }
        }
        return "No response received from support";
    }
    finally {
        // FIX: the original never closed the client, leaking the underlying
        // streamable-HTTP connection on every support question.
        await client.close();
    }
}
|
|
57
|
+
/**
 * Registers the "phoenix-support" MCP tool, which proxies user questions to
 * the RunLLM support assistant and returns its answer as text.
 *
 * @param {{ server: object }} deps - The MCP server to register the tool on.
 */
export const initializeSupportTools = async ({ server, }) => {
    const inputSchema = {
        question: z
            .string()
            .describe("Your question about Arize Phoenix, OpenInference, or related topics"),
    };
    server.tool("phoenix-support", PHOENIX_SUPPORT_DESCRIPTION, inputSchema, async ({ question }) => {
        const answer = await callRunLLMChat({ question });
        return {
            content: [
                {
                    type: "text",
                    text: answer,
                },
            ],
        };
    });
};
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@arizeai/phoenix-mcp",
|
|
3
|
-
"version": "2.
|
|
3
|
+
"version": "2.2.0",
|
|
4
4
|
"description": "A MCP server for Arize Phoenix",
|
|
5
5
|
"bin": {
|
|
6
6
|
"@arizeai/phoenix-mcp": "./build/index.js"
|
|
@@ -16,11 +16,11 @@
|
|
|
16
16
|
"author": "oss@arize.com",
|
|
17
17
|
"license": "Apache-2.0",
|
|
18
18
|
"dependencies": {
|
|
19
|
-
"@modelcontextprotocol/sdk": "^1.
|
|
19
|
+
"@modelcontextprotocol/sdk": "^1.13.3",
|
|
20
20
|
"glob": "^11.0.1",
|
|
21
21
|
"minimist": "^1.2.8",
|
|
22
22
|
"zod": "^3.24.2",
|
|
23
|
-
"@arizeai/phoenix-client": "2.
|
|
23
|
+
"@arizeai/phoenix-client": "2.2.0"
|
|
24
24
|
},
|
|
25
25
|
"devDependencies": {
|
|
26
26
|
"@types/glob": "^8.1.0",
|