@arizeai/phoenix-mcp 2.1.0 → 2.1.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/LICENSE CHANGED
@@ -188,7 +188,7 @@ Copyright (c) Arize AI, Inc
188
188
  same "printed page" as the copyright notice for easier
189
189
  identification within third-party archives.
190
190
 
191
- Copyright 2024 Comet ML, Inc
191
+ Copyright 2025 Arize AI, Inc
192
192
 
193
193
  Licensed under the Apache License, Version 2.0 (the "License");
194
194
  you may not use this file except in compliance with the License.
package/README.md CHANGED
@@ -1,22 +1,70 @@
1
- # Phoenix MCP Server
1
+ <h1 align="center" style="border-bottom: none">
2
+ <div>
3
+ <a href="https://phoenix.arize.com/?utm_medium=github&utm_content=header_img&utm_campaign=phoenix-mcp">
4
+ <picture>
5
+ <source media="(prefers-color-scheme: dark)" srcset="https://raw.githubusercontent.com/Arize-ai/phoenix-assets/refs/heads/main/logos/Phoenix/phoenix.svg">
6
+ <source media="(prefers-color-scheme: light)" srcset="https://raw.githubusercontent.com/Arize-ai/phoenix-assets/refs/heads/main/logos/Phoenix/phoenix-white.svg">
7
+ <img alt="Arize Phoenix logo" src="https://raw.githubusercontent.com/Arize-ai/phoenix-assets/refs/heads/main/logos/Phoenix/phoenix.svg" width="100" />
8
+ </picture>
9
+ </a>
10
+ <br>
11
+ Arize Phoenix MCP Server
12
+ </div>
13
+ </h1>
2
14
 
3
- A MCP server for Arize Phoenix.
15
+ [![License](https://img.shields.io/badge/License-Apache_2.0-blue.svg)](https://github.com/Arize-ai/phoenix/blob/main/js/packages/phoenix-mcp/LICENSE)
16
+ <img src="https://badge.mcpx.dev?status=on" title="MCP Enabled"/>
4
17
 
5
- ## Installation
18
+ Phoenix MCP Server is an implementation of the Model Context Protocol for the Arize Phoenix platform. It provides a unified interface to Phoenix's capabilities.
6
19
 
7
- This package is installed using PNPM:
20
+ You can use Phoenix MCP Server for:
8
21
 
9
- ```bash
10
- pnpm install @arize/phoenix-mcp
22
+ - **Prompts Management**: Create, list, update, and iterate on prompts
23
+ - **Datasets**: Explore datasets, and synthesize new examples
24
+ - **Experiments**: Pull experiment results and visualize them with the help of an LLM
25
+
26
+ Don't see a use-case covered? `@arizeai/phoenix-mcp` is [open-source](https://github.com/Arize-ai/phoenix)! Issues and PRs welcome.
27
+
28
+ ## Installation
29
+
30
+ This MCP server can be run using `npx` and can be directly integrated with clients like Claude Desktop, Cursor, and more.
31
+
32
+ ```json
33
+ {
34
+ "mcpServers": {
35
+ "phoenix": {
36
+ "command": "npx",
37
+ "args": [
38
+ "-y",
39
+ "@arizeai/phoenix-mcp@latest",
40
+ "--baseUrl",
41
+ "https://my-phoenix.com",
42
+ "--apiKey",
43
+ "your-api-key"
44
+ ]
45
+ }
46
+ }
11
47
  ```
12
48
 
13
49
  ## Development
14
50
 
51
+ ## Install
52
+
53
+ This package is managed via a pnpm workspace.
54
+
55
+ ```sh
56
+ // From the /js/ directory
57
+ pnpm install
58
+ pnpm build
59
+ ```
60
+
61
+ This only needs to be repeated if dependencies change or there is a change to the phoenix-client.
62
+
15
63
  ### Building
16
64
 
17
65
  To build the project:
18
66
 
19
- ```bash
67
+ ```sh
20
68
  pnpm build
21
69
  ```
22
70
 
@@ -30,19 +78,21 @@ pnpm dev
30
78
 
31
79
  ### Debugging
32
80
 
33
- To run the debugger:
81
+ You can build and run the MCP inspector using the following:
34
82
 
35
83
  ```bash
36
- npx @modelcontextprotocol/inspector node ./build/index.js
84
+ pnpm inspect
37
85
  ```
38
86
 
39
87
  ## Environment Variables
40
88
 
41
- The server requires the following environment variables:
89
+ When developing, the server requires the following environment variables:
42
90
 
43
91
  - `PHOENIX_API_KEY`: Your Phoenix API key
44
92
  - `PHOENIX_BASE_URL`: The base URL for Phoenix
45
93
 
46
- ## License
94
+ Make sure to set these in a `.env` file. See `.env.example`.
95
+
96
+ # License
47
97
 
48
- This project is licensed under the Elv2 license.
98
+ Apache 2.0
@@ -1,6 +1,91 @@
1
1
  import z from "zod";
2
+ const LIST_DATASETS_DESCRIPTION = `Get a list of all datasets.
3
+
4
+ Datasets are collections of 'dataset examples' that each example includes an input,
5
+ (expected) output, and optional metadata. They are primarily used as inputs for experiments.
6
+
7
+ Example usage:
8
+ Show me all available datasets
9
+
10
+ Expected return:
11
+ Array of dataset objects with metadata.
12
+ Example: [
13
+ {
14
+ "id": "RGF0YXNldDox",
15
+ "name": "my-dataset",
16
+ "description": "A dataset for testing",
17
+ "metadata": {},
18
+ "created_at": "2024-03-20T12:00:00Z",
19
+ "updated_at": "2024-03-20T12:00:00Z"
20
+ }
21
+ ]`;
22
+ const GET_DATASET_EXAMPLES_DESCRIPTION = `Get examples from a dataset.
23
+
24
+ Dataset examples are an array of objects that each include an input,
25
+ (expected) output, and optional metadata. These examples are typically used to represent
26
+ input to an application or model (e.g. prompt template variables, a code file, or image)
27
+ and used to test or benchmark changes.
28
+
29
+ Example usage:
30
+ Show me all examples from dataset RGF0YXNldDox
31
+
32
+ Expected return:
33
+ Object containing dataset ID, version ID, and array of examples.
34
+ Example: {
35
+ "dataset_id": "datasetid1234",
36
+ "version_id": "datasetversionid1234",
37
+ "examples": [
38
+ {
39
+ "id": "exampleid1234",
40
+ "input": {
41
+ "text": "Sample input text"
42
+ },
43
+ "output": {
44
+ "text": "Expected output text"
45
+ },
46
+ "metadata": {},
47
+ "updated_at": "YYYY-MM-DDTHH:mm:ssZ"
48
+ }
49
+ ]
50
+ }`;
51
+ const GET_DATASET_EXPERIMENTS_DESCRIPTION = `List experiments run on a dataset.
52
+
53
+ Example usage:
54
+ Show me all experiments run on dataset RGF0YXNldDox
55
+
56
+ Expected return:
57
+ Array of experiment objects with metadata.
58
+ Example: [
59
+ {
60
+ "id": "experimentid1234",
61
+ "dataset_id": "datasetid1234",
62
+ "dataset_version_id": "datasetversionid1234",
63
+ "repetitions": 1,
64
+ "metadata": {},
65
+ "project_name": "Experiment-abc123",
66
+ "created_at": "YYYY-MM-DDTHH:mm:ssZ",
67
+ "updated_at": "YYYY-MM-DDTHH:mm:ssZ"
68
+ }
69
+ ]`;
70
+ const ADD_DATASET_EXAMPLES_DESCRIPTION = `Add examples to an existing dataset.
71
+
72
+ This tool adds one or more examples to an existing dataset. Each example includes an input,
73
+ output, and metadata. The metadata will automatically include information indicating that
74
+ these examples were synthetically generated via MCP. When calling this tool, check existing
75
+ examples using the "get-dataset-examples" tool to ensure that you are not adding duplicate
76
+ examples and following existing patterns for how data should be structured.
77
+
78
+ Example usage:
79
+ Look at the analyze "my-dataset" and augment them with new examples to cover relevant edge cases
80
+
81
+ Expected return:
82
+ Confirmation of successful addition of examples to the dataset.
83
+ Example: {
84
+ "dataset_name": "my-dataset",
85
+ "message": "Successfully added examples to dataset"
86
+ }`;
2
87
  export const initializeDatasetTools = ({ client, server, }) => {
3
- server.tool("list-datasets", "Get a list of all the datasets", {
88
+ server.tool("list-datasets", LIST_DATASETS_DESCRIPTION, {
4
89
  limit: z.number().min(1).max(100).default(100),
5
90
  }, async ({ limit }) => {
6
91
  const response = await client.GET("/v1/datasets", {
@@ -17,7 +102,7 @@ export const initializeDatasetTools = ({ client, server, }) => {
17
102
  ],
18
103
  };
19
104
  });
20
- server.tool("get-dataset-examples", "Get examples from a dataset", {
105
+ server.tool("get-dataset-examples", GET_DATASET_EXAMPLES_DESCRIPTION, {
21
106
  datasetId: z.string(),
22
107
  }, async ({ datasetId }) => {
23
108
  const response = await client.GET("/v1/datasets/{id}/examples", {
@@ -34,7 +119,7 @@ export const initializeDatasetTools = ({ client, server, }) => {
34
119
  ],
35
120
  };
36
121
  });
37
- server.tool("get-dataset-experiments", "List experiments run on a dataset", {
122
+ server.tool("get-dataset-experiments", GET_DATASET_EXPERIMENTS_DESCRIPTION, {
38
123
  datasetId: z.string(),
39
124
  }, async ({ datasetId }) => {
40
125
  const response = await client.GET("/v1/datasets/{dataset_id}/experiments", {
@@ -51,4 +136,50 @@ export const initializeDatasetTools = ({ client, server, }) => {
51
136
  ],
52
137
  };
53
138
  });
139
+ server.tool("add-dataset-examples", ADD_DATASET_EXAMPLES_DESCRIPTION, {
140
+ datasetName: z.string(),
141
+ examples: z.array(z.object({
142
+ input: z.record(z.any()),
143
+ output: z.record(z.any()),
144
+ metadata: z.record(z.any()).optional(),
145
+ })),
146
+ }, async ({ datasetName, examples }) => {
147
+ // Add MCP metadata to each example
148
+ const examplesWithMetadata = examples.map((example) => ({
149
+ ...example,
150
+ metadata: {
151
+ ...example.metadata,
152
+ source: "Synthetic Example added via MCP",
153
+ },
154
+ }));
155
+ const response = await client.POST("/v1/datasets/upload", {
156
+ body: {
157
+ action: "append",
158
+ name: datasetName,
159
+ inputs: examplesWithMetadata.map((e) => e.input),
160
+ outputs: examplesWithMetadata.map((e) => e.output),
161
+ metadata: examplesWithMetadata.map((e) => e.metadata),
162
+ },
163
+ params: {
164
+ query: {
165
+ sync: true,
166
+ },
167
+ },
168
+ });
169
+ if (!response.data?.data?.dataset_id) {
170
+ throw new Error("Failed to add examples to dataset: No dataset ID received");
171
+ }
172
+ return {
173
+ content: [
174
+ {
175
+ type: "text",
176
+ text: JSON.stringify({
177
+ dataset_name: datasetName,
178
+ dataset_id: response.data.data.dataset_id,
179
+ message: "Successfully added examples to dataset",
180
+ }, null, 2),
181
+ },
182
+ ],
183
+ };
184
+ });
54
185
  };
@@ -1,6 +1,84 @@
1
1
  import z from "zod";
2
+ const LIST_EXPERIMENTS_DESCRIPTION = `Get a list of all the experiments run on a given dataset.
3
+
4
+ Experiments are collections of experiment runs, each experiment run corresponds to a single
5
+ dataset example. The dataset example is passed to an implied \`task\` which in turn
6
+ produces an output.
7
+
8
+ Example usage:
9
+ Show me all the experiments I've run on dataset RGF0YXNldDox
10
+
11
+ Expected return:
12
+ Array of experiment objects with metadata.
13
+ Example: [
14
+ {
15
+ "id": "experimentid1234",
16
+ "dataset_id": "datasetid1234",
17
+ "dataset_version_id": "datasetversionid1234",
18
+ "repetitions": 1,
19
+ "metadata": {},
20
+ "project_name": "Experiment-abc123",
21
+ "created_at": "YYYY-MM-DDTHH:mm:ssZ",
22
+ "updated_at": "YYYY-MM-DDTHH:mm:ssZ"
23
+ }
24
+ ]`;
25
+ const GET_EXPERIMENT_DESCRIPTION = `Get an experiment by its ID.
26
+
27
+ The tool returns experiment metadata in the first content block and a JSON object with the
28
+ experiment data in the second. The experiment data contains both the results of each
29
+ experiment run and the annotations made by an evaluator to score or label the results,
30
+ for example, comparing the output of an experiment run to the expected output from the
31
+ dataset example.
32
+
33
+ Example usage:
34
+ Show me the experiment results for experiment RXhwZXJpbWVudDo4
35
+
36
+ Expected return:
37
+ Object containing experiment metadata and results.
38
+ Example: {
39
+ "metadata": {
40
+ "id": "experimentid1234",
41
+ "dataset_id": "datasetid1234",
42
+ "dataset_version_id": "datasetversionid1234",
43
+ "repetitions": 1,
44
+ "metadata": {},
45
+ "project_name": "Experiment-abc123",
46
+ "created_at": "YYYY-MM-DDTHH:mm:ssZ",
47
+ "updated_at": "YYYY-MM-DDTHH:mm:ssZ"
48
+ },
49
+ "experimentResult": [
50
+ {
51
+ "example_id": "exampleid1234",
52
+ "repetition_number": 0,
53
+ "input": "Sample input text",
54
+ "reference_output": "Expected output text",
55
+ "output": "Actual output text",
56
+ "error": null,
57
+ "latency_ms": 1000,
58
+ "start_time": "2025-03-20T12:00:00Z",
59
+ "end_time": "2025-03-20T12:00:01Z",
60
+ "trace_id": "trace-123",
61
+ "prompt_token_count": 10,
62
+ "completion_token_count": 20,
63
+ "annotations": [
64
+ {
65
+ "name": "quality",
66
+ "annotator_kind": "HUMAN",
67
+ "label": "good",
68
+ "score": 0.9,
69
+ "explanation": "Output matches expected format",
70
+ "trace_id": "trace-456",
71
+ "error": null,
72
+ "metadata": {},
73
+ "start_time": "YYYY-MM-DDTHH:mm:ssZ",
74
+ "end_time": "YYYY-MM-DDTHH:mm:ssZ"
75
+ }
76
+ ]
77
+ }
78
+ ]
79
+ }`;
2
80
  export const initializeExperimentTools = ({ client, server, }) => {
3
- server.tool("list-experiments-for-dataset", "Get a list of all the experiments for a given dataset", {
81
+ server.tool("list-experiments-for-dataset", LIST_EXPERIMENTS_DESCRIPTION, {
4
82
  dataset_id: z.string(),
5
83
  }, async ({ dataset_id }) => {
6
84
  const response = await client.GET("/v1/datasets/{dataset_id}/experiments", {
@@ -16,7 +94,7 @@ export const initializeExperimentTools = ({ client, server, }) => {
16
94
  ],
17
95
  };
18
96
  });
19
- server.tool("get-experiment-by-id", "Get an experiment by its ID. The tool returns experiment metadata in the first content block and a JSON object with the experiment data in the second.", {
97
+ server.tool("get-experiment-by-id", GET_EXPERIMENT_DESCRIPTION, {
20
98
  experiment_id: z.string(),
21
99
  }, async ({ experiment_id }) => {
22
100
  const [experimentMetadataResponse, experimentDataResponse] = await Promise.all([
@@ -41,3 +41,20 @@ export const updatePromptSchema = z.object({
41
41
  export const deletePromptSchema = z.object({
42
42
  prompt_identifier: z.string(),
43
43
  });
44
+ export const listPromptVersionsSchema = z.object({
45
+ prompt_identifier: z.string(),
46
+ limit: z.number().min(1).max(100).default(100),
47
+ });
48
+ export const getPromptVersionByTagSchema = z.object({
49
+ prompt_identifier: z.string(),
50
+ tag_name: z.string(),
51
+ });
52
+ export const listPromptVersionTagsSchema = z.object({
53
+ prompt_version_id: z.string(),
54
+ limit: z.number().min(1).max(100).default(100),
55
+ });
56
+ export const addPromptVersionTagSchema = z.object({
57
+ prompt_version_id: z.string(),
58
+ name: z.string(),
59
+ description: z.string().optional(),
60
+ });
@@ -1,7 +1,222 @@
1
1
  import { createPrompt, promptVersion } from "@arizeai/phoenix-client/prompts";
2
- import { listPromptsSchema, getLatestPromptSchema, getPromptByIdentifierSchema, getPromptVersionSchema, createPromptSchema, } from "./promptSchemas.js";
2
+ import { listPromptsSchema, getLatestPromptSchema, getPromptByIdentifierSchema, getPromptVersionSchema, createPromptSchema, listPromptVersionsSchema, getPromptVersionByTagSchema, listPromptVersionTagsSchema, addPromptVersionTagSchema, } from "./promptSchemas.js";
3
+ // Tool descriptions as template literals for better readability
4
+ const LIST_PROMPTS_DESCRIPTION = `Get a list of all the prompts.
5
+
6
+ Prompts (templates, prompt templates) are versioned templates for input messages to an LLM.
7
+ Each prompt includes both the input messages, but also the model and invocation parameters
8
+ to use when generating outputs.
9
+
10
+ Returns a list of prompt objects with their IDs, names, and descriptions.
11
+
12
+ Example usage:
13
+ List all available prompts
14
+
15
+ Expected return:
16
+ Array of prompt objects with metadata.
17
+ Example: [{
18
+ "name": "article-summarizer",
19
+ "description": "Summarizes an article into concise bullet points",
20
+ "source_prompt_id": null,
21
+ "id": "promptid1234"
22
+ }]`;
23
+ const GET_LATEST_PROMPT_DESCRIPTION = `Get the latest version of a prompt. Returns the prompt version with its template, model configuration, and invocation parameters.
24
+
25
+ Example usage:
26
+ Get the latest version of a prompt named 'article-summarizer'
27
+
28
+ Expected return:
29
+ Prompt version object with template and configuration.
30
+ Example: {
31
+ "description": "Initial version",
32
+ "model_provider": "OPENAI",
33
+ "model_name": "gpt-3.5-turbo",
34
+ "template": {
35
+ "type": "chat",
36
+ "messages": [
37
+ {
38
+ "role": "system",
39
+ "content": "You are an expert summarizer. Create clear, concise bullet points highlighting the key information."
40
+ },
41
+ {
42
+ "role": "user",
43
+ "content": "Please summarize the following {{topic}} article:\n\n{{article}}"
44
+ }
45
+ ]
46
+ },
47
+ "template_type": "CHAT",
48
+ "template_format": "MUSTACHE",
49
+ "invocation_parameters": {
50
+ "type": "openai",
51
+ "openai": {}
52
+ },
53
+ "id": "promptversionid1234"
54
+ }`;
55
+ const GET_PROMPT_BY_IDENTIFIER_DESCRIPTION = `Get a prompt's latest version by its identifier (name or ID). Returns the prompt version with its template, model configuration, and invocation parameters.
56
+
57
+ Example usage:
58
+ Get the latest version of a prompt with name 'article-summarizer'
59
+
60
+ Expected return:
61
+ Prompt version object with template and configuration.
62
+ Example: {
63
+ "description": "Initial version",
64
+ "model_provider": "OPENAI",
65
+ "model_name": "gpt-3.5-turbo",
66
+ "template": {
67
+ "type": "chat",
68
+ "messages": [
69
+ {
70
+ "role": "system",
71
+ "content": "You are an expert summarizer. Create clear, concise bullet points highlighting the key information."
72
+ },
73
+ {
74
+ "role": "user",
75
+ "content": "Please summarize the following {{topic}} article:\n\n{{article}}"
76
+ }
77
+ ]
78
+ },
79
+ "template_type": "CHAT",
80
+ "template_format": "MUSTACHE",
81
+ "invocation_parameters": {
82
+ "type": "openai",
83
+ "openai": {}
84
+ },
85
+ "id": "promptversionid1234"
86
+ }`;
87
+ const GET_PROMPT_VERSION_DESCRIPTION = `Get a specific version of a prompt using its version ID. Returns the prompt version with its template, model configuration, and invocation parameters.
88
+
89
+ Example usage:
90
+ Get a specific prompt version with ID 'promptversionid1234'
91
+
92
+ Expected return:
93
+ Prompt version object with template and configuration.
94
+ Example: {
95
+ "description": "Initial version",
96
+ "model_provider": "OPENAI",
97
+ "model_name": "gpt-3.5-turbo",
98
+ "template": {
99
+ "type": "chat",
100
+ "messages": [
101
+ {
102
+ "role": "system",
103
+ "content": "You are an expert summarizer. Create clear, concise bullet points highlighting the key information."
104
+ },
105
+ {
106
+ "role": "user",
107
+ "content": "Please summarize the following {{topic}} article:\n\n{{article}}"
108
+ }
109
+ ]
110
+ },
111
+ "template_type": "CHAT",
112
+ "template_format": "MUSTACHE",
113
+ "invocation_parameters": {
114
+ "type": "openai",
115
+ "openai": {}
116
+ },
117
+ "id": "promptversionid1234"
118
+ }`;
119
+ const UPSERT_PROMPT_DESCRIPTION = `Create or update a prompt with its template and configuration. Creates a new prompt and its initial version with specified model settings.
120
+
121
+ Example usage:
122
+ Create a new prompt named 'email_generator' with a template for generating emails
123
+
124
+ Expected return:
125
+ A confirmation message of successful prompt creation`;
126
+ const LIST_PROMPT_VERSIONS_DESCRIPTION = `Get a list of all versions for a specific prompt. Returns versions with pagination support.
127
+
128
+ Example usage:
129
+ List all versions of a prompt named 'article-summarizer'
130
+
131
+ Expected return:
132
+ Array of prompt version objects with IDs and configuration.
133
+ Example: [
134
+ {
135
+ "description": "Initial version",
136
+ "model_provider": "OPENAI",
137
+ "model_name": "gpt-3.5-turbo",
138
+ "template": {
139
+ "type": "chat",
140
+ "messages": [
141
+ {
142
+ "role": "system",
143
+ "content": "You are an expert summarizer. Create clear, concise bullet points highlighting the key information."
144
+ },
145
+ {
146
+ "role": "user",
147
+ "content": "Please summarize the following {{topic}} article:\n\n{{article}}"
148
+ }
149
+ ]
150
+ },
151
+ "template_type": "CHAT",
152
+ "template_format": "MUSTACHE",
153
+ "invocation_parameters": {
154
+ "type": "openai",
155
+ "openai": {}
156
+ },
157
+ "id": "promptversionid1234"
158
+ }
159
+ ]`;
160
+ const GET_PROMPT_VERSION_BY_TAG_DESCRIPTION = `Get a prompt version by its tag name. Returns the prompt version with its template, model configuration, and invocation parameters.
161
+
162
+ Example usage:
163
+ Get the 'production' tagged version of prompt 'article-summarizer'
164
+
165
+ Expected return:
166
+ Prompt version object with template and configuration.
167
+ Example: {
168
+ "description": "Initial version",
169
+ "model_provider": "OPENAI",
170
+ "model_name": "gpt-3.5-turbo",
171
+ "template": {
172
+ "type": "chat",
173
+ "messages": [
174
+ {
175
+ "role": "system",
176
+ "content": "You are an expert summarizer. Create clear, concise bullet points highlighting the key information."
177
+ },
178
+ {
179
+ "role": "user",
180
+ "content": "Please summarize the following {{topic}} article:\n\n{{article}}"
181
+ }
182
+ ]
183
+ },
184
+ "template_type": "CHAT",
185
+ "template_format": "MUSTACHE",
186
+ "invocation_parameters": {
187
+ "type": "openai",
188
+ "openai": {}
189
+ },
190
+ "id": "promptversionid1234"
191
+ }`;
192
+ const LIST_PROMPT_VERSION_TAGS_DESCRIPTION = `Get a list of all tags for a specific prompt version. Returns tag objects with pagination support.
193
+
194
+ Example usage:
195
+ List all tags associated with prompt version 'promptversionid1234'
196
+
197
+ Expected return:
198
+ Array of tag objects with names and IDs.
199
+ Example: [
200
+ {
201
+ "name": "staging",
202
+ "description": "The version deployed to staging",
203
+ "id": "promptversionid1234"
204
+ },
205
+ {
206
+ "name": "development",
207
+ "description": "The version deployed for development",
208
+ "id": "promptversionid1234"
209
+ }
210
+ ]`;
211
+ const ADD_PROMPT_VERSION_TAG_DESCRIPTION = `Add a tag to a specific prompt version. The operation returns no content on success (204 status code).
212
+
213
+ Example usage:
214
+ Tag prompt version 'promptversionid1234' with the name 'production'
215
+
216
+ Expected return:
217
+ Confirmation message of successful tag addition`;
3
218
  export const initializePromptTools = ({ client, server, }) => {
4
- server.tool("list-prompts", "Get a list of all the prompts", listPromptsSchema.shape, async ({ limit }) => {
219
+ server.tool("list-prompts", LIST_PROMPTS_DESCRIPTION, listPromptsSchema.shape, async ({ limit }) => {
5
220
  const response = await client.GET("/v1/prompts", {
6
221
  params: {
7
222
  query: {
@@ -18,7 +233,7 @@ export const initializePromptTools = ({ client, server, }) => {
18
233
  ],
19
234
  };
20
235
  });
21
- server.tool("get-latest-prompt", "Get the latest prompt", getLatestPromptSchema.shape, async ({ prompt_identifier }) => {
236
+ server.tool("get-latest-prompt", GET_LATEST_PROMPT_DESCRIPTION, getLatestPromptSchema.shape, async ({ prompt_identifier }) => {
22
237
  const response = await client.GET("/v1/prompts/{prompt_identifier}/latest", {
23
238
  params: {
24
239
  path: {
@@ -35,7 +250,7 @@ export const initializePromptTools = ({ client, server, }) => {
35
250
  ],
36
251
  };
37
252
  });
38
- server.tool("get-prompt-by-identifier", "Get a prompt's latest version by its identifier", getPromptByIdentifierSchema.shape, async ({ prompt_identifier }) => {
253
+ server.tool("get-prompt-by-identifier", GET_PROMPT_BY_IDENTIFIER_DESCRIPTION, getPromptByIdentifierSchema.shape, async ({ prompt_identifier }) => {
39
254
  const response = await client.GET("/v1/prompts/{prompt_identifier}/latest", {
40
255
  params: {
41
256
  path: {
@@ -52,7 +267,7 @@ export const initializePromptTools = ({ client, server, }) => {
52
267
  ],
53
268
  };
54
269
  });
55
- server.tool("get-prompt-version", "Get a specific version of a prompt given a prompt version id", getPromptVersionSchema.shape, async ({ prompt_version_id }) => {
270
+ server.tool("get-prompt-version", GET_PROMPT_VERSION_DESCRIPTION, getPromptVersionSchema.shape, async ({ prompt_version_id }) => {
56
271
  const response = await client.GET("/v1/prompt_versions/{prompt_version_id}", {
57
272
  params: {
58
273
  path: {
@@ -69,7 +284,7 @@ export const initializePromptTools = ({ client, server, }) => {
69
284
  ],
70
285
  };
71
286
  });
72
- server.tool("upsert-prompt", "Create or update a prompt", createPromptSchema.shape, async ({ name, description, template, model_provider, model_name, temperature, }) => {
287
+ server.tool("upsert-prompt", UPSERT_PROMPT_DESCRIPTION, createPromptSchema.shape, async ({ name, description, template, model_provider, model_name, temperature, }) => {
73
288
  let promptVersionData;
74
289
  switch (model_provider) {
75
290
  case "OPENAI":
@@ -153,27 +368,83 @@ export const initializePromptTools = ({ client, server, }) => {
153
368
  ],
154
369
  };
155
370
  });
371
+ server.tool("list-prompt-versions", LIST_PROMPT_VERSIONS_DESCRIPTION, listPromptVersionsSchema.shape, async ({ prompt_identifier, limit }) => {
372
+ const response = await client.GET("/v1/prompts/{prompt_identifier}/versions", {
373
+ params: {
374
+ path: {
375
+ prompt_identifier,
376
+ },
377
+ query: {
378
+ limit,
379
+ },
380
+ },
381
+ });
382
+ return {
383
+ content: [
384
+ {
385
+ type: "text",
386
+ text: JSON.stringify(response.data, null, 2),
387
+ },
388
+ ],
389
+ };
390
+ });
391
+ server.tool("get-prompt-version-by-tag", GET_PROMPT_VERSION_BY_TAG_DESCRIPTION, getPromptVersionByTagSchema.shape, async ({ prompt_identifier, tag_name }) => {
392
+ const response = await client.GET("/v1/prompts/{prompt_identifier}/tags/{tag_name}", {
393
+ params: {
394
+ path: {
395
+ prompt_identifier,
396
+ tag_name,
397
+ },
398
+ },
399
+ });
400
+ return {
401
+ content: [
402
+ {
403
+ type: "text",
404
+ text: JSON.stringify(response.data, null, 2),
405
+ },
406
+ ],
407
+ };
408
+ });
409
+ server.tool("list-prompt-version-tags", LIST_PROMPT_VERSION_TAGS_DESCRIPTION, listPromptVersionTagsSchema.shape, async ({ prompt_version_id, limit }) => {
410
+ const response = await client.GET("/v1/prompt_versions/{prompt_version_id}/tags", {
411
+ params: {
412
+ path: {
413
+ prompt_version_id,
414
+ },
415
+ query: {
416
+ limit,
417
+ },
418
+ },
419
+ });
420
+ return {
421
+ content: [
422
+ {
423
+ type: "text",
424
+ text: JSON.stringify(response.data, null, 2),
425
+ },
426
+ ],
427
+ };
428
+ });
429
+ server.tool("add-prompt-version-tag", ADD_PROMPT_VERSION_TAG_DESCRIPTION, addPromptVersionTagSchema.shape, async ({ prompt_version_id, name, description }) => {
430
+ await client.POST("/v1/prompt_versions/{prompt_version_id}/tags", {
431
+ params: {
432
+ path: {
433
+ prompt_version_id,
434
+ },
435
+ },
436
+ body: {
437
+ name,
438
+ description,
439
+ },
440
+ });
441
+ return {
442
+ content: [
443
+ {
444
+ type: "text",
445
+ text: `Successfully added tag "${name}" to prompt version ${prompt_version_id}`,
446
+ },
447
+ ],
448
+ };
449
+ });
156
450
  };
157
- // server.tool(
158
- // "delete-prompt",
159
- // "Delete a prompt",
160
- // deletePromptSchema.shape,
161
- // async ({ prompt_identifier }) => {
162
- // const response = await client.DELETE("/v1/prompts/{prompt_identifier}", {
163
- // params: {
164
- // path: {
165
- // prompt_identifier,
166
- // },
167
- // },
168
- // });
169
- // return {
170
- // content: [
171
- // {
172
- // type: "text",
173
- // text: "Prompt deleted successfully",
174
- // },
175
- // ],
176
- // };
177
- // }
178
- // );
179
- // };
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@arizeai/phoenix-mcp",
3
- "version": "2.1.0",
3
+ "version": "2.1.2",
4
4
  "description": "A MCP server for Arize Phoenix",
5
5
  "bin": {
6
6
  "@arizeai/phoenix-mcp": "./build/index.js"