arize-phoenix: 4.12.0rc1-py3-none-any.whl → 4.12.1rc1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of arize-phoenix might be problematic.
- {arize_phoenix-4.12.0rc1.dist-info → arize_phoenix-4.12.1rc1.dist-info}/METADATA +4 -3
- {arize_phoenix-4.12.0rc1.dist-info → arize_phoenix-4.12.1rc1.dist-info}/RECORD +24 -23
- phoenix/server/api/context.py +3 -7
- phoenix/server/api/openapi/main.py +18 -2
- phoenix/server/api/openapi/schema.py +12 -12
- phoenix/server/api/routers/v1/__init__.py +36 -83
- phoenix/server/api/routers/v1/dataset_examples.py +102 -123
- phoenix/server/api/routers/v1/datasets.py +390 -506
- phoenix/server/api/routers/v1/evaluations.py +73 -66
- phoenix/server/api/routers/v1/experiment_evaluations.py +68 -91
- phoenix/server/api/routers/v1/experiment_runs.py +98 -155
- phoenix/server/api/routers/v1/experiments.py +132 -181
- phoenix/server/api/routers/v1/pydantic_compat.py +78 -0
- phoenix/server/api/routers/v1/spans.py +144 -173
- phoenix/server/api/routers/v1/traces.py +115 -128
- phoenix/server/api/routers/v1/utils.py +95 -0
- phoenix/server/app.py +154 -183
- phoenix/server/templates/index.html +51 -43
- phoenix/server/thread_server.py +2 -2
- phoenix/session/client.py +3 -2
- phoenix/version.py +1 -1
- phoenix/server/openapi/docs.py +0 -221
- {arize_phoenix-4.12.0rc1.dist-info → arize_phoenix-4.12.1rc1.dist-info}/WHEEL +0 -0
- {arize_phoenix-4.12.0rc1.dist-info → arize_phoenix-4.12.1rc1.dist-info}/licenses/IP_NOTICE +0 -0
- {arize_phoenix-4.12.0rc1.dist-info → arize_phoenix-4.12.1rc1.dist-info}/licenses/LICENSE +0 -0
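Judging by the file list above, the hunks reproduced below appear to come from phoenix/server/api/routers/v1/evaluations.py and phoenix/server/api/routers/v1/experiment_evaluations.py. A diff like this can also be reproduced locally from the two wheels themselves; the sketch below uses only the standard library and assumes both wheels have already been downloaded (for example with pip download) into the working directory. It is not the tooling this page was generated with.

import difflib
import zipfile
from typing import List

OLD_WHEEL = "arize_phoenix-4.12.0rc1-py3-none-any.whl"  # assumed local filenames
NEW_WHEEL = "arize_phoenix-4.12.1rc1-py3-none-any.whl"


def read_member(wheel_path: str, member: str) -> List[str]:
    """Read one file out of a wheel (a wheel is a plain zip archive)."""
    with zipfile.ZipFile(wheel_path) as zf:
        return zf.read(member).decode("utf-8").splitlines(keepends=True)


member = "phoenix/server/api/routers/v1/evaluations.py"
print(
    "".join(
        difflib.unified_diff(
            read_member(OLD_WHEEL, member),
            read_member(NEW_WHEEL, member),
            fromfile=f"{OLD_WHEEL}/{member}",
            tofile=f"{NEW_WHEEL}/{member}",
        )
    )
)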
@@ -1,9 +1,10 @@
 import gzip
 from itertools import chain
-from typing import AsyncContextManager, Callable, Iterator, Tuple
+from typing import AsyncContextManager, Callable, Iterator, Optional, Tuple
 
 import pandas as pd
 import pyarrow as pa
+from fastapi import APIRouter, Header, HTTPException, Query
 from google.protobuf.message import DecodeError
 from pandas import DataFrame
 from sqlalchemy import select
@@ -16,7 +17,7 @@ from starlette.datastructures import State
 from starlette.requests import Request
 from starlette.responses import Response, StreamingResponse
 from starlette.status import (
-    …
+    HTTP_204_NO_CONTENT,
     HTTP_404_NOT_FOUND,
     HTTP_415_UNSUPPORTED_MEDIA_TYPE,
     HTTP_422_UNPROCESSABLE_ENTITY,
@@ -36,86 +37,92 @@ from phoenix.trace.span_evaluations import (
     TraceEvaluations,
 )
 
+from .utils import add_errors_to_responses
+
 EvaluationName: TypeAlias = str
 
+router = APIRouter(tags=["traces"], include_in_schema=False)
 
-… (30 removed lines not captured in this view)
+
+@router.post(
+    "/evaluations",
+    operation_id="addEvaluations",
+    summary="Add span, trace, or document evaluations",
+    status_code=HTTP_204_NO_CONTENT,
+    responses=add_errors_to_responses(
+        [
+            {
+                "status_code": HTTP_415_UNSUPPORTED_MEDIA_TYPE,
+                "description": (
+                    "Unsupported content type, "
+                    "only gzipped protobuf and pandas-arrow are supported"
+                ),
+            },
+            HTTP_422_UNPROCESSABLE_ENTITY,
+        ]
+    ),
+    openapi_extra={
+        "requestBody": {
+            "required": True,
+            "content": {
+                "application/x-protobuf": {"schema": {"type": "string", "format": "binary"}},
+                "application/x-pandas-arrow": {"schema": {"type": "string", "format": "binary"}},
+            },
+        },
+    },
+)
+async def post_evaluations(
+    request: Request,
+    content_type: Optional[str] = Header(default=None),
+    content_encoding: Optional[str] = Header(default=None),
+) -> Response:
     if content_type == "application/x-pandas-arrow":
         return await _process_pyarrow(request)
     if content_type != "application/x-protobuf":
-        …
+        raise HTTPException(
+            detail="Unsupported content type", status_code=HTTP_415_UNSUPPORTED_MEDIA_TYPE
+        )
     body = await request.body()
-    content_encoding = request.headers.get("content-encoding")
     if content_encoding == "gzip":
         body = gzip.decompress(body)
     elif content_encoding:
-        …
+        raise HTTPException(
+            detail="Unsupported content encoding", status_code=HTTP_415_UNSUPPORTED_MEDIA_TYPE
+        )
     evaluation = pb.Evaluation()
     try:
         evaluation.ParseFromString(body)
     except DecodeError:
-        …
+        raise HTTPException(
+            detail="Request body is invalid", status_code=HTTP_422_UNPROCESSABLE_ENTITY
+        )
     if not evaluation.name.strip():
-        …
-            "Evaluation name must not be blank/empty",
+        raise HTTPException(
+            detail="Evaluation name must not be blank/empty",
            status_code=HTTP_422_UNPROCESSABLE_ENTITY,
        )
     await request.state.queue_evaluation_for_bulk_insert(evaluation)
     return Response()
 
 
-… (16 removed lines not captured in this view)
-      403:
-        description: Forbidden
-      404:
-        description: Not found
-    """
+@router.get(
+    "/evaluations",
+    operation_id="getEvaluations",
+    summary="Get span, trace, or document evaluations from a project",
+    responses=add_errors_to_responses([HTTP_404_NOT_FOUND]),
+)
+async def get_evaluations(
+    request: Request,
+    project_name: Optional[str] = Query(
+        default=None,
+        description=(
+            "The name of the project to get evaluations from (if omitted, "
+            f"evaluations will be drawn from the `{DEFAULT_PROJECT_NAME}` project)"
+        ),
+    ),
+) -> Response:
     project_name = (
-        …
+        project_name
         or request.query_params.get("project-name")  # for backward compatibility
         or request.headers.get("project-name")  # read from headers for backwards compatibility
         or DEFAULT_PROJECT_NAME
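add_errors_to_responses is imported from the new .utils module (+95 lines, not shown in this view). Inferred purely from the call sites above, it accepts either bare status codes or dicts carrying a status_code and description and produces the mapping fed to FastAPI's responses= parameter. A hypothetical stand-in could look like the following; this is not the actual Phoenix implementation.

from http import HTTPStatus
from typing import Any, Dict, List, Union


def add_errors_to_responses(
    errors: List[Union[int, Dict[str, Any]]],
) -> Dict[Union[int, str], Dict[str, Any]]:
    """Build the `responses` mapping FastAPI expects from status codes or
    {"status_code": ..., "description": ...} dicts (hypothetical sketch)."""
    responses: Dict[Union[int, str], Dict[str, Any]] = {}
    for error in errors:
        if isinstance(error, int):
            status_code, description = error, HTTPStatus(error).phrase
        else:
            status_code = error["status_code"]
            description = error.get("description", HTTPStatus(status_code).phrase)
        responses[status_code] = {"description": description}
    return responses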
@@ -169,20 +176,20 @@ async def _process_pyarrow(request: Request) -> Response:
     try:
         reader = pa.ipc.open_stream(body)
     except pa.ArrowInvalid:
-        … (2 removed lines not captured in this view)
+        raise HTTPException(
+            detail="Request body is not valid pyarrow",
             status_code=HTTP_422_UNPROCESSABLE_ENTITY,
         )
     try:
         evaluations = Evaluations.from_pyarrow_reader(reader)
     except Exception as e:
         if isinstance(e, PhoenixEvaluationNameIsMissing):
-            …
-                "Evaluation name must not be blank/empty",
+            raise HTTPException(
+                detail="Evaluation name must not be blank/empty",
                 status_code=HTTP_422_UNPROCESSABLE_ENTITY,
             )
-        … (2 removed lines not captured in this view)
+        raise HTTPException(
+            detail="Invalid data in request body",
             status_code=HTTP_422_UNPROCESSABLE_ENTITY,
         )
     return Response(background=BackgroundTask(_add_evaluations, request.state, evaluations))
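For context on how the application/x-pandas-arrow branch above is normally exercised: evaluation dataframes typically reach this endpoint through the Phoenix client rather than hand-rolled requests. A rough sketch using the documented phoenix 4.x client API; the span ID and dataframe values are invented for illustration.

import pandas as pd
import phoenix as px
from phoenix.trace import SpanEvaluations

# One row per span; the index name "context.span_id" is what SpanEvaluations expects.
eval_df = pd.DataFrame(
    {"label": ["correct"], "score": [1.0], "explanation": ["matches the reference answer"]},
    index=pd.Index(["67f6740bbe1ddc3f"], name="context.span_id"),  # made-up span ID
)

px.Client(endpoint="http://localhost:6006").log_evaluations(
    SpanEvaluations(eval_name="Q&A Correctness", dataframe=eval_df)
)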
@@ -1,7 +1,9 @@
 from datetime import datetime
+from typing import Any, Dict, Literal, Optional
 
+from fastapi import APIRouter, HTTPException
+from pydantic import Field
 from starlette.requests import Request
-from starlette.responses import JSONResponse, Response
 from starlette.status import HTTP_404_NOT_FOUND
 from strawberry.relay import GlobalID
 
@@ -10,103 +12,76 @@ from phoenix.db.helpers import SupportedSQLDialect
 from phoenix.db.insertion.helpers import insert_on_conflict
 from phoenix.server.api.types.node import from_global_id_with_expected_type
 
+from .pydantic_compat import V1RoutesBaseModel
+from .utils import ResponseBody, add_errors_to_responses
 
-… (50 removed lines not captured in this view)
-                type: string
-                format: date-time
-                description: The end time of the evaluation in ISO format
-              trace_id:
-                type: string
-                description: Optional trace ID for tracking
-            required:
-              - experiment_run_id
-              - name
-              - annotator_kind
-              - start_time
-              - end_time
-      responses:
-        200:
-          description: Experiment evaluation upserted successfully
-          content:
-            application/json:
-              schema:
-                type: object
-                properties:
-                  data:
-                    type: object
-                    properties:
-                      id:
-                        type: string
-                        description: The ID of the upserted experiment evaluation
-        404:
-          description: ExperimentRun not found
-    """
+router = APIRouter(tags=["experiments"], include_in_schema=False)
+
+
+class ExperimentEvaluationResult(V1RoutesBaseModel):
+    label: Optional[str] = Field(default=None, description="The label assigned by the evaluation")
+    score: Optional[float] = Field(default=None, description="The score assigned by the evaluation")
+    explanation: Optional[str] = Field(
+        default=None, description="Explanation of the evaluation result"
+    )
+
+
+class UpsertExperimentEvaluationRequestBody(V1RoutesBaseModel):
+    experiment_run_id: str = Field(description="The ID of the experiment run being evaluated")
+    name: str = Field(description="The name of the evaluation")
+    annotator_kind: Literal["LLM", "CODE", "HUMAN"] = Field(
+        description="The kind of annotator used for the evaluation"
+    )
+    start_time: datetime = Field(description="The start time of the evaluation in ISO format")
+    end_time: datetime = Field(description="The end time of the evaluation in ISO format")
+    result: ExperimentEvaluationResult = Field(description="The result of the evaluation")
+    error: Optional[str] = Field(
+        None, description="Optional error message if the evaluation encountered an error"
+    )
+    metadata: Optional[Dict[str, Any]] = Field(
+        default=None, description="Metadata for the evaluation"
+    )
+    trace_id: Optional[str] = Field(default=None, description="Optional trace ID for tracking")
+
+
+class UpsertExperimentEvaluationResponseBodyData(V1RoutesBaseModel):
+    id: str = Field(description="The ID of the upserted experiment evaluation")
+
+
+class UpsertExperimentEvaluationResponseBody(
+    ResponseBody[UpsertExperimentEvaluationResponseBodyData]
+):
+    pass
+
+
+@router.post(
+    "/experiment_evaluations",
+    operation_id="upsertExperimentEvaluation",
+    summary="Create or update evaluation for an experiment run",
+    responses=add_errors_to_responses(
+        [{"status_code": HTTP_404_NOT_FOUND, "description": "Experiment run not found"}]
+    ),
+)
+async def upsert_experiment_evaluation(
+    request: Request, request_body: UpsertExperimentEvaluationRequestBody
+) -> UpsertExperimentEvaluationResponseBody:
     payload = await request.json()
     experiment_run_gid = GlobalID.from_id(payload["experiment_run_id"])
     try:
         experiment_run_id = from_global_id_with_expected_type(experiment_run_gid, "ExperimentRun")
     except ValueError:
-        … (2 removed lines not captured in this view)
+        raise HTTPException(
+            detail=f"ExperimentRun with ID {experiment_run_gid} does not exist",
             status_code=HTTP_404_NOT_FOUND,
         )
-    name = …
-    annotator_kind = …
-    result = …
-    label = result.…
-    score = result.…
-    explanation = result.…
-    error = …
-    metadata = …
+    name = request_body.name
+    annotator_kind = request_body.annotator_kind
+    result = request_body.result
+    label = result.label if result else None
+    score = result.score if result else None
+    explanation = result.explanation if result else None
+    error = request_body.error
+    metadata = request_body.metadata or {}
     start_time = payload["start_time"]
     end_time = payload["end_time"]
     async with request.app.state.db() as session:
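With the handler now typed against UpsertExperimentEvaluationRequestBody, the accepted JSON body follows directly from the model above. A hedged example request: the field names are taken from the diff, while every value (including the GlobalID and the /v1 mount path) is invented for illustration.

import requests

payload = {
    "experiment_run_id": "RXhwZXJpbWVudFJ1bjox",  # hypothetical GlobalID of an ExperimentRun
    "name": "correctness",
    "annotator_kind": "LLM",
    "start_time": "2024-07-01T00:00:00+00:00",
    "end_time": "2024-07-01T00:00:03+00:00",
    "result": {"label": "correct", "score": 1.0, "explanation": "matches the reference answer"},
    "error": None,
    "metadata": {"model": "gpt-4o"},
    "trace_id": None,
}
response = requests.post("http://localhost:6006/v1/experiment_evaluations", json=payload)
print(response.json())  # expected shape per the response models above: {"data": {"id": "..."}}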
@@ -133,4 +108,6 @@ async def upsert_experiment_evaluation(request: Request) -> Response:
             ).returning(models.ExperimentRunAnnotation)
         )
     evaluation_gid = GlobalID("ExperimentEvaluation", str(exp_eval_run.id))
-    return …
+    return UpsertExperimentEvaluationResponseBody(
+        data=UpsertExperimentEvaluationResponseBodyData(id=str(evaluation_gid))
+    )
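The ResponseBody generic subclassed above comes from the new .utils module, and V1RoutesBaseModel from the new pydantic_compat module; neither body is shown in this view. A minimal sketch of what a ResponseBody-style wrapper could look like, inferred only from its usage here (parameterized by a payload type and constructed with data=); it is not the actual Phoenix implementation.

from typing import Generic, TypeVar

from pydantic import BaseModel

DataT = TypeVar("DataT")


class ResponseBody(BaseModel, Generic[DataT]):
    """Envelope that wraps a route's payload under a single `data` key (sketch)."""

    data: DataT

Under pydantic v1 a generic model would need pydantic.generics.GenericModel instead of subclassing Generic directly; smoothing over differences like that is presumably part of what the new pydantic_compat.py (+78 lines) is for.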