arize-phoenix 3.16.0__py3-none-any.whl → 7.7.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of arize-phoenix might be problematic; consult the advisory details published alongside this release for more information.
- arize_phoenix-7.7.0.dist-info/METADATA +261 -0
- arize_phoenix-7.7.0.dist-info/RECORD +345 -0
- {arize_phoenix-3.16.0.dist-info → arize_phoenix-7.7.0.dist-info}/WHEEL +1 -1
- arize_phoenix-7.7.0.dist-info/entry_points.txt +3 -0
- phoenix/__init__.py +86 -14
- phoenix/auth.py +309 -0
- phoenix/config.py +675 -45
- phoenix/core/model.py +32 -30
- phoenix/core/model_schema.py +102 -109
- phoenix/core/model_schema_adapter.py +48 -45
- phoenix/datetime_utils.py +24 -3
- phoenix/db/README.md +54 -0
- phoenix/db/__init__.py +4 -0
- phoenix/db/alembic.ini +85 -0
- phoenix/db/bulk_inserter.py +294 -0
- phoenix/db/engines.py +208 -0
- phoenix/db/enums.py +20 -0
- phoenix/db/facilitator.py +113 -0
- phoenix/db/helpers.py +159 -0
- phoenix/db/insertion/constants.py +2 -0
- phoenix/db/insertion/dataset.py +227 -0
- phoenix/db/insertion/document_annotation.py +171 -0
- phoenix/db/insertion/evaluation.py +191 -0
- phoenix/db/insertion/helpers.py +98 -0
- phoenix/db/insertion/span.py +193 -0
- phoenix/db/insertion/span_annotation.py +158 -0
- phoenix/db/insertion/trace_annotation.py +158 -0
- phoenix/db/insertion/types.py +256 -0
- phoenix/db/migrate.py +86 -0
- phoenix/db/migrations/data_migration_scripts/populate_project_sessions.py +199 -0
- phoenix/db/migrations/env.py +114 -0
- phoenix/db/migrations/script.py.mako +26 -0
- phoenix/db/migrations/versions/10460e46d750_datasets.py +317 -0
- phoenix/db/migrations/versions/3be8647b87d8_add_token_columns_to_spans_table.py +126 -0
- phoenix/db/migrations/versions/4ded9e43755f_create_project_sessions_table.py +66 -0
- phoenix/db/migrations/versions/cd164e83824f_users_and_tokens.py +157 -0
- phoenix/db/migrations/versions/cf03bd6bae1d_init.py +280 -0
- phoenix/db/models.py +807 -0
- phoenix/exceptions.py +5 -1
- phoenix/experiments/__init__.py +6 -0
- phoenix/experiments/evaluators/__init__.py +29 -0
- phoenix/experiments/evaluators/base.py +158 -0
- phoenix/experiments/evaluators/code_evaluators.py +184 -0
- phoenix/experiments/evaluators/llm_evaluators.py +473 -0
- phoenix/experiments/evaluators/utils.py +236 -0
- phoenix/experiments/functions.py +772 -0
- phoenix/experiments/tracing.py +86 -0
- phoenix/experiments/types.py +726 -0
- phoenix/experiments/utils.py +25 -0
- phoenix/inferences/__init__.py +0 -0
- phoenix/{datasets → inferences}/errors.py +6 -5
- phoenix/{datasets → inferences}/fixtures.py +49 -42
- phoenix/{datasets/dataset.py → inferences/inferences.py} +121 -105
- phoenix/{datasets → inferences}/schema.py +11 -11
- phoenix/{datasets → inferences}/validation.py +13 -14
- phoenix/logging/__init__.py +3 -0
- phoenix/logging/_config.py +90 -0
- phoenix/logging/_filter.py +6 -0
- phoenix/logging/_formatter.py +69 -0
- phoenix/metrics/__init__.py +5 -4
- phoenix/metrics/binning.py +4 -3
- phoenix/metrics/metrics.py +2 -1
- phoenix/metrics/mixins.py +7 -6
- phoenix/metrics/retrieval_metrics.py +2 -1
- phoenix/metrics/timeseries.py +5 -4
- phoenix/metrics/wrappers.py +9 -3
- phoenix/pointcloud/clustering.py +5 -5
- phoenix/pointcloud/pointcloud.py +7 -5
- phoenix/pointcloud/projectors.py +5 -6
- phoenix/pointcloud/umap_parameters.py +53 -52
- phoenix/server/api/README.md +28 -0
- phoenix/server/api/auth.py +44 -0
- phoenix/server/api/context.py +152 -9
- phoenix/server/api/dataloaders/__init__.py +91 -0
- phoenix/server/api/dataloaders/annotation_summaries.py +139 -0
- phoenix/server/api/dataloaders/average_experiment_run_latency.py +54 -0
- phoenix/server/api/dataloaders/cache/__init__.py +3 -0
- phoenix/server/api/dataloaders/cache/two_tier_cache.py +68 -0
- phoenix/server/api/dataloaders/dataset_example_revisions.py +131 -0
- phoenix/server/api/dataloaders/dataset_example_spans.py +38 -0
- phoenix/server/api/dataloaders/document_evaluation_summaries.py +144 -0
- phoenix/server/api/dataloaders/document_evaluations.py +31 -0
- phoenix/server/api/dataloaders/document_retrieval_metrics.py +89 -0
- phoenix/server/api/dataloaders/experiment_annotation_summaries.py +79 -0
- phoenix/server/api/dataloaders/experiment_error_rates.py +58 -0
- phoenix/server/api/dataloaders/experiment_run_annotations.py +36 -0
- phoenix/server/api/dataloaders/experiment_run_counts.py +49 -0
- phoenix/server/api/dataloaders/experiment_sequence_number.py +44 -0
- phoenix/server/api/dataloaders/latency_ms_quantile.py +188 -0
- phoenix/server/api/dataloaders/min_start_or_max_end_times.py +85 -0
- phoenix/server/api/dataloaders/project_by_name.py +31 -0
- phoenix/server/api/dataloaders/record_counts.py +116 -0
- phoenix/server/api/dataloaders/session_io.py +79 -0
- phoenix/server/api/dataloaders/session_num_traces.py +30 -0
- phoenix/server/api/dataloaders/session_num_traces_with_error.py +32 -0
- phoenix/server/api/dataloaders/session_token_usages.py +41 -0
- phoenix/server/api/dataloaders/session_trace_latency_ms_quantile.py +55 -0
- phoenix/server/api/dataloaders/span_annotations.py +26 -0
- phoenix/server/api/dataloaders/span_dataset_examples.py +31 -0
- phoenix/server/api/dataloaders/span_descendants.py +57 -0
- phoenix/server/api/dataloaders/span_projects.py +33 -0
- phoenix/server/api/dataloaders/token_counts.py +124 -0
- phoenix/server/api/dataloaders/trace_by_trace_ids.py +25 -0
- phoenix/server/api/dataloaders/trace_root_spans.py +32 -0
- phoenix/server/api/dataloaders/user_roles.py +30 -0
- phoenix/server/api/dataloaders/users.py +33 -0
- phoenix/server/api/exceptions.py +48 -0
- phoenix/server/api/helpers/__init__.py +12 -0
- phoenix/server/api/helpers/dataset_helpers.py +217 -0
- phoenix/server/api/helpers/experiment_run_filters.py +763 -0
- phoenix/server/api/helpers/playground_clients.py +948 -0
- phoenix/server/api/helpers/playground_registry.py +70 -0
- phoenix/server/api/helpers/playground_spans.py +455 -0
- phoenix/server/api/input_types/AddExamplesToDatasetInput.py +16 -0
- phoenix/server/api/input_types/AddSpansToDatasetInput.py +14 -0
- phoenix/server/api/input_types/ChatCompletionInput.py +38 -0
- phoenix/server/api/input_types/ChatCompletionMessageInput.py +24 -0
- phoenix/server/api/input_types/ClearProjectInput.py +15 -0
- phoenix/server/api/input_types/ClusterInput.py +2 -2
- phoenix/server/api/input_types/CreateDatasetInput.py +12 -0
- phoenix/server/api/input_types/CreateSpanAnnotationInput.py +18 -0
- phoenix/server/api/input_types/CreateTraceAnnotationInput.py +18 -0
- phoenix/server/api/input_types/DataQualityMetricInput.py +5 -2
- phoenix/server/api/input_types/DatasetExampleInput.py +14 -0
- phoenix/server/api/input_types/DatasetSort.py +17 -0
- phoenix/server/api/input_types/DatasetVersionSort.py +16 -0
- phoenix/server/api/input_types/DeleteAnnotationsInput.py +7 -0
- phoenix/server/api/input_types/DeleteDatasetExamplesInput.py +13 -0
- phoenix/server/api/input_types/DeleteDatasetInput.py +7 -0
- phoenix/server/api/input_types/DeleteExperimentsInput.py +7 -0
- phoenix/server/api/input_types/DimensionFilter.py +4 -4
- phoenix/server/api/input_types/GenerativeModelInput.py +17 -0
- phoenix/server/api/input_types/Granularity.py +1 -1
- phoenix/server/api/input_types/InvocationParameters.py +162 -0
- phoenix/server/api/input_types/PatchAnnotationInput.py +19 -0
- phoenix/server/api/input_types/PatchDatasetExamplesInput.py +35 -0
- phoenix/server/api/input_types/PatchDatasetInput.py +14 -0
- phoenix/server/api/input_types/PerformanceMetricInput.py +5 -2
- phoenix/server/api/input_types/ProjectSessionSort.py +29 -0
- phoenix/server/api/input_types/SpanAnnotationSort.py +17 -0
- phoenix/server/api/input_types/SpanSort.py +134 -69
- phoenix/server/api/input_types/TemplateOptions.py +10 -0
- phoenix/server/api/input_types/TraceAnnotationSort.py +17 -0
- phoenix/server/api/input_types/UserRoleInput.py +9 -0
- phoenix/server/api/mutations/__init__.py +28 -0
- phoenix/server/api/mutations/api_key_mutations.py +167 -0
- phoenix/server/api/mutations/chat_mutations.py +593 -0
- phoenix/server/api/mutations/dataset_mutations.py +591 -0
- phoenix/server/api/mutations/experiment_mutations.py +75 -0
- phoenix/server/api/{types/ExportEventsMutation.py → mutations/export_events_mutations.py} +21 -18
- phoenix/server/api/mutations/project_mutations.py +57 -0
- phoenix/server/api/mutations/span_annotations_mutations.py +128 -0
- phoenix/server/api/mutations/trace_annotations_mutations.py +127 -0
- phoenix/server/api/mutations/user_mutations.py +329 -0
- phoenix/server/api/openapi/__init__.py +0 -0
- phoenix/server/api/openapi/main.py +17 -0
- phoenix/server/api/openapi/schema.py +16 -0
- phoenix/server/api/queries.py +738 -0
- phoenix/server/api/routers/__init__.py +11 -0
- phoenix/server/api/routers/auth.py +284 -0
- phoenix/server/api/routers/embeddings.py +26 -0
- phoenix/server/api/routers/oauth2.py +488 -0
- phoenix/server/api/routers/v1/__init__.py +64 -0
- phoenix/server/api/routers/v1/datasets.py +1017 -0
- phoenix/server/api/routers/v1/evaluations.py +362 -0
- phoenix/server/api/routers/v1/experiment_evaluations.py +115 -0
- phoenix/server/api/routers/v1/experiment_runs.py +167 -0
- phoenix/server/api/routers/v1/experiments.py +308 -0
- phoenix/server/api/routers/v1/pydantic_compat.py +78 -0
- phoenix/server/api/routers/v1/spans.py +267 -0
- phoenix/server/api/routers/v1/traces.py +208 -0
- phoenix/server/api/routers/v1/utils.py +95 -0
- phoenix/server/api/schema.py +44 -247
- phoenix/server/api/subscriptions.py +597 -0
- phoenix/server/api/types/Annotation.py +21 -0
- phoenix/server/api/types/AnnotationSummary.py +55 -0
- phoenix/server/api/types/AnnotatorKind.py +16 -0
- phoenix/server/api/types/ApiKey.py +27 -0
- phoenix/server/api/types/AuthMethod.py +9 -0
- phoenix/server/api/types/ChatCompletionMessageRole.py +11 -0
- phoenix/server/api/types/ChatCompletionSubscriptionPayload.py +46 -0
- phoenix/server/api/types/Cluster.py +25 -24
- phoenix/server/api/types/CreateDatasetPayload.py +8 -0
- phoenix/server/api/types/DataQualityMetric.py +31 -13
- phoenix/server/api/types/Dataset.py +288 -63
- phoenix/server/api/types/DatasetExample.py +85 -0
- phoenix/server/api/types/DatasetExampleRevision.py +34 -0
- phoenix/server/api/types/DatasetVersion.py +14 -0
- phoenix/server/api/types/Dimension.py +32 -31
- phoenix/server/api/types/DocumentEvaluationSummary.py +9 -8
- phoenix/server/api/types/EmbeddingDimension.py +56 -49
- phoenix/server/api/types/Evaluation.py +25 -31
- phoenix/server/api/types/EvaluationSummary.py +30 -50
- phoenix/server/api/types/Event.py +20 -20
- phoenix/server/api/types/ExampleRevisionInterface.py +14 -0
- phoenix/server/api/types/Experiment.py +152 -0
- phoenix/server/api/types/ExperimentAnnotationSummary.py +13 -0
- phoenix/server/api/types/ExperimentComparison.py +17 -0
- phoenix/server/api/types/ExperimentRun.py +119 -0
- phoenix/server/api/types/ExperimentRunAnnotation.py +56 -0
- phoenix/server/api/types/GenerativeModel.py +9 -0
- phoenix/server/api/types/GenerativeProvider.py +85 -0
- phoenix/server/api/types/Inferences.py +80 -0
- phoenix/server/api/types/InferencesRole.py +23 -0
- phoenix/server/api/types/LabelFraction.py +7 -0
- phoenix/server/api/types/MimeType.py +2 -2
- phoenix/server/api/types/Model.py +54 -54
- phoenix/server/api/types/PerformanceMetric.py +8 -5
- phoenix/server/api/types/Project.py +407 -142
- phoenix/server/api/types/ProjectSession.py +139 -0
- phoenix/server/api/types/Segments.py +4 -4
- phoenix/server/api/types/Span.py +221 -176
- phoenix/server/api/types/SpanAnnotation.py +43 -0
- phoenix/server/api/types/SpanIOValue.py +15 -0
- phoenix/server/api/types/SystemApiKey.py +9 -0
- phoenix/server/api/types/TemplateLanguage.py +10 -0
- phoenix/server/api/types/TimeSeries.py +19 -15
- phoenix/server/api/types/TokenUsage.py +11 -0
- phoenix/server/api/types/Trace.py +154 -0
- phoenix/server/api/types/TraceAnnotation.py +45 -0
- phoenix/server/api/types/UMAPPoints.py +7 -7
- phoenix/server/api/types/User.py +60 -0
- phoenix/server/api/types/UserApiKey.py +45 -0
- phoenix/server/api/types/UserRole.py +15 -0
- phoenix/server/api/types/node.py +13 -107
- phoenix/server/api/types/pagination.py +156 -57
- phoenix/server/api/utils.py +34 -0
- phoenix/server/app.py +864 -115
- phoenix/server/bearer_auth.py +163 -0
- phoenix/server/dml_event.py +136 -0
- phoenix/server/dml_event_handler.py +256 -0
- phoenix/server/email/__init__.py +0 -0
- phoenix/server/email/sender.py +97 -0
- phoenix/server/email/templates/__init__.py +0 -0
- phoenix/server/email/templates/password_reset.html +19 -0
- phoenix/server/email/types.py +11 -0
- phoenix/server/grpc_server.py +102 -0
- phoenix/server/jwt_store.py +505 -0
- phoenix/server/main.py +305 -116
- phoenix/server/oauth2.py +52 -0
- phoenix/server/openapi/__init__.py +0 -0
- phoenix/server/prometheus.py +111 -0
- phoenix/server/rate_limiters.py +188 -0
- phoenix/server/static/.vite/manifest.json +87 -0
- phoenix/server/static/assets/components-Cy9nwIvF.js +2125 -0
- phoenix/server/static/assets/index-BKvHIxkk.js +113 -0
- phoenix/server/static/assets/pages-CUi2xCVQ.js +4449 -0
- phoenix/server/static/assets/vendor-DvC8cT4X.js +894 -0
- phoenix/server/static/assets/vendor-DxkFTwjz.css +1 -0
- phoenix/server/static/assets/vendor-arizeai-Do1793cv.js +662 -0
- phoenix/server/static/assets/vendor-codemirror-BzwZPyJM.js +24 -0
- phoenix/server/static/assets/vendor-recharts-_Jb7JjhG.js +59 -0
- phoenix/server/static/assets/vendor-shiki-Cl9QBraO.js +5 -0
- phoenix/server/static/assets/vendor-three-DwGkEfCM.js +2998 -0
- phoenix/server/telemetry.py +68 -0
- phoenix/server/templates/index.html +82 -23
- phoenix/server/thread_server.py +3 -3
- phoenix/server/types.py +275 -0
- phoenix/services.py +27 -18
- phoenix/session/client.py +743 -68
- phoenix/session/data_extractor.py +31 -7
- phoenix/session/evaluation.py +3 -9
- phoenix/session/session.py +263 -219
- phoenix/settings.py +22 -0
- phoenix/trace/__init__.py +2 -22
- phoenix/trace/attributes.py +338 -0
- phoenix/trace/dsl/README.md +116 -0
- phoenix/trace/dsl/filter.py +663 -213
- phoenix/trace/dsl/helpers.py +73 -21
- phoenix/trace/dsl/query.py +574 -201
- phoenix/trace/exporter.py +24 -19
- phoenix/trace/fixtures.py +368 -32
- phoenix/trace/otel.py +71 -219
- phoenix/trace/projects.py +3 -2
- phoenix/trace/schemas.py +33 -11
- phoenix/trace/span_evaluations.py +21 -16
- phoenix/trace/span_json_decoder.py +6 -4
- phoenix/trace/span_json_encoder.py +2 -2
- phoenix/trace/trace_dataset.py +47 -32
- phoenix/trace/utils.py +21 -4
- phoenix/utilities/__init__.py +0 -26
- phoenix/utilities/client.py +132 -0
- phoenix/utilities/deprecation.py +31 -0
- phoenix/utilities/error_handling.py +3 -2
- phoenix/utilities/json.py +109 -0
- phoenix/utilities/logging.py +8 -0
- phoenix/utilities/project.py +2 -2
- phoenix/utilities/re.py +49 -0
- phoenix/utilities/span_store.py +0 -23
- phoenix/utilities/template_formatters.py +99 -0
- phoenix/version.py +1 -1
- arize_phoenix-3.16.0.dist-info/METADATA +0 -495
- arize_phoenix-3.16.0.dist-info/RECORD +0 -178
- phoenix/core/project.py +0 -617
- phoenix/core/traces.py +0 -100
- phoenix/experimental/evals/__init__.py +0 -73
- phoenix/experimental/evals/evaluators.py +0 -413
- phoenix/experimental/evals/functions/__init__.py +0 -4
- phoenix/experimental/evals/functions/classify.py +0 -453
- phoenix/experimental/evals/functions/executor.py +0 -353
- phoenix/experimental/evals/functions/generate.py +0 -138
- phoenix/experimental/evals/functions/processing.py +0 -76
- phoenix/experimental/evals/models/__init__.py +0 -14
- phoenix/experimental/evals/models/anthropic.py +0 -175
- phoenix/experimental/evals/models/base.py +0 -170
- phoenix/experimental/evals/models/bedrock.py +0 -221
- phoenix/experimental/evals/models/litellm.py +0 -134
- phoenix/experimental/evals/models/openai.py +0 -448
- phoenix/experimental/evals/models/rate_limiters.py +0 -246
- phoenix/experimental/evals/models/vertex.py +0 -173
- phoenix/experimental/evals/models/vertexai.py +0 -186
- phoenix/experimental/evals/retrievals.py +0 -96
- phoenix/experimental/evals/templates/__init__.py +0 -50
- phoenix/experimental/evals/templates/default_templates.py +0 -472
- phoenix/experimental/evals/templates/template.py +0 -195
- phoenix/experimental/evals/utils/__init__.py +0 -172
- phoenix/experimental/evals/utils/threads.py +0 -27
- phoenix/server/api/helpers.py +0 -11
- phoenix/server/api/routers/evaluation_handler.py +0 -109
- phoenix/server/api/routers/span_handler.py +0 -70
- phoenix/server/api/routers/trace_handler.py +0 -60
- phoenix/server/api/types/DatasetRole.py +0 -23
- phoenix/server/static/index.css +0 -6
- phoenix/server/static/index.js +0 -7447
- phoenix/storage/span_store/__init__.py +0 -23
- phoenix/storage/span_store/text_file.py +0 -85
- phoenix/trace/dsl/missing.py +0 -60
- phoenix/trace/langchain/__init__.py +0 -3
- phoenix/trace/langchain/instrumentor.py +0 -35
- phoenix/trace/llama_index/__init__.py +0 -3
- phoenix/trace/llama_index/callback.py +0 -102
- phoenix/trace/openai/__init__.py +0 -3
- phoenix/trace/openai/instrumentor.py +0 -30
- {arize_phoenix-3.16.0.dist-info → arize_phoenix-7.7.0.dist-info}/licenses/IP_NOTICE +0 -0
- {arize_phoenix-3.16.0.dist-info → arize_phoenix-7.7.0.dist-info}/licenses/LICENSE +0 -0
- /phoenix/{datasets → db/insertion}/__init__.py +0 -0
- /phoenix/{experimental → db/migrations}/__init__.py +0 -0
- /phoenix/{storage → db/migrations/data_migration_scripts}/__init__.py +0 -0
|
@@ -0,0 +1,362 @@
|
|
|
1
|
+
import gzip
|
|
2
|
+
from collections.abc import Callable
|
|
3
|
+
from itertools import chain
|
|
4
|
+
from typing import Any, Iterator, Optional, Union, cast
|
|
5
|
+
|
|
6
|
+
import pandas as pd
|
|
7
|
+
import pyarrow as pa
|
|
8
|
+
from fastapi import APIRouter, Header, HTTPException, Query
|
|
9
|
+
from google.protobuf.message import DecodeError
|
|
10
|
+
from pandas import DataFrame
|
|
11
|
+
from sqlalchemy import select
|
|
12
|
+
from sqlalchemy.engine import Connectable
|
|
13
|
+
from starlette.background import BackgroundTask
|
|
14
|
+
from starlette.datastructures import State
|
|
15
|
+
from starlette.requests import Request
|
|
16
|
+
from starlette.responses import Response, StreamingResponse
|
|
17
|
+
from starlette.status import (
|
|
18
|
+
HTTP_204_NO_CONTENT,
|
|
19
|
+
HTTP_404_NOT_FOUND,
|
|
20
|
+
HTTP_415_UNSUPPORTED_MEDIA_TYPE,
|
|
21
|
+
HTTP_422_UNPROCESSABLE_ENTITY,
|
|
22
|
+
)
|
|
23
|
+
from typing_extensions import TypeAlias
|
|
24
|
+
|
|
25
|
+
import phoenix.trace.v1 as pb
|
|
26
|
+
from phoenix.config import DEFAULT_PROJECT_NAME
|
|
27
|
+
from phoenix.db import models
|
|
28
|
+
from phoenix.db.insertion.types import Precursors
|
|
29
|
+
from phoenix.exceptions import PhoenixEvaluationNameIsMissing
|
|
30
|
+
from phoenix.server.api.routers.utils import table_to_bytes
|
|
31
|
+
from phoenix.server.types import DbSessionFactory
|
|
32
|
+
from phoenix.trace.span_evaluations import (
|
|
33
|
+
DocumentEvaluations,
|
|
34
|
+
Evaluations,
|
|
35
|
+
SpanEvaluations,
|
|
36
|
+
TraceEvaluations,
|
|
37
|
+
)
|
|
38
|
+
|
|
39
|
+
from .utils import add_errors_to_responses
|
|
40
|
+
|
|
41
|
+
EvaluationName: TypeAlias = str
|
|
42
|
+
|
|
43
|
+
router = APIRouter(tags=["traces"], include_in_schema=True)
|
|
44
|
+
|
|
45
|
+
|
|
46
|
+
@router.post(
    "/evaluations",
    operation_id="addEvaluations",
    summary="Add span, trace, or document evaluations",
    status_code=HTTP_204_NO_CONTENT,
    responses=add_errors_to_responses(
        [
            {
                "status_code": HTTP_415_UNSUPPORTED_MEDIA_TYPE,
                "description": (
                    "Unsupported content type, "
                    "only gzipped protobuf and pandas-arrow are supported"
                ),
            },
            HTTP_422_UNPROCESSABLE_ENTITY,
        ]
    ),
    openapi_extra={
        "requestBody": {
            "required": True,
            "content": {
                "application/x-protobuf": {"schema": {"type": "string", "format": "binary"}},
                "application/x-pandas-arrow": {"schema": {"type": "string", "format": "binary"}},
            },
        },
    },
)
async def post_evaluations(
    request: Request,
    content_type: Optional[str] = Header(default=None),
    content_encoding: Optional[str] = Header(default=None),
) -> Response:
    """Ingest span, trace, or document evaluations.

    Two request body formats are accepted, selected by Content-Type:
    - ``application/x-pandas-arrow``: delegated to ``_process_pyarrow``.
    - ``application/x-protobuf``: a single serialized ``pb.Evaluation``,
      optionally gzip-compressed (Content-Encoding: gzip).

    Raises:
        HTTPException: 415 for an unsupported content type or encoding;
            422 for an undecodable protobuf body or a blank evaluation name.
    """
    if content_type == "application/x-pandas-arrow":
        return await _process_pyarrow(request)
    if content_type != "application/x-protobuf":
        raise HTTPException(
            detail="Unsupported content type", status_code=HTTP_415_UNSUPPORTED_MEDIA_TYPE
        )
    body = await request.body()
    if content_encoding == "gzip":
        body = gzip.decompress(body)
    elif content_encoding:
        raise HTTPException(
            detail="Unsupported content encoding", status_code=HTTP_415_UNSUPPORTED_MEDIA_TYPE
        )
    evaluation = pb.Evaluation()
    try:
        evaluation.ParseFromString(body)
    except DecodeError:
        # Suppress the DecodeError context: the HTTP error is the whole story
        # for the client, and the protobuf traceback adds only noise to logs.
        raise HTTPException(
            detail="Request body is invalid", status_code=HTTP_422_UNPROCESSABLE_ENTITY
        ) from None
    if not evaluation.name.strip():
        raise HTTPException(
            detail="Evaluation name must not be blank/empty",
            status_code=HTTP_422_UNPROCESSABLE_ENTITY,
        )
    await request.state.queue_evaluation_for_bulk_insert(evaluation)
    return Response()
|
|
105
|
+
|
|
106
|
+
|
|
107
|
+
@router.get(
    "/evaluations",
    operation_id="getEvaluations",
    summary="Get span, trace, or document evaluations from a project",
    responses=add_errors_to_responses([HTTP_404_NOT_FOUND]),
)
async def get_evaluations(
    request: Request,
    project_name: Optional[str] = Query(
        default=None,
        description=(
            "The name of the project to get evaluations from (if omitted, "
            f"evaluations will be drawn from the `{DEFAULT_PROJECT_NAME}` project)"
        ),
    ),
) -> Response:
    """Stream a project's trace, span, and document evaluations.

    Returns 404 when the project has no evaluations of any kind; otherwise
    streams one pyarrow table per evaluation name as
    ``application/x-pandas-arrow``.
    """
    # Legacy clients passed the project name via a dashed query param or a
    # header; honor those before falling back to the default project.
    project_name = (
        project_name
        or request.query_params.get("project-name")  # backward compatibility
        or request.headers.get("project-name")  # backward compatibility
        or DEFAULT_PROJECT_NAME
    )

    db: DbSessionFactory = request.app.state.db
    async with db() as session:
        connection = await session.connection()
        trace_evals_dataframe = await connection.run_sync(
            _read_sql_trace_evaluations_into_dataframe,
            project_name,
        )
        span_evals_dataframe = await connection.run_sync(
            _read_sql_span_evaluations_into_dataframe,
            project_name,
        )
        document_evals_dataframe = await connection.run_sync(
            _read_sql_document_evaluations_into_dataframe,
            project_name,
        )
        all_empty = (
            trace_evals_dataframe.empty
            and span_evals_dataframe.empty
            and document_evals_dataframe.empty
        )
        if all_empty:
            return Response(status_code=HTTP_404_NOT_FOUND)

        evals = chain(
            (TraceEvaluations(*args) for args in _groupby_eval_name(trace_evals_dataframe)),
            (SpanEvaluations(*args) for args in _groupby_eval_name(span_evals_dataframe)),
            (DocumentEvaluations(*args) for args in _groupby_eval_name(document_evals_dataframe)),
        )
        bytestream = (table_to_bytes(e.to_pyarrow_table()) for e in evals)
        return StreamingResponse(
            content=bytestream,
            media_type="application/x-pandas-arrow",
        )
|
|
171
|
+
|
|
172
|
+
|
|
173
|
+
async def _process_pyarrow(request: Request) -> Response:
    """Parse a pandas-arrow request body and enqueue its evaluations.

    The heavy insertion work happens in a Starlette background task so the
    client gets its response immediately.

    Raises:
        HTTPException: 422 when the body is not a valid pyarrow IPC stream,
            when the evaluation name is blank, or when the stream's data is
            otherwise invalid.
    """
    body = await request.body()
    try:
        reader = pa.ipc.open_stream(body)
    except pa.ArrowInvalid:
        raise HTTPException(
            detail="Request body is not valid pyarrow",
            status_code=HTTP_422_UNPROCESSABLE_ENTITY,
        )
    # Catch the specific name-missing error first for a precise client
    # message, instead of catching Exception and isinstance-checking.
    try:
        evaluations = Evaluations.from_pyarrow_reader(reader)
    except PhoenixEvaluationNameIsMissing:
        raise HTTPException(
            detail="Evaluation name must not be blank/empty",
            status_code=HTTP_422_UNPROCESSABLE_ENTITY,
        )
    except Exception:
        raise HTTPException(
            detail="Invalid data in request body",
            status_code=HTTP_422_UNPROCESSABLE_ENTITY,
        )
    return Response(background=BackgroundTask(_add_evaluations, request.state, evaluations))
|
|
195
|
+
|
|
196
|
+
|
|
197
|
+
async def _add_evaluations(state: State, evaluations: Evaluations) -> None:
    """Convert an Evaluations dataframe into annotation precursors and enqueue them.

    The dataframe's index layout selects the annotation type:
    - a 2-level index of span id ("span_id" or "context.span_id") plus
      "document_position" -> document annotations;
    - a single span-id index -> span annotations;
    - a single trace-id index ("trace_id" or "context.trace_id") -> trace
      annotations.
    Dataframes with any other index layout are silently ignored, matching
    the original best-effort behavior.
    """
    dataframe = evaluations.dataframe
    eval_name = evaluations.eval_name
    names = dataframe.index.names

    def _annotation_kwargs(row: "pd.Series[Any]") -> dict[str, Any]:
        # One place for the fields shared by all three annotation types.
        score, label, explanation = _get_annotation_result(row)
        return dict(
            name=eval_name,
            annotator_kind="LLM",
            score=score,
            label=label,
            explanation=explanation,
            metadata_={},
        )

    if (
        len(names) == 2
        and "document_position" in names
        and ("context.span_id" in names or "span_id" in names)
    ):
        cls = _document_annotation_factory(
            names.index("span_id") if "span_id" in names else names.index("context.span_id"),
            names.index("document_position"),
        )
        for index, row in dataframe.iterrows():
            document_annotation = cls(cast(Union[tuple[str, int], tuple[int, str]], index))(
                **_annotation_kwargs(row)
            )
            await state.enqueue(document_annotation)
    elif len(names) == 1 and names[0] in ("context.span_id", "span_id"):
        for index, row in dataframe.iterrows():
            span_annotation = _span_annotation_factory(cast(str, index))(
                **_annotation_kwargs(row)
            )
            await state.enqueue(span_annotation)
    elif len(names) == 1 and names[0] in ("context.trace_id", "trace_id"):
        for index, row in dataframe.iterrows():
            trace_annotation = _trace_annotation_factory(cast(str, index))(
                **_annotation_kwargs(row)
            )
            await state.enqueue(trace_annotation)
|
|
245
|
+
|
|
246
|
+
|
|
247
|
+
def _get_annotation_result(
|
|
248
|
+
row: "pd.Series[Any]",
|
|
249
|
+
) -> tuple[Optional[float], Optional[str], Optional[str]]:
|
|
250
|
+
return (
|
|
251
|
+
cast(Optional[float], row.get("score")),
|
|
252
|
+
cast(Optional[str], row.get("label")),
|
|
253
|
+
cast(Optional[str], row.get("explanation")),
|
|
254
|
+
)
|
|
255
|
+
|
|
256
|
+
|
|
257
|
+
def _document_annotation_factory(
    span_id_idx: int,
    document_position_idx: int,
) -> Callable[
    [Union[tuple[str, int], tuple[int, str]]],
    Callable[..., Precursors.DocumentAnnotation],
]:
    """Build a two-stage constructor for document-annotation precursors.

    The first call binds a dataframe index tuple (positions given by the
    two ``*_idx`` arguments); the second forwards keyword arguments into
    the underlying ``models.DocumentAnnotation``.
    """

    def bind_index(
        index: Union[tuple[str, int], tuple[int, str]],
    ) -> Callable[..., Precursors.DocumentAnnotation]:
        span_id = str(index[span_id_idx])
        position = int(index[document_position_idx])

        def build(**kwargs: Any) -> Precursors.DocumentAnnotation:
            return Precursors.DocumentAnnotation(
                span_id=span_id,
                document_position=position,
                obj=models.DocumentAnnotation(
                    document_position=position,
                    **kwargs,
                ),
            )

        return build

    return bind_index
|
|
272
|
+
|
|
273
|
+
|
|
274
|
+
def _span_annotation_factory(span_id: str) -> Callable[..., Precursors.SpanAnnotation]:
    """Return a constructor for span-annotation precursors bound to *span_id*."""

    def build(**kwargs: Any) -> Precursors.SpanAnnotation:
        return Precursors.SpanAnnotation(
            span_id=str(span_id),
            obj=models.SpanAnnotation(**kwargs),
        )

    return build
|
|
279
|
+
|
|
280
|
+
|
|
281
|
+
def _trace_annotation_factory(trace_id: str) -> Callable[..., Precursors.TraceAnnotation]:
    """Return a constructor for trace-annotation precursors bound to *trace_id*."""

    def build(**kwargs: Any) -> Precursors.TraceAnnotation:
        return Precursors.TraceAnnotation(
            trace_id=str(trace_id),
            obj=models.TraceAnnotation(**kwargs),
        )

    return build
|
|
286
|
+
|
|
287
|
+
|
|
288
|
+
def _read_sql_trace_evaluations_into_dataframe(
    connectable: Connectable,
    project_name: str,
) -> DataFrame:
    """
    Read a project's LLM trace evaluations into a dataframe indexed by trace_id.

    Takes a synchronous connectable because pandas.read_sql does not support
    async connectables. For more information, see:

    https://stackoverflow.com/questions/70848256/how-can-i-use-pandas-read-sql-on-an-async-connection
    """
    stmt = (
        select(models.TraceAnnotation, models.Trace.trace_id)
        .join_from(models.TraceAnnotation, models.Trace)
        .join_from(models.Trace, models.Project)
        .where(models.Project.name == project_name)
        .where(models.TraceAnnotation.annotator_kind == "LLM")
    )
    return pd.read_sql(stmt, connectable, index_col="trace_id")
|
|
309
|
+
|
|
310
|
+
|
|
311
|
+
def _read_sql_span_evaluations_into_dataframe(
    connectable: Connectable,
    project_name: str,
) -> DataFrame:
    """
    Read a project's LLM span evaluations into a dataframe indexed by span_id.

    Takes a synchronous connectable because pandas.read_sql does not support
    async connectables. For more information, see:

    https://stackoverflow.com/questions/70848256/how-can-i-use-pandas-read-sql-on-an-async-connection
    """
    # Use pd.read_sql (not pd.read_sql_query) for consistency with the
    # sibling trace/document readers; for a SELECT statement both delegate
    # to the same query path.
    return pd.read_sql(
        select(models.SpanAnnotation, models.Span.span_id)
        .join_from(models.SpanAnnotation, models.Span)
        .join_from(models.Span, models.Trace)
        .join_from(models.Trace, models.Project)
        .where(models.Project.name == project_name)
        .where(models.SpanAnnotation.annotator_kind == "LLM"),
        connectable,
        index_col="span_id",
    )
|
|
333
|
+
|
|
334
|
+
|
|
335
|
+
def _read_sql_document_evaluations_into_dataframe(
    connectable: Connectable,
    project_name: str,
) -> DataFrame:
    """
    Read a project's LLM document evaluations into a dataframe indexed by
    (span_id, document_position).

    Takes a synchronous connectable because pandas.read_sql does not support
    async connectables. For more information, see:

    https://stackoverflow.com/questions/70848256/how-can-i-use-pandas-read-sql-on-an-async-connection
    """
    stmt = (
        select(models.DocumentAnnotation, models.Span.span_id)
        .join_from(models.DocumentAnnotation, models.Span)
        .join_from(models.Span, models.Trace)
        .join_from(models.Trace, models.Project)
        .where(models.Project.name == project_name)
        .where(models.DocumentAnnotation.annotator_kind == "LLM")
    )
    dataframe = pd.read_sql(stmt, connectable)
    return dataframe.set_index(["span_id", "document_position"])
|
|
356
|
+
|
|
357
|
+
|
|
358
|
+
def _groupby_eval_name(
    evals_dataframe: DataFrame,
) -> Iterator[tuple[EvaluationName, DataFrame]]:
    """Yield (evaluation name, sub-dataframe) pairs grouped by the "name" column."""
    grouped = evals_dataframe.groupby("name", as_index=False)
    for name_value, frame_for_name in grouped:
        yield str(name_value), frame_for_name
@@ -0,0 +1,115 @@
|
|
|
1
|
+
from datetime import datetime
|
|
2
|
+
from typing import Any, Literal, Optional
|
|
3
|
+
|
|
4
|
+
from fastapi import APIRouter, HTTPException
|
|
5
|
+
from pydantic import Field
|
|
6
|
+
from starlette.requests import Request
|
|
7
|
+
from starlette.status import HTTP_404_NOT_FOUND
|
|
8
|
+
from strawberry.relay import GlobalID
|
|
9
|
+
|
|
10
|
+
from phoenix.db import models
|
|
11
|
+
from phoenix.db.helpers import SupportedSQLDialect
|
|
12
|
+
from phoenix.db.insertion.helpers import insert_on_conflict
|
|
13
|
+
from phoenix.server.api.types.node import from_global_id_with_expected_type
|
|
14
|
+
from phoenix.server.dml_event import ExperimentRunAnnotationInsertEvent
|
|
15
|
+
|
|
16
|
+
from .pydantic_compat import V1RoutesBaseModel
|
|
17
|
+
from .utils import ResponseBody, add_errors_to_responses
|
|
18
|
+
|
|
19
|
+
# Routes in this module are grouped under the "experiments" tag and are
# excluded from the generated OpenAPI schema (internal endpoints).
router = APIRouter(tags=["experiments"], include_in_schema=False)
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
class ExperimentEvaluationResult(V1RoutesBaseModel):
    """The result payload of a single experiment-run evaluation.

    All fields are optional: an evaluator may emit any combination of a
    categorical label, a numeric score, and a free-text explanation.
    """

    label: Optional[str] = Field(default=None, description="The label assigned by the evaluation")
    score: Optional[float] = Field(default=None, description="The score assigned by the evaluation")
    explanation: Optional[str] = Field(
        default=None, description="Explanation of the evaluation result"
    )
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
class UpsertExperimentEvaluationRequestBody(V1RoutesBaseModel):
    """Request body for creating or updating an evaluation of an experiment run."""

    experiment_run_id: str = Field(description="The ID of the experiment run being evaluated")
    name: str = Field(description="The name of the evaluation")
    annotator_kind: Literal["LLM", "CODE", "HUMAN"] = Field(
        description="The kind of annotator used for the evaluation"
    )
    start_time: datetime = Field(description="The start time of the evaluation in ISO format")
    end_time: datetime = Field(description="The end time of the evaluation in ISO format")
    result: ExperimentEvaluationResult = Field(description="The result of the evaluation")
    # Explicit `default=` keyword for consistency with the other optional fields
    # (was a positional `Field(None, ...)`; behavior is identical).
    error: Optional[str] = Field(
        default=None, description="Optional error message if the evaluation encountered an error"
    )
    metadata: Optional[dict[str, Any]] = Field(
        default=None, description="Metadata for the evaluation"
    )
    trace_id: Optional[str] = Field(default=None, description="Optional trace ID for tracking")
|
|
46
|
+
|
|
47
|
+
|
|
48
|
+
class UpsertExperimentEvaluationResponseBodyData(V1RoutesBaseModel):
    """Payload returned after an evaluation has been upserted."""

    id: str = Field(description="The ID of the upserted experiment evaluation")
|
|
50
|
+
|
|
51
|
+
|
|
52
|
+
class UpsertExperimentEvaluationResponseBody(
    ResponseBody[UpsertExperimentEvaluationResponseBodyData]
):
    """Response envelope wrapping the upserted evaluation's ID."""

    pass
|
|
56
|
+
|
|
57
|
+
|
|
58
|
+
@router.post(
    "/experiment_evaluations",
    operation_id="upsertExperimentEvaluation",
    summary="Create or update evaluation for an experiment run",
    responses=add_errors_to_responses(
        [{"status_code": HTTP_404_NOT_FOUND, "description": "Experiment run not found"}]
    ),
)
async def upsert_experiment_evaluation(
    request: Request, request_body: UpsertExperimentEvaluationRequestBody
) -> UpsertExperimentEvaluationResponseBody:
    """
    Insert or update an evaluation (annotation) for an experiment run.

    Uniqueness is on (experiment_run_id, name): a second submission with the
    same pair overwrites the first via `insert_on_conflict`.

    Raises:
        HTTPException: 404 if `experiment_run_id` is not a valid
            ExperimentRun global ID.
    """
    # Read every field from the validated pydantic model. The previous
    # implementation re-parsed the raw JSON with `await request.json()` and
    # took experiment_run_id/start_time/end_time/trace_id from it while
    # taking the rest from `request_body` — two sources that could diverge,
    # and the raw path redid datetime parsing that pydantic already performed.
    experiment_run_gid = GlobalID.from_id(request_body.experiment_run_id)
    try:
        experiment_run_id = from_global_id_with_expected_type(experiment_run_gid, "ExperimentRun")
    except ValueError:
        raise HTTPException(
            detail=f"ExperimentRun with ID {experiment_run_gid} does not exist",
            status_code=HTTP_404_NOT_FOUND,
        )
    result = request_body.result
    async with request.app.state.db() as session:
        values = dict(
            experiment_run_id=experiment_run_id,
            name=request_body.name,
            annotator_kind=request_body.annotator_kind,
            label=result.label if result else None,
            score=result.score if result else None,
            explanation=result.explanation if result else None,
            error=request_body.error,
            metadata_=request_body.metadata or {},  # `metadata_` must match database
            start_time=request_body.start_time,
            end_time=request_body.end_time,
            trace_id=request_body.trace_id,
        )
        # Conflict-handling SQL differs per backend (SQLite vs PostgreSQL).
        dialect = SupportedSQLDialect(session.bind.dialect.name)
        exp_eval_run = await session.scalar(
            insert_on_conflict(
                values,
                dialect=dialect,
                table=models.ExperimentRunAnnotation,
                unique_by=("experiment_run_id", "name"),
            ).returning(models.ExperimentRunAnnotation)
        )
        evaluation_gid = GlobalID("ExperimentEvaluation", str(exp_eval_run.id))
        request.state.event_queue.put(ExperimentRunAnnotationInsertEvent((exp_eval_run.id,)))
        return UpsertExperimentEvaluationResponseBody(
            data=UpsertExperimentEvaluationResponseBodyData(id=str(evaluation_gid))
        )
|
|
@@ -0,0 +1,167 @@
|
|
|
1
|
+
from datetime import datetime
|
|
2
|
+
from typing import Any, Optional
|
|
3
|
+
|
|
4
|
+
from fastapi import APIRouter, HTTPException
|
|
5
|
+
from pydantic import Field
|
|
6
|
+
from sqlalchemy import select
|
|
7
|
+
from starlette.requests import Request
|
|
8
|
+
from starlette.status import HTTP_404_NOT_FOUND
|
|
9
|
+
from strawberry.relay import GlobalID
|
|
10
|
+
|
|
11
|
+
from phoenix.db import models
|
|
12
|
+
from phoenix.db.models import ExperimentRunOutput
|
|
13
|
+
from phoenix.server.api.types.node import from_global_id_with_expected_type
|
|
14
|
+
from phoenix.server.dml_event import ExperimentRunInsertEvent
|
|
15
|
+
|
|
16
|
+
from .pydantic_compat import V1RoutesBaseModel
|
|
17
|
+
from .utils import ResponseBody, add_errors_to_responses
|
|
18
|
+
|
|
19
|
+
# Routes in this module are grouped under the "experiments" tag and are
# excluded from the generated OpenAPI schema (internal endpoints).
router = APIRouter(tags=["experiments"], include_in_schema=False)
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
class ExperimentRun(V1RoutesBaseModel):
    """A single execution of an experiment task against one dataset example."""

    dataset_example_id: str = Field(
        description="The ID of the dataset example used in the experiment run"
    )
    output: Any = Field(description="The output of the experiment task")
    repetition_number: int = Field(description="The repetition number of the experiment run")
    start_time: datetime = Field(description="The start time of the experiment run")
    end_time: datetime = Field(description="The end time of the experiment run")
    trace_id: Optional[str] = Field(
        default=None, description="The ID of the corresponding trace (if one exists)"
    )
    error: Optional[str] = Field(
        default=None,
        description="Optional error message if the experiment run encountered an error",
    )
|
|
37
|
+
|
|
38
|
+
|
|
39
|
+
class CreateExperimentRunRequestBody(ExperimentRun):
    """Request body for creating an experiment run; identical to `ExperimentRun`."""

    pass
|
|
41
|
+
|
|
42
|
+
|
|
43
|
+
class CreateExperimentRunResponseBodyData(V1RoutesBaseModel):
    """Payload returned after an experiment run has been created."""

    id: str = Field(description="The ID of the newly created experiment run")
|
|
45
|
+
|
|
46
|
+
|
|
47
|
+
class CreateExperimentResponseBody(ResponseBody[CreateExperimentRunResponseBodyData]):
    """Response envelope for `createExperimentRun`.

    NOTE(review): the name omits "Run" (CreateExperiment*Run*ResponseBody would
    match the wrapped data type); kept as-is because external code may
    reference this name.
    """

    pass
|
|
49
|
+
|
|
50
|
+
|
|
51
|
+
@router.post(
    "/experiments/{experiment_id}/runs",
    operation_id="createExperimentRun",
    summary="Create run for an experiment",
    response_description="Experiment run created successfully",
    responses=add_errors_to_responses(
        [
            {
                "status_code": HTTP_404_NOT_FOUND,
                "description": "Experiment or dataset example not found",
            }
        ]
    ),
)
async def create_experiment_run(
    request: Request, experiment_id: str, request_body: CreateExperimentRunRequestBody
) -> CreateExperimentResponseBody:
    """
    Persist one experiment run and return its relay ID.

    Raises:
        HTTPException: 404 if `experiment_id` is not a valid Experiment
            global ID, or `dataset_example_id` is not a valid DatasetExample
            global ID.
    """
    # Resolve the relay global IDs into database row IDs, rejecting IDs of
    # the wrong node type with a 404.
    experiment_gid = GlobalID.from_id(experiment_id)
    try:
        experiment_rowid = from_global_id_with_expected_type(experiment_gid, "Experiment")
    except ValueError:
        raise HTTPException(
            detail=f"Experiment with ID {experiment_gid} does not exist",
            status_code=HTTP_404_NOT_FOUND,
        )

    example_gid = GlobalID.from_id(request_body.dataset_example_id)
    try:
        dataset_example_id = from_global_id_with_expected_type(example_gid, "DatasetExample")
    except ValueError:
        raise HTTPException(
            detail=f"DatasetExample with ID {example_gid} does not exist",
            status_code=HTTP_404_NOT_FOUND,
        )

    async with request.app.state.db() as session:
        run_row = models.ExperimentRun(
            experiment_id=experiment_rowid,
            dataset_example_id=dataset_example_id,
            trace_id=request_body.trace_id,
            # The task output is stored under a fixed key inside the JSON column.
            output=ExperimentRunOutput(task_output=request_body.output),
            repetition_number=request_body.repetition_number,
            start_time=request_body.start_time,
            end_time=request_body.end_time,
            error=request_body.error,
        )
        session.add(run_row)
        await session.flush()  # assigns run_row.id
        request.state.event_queue.put(ExperimentRunInsertEvent((run_row.id,)))
        run_gid = GlobalID("ExperimentRun", str(run_row.id))
        return CreateExperimentResponseBody(
            data=CreateExperimentRunResponseBodyData(id=str(run_gid))
        )
|
|
109
|
+
|
|
110
|
+
|
|
111
|
+
class ExperimentRunResponse(ExperimentRun):
    """An experiment run as returned by read endpoints, with relay IDs attached."""

    id: str = Field(description="The ID of the experiment run")
    experiment_id: str = Field(description="The ID of the experiment")
|
|
114
|
+
|
|
115
|
+
|
|
116
|
+
class ListExperimentRunsResponseBody(ResponseBody[list[ExperimentRunResponse]]):
    """Response envelope containing every run belonging to one experiment."""

    pass
|
|
118
|
+
|
|
119
|
+
|
|
120
|
+
@router.get(
    "/experiments/{experiment_id}/runs",
    operation_id="listExperimentRuns",
    summary="List runs for an experiment",
    response_description="Experiment runs retrieved successfully",
    responses=add_errors_to_responses(
        [{"status_code": HTTP_404_NOT_FOUND, "description": "Experiment not found"}]
    ),
)
async def list_experiment_runs(
    request: Request, experiment_id: str
) -> ListExperimentRunsResponseBody:
    """
    Return all runs belonging to an experiment.

    Raises:
        HTTPException: 404 if `experiment_id` is not a valid Experiment
            global ID.
    """
    experiment_gid = GlobalID.from_id(experiment_id)
    try:
        experiment_rowid = from_global_id_with_expected_type(experiment_gid, "Experiment")
    except ValueError:
        raise HTTPException(
            detail=f"Experiment with ID {experiment_gid} does not exist",
            status_code=HTTP_404_NOT_FOUND,
        )

    async with request.app.state.db() as session:
        result = await session.execute(
            select(models.ExperimentRun)
            .where(models.ExperimentRun.experiment_id == experiment_rowid)
            # order by dataset_example_id to be consistent with `list_dataset_examples`
            .order_by(models.ExperimentRun.dataset_example_id.asc())
        )
        runs = [
            ExperimentRunResponse(
                id=str(GlobalID("ExperimentRun", str(row.id))),
                experiment_id=str(GlobalID("Experiment", str(row.experiment_id))),
                dataset_example_id=str(GlobalID("DatasetExample", str(row.dataset_example_id))),
                output=row.output.get("task_output"),
                repetition_number=row.repetition_number,
                start_time=row.start_time,
                end_time=row.end_time,
                trace_id=row.trace_id,
                error=row.error,
            )
            for row in result.scalars().all()
        ]
        return ListExperimentRunsResponseBody(data=runs)
|