arize-phoenix 3.25.0__py3-none-any.whl → 4.0.0__py3-none-any.whl
This diff shows the changes between publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
Potentially problematic release: this version of arize-phoenix might be problematic.
- {arize_phoenix-3.25.0.dist-info → arize_phoenix-4.0.0.dist-info}/METADATA +26 -4
- {arize_phoenix-3.25.0.dist-info → arize_phoenix-4.0.0.dist-info}/RECORD +80 -75
- phoenix/__init__.py +9 -5
- phoenix/config.py +109 -53
- phoenix/datetime_utils.py +18 -1
- phoenix/db/README.md +25 -0
- phoenix/db/__init__.py +4 -0
- phoenix/db/alembic.ini +119 -0
- phoenix/db/bulk_inserter.py +206 -0
- phoenix/db/engines.py +152 -0
- phoenix/db/helpers.py +47 -0
- phoenix/db/insertion/evaluation.py +209 -0
- phoenix/db/insertion/helpers.py +54 -0
- phoenix/db/insertion/span.py +142 -0
- phoenix/db/migrate.py +71 -0
- phoenix/db/migrations/env.py +121 -0
- phoenix/db/migrations/script.py.mako +26 -0
- phoenix/db/migrations/versions/cf03bd6bae1d_init.py +280 -0
- phoenix/db/models.py +371 -0
- phoenix/exceptions.py +5 -1
- phoenix/server/api/context.py +40 -3
- phoenix/server/api/dataloaders/__init__.py +97 -0
- phoenix/server/api/dataloaders/cache/__init__.py +3 -0
- phoenix/server/api/dataloaders/cache/two_tier_cache.py +67 -0
- phoenix/server/api/dataloaders/document_evaluation_summaries.py +152 -0
- phoenix/server/api/dataloaders/document_evaluations.py +37 -0
- phoenix/server/api/dataloaders/document_retrieval_metrics.py +98 -0
- phoenix/server/api/dataloaders/evaluation_summaries.py +151 -0
- phoenix/server/api/dataloaders/latency_ms_quantile.py +198 -0
- phoenix/server/api/dataloaders/min_start_or_max_end_times.py +93 -0
- phoenix/server/api/dataloaders/record_counts.py +125 -0
- phoenix/server/api/dataloaders/span_descendants.py +64 -0
- phoenix/server/api/dataloaders/span_evaluations.py +37 -0
- phoenix/server/api/dataloaders/token_counts.py +138 -0
- phoenix/server/api/dataloaders/trace_evaluations.py +37 -0
- phoenix/server/api/input_types/SpanSort.py +138 -68
- phoenix/server/api/routers/v1/__init__.py +11 -0
- phoenix/server/api/routers/v1/evaluations.py +275 -0
- phoenix/server/api/routers/v1/spans.py +126 -0
- phoenix/server/api/routers/v1/traces.py +82 -0
- phoenix/server/api/schema.py +112 -48
- phoenix/server/api/types/DocumentEvaluationSummary.py +1 -1
- phoenix/server/api/types/Evaluation.py +29 -12
- phoenix/server/api/types/EvaluationSummary.py +29 -44
- phoenix/server/api/types/MimeType.py +2 -2
- phoenix/server/api/types/Model.py +9 -9
- phoenix/server/api/types/Project.py +240 -171
- phoenix/server/api/types/Span.py +87 -131
- phoenix/server/api/types/Trace.py +29 -20
- phoenix/server/api/types/pagination.py +151 -10
- phoenix/server/app.py +263 -35
- phoenix/server/grpc_server.py +93 -0
- phoenix/server/main.py +75 -60
- phoenix/server/openapi/docs.py +218 -0
- phoenix/server/prometheus.py +23 -7
- phoenix/server/static/index.js +662 -643
- phoenix/server/telemetry.py +68 -0
- phoenix/services.py +4 -0
- phoenix/session/client.py +34 -30
- phoenix/session/data_extractor.py +8 -3
- phoenix/session/session.py +176 -155
- phoenix/settings.py +13 -0
- phoenix/trace/attributes.py +349 -0
- phoenix/trace/dsl/README.md +116 -0
- phoenix/trace/dsl/filter.py +660 -192
- phoenix/trace/dsl/helpers.py +24 -5
- phoenix/trace/dsl/query.py +562 -185
- phoenix/trace/fixtures.py +69 -7
- phoenix/trace/otel.py +33 -199
- phoenix/trace/schemas.py +14 -8
- phoenix/trace/span_evaluations.py +5 -2
- phoenix/utilities/__init__.py +0 -26
- phoenix/utilities/span_store.py +0 -23
- phoenix/version.py +1 -1
- phoenix/core/project.py +0 -773
- phoenix/core/traces.py +0 -96
- phoenix/datasets/dataset.py +0 -214
- phoenix/datasets/fixtures.py +0 -24
- phoenix/datasets/schema.py +0 -31
- phoenix/experimental/evals/__init__.py +0 -73
- phoenix/experimental/evals/evaluators.py +0 -413
- phoenix/experimental/evals/functions/__init__.py +0 -4
- phoenix/experimental/evals/functions/classify.py +0 -453
- phoenix/experimental/evals/functions/executor.py +0 -353
- phoenix/experimental/evals/functions/generate.py +0 -138
- phoenix/experimental/evals/functions/processing.py +0 -76
- phoenix/experimental/evals/models/__init__.py +0 -14
- phoenix/experimental/evals/models/anthropic.py +0 -175
- phoenix/experimental/evals/models/base.py +0 -170
- phoenix/experimental/evals/models/bedrock.py +0 -221
- phoenix/experimental/evals/models/litellm.py +0 -134
- phoenix/experimental/evals/models/openai.py +0 -453
- phoenix/experimental/evals/models/rate_limiters.py +0 -246
- phoenix/experimental/evals/models/vertex.py +0 -173
- phoenix/experimental/evals/models/vertexai.py +0 -186
- phoenix/experimental/evals/retrievals.py +0 -96
- phoenix/experimental/evals/templates/__init__.py +0 -50
- phoenix/experimental/evals/templates/default_templates.py +0 -472
- phoenix/experimental/evals/templates/template.py +0 -195
- phoenix/experimental/evals/utils/__init__.py +0 -172
- phoenix/experimental/evals/utils/threads.py +0 -27
- phoenix/server/api/routers/evaluation_handler.py +0 -110
- phoenix/server/api/routers/span_handler.py +0 -70
- phoenix/server/api/routers/trace_handler.py +0 -60
- phoenix/storage/span_store/__init__.py +0 -23
- phoenix/storage/span_store/text_file.py +0 -85
- phoenix/trace/dsl/missing.py +0 -60
- {arize_phoenix-3.25.0.dist-info → arize_phoenix-4.0.0.dist-info}/WHEEL +0 -0
- {arize_phoenix-3.25.0.dist-info → arize_phoenix-4.0.0.dist-info}/licenses/IP_NOTICE +0 -0
- {arize_phoenix-3.25.0.dist-info → arize_phoenix-4.0.0.dist-info}/licenses/LICENSE +0 -0
- /phoenix/{datasets → db/insertion}/__init__.py +0 -0
- /phoenix/{experimental → db/migrations}/__init__.py +0 -0
- /phoenix/{storage → server/openapi}/__init__.py +0 -0
phoenix/core/traces.py
DELETED
@@ -1,96 +0,0 @@
-import weakref
-from collections import defaultdict
-from queue import SimpleQueue
-from threading import RLock, Thread
-from types import MethodType
-from typing import DefaultDict, Iterator, Optional, Tuple, Union
-
-from typing_extensions import assert_never
-
-import phoenix.trace.v1 as pb
-from phoenix.config import DEFAULT_PROJECT_NAME
-from phoenix.core.project import (
-    END_OF_QUEUE,
-    Project,
-    _ProjectName,
-)
-from phoenix.trace.schemas import Span
-
-_SpanItem = Tuple[Span, _ProjectName]
-_EvalItem = Tuple[pb.Evaluation, _ProjectName]
-
-
-class Traces:
-    def __init__(self) -> None:
-        self._span_queue: "SimpleQueue[Optional[_SpanItem]]" = SimpleQueue()
-        self._eval_queue: "SimpleQueue[Optional[_EvalItem]]" = SimpleQueue()
-        # Putting `None` as the sentinel value for queue termination.
-        weakref.finalize(self, self._span_queue.put, END_OF_QUEUE)
-        weakref.finalize(self, self._eval_queue.put, END_OF_QUEUE)
-        self._lock = RLock()
-        self._projects: DefaultDict[_ProjectName, "Project"] = defaultdict(
-            Project,
-            {DEFAULT_PROJECT_NAME: Project()},
-        )
-        self._start_consumers()
-
-    def get_project(self, project_name: str) -> Optional["Project"]:
-        with self._lock:
-            return self._projects.get(project_name)
-
-    def get_projects(self) -> Iterator[Tuple[int, str, "Project"]]:
-        with self._lock:
-            for project_id, (project_name, project) in enumerate(self._projects.items()):
-                if project.is_archived:
-                    continue
-                yield project_id, project_name, project
-
-    def archive_project(self, id: int) -> Optional["Project"]:
-        if id == 0:
-            raise ValueError("Cannot archive the default project")
-        with self._lock:
-            for project_id, _, project in self.get_projects():
-                if id == project_id:
-                    project.archive()
-                    return project
-        return None
-
-    def put(
-        self,
-        item: Union[Span, pb.Evaluation],
-        project_name: Optional[str] = None,
-    ) -> None:
-        if not project_name:
-            project_name = DEFAULT_PROJECT_NAME
-        if isinstance(item, Span):
-            self._span_queue.put((item, project_name))
-        elif isinstance(item, pb.Evaluation):
-            self._eval_queue.put((item, project_name))
-        else:
-            assert_never(item)
-
-    def _start_consumers(self) -> None:
-        Thread(
-            target=MethodType(self.__class__._consume_spans, weakref.proxy(self)),
-            args=(self._span_queue,),
-            daemon=True,
-        ).start()
-        Thread(
-            target=MethodType(self.__class__._consume_evals, weakref.proxy(self)),
-            args=(self._eval_queue,),
-            daemon=True,
-        ).start()
-
-    def _consume_spans(self, queue: "SimpleQueue[Optional[_SpanItem]]") -> None:
-        while (item := queue.get()) is not END_OF_QUEUE:
-            span, project_name = item
-            with self._lock:
-                project = self._projects[project_name]
-            project.add_span(span)
-
-    def _consume_evals(self, queue: "SimpleQueue[Optional[_EvalItem]]") -> None:
-        while (item := queue.get()) is not END_OF_QUEUE:
-            pb_eval, project_name = item
-            with self._lock:
-                project = self._projects[project_name]
-            project.add_eval(pb_eval)
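The removed Traces class was the in-memory span store: put() fans spans and evaluations into per-project queues that daemon consumer threads drain, a role the new phoenix/db layer added above (bulk_inserter.py, models.py) appears to take over in 4.0.0. A minimal usage sketch against the deleted API (names from the code above; the span and evaluation values are hypothetical):

    # Hypothetical usage of the removed in-memory store:
    traces = Traces()
    traces.put(span)                          # spans go to the default project
    traces.put(evaluation, "my-project")      # evals routed to a named project
    project = traces.get_project("my-project")  # thread-safe lookup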
phoenix/datasets/dataset.py
DELETED
@@ -1,214 +0,0 @@
-import re
-from dataclasses import dataclass, replace
-from enum import Enum
-from itertools import groupby
-from typing import Dict
-
-from pandas import DataFrame
-
-from phoenix.inferences.inferences import Inferences
-from phoenix.inferences.schema import EmbeddingColumnNames, RetrievalEmbeddingColumnNames, Schema
-from phoenix.utilities.deprecation import deprecated, deprecated_class
-
-
-@deprecated_class("phoenix.Dataset is deprecated, use phoenix.Inference instead.")
-class Dataset(Inferences):
-    @classmethod
-    @deprecated("Dataset.from_open_inference is deprecated and will be removed.")
-    def from_open_inference(cls, dataframe: DataFrame) -> "Dataset":
-        schema = Schema()
-        column_renaming: Dict[str, str] = {}
-        for group_name, group in groupby(
-            sorted(
-                map(_parse_open_inference_column_name, dataframe.columns),
-                key=lambda column: column.name,
-            ),
-            key=lambda column: column.name,
-        ):
-            open_inference_columns = list(group)
-            if group_name == "":
-                column_names_by_category = {
-                    column.category: column.full_name for column in open_inference_columns
-                }
-                schema = replace(
-                    schema,
-                    prediction_id_column_name=column_names_by_category.get(
-                        OpenInferenceCategory.id
-                    ),
-                    timestamp_column_name=column_names_by_category.get(
-                        OpenInferenceCategory.timestamp
-                    ),
-                )
-                continue
-            column_names_by_specifier = {
-                column.specifier: column.full_name for column in open_inference_columns
-            }
-            if group_name == "response":
-                response_vector_column_name = column_names_by_specifier.get(
-                    OpenInferenceSpecifier.embedding
-                )
-                if response_vector_column_name is not None:
-                    column_renaming[response_vector_column_name] = "response"
-                    schema = replace(
-                        schema,
-                        response_column_names=EmbeddingColumnNames(
-                            vector_column_name=column_renaming[response_vector_column_name],
-                            raw_data_column_name=column_names_by_specifier.get(
-                                OpenInferenceSpecifier.default
-                            ),
-                        ),
-                    )
-                else:
-                    response_text_column_name = column_names_by_specifier.get(
-                        OpenInferenceSpecifier.default
-                    )
-                    if response_text_column_name is None:
-                        raise ValueError(
-                            "invalid OpenInference format: missing text column for response"
-                        )
-                    column_renaming[response_text_column_name] = "response"
-                    schema = replace(
-                        schema,
-                        response_column_names=column_renaming[response_text_column_name],
-                    )
-            elif group_name == "prompt":
-                prompt_vector_column_name = column_names_by_specifier.get(
-                    OpenInferenceSpecifier.embedding
-                )
-                if prompt_vector_column_name is None:
-                    raise ValueError(
-                        "invalid OpenInference format: missing embedding vector column for prompt"
-                    )
-                column_renaming[prompt_vector_column_name] = "prompt"
-                schema = replace(
-                    schema,
-                    prompt_column_names=RetrievalEmbeddingColumnNames(
-                        vector_column_name=column_renaming[prompt_vector_column_name],
-                        raw_data_column_name=column_names_by_specifier.get(
-                            OpenInferenceSpecifier.default
-                        ),
-                        context_retrieval_ids_column_name=column_names_by_specifier.get(
-                            OpenInferenceSpecifier.retrieved_document_ids
-                        ),
-                        context_retrieval_scores_column_name=column_names_by_specifier.get(
-                            OpenInferenceSpecifier.retrieved_document_scores
-                        ),
-                    ),
-                )
-            elif OpenInferenceSpecifier.embedding in column_names_by_specifier:
-                vector_column_name = column_names_by_specifier[OpenInferenceSpecifier.embedding]
-                column_renaming[vector_column_name] = group_name
-                embedding_feature_column_names = schema.embedding_feature_column_names or {}
-                embedding_feature_column_names.update(
-                    {
-                        group_name: EmbeddingColumnNames(
-                            vector_column_name=column_renaming[vector_column_name],
-                            raw_data_column_name=column_names_by_specifier.get(
-                                OpenInferenceSpecifier.raw_data
-                            ),
-                            link_to_data_column_name=column_names_by_specifier.get(
-                                OpenInferenceSpecifier.link_to_data
-                            ),
-                        )
-                    }
-                )
-                schema = replace(
-                    schema,
-                    embedding_feature_column_names=embedding_feature_column_names,
-                )
-            elif len(open_inference_columns) == 1:
-                open_inference_column = open_inference_columns[0]
-                raw_column_name = open_inference_column.full_name
-                column_renaming[raw_column_name] = open_inference_column.name
-                if open_inference_column.category is OpenInferenceCategory.feature:
-                    schema = replace(
-                        schema,
-                        feature_column_names=(
-                            (schema.feature_column_names or []) + [column_renaming[raw_column_name]]
-                        ),
-                    )
-                elif open_inference_column.category is OpenInferenceCategory.tag:
-                    schema = replace(
-                        schema,
-                        tag_column_names=(
-                            (schema.tag_column_names or []) + [column_renaming[raw_column_name]]
-                        ),
-                    )
-                elif open_inference_column.category is OpenInferenceCategory.prediction:
-                    if open_inference_column.specifier is OpenInferenceSpecifier.score:
-                        schema = replace(
-                            schema,
-                            prediction_score_column_name=column_renaming[raw_column_name],
-                        )
-                    if open_inference_column.specifier is OpenInferenceSpecifier.label:
-                        schema = replace(
-                            schema,
-                            prediction_label_column_name=column_renaming[raw_column_name],
-                        )
-                elif open_inference_column.category is OpenInferenceCategory.actual:
-                    if open_inference_column.specifier is OpenInferenceSpecifier.score:
-                        schema = replace(
-                            schema,
-                            actual_score_column_name=column_renaming[raw_column_name],
-                        )
-                    if open_inference_column.specifier is OpenInferenceSpecifier.label:
-                        schema = replace(
-                            schema,
-                            actual_label_column_name=column_renaming[raw_column_name],
-                        )
-            else:
-                raise ValueError(f"invalid OpenInference format: duplicated name `{group_name}`")
-
-        return cls(
-            dataframe.rename(
-                column_renaming,
-                axis=1,
-                copy=False,
-            ),
-            schema,
-        )
-
-
-class OpenInferenceCategory(Enum):
-    id = "id"
-    timestamp = "timestamp"
-    feature = "feature"
-    tag = "tag"
-    prediction = "prediction"
-    actual = "actual"
-
-
-class OpenInferenceSpecifier(Enum):
-    default = ""
-    score = "score"
-    label = "label"
-    embedding = "embedding"
-    raw_data = "raw_data"
-    link_to_data = "link_to_data"
-    retrieved_document_ids = "retrieved_document_ids"
-    retrieved_document_scores = "retrieved_document_scores"
-
-
-@dataclass(frozen=True)
-class _OpenInferenceColumnName:
-    full_name: str
-    category: OpenInferenceCategory
-    data_type: str
-    specifier: OpenInferenceSpecifier = OpenInferenceSpecifier.default
-    name: str = ""
-
-
-def _parse_open_inference_column_name(column_name: str) -> _OpenInferenceColumnName:
-    pattern = (
-        r"^:(?P<category>\w+)\.(?P<data_type>\[\w+\]|\w+)(\.(?P<specifier>\w+))?:(?P<name>.*)?$"
-    )
-    if match := re.match(pattern, column_name):
-        extract = match.groupdict(default="")
-        return _OpenInferenceColumnName(
-            full_name=column_name,
-            category=OpenInferenceCategory(extract.get("category", "").lower()),
-            data_type=extract.get("data_type", "").lower(),
-            specifier=OpenInferenceSpecifier(extract.get("specifier", "").lower()),
-            name=extract.get("name", ""),
-        )
-    raise ValueError(f"Invalid format for column name: {column_name}")
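The deleted parser accepted OpenInference-style column names of the form `:category.data_type[.specifier]:name`. A minimal sketch of the same regex in isolation (the example column name is hypothetical):

    import re

    # Pattern copied from the removed _parse_open_inference_column_name helper.
    pattern = r"^:(?P<category>\w+)\.(?P<data_type>\[\w+\]|\w+)(\.(?P<specifier>\w+))?:(?P<name>.*)?$"

    match = re.match(pattern, ":prediction.str.label:churn")  # hypothetical column
    assert match is not None
    print(match.groupdict(default=""))
    # {'category': 'prediction', 'data_type': 'str', 'specifier': 'label', 'name': 'churn'}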
phoenix/datasets/fixtures.py
DELETED
@@ -1,24 +0,0 @@
-from phoenix.inferences.fixtures import (
-    ExampleInferences as _ExampleInferences,
-)
-from phoenix.inferences.fixtures import (
-    load_example as _load_example,
-)
-from phoenix.utilities.deprecation import deprecated, deprecated_class
-
-
-@deprecated_class(
-    (
-        "The phoenix.datasets.fixtures.ExampleDatasets is deprecated, "
-        "use phoenix.inferences.fixtures.ExampleInferences instead."
-    )
-)
-class ExampleDatasets(_ExampleInferences):
-    pass
-
-
-@deprecated(
-    "The phoenix.datasets.fixtures module is deprecated, use phoenix.inferences.fixtures instead."
-)
-def load_example(use_case: str) -> None:
-    _load_example(use_case)
phoenix/datasets/schema.py
DELETED
@@ -1,31 +0,0 @@
-from phoenix.inferences.schema import (
-    EmbeddingColumnNames as _EmbeddingColumnNames,
-)
-from phoenix.inferences.schema import (
-    RetrievalEmbeddingColumnNames as _RetrievalEmbeddingColumnNames,
-)
-from phoenix.inferences.schema import (
-    Schema as _Schema,
-)
-from phoenix.utilities.deprecation import deprecated_class
-
-
-@deprecated_class(
-    "The phoenix.datasets.fixtures module is deprecated, use phoenix.inferences.fixtures instead."
-)
-class EmbeddingColumnNames(_EmbeddingColumnNames):
-    pass
-
-
-@deprecated_class(
-    "The phoenix.datasets.fixtures module is deprecated, use phoenix.inferences.fixtures instead."
-)
-class RetrievalEmbeddingColumnNames(_RetrievalEmbeddingColumnNames):
-    pass
-
-
-@deprecated_class(
-    "The phoenix.datasets.fixtures module is deprecated, use phoenix.inferences.fixtures instead."
-)
-class Schema(_Schema):
-    pass
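Both deleted shim modules above delegate to phoenix.inferences, so the migration is a straight import rename; a minimal sketch (replacement paths taken from the shims' own imports):

    # Before (removed in 4.0.0):
    # from phoenix.datasets.schema import EmbeddingColumnNames, Schema
    # from phoenix.datasets.fixtures import ExampleDatasets, load_example

    # After:
    from phoenix.inferences.schema import EmbeddingColumnNames, Schema
    from phoenix.inferences.fixtures import ExampleInferences, load_example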
phoenix/experimental/evals/__init__.py
DELETED
@@ -1,73 +0,0 @@
-import logging
-
-from .evaluators import (
-    HallucinationEvaluator,
-    LLMEvaluator,
-    QAEvaluator,
-    RelevanceEvaluator,
-    SummarizationEvaluator,
-    ToxicityEvaluator,
-)
-from .functions import llm_classify, llm_generate, run_evals, run_relevance_eval
-from .models import BedrockModel, LiteLLMModel, OpenAIModel, VertexAIModel
-from .retrievals import compute_precisions_at_k
-from .templates import (
-    CODE_READABILITY_PROMPT_RAILS_MAP,
-    CODE_READABILITY_PROMPT_TEMPLATE,
-    HALLUCINATION_PROMPT_RAILS_MAP,
-    HALLUCINATION_PROMPT_TEMPLATE,
-    HUMAN_VS_AI_PROMPT_RAILS_MAP,
-    HUMAN_VS_AI_PROMPT_TEMPLATE,
-    QA_PROMPT_RAILS_MAP,
-    QA_PROMPT_TEMPLATE,
-    RAG_RELEVANCY_PROMPT_RAILS_MAP,
-    RAG_RELEVANCY_PROMPT_TEMPLATE,
-    TOXICITY_PROMPT_RAILS_MAP,
-    TOXICITY_PROMPT_TEMPLATE,
-    ClassificationTemplate,
-    PromptTemplate,
-)
-from .utils import NOT_PARSABLE, download_benchmark_dataset
-
-logger = logging.getLogger(__name__)
-
-__all__ = [
-    "compute_precisions_at_k",
-    "download_benchmark_dataset",
-    "llm_classify",
-    "llm_generate",
-    "OpenAIModel",
-    "VertexAIModel",
-    "BedrockModel",
-    "LiteLLMModel",
-    "PromptTemplate",
-    "ClassificationTemplate",
-    "CODE_READABILITY_PROMPT_RAILS_MAP",
-    "CODE_READABILITY_PROMPT_TEMPLATE",
-    "HALLUCINATION_PROMPT_RAILS_MAP",
-    "HALLUCINATION_PROMPT_TEMPLATE",
-    "RAG_RELEVANCY_PROMPT_RAILS_MAP",
-    "RAG_RELEVANCY_PROMPT_TEMPLATE",
-    "TOXICITY_PROMPT_RAILS_MAP",
-    "TOXICITY_PROMPT_TEMPLATE",
-    "HUMAN_VS_AI_PROMPT_RAILS_MAP",
-    "HUMAN_VS_AI_PROMPT_TEMPLATE",
-    "QA_PROMPT_RAILS_MAP",
-    "QA_PROMPT_TEMPLATE",
-    "NOT_PARSABLE",
-    "run_relevance_eval",
-    "run_evals",
-    "LLMEvaluator",
-    "HallucinationEvaluator",
-    "QAEvaluator",
-    "RelevanceEvaluator",
-    "SummarizationEvaluator",
-    "ToxicityEvaluator",
-]
-
-
-logger.warning(
-    "Evals are moving out of experimental. "
-    "Install the evals extra with `pip install arize-phoenix[evals]` and import `phoenix.evals`. "
-    "For more info, see the [migration guide](https://github.com/Arize-ai/phoenix/blob/main/MIGRATION.md)."
-)
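The module's own deprecation warning spells out the replacement; a minimal sketch of the migration it asks for (assuming the evals extra is installed):

    # pip install 'arize-phoenix[evals]'

    # Before (removed in 4.0.0):
    # from phoenix.experimental.evals import llm_classify, OpenAIModel

    # After:
    from phoenix.evals import llm_classify, OpenAIModel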