arize-phoenix 0.0.29rc8__py3-none-any.whl → 0.0.31__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of arize-phoenix has been flagged as possibly problematic.

@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: arize-phoenix
- Version: 0.0.29rc8
+ Version: 0.0.31
  Summary: ML Observability in your notebook
  Project-URL: Documentation, https://docs.arize.com/phoenix/
  Project-URL: Issues, https://github.com/Arize-ai/phoenix/issues
@@ -41,6 +41,9 @@ Requires-Dist: pytest; extra == 'dev'
  Requires-Dist: pytest-cov; extra == 'dev'
  Requires-Dist: pytest-lazy-fixture; extra == 'dev'
  Requires-Dist: strawberry-graphql[debug-server]==0.178.0; extra == 'dev'
+ Provides-Extra: experimental
+ Requires-Dist: openai; extra == 'experimental'
+ Requires-Dist: tenacity; extra == 'experimental'
  Description-Content-Type: text/markdown

  <p align="center">
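The new experimental extra above is what gates the openai and tenacity dependencies used by the evals module added later in this diff. A minimal sketch (assuming standard pip extras syntax) of how downstream code might confirm those optional dependencies are installed:

    # Sketch only: confirm the optional dependencies declared by the
    # "experimental" extra are importable before using phoenix.experimental.evals.
    try:
        import openai  # noqa: F401
        import tenacity  # noqa: F401
    except ImportError:
        print("Missing extras; install with: pip install 'arize-phoenix[experimental]'")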
@@ -1,4 +1,4 @@
- phoenix/__init__.py,sha256=GtycPB0XtJn1eIzZcwNUueSxnCun8o6sY827KA_6Fpg,1117
+ phoenix/__init__.py,sha256=xUy3vWV6BkKa3ckBCBq0PWsocvw-5YMviTv5yQhF7f8,1189
  phoenix/config.py,sha256=tjNn9oqDxQmeO85sCchLlTsDiRJ6AoK0CTt_Uc_hrKM,1442
  phoenix/py.typed,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
  phoenix/services.py,sha256=BlQF7lTQwhXUHBZBjZOoTnDM2Qni_hziUXsfp1Hux4Q,3978
@@ -10,11 +10,14 @@ phoenix/core/model.py,sha256=vQ6RxpUPlncezJvur5u6xBN0Lkrk2gW0cTyb-qqaSqA,4713
  phoenix/core/model_schema.py,sha256=H8EHEsrGdsieD0grSYejunv3vqlzwM-OIxXx6DDGibA,50994
  phoenix/core/model_schema_adapter.py,sha256=GzJetQALsDUWJuFzLOEBSEC8M0JI0_F7i6CODYK2elI,8292
  phoenix/datasets/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- phoenix/datasets/dataset.py,sha256=DvaX2o5er0QuzIPcHpp6ujKYXcPAYvkYmtm1sDHKZrs,21873
+ phoenix/datasets/dataset.py,sha256=sH4yLFhJjEJrWCd8LjmCoa3FdfcIXAeDpnXUcUzS7dc,30996
  phoenix/datasets/errors.py,sha256=-Iyk8rsvP_KX-P4gOqjm26slkDq1-9CohK07_LkrYCI,8117
  phoenix/datasets/fixtures.py,sha256=0_PacL3dw49zulKpFpPdhvxJxeGmHTguqIyf2VXkBkk,19158
  phoenix/datasets/schema.py,sha256=HlM0f-pLFul2sYyHZM-Av8OFxLFkn57dkK_BWbMzyJY,6668
  phoenix/datasets/validation.py,sha256=dZ9lCFUV0EY7HCkQkQBrs-GLAEIZdpOqUxwD5l4dp88,8294
+ phoenix/experimental/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ phoenix/experimental/evals/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ phoenix/experimental/evals/retrievals.py,sha256=3PBv2yFIcxdn_y-L6We554TsozqCDjTVBE8017RB2mY,3793
  phoenix/metrics/README.md,sha256=5gekqTU-5gGdMwvcfNp2Wlu8p1ul9kGY_jq0XXQusoI,1964
  phoenix/metrics/__init__.py,sha256=sLp7td1GIt_0Z8dPUyP4L0-_4x9c871yAaGX30oMsvg,2433
  phoenix/metrics/binning.py,sha256=CXPPcAkRmmR__IG36a6UGs5RBtgXXPuWQbafPtuG1ww,12787
@@ -84,12 +87,12 @@ phoenix/server/static/apple-touch-icon.png,sha256=fOfpjqGpWYbJ0eAurKsyoZP1EAs6ZV
  phoenix/server/static/favicon.ico,sha256=bY0vvCKRftemZfPShwZtE93DiiQdaYaozkPGwNFr6H8,34494
  phoenix/server/static/index.css,sha256=KKGpx4iwF91VGRm0YN-4cn8oC-oIqC6HecoPf0x3ZM8,1885
  phoenix/server/static/index.html,sha256=xPZZH-y4dWlbDutPEV1k0rhmWJtIV-Db9aYP-dEc7wM,703
- phoenix/server/static/index.js,sha256=Z7vMbIgbNLvLrx-FNUc84R8qvqfFm7XhQ2fCKZ3LvLU,2534545
+ phoenix/server/static/index.js,sha256=SJnIFu7ufB_k38YBCI7D1btTSq4mmE7WkUf1iOBxiGw,2573278
  phoenix/server/static/modernizr.js,sha256=mvK-XtkNqjOral-QvzoqsyOMECXIMu5BQwSVN_wcU9c,2564
  phoenix/session/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  phoenix/session/session.py,sha256=WwGH7qStR4kAhmrRsw35BwXvCQDnEWpGm0crjyrWTvs,9519
- arize_phoenix-0.0.29rc8.dist-info/METADATA,sha256=vx4rnznFhvKF0LjDzgZ5_ZRpeqGeQ7dbKxAahoitZJs,10852
- arize_phoenix-0.0.29rc8.dist-info/WHEEL,sha256=9QBuHhg6FNW7lppboF2vKVbCGTVzsFykgRQjjlajrhA,87
- arize_phoenix-0.0.29rc8.dist-info/licenses/IP_NOTICE,sha256=JBqyyCYYxGDfzQ0TtsQgjts41IJoa-hiwDrBjCb9gHM,469
- arize_phoenix-0.0.29rc8.dist-info/licenses/LICENSE,sha256=HFkW9REuMOkvKRACuwLPT0hRydHb3zNg-fdFt94td18,3794
- arize_phoenix-0.0.29rc8.dist-info/RECORD,,
+ arize_phoenix-0.0.31.dist-info/METADATA,sha256=PLrtaGcA6hRk8KhAjAxHXtvLNNfIrH5HhjnPlhpeiSU,10974
+ arize_phoenix-0.0.31.dist-info/WHEEL,sha256=hKi7AIIx6qfnsRbr087vpeJnrVUuDokDHZacPPMW7-Y,87
+ arize_phoenix-0.0.31.dist-info/licenses/IP_NOTICE,sha256=JBqyyCYYxGDfzQ0TtsQgjts41IJoa-hiwDrBjCb9gHM,469
+ arize_phoenix-0.0.31.dist-info/licenses/LICENSE,sha256=HFkW9REuMOkvKRACuwLPT0hRydHb3zNg-fdFt94td18,3794
+ arize_phoenix-0.0.31.dist-info/RECORD,,
@@ -1,4 +1,4 @@
  Wheel-Version: 1.0
- Generator: hatchling 1.18.0
+ Generator: hatchling 1.12.2
  Root-Is-Purelib: true
  Tag: py3-none-any
phoenix/__init__.py CHANGED
@@ -2,8 +2,9 @@ from .datasets.dataset import Dataset
  from .datasets.fixtures import ExampleDatasets, load_example
  from .datasets.schema import EmbeddingColumnNames, RetrievalEmbeddingColumnNames, Schema
  from .session.session import Session, active_session, close_app, launch_app
+ from .trace.fixtures import load_example_traces

- __version__ = "0.0.29rc8"
+ __version__ = "0.0.31"

  # module level doc-string
  __doc__ = """
@@ -32,4 +33,5 @@ __all__ = [
      "close_app",
      "launch_app",
      "Session",
+     "load_example_traces",
  ]
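Illustrative only: the change above re-exports load_example_traces at the package root, so the two import paths below resolve to the same function. Its call signature is not shown in this diff, so nothing is invoked here.

    # Both import paths point at the same object after this change.
    import phoenix as px
    from phoenix.trace.fixtures import load_example_traces

    assert px.load_example_traces is load_example_traces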
phoenix/datasets/dataset.py CHANGED
@@ -1,7 +1,10 @@
  import logging
+ import re
  import uuid
  from copy import deepcopy
- from dataclasses import fields, replace
+ from dataclasses import dataclass, fields, replace
+ from enum import Enum
+ from itertools import groupby
  from typing import Any, Dict, List, Optional, Set, Tuple, Union

  import numpy as np
@@ -25,6 +28,7 @@ from .schema import (
      SINGLE_COLUMN_SCHEMA_FIELD_NAMES,
      EmbeddingColumnNames,
      EmbeddingFeatures,
+     RetrievalEmbeddingColumnNames,
      Schema,
      SchemaFieldName,
      SchemaFieldValue,
@@ -121,6 +125,160 @@ class Dataset:
          schema = Schema.from_json(schema_json)
          return cls(df, schema, name)

+     @classmethod
+     def from_open_inference(cls, dataframe: DataFrame) -> "Dataset":
+         schema = Schema()
+         column_renaming: Dict[str, str] = {}
+         for group_name, group in groupby(
+             sorted(
+                 map(_parse_open_inference_column_name, dataframe.columns),
+                 key=lambda column: column.name,
+             ),
+             key=lambda column: column.name,
+         ):
+             open_inference_columns = list(group)
+             if group_name == "":
+                 column_names_by_category = {
+                     column.category: column.full_name for column in open_inference_columns
+                 }
+                 schema = replace(
+                     schema,
+                     prediction_id_column_name=column_names_by_category.get(
+                         OpenInferenceCategory.id
+                     ),
+                     timestamp_column_name=column_names_by_category.get(
+                         OpenInferenceCategory.timestamp
+                     ),
+                 )
+                 continue
+             column_names_by_specifier = {
+                 column.specifier: column.full_name for column in open_inference_columns
+             }
+             if group_name == "response":
+                 response_vector_column_name = column_names_by_specifier.get(
+                     OpenInferenceSpecifier.embedding
+                 )
+                 if response_vector_column_name is not None:
+                     column_renaming[response_vector_column_name] = "response"
+                     schema = replace(
+                         schema,
+                         response_column_names=EmbeddingColumnNames(
+                             vector_column_name=column_renaming[response_vector_column_name],
+                             raw_data_column_name=column_names_by_specifier.get(
+                                 OpenInferenceSpecifier.default
+                             ),
+                         ),
+                     )
+                 else:
+                     response_text_column_name = column_names_by_specifier.get(
+                         OpenInferenceSpecifier.default
+                     )
+                     if response_text_column_name is None:
+                         raise ValueError(
+                             "invalid OpenInference format: missing text column for response"
+                         )
+                     column_renaming[response_text_column_name] = "response"
+                     schema = replace(
+                         schema,
+                         response_column_names=column_renaming[response_text_column_name],
+                     )
+             elif group_name == "prompt":
+                 prompt_vector_column_name = column_names_by_specifier.get(
+                     OpenInferenceSpecifier.embedding
+                 )
+                 if prompt_vector_column_name is None:
+                     raise ValueError(
+                         "invalid OpenInference format: missing embedding vector column for prompt"
+                     )
+                 column_renaming[prompt_vector_column_name] = "prompt"
+                 schema = replace(
+                     schema,
+                     prompt_column_names=RetrievalEmbeddingColumnNames(
+                         vector_column_name=column_renaming[prompt_vector_column_name],
+                         raw_data_column_name=column_names_by_specifier.get(
+                             OpenInferenceSpecifier.default
+                         ),
+                         context_retrieval_ids_column_name=column_names_by_specifier.get(
+                             OpenInferenceSpecifier.retrieved_document_ids
+                         ),
+                         context_retrieval_scores_column_name=column_names_by_specifier.get(
+                             OpenInferenceSpecifier.retrieved_document_scores
+                         ),
+                     ),
+                 )
+             elif OpenInferenceSpecifier.embedding in column_names_by_specifier:
+                 vector_column_name = column_names_by_specifier[OpenInferenceSpecifier.embedding]
+                 column_renaming[vector_column_name] = group_name
+                 embedding_feature_column_names = schema.embedding_feature_column_names or {}
+                 embedding_feature_column_names.update(
+                     {
+                         group_name: EmbeddingColumnNames(
+                             vector_column_name=column_renaming[vector_column_name],
+                             raw_data_column_name=column_names_by_specifier.get(
+                                 OpenInferenceSpecifier.raw_data
+                             ),
+                             link_to_data_column_name=column_names_by_specifier.get(
+                                 OpenInferenceSpecifier.link_to_data
+                             ),
+                         )
+                     }
+                 )
+                 schema = replace(
+                     schema,
+                     embedding_feature_column_names=embedding_feature_column_names,
+                 )
+             elif len(open_inference_columns) == 1:
+                 open_inference_column = open_inference_columns[0]
+                 raw_column_name = open_inference_column.full_name
+                 column_renaming[raw_column_name] = open_inference_column.name
+                 if open_inference_column.category is OpenInferenceCategory.feature:
+                     schema = replace(
+                         schema,
+                         feature_column_names=(
+                             (schema.feature_column_names or []) + [column_renaming[raw_column_name]]
+                         ),
+                     )
+                 elif open_inference_column.category is OpenInferenceCategory.tag:
+                     schema = replace(
+                         schema,
+                         tag_column_names=(
+                             (schema.tag_column_names or []) + [column_renaming[raw_column_name]]
+                         ),
+                     )
+                 elif open_inference_column.category is OpenInferenceCategory.prediction:
+                     if open_inference_column.specifier is OpenInferenceSpecifier.score:
+                         schema = replace(
+                             schema,
+                             prediction_score_column_name=column_renaming[raw_column_name],
+                         )
+                     if open_inference_column.specifier is OpenInferenceSpecifier.label:
+                         schema = replace(
+                             schema,
+                             prediction_label_column_name=column_renaming[raw_column_name],
+                         )
+                 elif open_inference_column.category is OpenInferenceCategory.actual:
+                     if open_inference_column.specifier is OpenInferenceSpecifier.score:
+                         schema = replace(
+                             schema,
+                             actual_score_column_name=column_renaming[raw_column_name],
+                         )
+                     if open_inference_column.specifier is OpenInferenceSpecifier.label:
+                         schema = replace(
+                             schema,
+                             actual_label_column_name=column_renaming[raw_column_name],
+                         )
+             else:
+                 raise ValueError(f"invalid OpenInference format: duplicated name `{group_name}`")
+
+         return cls(
+             dataframe.rename(
+                 column_renaming,
+                 axis=1,
+                 copy=False,
+             ),
+             schema,
+         )
+
      def to_disc(self) -> None:
          """writes the data and schema to disc"""
          directory = DATASET_DIR / self.name
@@ -528,3 +686,48 @@ def _get_schema_from_unknown_schema_param(schemaLike: SchemaLike) -> Schema:

  def _add_prediction_id(num_rows: int) -> List[str]:
      return [str(uuid.uuid4()) for _ in range(num_rows)]
+
+
+ class OpenInferenceCategory(Enum):
+     id = "id"
+     timestamp = "timestamp"
+     feature = "feature"
+     tag = "tag"
+     prediction = "prediction"
+     actual = "actual"
+
+
+ class OpenInferenceSpecifier(Enum):
+     default = ""
+     score = "score"
+     label = "label"
+     embedding = "embedding"
+     raw_data = "raw_data"
+     link_to_data = "link_to_data"
+     retrieved_document_ids = "retrieved_document_ids"
+     retrieved_document_scores = "retrieved_document_scores"
+
+
+ @dataclass(frozen=True)
+ class _OpenInferenceColumnName:
+     full_name: str
+     category: OpenInferenceCategory
+     data_type: str
+     specifier: OpenInferenceSpecifier = OpenInferenceSpecifier.default
+     name: str = ""
+
+
+ def _parse_open_inference_column_name(column_name: str) -> _OpenInferenceColumnName:
+     pattern = (
+         r"^:(?P<category>\w+)\.(?P<data_type>\[\w+\]|\w+)(\.(?P<specifier>\w+))?:(?P<name>.*)?$"
+     )
+     if match := re.match(pattern, column_name):
+         extract = match.groupdict(default="")
+         return _OpenInferenceColumnName(
+             full_name=column_name,
+             category=OpenInferenceCategory(extract.get("category", "").lower()),
+             data_type=extract.get("data_type", "").lower(),
+             specifier=OpenInferenceSpecifier(extract.get("specifier", "").lower()),
+             name=extract.get("name", ""),
+         )
+     raise ValueError(f"Invalid format for column name: {column_name}")
phoenix/experimental/evals/retrievals.py ADDED
@@ -0,0 +1,91 @@
+ """
+ Helper functions for evaluating the retrieval step of retrieval-augmented generation.
+ """
+
+ from typing import List, Optional
+
+ import openai
+ from tenacity import (
+     retry,
+     stop_after_attempt,
+     wait_random_exponential,
+ )
+
+ _EVALUATION_SYSTEM_MESSAGE = (
+     "You will be given a query and a reference text. "
+     "You must determine whether the reference text contains an answer to the input query. "
+     'Your response must be single word, either "relevant" or "irrelevant", '
+     "and should not contain any text or characters aside from that word. "
+     '"irrelevant" means that the reference text does not contain an answer to the query. '
+     '"relevant" means the reference text contains an answer to the query.'
+ )
+ _QUERY_CONTEXT_PROMPT_TEMPLATE = """# Query: {query}
+
+ # Reference: {reference}
+
+ # Answer ("relevant" or "irrelevant"): """
+
+
+ def compute_precisions_at_k(
+     relevance_classifications: List[Optional[bool]],
+ ) -> List[Optional[float]]:
+     """Given a list of relevance classifications, computes precision@k for k = 1, 2, ..., n, where
+     n is the length of the input list.
+
+     Args:
+         relevance_classifications (List[Optional[bool]]): A list of relevance classifications for a
+             set of retrieved documents, sorted by order of retrieval (i.e., the first element is the
+             classification for the first retrieved document, the second element is the
+             classification for the second retrieved document, etc.). The list may contain None
+             values, which indicate that the relevance classification for the corresponding document
+             is unknown.
+
+     Returns:
+         List[Optional[float]]: A list of precision@k values for k = 1, 2, ..., n, where n is the
+             length of the input list. The first element is the precision@1 value, the second element
+             is the precision@2 value, etc. If the input list contains any None values, those values
+             are omitted when computing the precision@k values.
+     """
+     precisions_at_k = []
+     num_relevant_classifications = 0
+     num_non_none_classifications = 0
+     for relevance_classification in relevance_classifications:
+         if isinstance(relevance_classification, bool):
+             num_non_none_classifications += 1
+             num_relevant_classifications += int(relevance_classification)
+         precisions_at_k.append(
+             num_relevant_classifications / num_non_none_classifications
+             if num_non_none_classifications > 0
+             else None
+         )
+     return precisions_at_k
+
+
+ @retry(wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(6))
+ def classify_relevance(query: str, document: str, model_name: str) -> Optional[bool]:
+     """Given a query and a document, determines whether the document contains an answer to the
+     query.
+
+     Args:
+         query (str): The query text. document (str): The document text. model_name (str): The name
+             of the OpenAI API model to use for the classification.
+
+     Returns:
+         Optional[bool]: A boolean indicating whether the document contains an answer to the query
+             (True meaning relevant, False meaning irrelevant), or None if the LLM produces an
+             unparseable output.
+     """
+     prompt = _QUERY_CONTEXT_PROMPT_TEMPLATE.format(
+         query=query,
+         reference=document,
+     )
+     response = openai.ChatCompletion.create(
+         messages=[
+             {"role": "system", "content": _EVALUATION_SYSTEM_MESSAGE},
+             {"role": "user", "content": prompt},
+         ],
+         model=model_name,
+     )
+     raw_response_text = str(response["choices"][0]["message"]["content"]).strip()
+     relevance_classification = {"relevant": True, "irrelevant": False}.get(raw_response_text)
+     return relevance_classification
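A hedged usage sketch of the two helpers in this new module: classify_relevance calls the OpenAI chat completion API (so it needs OPENAI_API_KEY configured, and the model name below is an assumption, not something this diff prescribes), while the compute_precisions_at_k output shown in the comment follows directly from the function's definition, with None entries skipped in both numerator and denominator.

    from phoenix.experimental.evals.retrievals import (
        classify_relevance,
        compute_precisions_at_k,
    )

    # Invented query and retrieved documents, purely for illustration.
    query = "How do I launch the Phoenix app?"
    documents = [
        "Call phoenix.launch_app() to start the server.",
        "Phoenix was announced in 2023.",
    ]

    # One OpenAI call per document; "gpt-4" is an assumed model name.
    relevance = [classify_relevance(query, doc, model_name="gpt-4") for doc in documents]
    print(compute_precisions_at_k(relevance))

    # Deterministic example: None is ignored, so precision stays at 1/2 for k=3
    # and becomes 2/3 for k=4.
    print(compute_precisions_at_k([True, False, None, True]))  # [1.0, 0.5, 0.5, 0.666...]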