arize-phoenix 3.0.2__py3-none-any.whl → 3.1.0__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.

Potentially problematic release.


@@ -477,6 +477,7 @@ def launch_app(
             f"port {port} is not occupied by another process) or file an issue "
             f"with us at https://github.com/Arize-ai/phoenix"
         )
+        _session = None
         return None

     print(f"🌍 To view the Phoenix app in your browser, visit {_session.url}")
@@ -489,7 +490,9 @@ def active_session() -> Optional[Session]:
     """
    Returns the active session if one exists, otherwise returns None
     """
-    return _session
+    if _session and _session.active:
+        return _session
+    return None


 def close_app() -> None:
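Note: taken together, the two hunks above change how a dead session is reported: `launch_app` now clears the module-level `_session` on startup failure, and `active_session` only returns a session that is still alive. A rough usage sketch, assuming the public `phoenix` package exposes these functions and that `close_app` marks the session inactive:

    import phoenix as px

    px.launch_app()
    session = px.active_session()  # a live Session, or None if startup failed
    if session is not None:
        print(session.url)

    px.close_app()
    assert px.active_session() is None  # a closed session is no longer returned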
@@ -1,4 +1,5 @@
 import ast
+import inspect
 import sys
 from dataclasses import dataclass, field
 from difflib import SequenceMatcher
@@ -15,10 +16,10 @@ from typing import (
     cast,
 )

+from openinference.semconv import trace
 from typing_extensions import TypeGuard

 import phoenix.trace.v1 as pb
-from phoenix.trace import semantic_conventions
 from phoenix.trace.dsl.missing import MISSING
 from phoenix.trace.schemas import COMPUTED_PREFIX, ComputedAttributes, Span, SpanID
@@ -137,9 +138,11 @@ def _allowed_replacements() -> Iterator[Tuple[str, ast.expr]]:
        yield "span.context." + source_segment, ast_replacement

    for field_name in (
-        getattr(semantic_conventions, variable_name)
-        for variable_name in dir(semantic_conventions)
-        if variable_name.isupper()
+        getattr(klass, attr)
+        for name in dir(trace)
+        if name.endswith("Attributes") and inspect.isclass(klass := getattr(trace, name))
+        for attr in dir(klass)
+        if attr.isupper()
    ):
        source_segment = field_name
        ast_replacement = _ast_replacement(f"span.attributes.get('{field_name}')")
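Note: the rewritten comprehension swaps the flat `phoenix.trace.semantic_conventions` module for attribute classes discovered dynamically in `openinference.semconv.trace`. A standalone sketch of the discovery pattern (requires the `openinference-semantic-conventions` package; the printed keys are illustrative):

    import inspect

    from openinference.semconv import trace

    field_names = sorted(
        getattr(klass, attr)
        for name in dir(trace)
        if name.endswith("Attributes") and inspect.isclass(klass := getattr(trace, name))
        for attr in dir(klass)
        if attr.isupper()
    )
    print(field_names[:3])  # e.g. flattened keys like 'document.content'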
@@ -163,8 +166,14 @@ class _Translator(ast.NodeTransformer):
         # In Python 3.9+, we can use `ast.unparse(node)` (no need for `source`).
         self._source = source

+    def visit_Subscript(self, node: ast.Subscript) -> Any:
+        if _is_metadata(node) and (key := _get_subscript_key(node)):
+            return _ast_metadata_subscript(key)
+        source_segment: str = cast(str, ast.get_source_segment(self._source, node))
+        raise SyntaxError(f"invalid expression: {source_segment}")  # TODO: add details
+
     def visit_Attribute(self, node: ast.Attribute) -> Any:
-        if _is_eval(node.value) and (eval_name := _get_eval_name(node.value)):
+        if _is_eval(node.value) and (eval_name := _get_subscript_key(node.value)):
             # e.g. `evals["name"].score`
             return _ast_evaluation_result_value(eval_name, node.attr)
         source_segment: str = cast(str, ast.get_source_segment(self._source, node))
@@ -206,9 +215,11 @@ def _validate_expression(
         if i == 0:
             if isinstance(node, (ast.BoolOp, ast.Compare)):
                 continue
+        elif _is_metadata(node):
+            continue
         elif _is_eval(node):
             # e.g. `evals["name"]`
-            if not (eval_name := _get_eval_name(node)) or (
+            if not (eval_name := _get_subscript_key(node)) or (
                 valid_eval_names is not None and eval_name not in valid_eval_names
             ):
                 source_segment = cast(str, ast.get_source_segment(source, node))
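Note: with `_is_metadata` accepted by the validator, a bare `metadata[...]` comparison now validates. A hedged usage sketch, assuming `SpanFilter` (imported in a later hunk) takes the filter expression as its first argument:

    from phoenix.trace.dsl import SpanFilter

    # keeps spans whose metadata dict has topic == "billing"
    span_filter = SpanFilter('metadata["topic"] == "billing"')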
@@ -293,6 +304,19 @@ def _ast_evaluation_result_value(name: str, attr: str) -> ast.expr:
     return ast.parse(source, mode="eval").body


+def _ast_metadata_subscript(key: str) -> ast.expr:
+    source = (
+        f"_MISSING if ("
+        f"    _MD := span.attributes.get('metadata')"
+        f") is None else ("
+        f"    _MISSING if not hasattr(_MD, 'get') or ("
+        f"        _VALUE := _MD.get('{key}')"
+        f"    ) is None else _VALUE"
+        f")"
+    )
+    return ast.parse(source, mode="eval").body
+
+
 def _is_eval(node: Any) -> TypeGuard[ast.Subscript]:
     # e.g. `evals["name"]`
     return (
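Note: the expression that `_ast_metadata_subscript` builds is dense; unrolled, it is the guarded lookup below. `span` and `_MISSING` are hypothetical stand-ins for the names the DSL binds when the compiled expression is evaluated:

    class _Span:  # hypothetical stand-in for a Phoenix span
        attributes = {"metadata": {"tenant": "acme"}}

    span, _MISSING, key = _Span(), object(), "tenant"

    _MD = span.attributes.get("metadata")
    if _MD is None or not hasattr(_MD, "get"):
        result = _MISSING  # no metadata, or not dict-like
    else:
        _VALUE = _MD.get(key)
        result = _MISSING if _VALUE is None else _VALUE

    assert result == "acme"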
@@ -302,7 +326,16 @@ def _is_eval(node: Any) -> TypeGuard[ast.Subscript]:
     )


-def _get_eval_name(node: ast.Subscript) -> Optional[str]:
+def _is_metadata(node: Any) -> TypeGuard[ast.Subscript]:
+    # e.g. `metadata["name"]`
+    return (
+        isinstance(node, ast.Subscript)
+        and isinstance(value := node.value, ast.Name)
+        and value.id == "metadata"
+    )
+
+
+def _get_subscript_key(node: ast.Subscript) -> Optional[str]:
     if sys.version_info < (3, 9):
         # Note that `ast.Index` is deprecated in Python 3.9+, but is necessary
         # for Python 3.8 as part of `ast.Subscript`.
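Note: on Python 3.9+, the renamed `_get_subscript_key` reads the string constant out of an `ast.Subscript` slice, so it works for both `evals["name"]` and the new `metadata["key"]` form. A minimal illustration:

    import ast

    node = ast.parse('metadata["topic"]', mode="eval").body
    assert isinstance(node, ast.Subscript)
    key = node.slice.value if isinstance(node.slice, ast.Constant) else None
    print(key)  # topic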
@@ -1,15 +1,15 @@
 from typing import List, Optional, Protocol, Union, cast

 import pandas as pd
+from openinference.semconv.trace import DocumentAttributes, SpanAttributes

 from phoenix.trace.dsl import SpanQuery
-from phoenix.trace.semantic_conventions import (
-    DOCUMENT_CONTENT,
-    DOCUMENT_SCORE,
-    INPUT_VALUE,
-    OUTPUT_VALUE,
-    RETRIEVAL_DOCUMENTS,
-)
+
+DOCUMENT_CONTENT = DocumentAttributes.DOCUMENT_CONTENT
+DOCUMENT_SCORE = DocumentAttributes.DOCUMENT_SCORE
+INPUT_VALUE = SpanAttributes.INPUT_VALUE
+OUTPUT_VALUE = SpanAttributes.OUTPUT_VALUE
+RETRIEVAL_DOCUMENTS = SpanAttributes.RETRIEVAL_DOCUMENTS

 INPUT = {"input": INPUT_VALUE}
 OUTPUT = {"output": OUTPUT_VALUE}
@@ -19,13 +19,15 @@ from typing import (
 )

 import pandas as pd
+from openinference.semconv.trace import SpanAttributes

 from phoenix.trace.dsl import SpanFilter
 from phoenix.trace.dsl.filter import SupportsGetSpanEvaluation
 from phoenix.trace.schemas import ATTRIBUTE_PREFIX, CONTEXT_PREFIX, Span
-from phoenix.trace.semantic_conventions import RETRIEVAL_DOCUMENTS
 from phoenix.trace.span_json_encoder import span_to_json

+RETRIEVAL_DOCUMENTS = SpanAttributes.RETRIEVAL_DOCUMENTS
+
 _SPAN_ID = "context.span_id"
 _PRESCRIBED_POSITION_PREFIXES = {
     RETRIEVAL_DOCUMENTS: "document_",
phoenix/trace/errors.py CHANGED
@@ -3,3 +3,7 @@ from phoenix.exceptions import PhoenixException

 class InvalidParquetMetadataError(PhoenixException):
     pass
+
+
+class IncompatibleLibraryVersionError(PhoenixException):
+    pass
phoenix/trace/fixtures.py CHANGED
@@ -137,8 +137,6 @@ def _download_traces_fixture(
 def load_example_traces(use_case: str) -> TraceDataset:
     """
     Loads a trace dataframe by name.
-
-    NB: this functionality is under active construction.
     """
     fixture = _get_trace_fixture_by_name(use_case)
     return TraceDataset(json_lines_to_df(_download_traces_fixture(fixture)))
@@ -1,4 +1,3 @@
 from .callback import OpenInferenceTraceCallbackHandler
-from .debug_callback import DebugCallbackHandler

-__all__ = ["OpenInferenceTraceCallbackHandler", "DebugCallbackHandler"]
+__all__ = ["OpenInferenceTraceCallbackHandler"]
@@ -1,25 +1,64 @@
 import logging
-from importlib.metadata import PackageNotFoundError
+from importlib.metadata import PackageNotFoundError, version
 from importlib.util import find_spec
-from typing import (
-    Any,
-)
-
-from openinference.instrumentation.llama_index._callback import (
-    OpenInferenceTraceCallbackHandler as _OpenInferenceTraceCallbackHandler,
-)
-from openinference.instrumentation.llama_index.version import (
-    __version__,
-)
+from typing import Any
+
 from opentelemetry import trace as trace_api
 from opentelemetry.sdk import trace as trace_sdk
 from opentelemetry.sdk.trace.export import SimpleSpanProcessor

+from phoenix.trace.errors import IncompatibleLibraryVersionError
 from phoenix.trace.exporter import _OpenInferenceExporter
 from phoenix.trace.tracer import _show_deprecation_warnings

 logger = logging.getLogger(__name__)

+LLAMA_INDEX_MODERN_VERSION = (0, 10, 0)
+INSTRUMENTATION_MODERN_VERSION = (1, 0, 0)
+
+
+def _check_instrumentation_compatibility() -> bool:
+    if find_spec("llama_index") is None:
+        raise PackageNotFoundError("Missing `llama-index`. Install with `pip install llama-index`.")
+    # split the version string into a tuple of integers
+    llama_index_version_str = version("llama-index")
+    llama_index_version = tuple(map(int, llama_index_version_str.split(".")[:3]))
+    instrumentation_version_str = version("openinference-instrumentation-llama-index")
+    instrumentation_version = tuple(map(int, instrumentation_version_str.split(".")[:3]))
+    # check if the llama_index version is compatible with the instrumentation version
+    if (
+        llama_index_version < LLAMA_INDEX_MODERN_VERSION
+        and instrumentation_version >= INSTRUMENTATION_MODERN_VERSION
+    ):
+        raise IncompatibleLibraryVersionError(
+            f"llama-index v{llama_index_version_str} is not compatible with "
+            f"openinference-instrumentation-llama-index v{instrumentation_version_str}. "
+            "Please either migrate llama-index to at least 0.10.0 or downgrade "
+            "openinference-instrumentation-llama-index via "
+            "`pip install 'openinference-instrumentation-llama-index<1.0.0'`."
+        )
+    elif (
+        llama_index_version >= LLAMA_INDEX_MODERN_VERSION
+        and instrumentation_version < INSTRUMENTATION_MODERN_VERSION
+    ):
+        raise IncompatibleLibraryVersionError(
+            f"llama-index v{llama_index_version_str} is not compatible with "
+            f"openinference-instrumentation-llama-index v{instrumentation_version_str}. "
+            "Please upgrade openinference-instrumentation-llama-index to at least 1.0.0 via "
+            "`pip install 'openinference-instrumentation-llama-index>=1.0.0'`."
+        )
+    # if the versions are compatible, return True
+    return True
+
+
+if _check_instrumentation_compatibility():
+    from openinference.instrumentation.llama_index._callback import (
+        OpenInferenceTraceCallbackHandler as _OpenInferenceTraceCallbackHandler,
+    )
+    from openinference.instrumentation.llama_index.version import (
+        __version__,
+    )
+

 class OpenInferenceTraceCallbackHandler(_OpenInferenceTraceCallbackHandler):
     """Callback handler for storing LLM application trace data in OpenInference format.
@@ -33,10 +72,6 @@ class OpenInferenceTraceCallbackHandler(_OpenInferenceTraceCallbackHandler):

     def __init__(self, *args: Any, **kwargs: Any) -> None:
         _show_deprecation_warnings(self, *args, **kwargs)
-        if find_spec("llama_index") is None:
-            raise PackageNotFoundError(
-                "Missing `llama-index`. Install with `pip install llama-index`."
-            )
         tracer_provider = trace_sdk.TracerProvider()
         tracer_provider.add_span_processor(SimpleSpanProcessor(_OpenInferenceExporter()))
         super().__init__(trace_api.get_tracer(__name__, __version__, tracer_provider))
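Note: the compatibility gate above compares dotted version strings as integer tuples. A standalone sketch of that comparison (a pre-release string such as "1.0.0rc1" would break the naive int cast, which the added code does not guard against):

    def _as_tuple(version_str: str) -> tuple:
        # "0.9.48" -> (0, 9, 48); tuples compare component by component
        return tuple(int(part) for part in version_str.split(".")[:3])

    assert _as_tuple("0.9.48") < (0, 10, 0) <= _as_tuple("0.10.3")
    assert _as_tuple("1.0.0") >= (1, 0, 0)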
phoenix/trace/otel.py CHANGED
@@ -1,3 +1,4 @@
+import inspect
 import json
 from binascii import hexlify, unhexlify
 from datetime import datetime, timezone
@@ -21,12 +22,23 @@ from typing import (

 import numpy as np
 import opentelemetry.proto.trace.v1.trace_pb2 as otlp
+from openinference.semconv import trace
+from openinference.semconv.trace import (
+    DocumentAttributes,
+    EmbeddingAttributes,
+    MessageAttributes,
+    SpanAttributes,
+    ToolCallAttributes,
+)
 from opentelemetry.proto.common.v1.common_pb2 import AnyValue, ArrayValue, KeyValue
 from opentelemetry.util.types import Attributes, AttributeValue
 from typing_extensions import TypeAlias, assert_never

-import phoenix.trace.semantic_conventions as sem_conv
 from phoenix.trace.schemas import (
+    EXCEPTION_ESCAPED,
+    EXCEPTION_MESSAGE,
+    EXCEPTION_STACKTRACE,
+    EXCEPTION_TYPE,
     MimeType,
     Span,
     SpanContext,
@@ -37,18 +49,38 @@ from phoenix.trace.schemas import (
     SpanStatusCode,
     TraceID,
 )
-from phoenix.trace.semantic_conventions import (
-    DOCUMENT_METADATA,
-    EXCEPTION_ESCAPED,
-    EXCEPTION_MESSAGE,
-    EXCEPTION_STACKTRACE,
-    EXCEPTION_TYPE,
-    INPUT_MIME_TYPE,
-    LLM_PROMPT_TEMPLATE_VARIABLES,
-    OPENINFERENCE_SPAN_KIND,
-    OUTPUT_MIME_TYPE,
-    TOOL_PARAMETERS,
-)
+
+DOCUMENT_CONTENT = DocumentAttributes.DOCUMENT_CONTENT
+DOCUMENT_ID = DocumentAttributes.DOCUMENT_ID
+DOCUMENT_METADATA = DocumentAttributes.DOCUMENT_METADATA
+EMBEDDING_EMBEDDINGS = SpanAttributes.EMBEDDING_EMBEDDINGS
+EMBEDDING_MODEL_NAME = SpanAttributes.EMBEDDING_MODEL_NAME
+EMBEDDING_TEXT = EmbeddingAttributes.EMBEDDING_TEXT
+EMBEDDING_VECTOR = EmbeddingAttributes.EMBEDDING_VECTOR
+INPUT_MIME_TYPE = SpanAttributes.INPUT_MIME_TYPE
+INPUT_VALUE = SpanAttributes.INPUT_VALUE
+LLM_INPUT_MESSAGES = SpanAttributes.LLM_INPUT_MESSAGES
+LLM_INVOCATION_PARAMETERS = SpanAttributes.LLM_INVOCATION_PARAMETERS
+LLM_MODEL_NAME = SpanAttributes.LLM_MODEL_NAME
+LLM_OUTPUT_MESSAGES = SpanAttributes.LLM_OUTPUT_MESSAGES
+LLM_PROMPTS = SpanAttributes.LLM_PROMPTS
+LLM_TOKEN_COUNT_COMPLETION = SpanAttributes.LLM_TOKEN_COUNT_COMPLETION
+LLM_TOKEN_COUNT_PROMPT = SpanAttributes.LLM_TOKEN_COUNT_PROMPT
+LLM_TOKEN_COUNT_TOTAL = SpanAttributes.LLM_TOKEN_COUNT_TOTAL
+MESSAGE_CONTENT = MessageAttributes.MESSAGE_CONTENT
+MESSAGE_FUNCTION_CALL_ARGUMENTS_JSON = MessageAttributes.MESSAGE_FUNCTION_CALL_ARGUMENTS_JSON
+MESSAGE_FUNCTION_CALL_NAME = MessageAttributes.MESSAGE_FUNCTION_CALL_NAME
+MESSAGE_ROLE = MessageAttributes.MESSAGE_ROLE
+MESSAGE_TOOL_CALLS = MessageAttributes.MESSAGE_TOOL_CALLS
+OPENINFERENCE_SPAN_KIND = SpanAttributes.OPENINFERENCE_SPAN_KIND
+OUTPUT_MIME_TYPE = SpanAttributes.OUTPUT_MIME_TYPE
+OUTPUT_VALUE = SpanAttributes.OUTPUT_VALUE
+RETRIEVAL_DOCUMENTS = SpanAttributes.RETRIEVAL_DOCUMENTS
+TOOL_CALL_FUNCTION_ARGUMENTS_JSON = ToolCallAttributes.TOOL_CALL_FUNCTION_ARGUMENTS_JSON
+TOOL_CALL_FUNCTION_NAME = ToolCallAttributes.TOOL_CALL_FUNCTION_NAME
+TOOL_PARAMETERS = SpanAttributes.TOOL_PARAMETERS
+LLM_PROMPT_TEMPLATE = SpanAttributes.LLM_PROMPT_TEMPLATE
+LLM_PROMPT_TEMPLATE_VARIABLES = SpanAttributes.LLM_PROMPT_TEMPLATE_VARIABLES


 def decode(otlp_span: otlp.Span) -> Span:
@@ -186,7 +218,13 @@ def _decode_status(otlp_status: otlp.Status) -> Tuple[SpanStatusCode, StatusMessage


 _SEMANTIC_CONVENTIONS: List[str] = sorted(
-    (getattr(sem_conv, name) for name in dir(sem_conv) if name.isupper()),
+    (
+        getattr(klass, attr)
+        for name in dir(trace)
+        if name.endswith("Attributes") and inspect.isclass(klass := getattr(trace, name))
+        for attr in dir(klass)
+        if attr.isupper()
+    ),
     reverse=True,
 )  # sorted so the longer strings go first

phoenix/trace/schemas.py CHANGED
@@ -4,12 +4,10 @@ from enum import Enum
 from typing import Any, Dict, List, Optional, Union
 from uuid import UUID

-from phoenix.trace.semantic_conventions import (
-    EXCEPTION_ESCAPED,
-    EXCEPTION_MESSAGE,
-    EXCEPTION_STACKTRACE,
-    EXCEPTION_TYPE,
-)
+EXCEPTION_TYPE = "exception.type"
+EXCEPTION_MESSAGE = "exception.message"
+EXCEPTION_ESCAPED = "exception.escaped"
+EXCEPTION_STACKTRACE = "exception.stacktrace"


 class SpanStatusCode(Enum):
@@ -2,7 +2,10 @@ import json
 from datetime import datetime
 from typing import Any, Dict, Optional

+from openinference.semconv.trace import SpanAttributes
+
 from phoenix.trace.schemas import (
+    EXCEPTION_MESSAGE,
     MimeType,
     Span,
     SpanContext,
@@ -14,11 +17,9 @@ from phoenix.trace.schemas import (
     SpanStatusCode,
     TraceID,
 )
-from phoenix.trace.semantic_conventions import (
-    EXCEPTION_MESSAGE,
-    INPUT_MIME_TYPE,
-    OUTPUT_MIME_TYPE,
-)
+
+INPUT_MIME_TYPE = SpanAttributes.INPUT_MIME_TYPE
+OUTPUT_MIME_TYPE = SpanAttributes.OUTPUT_MIME_TYPE


 def json_to_attributes(obj: Optional[Dict[str, Any]]) -> Dict[str, Any]:
@@ -5,12 +5,7 @@ from enum import Enum
 from typing import Any, List
 from uuid import UUID

-from .schemas import (
-    Span,
-    SpanContext,
-    SpanConversationAttributes,
-    SpanEvent,
-)
+from phoenix.trace.schemas import Span, SpanContext, SpanConversationAttributes, SpanEvent


 class SpanJSONEncoder(json.JSONEncoder):
@@ -6,23 +6,26 @@ from uuid import UUID, uuid4
 from warnings import warn

 import pandas as pd
+from openinference.semconv.trace import (
+    DocumentAttributes,
+    RerankerAttributes,
+    SpanAttributes,
+)
 from pandas import DataFrame, read_parquet
 from pyarrow import Schema, Table, parquet

+from phoenix.config import DATASET_DIR, GENERATED_DATASET_NAME_PREFIX, TRACE_DATASET_DIR
 from phoenix.datetime_utils import normalize_timestamps
 from phoenix.trace.errors import InvalidParquetMetadataError
+from phoenix.trace.schemas import ATTRIBUTE_PREFIX, CONTEXT_PREFIX, Span
+from phoenix.trace.span_evaluations import Evaluations, SpanEvaluations
+from phoenix.trace.span_json_decoder import json_to_span
+from phoenix.trace.span_json_encoder import span_to_json

-from ..config import DATASET_DIR, GENERATED_DATASET_NAME_PREFIX, TRACE_DATASET_DIR
-from .schemas import ATTRIBUTE_PREFIX, CONTEXT_PREFIX, Span
-from .semantic_conventions import (
-    DOCUMENT_METADATA,
-    RERANKER_INPUT_DOCUMENTS,
-    RERANKER_OUTPUT_DOCUMENTS,
-    RETRIEVAL_DOCUMENTS,
-)
-from .span_evaluations import Evaluations, SpanEvaluations
-from .span_json_decoder import json_to_span
-from .span_json_encoder import span_to_json
+DOCUMENT_METADATA = DocumentAttributes.DOCUMENT_METADATA
+RERANKER_INPUT_DOCUMENTS = RerankerAttributes.RERANKER_INPUT_DOCUMENTS
+RERANKER_OUTPUT_DOCUMENTS = RerankerAttributes.RERANKER_OUTPUT_DOCUMENTS
+RETRIEVAL_DOCUMENTS = SpanAttributes.RETRIEVAL_DOCUMENTS

 # A set of columns that is required
 REQUIRED_COLUMNS = [
@@ -98,9 +101,12 @@ class TraceDataset:
     """

     name: str
+    """
+    A human readable name for the dataset.
+    """
     dataframe: pd.DataFrame
     evaluations: List[Evaluations] = []
-    _id: UUID = uuid4()
+    _id: UUID
     _data_file_name: str = "data.parquet"

     def __init__(
@@ -128,8 +134,10 @@ class TraceDataset:
             raise ValueError(
                 f"The dataframe is missing some required columns: {', '.join(missing_columns)}"
             )
+        self._id = uuid4()
         self.dataframe = normalize_dataframe(dataframe)
-        self.name = name or f"{GENERATED_DATASET_NAME_PREFIX}{str(uuid4())}"
+        # TODO: This is not used in any meaningful way. Should remove
+        self.name = name or f"{GENERATED_DATASET_NAME_PREFIX}{str(self._id)}"
         self.evaluations = list(evaluations)

     @classmethod
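Note: the `_id` change fixes a subtle bug: a class-level default like `_id: UUID = uuid4()` is evaluated once when the class body runs, so every `TraceDataset` instance shared the same ID. A minimal illustration with hypothetical classes:

    from uuid import UUID, uuid4

    class Shared:
        _id: UUID = uuid4()  # evaluated once at class definition

    class PerInstance:
        def __init__(self) -> None:
            self._id = uuid4()  # fresh UUID per instance

    assert Shared()._id == Shared()._id
    assert PerInstance()._id != PerInstance()._id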
@@ -246,6 +254,8 @@ class TraceDataset:
             }
         )
         parquet.write_table(table, path)
+        print(f"💾 Trace dataset saved under ID: {self._id}")
+        print(f"📂 Trace dataset path: {path}")
         return self._id

     @classmethod
@@ -285,7 +295,7 @@ class TraceDataset:
             warn(f'Failed to load evaluations with id: "{eval_id}"')
         table = parquet.read_table(path)
         dataframe = table.to_pandas()
-        ds = cls(dataframe, dataset_name, evaluations)
+        ds = cls(dataframe=dataframe, name=dataset_name, evaluations=evaluations)
         ds._id = dataset_id
         return ds

phoenix/version.py CHANGED
@@ -1 +1 @@
-__version__ = "3.0.2"
+__version__ = "3.1.0"
@@ -1,50 +0,0 @@
-import logging
-from typing import Any, Dict, List, Optional
-
-from llama_index.callbacks.base_handler import BaseCallbackHandler
-from llama_index.callbacks.schema import CBEventType
-
-logger = logging.getLogger(__name__)
-logger.addHandler(logging.NullHandler())
-
-CBEventID = str
-
-
-class DebugCallbackHandler(BaseCallbackHandler):
-    def _print_event(self, payload: Dict[Any, Any]) -> None:
-        for k, v in payload.items():
-            print(f"**{k}: **\n{v}")
-        print("*" * 50)
-
-    def __init__(self) -> None:
-        super().__init__(event_starts_to_ignore=[], event_ends_to_ignore=[])
-
-    def on_event_start(
-        self,
-        event_type: CBEventType,
-        payload: Optional[Dict[str, Any]] = None,
-        event_id: CBEventID = "",
-        parent_id: CBEventID = "",
-        **kwargs: Any,
-    ) -> CBEventID:
-        return event_id
-
-    def on_event_end(
-        self,
-        event_type: CBEventType,
-        payload: Optional[Dict[str, Any]] = None,
-        event_id: CBEventID = "",
-        **kwargs: Any,
-    ) -> None:
-        if payload is not None:
-            self._print_event(payload)
-
-    def start_trace(self, trace_id: Optional[str] = None) -> None:
-        return
-
-    def end_trace(
-        self,
-        trace_id: Optional[str] = None,
-        trace_map: Optional[Dict[str, List[str]]] = None,
-    ) -> None:
-        return