arize-phoenix 5.5.2__py3-none-any.whl → 5.6.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of arize-phoenix has been flagged; see the registry's advisory for details.

Files changed (172)
  1. {arize_phoenix-5.5.2.dist-info → arize_phoenix-5.6.0.dist-info}/METADATA +3 -6
  2. {arize_phoenix-5.5.2.dist-info → arize_phoenix-5.6.0.dist-info}/RECORD +171 -171
  3. phoenix/config.py +8 -8
  4. phoenix/core/model.py +3 -3
  5. phoenix/core/model_schema.py +41 -50
  6. phoenix/core/model_schema_adapter.py +17 -16
  7. phoenix/datetime_utils.py +2 -2
  8. phoenix/db/bulk_inserter.py +10 -20
  9. phoenix/db/engines.py +2 -1
  10. phoenix/db/enums.py +2 -2
  11. phoenix/db/helpers.py +8 -7
  12. phoenix/db/insertion/dataset.py +9 -19
  13. phoenix/db/insertion/document_annotation.py +14 -13
  14. phoenix/db/insertion/helpers.py +6 -16
  15. phoenix/db/insertion/span_annotation.py +14 -13
  16. phoenix/db/insertion/trace_annotation.py +14 -13
  17. phoenix/db/insertion/types.py +19 -30
  18. phoenix/db/migrations/versions/3be8647b87d8_add_token_columns_to_spans_table.py +8 -8
  19. phoenix/db/models.py +28 -28
  20. phoenix/experiments/evaluators/base.py +2 -1
  21. phoenix/experiments/evaluators/code_evaluators.py +4 -5
  22. phoenix/experiments/evaluators/llm_evaluators.py +157 -4
  23. phoenix/experiments/evaluators/utils.py +3 -2
  24. phoenix/experiments/functions.py +10 -21
  25. phoenix/experiments/tracing.py +2 -1
  26. phoenix/experiments/types.py +20 -29
  27. phoenix/experiments/utils.py +2 -1
  28. phoenix/inferences/errors.py +6 -5
  29. phoenix/inferences/fixtures.py +6 -5
  30. phoenix/inferences/inferences.py +37 -37
  31. phoenix/inferences/schema.py +11 -10
  32. phoenix/inferences/validation.py +13 -14
  33. phoenix/logging/_formatter.py +3 -3
  34. phoenix/metrics/__init__.py +5 -4
  35. phoenix/metrics/binning.py +2 -1
  36. phoenix/metrics/metrics.py +2 -1
  37. phoenix/metrics/mixins.py +7 -6
  38. phoenix/metrics/retrieval_metrics.py +2 -1
  39. phoenix/metrics/timeseries.py +5 -4
  40. phoenix/metrics/wrappers.py +2 -2
  41. phoenix/pointcloud/clustering.py +3 -4
  42. phoenix/pointcloud/pointcloud.py +7 -5
  43. phoenix/pointcloud/umap_parameters.py +2 -1
  44. phoenix/server/api/dataloaders/annotation_summaries.py +12 -19
  45. phoenix/server/api/dataloaders/average_experiment_run_latency.py +2 -2
  46. phoenix/server/api/dataloaders/cache/two_tier_cache.py +3 -2
  47. phoenix/server/api/dataloaders/dataset_example_revisions.py +3 -8
  48. phoenix/server/api/dataloaders/dataset_example_spans.py +2 -5
  49. phoenix/server/api/dataloaders/document_evaluation_summaries.py +12 -18
  50. phoenix/server/api/dataloaders/document_evaluations.py +3 -7
  51. phoenix/server/api/dataloaders/document_retrieval_metrics.py +6 -13
  52. phoenix/server/api/dataloaders/experiment_annotation_summaries.py +4 -8
  53. phoenix/server/api/dataloaders/experiment_error_rates.py +2 -5
  54. phoenix/server/api/dataloaders/experiment_run_annotations.py +3 -7
  55. phoenix/server/api/dataloaders/experiment_run_counts.py +1 -5
  56. phoenix/server/api/dataloaders/experiment_sequence_number.py +2 -5
  57. phoenix/server/api/dataloaders/latency_ms_quantile.py +21 -30
  58. phoenix/server/api/dataloaders/min_start_or_max_end_times.py +7 -13
  59. phoenix/server/api/dataloaders/project_by_name.py +3 -3
  60. phoenix/server/api/dataloaders/record_counts.py +11 -18
  61. phoenix/server/api/dataloaders/span_annotations.py +3 -7
  62. phoenix/server/api/dataloaders/span_dataset_examples.py +3 -8
  63. phoenix/server/api/dataloaders/span_descendants.py +3 -7
  64. phoenix/server/api/dataloaders/span_projects.py +2 -2
  65. phoenix/server/api/dataloaders/token_counts.py +12 -19
  66. phoenix/server/api/dataloaders/trace_row_ids.py +3 -7
  67. phoenix/server/api/dataloaders/user_roles.py +3 -3
  68. phoenix/server/api/dataloaders/users.py +3 -3
  69. phoenix/server/api/helpers/__init__.py +4 -3
  70. phoenix/server/api/helpers/dataset_helpers.py +10 -9
  71. phoenix/server/api/input_types/AddExamplesToDatasetInput.py +2 -2
  72. phoenix/server/api/input_types/AddSpansToDatasetInput.py +2 -2
  73. phoenix/server/api/input_types/ChatCompletionMessageInput.py +13 -1
  74. phoenix/server/api/input_types/ClusterInput.py +2 -2
  75. phoenix/server/api/input_types/DeleteAnnotationsInput.py +1 -3
  76. phoenix/server/api/input_types/DeleteDatasetExamplesInput.py +2 -2
  77. phoenix/server/api/input_types/DeleteExperimentsInput.py +1 -3
  78. phoenix/server/api/input_types/DimensionFilter.py +4 -4
  79. phoenix/server/api/input_types/Granularity.py +1 -1
  80. phoenix/server/api/input_types/InvocationParameters.py +2 -2
  81. phoenix/server/api/input_types/PatchDatasetExamplesInput.py +2 -2
  82. phoenix/server/api/mutations/dataset_mutations.py +4 -4
  83. phoenix/server/api/mutations/experiment_mutations.py +1 -2
  84. phoenix/server/api/mutations/export_events_mutations.py +7 -7
  85. phoenix/server/api/mutations/span_annotations_mutations.py +4 -4
  86. phoenix/server/api/mutations/trace_annotations_mutations.py +4 -4
  87. phoenix/server/api/mutations/user_mutations.py +4 -4
  88. phoenix/server/api/openapi/schema.py +2 -2
  89. phoenix/server/api/queries.py +20 -20
  90. phoenix/server/api/routers/oauth2.py +4 -4
  91. phoenix/server/api/routers/v1/datasets.py +22 -36
  92. phoenix/server/api/routers/v1/evaluations.py +6 -5
  93. phoenix/server/api/routers/v1/experiment_evaluations.py +2 -2
  94. phoenix/server/api/routers/v1/experiment_runs.py +2 -2
  95. phoenix/server/api/routers/v1/experiments.py +4 -4
  96. phoenix/server/api/routers/v1/spans.py +13 -12
  97. phoenix/server/api/routers/v1/traces.py +5 -5
  98. phoenix/server/api/routers/v1/utils.py +5 -5
  99. phoenix/server/api/subscriptions.py +284 -162
  100. phoenix/server/api/types/AnnotationSummary.py +3 -3
  101. phoenix/server/api/types/Cluster.py +8 -7
  102. phoenix/server/api/types/Dataset.py +5 -4
  103. phoenix/server/api/types/Dimension.py +3 -3
  104. phoenix/server/api/types/DocumentEvaluationSummary.py +8 -7
  105. phoenix/server/api/types/EmbeddingDimension.py +6 -5
  106. phoenix/server/api/types/EvaluationSummary.py +3 -3
  107. phoenix/server/api/types/Event.py +7 -7
  108. phoenix/server/api/types/Experiment.py +3 -3
  109. phoenix/server/api/types/ExperimentComparison.py +2 -4
  110. phoenix/server/api/types/Inferences.py +9 -8
  111. phoenix/server/api/types/InferencesRole.py +2 -2
  112. phoenix/server/api/types/Model.py +2 -2
  113. phoenix/server/api/types/Project.py +11 -18
  114. phoenix/server/api/types/Segments.py +3 -3
  115. phoenix/server/api/types/Span.py +8 -7
  116. phoenix/server/api/types/TimeSeries.py +8 -7
  117. phoenix/server/api/types/Trace.py +2 -2
  118. phoenix/server/api/types/UMAPPoints.py +6 -6
  119. phoenix/server/api/types/User.py +3 -3
  120. phoenix/server/api/types/node.py +1 -3
  121. phoenix/server/api/types/pagination.py +4 -4
  122. phoenix/server/api/utils.py +2 -4
  123. phoenix/server/app.py +16 -25
  124. phoenix/server/bearer_auth.py +4 -10
  125. phoenix/server/dml_event.py +3 -3
  126. phoenix/server/dml_event_handler.py +10 -24
  127. phoenix/server/grpc_server.py +3 -2
  128. phoenix/server/jwt_store.py +22 -21
  129. phoenix/server/main.py +3 -3
  130. phoenix/server/oauth2.py +3 -2
  131. phoenix/server/rate_limiters.py +5 -8
  132. phoenix/server/static/.vite/manifest.json +31 -31
  133. phoenix/server/static/assets/components-C70HJiXz.js +1612 -0
  134. phoenix/server/static/assets/{index-DCzakdJq.js → index-DLe1Oo3l.js} +2 -2
  135. phoenix/server/static/assets/{pages-CAL1FDMt.js → pages-C8-Sl7JI.js} +269 -434
  136. phoenix/server/static/assets/{vendor-6IcPAw_j.js → vendor-CtqfhlbC.js} +6 -6
  137. phoenix/server/static/assets/{vendor-arizeai-DRZuoyuF.js → vendor-arizeai-C_3SBz56.js} +2 -2
  138. phoenix/server/static/assets/{vendor-codemirror-DVE2_WBr.js → vendor-codemirror-wfdk9cjp.js} +1 -1
  139. phoenix/server/static/assets/{vendor-recharts-DwrexFA4.js → vendor-recharts-BiVnSv90.js} +1 -1
  140. phoenix/server/thread_server.py +1 -1
  141. phoenix/server/types.py +17 -29
  142. phoenix/services.py +4 -3
  143. phoenix/session/client.py +12 -24
  144. phoenix/session/data_extractor.py +3 -3
  145. phoenix/session/evaluation.py +1 -2
  146. phoenix/session/session.py +11 -20
  147. phoenix/trace/attributes.py +16 -28
  148. phoenix/trace/dsl/filter.py +17 -21
  149. phoenix/trace/dsl/helpers.py +3 -3
  150. phoenix/trace/dsl/query.py +13 -22
  151. phoenix/trace/fixtures.py +11 -17
  152. phoenix/trace/otel.py +5 -15
  153. phoenix/trace/projects.py +3 -2
  154. phoenix/trace/schemas.py +2 -2
  155. phoenix/trace/span_evaluations.py +9 -8
  156. phoenix/trace/span_json_decoder.py +3 -3
  157. phoenix/trace/span_json_encoder.py +2 -2
  158. phoenix/trace/trace_dataset.py +6 -5
  159. phoenix/trace/utils.py +6 -6
  160. phoenix/utilities/deprecation.py +3 -2
  161. phoenix/utilities/error_handling.py +3 -2
  162. phoenix/utilities/json.py +2 -1
  163. phoenix/utilities/logging.py +2 -2
  164. phoenix/utilities/project.py +1 -1
  165. phoenix/utilities/re.py +3 -4
  166. phoenix/utilities/template_formatters.py +5 -4
  167. phoenix/version.py +1 -1
  168. phoenix/server/static/assets/components-hX0LgYz3.js +0 -1428
  169. {arize_phoenix-5.5.2.dist-info → arize_phoenix-5.6.0.dist-info}/WHEEL +0 -0
  170. {arize_phoenix-5.5.2.dist-info → arize_phoenix-5.6.0.dist-info}/entry_points.txt +0 -0
  171. {arize_phoenix-5.5.2.dist-info → arize_phoenix-5.6.0.dist-info}/licenses/IP_NOTICE +0 -0
  172. {arize_phoenix-5.5.2.dist-info → arize_phoenix-5.6.0.dist-info}/licenses/LICENSE +0 -0
@@ -13,7 +13,6 @@ from typing import (
     Iterator,
     Optional,
     Sequence,
-    Tuple,
     Union,
     cast,
 )
@@ -44,7 +43,7 @@ def encode_evaluations(evaluations: Evaluations) -> Iterator[pb.Evaluation]:
     for index, row in dataframe.iterrows():
         subject_id = _extract_subject_id_from_index(
             index_names,
-            cast(Union[str, Tuple[Any]], index),
+            cast(Union[str, tuple[Any]], index),
         )
         if (result := _extract_result(row)) is None:
             continue
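The hunks above are part of a release-wide sweep replacing deprecated `typing` aliases such as `Tuple` with the builtin generics standardized by PEP 585 (Python 3.9+). A minimal sketch of the pattern, with a hypothetical helper standing in for the diffed code:

    from typing import Any, Union

    def subject_id_of(index: Union[str, tuple[Any, ...]]) -> str:
        # hypothetical helper: tuple[...] replaces typing.Tuple[...];
        # both spell the same type, but the typing alias is deprecated.
        return index if isinstance(index, str) else "_".join(map(str, index))

    assert subject_id_of(("span", 7)) == "span_7"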
@@ -5,23 +5,14 @@ import shutil
 import warnings
 from abc import ABC, abstractmethod
 from collections import UserList
+from collections.abc import Iterable, Mapping
 from datetime import datetime
 from enum import Enum
 from importlib.util import find_spec
 from itertools import chain
 from pathlib import Path
 from tempfile import TemporaryDirectory
-from typing import (
-    TYPE_CHECKING,
-    Any,
-    Iterable,
-    List,
-    Mapping,
-    NamedTuple,
-    Optional,
-    Set,
-    Union,
-)
+from typing import TYPE_CHECKING, Any, NamedTuple, Optional, Union
 from urllib.parse import urljoin
 
 import pandas as pd
@@ -88,8 +79,8 @@ class NotebookEnvironment(Enum):
 
 class ExportedData(_BaseList):
     def __init__(self) -> None:
-        self.paths: Set[Path] = set()
-        self.names: List[str] = []
+        self.paths: set[Path] = set()
+        self.names: list[str] = []
         super().__init__()
 
     def __repr__(self) -> str:
@@ -112,7 +103,7 @@ class Session(TraceDataExtractor, ABC):
     notebook_env: NotebookEnvironment
     """The notebook environment that the session is running in."""
 
-    def __dir__(self) -> List[str]:
+    def __dir__(self) -> list[str]:
         return ["exports", "view", "url"]
 
     def __init__(
@@ -157,7 +148,7 @@ class Session(TraceDataExtractor, ABC):
         # Deprecated fields
         stop_time: Optional[datetime] = None,
         timeout: Optional[int] = DEFAULT_TIMEOUT_IN_SECONDS,
-    ) -> Optional[Union[pd.DataFrame, List[pd.DataFrame]]]:
+    ) -> Optional[Union[pd.DataFrame, list[pd.DataFrame]]]:
         """
         Queries the spans in the project based on the provided parameters.
 
@@ -203,7 +194,7 @@ class Session(TraceDataExtractor, ABC):
     def get_evaluations(
         self,
         project_name: Optional[str] = None,
-    ) -> List[Evaluations]:
+    ) -> list[Evaluations]:
         """
         Get the evaluations for a project.
 
@@ -216,7 +207,7 @@ class Session(TraceDataExtractor, ABC):
 
         Returns
         -------
-        evaluations : List[Evaluations]
+        evaluations : list[Evaluations]
            A list of evaluations for the specified project.
 
         """
@@ -471,7 +462,7 @@ def launch_app(
        Defaults to 6006.
    run_in_thread: bool, optional, default=True
        Whether the server should run in a Thread or Process.
-    default_umap_parameters: Dict[str, Union[int, float]], optional, default=None
+    default_umap_parameters: dict[str, Union[int, float]], optional, default=None
        User specified default UMAP parameters
        eg: {"n_neighbors": 10, "n_samples": 5, "min_dist": 0.5}
    notebook_environment: str, optional, default=None
@@ -638,7 +629,7 @@ def close_app(delete_data: bool = False) -> None:
 def _get_url(host: str, port: int, notebook_env: NotebookEnvironment) -> str:
     """Determines the IFrame URL based on whether this is in a Colab or in a local notebook"""
     if notebook_env == NotebookEnvironment.COLAB:
-        from google.colab.output import eval_js  # type: ignore
+        from google.colab.output import eval_js
 
         return str(eval_js(f"google.colab.kernel.proxyPort({port}, {{'cache': true}})"))
     if notebook_env == NotebookEnvironment.SAGEMAKER:
@@ -656,7 +647,7 @@ def _get_url(host: str, port: int, notebook_env: NotebookEnvironment) -> str:
 def _is_colab() -> bool:
     """Determines whether this is in a Colab"""
     try:
-        import google.colab  # type: ignore # noqa: F401
+        import google.colab  # noqa: F401
     except ImportError:
         return False
     try:
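The import consolidation above also moves collection ABCs (`Iterable`, `Mapping`) from `typing` to `collections.abc`, which has accepted subscripts since Python 3.9. A self-contained sketch of the target style, with an illustrative function not taken from the module:

    from collections.abc import Iterable, Mapping

    def first_keys(rows: Iterable[Mapping[str, int]]) -> list[str]:
        # builtin list[str] replaces typing.List[str]; the ABCs come from
        # collections.abc and are subscriptable at runtime on 3.9+
        return [next(iter(row), "") for row in rows]

    assert first_keys([{"a": 1}, {}]) == ["a", ""]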
@@ -17,21 +17,9 @@ them into a nested list of dictionaries i.e.
 
 import inspect
 import json
-from typing import (
-    Any,
-    DefaultDict,
-    Dict,
-    Iterable,
-    Iterator,
-    List,
-    Mapping,
-    Optional,
-    Sequence,
-    Set,
-    Tuple,
-    Union,
-    cast,
-)
+from collections import defaultdict
+from collections.abc import Iterable, Iterator, Mapping, Sequence
+from typing import Any, Optional, Union, cast
 
 import numpy as np
 from openinference.semconv import trace
@@ -51,7 +39,7 @@ JSON_STRING_ATTRIBUTES = (
     TOOL_PARAMETERS,
 )
 
-SEMANTIC_CONVENTIONS: List[str] = sorted(
+SEMANTIC_CONVENTIONS: list[str] = sorted(
     # e.g. "input.value", "llm.token_count.total", etc.
     (
         cast(str, getattr(klass, attr))
@@ -66,11 +54,11 @@ SEMANTIC_CONVENTIONS: List[str] = sorted(
 
 
 def unflatten(
-    key_value_pairs: Iterable[Tuple[str, Any]],
+    key_value_pairs: Iterable[tuple[str, Any]],
     *,
     prefix_exclusions: Sequence[str] = (),
     separator: str = ".",
-) -> Dict[str, Any]:
+) -> dict[str, Any]:
     # `prefix_exclusions` is intended to contain the semantic conventions
     trie = _build_trie(key_value_pairs, separator=separator, prefix_exclusions=prefix_exclusions)
     return dict(_walk(trie, separator=separator))
@@ -83,7 +71,7 @@ def flatten(
     separator: str = ".",
     recurse_on_sequence: bool = False,
     json_string_attributes: Optional[Sequence[str]] = None,
-) -> Iterator[Tuple[str, Any]]:
+) -> Iterator[tuple[str, Any]]:
     """
     Flatten a nested dictionary or a sequence of dictionaries into a list of
     key value pairs. If `recurse_on_sequence` is True, then the function will
@@ -149,7 +137,7 @@ def get_attribute_value(
     return attributes.get(sub_keys[-1])
 
 
-def load_json_strings(key_values: Iterable[Tuple[str, Any]]) -> Iterator[Tuple[str, Any]]:
+def load_json_strings(key_values: Iterable[tuple[str, Any]]) -> Iterator[tuple[str, Any]]:
     for key, value in key_values:
         if key.endswith(JSON_STRING_ATTRIBUTES):
             try:
@@ -167,7 +155,7 @@ def _partition_with_prefix_exclusion(
     key: str,
     separator: str = ".",
     prefix_exclusions: Sequence[str] = (),
-) -> Tuple[str, str, str]:
+) -> tuple[str, str, str]:
     """
     Partition `key` by `separator`, but exclude prefixes in `prefix_exclusions`,
     which is usually the list of semantic conventions. `prefix_exclusions` should
@@ -181,7 +169,7 @@ def _partition_with_prefix_exclusion(
     return key.partition(separator)
 
 
-class _Trie(DefaultDict[Union[str, int], "_Trie"]):
+class _Trie(defaultdict[Union[str, int], "_Trie"]):
     """
     Prefix Tree with special handling for indices (i.e. all-digit keys). Indices
     represent the position of an element in a nested list, while branches represent
@@ -191,8 +179,8 @@ class _Trie(DefaultDict[Union[str, int], "_Trie"]):
     def __init__(self) -> None:
         super().__init__(_Trie)
         self.value: Any = None
-        self.indices: Set[int] = set()
-        self.branches: Set[Union[str, int]] = set()
+        self.indices: set[int] = set()
+        self.branches: set[Union[str, int]] = set()
 
     def set_value(self, value: Any) -> None:
         self.value = value
@@ -215,7 +203,7 @@ class _Trie(DefaultDict[Union[str, int], "_Trie"]):
 
 
 def _build_trie(
-    key_value_pairs: Iterable[Tuple[str, Any]],
+    key_value_pairs: Iterable[tuple[str, Any]],
     *,
     prefix_exclusions: Sequence[str] = (),
     separator: str = ".",
@@ -254,7 +242,7 @@ def _walk(
     *,
     prefix: str = "",
     separator: str = ".",
-) -> Iterator[Tuple[str, Any]]:
+) -> Iterator[tuple[str, Any]]:
     """
     Walk the Trie and yield key value pairs. If the Trie node has a value, then
     yield the prefix and the value. If the Trie node has indices, then yield the
@@ -286,7 +274,7 @@ def _flatten_mapping(
     recurse_on_sequence: bool = False,
     json_string_attributes: Optional[Sequence[str]] = None,
     separator: str = ".",
-) -> Iterator[Tuple[str, Any]]:
+) -> Iterator[tuple[str, Any]]:
     """
     Flatten a nested dictionary into a list of key value pairs. If `recurse_on_sequence`
     is True, then the function will also recursively flatten nested sequences of dictionaries.
@@ -327,7 +315,7 @@ def _flatten_sequence(
     recurse_on_sequence: bool = False,
     json_string_attributes: Optional[Sequence[str]] = None,
     separator: str = ".",
-) -> Iterator[Tuple[str, Any]]:
+) -> Iterator[tuple[str, Any]]:
     """
     Flatten a sequence of dictionaries into a list of key value pairs. If `recurse_on_sequence`
     is True, then the function will also recursively flatten nested sequences of dictionaries.
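The `_Trie` change above relies on `collections.defaultdict` itself being generic since Python 3.9, so the class can subclass `defaultdict[...]` directly instead of `typing.DefaultDict[...]`. A stripped-down stand-in (not the real `_Trie`, which also tracks `value`, `indices`, and `branches`):

    from collections import defaultdict
    from typing import Union

    class Trie(defaultdict[Union[str, int], "Trie"]):
        def __init__(self) -> None:
            super().__init__(Trie)  # missing keys auto-create child tries

    t = Trie()
    _ = t["llm"][0]["name"]  # intermediate nodes are created on access
    assert isinstance(t["llm"][0], Trie)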
@@ -17,7 +17,7 @@ from typing_extensions import TypeAlias, TypeGuard, assert_never
 import phoenix.trace.v1 as pb
 from phoenix.db import models
 
-_VALID_EVAL_ATTRIBUTES: typing.Tuple[str, ...] = tuple(
+_VALID_EVAL_ATTRIBUTES: tuple[str, ...] = tuple(
     field.name for field in pb.Evaluation.Result.DESCRIPTOR.fields
 )
 
@@ -59,7 +59,7 @@ class AliasedAnnotationRelation:
         object.__setattr__(self, "table", table)
 
     @property
-    def attributes(self) -> typing.Iterator[typing.Tuple[str, Mapped[typing.Any]]]:
+    def attributes(self) -> typing.Iterator[tuple[str, Mapped[typing.Any]]]:
         """
         Alias names and attributes (i.e., columns) of the `span_annotation`
         relation.
@@ -80,7 +80,7 @@ class AliasedAnnotationRelation:
 
 # Because postgresql is strongly typed, we cast JSON values to string
 # by default unless it's hinted otherwise as done here.
-_FLOAT_ATTRIBUTES: typing.FrozenSet[str] = frozenset(
+_FLOAT_ATTRIBUTES: frozenset[str] = frozenset(
     {
         "llm.token_count.completion",
         "llm.token_count.prompt",
@@ -142,12 +142,8 @@ class SpanFilter:
     valid_eval_names: typing.Optional[typing.Sequence[str]] = None
     translated: ast.Expression = field(init=False, repr=False)
     compiled: typing.Any = field(init=False, repr=False)
-    _aliased_annotation_relations: typing.Tuple[AliasedAnnotationRelation] = field(
-        init=False, repr=False
-    )
-    _aliased_annotation_attributes: typing.Dict[str, Mapped[typing.Any]] = field(
-        init=False, repr=False
-    )
+    _aliased_annotation_relations: tuple[AliasedAnnotationRelation] = field(init=False, repr=False)
+    _aliased_annotation_attributes: dict[str, Mapped[typing.Any]] = field(init=False, repr=False)
 
     def __bool__(self) -> bool:
         return bool(self.condition)
@@ -198,7 +194,7 @@ class SpanFilter:
             )
         )
 
-    def to_dict(self) -> typing.Dict[str, typing.Any]:
+    def to_dict(self) -> dict[str, typing.Any]:
         return {"condition": self.condition}
 
     @classmethod
@@ -439,7 +435,7 @@ class _ProjectionTranslator(ast.NodeTransformer):
 class _FilterTranslator(_ProjectionTranslator):
     def visit_Compare(self, node: ast.Compare) -> typing.Any:
         if len(node.comparators) > 1:
-            args: typing.List[typing.Any] = []
+            args: list[typing.Any] = []
             left = node.left
             for i, (op, comparator) in enumerate(zip(node.ops, node.comparators)):
                 args.append(self.visit(ast.Compare(left=left, ops=[op], comparators=[comparator])))
@@ -540,7 +536,7 @@ class _FilterTranslator(_ProjectionTranslator):
 def _validate_expression(
     expression: ast.Expression,
     valid_eval_names: typing.Optional[typing.Sequence[str]] = None,
-    valid_eval_attributes: typing.Tuple[str, ...] = _VALID_EVAL_ATTRIBUTES,
+    valid_eval_attributes: tuple[str, ...] = _VALID_EVAL_ATTRIBUTES,
 ) -> None:
     """
     Validate primarily the structural (i.e. not semantic) characteristics of an
@@ -638,7 +634,7 @@ def _validate_expression(
 
 
 def _as_attribute(
-    keys: typing.List[ast.Constant],
+    keys: list[ast.Constant],
     # as_float: typing.Optional[bool] = None,
 ) -> ast.Subscript:
     return ast.Subscript(
@@ -675,14 +671,14 @@ def _is_subscript(
 
 def _get_attribute_keys_list(
     node: typing.Any,
-) -> typing.Optional[typing.List[ast.Constant]]:
+) -> typing.Optional[list[ast.Constant]]:
     # e.g. `attributes["key"]` -> `["key"]`
     # e.g. `attributes["a"]["b.c"][["d"]]` -> `["a", "b.c", "d"]`
     # e.g. `attributes["a"][["b.c", "d"]]` -> `["a", "b.c", "d"]`
     # e.g. `metadata["key"]` -> `["metadata", "key"]`
     # e.g. `metadata["a"]["b.c"][["d"]]` -> `["metadata", "a", "b.c", "d"]`
     # e.g. `metadata["a"][["b.c", "d"]]` -> `["metadata", "a", "b.c", "d"]`
-    keys: typing.List[ast.Constant] = []
+    keys: list[ast.Constant] = []
     if isinstance(node, ast.Attribute):
         while isinstance(node, ast.Attribute):
             keys.append(ast.Constant(value=node.attr, kind=None))
@@ -707,7 +703,7 @@ def _get_attribute_keys_list(
 
 def _get_subscript_keys_list(
     node: ast.Subscript,
-) -> typing.Optional[typing.List[ast.Constant]]:
+) -> typing.Optional[list[ast.Constant]]:
     child = node.slice
     if isinstance(child, ast.Constant):
         if not isinstance(child.value, (str, int)) or isinstance(child.value, bool):
@@ -751,7 +747,7 @@ def _disjunction(choices: typing.Sequence[str]) -> str:
 
 def _find_best_match(
     source: str, choices: typing.Iterable[str]
-) -> typing.Tuple[typing.Optional[str], float]:
+) -> tuple[typing.Optional[str], float]:
     best_choice, best_score = None, 0.0
     for choice in choices:
         score = SequenceMatcher(None, source, choice).ratio()
@@ -762,9 +758,9 @@ def _find_best_match(
 
 def _apply_eval_aliasing(
     source: str,
-) -> typing.Tuple[
+) -> tuple[
     str,
-    typing.Tuple[AliasedAnnotationRelation, ...],
+    tuple[AliasedAnnotationRelation, ...],
 ]:
     """
     Substitutes `evals[<eval-name>].<attribute>` with aliases. Returns the
@@ -784,7 +780,7 @@ def _apply_eval_aliasing(
     span_annotation_0_label_123 == 'correct' or span_annotation_0_score_456 < 0.5
     ```
     """
-    eval_aliases: typing.Dict[AnnotationName, AliasedAnnotationRelation] = {}
+    eval_aliases: dict[AnnotationName, AliasedAnnotationRelation] = {}
     for (
         annotation_expression,
         annotation_type,
@@ -802,7 +798,7 @@ def _apply_eval_aliasing(
 def _parse_annotation_expressions_and_names(
     source: str,
 ) -> typing.Iterator[
-    typing.Tuple[AnnotationExpression, AnnotationType, AnnotationName, AnnotationAttribute]
+    tuple[AnnotationExpression, AnnotationType, AnnotationName, AnnotationAttribute]
 ]:
     """
     Parses filter conditions for evaluation expressions of the form:
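This module keeps its module-style access to `typing` (`typing.Optional`, `typing.Iterable`) while switching containers to builtin generics; the two styles mix freely. A runnable sketch shaped like the `_find_best_match` helper visible above, assuming Python 3.9+ (the name `best_match` is illustrative):

    import typing
    from difflib import SequenceMatcher

    def best_match(source: str, choices: typing.Iterable[str]) -> tuple[typing.Optional[str], float]:
        # builtin tuple[...] in the return type, typing.* for the rest
        best, score = None, 0.0
        for choice in choices:
            if (r := SequenceMatcher(None, source, choice).ratio()) > score:
                best, score = choice, r
        return best, score

    assert best_match("labl", ["label", "score"])[0] == "label"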
@@ -1,6 +1,6 @@
 import warnings
 from datetime import datetime
-from typing import List, Optional, Protocol, Tuple, Union, cast
+from typing import Optional, Protocol, Union, cast
 
 import pandas as pd
 from openinference.semconv.trace import DocumentAttributes, SpanAttributes
@@ -31,7 +31,7 @@ class CanQuerySpans(Protocol):
         start_time: Optional[datetime] = None,
         end_time: Optional[datetime] = None,
         project_name: Optional[str] = None,
-    ) -> Optional[Union[pd.DataFrame, List[pd.DataFrame]]]: ...
+    ) -> Optional[Union[pd.DataFrame, list[pd.DataFrame]]]: ...
 
 
 def get_retrieved_documents(
@@ -94,7 +94,7 @@ def get_qa_with_reference(
         .with_index("trace_id")
     )
     df_qa, df_docs = cast(
-        Tuple[pd.DataFrame, pd.DataFrame],
+        tuple[pd.DataFrame, pd.DataFrame],
        obj.query_spans(
            qa_query,
            docs_query,
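`CanQuerySpans` above is a `Protocol`, so any client whose `query_spans` signature matches (including the new `list[pd.DataFrame]` return) satisfies it structurally. A minimal sketch with hypothetical names:

    from typing import Optional, Protocol, Union

    class CanQuery(Protocol):
        def query(self, name: Optional[str] = None) -> Optional[Union[int, list[int]]]: ...

    class StubClient:  # hypothetical: no inheritance from CanQuery needed
        def query(self, name: Optional[str] = None) -> Optional[Union[int, list[int]]]:
            return [1, 2, 3] if name else None

    def run(q: CanQuery) -> None:
        assert q.query("demo") == [1, 2, 3]

    run(StubClient())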
@@ -1,22 +1,13 @@
 import warnings
 from collections import defaultdict
+from collections.abc import Iterable, Mapping, Sequence
 from dataclasses import dataclass, field, replace
 from datetime import datetime
 from functools import cached_property
 from itertools import chain
 from random import randint, random
 from types import MappingProxyType
-from typing import (
-    Any,
-    DefaultDict,
-    Dict,
-    Iterable,
-    List,
-    Mapping,
-    Optional,
-    Sequence,
-    cast,
-)
+from typing import Any, Optional, cast
 
 import pandas as pd
 from openinference.semconv.trace import SpanAttributes
@@ -83,7 +74,7 @@ class Projection(_Base):
     def __call__(self) -> SQLColumnExpression[Any]:
         return self._projector()
 
-    def to_dict(self) -> Dict[str, Any]:
+    def to_dict(self) -> dict[str, Any]:
         return {"key": self.key}
 
     @classmethod
@@ -138,7 +129,7 @@ class Explosion(_HasTmpSuffix, Projection):
         object.__setattr__(self, "_array_tmp_col_label", f"__array_tmp_col_{random()}")
 
     @cached_property
-    def index_keys(self) -> List[str]:
+    def index_keys(self) -> list[str]:
         return [self._primary_index.key, f"{self._position_prefix}position"]
 
     def with_primary_index_key(self, _: str) -> "Explosion":
@@ -215,7 +206,7 @@ class Explosion(_HasTmpSuffix, Projection):
         # Because sqlite doesn't support `WITH ORDINALITY`, the order of
         # the returned (table) values is not guaranteed. So we resort to
         # post hoc processing using pandas.
-        def _extract_values(array: List[Any]) -> List[Dict[str, Any]]:
+        def _extract_values(array: list[Any]) -> list[dict[str, Any]]:
             if not isinstance(array, Iterable):
                 return []
             if not self.kwargs:
@@ -227,11 +218,11 @@ class Explosion(_HasTmpSuffix, Projection):
                 for i, obj in enumerate(array)
                 if isinstance(obj, Mapping)
             ]
-            res: List[Dict[str, Any]] = []
+            res: list[dict[str, Any]] = []
             for i, obj in enumerate(array):
                 if not isinstance(obj, Mapping):
                     continue
-                values: Dict[str, Any] = {f"{self._position_prefix}position": i}
+                values: dict[str, Any] = {f"{self._position_prefix}position": i}
                 for name, key in self.kwargs.items():
                     if (value := get_attribute_value(obj, key)) is not None:
                         values[name] = value
@@ -261,7 +252,7 @@ class Explosion(_HasTmpSuffix, Projection):
             df = df.set_index(self.index_keys)
         return df
 
-    def to_dict(self) -> Dict[str, Any]:
+    def to_dict(self) -> dict[str, Any]:
         return {
             **super().to_dict(),
             **({"kwargs": dict(self.kwargs)} if self.kwargs else {}),
@@ -384,12 +375,12 @@ class Concatenation(_HasTmpSuffix, Projection):
         # Because SQLite doesn't support `WITH ORDINALITY`, the order of
         # the returned table-values is not guaranteed. So we resort to
         # post hoc processing using pandas.
-        def _concat_values(array: List[Any]) -> Dict[str, Any]:
+        def _concat_values(array: list[Any]) -> dict[str, Any]:
             if not isinstance(array, Iterable):
                 return {}
             if not self.kwargs:
                 return {self.key: self.separator.join(str(obj) for obj in array)}
-            values: DefaultDict[str, List[str]] = defaultdict(list)
+            values: defaultdict[str, list[str]] = defaultdict(list)
             for i, obj in enumerate(array):
                 if not isinstance(obj, Mapping):
                     continue
@@ -407,7 +398,7 @@ class Concatenation(_HasTmpSuffix, Projection):
             assert_never(dialect)
         return df
 
-    def to_dict(self) -> Dict[str, Any]:
+    def to_dict(self) -> dict[str, Any]:
         return {
             **super().to_dict(),
             **({"kwargs": dict(self.kwargs)} if self.kwargs else {}),
@@ -619,7 +610,7 @@ class SpanQuery(_HasTmpSuffix):
         df = df.rename(self._rename, axis=1, errors="ignore")
         return df
 
-    def to_dict(self) -> Dict[str, Any]:
+    def to_dict(self) -> dict[str, Any]:
         return {
             **(
                 {"select": {name: proj.to_dict() for name, proj in self._select.items()}}
@@ -771,7 +762,7 @@ def _outer_join(left: pd.DataFrame, right: pd.DataFrame) -> pd.DataFrame:
     return df
 
 
-def _flatten_semantic_conventions(attributes: Mapping[str, Any]) -> Dict[str, Any]:
+def _flatten_semantic_conventions(attributes: Mapping[str, Any]) -> dict[str, Any]:
     # This may be inefficient, but is needed to preserve backward-compatibility.
     # For example, custom attributes do not get flattened.
     ans = unflatten(
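The `_concat_values` hunk above groups values per key with a `defaultdict(list)`, now annotated as `defaultdict[str, list[str]]` (subscriptable since Python 3.9). The grouping-then-joining pattern in isolation, with made-up data:

    from collections import defaultdict

    values: defaultdict[str, list[str]] = defaultdict(list)
    for name, text in [("doc", "a"), ("doc", "b"), ("meta", "c")]:
        values[name].append(text)

    # join each group, as _concat_values does with its separator
    assert {k: "\n".join(v) for k, v in values.items()} == {"doc": "a\nb", "meta": "c"}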
phoenix/trace/fixtures.py CHANGED
@@ -2,6 +2,7 @@ import logging
 import shutil
 from binascii import hexlify
 from collections import defaultdict
+from collections.abc import Iterable, Iterator, Sequence
 from dataclasses import dataclass, field, replace
 from datetime import datetime, timezone
 from io import StringIO
@@ -9,15 +10,8 @@ from random import getrandbits
 from tempfile import NamedTemporaryFile
 from time import sleep, time
 from typing import (
-    DefaultDict,
-    Dict,
-    Iterable,
-    Iterator,
-    List,
     NamedTuple,
     Optional,
-    Sequence,
-    Tuple,
     cast,
 )
 from urllib.parse import urljoin
@@ -280,7 +274,7 @@ random_fixture = TracesFixture(
     file_name="random.jsonl",
 )
 
-TRACES_FIXTURES: List[TracesFixture] = [
+TRACES_FIXTURES: list[TracesFixture] = [
     demo_llama_index_rag_fixture,
     demo_llama_index_rag_llm_fixture,
     demo_langgraph_agent_fixture,
@@ -298,10 +292,10 @@ TRACES_FIXTURES: List[TracesFixture] = [
     anthropic_tools_fixture,
 ]
 
-NAME_TO_TRACES_FIXTURE: Dict[str, TracesFixture] = {
+NAME_TO_TRACES_FIXTURE: dict[str, TracesFixture] = {
     fixture.name: fixture for fixture in TRACES_FIXTURES
 }
-PROJ_NAME_TO_TRACES_FIXTURE: DefaultDict[str, List[TracesFixture]] = defaultdict(list)
+PROJ_NAME_TO_TRACES_FIXTURE: defaultdict[str, list[TracesFixture]] = defaultdict(list)
 for fixture in TRACES_FIXTURES:
     if fixture.project_name:
         PROJ_NAME_TO_TRACES_FIXTURE[fixture.project_name].append(fixture)
@@ -322,7 +316,7 @@ def get_trace_fixture_by_name(fixture_name: str) -> TracesFixture:
     return NAME_TO_TRACES_FIXTURE[fixture_name]
 
 
-def get_trace_fixtures_by_project_name(proj_name: str) -> List[TracesFixture]:
+def get_trace_fixtures_by_project_name(proj_name: str) -> list[TracesFixture]:
     """
     Returns a dictionary of project name (key) and set of TracesFixtures (value)
    whose project name matches the input name.
@@ -437,7 +431,7 @@ def _read_eval_fixture(eval_fixture: EvaluationFixture) -> Iterator[pb.Evaluation]:
         explanation=StringValue(value=cast(str, explanation)) if explanation else None,
     )
     if isinstance(eval_fixture, DocumentEvaluationFixture):
-        span_id, document_position = cast(Tuple[str, int], index)
+        span_id, document_position = cast(tuple[str, int], index)
         # Legacy fixture files contain UUID strings for span_ids. The hyphens in these
         # strings need to be removed because we are also removing the hyphens from the
         # span_ids of their corresponding traces. In general, hyphen is not an allowed
@@ -476,10 +470,10 @@ def _url(
 def reset_fixture_span_ids_and_timestamps(
     spans: Iterable[Span],
     evals: Iterable[pb.Evaluation] = (),
-) -> Tuple[List[Span], List[pb.Evaluation]]:
+) -> tuple[list[Span], list[pb.Evaluation]]:
     old_spans, old_evals = list(spans), list(evals)
-    new_trace_ids: Dict[str, str] = {}
-    new_span_ids: Dict[str, str] = {}
+    new_trace_ids: dict[str, str] = {}
+    new_span_ids: dict[str, str] = {}
     for old_span in old_spans:
         new_trace_ids[old_span.context.trace_id] = _new_trace_id()
         new_span_ids[old_span.context.span_id] = _new_span_id()
@@ -495,8 +489,8 @@ def reset_fixture_span_ids_and_timestamps(
             new_span_ids[span_id] = _new_span_id()
     max_end_time = max(old_span.end_time for old_span in old_spans)
     time_diff = datetime.now(timezone.utc) - max_end_time
-    new_spans: List[Span] = []
-    new_evals: List[pb.Evaluation] = []
+    new_spans: list[Span] = []
+    new_evals: list[pb.Evaluation] = []
     for old_span in old_spans:
         new_trace_id = new_trace_ids[old_span.context.trace_id]
         new_span_id = new_span_ids[old_span.context.span_id]
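`reset_fixture_span_ids_and_timestamps` above builds plain `dict[str, str]` tables mapping old trace and span ids to freshly generated ones. A condensed sketch of that remapping pattern, with made-up ids and `secrets.token_hex` standing in for the module's own id generators:

    from secrets import token_hex

    def remap_ids(old_ids: list[str]) -> dict[str, str]:
        # each old id gets a fresh random hex id, preserving a one-to-one mapping
        return {old: token_hex(8) for old in old_ids}

    mapping = remap_ids(["span-a", "span-b"])
    assert set(mapping) == {"span-a", "span-b"}
    assert len(set(mapping.values())) == 2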
phoenix/trace/otel.py CHANGED
@@ -1,19 +1,9 @@
 import json
 from binascii import hexlify, unhexlify
+from collections.abc import Iterable, Iterator, Mapping, Sequence
 from datetime import datetime, timezone
 from types import MappingProxyType
-from typing import (
-    Any,
-    Dict,
-    Iterable,
-    Iterator,
-    Mapping,
-    Optional,
-    Sequence,
-    SupportsFloat,
-    Tuple,
-    cast,
-)
+from typing import Any, Optional, SupportsFloat, cast
 
 import numpy as np
 import opentelemetry.proto.trace.v1.trace_pb2 as otlp
@@ -133,7 +123,7 @@ def _decode_unix_nano(time_unix_nano: int) -> datetime:
 
 def _decode_key_values(
     key_values: Iterable[KeyValue],
-) -> Iterator[Tuple[str, Any]]:
+) -> Iterator[tuple[str, Any]]:
     return ((kv.key, _decode_value(kv.value)) for kv in key_values)
 
 
@@ -169,7 +159,7 @@ _STATUS_DECODING = MappingProxyType(
 )
 
 
-def _decode_status(otlp_status: otlp.Status) -> Tuple[SpanStatusCode, StatusMessage]:
+def _decode_status(otlp_status: otlp.Status) -> tuple[SpanStatusCode, StatusMessage]:
     status_code = _STATUS_DECODING.get(otlp_status.code, SpanStatusCode.UNSET)
     return status_code, otlp_status.message
 
@@ -186,7 +176,7 @@ def encode_span_to_otlp(span: Span) -> otlp.Span:
     start_time_unix_nano: int = int(span.start_time.timestamp() * _BILLION)
     end_time_unix_nano: int = int(span.end_time.timestamp() * _BILLION) if span.end_time else 0
 
-    attributes: Dict[str, Any] = dict(span.attributes)
+    attributes: dict[str, Any] = dict(span.attributes)
 
     for key, value in span.attributes.items():
         if value is None:
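`encode_span_to_otlp` above converts `datetime` timestamps to OTLP's integer nanoseconds via the module's `_BILLION` constant. A self-contained sketch of that round trip, assuming UTC-aware datetimes (function names here are illustrative):

    from datetime import datetime, timezone

    _BILLION = 1_000_000_000

    def to_unix_nano(dt: datetime) -> int:
        return int(dt.timestamp() * _BILLION)

    def from_unix_nano(ns: int) -> datetime:
        return datetime.fromtimestamp(ns / _BILLION, tz=timezone.utc)

    t = datetime(2024, 1, 1, tzinfo=timezone.utc)
    assert from_unix_nano(to_unix_nano(t)) == t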
phoenix/trace/projects.py CHANGED
@@ -1,5 +1,6 @@
 import types
-from typing import Any, Callable, Optional, Type
+from collections.abc import Callable
+from typing import Any, Optional
 
 from openinference.semconv.resource import ResourceAttributes
 from opentelemetry.sdk import trace
@@ -58,7 +59,7 @@ class using_project:
 
     def __exit__(
         self,
-        exc_type: Optional[Type[BaseException]],
+        exc_type: Optional[type[BaseException]],
         exc_value: Optional[BaseException],
         traceback: Optional[types.TracebackType],
     ) -> None:
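The `using_project.__exit__` signature above now uses the builtin `type` as a generic (`Optional[type[BaseException]]`) where `typing.Type` was used before. An illustrative context manager written the same way (a stand-in, not the real `using_project`):

    import types
    from typing import Optional

    class using_label:
        def __enter__(self) -> "using_label":
            return self

        def __exit__(
            self,
            exc_type: Optional[type[BaseException]],
            exc_value: Optional[BaseException],
            traceback: Optional[types.TracebackType],
        ) -> None:
            pass  # real implementations restore prior state here

    with using_label():
        pass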